# Copyright 2014-2017 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Default functionals defined on any space similar to R^n or L^2."""
from __future__ import print_function, division, absolute_import
from numbers import Integral
import numpy as np
from odl.solvers.functional.functional import (Functional,
FunctionalQuadraticPerturb)
from odl.space import ProductSpace
from odl.operator import (Operator, ConstantOperator, ZeroOperator,
ScalingOperator, DiagonalOperator, PointwiseNorm)
from odl.solvers.nonsmooth.proximal_operators import (
proximal_l1, proximal_convex_conj_l1,
proximal_l1_l2, proximal_convex_conj_l1_l2,
proximal_l2, proximal_convex_conj_l2, proximal_l2_squared,
proximal_huber,
proximal_const_func, proximal_box_constraint,
proximal_convex_conj_kl, proximal_convex_conj_kl_cross_entropy,
combine_proximals, proximal_convex_conj)
from odl.util import conj_exponent, moveaxis
__all__ = ('ZeroFunctional', 'ConstantFunctional', 'ScalingFunctional',
'IdentityFunctional',
'LpNorm', 'L1Norm', 'GroupL1Norm', 'L2Norm', 'L2NormSquared',
'Huber', 'NuclearNorm',
'IndicatorZero', 'IndicatorBox', 'IndicatorNonnegativity',
'IndicatorLpUnitBall', 'IndicatorGroupL1UnitBall',
'IndicatorNuclearNormUnitBall',
'KullbackLeibler', 'KullbackLeiblerCrossEntropy',
'QuadraticForm',
'SeparableSum', 'MoreauEnvelope')
class LpNorm(Functional):
"""The functional corresponding to the Lp-norm.
Notes
-----
If the functional is defined on an :math:`\mathbb{R}^n`-like space, the
:math:`\| \cdot \|_p`-norm is defined as
.. math::
\| x \|_p = \\left(\\sum_{i=1}^n |x_i|^p \\right)^{1/p}.
If the functional is defined on an :math:`L_2`-like space, the
:math:`\| \cdot \|_p`-norm is defined as
.. math::
\| x \|_p = \\left(\\int_\Omega |x(t)|^p dt \\right)^{1/p}.
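Examples
--------
A small usage sketch (assuming ``odl`` is importable):
>>> space = odl.rn(3)
>>> l1_norm = odl.solvers.LpNorm(space, exponent=1)
>>> l1_norm([1, -2, 3])
6.0
>>> linf_norm = odl.solvers.LpNorm(space, exponent=float('inf'))
>>> linf_norm([1, -2, 3])
3.0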
"""
def __init__(self, space, exponent):
"""Initialize a new instance.
Parameters
----------
space : `DiscreteLp` or `TensorSpace`
Domain of the functional.
exponent : float
Exponent for the norm (``p``).
"""
super(LpNorm, self).__init__(
space=space, linear=False, grad_lipschitz=np.nan)
self.exponent = float(exponent)
# TODO: update when integration operator is in place: issue #440
def _call(self, x):
"""Return the Lp-norm of ``x``."""
if self.exponent == 0:
return self.domain.one().inner(np.not_equal(x, 0))
elif self.exponent == 1:
return x.ufuncs.absolute().inner(self.domain.one())
elif self.exponent == 2:
return np.sqrt(x.inner(x))
elif np.isfinite(self.exponent):
tmp = x.ufuncs.absolute()
tmp.ufuncs.power(self.exponent, out=tmp)
return np.power(tmp.inner(self.domain.one()), 1 / self.exponent)
elif self.exponent == np.inf:
return x.ufuncs.absolute().ufuncs.max()
elif self.exponent == -np.inf:
return x.ufuncs.absolute().ufuncs.min()
else:
raise RuntimeError('unknown exponent')
@property
def convex_conj(self):
"""The convex conjugate functional of the Lp-norm."""
return IndicatorLpUnitBall(self.domain,
exponent=conj_exponent(self.exponent))
@property
def proximal(self):
"""Return the proximal factory of the functional.
See Also
--------
odl.solvers.nonsmooth.proximal_operators.proximal_l1 :
proximal factory for the L1-norm.
odl.solvers.nonsmooth.proximal_operators.proximal_l2 :
proximal factory for the L2-norm.
"""
if self.exponent == 1:
return proximal_l1(space=self.domain)
elif self.exponent == 2:
return proximal_l2(space=self.domain)
else:
raise NotImplementedError('`proximal` only implemented for p=1 or '
'p=2')
@property
def gradient(self):
"""Gradient operator of the functional.
The functional is not differentiable at ``x=0``. However, when
evaluated at this point, the gradient operator returns 0.
"""
functional = self
if self.exponent == 1:
class L1Gradient(Operator):
"""The gradient operator of this functional."""
def __init__(self):
"""Initialize a new instance."""
super(L1Gradient, self).__init__(
functional.domain, functional.domain, linear=False)
def _call(self, x):
"""Apply the gradient operator to the given point."""
return x.ufuncs.sign()
def derivative(self, x):
"""Derivative is a.e. zero."""
return ZeroOperator(self.domain)
return L1Gradient()
elif self.exponent == 2:
class L2Gradient(Operator):
"""The gradient operator of this functional."""
def __init__(self):
"""Initialize a new instance."""
super(L2Gradient, self).__init__(
functional.domain, functional.domain, linear=False)
def _call(self, x):
"""Apply the gradient operator to the given point.
The gradient is not defined in 0.
"""
norm_of_x = x.norm()
if norm_of_x == 0:
return self.domain.zero()
else:
return x / norm_of_x
return L2Gradient()
else:
raise NotImplementedError('`gradient` only implemented for p=1 or '
'p=2')
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, {!r})'.format(self.__class__.__name__,
self.domain,
self.exponent)
class GroupL1Norm(Functional):
"""The functional corresponding to the mixed L1--Lp norm on `ProductSpace`.
The L1-norm, ``|| ||x||_p ||_1``, is defined as the integral/sum of
``||x||_p``, where ``||x||_p`` is the pointwise p-norm.
This is also known as the cross norm.
Notes
-----
If the functional is defined on an :math:`\mathbb{R}^{n \\times m}`-like
space, the group :math:`L_1`-norm, denoted
:math:`\| \\cdot \|_{\\times, p}` is defined as
.. math::
\|F\|_{\\times, p} =
\\sum_{i = 1}^n \\left(\\sum_{j=1}^m |F_{i,j}|^p\\right)^{1/p}
If the functional is defined on an :math:`(\\mathcal{L}^p)^m`-like space,
the group :math:`L_1`-norm is defined as
.. math::
\| F \|_{\\times, p} =
\\int_{\Omega} \\left(\\sum_{j = 1}^m |F_j(x)|^p\\right)^{1/p}
\mathrm{d}x.
"""
def __init__(self, vfspace, exponent=None):
"""Initialize a new instance.
Parameters
----------
vfspace : `ProductSpace`
Space of vector fields on which the operator acts.
It has to be a product space of identical spaces, i.e. a
power space.
exponent : non-zero float, optional
Exponent of the norm in each point. Values between
0 and 1 are currently not supported due to numerical
instability. Infinity gives the supremum norm.
Default: ``vfspace.exponent``, usually 2.
Examples
--------
>>> space = odl.rn(2)
>>> pspace = odl.ProductSpace(space, 2)
>>> op = GroupL1Norm(pspace)
>>> op([[3, 3], [4, 4]])
10.0
Set exponent of inner (p) norm:
>>> op2 = GroupL1Norm(pspace, exponent=1)
>>> op2([[3, 3], [4, 4]])
14.0
"""
if not isinstance(vfspace, ProductSpace):
raise TypeError('`space` must be a `ProductSpace`')
if not vfspace.is_power_space:
raise TypeError('`space.is_power_space` must be `True`')
super(GroupL1Norm, self).__init__(
space=vfspace, linear=False, grad_lipschitz=np.nan)
self.pointwise_norm = PointwiseNorm(vfspace, exponent)
def _call(self, x):
"""Return the group L1-norm of ``x``."""
# TODO: update when integration operator is in place: issue #440
pointwise_norm = self.pointwise_norm(x)
return pointwise_norm.inner(pointwise_norm.space.one())
@property
def gradient(self):
"""Gradient operator of the functional.
The functional is not differentiable at ``x=0``. However, when
evaluated at this point, the gradient operator returns 0.
Notes
-----
The gradient is given by
.. math::
\\left[ \\nabla \| \|f\|_1 \|_1 \\right]_i =
\\frac{f_i}{|f_i|}
.. math::
\\left[ \\nabla \| \|f\|_2 \|_1 \\right]_i =
\\frac{f_i}{\|f\|_2}
and for general :math:`p`:
.. math::
\\left[ \\nabla \| \|f\|_p \|_1 \\right]_i =
\\frac{| f_i |^{p-2} f_i}{\|f\|_p^{p-1}}
"""
functional = self
class GroupL1Gradient(Operator):
"""The gradient operator of the `GroupL1Norm` functional."""
def __init__(self):
"""Initialize a new instance."""
super(GroupL1Gradient, self).__init__(
functional.domain, functional.domain, linear=False)
def _call(self, x, out):
"""Return ``self(x)``."""
pwnorm_x = functional.pointwise_norm(x)
pwnorm_x.ufuncs.sign(out=pwnorm_x)
functional.pointwise_norm.derivative(x).adjoint(pwnorm_x,
out=out)
return out
return GroupL1Gradient()
@property
def proximal(self):
"""Return the ``proximal factory`` of the functional.
See Also
--------
proximal_l1 : `proximal factory` for the L1-norm.
"""
if self.pointwise_norm.exponent == 1:
return proximal_l1(space=self.domain)
elif self.pointwise_norm.exponent == 2:
return proximal_l1_l2(space=self.domain)
else:
raise NotImplementedError('`proximal` only implemented for p = 1 '
'or 2')
@property
def convex_conj(self):
"""The convex conjugate functional of the group L1-norm."""
conj_exp = conj_exponent(self.pointwise_norm.exponent)
return IndicatorGroupL1UnitBall(self.domain, exponent=conj_exp)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, exponent={})'.format(self.__class__.__name__,
self.domain,
self.pointwise_norm.exponent)
class IndicatorGroupL1UnitBall(Functional):
"""The convex conjugate to the mixed L1--Lp norm on `ProductSpace`.
See Also
--------
GroupL1Norm
"""
def __init__(self, vfspace, exponent=None):
"""Initialize a new instance.
Parameters
----------
vfspace : `ProductSpace`
Space of vector fields on which the operator acts.
It has to be a product space of identical spaces, i.e. a
power space.
exponent : non-zero float, optional
Exponent of the norm in each point. Values between
0 and 1 are currently not supported due to numerical
instability. Infinity gives the supremum norm.
Default: ``vfspace.exponent``, usually 2.
Examples
--------
>>> space = odl.rn(2)
>>> pspace = odl.ProductSpace(space, 2)
>>> op = IndicatorGroupL1UnitBall(pspace)
>>> op([[0.1, 0.5], [0.2, 0.3]])
0
>>> op([[3, 3], [4, 4]])
inf
Set exponent of inner (p) norm:
>>> op2 = IndicatorGroupL1UnitBall(pspace, exponent=1)
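A small continuation sketch (the pointwise 1-norm is 1 everywhere,
so the point lies on the unit ball):
>>> op2([[0.5, 0.5], [0.5, 0.5]])
0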
"""
if not isinstance(vfspace, ProductSpace):
raise TypeError('`space` must be a `ProductSpace`')
if not vfspace.is_power_space:
raise TypeError('`space.is_power_space` must be `True`')
super(IndicatorGroupL1UnitBall, self).__init__(
space=vfspace, linear=False, grad_lipschitz=np.nan)
self.pointwise_norm = PointwiseNorm(vfspace, exponent)
def _call(self, x):
"""Return ``self(x)``."""
x_norm = self.pointwise_norm(x).ufuncs.max()
if x_norm > 1:
return np.inf
else:
return 0
@property
def proximal(self):
"""Return the `proximal factory` of the functional.
See Also
--------
proximal_convex_conj_l1 : `proximal factory` for the L1-norms convex
conjugate.
"""
if self.pointwise_norm.exponent == np.inf:
return proximal_convex_conj_l1(space=self.domain)
elif self.pointwise_norm.exponent == 2:
return proximal_convex_conj_l1_l2(space=self.domain)
else:
raise NotImplementedError('`proximal` only implemented for p = 1 '
'or 2')
@property
def convex_conj(self):
"""Convex conjugate functional of IndicatorLpUnitBall.
Returns
-------
convex_conj : GroupL1Norm
The convex conjugate is the group L1-norm.
"""
conj_exp = conj_exponent(self.pointwise_norm.exponent)
return GroupL1Norm(self.domain, exponent=conj_exp)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, exponent={})'.format(self.__class__.__name__,
self.domain,
self.pointwise_norm.exponent)
class IndicatorLpUnitBall(Functional):
"""The indicator function on the unit ball in given the ``Lp`` norm.
It does not implement `gradient` since it is not differentiable everywhere.
Notes
-----
This functional is defined as
.. math::
f(x) = \\left\{ \\begin{array}{ll}
0 & \\text{if } ||x||_{L_p} \\leq 1, \\\\
\\infty & \\text{else,}
\\end{array} \\right.
where :math:`||x||_{L_p}` is the :math:`L_p`-norm, which for finite values
of :math:`p` is defined as
.. math::
\| x \|_{L_p} = \\left( \\int_{\Omega} |x|^p dx \\right)^{1/p},
and for :math:`p = \\infty` it is defined as
.. math::
||x||_{\\infty} = \max_x (|x|).
The functional also allows noninteger and nonpositive values of the
exponent :math:`p`, however in this case :math:`\| x \|_{L_p}` is not a
norm.
"""
def __init__(self, space, exponent):
"""Initialize a new instance.
Parameters
----------
space : `DiscreteLp` or `TensorSpace`
Domain of the functional.
exponent : int or infinity
Specifies which norm to use.
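Examples
--------
A minimal sketch (assuming ``odl`` is importable):
>>> space = odl.rn(3)
>>> func = odl.solvers.IndicatorLpUnitBall(space, exponent=2)
>>> func([0.5, 0.5, 0.5])
0
>>> func([1, 1, 1])
inf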
"""
super(IndicatorLpUnitBall, self).__init__(space=space, linear=False)
self.__norm = LpNorm(space, exponent)
self.__exponent = float(exponent)
@property
def exponent(self):
"""Exponent corresponding to the norm."""
return self.__exponent
def _call(self, x):
"""Apply the functional to the given point."""
x_norm = self.__norm(x)
if x_norm > 1:
return np.inf
else:
return 0
@property
def convex_conj(self):
"""The conjugate functional of IndicatorLpUnitBall.
The convex conjugate functional of an ``Lp``-norm with ``p < infty`` is
the indicator function on the unit ball defined by the corresponding dual
norm ``q``, given by ``1/p + 1/q = 1``, where ``q = infty`` if
``p = 1`` [Roc1970]. By the Fenchel-Moreau theorem, the convex
conjugate functional of indicator function on the unit ball in ``Lq``
is the corresponding Lp-norm [BC2011].
References
----------
[Roc1970] Rockafellar, R. T. *Convex analysis*. Princeton
University Press, 1970.
[BC2011] Bauschke, H H, and Combettes, P L. *Convex analysis and
monotone operator theory in Hilbert spaces*. Springer, 2011.
"""
if self.exponent == np.inf:
return L1Norm(self.domain)
elif self.exponent == 2:
return L2Norm(self.domain)
else:
return LpNorm(self.domain, exponent=conj_exponent(self.exponent))
@property
def proximal(self):
"""Return the `proximal factory` of the functional.
See Also
--------
odl.solvers.nonsmooth.proximal_operators.proximal_convex_conj_l1 :
`proximal factory` for convex conjugate of L1-norm.
odl.solvers.nonsmooth.proximal_operators.proximal_convex_conj_l2 :
`proximal factory` for convex conjugate of L2-norm.
"""
if self.exponent == np.inf:
return proximal_convex_conj_l1(space=self.domain)
elif self.exponent == 2:
return proximal_convex_conj_l2(space=self.domain)
else:
raise NotImplementedError('`proximal` only implemented for p=2 or '
'p=inf')
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, {!r})'.format(self.__class__.__name__,
self.domain, self.exponent)
class L1Norm(LpNorm):
"""The functional corresponding to L1-norm.
The L1-norm, ``||x||_1``, is defined as the integral/sum of ``|x|``.
Notes
-----
If the functional is defined on an :math:`\mathbb{R}^n`-like space, the
:math:`\| \cdot \|_1`-norm is defined as
.. math::
\| x \|_1 = \\sum_{i=1}^n |x_i|.
If the functional is defined on an :math:`L_2`-like space, the
:math:`\| \cdot \|_1`-norm is defined as
.. math::
\| x \|_1 = \\int_\Omega |x(t)| dt.
The `proximal` factory allows using vector-valued stepsizes:
>>> space = odl.rn(3)
>>> f = odl.solvers.L1Norm(space)
>>> x = space.one()
>>> f.proximal([0.5, 1.0, 1.5])(x)
rn(3).element([ 0.5, 0. , 0. ])
"""
def __init__(self, space):
"""Initialize a new instance.
Parameters
----------
space : `DiscreteLp` or `TensorSpace`
Domain of the functional.
"""
super(L1Norm, self).__init__(space=space, exponent=1)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r})'.format(self.__class__.__name__,
self.domain)
class L2Norm(LpNorm):
"""The functional corresponding to the L2-norm.
The L2-norm, ``||x||_2``, is defined as the square root of the
integral/sum of ``x^2``.
Notes
-----
If the functional is defined on an :math:`\mathbb{R}^n`-like space, the
:math:`\| \cdot \|_2`-norm is defined as
.. math::
\| x \|_2 = \\sqrt{ \\sum_{i=1}^n |x_i|^2 }.
If the functional is defined on an :math:`L_2`-like space, the
:math:`\| \cdot \|_2`-norm is defined as
.. math::
\| x \|_2 = \\sqrt{ \\int_\Omega |x(t)|^2 dt }.
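Examples
--------
A small sketch (assuming ``odl`` is importable):
>>> space = odl.rn(3)
>>> l2_norm = odl.solvers.L2Norm(space)
>>> l2_norm([3, 4, 0])
5.0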
"""
def __init__(self, space):
"""Initialize a new instance.
Parameters
----------
space : `DiscreteLp` or `TensorSpace`
Domain of the functional.
"""
super(L2Norm, self).__init__(space=space, exponent=2)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r})'.format(self.__class__.__name__,
self.domain)
class L2NormSquared(Functional):
"""The functional corresponding to the squared L2-norm.
The squared L2-norm, ``||x||_2^2``, is defined as the integral/sum of
``x^2``.
Notes
-----
If the functional is defined on an :math:`\mathbb{R}^n`-like space, the
:math:`\| \cdot \|_2^2`-functional is defined as
.. math::
\| x \|_2^2 = \\sum_{i=1}^n |x_i|^2.
If the functional is defined on an :math:`L_2`-like space, the
:math:`\| \cdot \|_2^2`-functional is defined as
.. math::
\| x \|_2^2 = \\int_\Omega |x(t)|^2 dt.
The `proximal` factory allows using vector-valued stepsizes:
>>> space = odl.rn(3)
>>> f = odl.solvers.L2NormSquared(space)
>>> x = space.one()
>>> f.proximal([0.5, 1.5, 2.0])(x)
rn(3).element([ 0.5 , 0.25, 0.2 ])
"""
def __init__(self, space):
"""Initialize a new instance.
Parameters
----------
space : `DiscreteLp` or `TensorSpace`
Domain of the functional.
"""
super(L2NormSquared, self).__init__(
space=space, linear=False, grad_lipschitz=2)
# TODO: update when integration operator is in place: issue #440
def _call(self, x):
"""Return the squared L2-norm of ``x``."""
return x.inner(x)
@property
def gradient(self):
"""Gradient operator of the functional."""
return ScalingOperator(self.domain, 2.0)
@property
def proximal(self):
"""Return the `proximal factory` of the functional.
See Also
--------
odl.solvers.nonsmooth.proximal_operators.proximal_l2_squared :
`proximal factory` for the squared L2-norm.
"""
return proximal_l2_squared(space=self.domain)
@property
def convex_conj(self):
"""The convex conjugate functional of the squared L2-norm.
Notes
-----
The conjugate functional of :math:`\| \\cdot \|_2^2` is
:math:`\\frac{1}{4}\| \\cdot \|_2^2`
"""
return (1.0 / 4) * L2NormSquared(self.domain)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r})'.format(self.__class__.__name__, self.domain)
class ConstantFunctional(Functional):
"""The constant functional.
This functional maps all elements in the domain to a given, constant value.
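Examples
--------
A minimal sketch (assuming ``odl`` is importable):
>>> space = odl.rn(3)
>>> func = odl.solvers.ConstantFunctional(space, constant=1.5)
>>> func([1, 2, 3])
1.5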
"""
def __init__(self, space, constant):
"""Initialize a new instance.
Parameters
----------
space : `LinearSpace`
Domain of the functional.
constant : element in ``domain.field``
The constant value of the functional
"""
super(ConstantFunctional, self).__init__(
space=space, linear=(constant == 0), grad_lipschitz=0)
self.__constant = self.range.element(constant)
@property
def constant(self):
"""The constant value of the functional."""
return self.__constant
def _call(self, x):
"""Return a constant value."""
return self.constant
@property
def gradient(self):
"""Gradient operator of the functional."""
return ZeroOperator(self.domain)
@property
def proximal(self):
"""Return the `proximal factory` of the functional."""
return proximal_const_func(self.domain)
@property
def convex_conj(self):
"""Convex conjugate functional of the constant functional.
Notes
-----
This functional is defined as
.. math::
f^*(x) = \\left\{ \\begin{array}{ll}
-constant & \\text{if } x = 0, \\\\
\\infty & \\text{else}
\\end{array} \\right.
"""
return IndicatorZero(self.domain, -self.constant)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, {!r})'.format(self.__class__.__name__,
self.domain, self.constant)
class ZeroFunctional(ConstantFunctional):
"""Functional that maps all elements in the domain to zero."""
def __init__(self, space):
"""Initialize a new instance.
Parameters
----------
space : `LinearSpace`
Domain of the functional.
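Examples
--------
A minimal sketch (assuming ``odl`` is importable):
>>> func = odl.solvers.ZeroFunctional(odl.rn(3))
>>> func([1, 2, 3])
0.0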
"""
super(ZeroFunctional, self).__init__(space=space, constant=0)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r})'.format(self.__class__.__name__, self.domain)
class ScalingFunctional(Functional, ScalingOperator):
"""Functional that scales the input argument by a value.
Since the range of a functional is always a field, the domain of this
functional is also a field, i.e. real or complex numbers.
"""
def __init__(self, field, scale):
"""Initialize a new instance.
Parameters
----------
field : `Field`
Domain of the functional.
scale : element in ``domain``
The constant value to scale by.
Examples
--------
>>> field = odl.RealNumbers()
>>> func = ScalingFunctional(field, 3)
>>> func(5)
15.0
"""
Functional.__init__(self, space=field, linear=True, grad_lipschitz=0)
ScalingOperator.__init__(self, field, scale)
@property
def gradient(self):
"""Gradient operator of the functional."""
return ConstantFunctional(self.domain, self.scalar)
class IdentityFunctional(ScalingFunctional):
"""Functional that maps a scalar to itself.
See Also
--------
odl.operator.IdentityOperator
"""
def __init__(self, field):
"""Initialize a new instance.
Parameters
----------
field : `Field`
Domain of the functional.
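Examples
--------
A minimal sketch (assuming ``odl`` is importable):
>>> func = odl.solvers.IdentityFunctional(odl.RealNumbers())
>>> func(2.5)
2.5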
"""
super(IdentityFunctional, self).__init__(field, 1.0)
class IndicatorBox(Functional):
"""Indicator on some box shaped domain.
Notes
-----
The indicator :math:`F` with lower bound :math:`a` and upper bound
:math:`b` is defined as:
.. math::
F(x) = \\begin{cases}
0 & \\text{if } a \\leq x \\leq b \\text{ everywhere}, \\\\
\\infty & \\text{else}
\\end{cases}
"""
def __init__(self, space, lower=None, upper=None):
"""Initialize an instance.
Parameters
----------
space : `LinearSpace`
Domain of the functional.
lower : ``space.field`` element or ``space`` `element-like`, optional
The lower bound.
Default: ``None``, interpreted as -infinity
upper : ``space.field`` element or ``space`` `element-like`, optional
The upper bound.
Default: ``None``, interpreted as +infinity
Examples
--------
>>> space = odl.rn(3)
>>> func = IndicatorBox(space, 0, 2)
>>> func([0, 1, 2]) # all points inside
0
>>> func([0, 1, 3]) # one point outside
inf
"""
super(IndicatorBox, self).__init__(space, linear=False)
self.lower = lower
self.upper = upper
def _call(self, x):
"""Apply the functional to the given point."""
# Since the proximal projects onto our feasible set we can simply
# check if it changes anything
proj = self.proximal(1)(x)
return np.inf if x.dist(proj) > 0 else 0
@property
def proximal(self):
"""Return the `proximal factory` of the functional."""
return proximal_box_constraint(self.domain, self.lower, self.upper)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, {!r}, {!r})'.format(self.__class__.__name__,
self.domain,
self.lower, self.upper)
class IndicatorNonnegativity(IndicatorBox):
"""Indicator on the set of non-negative numbers.
Notes
-----
The nonnegativity indicator :math:`F` is defined as:
.. math::
F(x) = \\begin{cases}
0 & \\text{if } 0 \\leq x \\text{ everywhere}, \\\\
\\infty & \\text{else}
\\end{cases}
"""
def __init__(self, space):
"""Initialize an instance.
Parameters
----------
space : `LinearSpace`
Domain of the functional.
Examples
--------
>>> space = odl.rn(3)
>>> func = IndicatorNonnegativity(space)
>>> func([0, 1, 2]) # all points positive
0
>>> func([0, 1, -3]) # one point negative
inf
"""
super(IndicatorNonnegativity, self).__init__(
space, lower=0, upper=None)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r})'.format(self.__class__.__name__, self.domain)
class IndicatorZero(Functional):
"""The indicator function of the singleton set {0}.
The function has a constant value if the input is zero, otherwise infinity.
"""
def __init__(self, space, constant=0):
"""Initialize a new instance.
Parameters
----------
space : `LinearSpace`
Domain of the functional.
constant : element in ``domain.field``, optional
The constant value of the functional
Examples
--------
>>> space = odl.rn(3)
>>> func = IndicatorZero(space)
>>> func([0, 0, 0])
0
>>> func([0, 0, 1])
inf
>>> func = IndicatorZero(space, constant=2)
>>> func([0, 0, 0])
2
"""
super(IndicatorZero, self).__init__(space, linear=False)
self.__constant = constant
@property
def constant(self):
"""The constant value of the functional if ``x=0``."""
return self.__constant
def _call(self, x):
"""Apply the functional to the given point."""
if x.norm() == 0:
# In this case x is the zero-element.
return self.constant
else:
return np.inf
@property
def convex_conj(self):
"""The convex conjugate functional.
Notes
-----
By the Fenchel-Moreau theorem the convex conjugate is the constant
functional [BC2011] with the constant value of -`constant`.
References
----------
[BC2011] Bauschke, H H, and Combettes, P L. *Convex analysis and
monotone operator theory in Hilbert spaces*. Springer, 2011.
"""
return ConstantFunctional(self.domain, -self.constant)
@property
def proximal(self):
"""Return the proximal factory of the functional.
This is the zero operator.
"""
def zero_proximal(sigma=1.0):
"""Proximal factory for zero operator.
Parameters
----------
sigma : positive float, optional
Step size parameter.
"""
return ZeroOperator(self.domain)
return zero_proximal
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, {!r})'.format(self.__class__.__name__,
self.domain, self.constant)
class KullbackLeibler(Functional):
"""The Kullback-Leibler divergence functional.
Notes
-----
The functional :math:`F` with prior :math:`g>=0` is given by:
.. math::
F(x)
=
\\begin{cases}
\\sum_{i} \left( x_i - g_i + g_i \log \left( \\frac{g_i}{x_i}
\\right) \\right) & \\text{if } x_i > 0 \\forall i
\\\\
+\\infty & \\text{else.}
\\end{cases}
Note that we use the common definition 0 log(0) := 0.
KL-based objectives are common in MLEM optimization problems and are often
used as a data-matching term when the data noise is governed by a
(multivariate) Poisson distribution.
The functional is related to the Kullback-Leibler cross entropy functional
`KullbackLeiblerCrossEntropy`. The KL cross entropy is the one
described in `this Wikipedia article
<https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`_, and
the functional :math:`F` is obtained by swapping the roles of the prior and
the variable in the KL cross entropy functional.
For a theoretical exposition, see `Csiszar1991`_.
See Also
--------
KullbackLeiblerConvexConj : the convex conjugate functional
KullbackLeiblerCrossEntropy : related functional
References
----------
.. _Csiszar1991: http://www.jstor.org/stable/2241918
"""
def __init__(self, space, prior=None):
"""Initialize a new instance.
Parameters
----------
space : `DiscreteLp` or `TensorSpace`
Domain of the functional.
prior : ``space`` `element-like`, optional
Depending on the context, the prior, target or data
distribution. It is assumed to be nonnegative.
Default: if ``None``, it is taken to be the one-element.
Examples
--------
Test that KullbackLeibler(x,x) = 0
>>> space = odl.rn(3)
>>> prior = 3 * space.one()
>>> func = odl.solvers.KullbackLeibler(space, prior=prior)
>>> func(prior)
0.0
Test that zeros in the prior are handled correctly
>>> prior = space.zero()
>>> func = odl.solvers.KullbackLeibler(space, prior=prior)
>>> x = space.one()
>>> func(x)
3.0
"""
super(KullbackLeibler, self).__init__(
space=space, linear=False, grad_lipschitz=np.nan)
if prior is not None and prior not in self.domain:
raise ValueError('`prior` {!r} not in `domain` {!r}'
''.format(prior, self.domain))
self.__prior = prior
@property
def prior(self):
"""The prior in the Kullback-Leibler functional."""
return self.__prior
# TODO: update when integration operator is in place: issue #440
def _call(self, x):
"""Return the KL-diveregnce in the point ``x``.
If any components of ``x`` is non-positive, the value is positive
infinity.
"""
# Lazy import to improve `import odl` time
import scipy.special
if self.prior is None:
tmp = ((x - 1 - np.log(x)).inner(self.domain.one()))
else:
tmp = ((x - self.prior +
scipy.special.xlogy(self.prior, self.prior / x))
.inner(self.domain.one()))
if np.isnan(tmp):
# In this case, some element was less than or equal to zero
return np.inf
else:
return tmp
@property
def gradient(self):
"""The gradient of `KullbackLeibler` with ``prior`` :math:`g` is given as
.. math::
\\nabla F(x) = 1 - \\frac{g}{x}.
The gradient is not defined in points where one or more components
are non-positive.
"""
functional = self
class KLGradient(Operator):
"""The gradient operator of this functional."""
def __init__(self):
"""Initialize a new instance."""
super(KLGradient, self).__init__(
functional.domain, functional.domain, linear=False)
def _call(self, x):
"""Apply the gradient operator to the given point.
The gradient is not defined in points where one or more
components are non-positive.
"""
if functional.prior is None:
return (-1.0) / x + 1
else:
return (-functional.prior) / x + 1
return KLGradient()
@property
def proximal(self):
"""Return the `proximal factory` of the functional.
See Also
--------
odl.solvers.nonsmooth.proximal_operators.proximal_convex_conj_kl :
`proximal factory` for convex conjugate of KL.
odl.solvers.nonsmooth.proximal_operators.proximal_convex_conj :
Proximal of the convex conjugate of a functional.
"""
return proximal_convex_conj(proximal_convex_conj_kl(space=self.domain,
g=self.prior))
@property
def convex_conj(self):
"""The convex conjugate functional of the KL-functional."""
return KullbackLeiblerConvexConj(self.domain, self.prior)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, {!r})'.format(self.__class__.__name__,
self.domain, self.prior)
class KullbackLeiblerConvexConj(Functional):
"""The convex conjugate of Kullback-Leibler divergence functional.
Notes
-----
The functional :math:`F^*` with prior :math:`g>=0` is given by:
.. math::
F^*(x)
=
\\begin{cases}
\\sum_{i} \left( -g_i \ln(1 - x_i) \\right)
& \\text{if } x_i < 1 \\forall i
\\\\
+\\infty & \\text{else}
\\end{cases}
See Also
--------
KullbackLeibler : convex conjugate functional
"""
def __init__(self, space, prior=None):
"""Initialize a new instance.
Parameters
----------
space : `DiscreteLp` or `TensorSpace`
Domain of the functional.
prior : ``space`` `element-like`, optional
Depending on the context, the prior, target or data
distribution. It is assumed to be nonnegative.
Default: if ``None``, it is taken to be the one-element.
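Examples
--------
A small sketch (assuming ``odl`` is importable); the value at zero is
zero:
>>> space = odl.rn(3)
>>> func = odl.solvers.KullbackLeiblerConvexConj(space)
>>> func(space.zero())
0.0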
"""
super(KullbackLeiblerConvexConj, self).__init__(
space=space, linear=False, grad_lipschitz=np.nan)
if prior is not None and prior not in self.domain:
raise ValueError('`prior` {!r} not in `domain` {!r}'
''.format(prior, self.domain))
self.__prior = prior
@property
def prior(self):
"""The prior in convex conjugate Kullback-Leibler functional."""
return self.__prior
# TODO: update when integration operator is in place: issue #440
def _call(self, x):
"""Return the value in the point ``x``.
If any component of ``x`` is larger than or equal to 1, the value is
positive infinity.
"""
# Lazy import to improve `import odl` time
import scipy.special
if self.prior is None:
tmp = self.domain.element(
-1.0 * (np.log(1 - x))).inner(self.domain.one())
else:
tmp = self.domain.element(-scipy.special.xlogy(
self.prior, 1 - x)).inner(self.domain.one())
if np.isnan(tmp):
# In this case, some element was larger than or equal to one
return np.inf
else:
return tmp
@property
def gradient(self):
"""Gradient operator of the functional.
The gradient is not defined in points where one or more components
are larger than or equal to one.
"""
functional = self
class KLCCGradient(Operator):
"""The gradient operator of this functional."""
def __init__(self):
"""Initialize a new instance."""
super(KLCCGradient, self).__init__(
functional.domain, functional.domain, linear=False)
def _call(self, x):
"""Apply the gradient operator to the given point.
The gradient is not defined in points where one or more
components are larger than or equal to one.
"""
if functional.prior is None:
return 1.0 / (1 - x)
else:
return functional.prior / (1 - x)
return KLCCGradient()
@property
def proximal(self):
"""Return the `proximal factory` of the functional.
See Also
--------
odl.solvers.nonsmooth.proximal_operators.proximal_convex_conj_kl :
`proximal factory` for convex conjugate of KL.
odl.solvers.nonsmooth.proximal_operators.proximal_convex_conj :
Proximal of the convex conjugate of a functional.
"""
return proximal_convex_conj_kl(space=self.domain, g=self.prior)
@property
def convex_conj(self):
"""The convex conjugate functional of the conjugate KL-functional."""
return KullbackLeibler(self.domain, self.prior)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, {!r})'.format(self.__class__.__name__,
self.domain, self.prior)
class KullbackLeiblerCrossEntropy(Functional):
"""The Kullback-Leibler Cross Entropy divergence functional.
Notes
-----
The functional :math:`F` with prior :math:`g>0` is given by:
.. math::
F(x)
=
\\begin{cases}
\\sum_{i} \left( g_i - x_i + x_i \log \left( \\frac{x_i}{g_i}
\\right) \\right)
& \\text{if } g_i > 0 \\forall i
\\\\
+\\infty & \\text{else}
\\end{cases}
For further information about the functional, see the
`Wikipedia article on the Kullback Leibler divergence
<https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`_,
or read for example `this article
<http://ieeexplore.ieee.org/document/1056144/?arnumber=1056144>`_.
The KL cross entropy functional :math:`F`, described above, is related to
another functional which is also known as the KL divergence. That
functional is often used as a data discrepancy term in inverse problems,
when data is corrupted with Poisson noise. It is obtained by swapping the
roles of the prior and the variable. See the See Also section.
For a theoretical exposition, see `Csiszar1991`_.
See Also
--------
KullbackLeibler : related functional
KullbackLeiblerCrossEntropyConvexConj : the convex conjugate functional
References
----------
.. _Csiszar1991: http://www.jstor.org/stable/2241918
"""
def __init__(self, space, prior=None):
"""Initialize a new instance.
Parameters
----------
space : `DiscreteLp` or `TensorSpace`
Domain of the functional.
prior : ``space`` `element-like`, optional
Depending on the context, the prior, target or data
distribution. It is assumed to be nonnegative.
Default: if ``None``, it is taken to be the one-element.
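Examples
--------
A small sketch (assuming ``odl`` is importable); the divergence
vanishes when ``x`` equals the prior:
>>> space = odl.rn(3)
>>> prior = 2 * space.one()
>>> func = odl.solvers.KullbackLeiblerCrossEntropy(space, prior=prior)
>>> func(prior)
0.0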
"""
super(KullbackLeiblerCrossEntropy, self).__init__(
space=space, linear=False, grad_lipschitz=np.nan)
if prior is not None and prior not in self.domain:
raise ValueError('`prior` {!r} not in `domain` {!r}'
''.format(prior, self.domain))
self.__prior = prior
@property
def prior(self):
"""The prior in the Kullback-Leibler functional."""
return self.__prior
# TODO: update when integration operator is in place: issue #440
def _call(self, x):
"""Return the KL-diveregnce in the point ``x``.
If any components of ``x`` is non-positive, the value is positive
infinity.
"""
# Lazy import to improve `import odl` time
import scipy.special
if self.prior is None:
tmp = (1 - x + scipy.special.xlogy(x, x)).inner(self.domain.one())
else:
tmp = ((self.prior - x + scipy.special.xlogy(x, x / self.prior))
.inner(self.domain.one()))
if np.isnan(tmp):
# In this case, some element was less than or equal to zero
return np.inf
else:
return tmp
@property
def gradient(self):
"""Gradient operator of the functional.
The gradient is not defined in points where one or more components
are less than or equal to 0.
"""
functional = self
class KLCrossEntropyGradient(Operator):
"""The gradient operator of this functional."""
def __init__(self):
"""Initialize a new instance."""
super(KLCrossEntropyGradient, self).__init__(
functional.domain, functional.domain, linear=False)
def _call(self, x):
"""Apply the gradient operator to the given point.
The gradient is not defined for points with components less
than or equal to zero.
"""
if functional.prior is None:
tmp = np.log(x)
else:
tmp = np.log(x / functional.prior)
if np.all(np.isfinite(tmp)):
return tmp
else:
# The derivative is not defined.
raise ValueError('The gradient of the Kullback-Leibler '
'Cross Entropy functional is not defined '
'for `x` with one or more components '
'less than or equal to zero.')
return KLCrossEntropyGradient()
@property
def proximal(self):
"""Return the `proximal factory` of the functional.
See Also
--------
odl.solvers.nonsmooth.proximal_operators.\
proximal_convex_conj_kl_cross_entropy :
`proximal factory` for convex conjugate of the KL cross entropy.
odl.solvers.nonsmooth.proximal_operators.proximal_convex_conj :
Proximal of the convex conjugate of a functional.
"""
return proximal_convex_conj(proximal_convex_conj_kl_cross_entropy(
space=self.domain, g=self.prior))
@property
def convex_conj(self):
"""The convex conjugate functional of the KL-functional."""
return KullbackLeiblerCrossEntropyConvexConj(self.domain, self.prior)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, {!r})'.format(self.__class__.__name__,
self.domain, self.prior)
class KullbackLeiblerCrossEntropyConvexConj(Functional):
"""The convex conjugate of Kullback-Leibler Cross Entorpy functional.
Notes
-----
The functional :math:`F^*` with prior :math:`g>0` is given by
.. math::
F^*(x) = \\sum_i g_i \\left(e^{x_i} - 1\\right)
See Also
--------
KullbackLeiblerCrossEntropy : convex conjugate functional
"""
def __init__(self, space, prior=None):
"""Initialize a new instance.
Parameters
----------
space : `DiscreteLp` or `TensorSpace`
Domain of the functional.
prior : ``space`` `element-like`, optional
Depending on the context, the prior, target or data
distribution. It is assumed to be nonnegative.
Default: if ``None``, it is taken to be the one-element.
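Examples
--------
A small sketch (assuming ``odl`` is importable); since ``e^0 - 1 = 0``,
the value at zero is zero:
>>> space = odl.rn(3)
>>> func = odl.solvers.KullbackLeiblerCrossEntropyConvexConj(space)
>>> func(space.zero())
0.0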
"""
super(KullbackLeiblerCrossEntropyConvexConj, self).__init__(
space=space, linear=False, grad_lipschitz=np.nan)
if prior is not None and prior not in self.domain:
raise ValueError('`prior` {!r} not in `domain` {!r}'
''.format(prior, self.domain))
self.__prior = prior
@property
def prior(self):
"""The prior in convex conjugate Kullback-Leibler Cross Entorpy."""
return self.__prior
# TODO: update when integration operator is in place: issue #440
def _call(self, x):
"""Return the value in the point ``x``."""
if self.prior is None:
tmp = self.domain.element((np.exp(x) - 1)).inner(self.domain.one())
else:
tmp = (self.prior * (np.exp(x) - 1)).inner(self.domain.one())
return tmp
# TODO: replace this when UFuncOperators is in place: PL #576
@property
def gradient(self):
"""Gradient operator of the functional."""
functional = self
class KLCrossEntCCGradient(Operator):
"""The gradient operator of this functional."""
def __init__(self):
"""Initialize a new instance."""
super(KLCrossEntCCGradient, self).__init__(
functional.domain, functional.domain, linear=False)
def _call(self, x):
"""Apply the gradient operator to the given point."""
if functional.prior is None:
return self.domain.element(np.exp(x))
else:
return functional.prior * np.exp(x)
return KLCrossEntCCGradient()
@property
def proximal(self):
"""Return the `proximal factory` of the functional.
See Also
--------
odl.solvers.nonsmooth.proximal_operators.\
proximal_convex_conj_kl_cross_entropy :
`proximal factory` for convex conjugate of the KL cross entropy.
"""
return proximal_convex_conj_kl_cross_entropy(space=self.domain,
g=self.prior)
@property
def convex_conj(self):
"""The convex conjugate functional of the conjugate KL-functional."""
return KullbackLeiblerCrossEntropy(self.domain, self.prior)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, {!r})'.format(self.__class__.__name__,
self.domain, self.prior)
class SeparableSum(Functional):
"""The functional corresponding to separable sum of functionals.
The separable sum of functionals ``f_1, f_2, ..., f_n`` is given by::
h(x_1, x_2, ..., x_n) = sum_i^n f_i(x_i)
The separable sum is thus defined for any collection of functionals with
the same range.
Notes
-----
The separable sum of functionals :math:`f_1, f_2, ..., f_n` is given by
.. math::
h(x_1, x_2, ..., x_n) = \sum_{i=1}^n f_i(x_i)
It has several useful features that also distribute. For example, the
gradient is a `DiagonalOperator`:
.. math::
[\\nabla h](x_1, x_2, ..., x_n) =
[\\nabla f_1(x_1), \\nabla f_2(x_2), ..., \\nabla f_n(x_n)]
The convex conjugate is also a separable sum:
.. math::
[h^*](y_1, y_2, ..., y_n) = \sum_{i=1}^n f_i^*(y_i)
And the proximal distributes:
.. math::
\mathrm{prox}_{\\sigma h}(x_1, x_2, ..., x_n) =
[\mathrm{prox}_{\\sigma f_1}(x_1),
\mathrm{prox}_{\\sigma f_2}(x_2),
...,
\mathrm{prox}_{\\sigma f_n}(x_n)].
If :math:`\\sigma = (\\sigma_1, \\sigma_2, \\ldots, \\sigma_n)` is a list
of positive `float`s, then it distributes, too:
.. math::
\mathrm{prox}_{\\sigma h}(x_1, x_2, ..., x_n) =
[\mathrm{prox}_{\\sigma_1 f_1}(x_1),
\mathrm{prox}_{\\sigma_2 f_2}(x_2),
...,
\mathrm{prox}_{\\sigma_n f_n}(x_n)].
"""
def __init__(self, *functionals):
"""Initialize a new instance.
Parameters
----------
functional1, ..., functionalN : `Functional`
The functionals in the sum.
Can also be given as ``space, n`` with ``n`` integer,
in which case the functional is repeated ``n`` times.
Examples
--------
Create functional ``f([x1, x2]) = ||x1||_1 + ||x2||_2``:
>>> space = odl.rn(3)
>>> l1 = odl.solvers.L1Norm(space)
>>> l2 = odl.solvers.L2Norm(space)
>>> f_sum = odl.solvers.SeparableSum(l1, l2)
The `proximal` factory allows using vector-valued stepsizes:
>>> x = f_sum.domain.one()
>>> f_sum.proximal([0.5, 2.0])(x)
ProductSpace(rn(3), 2).element([
[ 0.5, 0.5, 0.5],
[ 0., 0., 0.]
])
Create functional ``f([x1, ... ,xn]) = \sum_i ||xi||_1``:
>>> f_sum = odl.solvers.SeparableSum(l1, 5)
"""
# Make a power space if the second argument is an integer
if (len(functionals) == 2 and
isinstance(functionals[1], Integral)):
functionals = [functionals[0]] * functionals[1]
domains = [func.domain for func in functionals]
domain = ProductSpace(*domains)
linear = all(func.is_linear for func in functionals)
super(SeparableSum, self).__init__(space=domain, linear=linear)
self.__functionals = tuple(functionals)
def _call(self, x):
"""Return the separable sum evaluated in ``x``."""
return sum(fi(xi) for xi, fi in zip(x, self.functionals))
@property
def functionals(self):
"""The summands of the functional."""
return self.__functionals
def __getitem__(self, indices):
"""Return ``self[index]``.
Parameters
----------
indices : index expression
Object determining which parts of the sum to extract.
Returns
-------
subfunctional : `Functional` or `SeparableSum`
Functional corresponding to the given indices.
Examples
--------
>>> space = odl.rn(3)
>>> l1 = odl.solvers.L1Norm(space)
>>> l2 = odl.solvers.L2Norm(space)
>>> f_sum = odl.solvers.SeparableSum(l1, l2, 2*l2)
Extract single sub-functional via integer index:
>>> f_sum[0]
L1Norm(rn(3))
Extract subset of functionals:
>>> f_sum[:2]
SeparableSum(L1Norm(rn(3)), L2Norm(rn(3)))
"""
result = self.functionals[indices]
if isinstance(result, tuple):
return SeparableSum(*result)
else:
return result
@property
def gradient(self):
"""Gradient operator of the functional."""
gradients = [func.gradient for func in self.functionals]
return DiagonalOperator(*gradients)
@property
def proximal(self):
"""Return the `proximal factory` of the functional.
The proximal operator separates over separable sums.
Returns
-------
proximal : combine_proximals
"""
proximals = [func.proximal for func in self.functionals]
return combine_proximals(*proximals)
@property
def convex_conj(self):
"""The convex conjugate functional.
Convex conjugate distributes over separable sums, so the result is
simply the separable sum of the convex conjugates.
"""
convex_conjs = [func.convex_conj for func in self.functionals]
return SeparableSum(*convex_conjs)
def __repr__(self):
"""Return ``repr(self)``."""
func_repr = ', '.join(repr(func) for func in self.functionals)
return '{}({})'.format(self.__class__.__name__, func_repr)
class QuadraticForm(Functional):
"""Functional for a general quadratic form ``x^T A x + b^T x + c``."""
def __init__(self, operator=None, vector=None, constant=0):
"""Initialize a new instance.
All parameters are optional, but at least one of ``operator`` and
``vector`` has to be provided in order to infer the space.
The computed value is::
x.inner(operator(x)) + vector.inner(x) + constant
Parameters
----------
operator : `Operator`, optional
Operator for the quadratic part of the functional.
``None`` means that this part is ignored.
vector : `LinearSpaceElement`, optional
Vector for the linear part of the functional.
``None`` means that this part is ignored.
constant : element in ``domain.field``, optional
Constant offset of the functional.
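Examples
--------
A minimal sketch (assuming ``odl`` is importable), computing
``x^T I x + b^T x + c``:
>>> space = odl.rn(3)
>>> operator = odl.IdentityOperator(space)
>>> vector = space.one()
>>> func = odl.solvers.QuadraticForm(operator, vector, constant=1.0)
>>> func(space.one())  # 3 + 3 + 1
7.0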
"""
if operator is None and vector is None:
raise ValueError('need to provide at least one of `operator` and '
'`vector`')
if operator is not None:
domain = operator.domain
elif vector is not None:
domain = vector.space
if (operator is not None and vector is not None and
vector not in operator.domain):
raise ValueError('domain of `operator` and space of `vector` need '
'to match')
super(QuadraticForm, self).__init__(
space=domain, linear=(operator is None and constant == 0))
self.__operator = operator
self.__vector = vector
self.__constant = constant
if self.constant not in self.range:
raise ValueError('`constant` must be an element in the range of '
'the functional')
@property
def operator(self):
"""Operator for the quadratic part of the functional."""
return self.__operator
@property
def vector(self):
"""Vector for the linear part of the functional."""
return self.__vector
@property
def constant(self):
"""Constant offset of the functional."""
return self.__constant
def _call(self, x):
"""Return ``self(x)``."""
if self.operator is None:
return self.vector.inner(x) + self.constant
elif self.vector is None:
return x.inner(self.operator(x)) + self.constant
else:
tmp = self.operator(x)
tmp += self.vector
return x.inner(tmp) + self.constant
@property
def gradient(self):
"""Gradient operator of the functional."""
if self.operator is None:
return ConstantOperator(self.vector, self.domain)
else:
if not self.operator.is_linear:
# TODO: Actually works otherwise, but needs more work
raise NotImplementedError('`operator` must be linear')
# Figure out if operator is symmetric
opadjoint = self.operator.adjoint
if opadjoint == self.operator:
gradient = 2 * self.operator
else:
gradient = self.operator + opadjoint
# Return gradient
if self.vector is None:
return gradient
else:
return gradient + self.vector
@property
def convex_conj(self):
"""The convex conjugate functional of the quadratic form.
Notes
-----
The convex conjugate of the quadratic form :math:`<x, Ax> + <b, x> + c`
is given by
.. math::
(<x, Ax> + <b, x> + c)^* (x) =
<(x - b), A^-1 (x - b)> - c =
<x , A^-1 x> - <x, A^-* b> - <x, A^-1 b> + <b, A^-1 b> - c.
If the quadratic part of the functional is zero it is instead given
by a translated indicator function on zero, i.e., if
.. math::
f(x) = <b, x> + c,
then
.. math::
f^*(x^*) =
\\begin{cases}
-c & \\text{if } x^* = b \\\\
\\infty & \\text{else.}
\\end{cases}
See Also
--------
IndicatorZero
"""
if self.operator is None:
tmp = IndicatorZero(space=self.domain, constant=-self.constant)
if self.vector is None:
return tmp
else:
return tmp.translated(self.vector)
if self.vector is None:
# Handle trivial case separately
return QuadraticForm(operator=self.operator.inverse,
constant=-self.constant)
else:
# Compute the needed variables
opinv = self.operator.inverse
vector = -opinv.adjoint(self.vector) - opinv(self.vector)
constant = self.vector.inner(opinv(self.vector)) - self.constant
# Create new quadratic form
return QuadraticForm(operator=opinv,
vector=vector,
constant=constant)
class NuclearNorm(Functional):
"""Nuclear norm for matrix valued functions.
Notes
-----
For a matrix-valued function
:math:`f : \\Omega \\rightarrow \\mathbb{R}^{n \\times m}`,
the nuclear norm with parameters :math:`p` and :math:`q` is defined by
.. math::
\\left( \int_\Omega \|\sigma(f(x))\|_p^q d x \\right)^{1/q},
where :math:`\sigma(f(x))` is the vector of singular values of the matrix
:math:`f(x)` and :math:`\| \cdot \|_p` is the usual :math:`p`-norm on
:math:`\mathbb{R}^{\min(n, m)}`.
For a detailed description of its properties, e.g, its proximal, convex
conjugate and more, see [Du+2016].
References
----------
[Du+2016] J. Duran, M. Moeller, C. Sbert, and D. Cremers.
*Collaborative Total Variation: A General Framework for Vectorial TV
Models* SIAM Journal of Imaging Sciences 9(1): 116--151, 2016.
"""
def __init__(self, space, outer_exp=1, singular_vector_exp=2):
"""Initialize a new instance.
Parameters
----------
space : `ProductSpace` of `ProductSpace` of `TensorSpace`
Domain of the functional.
outer_exp : {1, 2, inf}, optional
Exponent for the outer norm.
singular_vector_exp : {1, 2, inf}, optional
Exponent for the norm for the singular vectors.
Examples
--------
Simple example, nuclear norm of a matrix-valued function with all ones
in 3 points. The singular values are [2, 0], which have 2-norm 2.
Since there are 3 points, the expected total value is 6.
>>> r3 = odl.rn(3)
>>> space = odl.ProductSpace(odl.ProductSpace(r3, 2), 2)
>>> norm = NuclearNorm(space)
>>> norm(space.one())
6.0
"""
if (not isinstance(space, ProductSpace) or
not isinstance(space[0], ProductSpace)):
raise TypeError('`space` must be a `ProductSpace` of '
'`ProductSpace`s')
if (not space.is_power_space or not space[0].is_power_space):
raise TypeError('`space` must be of the form `TensorSpace^(nxm)`')
super(NuclearNorm, self).__init__(
space=space, linear=False, grad_lipschitz=np.nan)
self.outernorm = LpNorm(self.domain[0, 0], exponent=outer_exp)
self.pwisenorm = PointwiseNorm(self.domain[0],
exponent=singular_vector_exp)
self.pshape = (len(self.domain), len(self.domain[0]))
def _asarray(self, vec):
"""Convert ``x`` to an array.
Here the indices are changed such that the "outer" indices come last
in order to have the access order as `numpy.linalg.svd` needs it.
This is the inverse of `_asvector`.
"""
shape = self.domain[0, 0].shape + self.pshape
arr = np.empty(shape, dtype=self.domain.dtype)
for i, xi in enumerate(vec):
for j, xij in enumerate(xi):
arr[..., i, j] = xij.asarray()
return arr
def _asvector(self, arr):
"""Convert ``arr`` to a `domain` element.
This is the inverse of `_asarray`.
"""
result = moveaxis(arr, [-2, -1], [0, 1])
return self.domain.element(result)
def _call(self, x):
"""Return ``self(x)``."""
# Convert to array with the pointwise matrix axes last
arr = self._asarray(x)
svd_diag = np.linalg.svd(arr, compute_uv=False)
# Rotate the axes so the svd-direction is first
s_reordered = moveaxis(svd_diag, -1, 0)
# Return nuclear norm
return self.outernorm(self.pwisenorm(s_reordered))
@property
def proximal(self):
"""Return the proximal operator.
Raises
------
NotImplementedError
if ``outer_exp`` is not 1 or ``singular_vector_exp`` is not 1, 2 or
infinity
"""
if self.outernorm.exponent != 1:
raise NotImplementedError('`proximal` only implemented for '
'`outer_exp==1`')
if self.pwisenorm.exponent not in [1, 2, np.inf]:
raise NotImplementedError('`proximal` only implemented for '
'`singular_vector_exp` in [1, 2, inf]')
def nddot(a, b):
"""Compute pointwise matrix product in the last indices."""
return np.einsum('...ij,...jk->...ik', a, b)
func = self
# Add epsilon to fix rounding errors, i.e. make sure that when we
# project on the unit ball, we actually end up slightly inside the unit
# ball. Without it, we may end up slightly outside.
dtype = getattr(self.domain, 'dtype', float)
eps = np.finfo(dtype).resolution * 10
class NuclearNormProximal(Operator):
"""Proximal operator of `NuclearNorm`."""
def __init__(self, sigma):
self.sigma = float(sigma)
super(NuclearNormProximal, self).__init__(
func.domain, func.domain, linear=False)
def _call(self, x):
"""Return ``self(x)``."""
arr = func._asarray(x)
# Compute SVD
U, s, Vt = np.linalg.svd(arr, full_matrices=False)
# transpose pointwise
V = Vt.swapaxes(-1, -2)
# Take pseudoinverse of s
sinv = s.copy()
sinv[sinv != 0] = 1 / sinv[sinv != 0]
# Take pointwise proximal operator of s w.r.t. the norm
# on the singular vectors
if func.pwisenorm.exponent == 1:
abss = np.abs(s) - (self.sigma - eps)
sprox = np.sign(s) * np.maximum(abss, 0)
elif func.pwisenorm.exponent == 2:
s_reordered = moveaxis(s, -1, 0)
snorm = func.pwisenorm(s_reordered).asarray()
snorm = np.maximum(self.sigma, snorm, out=snorm)
sprox = ((1 - eps) - self.sigma / snorm)[..., None] * s
elif func.pwisenorm.exponent == np.inf:
snorm = np.sum(np.abs(s), axis=-1)
snorm = np.maximum(self.sigma, snorm, out=snorm)
sprox = ((1 - eps) - self.sigma / snorm)[..., None] * s
else:
raise RuntimeError
# Compute s matrix
sproxsinv = (sprox * sinv)[..., :, None]
# Compute the final result
result = nddot(nddot(arr, V), sproxsinv * Vt)
# Cast to vector and return. Note array and vector have
# different shapes.
return func._asvector(result)
def __repr__(self):
"""Return ``repr(self)``."""
return '{!r}.proximal({})'.format(func, self.sigma)
return NuclearNormProximal
@property
def convex_conj(self):
"""Convex conjugate of the nuclear norm.
The convex conjugate is the indicator function on the unit ball of
the dual norm where the dual norm is obtained by taking the conjugate
exponent of both the outer and singular vector exponents.
"""
return IndicatorNuclearNormUnitBall(
self.domain,
conj_exponent(self.outernorm.exponent),
conj_exponent(self.pwisenorm.exponent))
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, {}, {})'.format(self.__class__.__name__,
self.domain,
self.outernorm.exponent,
self.pwisenorm.exponent)
class IndicatorNuclearNormUnitBall(Functional):
"""Indicator on unit ball of nuclear norm for matrix valued functions.
Notes
-----
For a matrix-valued function
:math:`f : \\Omega \\rightarrow \\mathbb{R}^{n \\times m}`,
the nuclear norm with parameters :math:`p` and :math:`q` is defined by
.. math::
\\left( \int_\Omega \|\sigma(f(x))\|_p^q d x \\right)^{1/q},
where :math:`\sigma(f(x))` is the vector of singular values of the matrix
:math:`f(x)` and :math:`\| \cdot \|_p` is the usual :math:`p`-norm on
:math:`\mathbb{R}^{\min(n, m)}`.
This function is defined as the indicator on the unit ball of the nuclear
norm, that is, 0 if the nuclear norm is less than 1, and infinity else.
For a detailed description of its properties, e.g, its proximal, convex
conjugate and more, see [Du+2016].
References
----------
[Du+2016] J. Duran, M. Moeller, C. Sbert, and D. Cremers.
*Collaborative Total Variation: A General Framework for Vectorial TV
Models* SIAM Journal of Imaging Sciences 9(1): 116--151, 2016.
"""
def __init__(self, space, outer_exp=1, singular_vector_exp=2):
"""Initialize a new instance.
Parameters
----------
space : `ProductSpace` of `ProductSpace` of `TensorSpace`
Domain of the functional.
outer_exp : {1, 2, inf}, optional
Exponent for the outer norm.
singular_vector_exp : {1, 2, inf}, optional
Exponent for the norm for the singular vectors.
Examples
--------
Simple example, nuclear norm of a matrix-valued function with all ones
in 3 points. The singular values are [2, 0], which have 2-norm 2.
Since there are 3 points, the expected total value is 6.
Since the nuclear norm is larger than 1, the indicator is infinity.
>>> r3 = odl.rn(3)
>>> space = odl.ProductSpace(odl.ProductSpace(r3, 2), 2)
>>> norm = IndicatorNuclearNormUnitBall(space)
>>> norm(space.one())
inf
"""
super(IndicatorNuclearNormUnitBall, self).__init__(
space=space, linear=False, grad_lipschitz=np.nan)
self.__norm = NuclearNorm(space, outer_exp, singular_vector_exp)
def _call(self, x):
"""Return ``self(x)``."""
x_norm = self.__norm(x)
if x_norm > 1:
return np.inf
else:
return 0
@property
def proximal(self):
"""The proximal operator."""
# Implement proximal via duality
return proximal_convex_conj(self.convex_conj.proximal)
@property
def convex_conj(self):
"""Convex conjugate of the unit ball indicator of the nuclear norm.
The convex conjugate is the dual nuclear norm where the dual norm is
obtained by taking the conjugate exponent of both the outer and
singular vector exponents.
"""
return NuclearNorm(self.domain,
conj_exponent(self.__norm.outernorm.exponent),
conj_exponent(self.__norm.pwisenorm.exponent))
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, {}, {})'.format(self.__class__.__name__,
self.domain,
self.__norm.outernorm.exponent,
self.__norm.pwisenorm.exponent)
class MoreauEnvelope(Functional):
"""Moreau envelope of a convex functional.
The Moreau envelope is a way to smooth an arbitrary convex functional
such that its gradient can be computed given the proximal of the original
functional.
The new functional has the same critical points as the original.
It is also called the Moreau-Yosida regularization.
Note that the only computable property of the Moreau envelope is the
gradient; the functional itself cannot be evaluated efficiently.
See `Proximal Algorithms`_ for more information.
Notes
-----
The Moreau envelope of a convex functional
:math:`f : \mathcal{X} \\rightarrow \mathbb{R}` multiplied by a scalar
:math:`\\sigma` is defined by
.. math::
\mathrm{env}_{\\sigma f}(x) =
\\inf_{y \\in \\mathcal{X}}
\\left\{ \\frac{1}{2 \\sigma} \| x - y \|_2^2 + f(y) \\right\}
The gradient of the envelope is given by
.. math::
[\\nabla \mathrm{env}_{\\sigma f}](x) =
\\frac{1}{\\sigma} (x - \mathrm{prox}_{\\sigma f}(x))
Example: if :math:`f = \| \cdot \|_1`, then
.. math::
[\mathrm{env}_{\\sigma \| \cdot \|_1}(x)]_i =
\\begin{cases}
\\frac{1}{2 \\sigma} x_i^2 & \\text{if } |x_i| \leq \\sigma \\\\
|x_i| - \\frac{\\sigma}{2} & \\text{if } |x_i| > \\sigma,
\\end{cases}
which is the usual Huber functional.
References
----------
.. _Proximal Algorithms: \
https://web.stanford.edu/~boyd/papers/pdf/prox_algs.pdf
"""
def __init__(self, functional, sigma=1.0):
"""Initialize an instance.
Parameters
----------
functional : `Functional`
The functional ``f`` in the definition of the Moreau envelope that
is to be smoothed.
sigma : positive float, optional
The scalar ``sigma`` in the definition of the Moreau envelope.
Larger values mean stronger smoothing.
Examples
--------
Create smoothed l1 norm:
>>> space = odl.rn(3)
>>> l1_norm = odl.solvers.L1Norm(space)
>>> smoothed_l1 = MoreauEnvelope(l1_norm)
"""
super(MoreauEnvelope, self).__init__(
space=functional.domain, linear=False)
self.__functional = functional
self.__sigma = sigma
@property
def functional(self):
"""The functional that has been regularized."""
return self.__functional
@property
def sigma(self):
"""Regularization constant, larger means stronger regularization."""
return self.__sigma
@property
def gradient(self):
"""The gradient operator."""
return (ScalingOperator(self.domain, 1 / self.sigma) -
(1 / self.sigma) * self.functional.proximal(self.sigma))
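# A minimal usage sketch for ``MoreauEnvelope`` (illustration only; the
# element values below are made up, not part of any documented API):
#
#     space = odl.rn(3)
#     f = odl.solvers.L1Norm(space)
#     env = MoreauEnvelope(f, sigma=0.5)
#     x = space.element([1.0, -0.2, 0.1])
#     grad = env.gradient(x)  # equals (x - prox_{sigma*f}(x)) / sigma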
class Huber(Functional):
"""The Huber functional.
Notes
-----
The Huber norm is the integral over a smoothed norm. In detail, it is given
by
.. math::
        F(x) = \\int_\Omega f_{\\gamma}(\|x(y)\|_2) dy
    where :math:`\|\cdot\|_2` denotes the Euclidean norm for vector-valued
    functions, which reduces to the absolute value for scalar-valued functions.
The function :math:`f` with smoothing :math:`\\gamma` is given by
.. math::
f_{\\gamma}(t) =
\\begin{cases}
\\frac{1}{2 \\gamma} t^2 & \\text{if } |t| \leq \\gamma \\\\
|t| - \\frac{\\gamma}{2} & \\text{else}
\\end{cases}.
"""
def __init__(self, space, gamma):
"""Initialize a new instance.
Parameters
----------
space : `TensorSpace`
Domain of the functional.
gamma : float
Smoothing parameter of the Huber functional. If ``gamma = 0``,
the functional is non-smooth and corresponds to the usual L1 norm.
For ``gamma > 0``, it has a ``1/gamma``-Lipschitz gradient so that
its convex conjugate is ``gamma``-strongly convex.
Examples
--------
Example of initializing the Huber functional:
>>> space = odl.uniform_discr(0, 1, 14)
>>> gamma = 0.1
>>> huber_norm = odl.solvers.Huber(space, gamma=0.1)
Check that if all elements are > ``gamma`` we get the L1-norm up to a
constant:
>>> x = 2 * gamma * space.one()
>>> tol = 1e-5
>>> constant = gamma / 2 * space.one().inner(space.one())
>>> f = odl.solvers.L1Norm(space) - constant
>>> abs(huber_norm(x) - f(x)) < tol
True
Check that if all elements are < ``gamma`` we get the squared L2-norm
times the weight ``1/(2*gamma)``:
>>> x = gamma / 2 * space.one()
>>> f = 1 / (2 * gamma) * odl.solvers.L2NormSquared(space)
>>> abs(huber_norm(x) - f(x)) < tol
True
Compare Huber- and L1-norm for vanishing smoothing ``gamma=0``:
>>> x = odl.phantom.white_noise(space)
>>> huber_norm = odl.solvers.Huber(space, gamma=0)
>>> l1_norm = odl.solvers.L1Norm(space)
>>> abs(huber_norm(x) - l1_norm(x)) < tol
True
Redo previous example for a product space in two dimensions:
>>> domain = odl.uniform_discr([0, 0], [1, 1], [5, 5])
>>> space = odl.ProductSpace(domain, 2)
>>> x = odl.phantom.white_noise(space)
>>> huber_norm = odl.solvers.Huber(space, gamma=0)
>>> l1_norm = odl.solvers.GroupL1Norm(space, 2)
>>> abs(huber_norm(x) - l1_norm(x)) < tol
True
"""
self.__gamma = float(gamma)
if self.gamma > 0:
grad_lipschitz = 1 / self.gamma
else:
grad_lipschitz = np.inf
super(Huber, self).__init__(
space=space, linear=False, grad_lipschitz=grad_lipschitz)
@property
def gamma(self):
"""The smoothing parameter of the Huber norm functional."""
return self.__gamma
def _call(self, x):
"""Return ``self(x)``."""
if isinstance(self.domain, ProductSpace):
norm = PointwiseNorm(self.domain, 2)(x)
else:
norm = x.ufuncs.absolute()
if self.gamma > 0:
tmp = norm.ufuncs.square()
tmp *= 1 / (2 * self.gamma)
index = norm.ufuncs.greater_equal(self.gamma)
tmp[index] = norm[index] - self.gamma / 2
else:
tmp = norm
return tmp.inner(tmp.space.one())
@property
def convex_conj(self):
"""The convex conjugate"""
if isinstance(self.domain, ProductSpace):
norm = GroupL1Norm(self.domain, 2)
else:
norm = L1Norm(self.domain)
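        # The Huber functional is the Moreau envelope of ``norm`` with
        # parameter ``gamma``, so its conjugate is ``norm.convex_conj`` plus
        # the quadratic term ``gamma / 2 * ||.||_2^2``, encoded below by
        # ``FunctionalQuadraticPerturb``.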
return FunctionalQuadraticPerturb(norm.convex_conj,
quadratic_coeff=self.gamma / 2)
@property
def proximal(self):
"""Return the ``proximal factory`` of the functional.
See Also
--------
odl.solvers.proximal_huber : `proximal factory` for the Huber
norm.
"""
return proximal_huber(space=self.domain, gamma=self.gamma)
@property
def gradient(self):
"""Gradient operator of the functional.
The gradient of the Huber functional is given by
.. math::
\\nabla f_{\\gamma}(x) =
\\begin{cases}
\\frac{1}{\\gamma} x & \\text{if } \|x\|_2 \leq \\gamma \\\\
\\frac{1}{\|x\|_2} x & \\text{else}
\\end{cases}.
Examples
--------
Check that the gradient norm is less than the norm of the one element:
>>> space = odl.uniform_discr(0, 1, 14)
>>> norm_one = space.one().norm()
>>> x = odl.phantom.white_noise(space)
>>> huber_norm = odl.solvers.Huber(space, gamma=0.1)
>>> grad = huber_norm.gradient(x)
>>> tol = 1e-5
>>> grad.norm() <= norm_one + tol
True
Redo previous example for a product space in two dimensions:
>>> domain = odl.uniform_discr([0, 0], [1, 1], [5, 5])
>>> space = odl.ProductSpace(domain, 2)
>>> norm_one = space.one().norm()
>>> x = odl.phantom.white_noise(space)
>>> huber_norm = odl.solvers.Huber(space, gamma=0.2)
>>> grad = huber_norm.gradient(x)
>>> tol = 1e-5
>>> grad.norm() <= norm_one + tol
True
"""
functional = self
class HuberGradient(Operator):
"""The gradient operator of this functional."""
def __init__(self):
"""Initialize a new instance."""
super(HuberGradient, self).__init__(
functional.domain, functional.domain, linear=False)
def _call(self, x):
"""Apply the gradient operator to the given point."""
if isinstance(self.domain, ProductSpace):
norm = PointwiseNorm(self.domain, 2)(x)
else:
norm = x.ufuncs.absolute()
grad = x / functional.gamma
index = norm.ufuncs.greater_equal(functional.gamma)
if isinstance(self.domain, ProductSpace):
for xi, gi in zip(x, grad):
gi[index] = xi[index] / norm[index]
else:
grad[index] = x[index] / norm[index]
return grad
return HuberGradient()
def __repr__(self):
        """Return ``repr(self)``."""
return '{}({!r}, {!r})'.format(self.__class__.__name__, self.domain,
self.gamma)
if __name__ == '__main__':
from odl.util.testutils import run_doctests
run_doctests()
|
nicky-ji/edx-nicky
|
refs/heads/master
|
lms/envs/devplus.py
|
50
|
"""
This config file tries to mimic the production environment more closely than the
normal dev.py. It assumes you're running a local instance of MySQL 5.1 and that
you're running memcached. You'll want to use this to test caching and database
migrations.
Assumptions:
* MySQL 5.1 (version important? askbot broke on 5.5, but that's gone now)
Dir structure:
/envroot/
/edx-platform # The location of this repo
/log # Where we're going to write log files
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
from .dev import *
WIKI_ENABLED = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'wwc',
'USER': 'root',
'PASSWORD': '',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'general': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
'KEY_PREFIX': 'general',
'VERSION': 5,
'KEY_FUNCTION': 'util.memcache.safe_key',
}
}
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
################################ DEBUG TOOLBAR #################################
INSTALLED_APPS += ('debug_toolbar',)
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
# Enabling the profiler has a weird bug as of django-debug-toolbar==0.9.4 and
# Django=1.3.1/1.4 where requests to views get duplicated (your method gets
# hit twice). So you can uncomment when you need to diagnose performance
# problems, but you shouldn't leave it on.
'debug_toolbar.panels.profiling.ProfilingDebugPanel',
)
#PIPELINE = True
|
v1bri/gnuradio
|
refs/heads/master
|
gr-fec/python/fec/polar/channel_construction.py
|
17
|
#!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
[0] Erdal Arikan: 'Channel Polarization: A Method for Constructing Capacity-Achieving Codes for Symmetric Binary-Input Memoryless Channels', 2009
foundational paper for polar codes.
'''
from channel_construction_bec import calculate_bec_channel_capacities
from channel_construction_bec import design_snr_to_bec_eta
from channel_construction_bec import bhattacharyya_bounds
from channel_construction_awgn import tal_vardy_tpm_algorithm
from helper_functions import *
Z_PARAM_FIRST_HEADER_LINE = "Bhattacharyya parameters (Z-parameters) for a polar code"
def get_frozen_bit_indices_from_capacities(chan_caps, nfrozen):
indexes = np.array([], dtype=int)
while indexes.size < nfrozen:
index = np.argmin(chan_caps)
indexes = np.append(indexes, index)
chan_caps[index] = 2.0 # make absolutely sure value is out of range!
return np.sort(indexes)
def get_frozen_bit_indices_from_z_parameters(z_params, nfrozen):
indexes = np.array([], dtype=int)
while indexes.size < nfrozen:
index = np.argmax(z_params)
indexes = np.append(indexes, index)
z_params[index] = -1.0
return np.sort(indexes)
def get_bec_frozen_indices(nblock, kfrozen, eta):
bec_caps = calculate_bec_channel_capacities(eta, nblock)
positions = get_frozen_bit_indices_from_capacities(bec_caps, kfrozen)
return positions
def get_frozen_bit_mask(frozen_indices, block_size):
frozen_mask = np.zeros(block_size, dtype=int)
frozen_mask[frozen_indices] = 1
return frozen_mask
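# For example, get_frozen_bit_mask([0, 1, 2, 4], 8) gives
# array([1, 1, 1, 0, 1, 0, 0, 0]).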
def frozen_bit_positions(block_size, info_size, design_snr=0.0):
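    # -1.5917 dB is 10 * log10(ln 2), the ultimate Shannon limit on Eb/N0;
    # below it no positive-rate code can be reliable, hence the fallback.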
if not design_snr > -1.5917:
        print('bad value for design_snr, must be > -1.5917! default=0.0')
design_snr = 0.0
eta = design_snr_to_bec_eta(design_snr)
return get_bec_frozen_indices(block_size, block_size - info_size, eta)
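# Illustrative use (the exact indices depend on the BEC approximation above):
# frozen_bit_positions(16, 8, design_snr=0.0) returns the 8 least reliable
# channel indices of a block-16 polar code, sorted in ascending order.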
def generate_filename(block_size, design_snr, mu):
filename = "polar_code_z_parameters_N" + str(int(block_size))
filename += "_SNR" + str(float(design_snr)) + "_MU" + str(int(mu)) + ".polar"
return filename
def default_dir():
dir_def = "~/.gnuradio/polar/"
import os
path = os.path.expanduser(dir_def)
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
return path
def save_z_parameters(z_params, block_size, design_snr, mu, alt_construction_method='Tal-Vardy algorithm'):
path = default_dir()
filename = generate_filename(block_size, design_snr, mu)
header = Z_PARAM_FIRST_HEADER_LINE + "\n"
header += "Channel construction method: " + alt_construction_method + "\n"
header += "Parameters:\n"
header += "block_size=" + str(block_size) + "\n"
header += "design_snr=" + str(design_snr) + "\n"
header += "mu=" + str(mu)
np.savetxt(path + filename, z_params, header=header)
def load_z_parameters(block_size, design_snr, mu):
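    # Compute-and-cache: if no file for these parameters exists yet, run the
    # (expensive) Tal-Vardy construction once and save the result; the values
    # are then always re-read from disk.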
path = default_dir()
filename = generate_filename(block_size, design_snr, mu)
full_file = path + filename
import os
if not os.path.isfile(full_file):
z_params = tal_vardy_tpm_algorithm(block_size, design_snr, mu)
save_z_parameters(z_params, block_size, design_snr, mu)
z_params = np.loadtxt(full_file)
return z_params
def main():
np.set_printoptions(precision=3, linewidth=150)
    print('channel construction Bhattacharyya bounds by Arikan')
n = 10
m = 2 ** n
k = m // 2
design_snr = 0.0
mu = 32
z_params = load_z_parameters(m, design_snr, mu)
z_bounds = bhattacharyya_bounds(design_snr, m)
print(z_params[-10:])
if 0:
import matplotlib.pyplot as plt
plt.plot(z_params)
plt.plot(z_bounds)
plt.show()
if __name__ == '__main__':
main()
|
aventurella/mockingbird
|
refs/heads/master
|
tests/__init__.py
|
12133432
| |
tomchristie/django
|
refs/heads/master
|
tests/gis_tests/layermap/__init__.py
|
12133432
| |
trishnaguha/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/aireos/__init__.py
|
12133432
| |
teracyhq/flask-boilerplate
|
refs/heads/develop
|
tests/integration/api_1_0/__init__.py
|
12133432
| |
mosf1k/cocktail
|
refs/heads/master
|
cocktail/__init__.py
|
12133432
| |
SaganBolliger/nupic
|
refs/heads/master
|
tests/unit/nupic/research/__init__.py
|
12133432
| |
sgallagher/anaconda
|
refs/heads/master
|
pyanaconda/modules/payloads/payload/live_image/__init__.py
|
12133432
| |
ababic/wagtailmenus
|
refs/heads/master
|
wagtailmenus/migrations/0011_auto_20160415_1519.py
|
3
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wagtailmenus', '0010_auto_20160201_1558'),
]
operations = [
migrations.AlterField(
model_name='flatmenuitem',
name='url_append',
field=models.CharField(help_text="Use this to optionally append a #hash or querystring to the above page's URL.", max_length=255, verbose_name='Append to URL', blank=True),
),
migrations.AlterField(
model_name='mainmenuitem',
name='url_append',
field=models.CharField(help_text="Use this to optionally append a #hash or querystring to the above page's URL.", max_length=255, verbose_name='Append to URL', blank=True),
),
]
|
marcuskelly/recover
|
refs/heads/master
|
Lib/encodings/shift_jis_2004.py
|
816
|
#
# shift_jis_2004.py: Python Unicode Codec for SHIFT_JIS_2004
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('shift_jis_2004')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='shift_jis_2004',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
KILLER-CHIEF/uqcs-hackathon-2016
|
refs/heads/master
|
tornado/backports/__init__.py
|
253
|
# This is a Python "namespace package" http://www.python.org/dev/peps/pep-0382/
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
|
c22n/ion-channel-ABC
|
refs/heads/master
|
docs/examples/hl1/experiments/ikr_generic.py
|
1
|
from ionchannelABC import (Experiment,
ExperimentData,
ExperimentStimProtocol,
IonChannelModel)
import data.ikr.data_ikr as data
import numpy as np
from functools import partial
from scipy.optimize import OptimizeWarning
import myokit
modelfile = 'models/Generic_iKr.mmt'
ikr = IonChannelModel('ikr',
modelfile,
vvar='membrane.V',
logvars=myokit.LOG_ALL)#['environment.time',
#'ikr.i_Kr',
#'ikr.G_Kr'])
### Exp 1 - IV curve.
iv_vsteps, iv_curr, iv_errs, iv_N = data.IV_Toyoda()
iv_data = ExperimentData(x=iv_vsteps, y=iv_curr,
errs=iv_errs)
stim_times = [1000, 1000, 100]
stim_levels = [-50, iv_vsteps, -50]
def tail_curr(data):
return data[0]['ikr.i_Kr'][-1]
iv_prot = ExperimentStimProtocol(stim_times, stim_levels,
measure_index=1,
measure_fn=tail_curr)
toyoda_conditions1 = dict(K_o=5400,
K_i=130000,
Na_o=140330,
Na_i=6000,
T=308)
iv_exp = Experiment(iv_prot, iv_data, toyoda_conditions1)
### Exp 2 - Activation curve.
act_vsteps, act_cond, act_errs, act_N = data.Act_Toyoda()
act_data = ExperimentData(x=act_vsteps, y=act_cond,
errs=act_errs)
stim_times = [1000, 1000, 500]
stim_levels = [-50, act_vsteps, -50]
def max_gkr(data):
return max(data[0]['ikr.G_Kr'], key=abs)
def normalise(sim_results, ind_var):
max_cond = abs(max(sim_results, key=abs))
sim_results = [result / max_cond for result in sim_results]
return sim_results, False
act_prot = ExperimentStimProtocol(stim_times, stim_levels,
measure_index=2, measure_fn=max_gkr,
post_fn=normalise)
act_exp = Experiment(act_prot, act_data, toyoda_conditions1)
### Exp 3 - Activation kinetics.
akin_vsteps, akin_tau, akin_errs, akin_N = data.ActKin_Toyoda()
akin_data = ExperimentData(x=akin_vsteps, y=akin_tau,
errs=akin_errs)
intervals = np.arange(25, 975+50, 50)
stim_times = []
stim_levels = []
measure_index = []
for i, interval in enumerate(intervals):
stim_times = stim_times + [1000, interval, 1000]
stim_levels = stim_levels + [-50, akin_vsteps, -50]
measure_index = measure_index + [3*i + 2,]
def measure_maxes(data):
maxes = []
for d in data:
maxes.append(max(d['ikr.i_Kr'], key=abs))
return maxes
def fit_single_exp(data, xvar=intervals):
import numpy as np
import scipy.optimize as so
import warnings
with warnings.catch_warnings():
warnings.simplefilter('error', OptimizeWarning)
warnings.simplefilter('error', RuntimeWarning)
try:
def single_exp(t, Ass, A, tau):
return Ass + A * np.exp(-t / tau)
[_, _, tau], _ = so.curve_fit(single_exp, xvar, data,
bounds=([-50, -50, 0],
[50, 50, 5000]))
if np.isclose(tau, 5000):
raise Exception('Optimisation hit bounds')
return tau
except (Exception, OptimizeWarning, RuntimeWarning):
return float("inf")
def map_return(func, iterable, ind_var=None):
out = []
for i in iterable:
out.append(func(i))
return out, False
akin_prot = ExperimentStimProtocol(stim_times, stim_levels,
measure_index=measure_index,
measure_fn=measure_maxes,
post_fn=partial(map_return, fit_single_exp))
akin_exp = Experiment(akin_prot, akin_data, toyoda_conditions1)
### Exp 4, 5, 6 - Deactivation kinetics (fast and slow).
deact_vsteps, deact_tauf, deactfast_errs, _ = data.DeactKinFast_Toyoda()
_, deact_taus, deactslow_errs, _ = data.DeactKinSlow_Toyoda()
_, deact_amp, deactamp_errs, _ = data.DeactKinRelAmp_Toyoda()
deact_f_data = ExperimentData(x=deact_vsteps, y=deact_tauf,
errs=deactfast_errs)
deact_s_data = ExperimentData(x=deact_vsteps, y=deact_taus,
errs=deactslow_errs)
deact_amp_data = ExperimentData(x=deact_vsteps, y=deact_amp,
errs=deactamp_errs)
stim_times = [1000, 1000, 1000]
stim_levels = [-50, 20, deact_vsteps]
def double_exp_decay_fit(data):
import numpy as np
import scipy.optimize as so
import warnings
with warnings.catch_warnings():
warnings.simplefilter('error',OptimizeWarning)
warnings.simplefilter('error',RuntimeWarning)
try:
curr = data[0]['ikr.i_Kr']
time = data[0]['environment.time']
# Get peak current
index = np.argwhere(np.isclose(curr, max(curr, key=abs)))
# Set time zero to peak current
index = index[0][0]
curr = curr[index:]
# Zero time from peak current time
time = time[index:]
t0 = time[0]
time = [t - t0 for t in time]
if len(time) == 0 or len(curr) == 0:
raise Exception('Could not find peak current')
def sum_of_exp(t, Ass, Af, tauf, As, taus):
return (Ass + Af * np.exp(-t / tauf) + As * np.exp(-t / taus))
popt, _ = so.curve_fit(sum_of_exp, time, curr,
p0=[1, 1, 5, 0.2, 70],
bounds=([-50, -50, 0, -50, 50],
[50, 50, 100, 50, 2000]))
tauf = popt[2]
taus = popt[4]
Af = abs(popt[1])
As = abs(popt[3])
A_rel = Af / (Af + As)
return (tauf, taus, A_rel)
except (Exception, RuntimeWarning, OptimizeWarning, RuntimeError):
return (float("inf"), float("inf"), float("inf"))
def takefirst(data, ind_var): return [d[0] for d in data], False
def takesecond(data, ind_var): return [d[1] for d in data], False
def takethird(data, ind_var): return [d[2] for d in data], False
deact_f_prot = ExperimentStimProtocol(stim_times, stim_levels,
measure_index=2,
measure_fn=double_exp_decay_fit,
post_fn=takefirst)
deact_s_prot = ExperimentStimProtocol(stim_times, stim_levels,
measure_index=2,
measure_fn=double_exp_decay_fit,
post_fn=takesecond)
deact_amp_prot = ExperimentStimProtocol(stim_times, stim_levels,
measure_index=2,
measure_fn=double_exp_decay_fit,
post_fn=takethird)
deact_f_exp = Experiment(deact_f_prot, deact_f_data, toyoda_conditions1)
deact_s_exp = Experiment(deact_s_prot, deact_s_data, toyoda_conditions1)
deact_amp_exp = Experiment(deact_amp_prot, deact_amp_data,
toyoda_conditions1)
### Exp 7 - Kinetic properties of recovery from inactivation
inact_vsteps, inact_tau, inactkin_errs, _, = data.InactKin_Toyoda()
inact_kin_data = ExperimentData(x=inact_vsteps, y=inact_tau,
errs=inactkin_errs)
stim_times = [1000, 1000, 1000]
stim_levels = [-50, 20, inact_vsteps]
def fit_exp_rising_phase(data):
import numpy as np
import scipy.optimize as so
import warnings
with warnings.catch_warnings():
warnings.simplefilter('error',OptimizeWarning)
warnings.simplefilter('error',RuntimeWarning)
try:
curr = data[0]['ikr.i_Kr']
time = data[0]['environment.time']
# Get peak current
index = np.argwhere(np.isclose(curr, max(curr, key=abs)))
# Take subset up to peak current
index = index[0][0]
curr = curr[:index+1]
# Zero time
time = time[:index+1]
t0 = time[0]
time = [t - t0 for t in time]
if len(time) == 0 or len(curr) == 0:
raise Exception('Could not find a peak current')
def single_exp(t, Ass, A, tau):
return Ass + A * np.exp(-t / tau)
[_, _, tau], _ = so.curve_fit(single_exp, time, curr,
p0=[1, -1, 5],
bounds=([-50, -50, 0],
[50, 50, 100]))
if np.isclose(tau, 100):
raise Exception('Optimisation hit bounds')
return tau
except (Exception, OptimizeWarning, RuntimeWarning):
return float("inf")
inact_kin_prot = ExperimentStimProtocol(stim_times, stim_levels,
measure_index=2,
measure_fn=fit_exp_rising_phase)
toyoda_conditions2 = dict(K_o=5400,
K_i=130000,
Na_o=140330,
Na_i=6000,
T=298)
inact_kin_exp = Experiment(inact_kin_prot, inact_kin_data,
toyoda_conditions2)
### Exp 8 - Voltage dependence of steady-state inactivation.
inact_vsteps, inact_cond, _, _ = data.Inact_Toyoda()
inact_data = ExperimentData(x=inact_vsteps, y=inact_cond)
stim_times = [1000, 1000, 10, 1000]
stim_levels = [-50, 20, inact_vsteps, 20]
inact_prot = ExperimentStimProtocol(stim_times, stim_levels,
measure_index=3,
measure_fn=max_gkr,
post_fn=normalise)
inact_exp = Experiment(inact_prot, inact_data, toyoda_conditions2)
ikr.add_experiments([iv_exp, act_exp, akin_exp,
deact_f_exp, deact_s_exp, deact_amp_exp,
inact_kin_exp, inact_exp])
|
aldenjenkins/foobargamingwebsite
|
refs/heads/master
|
paypal/standard/models.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from warnings import warn
from django.conf import settings
from django.db import models
from django.utils.functional import cached_property
from paypal.standard.conf import POSTBACK_ENDPOINT, SANDBOX_POSTBACK_ENDPOINT
from paypal.standard.helpers import check_secret, duplicate_txn_id
ST_PP_ACTIVE = 'Active'
ST_PP_CANCELLED = 'Cancelled'
ST_PP_CANCELED_REVERSAL = 'Canceled_Reversal'
ST_PP_CLEARED = 'Cleared'
ST_PP_COMPLETED = 'Completed'
ST_PP_CREATED = 'Created'
ST_PP_DECLINED = 'Declined'
ST_PP_DENIED = 'Denied'
ST_PP_EXPIRED = 'Expired'
ST_PP_FAILED = 'Failed'
ST_PP_PAID = 'Paid'
ST_PP_PENDING = 'Pending'
ST_PP_PROCESSED = 'Processed'
ST_PP_REFUNDED = 'Refunded'
ST_PP_REFUSED = 'Refused'
ST_PP_REVERSED = 'Reversed'
ST_PP_REWARDED = 'Rewarded'
ST_PP_UNCLAIMED = 'Unclaimed'
ST_PP_UNCLEARED = 'Uncleared'
ST_PP_VOIDED = 'Voided'
try:
from idmapper.models import SharedMemoryModel as Model
except ImportError:
Model = models.Model
DEFAULT_ENCODING = 'windows-1252' # PayPal seems to normally use this.
class PayPalStandardBase(Model):
"""Base class for common variables shared by IPN and PDT"""
# See https://developer.paypal.com/docs/classic/ipn/integration-guide/IPNandPDTVariables/
# @@@ Might want to add all these one distant day.
# FLAG_CODE_CHOICES = (
# PAYMENT_STATUS_CHOICES = "Canceled_ Reversal Completed Denied Expired " \
# "Failed Pending Processed Refunded Reversed Voided".split()
PAYMENT_STATUS_CHOICES = [ST_PP_ACTIVE,
ST_PP_CANCELLED,
ST_PP_CANCELED_REVERSAL,
ST_PP_CLEARED,
ST_PP_COMPLETED,
ST_PP_CREATED,
ST_PP_DECLINED,
ST_PP_DENIED,
ST_PP_EXPIRED,
ST_PP_FAILED,
ST_PP_PAID,
ST_PP_PENDING,
ST_PP_PROCESSED,
ST_PP_REFUNDED,
ST_PP_REFUSED,
ST_PP_REVERSED,
ST_PP_REWARDED,
ST_PP_UNCLAIMED,
ST_PP_UNCLEARED,
ST_PP_VOIDED,
]
# AUTH_STATUS_CHOICES = "Completed Pending Voided".split()
# ADDRESS_STATUS_CHOICES = "confirmed unconfirmed".split()
# PAYER_STATUS_CHOICES = "verified / unverified".split()
    # PAYMENT_TYPE_CHOICES = "echeck / instant".split()
# PENDING_REASON = "address authorization echeck intl multi-currency unilateral upgrade verify other".split()
# REASON_CODE = "chargeback guarantee buyer_complaint refund other".split()
# TRANSACTION_ENTITY_CHOICES = "auth reauth order payment".split()
# Transaction and Notification-Related Variables
business = models.CharField(max_length=127, blank=True, help_text="Email where the money was sent.")
charset = models.CharField(max_length=255, blank=True)
custom = models.CharField(max_length=256, blank=True)
notify_version = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
parent_txn_id = models.CharField("Parent Transaction ID", max_length=19, blank=True)
receiver_email = models.EmailField(max_length=254, blank=True)
receiver_id = models.CharField(max_length=255, blank=True) # 258DLEHY2BDK6
residence_country = models.CharField(max_length=2, blank=True)
test_ipn = models.BooleanField(default=False, blank=True)
txn_id = models.CharField("Transaction ID", max_length=255, blank=True, help_text="PayPal transaction ID.",
db_index=True)
txn_type = models.CharField("Transaction Type", max_length=255, blank=True, help_text="PayPal transaction type.")
verify_sign = models.CharField(max_length=255, blank=True)
# Buyer Information Variables
address_country = models.CharField(max_length=64, blank=True)
address_city = models.CharField(max_length=40, blank=True)
address_country_code = models.CharField(max_length=64, blank=True, help_text="ISO 3166")
address_name = models.CharField(max_length=128, blank=True)
address_state = models.CharField(max_length=40, blank=True)
address_status = models.CharField(max_length=255, blank=True)
address_street = models.CharField(max_length=200, blank=True)
address_zip = models.CharField(max_length=20, blank=True)
contact_phone = models.CharField(max_length=20, blank=True)
first_name = models.CharField(max_length=64, blank=True)
last_name = models.CharField(max_length=64, blank=True)
payer_business_name = models.CharField(max_length=127, blank=True)
payer_email = models.CharField(max_length=127, blank=True)
payer_id = models.CharField(max_length=13, blank=True)
# Payment Information Variables
auth_amount = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
auth_exp = models.CharField(max_length=28, blank=True)
auth_id = models.CharField(max_length=19, blank=True)
auth_status = models.CharField(max_length=255, blank=True)
exchange_rate = models.DecimalField(max_digits=64, decimal_places=16, default=0, blank=True, null=True)
invoice = models.CharField(max_length=127, blank=True)
item_name = models.CharField(max_length=127, blank=True)
item_number = models.CharField(max_length=127, blank=True)
mc_currency = models.CharField(max_length=32, default="USD", blank=True)
mc_fee = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
mc_gross = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
mc_handling = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
mc_shipping = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
memo = models.CharField(max_length=255, blank=True)
num_cart_items = models.IntegerField(blank=True, default=0, null=True)
option_name1 = models.CharField(max_length=64, blank=True)
option_name2 = models.CharField(max_length=64, blank=True)
option_selection1 = models.CharField(max_length=200, blank=True)
option_selection2 = models.CharField(max_length=200, blank=True)
payer_status = models.CharField(max_length=255, blank=True)
payment_date = models.DateTimeField(blank=True, null=True, help_text="HH:MM:SS DD Mmm YY, YYYY PST")
payment_gross = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
payment_status = models.CharField(max_length=255, blank=True)
payment_type = models.CharField(max_length=255, blank=True)
pending_reason = models.CharField(max_length=255, blank=True)
protection_eligibility = models.CharField(max_length=255, blank=True)
quantity = models.IntegerField(blank=True, default=1, null=True)
reason_code = models.CharField(max_length=255, blank=True)
remaining_settle = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
settle_amount = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
settle_currency = models.CharField(max_length=32, blank=True)
shipping = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
shipping_method = models.CharField(max_length=255, blank=True)
tax = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
transaction_entity = models.CharField(max_length=255, blank=True)
# Auction Variables
auction_buyer_id = models.CharField(max_length=64, blank=True)
auction_closing_date = models.DateTimeField(blank=True, null=True, help_text="HH:MM:SS DD Mmm YY, YYYY PST")
auction_multi_item = models.IntegerField(blank=True, default=0, null=True)
for_auction = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
# Recurring Payments Variables
amount = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
amount_per_cycle = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
initial_payment_amount = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
next_payment_date = models.DateTimeField(blank=True, null=True, help_text="HH:MM:SS DD Mmm YY, YYYY PST")
outstanding_balance = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
payment_cycle = models.CharField(max_length=255, blank=True) # Monthly
period_type = models.CharField(max_length=255, blank=True)
product_name = models.CharField(max_length=255, blank=True)
product_type = models.CharField(max_length=255, blank=True)
profile_status = models.CharField(max_length=255, blank=True)
recurring_payment_id = models.CharField(max_length=255, blank=True) # I-FA4XVST722B9
rp_invoice_id = models.CharField(max_length=127, blank=True) # 1335-7816-2936-1451
time_created = models.DateTimeField(blank=True, null=True, help_text="HH:MM:SS DD Mmm YY, YYYY PST")
# Subscription Variables
amount1 = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
amount2 = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
amount3 = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
mc_amount1 = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
mc_amount2 = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
mc_amount3 = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
password = models.CharField(max_length=24, blank=True)
period1 = models.CharField(max_length=255, blank=True)
period2 = models.CharField(max_length=255, blank=True)
period3 = models.CharField(max_length=255, blank=True)
reattempt = models.CharField(max_length=1, blank=True)
recur_times = models.IntegerField(blank=True, default=0, null=True)
recurring = models.CharField(max_length=1, blank=True)
retry_at = models.DateTimeField(blank=True, null=True, help_text="HH:MM:SS DD Mmm YY, YYYY PST")
subscr_date = models.DateTimeField(blank=True, null=True, help_text="HH:MM:SS DD Mmm YY, YYYY PST")
subscr_effective = models.DateTimeField(blank=True, null=True, help_text="HH:MM:SS DD Mmm YY, YYYY PST")
subscr_id = models.CharField(max_length=19, blank=True)
username = models.CharField(max_length=64, blank=True)
# Billing Agreement Variables
mp_id = models.CharField(max_length=128, blank=True, null=True) # B-0G433009BJ555711U
# Dispute Resolution Variables
case_creation_date = models.DateTimeField(blank=True, null=True, help_text="HH:MM:SS DD Mmm YY, YYYY PST")
case_id = models.CharField(max_length=255, blank=True)
case_type = models.CharField(max_length=255, blank=True)
# Variables not categorized
receipt_id = models.CharField(max_length=255, blank=True) # 1335-7816-2936-1451
currency_code = models.CharField(max_length=32, default="USD", blank=True)
handling_amount = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
    # This undocumented variable apparently contains the same value as the
    # 'custom' field - http://stackoverflow.com/questions/8464442/set-transaction-subject-paypal-ipn
transaction_subject = models.CharField(max_length=256, blank=True)
# @@@ Mass Pay Variables (Not Implemented, needs a separate model, for each transaction x)
# fraud_managment_pending_filters_x = models.CharField(max_length=255, blank=True)
# option_selection1_x = models.CharField(max_length=200, blank=True)
# option_selection2_x = models.CharField(max_length=200, blank=True)
# masspay_txn_id_x = models.CharField(max_length=19, blank=True)
# mc_currency_x = models.CharField(max_length=32, default="USD", blank=True)
# mc_fee_x = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
# mc_gross_x = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
# mc_handlingx = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
# payment_date = models.DateTimeField(blank=True, null=True, help_text="HH:MM:SS DD Mmm YY, YYYY PST")
# payment_status = models.CharField(max_length=9, blank=True)
# reason_code = models.CharField(max_length=15, blank=True)
# receiver_email_x = models.EmailField(max_length=127, blank=True)
# status_x = models.CharField(max_length=9, blank=True)
# unique_id_x = models.CharField(max_length=13, blank=True)
# Non-PayPal Variables - full IPN/PDT query and time fields.
ipaddress = models.GenericIPAddressField(blank=True, null=True)
flag = models.BooleanField(default=False, blank=True)
flag_code = models.CharField(max_length=16, blank=True)
flag_info = models.TextField(blank=True)
query = models.TextField(blank=True) # What Paypal sent to us initially
response = models.TextField(blank=True) # What we got back from our request
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
# Where did it come from?
from_view = models.CharField(max_length=6, null=True, blank=True)
class Meta:
abstract = True
app_label = 'paypal_standard_base' # Keep Django 1.7 quiet
def __unicode__(self):
if self.is_transaction():
return self.format % ("Transaction", self.txn_id)
elif self.is_subscription():
return self.format % ("Subscription", self.subscr_id)
else:
return self.format % ("Recurring", self.recurring_payment_id)
@cached_property
def posted_data_dict(self):
"""
All the data that PayPal posted to us, as a correctly parsed dictionary of values.
"""
if not self.query:
return None
from django.http import QueryDict
roughdecode = dict(item.split('=', 1) for item in self.query.split('&'))
encoding = roughdecode.get('charset', None)
if encoding is None:
encoding = DEFAULT_ENCODING
query = self.query.encode('ascii')
data = QueryDict(query, encoding=encoding)
return data.dict()
def is_transaction(self):
return len(self.txn_id) > 0
def is_refund(self):
return self.payment_status == ST_PP_REFUNDED
def is_reversed(self):
return self.payment_status == ST_PP_REVERSED
def is_recurring(self):
return len(self.recurring_payment_id) > 0
def is_subscription(self):
return len(self.subscr_id) > 0
def is_subscription_payment(self):
return self.txn_type == "subscr_payment"
def is_subscription_failed(self):
return self.txn_type == "subscr_failed"
def is_subscription_cancellation(self):
return self.txn_type == "subscr_cancel"
def is_subscription_end_of_term(self):
return self.txn_type == "subscr_eot"
def is_subscription_modified(self):
return self.txn_type == "subscr_modify"
def is_subscription_signup(self):
return self.txn_type == "subscr_signup"
def is_recurring_create(self):
return self.txn_type == "recurring_payment_profile_created"
def is_recurring_payment(self):
return self.txn_type == "recurring_payment"
def is_recurring_cancel(self):
return self.txn_type == "recurring_payment_profile_cancel"
def is_recurring_skipped(self):
return self.txn_type == "recurring_payment_skipped"
def is_recurring_failed(self):
return self.txn_type == "recurring_payment_failed"
def is_recurring_suspended(self):
return self.txn_type == "recurring_payment_suspended"
def is_recurring_suspended_due_to_max_failed_payment(self):
return self.txn_type == "recurring_payment_suspended_due_to_max_failed_payment"
def is_billing_agreement(self):
return len(self.mp_id) > 0
def is_billing_agreement_create(self):
return self.txn_type == "mp_signup"
def is_billing_agreement_cancel(self):
return self.txn_type == "mp_cancel"
def set_flag(self, info, code=None):
"""Sets a flag on the transaction and also sets a reason."""
self.flag = True
self.flag_info += info
if code is not None:
self.flag_code = code
def clear_flag(self):
self.flag = False
self.flag_info = ""
self.flag_code = ""
def verify(self):
"""
Verifies an IPN and a PDT.
Checks for obvious signs of weirdness in the payment and flags appropriately.
"""
self.response = self._postback().decode('ascii')
self.clear_flag()
self._verify_postback()
if not self.flag:
if self.is_transaction():
if self.payment_status not in self.PAYMENT_STATUS_CHOICES:
self.set_flag("Invalid payment_status. (%s)" % self.payment_status)
if duplicate_txn_id(self):
self.set_flag("Duplicate txn_id. (%s)" % self.txn_id)
if hasattr(settings, 'PAYPAL_RECEIVER_EMAIL'):
warn("Use of PAYPAL_RECEIVER_EMAIL in settings has been Deprecated.\n"
"Check of valid email must be done when receiving the\n"
"valid_ipn_received signal",
DeprecationWarning)
if self.receiver_email != settings.PAYPAL_RECEIVER_EMAIL:
self.set_flag("Invalid receiver_email. (%s)" % self.receiver_email)
else:
# @@@ Run a different series of checks on recurring payments.
pass
self.save()
def verify_secret(self, form_instance, secret):
"""Verifies an IPN payment over SSL using EWP."""
if not check_secret(form_instance, secret):
self.set_flag("Invalid secret. (%s)") % secret
self.save()
def get_endpoint(self):
"""Set Sandbox endpoint if the test variable is present."""
if self.test_ipn:
return SANDBOX_POSTBACK_ENDPOINT
else:
return POSTBACK_ENDPOINT
def send_signals(self):
"""Shout for the world to hear whether a txn was successful."""
raise NotImplementedError
def initialize(self, request):
"""Store the data we'll need to make the postback from the request object."""
if request.method == 'GET':
# PDT only - this data is currently unused
self.query = request.META.get('QUERY_STRING', '')
elif request.method == 'POST':
# The following works if paypal sends an ASCII bytestring, which it does.
self.query = request.body.decode('ascii')
self.ipaddress = request.META.get('REMOTE_ADDR', '')
def _postback(self):
"""Perform postback to PayPal and store the response in self.response."""
raise NotImplementedError
def _verify_postback(self):
"""Check self.response is valid andcall self.set_flag if there is an error."""
raise NotImplementedError
|
foreni-packages/xortool
|
refs/heads/master
|
xortool/args.py
|
8
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from docopt import docopt
from routine import parse_char
class ArgError(Exception):
pass
def parse_parameters(doc, version):
p = docopt(doc, version=version)
p = {k.lstrip("-"): v for k, v in p.items()}
try:
return {
"input_is_hex": bool(p["hex"]),
"max_key_length": int(p["max-keylen"]),
"known_key_length": int(p["key-length"]) if p["key-length"] else None,
"most_frequent_char": parse_char(p["char"]) if p["char"] else None,
"brute_chars": bool(p["brute-chars"]),
"brute_printable": bool(p["brute-printable"]),
"frequency_spread": 0, # to be removed
"filename": p["FILE"] if p["FILE"] else "-", # stdin by default
}
except ValueError as err:
raise ArgError(str(err))
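# A usage sketch (assumes the calling script defines a docopt-style usage
# string; the version string below is made up for illustration):
#
#     params = parse_parameters(usage_doc, version="0.0")
#     params["max_key_length"]   # int, from --max-keylen
#     params["filename"]         # "-" (stdin) unless FILE was given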
|
ngageoint/scale
|
refs/heads/master
|
scale/job/test/execution/configuration/__init__.py
|
12133432
| |
Elico-Corp/odoo-addons
|
refs/heads/8.0
|
account_prepayment/models/account.py
|
1
|
# -*- coding: utf-8 -*-
# © 2015 Elico corp (www.elico-corp.com)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp.osv import osv, fields
from openerp.tools.translate import _
class AccountVoucher(osv.osv):
_inherit = "account.voucher"
_columns = {
'purchase_id': fields.many2one(
'purchase.order',
'Purchase Order',
domain=[('invoiced', '=', False)],
ondelete='set null'),
'use_prepayment_account': fields.boolean(
'Use Prepayment account',
help="Check this if you want to input a prepayment \
on the prepayment accounts."),
'sale_id': fields.many2one(
'sale.order',
'Sale Order',
domain=[('invoiced', '=', False)],
ondelete='set null'),
}
_defaults = {'use_prepayment_account': False, }
def onchange_sale_id(self, cr, uid, ids, sale_id):
res = {}
if not sale_id:
return res
amount = 0
so_obj = self.pool.get('sale.order')
so = so_obj.browse(cr, uid, sale_id)
if so.invoiced:
res['warning'] = {'title': _('Warning!'),
'message': _('Selected Sale Order was paid.')}
for invoice in so.invoice_ids:
amount = invoice.residual
res['value'] = {'partner_id': so.partner_id.id, 'amount': amount}
return res
def onchange_purchase_id(self, cr, uid, ids, purchase_id):
res = {}
if not purchase_id:
return res
amount = 0
po_obj = self.pool.get('purchase.order')
po = po_obj.browse(cr, uid, purchase_id)
if po.invoiced:
res['warning'] = {'title': _('Warning!'),
'message': _('Selected Purchase Order was \
paid.')}
for invoice in po.invoice_ids:
amount = invoice.residual
res['value'] = {'partner_id': po.partner_id.id, 'amount': amount}
return res
def onchange_prepayment_account(
self, cr, uid, ids, use_prepayment_account):
res = {}
if not use_prepayment_account:
return res
res['value'] = {'line_cr_ids': [], 'line_dr_ids': [], 'line_ids': []}
return res
def writeoff_move_line_get(self,
cr,
uid,
voucher_id,
line_total,
move_id,
name,
company_currency,
current_currency,
context=None):
line_vals = super(AccountVoucher, self).writeoff_move_line_get(
cr,
uid,
voucher_id,
line_total,
move_id,
name,
company_currency,
current_currency,
context=context)
if line_vals:
account_id = False
voucher_brw = self.pool.get('account.voucher').browse(
cr, uid, voucher_id, context)
if voucher_brw.use_prepayment_account:
if voucher_brw.payment_option == 'with_writeoff':
account_id = voucher_brw.writeoff_acc_id.id
elif voucher_brw.type in ('sale', 'receipt'):
if not voucher_brw.partner_id.\
property_account_prereceivable:
raise osv.except_osv(
_('Unable to validate payment !'),
_('Please configure the partner Prereceivable \
Account at first!'))
account_id = voucher_brw.partner_id.\
property_account_prereceivable.id
else:
if not voucher_brw.partner_id.property_account_prepayable:
raise osv.except_osv(
_('Unable to validate payment !'),
_('Please configure the partner Prepayable Account\
at first!'))
account_id = voucher_brw.partner_id.\
property_account_prepayable.id
if account_id:
line_vals['account_id'] = account_id
return line_vals
AccountVoucher()
|
wgwoods/lorax
|
refs/heads/master
|
src/pylorax/api/checkparams.py
|
2
|
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import logging
log = logging.getLogger("lorax-composer")
from flask import jsonify
from functools import update_wrapper
# A decorator for checking the parameters provided to the API route implementing
# functions. The tuples parameter is a list of tuples. Each tuple is the string
# name of a parameter ("blueprint_name", not blueprint_name), the value it's set
# to by flask if the caller did not provide it, and a message to be returned to
# the user.
#
# If the parameter is set to its default, the error message is returned. Otherwise,
# the decorated function is called and its return value is returned.
def checkparams(tuples):
def decorator(f):
def wrapped_function(*args, **kwargs):
for tup in tuples:
if kwargs[tup[0]] == tup[1]:
log.error("(%s) %s", f.__name__, tup[2])
return jsonify(status=False, errors=[tup[2]]), 400
return f(*args, **kwargs)
return update_wrapper(wrapped_function, f)
return decorator
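# A hypothetical example (route and names below are illustrative only):
#
#     @api.route("/api/v0/blueprints/info/<blueprint_name>")
#     @checkparams([("blueprint_name", "", "no blueprint name given")])
#     def v0_blueprints_info(blueprint_name):
#         ...
#
# If flask filled blueprint_name with its default (""), the decorator logs
# the message and returns a JSON 400 error instead of calling the view.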
|
lz1988/company-site
|
refs/heads/master
|
tests/modeltests/unmanaged_models/models.py
|
115
|
"""
Models can have a ``managed`` attribute, which specifies whether the SQL code
is generated for the table on various manage.py operations.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# All of these models are created in the database by Django.
@python_2_unicode_compatible
class A01(models.Model):
f_a = models.CharField(max_length=10, db_index=True)
f_b = models.IntegerField()
class Meta:
db_table = 'a01'
def __str__(self):
return self.f_a
@python_2_unicode_compatible
class B01(models.Model):
fk_a = models.ForeignKey(A01)
f_a = models.CharField(max_length=10, db_index=True)
f_b = models.IntegerField()
class Meta:
db_table = 'b01'
# 'managed' is True by default. This tests we can set it explicitly.
managed = True
def __str__(self):
return self.f_a
@python_2_unicode_compatible
class C01(models.Model):
mm_a = models.ManyToManyField(A01, db_table='d01')
f_a = models.CharField(max_length=10, db_index=True)
f_b = models.IntegerField()
class Meta:
db_table = 'c01'
def __str__(self):
return self.f_a
# All of these models use the same tables as the previous set (they are shadows
# of possibly a subset of the columns). There should be no creation errors,
# since we have told Django they aren't managed by Django.
@python_2_unicode_compatible
class A02(models.Model):
f_a = models.CharField(max_length=10, db_index=True)
class Meta:
db_table = 'a01'
managed = False
def __str__(self):
return self.f_a
@python_2_unicode_compatible
class B02(models.Model):
class Meta:
db_table = 'b01'
managed = False
fk_a = models.ForeignKey(A02)
f_a = models.CharField(max_length=10, db_index=True)
f_b = models.IntegerField()
def __str__(self):
return self.f_a
# To re-use the many-to-many intermediate table, we need to set things
# up manually.
@python_2_unicode_compatible
class C02(models.Model):
mm_a = models.ManyToManyField(A02, through="Intermediate")
f_a = models.CharField(max_length=10, db_index=True)
f_b = models.IntegerField()
class Meta:
db_table = 'c01'
managed = False
def __str__(self):
return self.f_a
class Intermediate(models.Model):
a02 = models.ForeignKey(A02, db_column="a01_id")
c02 = models.ForeignKey(C02, db_column="c01_id")
class Meta:
db_table = 'd01'
managed = False
#
# These next models test the creation (or not) of many to many join tables
# between managed and unmanaged models. A join table between two unmanaged
# models shouldn't be automatically created (see #10647).
#
# Firstly, we need some models that will create the tables, purely so that the
# tables are created. This is a test setup, not a requirement for unmanaged
# models.
class Proxy1(models.Model):
class Meta:
db_table = "unmanaged_models_proxy1"
class Proxy2(models.Model):
class Meta:
db_table = "unmanaged_models_proxy2"
class Unmanaged1(models.Model):
class Meta:
managed = False
db_table = "unmanaged_models_proxy1"
# Unmanaged with an m2m to unmanaged: the intermediary table won't be created.
class Unmanaged2(models.Model):
mm = models.ManyToManyField(Unmanaged1)
class Meta:
managed = False
db_table = "unmanaged_models_proxy2"
# Here's an unmanaged model with an m2m to a managed one; the intermediary
# table *will* be created (unless given a custom `through` as for C02 above).
class Managed1(models.Model):
mm = models.ManyToManyField(Unmanaged1)
|
randomtask1155/gpdb
|
refs/heads/master
|
src/test/unit/mock/special.py
|
20
|
class SpecialFuncs(object):
@classmethod
def make_body(cls, func):
key = 'make_body_' + func.funcname
if key in cls.__dict__:
return cls.__dict__[key].__get__(None, SpecialFuncs)(func)
@staticmethod
def make_body_MemoryContextAllocZeroImpl(func):
return """
void *p = malloc(size);
memset(p, 0, size);
return p;
"""
@staticmethod
def make_body_MemoryContextAllocImpl(func):
return """
void *p = malloc(size);
return p;
"""
@staticmethod
def make_body_MemoryContextFreeImpl(func):
return """
free(pointer);
"""
@staticmethod
def make_body_MemoryContextStrdup(func):
return """
return strdup(string);
"""
@staticmethod
def make_body_MemoryContextReallocImpl(func):
return """
return realloc(pointer, size);
"""
@staticmethod
def make_body_MemoryContextAllocZeroAlignedImpl(func):
return """
void *p = malloc(size);
memset(p, 0, size);
return p;
"""
class ByValStructs(object):
"""These are structs over 32 bit and possibly passed by-value.
As our mock framework doesn't accept 64 bit integer in some platform,
we have to treat them specially.
"""
type_names = set([
'ArrayTuple',
'CdbPathLocus',
'Complex',
'DbDirNode',
'DirectDispatchCalculationInfo',
'FileRepIdentifier_u',
'FileRepOperationDescription_u',
'FileRepRelFileNodeInfo_s',
'FileRepVerifyArguments',
'FileRepVerifyLogControl_s',
'FileRepVerifyRequest_s',
'instr_time',
'Interval',
'ItemPointerData',
'NameData',
'mpp_fd_set',
'PGSemaphoreData',
'PossibleValueSet',
'PrimaryMirrorModeTransitionArguments',
'RelFileNode',
'struct timeval',
'VariableStatData',
'XLogRecPtr'
])
@classmethod
def has(cls, argtype):
return argtype in cls.type_names
|
40223101/2015final2
|
refs/heads/master
|
static/Brython3.1.3-20150514-095342/Lib/unittest/test/test_break.py
|
785
|
import gc
import io
import os
import sys
import signal
import weakref
import unittest
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform == "win32", "Test cannot run on Windows")
@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
"if threads have been used")
class TestBreak(unittest.TestCase):
def setUp(self):
self._default_handler = signal.getsignal(signal.SIGINT)
def tearDown(self):
signal.signal(signal.SIGINT, self._default_handler)
unittest.signals._results = weakref.WeakKeyDictionary()
unittest.signals._interrupt_handler = None
def testInstallHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(unittest.signals._interrupt_handler.called)
def testRegisterResult(self):
result = unittest.TestResult()
unittest.registerResult(result)
for ref in unittest.signals._results:
if ref is result:
break
elif ref is not result:
self.fail("odd object in result set")
else:
self.fail("result not found")
def testInterruptCaught(self):
default_handler = signal.getsignal(signal.SIGINT)
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.breakCaught)
def testSecondInterrupt(self):
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
os.kill(pid, signal.SIGINT)
self.fail("Second KeyboardInterrupt not raised")
try:
test(result)
except KeyboardInterrupt:
pass
else:
self.fail("Second KeyboardInterrupt not raised")
self.assertTrue(result.breakCaught)
def testTwoResults(self):
unittest.installHandler()
result = unittest.TestResult()
unittest.registerResult(result)
new_handler = signal.getsignal(signal.SIGINT)
result2 = unittest.TestResult()
unittest.registerResult(result2)
self.assertEqual(signal.getsignal(signal.SIGINT), new_handler)
result3 = unittest.TestResult()
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.shouldStop)
self.assertTrue(result2.shouldStop)
self.assertFalse(result3.shouldStop)
def testHandlerReplacedButCalled(self):
# If our handler has been replaced (is no longer installed) but is
# called by the *new* handler, then it isn't safe to delay the
# SIGINT and we should immediately delegate to the default handler
unittest.installHandler()
handler = signal.getsignal(signal.SIGINT)
def new_handler(frame, signum):
handler(frame, signum)
signal.signal(signal.SIGINT, new_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
else:
self.fail("replaced but delegated handler doesn't raise interrupt")
def testRunner(self):
# Creating a TextTestRunner with the appropriate argument should
# register the TextTestResult it creates
runner = unittest.TextTestRunner(stream=io.StringIO())
result = runner.run(unittest.TestSuite())
self.assertIn(result, unittest.signals._results)
def testWeakReferences(self):
# Calling registerResult on a result should not keep it alive
result = unittest.TestResult()
unittest.registerResult(result)
ref = weakref.ref(result)
del result
# For non-reference counting implementations
        gc.collect(); gc.collect()
self.assertIsNone(ref())
def testRemoveResult(self):
result = unittest.TestResult()
unittest.registerResult(result)
unittest.installHandler()
self.assertTrue(unittest.removeResult(result))
# Should this raise an error instead?
self.assertFalse(unittest.removeResult(unittest.TestResult()))
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
self.assertFalse(result.shouldStop)
def testMainInstallsHandler(self):
failfast = object()
test = object()
verbosity = object()
result = object()
default_handler = signal.getsignal(signal.SIGINT)
class FakeRunner(object):
initArgs = []
runArgs = []
def __init__(self, *args, **kwargs):
self.initArgs.append((args, kwargs))
def run(self, test):
self.runArgs.append(test)
return result
class Program(unittest.TestProgram):
def __init__(self, catchbreak):
self.exit = False
self.verbosity = verbosity
self.failfast = failfast
self.catchbreak = catchbreak
self.testRunner = FakeRunner
self.test = test
self.result = None
p = Program(False)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast,
'warnings': None})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
FakeRunner.initArgs = []
FakeRunner.runArgs = []
p = Program(True)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast,
'warnings': None})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
# check that calling removeHandler multiple times has no ill-effect
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandlerAsDecorator(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
@unittest.removeHandler
def test():
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
test()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
|
freakynit/vertx-web
|
refs/heads/master
|
src/test/sockjs-protocol/venv/lib/python2.7/site-packages/setuptools/svn_utils.py
|
141
|
import os
import re
import sys
from distutils import log
import xml.dom.pulldom
import shlex
import locale
import codecs
import unicodedata
import warnings
from setuptools.compat import unicode
from xml.sax.saxutils import unescape
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from subprocess import Popen as _Popen, PIPE as _PIPE
#NOTE: Use of the command line options requires SVN 1.3 or newer (December 2005)
# and SVN 1.3 hasn't been supported by the developers since mid 2008.
#subprocess is called several times with shell=(sys.platform=='win32')
#see the follow for more information:
# http://bugs.python.org/issue8557
# http://stackoverflow.com/questions/5658622/
# python-subprocess-popen-environment-path
def _run_command(args, stdout=_PIPE, stderr=_PIPE, encoding=None, stream=0):
#regarding the shell argument, see: http://bugs.python.org/issue8557
try:
proc = _Popen(args, stdout=stdout, stderr=stderr,
shell=(sys.platform == 'win32'))
data = proc.communicate()[stream]
except OSError:
return 1, ''
    #decode the output once, using the given or detected console encoding
    data = decode_as_string(data, encoding)
    #communicate() calls wait(), so returncode is set by this point
return proc.returncode, data
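#A minimal usage sketch (presence of the 'svn' binary on PATH is an
#assumption); this mirrors what SvnInfo.get_svn_version does below:
#
# code, out = _run_command(['svn', '--version', '--quiet'])
# if code == 0:
#     print(out.strip())  # e.g. '1.8.10'
#on OSError the helper returns (1, '') instead of raising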
def _get_entry_schedule(entry):
schedule = entry.getElementsByTagName('schedule')[0]
return "".join([t.nodeValue
for t in schedule.childNodes
if t.nodeType == t.TEXT_NODE])
def _get_target_property(target):
property_text = target.getElementsByTagName('property')[0]
return "".join([t.nodeValue
for t in property_text.childNodes
if t.nodeType == t.TEXT_NODE])
def _get_xml_data(decoded_str):
if sys.version_info < (3, 0):
#old versions want an encoded string
data = decoded_str.encode('utf-8')
else:
data = decoded_str
return data
def joinpath(prefix, *suffix):
if not prefix or prefix == '.':
return os.path.join(*suffix)
return os.path.join(prefix, *suffix)
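#Behavior sketch: an empty or '.' prefix is dropped, so
#joinpath('', 'a', 'b') == joinpath('.', 'a', 'b') == os.path.join('a', 'b'),
#while joinpath('base', 'a') == os.path.join('base', 'a')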
def determine_console_encoding():
try:
#try for the preferred encoding
encoding = locale.getpreferredencoding()
        #getpreferredencoding on some Python versions/platforms returns
        #US-ASCII or nothing when it cannot determine an encoding; fall
        #back to locale.getdefaultlocale in that case
if not encoding or encoding == "US-ASCII":
encoding = locale.getdefaultlocale()[1]
if encoding:
codecs.lookup(encoding) # make sure a lookup error is not made
except (locale.Error, LookupError):
encoding = None
is_osx = sys.platform == "darwin"
if not encoding:
return ["US-ASCII", "utf-8"][is_osx]
elif encoding.startswith("mac-") and is_osx:
        #certain versions of Python on OS X return mac-roman as the
        #default, a leftover from earlier Mac OS versions
return "utf-8"
else:
return encoding
_console_encoding = determine_console_encoding()
def decode_as_string(text, encoding=None):
"""
    Decode console or file output explicitly using getpreferredencoding.
    The text parameter should be an encoded string; if it is already
    unicode, no decode occurs. If no encoding is given, the detected
    console encoding is used; otherwise the given encoding is used, which
    is needed for SVN --xml output. The result is normalized to composed
    NFC form. --xml output should be UTF-8 (SVN issue 2938); the 2007
    discussion on the Subversion dev list seems to indicate the same.
"""
#text should be a byte string
if encoding is None:
encoding = _console_encoding
if not isinstance(text, unicode):
text = text.decode(encoding)
text = unicodedata.normalize('NFC', text)
return text
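#Usage sketch (the byte value is illustrative): raw bytes from an SVN
#--xml call decode as UTF-8 and come back as NFC-normalized unicode:
#
# text = decode_as_string(b'caf\xc3\xa9', encoding='utf-8')  # -> u'café'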
def parse_dir_entries(decoded_str):
'''Parse the entries from a recursive info xml'''
doc = xml.dom.pulldom.parseString(_get_xml_data(decoded_str))
entries = list()
for event, node in doc:
if event == 'START_ELEMENT' and node.nodeName == 'entry':
doc.expandNode(node)
if not _get_entry_schedule(node).startswith('delete'):
entries.append((node.getAttribute('path'),
node.getAttribute('kind')))
return entries[1:] # do not want the root directory
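#Result shape (paths are illustrative): a list of (path, kind) tuples
#with the root directory entry stripped, e.g.
# [('trunk/setup.py', 'file'), ('trunk/docs', 'dir')]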
def parse_externals_xml(decoded_str, prefix=''):
'''Parse a propget svn:externals xml'''
prefix = os.path.normpath(prefix)
prefix = os.path.normcase(prefix)
doc = xml.dom.pulldom.parseString(_get_xml_data(decoded_str))
externals = list()
for event, node in doc:
if event == 'START_ELEMENT' and node.nodeName == 'target':
doc.expandNode(node)
path = os.path.normpath(node.getAttribute('path'))
if os.path.normcase(path).startswith(prefix):
path = path[len(prefix)+1:]
data = _get_target_property(node)
#data should be decoded already
for external in parse_external_prop(data):
externals.append(joinpath(path, external))
return externals # do not want the root directory
def parse_external_prop(lines):
"""
Parse the value of a retrieved svn:externals entry.
    Possible token setups (with quoting and backslash escaping in later versions):
URL[@#] EXT_FOLDERNAME
[-r#] URL EXT_FOLDERNAME
EXT_FOLDERNAME [-r#] URL
"""
externals = []
for line in lines.splitlines():
line = line.lstrip() # there might be a "\ "
if not line:
continue
if sys.version_info < (3, 0):
            #shlex handles NULLs just fine, but shlex in 2.7 tries to encode
            #as ascii automatically
line = line.encode('utf-8')
line = shlex.split(line)
if sys.version_info < (3, 0):
line = [x.decode('utf-8') for x in line]
#EXT_FOLDERNAME is either the first or last depending on where
#the URL falls
if urlparse.urlsplit(line[-1])[0]:
external = line[0]
else:
external = line[-1]
external = decode_as_string(external, encoding="utf-8")
externals.append(os.path.normpath(external))
return externals
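#Illustrative inputs for the three layouts above (URLs and folder names
#are made up):
#
# parse_external_prop('http://svn.example.org/repo/trunk ext_a')
# parse_external_prop('-r42 http://svn.example.org/repo/trunk ext_b')
# parse_external_prop('ext_c -r42 http://svn.example.org/repo/trunk')
#
#return ['ext_a'], ['ext_b'] and ['ext_c'] respectively: the folder name
#is whichever end token is not a URL, run through os.path.normpath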
def parse_prop_file(filename, key):
found = False
f = open(filename, 'rt')
data = ''
try:
for line in iter(f.readline, ''): # can't use direct iter!
parts = line.split()
if len(parts) == 2:
kind, length = parts
data = f.read(int(length))
if kind == 'K' and data == key:
found = True
elif kind == 'V' and found:
break
finally:
f.close()
return data
class SvnInfo(object):
'''
    Generic svn_info object. Has little knowledge of how to extract
    information itself; use cls.load to instantiate the subclass matching
    the installed svn version.
Paths are not filesystem encoded.
'''
@staticmethod
def get_svn_version():
code, data = _run_command(['svn', '--version', '--quiet'])
if code == 0 and data:
return data.strip()
else:
return ''
#svnversion return values (previous implementations return max revision)
# 4123:4168 mixed revision working copy
# 4168M modified working copy
# 4123S switched working copy
# 4123:4168MS mixed revision, modified, switched working copy
revision_re = re.compile(r'(?:([\-0-9]+):)?(\d+)([a-z]*)\s*$', re.I)
@classmethod
def load(cls, dirname=''):
normdir = os.path.normpath(dirname)
code, data = _run_command(['svn', 'info', normdir])
# Must check for some contents, as some use empty directories
# in testcases
svn_dir = os.path.join(normdir, '.svn')
has_svn = (os.path.isfile(os.path.join(svn_dir, 'entries')) or
os.path.isfile(os.path.join(svn_dir, 'dir-props')) or
os.path.isfile(os.path.join(svn_dir, 'dir-prop-base')))
svn_version = tuple(cls.get_svn_version().split('.'))
try:
base_svn_version = tuple(int(x) for x in svn_version[:2])
except ValueError:
base_svn_version = tuple()
if not has_svn:
return SvnInfo(dirname)
if code or not base_svn_version or base_svn_version < (1, 3):
warnings.warn(("No SVN 1.3+ command found: falling back "
"on pre 1.7 .svn parsing"), DeprecationWarning)
return SvnFileInfo(dirname)
if base_svn_version < (1, 5):
return Svn13Info(dirname)
return Svn15Info(dirname)
def __init__(self, path=''):
self.path = path
self._entries = None
self._externals = None
def get_revision(self):
        'Retrieve the directory revision information using svnversion'
code, data = _run_command(['svnversion', '-c', self.path])
if code:
log.warn("svnversion failed")
return 0
parsed = self.revision_re.match(data)
if parsed:
return int(parsed.group(2))
else:
return 0
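    #Parsing sketch: for svnversion output '4123:4168MS' (a mixed-revision,
    #modified, switched working copy) revision_re yields the groups
    #('4123', '4168', 'MS'), so get_revision reports 4168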
@property
def entries(self):
if self._entries is None:
self._entries = self.get_entries()
return self._entries
@property
def externals(self):
if self._externals is None:
self._externals = self.get_externals()
return self._externals
def iter_externals(self):
'''
Iterate over the svn:external references in the repository path.
'''
for item in self.externals:
yield item
def iter_files(self):
'''
Iterate over the non-deleted file entries in the repository path
'''
for item, kind in self.entries:
if kind.lower() == 'file':
yield item
def iter_dirs(self, include_root=True):
'''
        Iterate over the non-deleted directory entries in the repository path
'''
if include_root:
yield self.path
for item, kind in self.entries:
if kind.lower() == 'dir':
yield item
def get_entries(self):
return []
def get_externals(self):
return []
class Svn13Info(SvnInfo):
def get_entries(self):
code, data = _run_command(['svn', 'info', '-R', '--xml', self.path],
encoding="utf-8")
if code:
log.debug("svn info failed")
return []
return parse_dir_entries(data)
def get_externals(self):
#Previous to 1.5 --xml was not supported for svn propget and the -R
#output format breaks the shlex compatible semantics.
cmd = ['svn', 'propget', 'svn:externals']
result = []
for folder in self.iter_dirs():
code, lines = _run_command(cmd + [folder], encoding="utf-8")
if code != 0:
log.warn("svn propget failed")
return []
            #lines should be a str
for external in parse_external_prop(lines):
if folder:
external = os.path.join(folder, external)
result.append(os.path.normpath(external))
return result
class Svn15Info(Svn13Info):
def get_externals(self):
cmd = ['svn', 'propget', 'svn:externals', self.path, '-R', '--xml']
code, lines = _run_command(cmd, encoding="utf-8")
if code:
log.debug("svn propget failed")
return []
return parse_externals_xml(lines, prefix=os.path.abspath(self.path))
class SvnFileInfo(SvnInfo):
def __init__(self, path=''):
super(SvnFileInfo, self).__init__(path)
self._directories = None
self._revision = None
def _walk_svn(self, base):
entry_file = joinpath(base, '.svn', 'entries')
if os.path.isfile(entry_file):
entries = SVNEntriesFile.load(base)
yield (base, False, entries.parse_revision())
for path in entries.get_undeleted_records():
path = decode_as_string(path)
path = joinpath(base, path)
if os.path.isfile(path):
yield (path, True, None)
elif os.path.isdir(path):
for item in self._walk_svn(path):
yield item
def _build_entries(self):
entries = list()
rev = 0
for path, isfile, dir_rev in self._walk_svn(self.path):
if isfile:
entries.append((path, 'file'))
else:
entries.append((path, 'dir'))
rev = max(rev, dir_rev)
self._entries = entries
self._revision = rev
def get_entries(self):
if self._entries is None:
self._build_entries()
return self._entries
def get_revision(self):
if self._revision is None:
self._build_entries()
return self._revision
def get_externals(self):
prop_files = [['.svn', 'dir-prop-base'],
['.svn', 'dir-props']]
externals = []
for dirname in self.iter_dirs():
prop_file = None
for rel_parts in prop_files:
filename = joinpath(dirname, *rel_parts)
if os.path.isfile(filename):
prop_file = filename
if prop_file is not None:
ext_prop = parse_prop_file(prop_file, 'svn:externals')
#ext_prop should be utf-8 coming from svn:externals
ext_prop = decode_as_string(ext_prop, encoding="utf-8")
externals.extend(parse_external_prop(ext_prop))
return externals
def svn_finder(dirname=''):
    #externals and entries are combined here due to the common interface
    #and the lack of dir_props in svn 1.7
info = SvnInfo.load(dirname)
for path in info.iter_files():
yield path
for path in info.iter_externals():
sub_info = SvnInfo.load(path)
for sub_path in sub_info.iter_files():
yield sub_path
class SVNEntriesFile(object):
def __init__(self, data):
self.data = data
@classmethod
def load(class_, base):
filename = os.path.join(base, '.svn', 'entries')
f = open(filename)
try:
result = SVNEntriesFile.read(f)
finally:
f.close()
return result
@classmethod
def read(class_, fileobj):
data = fileobj.read()
is_xml = data.startswith('<?xml')
class_ = [SVNEntriesFileText, SVNEntriesFileXML][is_xml]
return class_(data)
def parse_revision(self):
all_revs = self.parse_revision_numbers() + [0]
return max(all_revs)
class SVNEntriesFileText(SVNEntriesFile):
known_svn_versions = {
'1.4.x': 8,
'1.5.x': 9,
'1.6.x': 10,
}
def __get_cached_sections(self):
return self.sections
def get_sections(self):
SECTION_DIVIDER = '\f\n'
sections = self.data.split(SECTION_DIVIDER)
sections = [x for x in map(str.splitlines, sections)]
try:
# remove the SVN version number from the first line
svn_version = int(sections[0].pop(0))
            if svn_version not in self.known_svn_versions.values():
                log.warn("Unknown subversion version %d", svn_version)
except ValueError:
return
self.sections = sections
self.get_sections = self.__get_cached_sections
return self.sections
def is_valid(self):
return bool(self.get_sections())
def get_url(self):
return self.get_sections()[0][4]
def parse_revision_numbers(self):
revision_line_number = 9
rev_numbers = [
int(section[revision_line_number])
for section in self.get_sections()
if (len(section) > revision_line_number
and section[revision_line_number])
]
return rev_numbers
def get_undeleted_records(self):
undeleted = lambda s: s and s[0] and (len(s) < 6 or s[5] != 'delete')
result = [
section[0]
for section in self.get_sections()
if undeleted(section)
]
return result
class SVNEntriesFileXML(SVNEntriesFile):
def is_valid(self):
return True
def get_url(self):
"Get repository URL"
urlre = re.compile('url="([^"]+)"')
return urlre.search(self.data).group(1)
def parse_revision_numbers(self):
revre = re.compile(r'committed-rev="(\d+)"')
return [
int(m.group(1))
for m in revre.finditer(self.data)
]
def get_undeleted_records(self):
entries_pattern = \
re.compile(r'name="([^"]+)"(?![^>]+deleted="true")', re.I)
results = [
unescape(match.group(1))
for match in entries_pattern.finditer(self.data)
]
return results
if __name__ == '__main__':
for name in svn_finder(sys.argv[1]):
print(name)
|
jvrsantacruz/XlsxWriter
|
refs/heads/master
|
xlsxwriter/test/comparison/test_print_area01.py
|
8
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'print_area01.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = ['xl/printerSettings/printerSettings1.bin',
'xl/worksheets/_rels/sheet1.xml.rels']
self.ignore_elements = {'[Content_Types].xml': ['<Default Extension="bin"'],
'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with a print area."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.print_area('A1:A1')
worksheet.write('A1', 'Foo')
workbook.close()
self.assertExcelEqual()
def test_create_file_single_cell(self):
"""Test the creation of a simple XlsxWriter file with a print area."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.print_area('A1')
worksheet.write('A1', 'Foo')
workbook.close()
self.assertExcelEqual()
|
torufuru/oolhackathon
|
refs/heads/hackathon
|
ryu/services/protocols/vrrp/api.py
|
48
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ryu.base import app_manager
from ryu.services.protocols.vrrp import event as vrrp_event
def vrrp_config(app, interface, config):
"""create an instance.
returns EventVRRPConfigReply(instance.name, interface, config)
on success.
returns EventVRRPConfigReply(None, interface, config)
on failure.
"""
config_request = vrrp_event.EventVRRPConfigRequest(interface, config)
config_request.sync = True
return app.send_request(config_request)
def vrrp_shutdown(app, instance_name):
"""shutdown the instance.
"""
shutdown_request = vrrp_event.EventVRRPShutdownRequest(instance_name)
app.send_event(vrrp_event.VRRP_MANAGER_NAME, shutdown_request)
def vrrp_transmit(app, monitor_name, data):
"""transmit a packet from the switch. this is internal use only.
data is str-like, a packet to send.
"""
transmit_request = vrrp_event.EventVRRPTransmitRequest(data)
app.send_event(monitor_name, transmit_request)
def vrrp_list(app, instance_name=None):
"""list instances.
returns EventVRRPListReply([VRRPInstance]).
"""
list_request = vrrp_event.EventVRRPListRequest(instance_name)
list_request.dst = vrrp_event.VRRP_MANAGER_NAME
return app.send_request(list_request)
def vrrp_config_change(app, instance_name,
priority=None, advertisement_interval=None,
preempt_mode=None, accept_mode=None):
"""change configuration of an instance.
None means no change.
"""
config_change = vrrp_event.EventVRRPConfigChangeRequest(
instance_name, priority, advertisement_interval,
preempt_mode, accept_mode)
return app.send_event(vrrp_event.VRRP_MANAGER_NAME, config_change)
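# A minimal usage sketch from inside a Ryu app (construction of the
# interface and config objects is elided; the reply attribute name
# follows the docstrings above and is otherwise an assumption):
#
# rep = vrrp_config(app, interface, config)
# if rep.instance_name is not None:  # None signals failure
#     vrrp_config_change(app, rep.instance_name, priority=200)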
app_manager.require_app('ryu.services.protocols.vrrp.manager', api_style=True)
|
koyuawsmbrtn/eclock
|
refs/heads/master
|
windows/kivy/examples/kv/builder_template.py
|
60
|
from kivy.lang import Builder
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
Builder.load_string('''
[BlehItem@BoxLayout]:
orientation: 'vertical'
Label:
text: str(ctx.idx)
Button:
text: ctx.word
''')
class BlehApp(App):
def build(self):
root = BoxLayout()
for idx, word in enumerate(('Hello', 'World')):
wid = Builder.template('BlehItem', **{
'idx': idx, 'word': word,
})
root.add_widget(wid)
return root
if __name__ == '__main__':
BlehApp().run()
|
guziy/basemap
|
refs/heads/master
|
examples/plotmap.py
|
2
|
from __future__ import (absolute_import, division, print_function)
# make plot of etopo bathymetry/topography data on
# lambert conformal conic map projection, drawing coastlines, state and
# country boundaries, and parallels/meridians.
# the data is interpolated to the native projection grid.
from mpl_toolkits.basemap import Basemap, shiftgrid
import numpy as np
import matplotlib.pyplot as plt
# read in topo data (on a regular lat/lon grid)
# longitudes go from 20 to 380.
topoin = np.loadtxt('etopo20data.gz')
lons = np.loadtxt('etopo20lons.gz')
lats = np.loadtxt('etopo20lats.gz')
# shift data so lons go from -180 to 180 instead of 20 to 380.
topoin,lons = shiftgrid(180.,topoin,lons,start=False)
# setup of basemap ('lcc' = lambert conformal conic).
# use major and minor sphere radii from WGS84 ellipsoid.
m = Basemap(llcrnrlon=-145.5,llcrnrlat=1.,urcrnrlon=-2.566,urcrnrlat=46.352,\
rsphere=(6378137.00,6356752.3142),\
resolution='l',area_thresh=1000.,projection='lcc',\
lat_1=50.,lon_0=-107.)
# transform to nx x ny regularly spaced native projection grid
nx = int((m.xmax-m.xmin)/40000.)+1; ny = int((m.ymax-m.ymin)/40000.)+1
topodat,x,y = m.transform_scalar(topoin,lons,lats,nx,ny,returnxy=True)
# create the figure.
fig=plt.figure(figsize=(8,8))
# add an axes.
ax = fig.add_axes([0.1,0.1,0.8,0.8])
# associate this axes with the Basemap instance.
m.ax = ax
# plot image over map with imshow.
im = m.imshow(topodat,plt.cm.jet)
cb = m.colorbar(im,location='right',pad='10%') # draw colorbar
# plot blue dot on boulder, colorado and label it as such.
xpt,ypt = m(-104.237,40.125)
m.plot([xpt],[ypt],'bo')
ax.text(xpt+100000,ypt+100000,'Boulder')
# draw coastlines and political boundaries.
m.drawcoastlines()
m.drawcountries()
m.drawstates()
# draw parallels and meridians.
# label on left, right and bottom of map.
parallels = np.arange(0.,80,20.)
m.drawparallels(parallels,labels=[1,1,0,1])
meridians = np.arange(10.,360.,30.)
m.drawmeridians(meridians,labels=[1,1,0,1])
# set title.
ax.set_title('ETOPO Topography - Lambert Conformal Conic')
plt.show()
|
elyast/saltstack-formulas
|
refs/heads/master
|
_modules/riemann.py
|
1
|
import logging
import os
import re
import requests
import socket
log = logging.getLogger(__name__)
def master():
"""Gets riemann server FQDN
:return:
"""
return __salt__['search.mine_by_host']('roles:riemann.server')[0]
def kafka_jmx_checks(my_host):
"""Generates jmx checks for kafka brokers
:param my_host: my fqdn
:return:
"""
jmx_queries = __pillar__['riemann_checks'].get('jmx', {}).get('kafka-mesos', [])
if len(jmx_queries) == 0:
return []
status_endpoint = __salt__['marathon_client.wait_for_healthy_api']('kafka-mesos', '/api/broker/list')
if status_endpoint is None:
return []
r = requests.get(url=status_endpoint + '/api/broker/list')
if r.status_code != 200:
return []
kafka_meta = r.json()
kafka_servers = map(lambda x: x['task']['hostname'], kafka_meta['brokers'])
jmx_port = __pillar__['kafka-mesos'].get('jmxPort', -1)
if len(filter(lambda x: x == my_host, kafka_servers)) == 0 or jmx_port == -1:
return []
return [{'name': 'kafka-{0}'.format(jmx_port), 'app_id': 'kafka-mesos', 'port': jmx_port, 'queries': jmx_queries}]
def cassandra_jmx_checks(my_host):
"""Generates checks for cassandra node
:param my_host: my FQDN
:return:
"""
jmx_queries = __pillar__['riemann_checks'].get('jmx', {}).get('cassandra-mesos', [])
if len(jmx_queries) == 0:
return []
status_endpoint = __salt__['marathon_client.wait_for_healthy_api']('cassandra-mesos', '/live-nodes')
if status_endpoint is None:
return []
r = requests.get(url=status_endpoint + '/live-nodes')
if r.status_code != 200:
return []
cassandra_meta = r.json()
my_ip = socket.gethostbyname(my_host)
jmx_port = cassandra_meta['jmxPort']
cassandra_servers = cassandra_meta['liveNodes']
if len(filter(lambda x: x == my_ip, cassandra_servers)) == 0:
return []
return [{'name': 'cassandra-{0}'.format(jmx_port), 'my_host': 'localhost', 'app_id': 'cassandra-mesos',
'port': jmx_port, 'queries': jmx_queries}]
# jmx_map = {'cassandra': [{'obj':'x', 'attr':'x'}], 'kafka': [{'obj':'z'}, {'attr':'ww'}]}
# my_host = 'hadoop-worker-8'
def jmx_checks(my_host):
"""Generate jmx checks for servers run through Marathon
:param my_host: my FQDN
:return:
"""
apps = __salt__['marathon_client.apps']()
jmx_map = {key: value for key, value in __pillar__['riemann_checks'].get('jmx', {}).iteritems() if
not (key in ['kafka-mesos', 'cassandra-mesos'])}
return _join(jmx_map, apps, my_host)
# import marathon_client
# marathon_addresses = ['http://hadoop-ha-1:8773', 'http://hadoop-ha-2:8773']
# app_name = 'redis'
# apps = marathon_client._apps(marathon_addresses, None)
# app = marathon_client._apps(marathon_addresses, app_name)
# port_index = 0
# my_host = 'hadoop-worker-2'
# result = {}
def checks(app_name, my_host):
"""Generates checks for applications run through Marathon
:param app_name:
:param my_host:
:return:
"""
app = __salt__['marathon_client.apps'](app_name)
port_index = __pillar__.get(app_name, {}).get('check_port_index', 0)
if app_name in app:
tasks = [{'host': str(t.host), 'enabled': True, 'port': t.ports[port_index]} for t in app[app_name] if
t.host == my_host]
else:
tasks = []
current_tasks = _list_current_services(app_name, my_host)
result = {'{0}-{1}'.format(t['host'], t['port']): t for t in current_tasks}
result.update({'{0}-{1}'.format(t['host'], t['port']): t for t in tasks})
return [t for n, t in result.iteritems()]
def _join(check_defs, apps, my_host):
result = []
for name, queries in check_defs.iteritems():
for app_id, tasks in apps.iteritems():
if not re.match(name, app_id):
continue
port_index = __pillar__[app_id].get('check_port_index', 0)
for t in tasks:
if t.host != my_host:
continue
port = t.ports[port_index]
result.append(
{'name': '{0}-{1}'.format(app_id, port), 'app_id': str(app_id), 'port': port, 'queries': queries})
return result
# jmx_checks = [{'name':'x'}, {'name':'y'}]
# jmx_checks = []
# as_file_names(jmx_checks)
def as_file_names(checks_names):
"""Generates checks name with yml extension
:param checks_names:
:return:
"""
if len(checks_names) == 0:
return ''
else:
return '{0}.yml'.format('.yml '.join([check['name'] for check in checks_names]))
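# e.g. as_file_names([{'name': 'x'}, {'name': 'y'}]) returns 'x.yml y.yml'
# (illustrative names, matching the commented example above)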
# _list_current_services()
# root_dir = "/Users/lukaszjastrzebski/Downloads/init"
# app_name = 'redis'
# my_host = 'hadoop-worker-2'
# current_tasks = [{'host':my_host, 'enabled':False, 'port': int(x.group(1))} for x in current_services]
def _list_current_services(app_name, my_host):
root_dir = '/etc/init/'
    service_regexp = r'riemann-{0}-(\d+)\.conf'.format(app_name)
try:
files = os.listdir(root_dir)
current_services = [re.search(service_regexp, f) for f in files if re.match(service_regexp, f)]
return [{'host': str(my_host), 'enabled': False, 'port': int(x.group(1))} for x in current_services]
except OSError:
return []
# to_check = {'hdfs.namenode': [{'regexp':'NameNode', 'name':'n'}, {'regexp':'DFSZK', 'name':'z'}], 'mesos.master':
# [{'regexp':'mesos-master', 'name':'m'}]}
# roles = ['hdfs.namenode', 'mesos.master']
def proc_checks():
"""Generates processes checks
:return:
"""
to_check = __pillar__['riemann_checks']['proc']
roles = __salt__['grains.get']('roles')
my_proc = [settings for app_name, settings in to_check.items() if app_name in roles]
regexps = [item for sublist in my_proc for item in sublist]
return regexps
|
clumsy/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/gis/gdal/field.py
|
264
|
from ctypes import byref, c_int
from datetime import date, datetime, time
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException
from django.contrib.gis.gdal.prototypes import ds as capi
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_Fld_* routines are relevant here.
class Field(GDALBase):
"A class that wraps an OGR Field, needs to be instantiated from a Feature object."
#### Python 'magic' routines ####
def __init__(self, feat, index):
"""
Initializes on the feature pointer and the integer index of
the field within the feature.
"""
# Setting the feature pointer and index.
self._feat = feat
self._index = index
# Getting the pointer for this field.
fld_ptr = capi.get_feat_field_defn(feat, index)
if not fld_ptr:
raise OGRException('Cannot create OGR Field, invalid pointer given.')
self.ptr = fld_ptr
# Setting the class depending upon the OGR Field Type (OFT)
self.__class__ = OGRFieldTypes[self.type]
# OFTReal with no precision should be an OFTInteger.
if isinstance(self, OFTReal) and self.precision == 0:
self.__class__ = OFTInteger
def __str__(self):
"Returns the string representation of the Field."
return str(self.value).strip()
#### Field Methods ####
def as_double(self):
"Retrieves the Field's value as a double (float)."
return capi.get_field_as_double(self._feat, self._index)
def as_int(self):
"Retrieves the Field's value as an integer."
return capi.get_field_as_integer(self._feat, self._index)
def as_string(self):
"Retrieves the Field's value as a string."
return capi.get_field_as_string(self._feat, self._index)
def as_datetime(self):
"Retrieves the Field's value as a tuple of date & time components."
yy, mm, dd, hh, mn, ss, tz = [c_int() for i in range(7)]
status = capi.get_field_as_datetime(self._feat, self._index, byref(yy), byref(mm), byref(dd),
byref(hh), byref(mn), byref(ss), byref(tz))
if status:
return (yy, mm, dd, hh, mn, ss, tz)
else:
raise OGRException('Unable to retrieve date & time information from the field.')
#### Field Properties ####
@property
def name(self):
"Returns the name of this Field."
return capi.get_field_name(self.ptr)
@property
def precision(self):
"Returns the precision of this Field."
return capi.get_field_precision(self.ptr)
@property
def type(self):
"Returns the OGR type of this Field."
return capi.get_field_type(self.ptr)
@property
def type_name(self):
"Return the OGR field type name for this Field."
return capi.get_field_type_name(self.type)
@property
def value(self):
"Returns the value of this Field."
# Default is to get the field as a string.
return self.as_string()
@property
def width(self):
"Returns the width of this Field."
return capi.get_field_width(self.ptr)
### The Field sub-classes for each OGR Field type. ###
class OFTInteger(Field):
@property
def value(self):
"Returns an integer contained in this field."
return self.as_int()
@property
def type(self):
"""
GDAL uses OFTReals to represent OFTIntegers in created
shapefiles -- forcing the type here since the underlying field
type may actually be OFTReal.
"""
return 0
class OFTReal(Field):
@property
def value(self):
"Returns a float contained in this field."
return self.as_double()
# String & Binary fields, just subclasses
class OFTString(Field): pass
class OFTWideString(Field): pass
class OFTBinary(Field): pass
# OFTDate, OFTTime, OFTDateTime fields.
class OFTDate(Field):
@property
def value(self):
"Returns a Python `date` object for the OFTDate field."
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return date(yy.value, mm.value, dd.value)
except (ValueError, OGRException):
return None
class OFTDateTime(Field):
@property
def value(self):
"Returns a Python `datetime` object for this OFTDateTime field."
# TODO: Adapt timezone information.
# See http://lists.maptools.org/pipermail/gdal-dev/2006-February/007990.html
# The `tz` variable has values of: 0=unknown, 1=localtime (ambiguous),
# 100=GMT, 104=GMT+1, 80=GMT-5, etc.
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return datetime(yy.value, mm.value, dd.value, hh.value, mn.value, ss.value)
except (ValueError, OGRException):
return None
class OFTTime(Field):
@property
def value(self):
"Returns a Python `time` object for this OFTTime field."
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return time(hh.value, mn.value, ss.value)
except (ValueError, OGRException):
return None
# List fields are also just subclasses
class OFTIntegerList(Field): pass
class OFTRealList(Field): pass
class OFTStringList(Field): pass
class OFTWideStringList(Field): pass
# Class mapping dictionary for OFT Types and reverse mapping.
OGRFieldTypes = { 0 : OFTInteger,
1 : OFTIntegerList,
2 : OFTReal,
3 : OFTRealList,
4 : OFTString,
5 : OFTStringList,
6 : OFTWideString,
7 : OFTWideStringList,
8 : OFTBinary,
9 : OFTDate,
10 : OFTTime,
11 : OFTDateTime,
}
ROGRFieldTypes = dict([(cls, num) for num, cls in OGRFieldTypes.items()])
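# Round-trip sketch (illustrative): OGRFieldTypes[2] is OFTReal and
# ROGRFieldTypes[OFTReal] == 2, so OGR type codes and Field subclasses
# map in either direction.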
|
geromath/ToDoMe
|
refs/heads/master
|
root/quizzes/admin.py
|
1
|
from django.contrib import admin
from .models import Question, Category, SubCategory, Quiz, Progress, MCQuestion, Answer
from django import forms
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.utils.translation import ugettext_lazy as _
class AnswerInline(admin.TabularInline):
model = Answer
class QuizAdminForm(forms.ModelForm):
class Meta:
model = Quiz
exclude = []
questions = forms.ModelMultipleChoiceField(
queryset=Question.objects.all().select_subclasses(),
required=False,
label=_("Questions"),
widget=FilteredSelectMultiple(
verbose_name=_("Questions"),
is_stacked=False))
def __init__(self, *args, **kwargs):
super(QuizAdminForm, self).__init__(*args, **kwargs)
if self.instance.pk:
self.fields['questions'].initial =\
self.instance.question_set.all().select_subclasses()
def save(self, commit=True):
quiz = super(QuizAdminForm, self).save(commit=False)
quiz.save()
quiz.question_set = self.cleaned_data['questions']
self.save_m2m()
return quiz
class QuizAdmin(admin.ModelAdmin):
form = QuizAdminForm
list_display = ('title', 'category', )
list_filter = ('category',)
search_fields = ('description', 'category', )
class CategoryAdmin(admin.ModelAdmin):
search_fields = ('category', )
class SubCategoryAdmin(admin.ModelAdmin):
search_fields = ('sub_category', )
list_display = ('sub_category', 'category',)
list_filter = ('category',)
class MCQuestionAdmin(admin.ModelAdmin):
list_display = ('content', 'category', )
list_filter = ('category',)
fields = ('content', 'category', 'sub_category',
'figure', 'quiz', 'explanation', 'answer_order')
search_fields = ('content', 'explanation')
filter_horizontal = ('quiz',)
inlines = [AnswerInline]
class ProgressAdmin(admin.ModelAdmin):
"""
to do:
create a user section
"""
search_fields = ('user', 'score', )
admin.site.register(Quiz, QuizAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(SubCategory, SubCategoryAdmin)
admin.site.register(MCQuestion, MCQuestionAdmin)
admin.site.register(Progress, ProgressAdmin)
|
mr-karan/coala-bears
|
refs/heads/master
|
tests/natural_language/AlexBearTest.py
|
2
|
from bears.natural_language.AlexBear import AlexBear
from tests.LocalBearTestHelper import verify_local_bear
good_file = "Their network looks good."
bad_file = "His network looks good."
AlexBearTest = verify_local_bear(AlexBear,
valid_files=(good_file,),
invalid_files=(bad_file,))
|
arenadata/ambari
|
refs/heads/branch-adh-1.6
|
ambari-server/src/main/resources/stacks/ADH/1.5/services/HIVE/package/alerts/alert_hive_thrift_port.py
|
11
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import socket
import time
import logging
import traceback
from resource_management.libraries.functions import hive_check
from resource_management.libraries.functions import format
from resource_management.libraries.functions import get_kinit_path
from ambari_commons.os_check import OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
OK_MESSAGE = "TCP OK - {0:.3f}s response on port {1}"
CRITICAL_MESSAGE = "Connection failed on host {0}:{1} ({2})"
HIVE_SERVER_THRIFT_PORT_KEY = '{{hive-site/hive.server2.thrift.port}}'
HIVE_SERVER_THRIFT_HTTP_PORT_KEY = '{{hive-site/hive.server2.thrift.http.port}}'
HIVE_SERVER_TRANSPORT_MODE_KEY = '{{hive-site/hive.server2.transport.mode}}'
SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
HIVE_SERVER2_AUTHENTICATION_KEY = '{{hive-site/hive.server2.authentication}}'
HIVE_SERVER_PRINCIPAL_KEY = '{{hive-site/hive.server2.authentication.kerberos.principal}}'
SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
SMOKEUSER_PRINCIPAL_KEY = '{{cluster-env/smokeuser_principal_name}}'
SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
HIVE_SSL = '{{hive-site/hive.server2.use.SSL}}'
HIVE_SSL_KEYSTORE_PATH = '{{hive-site/hive.server2.keystore.path}}'
HIVE_SSL_KEYSTORE_PASSWORD = '{{hive-site/hive.server2.keystore.password}}'
# The configured Kerberos executable search paths, if any
KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
THRIFT_PORT_DEFAULT = 10000
HIVE_SERVER_TRANSPORT_MODE_DEFAULT = 'binary'
HIVE_SERVER_PRINCIPAL_DEFAULT = 'hive/_HOST@EXAMPLE.COM'
HIVE_SERVER2_AUTHENTICATION_DEFAULT = 'NOSASL'
# default keytab location
SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY = 'default.smoke.keytab'
SMOKEUSER_KEYTAB_DEFAULT = '/etc/security/keytabs/smokeuser.headless.keytab'
# default smoke principal
SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY = 'default.smoke.principal'
SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'
# default smoke user
SMOKEUSER_SCRIPT_PARAM_KEY = 'default.smoke.user'
SMOKEUSER_DEFAULT = 'ambari-qa'
HADOOPUSER_KEY = '{{cluster-env/hadoop.user.name}}'
HADOOPUSER_DEFAULT = 'hadoop'
CHECK_COMMAND_TIMEOUT_KEY = 'check.command.timeout'
CHECK_COMMAND_TIMEOUT_DEFAULT = 60.0
logger = logging.getLogger('ambari_alerts')
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def get_tokens():
"""
Returns a tuple of tokens in the format {{site/property}} that will be used
to build the dictionary passed into execute
"""
return (HIVE_SERVER_THRIFT_PORT_KEY, SECURITY_ENABLED_KEY, SMOKEUSER_KEY,
HIVE_SERVER2_AUTHENTICATION_KEY, HIVE_SERVER_PRINCIPAL_KEY,
SMOKEUSER_KEYTAB_KEY, SMOKEUSER_PRINCIPAL_KEY, HIVE_SERVER_THRIFT_HTTP_PORT_KEY,
HIVE_SERVER_TRANSPORT_MODE_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, HIVE_SSL,
HIVE_SSL_KEYSTORE_PATH, HIVE_SSL_KEYSTORE_PASSWORD)
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def get_tokens():
"""
Returns a tuple of tokens in the format {{site/property}} that will be used
to build the dictionary passed into execute
"""
return (HIVE_SERVER_THRIFT_PORT_KEY, HIVE_SERVER_THRIFT_HTTP_PORT_KEY,
HIVE_SERVER_TRANSPORT_MODE_KEY, HADOOPUSER_KEY)
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def execute(configurations={}, parameters={}, host_name=None):
"""
Returns a tuple containing the result code and a pre-formatted result label
Keyword arguments:
configurations (dictionary): a mapping of configuration key to value
parameters (dictionary): a mapping of script parameter key to value
host_name (string): the name of this host where the alert is running
"""
if configurations is None:
return ('UNKNOWN', ['There were no configurations supplied to the script.'])
transport_mode = HIVE_SERVER_TRANSPORT_MODE_DEFAULT
if HIVE_SERVER_TRANSPORT_MODE_KEY in configurations:
transport_mode = configurations[HIVE_SERVER_TRANSPORT_MODE_KEY]
port = THRIFT_PORT_DEFAULT
if transport_mode.lower() == 'binary' and HIVE_SERVER_THRIFT_PORT_KEY in configurations:
port = int(configurations[HIVE_SERVER_THRIFT_PORT_KEY])
elif transport_mode.lower() == 'http' and HIVE_SERVER_THRIFT_HTTP_PORT_KEY in configurations:
port = int(configurations[HIVE_SERVER_THRIFT_HTTP_PORT_KEY])
security_enabled = False
if SECURITY_ENABLED_KEY in configurations:
security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
check_command_timeout = CHECK_COMMAND_TIMEOUT_DEFAULT
if CHECK_COMMAND_TIMEOUT_KEY in parameters:
check_command_timeout = float(parameters[CHECK_COMMAND_TIMEOUT_KEY])
hive_server2_authentication = HIVE_SERVER2_AUTHENTICATION_DEFAULT
if HIVE_SERVER2_AUTHENTICATION_KEY in configurations:
hive_server2_authentication = configurations[HIVE_SERVER2_AUTHENTICATION_KEY]
hive_ssl = False
if HIVE_SSL in configurations:
hive_ssl = configurations[HIVE_SSL]
hive_ssl_keystore_path = None
if HIVE_SSL_KEYSTORE_PATH in configurations:
hive_ssl_keystore_path = configurations[HIVE_SSL_KEYSTORE_PATH]
hive_ssl_keystore_password = None
if HIVE_SSL_KEYSTORE_PASSWORD in configurations:
hive_ssl_keystore_password = configurations[HIVE_SSL_KEYSTORE_PASSWORD]
# defaults
smokeuser_keytab = SMOKEUSER_KEYTAB_DEFAULT
smokeuser_principal = SMOKEUSER_PRINCIPAL_DEFAULT
smokeuser = SMOKEUSER_DEFAULT
# check script params
if SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
smokeuser_principal = parameters[SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY]
if SMOKEUSER_SCRIPT_PARAM_KEY in parameters:
smokeuser = parameters[SMOKEUSER_SCRIPT_PARAM_KEY]
if SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
smokeuser_keytab = parameters[SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY]
# check configurations last as they should always take precedence
if SMOKEUSER_PRINCIPAL_KEY in configurations:
smokeuser_principal = configurations[SMOKEUSER_PRINCIPAL_KEY]
if SMOKEUSER_KEY in configurations:
smokeuser = configurations[SMOKEUSER_KEY]
result_code = None
if security_enabled:
hive_server_principal = HIVE_SERVER_PRINCIPAL_DEFAULT
if HIVE_SERVER_PRINCIPAL_KEY in configurations:
hive_server_principal = configurations[HIVE_SERVER_PRINCIPAL_KEY]
if SMOKEUSER_KEYTAB_KEY in configurations:
smokeuser_keytab = configurations[SMOKEUSER_KEYTAB_KEY]
# Get the configured Kerberos executable search paths, if any
if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
else:
kerberos_executable_search_paths = None
kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
kinitcmd=format("{kinit_path_local} -kt {smokeuser_keytab} {smokeuser_principal}; ")
else:
hive_server_principal = None
kinitcmd=None
try:
if host_name is None:
host_name = socket.getfqdn()
start_time = time.time()
try:
hive_check.check_thrift_port_sasl(host_name, port, hive_server2_authentication, hive_server_principal,
kinitcmd, smokeuser, transport_mode=transport_mode, ssl=hive_ssl,
ssl_keystore=hive_ssl_keystore_path, ssl_password=hive_ssl_keystore_password,
check_command_timeout=int(check_command_timeout))
result_code = 'OK'
total_time = time.time() - start_time
label = OK_MESSAGE.format(total_time, port)
except:
result_code = 'CRITICAL'
label = CRITICAL_MESSAGE.format(host_name, port, traceback.format_exc())
except:
label = traceback.format_exc()
result_code = 'UNKNOWN'
return (result_code, [label])
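# Illustrative invocation (values are made up; the alert framework
# normally supplies these dictionaries):
#
# code, labels = execute(
#     configurations={'{{hive-site/hive.server2.thrift.port}}': '10000',
#                     '{{cluster-env/security_enabled}}': 'false'},
#     parameters={'check.command.timeout': 30.0})
# # -> ('OK', ['TCP OK - 0.123s response on port 10000']) when the
# # Thrift check succeeds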
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def execute(configurations={}, parameters={}, host_name=None):
"""
Returns a tuple containing the result code and a pre-formatted result label
Keyword arguments:
configurations (dictionary): a mapping of configuration key to value
parameters (dictionary): a mapping of script parameter key to value
host_name (string): the name of this host where the alert is running
"""
from resource_management.libraries.functions import reload_windows_env
from resource_management.core.resources import Execute
reload_windows_env()
hive_home = os.environ['HIVE_HOME']
if configurations is None:
return ('UNKNOWN', ['There were no configurations supplied to the script.'])
transport_mode = HIVE_SERVER_TRANSPORT_MODE_DEFAULT
if HIVE_SERVER_TRANSPORT_MODE_KEY in configurations:
transport_mode = configurations[HIVE_SERVER_TRANSPORT_MODE_KEY]
port = THRIFT_PORT_DEFAULT
if transport_mode.lower() == 'binary' and HIVE_SERVER_THRIFT_PORT_KEY in configurations:
port = int(configurations[HIVE_SERVER_THRIFT_PORT_KEY])
elif transport_mode.lower() == 'http' and HIVE_SERVER_THRIFT_HTTP_PORT_KEY in configurations:
port = int(configurations[HIVE_SERVER_THRIFT_HTTP_PORT_KEY])
hiveuser = HADOOPUSER_DEFAULT
if HADOOPUSER_KEY in configurations:
hiveuser = configurations[HADOOPUSER_KEY]
result_code = None
try:
if host_name is None:
host_name = socket.getfqdn()
beeline_url = ['jdbc:hive2://{host_name}:{port}/', "transportMode={transport_mode}"]
        # append to the URL according to the transport mode in use
if transport_mode == "http":
beeline_url.append('httpPath=cliservice')
beeline_url_string = format(";".join(beeline_url))
beeline_cmd = os.path.join(hive_home, "bin", "beeline.cmd")
cmd = format("cmd /c {beeline_cmd} -u {beeline_url_string} -e '' 2>&1 | findstr Connected")
start_time = time.time()
try:
Execute(cmd, user=hiveuser, timeout=30)
total_time = time.time() - start_time
result_code = 'OK'
label = OK_MESSAGE.format(total_time, port)
except:
result_code = 'CRITICAL'
label = CRITICAL_MESSAGE.format(host_name, port, traceback.format_exc())
except:
label = traceback.format_exc()
result_code = 'UNKNOWN'
return (result_code, [label])
|
xfournet/intellij-community
|
refs/heads/master
|
python/testData/inspections/ChainedComparisonWithCommonBinaryExpression_after.py
|
79
|
if 3 < x + 1 < 10:
pass
|
huzq/scikit-learn
|
refs/heads/master
|
sklearn/experimental/tests/__init__.py
|
12133432
| |
moreati/django
|
refs/heads/master
|
django/contrib/sitemaps/management/__init__.py
|
12133432
| |
django-nonrel/django
|
refs/heads/nonrel-1.6
|
tests/raw_query/__init__.py
|
12133432
| |
sander76/home-assistant
|
refs/heads/dev
|
homeassistant/components/openexchangerates/__init__.py
|
36
|
"""The openexchangerates component."""
|
cheesechoi/Triton
|
refs/heads/master
|
cheese/test/crackme_hash_collision.py
|
1
|
from triton import *
import smt2lib
# Triton found several collisions. Example with the first collision:
# $ ./samples/crackmes/crackme_hash lrdxq
# Win
# $
#
expr = str()
def cafter(instruction):
global expr
# movzx esi,BYTE PTR [rax]
# RAX points on the user password
# cheese's modified.
# 0x0000000000400559 <+44>: movzx eax,BYTE PTR [rax]
if instruction.address == 0x400559:
print "Address 0x400559 progress"
raxId = getRegSymbolicID(IDREF.REG.RAX)
print convertExprToSymVar(raxId, 8)
# mov eax,DWORD PTR [rbp-0x4]
# RAX must be equal to 0xad6d to win
# cheese's modified.
# 0x0000000000400561 <+52>: cmp BYTE PTR [rbp-0x1],0x1
if instruction.address == 0x400561:
print '[+] Address <cmp (result_xor) 0x1>'
raxId = getRegSymbolicID(IDREF.REG.RAX)
raxExpr = getFullExpression(getSymExpr(raxId).getAst())
#for se in instruction.symbolicExpressions:
#print '\t -> #%d = %s %s' %(se.getId(), se.getAst(), (('; ' + se.getComment()) if se.getComment() is not None else ''))
zfId = getRegSymbolicID(IDREF.FLAG.ZF)
zfExpr = getFullExpression(getSymExpr(zfId).getAst())
cfId = getRegSymbolicID(IDREF.FLAG.CF)
cfExpr = getFullExpression(getSymExpr(cfId).getAst())
# ZF != true and CF != true
expr = smt2lib.compound([
smt2lib.smtAssert(smt2lib.equal(zfExpr, smt2lib.bvfalse()))
#smt2lib.smtAssert(smt2lib.equal(cfExpr, smt2lib.bvfalse()))
])
# cheese's modified !
# Get max 20 different models
print expr
models = getModels(expr, 20)
print "2"
for model in models:
print {k: "0x%x, '%c'" % (v, v) for k, v in model.items()}
if __name__ == '__main__':
startAnalysisFromSymbol('main')
addCallback(cafter, IDREF.CALLBACK.AFTER)
runProgram()
|
eliasdorneles/scrapy
|
refs/heads/master
|
scrapy/extensions/memusage.py
|
129
|
"""
MemoryUsage extension
See documentation in docs/topics/extensions.rst
"""
import sys
import socket
import logging
from pprint import pformat
from importlib import import_module
from twisted.internet import task
from scrapy import signals
from scrapy.exceptions import NotConfigured
from scrapy.mail import MailSender
from scrapy.utils.engine import get_engine_status
logger = logging.getLogger(__name__)
class MemoryUsage(object):
def __init__(self, crawler):
if not crawler.settings.getbool('MEMUSAGE_ENABLED'):
raise NotConfigured
try:
# stdlib's resource module is only available on unix platforms.
self.resource = import_module('resource')
except ImportError:
raise NotConfigured
self.crawler = crawler
self.warned = False
self.notify_mails = crawler.settings.getlist('MEMUSAGE_NOTIFY_MAIL')
self.limit = crawler.settings.getint('MEMUSAGE_LIMIT_MB')*1024*1024
self.warning = crawler.settings.getint('MEMUSAGE_WARNING_MB')*1024*1024
self.report = crawler.settings.getbool('MEMUSAGE_REPORT')
self.check_interval = crawler.settings.getfloat('MEMUSAGE_CHECK_INTERVAL_SECONDS')
self.mail = MailSender.from_settings(crawler.settings)
crawler.signals.connect(self.engine_started, signal=signals.engine_started)
crawler.signals.connect(self.engine_stopped, signal=signals.engine_stopped)
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def get_virtual_size(self):
size = self.resource.getrusage(self.resource.RUSAGE_SELF).ru_maxrss
if sys.platform != 'darwin':
# on Mac OS X ru_maxrss is in bytes, on Linux it is in KB
size *= 1024
return size
def engine_started(self):
self.crawler.stats.set_value('memusage/startup', self.get_virtual_size())
self.tasks = []
tsk = task.LoopingCall(self.update)
self.tasks.append(tsk)
tsk.start(self.check_interval, now=True)
if self.limit:
tsk = task.LoopingCall(self._check_limit)
self.tasks.append(tsk)
tsk.start(self.check_interval, now=True)
if self.warning:
tsk = task.LoopingCall(self._check_warning)
self.tasks.append(tsk)
tsk.start(self.check_interval, now=True)
def engine_stopped(self):
for tsk in self.tasks:
if tsk.running:
tsk.stop()
def update(self):
self.crawler.stats.max_value('memusage/max', self.get_virtual_size())
def _check_limit(self):
if self.get_virtual_size() > self.limit:
self.crawler.stats.set_value('memusage/limit_reached', 1)
mem = self.limit/1024/1024
logger.error("Memory usage exceeded %(memusage)dM. Shutting down Scrapy...",
{'memusage': mem}, extra={'crawler': self.crawler})
if self.notify_mails:
subj = "%s terminated: memory usage exceeded %dM at %s" % \
(self.crawler.settings['BOT_NAME'], mem, socket.gethostname())
self._send_report(self.notify_mails, subj)
self.crawler.stats.set_value('memusage/limit_notified', 1)
open_spiders = self.crawler.engine.open_spiders
if open_spiders:
for spider in open_spiders:
self.crawler.engine.close_spider(spider, 'memusage_exceeded')
else:
self.crawler.stop()
def _check_warning(self):
if self.warned: # warn only once
return
if self.get_virtual_size() > self.warning:
self.crawler.stats.set_value('memusage/warning_reached', 1)
mem = self.warning/1024/1024
logger.warning("Memory usage reached %(memusage)dM",
{'memusage': mem}, extra={'crawler': self.crawler})
if self.notify_mails:
subj = "%s warning: memory usage reached %dM at %s" % \
(self.crawler.settings['BOT_NAME'], mem, socket.gethostname())
self._send_report(self.notify_mails, subj)
self.crawler.stats.set_value('memusage/warning_notified', 1)
self.warned = True
def _send_report(self, rcpts, subject):
"""send notification mail with some additional useful info"""
stats = self.crawler.stats
s = "Memory usage at engine startup : %dM\r\n" % (stats.get_value('memusage/startup')/1024/1024)
s += "Maximum memory usage : %dM\r\n" % (stats.get_value('memusage/max')/1024/1024)
s += "Current memory usage : %dM\r\n" % (self.get_virtual_size()/1024/1024)
s += "ENGINE STATUS ------------------------------------------------------- \r\n"
s += "\r\n"
s += pformat(get_engine_status(self.crawler.engine))
s += "\r\n"
self.mail.send(rcpts, subject, s)
|
lupyuen/RaspberryPiImage
|
refs/heads/master
|
home/pi/GrovePi/Software/Python/others/temboo/Library/Twilio/ShortCodes/UpdateShortCode.py
|
5
|
# -*- coding: utf-8 -*-
###############################################################################
#
# UpdateShortCode
# Attempts to update an existing short code resource.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class UpdateShortCode(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the UpdateShortCode Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(UpdateShortCode, self).__init__(temboo_session, '/Library/Twilio/ShortCodes/UpdateShortCode')
def new_input_set(self):
return UpdateShortCodeInputSet()
def _make_result_set(self, result, path):
return UpdateShortCodeResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return UpdateShortCodeChoreographyExecution(session, exec_id, path)
class UpdateShortCodeInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the UpdateShortCode
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIVersion(self, value):
"""
Set the value of the APIVersion input for this Choreo. ((optional, string) SMSs to this short code will start a new TwiML session with this API version. Either 2010-04-01 or 2008-08-01.)
"""
super(UpdateShortCodeInputSet, self)._set_input('APIVersion', value)
def set_AccountSID(self, value):
"""
Set the value of the AccountSID input for this Choreo. ((required, string) The AccountSID provided when you signed up for a Twilio account.)
"""
super(UpdateShortCodeInputSet, self)._set_input('AccountSID', value)
def set_AuthToken(self, value):
"""
Set the value of the AuthToken input for this Choreo. ((required, string) The authorization token provided when you signed up for a Twilio account.)
"""
super(UpdateShortCodeInputSet, self)._set_input('AuthToken', value)
def set_FriendlyName(self, value):
"""
Set the value of the FriendlyName input for this Choreo. ((optional, string) A human readable description of the short code, with maximum length 64 characters.)
"""
super(UpdateShortCodeInputSet, self)._set_input('FriendlyName', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
"""
super(UpdateShortCodeInputSet, self)._set_input('ResponseFormat', value)
def set_ShortCodeSID(self, value):
"""
Set the value of the ShortCodeSID input for this Choreo. ((required, string) The id of the short code to update.)
"""
super(UpdateShortCodeInputSet, self)._set_input('ShortCodeSID', value)
def set_SmsFallbackMethod(self, value):
"""
Set the value of the SmsFallbackMethod input for this Choreo. ((optional, string) The HTTP method that should be used to request the SmsFallbackUrl. Either GET or POST.)
"""
super(UpdateShortCodeInputSet, self)._set_input('SmsFallbackMethod', value)
def set_SmsFallbackURL(self, value):
"""
Set the value of the SmsFallbackURL input for this Choreo. ((optional, string) A URL that Twilio will request if an error occurs requesting or executing the TwiML at the SmsUrl.)
"""
super(UpdateShortCodeInputSet, self)._set_input('SmsFallbackURL', value)
def set_SmsMethod(self, value):
"""
Set the value of the SmsMethod input for this Choreo. ((optional, string) The HTTP method that should be used to request the SmsUrl. Either GET or POST.)
"""
super(UpdateShortCodeInputSet, self)._set_input('SmsMethod', value)
def set_SmsURL(self, value):
"""
Set the value of the SmsURL input for this Choreo. ((optional, string) The URL that Twilio should request when somebody sends an SMS to the short code.)
"""
super(UpdateShortCodeInputSet, self)._set_input('SmsURL', value)
def set_SubAccountSID(self, value):
"""
Set the value of the SubAccountSID input for this Choreo. ((optional, string) The SID of the subaccount associated with short code. If not specified, the main AccountSID used to authenticate is used in the request.)
"""
super(UpdateShortCodeInputSet, self)._set_input('SubAccountSID', value)
class UpdateShortCodeResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the UpdateShortCode Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Twilio.)
"""
return self._output.get('Response', None)
class UpdateShortCodeChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return UpdateShortCodeResultSet(response, path)
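# A minimal usage sketch (session construction and credential values are
# assumptions; execute_with_results follows the Temboo SDK convention):
#
# choreo = UpdateShortCode(session)
# inputs = choreo.new_input_set()
# inputs.set_AccountSID('ACxxxxxxxx')
# inputs.set_AuthToken('your-auth-token')
# inputs.set_ShortCodeSID('SCxxxxxxxx')
# results = choreo.execute_with_results(inputs)
# print(results.get_Response())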
|
TurBoss/hazzy
|
refs/heads/master
|
hazzy/modules/Operation/MDI/mdi.py
|
2
|
#!/usr/bin/env python
# Copyright (c) 2017 Kurt Jacobson
# <kurtcjacobson@gmail.com>
#
# This file is part of Hazzy.
#
# Hazzy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Hazzy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hazzy. If not, see <http://www.gnu.org/licenses/>.
# Description:
# MDI entry with command history and completion. Uses the MDIEntry from
# the Widget Factory as a base.
import os
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
from gi.repository import Gtk
from gi.repository import Gdk
from widget_factory.entry_widgets import MDIEntry
class MDI(Gtk.Box):
title = 'MDI'
author = 'TurBoss'
version = '0.1.3'
date = '26/02/2018'
description = 'MDI Prompt'
def __init__(self, widget_window):
Gtk.Box.__init__(self, orientation=Gtk.Orientation.VERTICAL)
self.set_hexpand(True)
self.set_vexpand(True)
# Widget Factory's MDI entry widget
self.entry = MDIEntry()
self.pack_end(self.entry, False, False, 0)
# Scrolled window for the MDI history view
scrolled = Gtk.ScrolledWindow()
self.vadj = scrolled.get_vadjustment()
self.pack_start(scrolled, True, True, 0)
# MDI history TreeView
self.view = Gtk.TreeView(self.entry.model)
self.view.set_activate_on_single_click(True)
self.view.set_headers_visible(False)
scrolled.add(self.view)
renderer = Gtk.CellRendererText()
column = Gtk.TreeViewColumn("Command", renderer, text=0)
self.view.append_column(column)
self.selection = self.view.get_selection()
self.scrolled_to_bottom = False
self.view.connect('size-allocate', self.scroll_to_bottom)
self.view.connect('row-activated', self.on_view_row_activated)
self.view.connect('cursor-changed', self.on_view_cursor_changed)
self.entry.connect('activate', self.on_entry_activated)
self.entry.connect('focus-in-event', self.on_entry_gets_focus)
self.entry.connect('key-press-event', self.on_entry_keypress)
# On history item clicked set the cmd in the MDI entry
def on_view_row_activated(self, widget, row, column):
cmd = self.entry.model[row][0]
self.set_entry_text(cmd)
# On history item selected (via arrow keys) set the cmd in the MDI entry
def on_view_cursor_changed(self, widget):
row = widget.get_cursor()[0]
if row is not None:
cmd = self.entry.model[row][0]
self.view.set_search_entry(None)
self.set_entry_text(cmd)
# Just a little helper
def set_entry_text(self, cmd):
self.entry.set_text(cmd)
self.entry.set_position(-1)
# While MDI entry has focus, use UP/DOWN arrow keys to move through history
# setting the selected cmd in the entry.
def on_entry_keypress(self, widget, event):
kv = event.keyval
if kv == Gdk.KEY_Up:
row = self.get_row() # Highlighted row path, or the last row's path
if row.prev():
self.view.set_cursor([row, ], None, False)
else:
Gdk.beep() # Complain if we are already on the top row
return True
elif kv == Gdk.KEY_Down:
# For some odd reason row.next() returns None instead of a bool
# indicating whether the row is valid, like row.prev() does, so we
# have to do our own validation here.
row = self.get_row() # Highlighted row path, or the last row's path
last_row = Gtk.TreePath.new_from_indices([len(self.entry.model), ])
row.next()
if row != last_row:
self.view.set_cursor([row, ], None, False)
else:
Gdk.beep() # Complain if we are already on the last row
return True
def get_row(self):
try:  # If no row is selected, the list of rows will be empty, resulting in an IndexError
row = self.selection.get_selected_rows()[1][0]
except IndexError: # If IndexError then default to the last row
row = Gtk.TreePath.new_from_indices([len(self.entry.model), ])
return row
# Clear any selection when the entry gets focus. This may not be needed/wanted
def on_entry_gets_focus(self, widget, event):
selection = self.view.get_selection()
selection.unselect_all()
def on_entry_activated(self, widget):
self.scrolled_to_bottom = False # Reset the flag
# Since the TreeView row is added after a sort delay, we have to listen for
# the TreeView `size-allocate` and scroll to the end on that.
# See the SO question here: https://stackoverflow.com/a/5235358/7798639
def scroll_to_bottom(self, widget, event):
# We only want to scroll to bottom when a new row is added, so use a flag
if not self.scrolled_to_bottom:
self.vadj.set_value(self.vadj.get_upper() - self.vadj.get_page_size())
self.scrolled_to_bottom = True
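# Minimal embedding sketch (the host window below is hypothetical, not part
# of Hazzy):
#
#     win = Gtk.Window()
#     win.add(MDI(widget_window=None))
#     win.show_all()
#     Gtk.main()
#
# Commands entered in the MDIEntry land in its model, the TreeView displays
# them, and the size-allocate handler scrolls the history to the newest row.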
|
sdague/home-assistant
|
refs/heads/dev
|
tests/components/device_tracker/__init__.py
|
104
|
"""The tests for Device tracker platforms."""
|
ax003d/openerp
|
refs/heads/master
|
openerp/addons/account/report/account_financial_report.py
|
9
|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
from common_report_header import common_report_header
from openerp.tools.translate import _
class report_account_common(report_sxw.rml_parse, common_report_header):
def __init__(self, cr, uid, name, context=None):
super(report_account_common, self).__init__(cr, uid, name, context=context)
self.localcontext.update( {
'get_lines': self.get_lines,
'time': time,
'get_fiscalyear': self._get_fiscalyear,
'get_account': self._get_account,
'get_start_period': self.get_start_period,
'get_end_period': self.get_end_period,
'get_filter': self._get_filter,
'get_start_date':self._get_start_date,
'get_end_date':self._get_end_date,
})
self.context = context
def set_context(self, objects, data, ids, report_type=None):
new_ids = ids
if (data['model'] == 'ir.ui.menu'):
new_ids = 'chart_account_id' in data['form'] and [data['form']['chart_account_id']] or []
objects = self.pool.get('account.account').browse(self.cr, self.uid, new_ids)
return super(report_account_common, self).set_context(objects, data, new_ids, report_type=report_type)
def get_lines(self, data):
lines = []
account_obj = self.pool.get('account.account')
currency_obj = self.pool.get('res.currency')
ids2 = self.pool.get('account.financial.report')._get_children_by_order(self.cr, self.uid, [data['form']['account_report_id'][0]], context=data['form']['used_context'])
for report in self.pool.get('account.financial.report').browse(self.cr, self.uid, ids2, context=data['form']['used_context']):
vals = {
'name': report.name,
'balance': report.balance * report.sign or 0.0,
'type': 'report',
'level': bool(report.style_overwrite) and report.style_overwrite or report.level,
'account_type': report.type =='sum' and 'view' or False, #used to underline the financial report balances
}
if data['form']['debit_credit']:
vals['debit'] = report.debit
vals['credit'] = report.credit
if data['form']['enable_filter']:
vals['balance_cmp'] = self.pool.get('account.financial.report').browse(self.cr, self.uid, report.id, context=data['form']['comparison_context']).balance * report.sign or 0.0
lines.append(vals)
account_ids = []
if report.display_detail == 'no_detail':
#the rest of the loop is used to display the details of the financial report, so it's not needed here.
continue
if report.type == 'accounts' and report.account_ids:
account_ids = account_obj._get_children_and_consol(self.cr, self.uid, [x.id for x in report.account_ids])
elif report.type == 'account_type' and report.account_type_ids:
account_ids = account_obj.search(self.cr, self.uid, [('user_type','in', [x.id for x in report.account_type_ids])])
if account_ids:
for account in account_obj.browse(self.cr, self.uid, account_ids, context=data['form']['used_context']):
#if there are accounts to display, we add them to the lines with a level equal to their level in
#the COA + 1 (to avoid giving them a level so low that it would conflict with the level of the
#financial reports for Assets, liabilities...)
if report.display_detail == 'detail_flat' and account.type == 'view':
continue
flag = False
vals = {
'name': account.code + ' ' + account.name,
'balance': account.balance != 0 and account.balance * report.sign or account.balance,
'type': 'account',
'level': report.display_detail == 'detail_with_hierarchy' and min(account.level + 1,6) or 6, #account.level + 1
'account_type': account.type,
}
if data['form']['debit_credit']:
vals['debit'] = account.debit
vals['credit'] = account.credit
if not currency_obj.is_zero(self.cr, self.uid, account.company_id.currency_id, vals['balance']):
flag = True
if data['form']['enable_filter']:
vals['balance_cmp'] = account_obj.browse(self.cr, self.uid, account.id, context=data['form']['comparison_context']).balance * report.sign or 0.0
if not currency_obj.is_zero(self.cr, self.uid, account.company_id.currency_id, vals['balance_cmp']):
flag = True
if flag:
lines.append(vals)
return lines
report_sxw.report_sxw('report.account.financial.report', 'account.financial.report',
'addons/account/report/account_financial_report.rml', parser=report_account_common, header='internal')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
algiopensource/server-tools
|
refs/heads/8.0
|
base_suspend_security/models/ir_model_access.py
|
26
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This module copyright (C) 2015 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, tools
from ..base_suspend_security import BaseSuspendSecurityUid
class IrModelAccess(models.Model):
_inherit = 'ir.model.access'
@tools.ormcache_context(accepted_keys=('lang',))
def check(self, cr, uid, model, mode='read', raise_exception=True,
context=None):
if isinstance(uid, BaseSuspendSecurityUid):
return True
return super(IrModelAccess, self).check(
cr, uid, model, mode=mode, raise_exception=raise_exception,
context=context)
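# Sketch of the effect (the wrapped-uid call below is hypothetical; the names
# come from base_suspend_security): once the uid is wrapped, check() returns
# True without consulting any ir.model.access rules.
#
#     access_obj.check(cr, BaseSuspendSecurityUid(uid), 'res.partner')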
|
boundlessgeo/QGIS
|
refs/heads/master
|
python/plugins/processing/gui/TestTools.py
|
4
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
TestTools.py
---------------------
Date : February 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'February 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import posixpath
import re
import yaml
import hashlib
import ast
from osgeo import gdal
from osgeo.gdalconst import GA_ReadOnly
from numpy import nan_to_num
from qgis.core import (QgsApplication,
QgsProcessing,
QgsProcessingParameterDefinition,
QgsProcessingParameterBoolean,
QgsProcessingParameterNumber,
QgsProcessingParameterDistance,
QgsProcessingParameterFile,
QgsProcessingParameterBand,
QgsProcessingParameterString,
QgsProcessingParameterVectorLayer,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterMultipleLayers,
QgsProcessingParameterRasterDestination,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterVectorDestination,
QgsProcessingParameterFileDestination,
QgsProcessingParameterEnum)
from qgis.PyQt.QtCore import QCoreApplication, QMetaObject
from qgis.PyQt.QtWidgets import QDialog, QVBoxLayout, QTextEdit, QMessageBox
def extractSchemaPath(filepath):
"""
Tries to find where the file is relative to the QGIS source code directory.
If it is already placed in the processing or QGIS testdata directory, it will
return an appropriate schema and relative filepath.
Args:
filepath: The path of the file to examine
Returns:
A tuple (schema, relative_file_path) where the schema is 'qgs' or 'proc'
if we can assume that the file is in this testdata directory.
"""
parts = []
schema = None
localpath = ''
path = filepath
part = True
while part and filepath:
(path, part) = os.path.split(path)
if part == 'testdata' and not localpath:
localparts = parts
localparts.reverse()
# we always want posix style paths here
localpath = posixpath.join(*localparts)
parts.append(part)
parts.reverse()
try:
testsindex = parts.index('tests')
except ValueError:
return '', filepath
if parts[testsindex - 1] == 'processing':
schema = 'proc'
return schema, localpath
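# Example (hypothetical path, assuming a QGIS source checkout layout):
#
#     extractSchemaPath('/src/QGIS/python/plugins/processing/tests/testdata/custom/grid.shp')
#     # -> ('proc', 'custom/grid.shp')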
def parseParameters(command):
"""
Parse an algorithm command string to extract the parameter values.
Handles quotes and commas.
"""
pos = 0
exp = re.compile(r"""(['"]?)(.*?)\1(,|$)""")
while True:
m = exp.search(command, pos)
result = m.group(2)
separator = m.group(3)
# Handle special values:
if result == 'None':
result = None
elif result.lower() == str(True).lower():
result = True
elif result.lower() == str(False).lower():
result = False
yield result
if not separator:
break
pos = m.end(0)
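# Example (hypothetical command fragment; quoting and the special values
# None/true/false are handled as shown, while numbers stay as strings):
#
#     list(parseParameters("'abc',None,true,3"))
#     # -> ['abc', None, True, '3']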
def splitAlgIdAndParameters(command):
"""
Extracts the algorithm ID and input parameters from a processing.run() command
"""
exp = re.compile(r"""['"](.*?)['"]\s*,\s*(.*)""")
m = exp.search(command[len('processing.run('):-1])
alg_id = m.group(1)
params = m.group(2)
# replace QgsCoordinateReferenceSystem('EPSG:4325') with just string value
exp = re.compile(r"""QgsCoordinateReferenceSystem\((['"].*?['"])\)""")
params = exp.sub('\\1', params)
return alg_id, ast.literal_eval(params)
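# Example (hypothetical command string):
#
#     splitAlgIdAndParameters("processing.run('native:buffer', {'DISTANCE': 5})")
#     # -> ('native:buffer', {'DISTANCE': 5})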
def createTest(text):
definition = {}
alg_id, parameters = splitAlgIdAndParameters(text)
alg = QgsApplication.processingRegistry().createAlgorithmById(alg_id)
definition['name'] = 'Test ({})'.format(alg_id)
definition['algorithm'] = alg_id
params = {}
results = {}
i = 0
for param in alg.parameterDefinitions():
if param.flags() & QgsProcessingParameterDefinition.FlagHidden or param.isDestination():
continue
if not param.name() in parameters:
continue
i += 1
token = parameters[param.name()]
# Handle empty parameters that are optionals
if param.flags() & QgsProcessingParameterDefinition.FlagOptional and token is None:
continue
if isinstance(param, (QgsProcessingParameterVectorLayer, QgsProcessingParameterFeatureSource)):
schema, filepath = extractSchemaPath(token)
p = {
'type': 'vector',
'name': filepath
}
if not schema:
p['location'] = '[The source data is not in the testdata directory. Please use data in the processing/tests/testdata folder.]'
params[param.name()] = p
elif isinstance(param, QgsProcessingParameterRasterLayer):
schema, filepath = extractSchemaPath(token)
p = {
'type': 'raster',
'name': filepath
}
if not schema:
p['location'] = '[The source data is not in the testdata directory. Please use data in the processing/tests/testdata folder.]'
params[param.name()] = p
elif isinstance(param, QgsProcessingParameterMultipleLayers):
multiparams = token
newparam = []
# Handle datatype detection
dataType = param.layerType()
if dataType in [QgsProcessing.TypeVectorAnyGeometry, QgsProcessing.TypeVectorPoint, QgsProcessing.TypeVectorLine, QgsProcessing.TypeVectorPolygon, QgsProcessing.TypeVector]:
dataType = 'vector'
else:
dataType = 'raster'
schema = None
for mp in multiparams:
schema, filepath = extractSchemaPath(mp)
newparam.append({
'type': dataType,
'name': filepath
})
p = {
'type': 'multi',
'params': newparam
}
if not schema:
p['location'] = '[The source data is not in the testdata directory. Please use data in the processing/tests/testdata folder.]'
params[param.name()] = p
elif isinstance(param, QgsProcessingParameterFile):
schema, filepath = extractSchemaPath(token)
p = {
'type': 'file',
'name': filepath
}
if not schema:
p['location'] = '[The source data is not in the testdata directory. Please use data in the processing/tests/testdata folder.]'
params[param.name()] = p
elif isinstance(param, QgsProcessingParameterString):
params[param.name()] = token
elif isinstance(param, QgsProcessingParameterBoolean):
params[param.name()] = token
elif isinstance(param, (QgsProcessingParameterNumber, QgsProcessingParameterDistance)):
if param.dataType() == QgsProcessingParameterNumber.Integer:
params[param.name()] = int(token)
else:
params[param.name()] = float(token)
elif isinstance(param, QgsProcessingParameterEnum):
if isinstance(token, list):
params[param.name()] = [int(t) for t in token]
else:
params[param.name()] = int(token)
elif isinstance(param, QgsProcessingParameterBand):
params[param.name()] = int(token)
elif token:
if token[0] == '"':
token = token[1:]
if token[-1] == '"':
token = token[:-1]
params[param.name()] = token
definition['params'] = params
for i, out in enumerate([out for out in alg.destinationParameterDefinitions() if not out.flags() & QgsProcessingParameterDefinition.FlagHidden]):
if not out.name() in parameters:
continue
token = parameters[out.name()]
if isinstance(out, QgsProcessingParameterRasterDestination):
if token is None:
QMessageBox.warning(None,
tr('Error'),
tr('Seems some outputs are temporary '
'files. To create test you need to '
'redirect all algorithm outputs to '
'files'))
return
dataset = gdal.Open(token, GA_ReadOnly)
if dataset is None:
QMessageBox.warning(None,
tr('Error'),
tr('Seems some outputs are temporary '
'files. To create test you need to '
'redirect all algorithm outputs to '
'files'))
return
dataArray = nan_to_num(dataset.ReadAsArray(0))
strhash = hashlib.sha224(dataArray.data).hexdigest()
results[out.name()] = {
'type': 'rasterhash',
'hash': strhash
}
elif isinstance(out, (QgsProcessingParameterVectorDestination, QgsProcessingParameterFeatureSink)):
schema, filepath = extractSchemaPath(token)
results[out.name()] = {
'type': 'vector',
'name': filepath
}
if not schema:
results[out.name()]['location'] = '[The expected result data is not in the testdata directory. Please write it to processing/tests/testdata/expected. Prefer gml files.]'
elif isinstance(out, QgsProcessingParameterFileDestination):
schema, filepath = extractSchemaPath(token)
results[out.name()] = {
'type': 'file',
'name': filepath
}
if not schema:
results[out.name()]['location'] = '[The expected result file is not in the testdata directory. Please redirect the output to processing/tests/testdata/expected.]'
definition['results'] = results
dlg = ShowTestDialog(yaml.dump([definition], default_flow_style=False))
dlg.exec_()
def tr(string):
return QCoreApplication.translate('TestTools', string)
class ShowTestDialog(QDialog):
def __init__(self, s):
QDialog.__init__(self)
self.setModal(True)
self.resize(600, 400)
self.setWindowTitle(self.tr('Unit Test'))
layout = QVBoxLayout()
self.text = QTextEdit()
self.text.setFontFamily("monospace")
self.text.setEnabled(True)
# Add two spaces in front of each text for faster copy/paste
self.text.setText(' {}'.format(s.replace('\n', '\n ')))
layout.addWidget(self.text)
self.setLayout(layout)
QMetaObject.connectSlotsByName(self)
|
nippled/dronecoin
|
refs/heads/master
|
contrib/wallettools/walletchangepass.py
|
15
|
from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:10332")
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
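# Note (assumption, not stated in this script): a dronecoin daemon exposing
# the Bitcoin-style JSON-RPC interface must be listening on 127.0.0.1:10332,
# and ServiceProxy needs valid RPC credentials (e.g. in the URL) or the
# walletpassphrasechange call will be rejected as unauthorized.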
|
tashaxe/Red-DiscordBot
|
refs/heads/develop
|
lib/cffi/api.py
|
19
|
import sys, types
from .lock import allocate_lock
from .error import CDefError
from . import model
try:
callable
except NameError:
# Python 3.1
from collections import Callable
callable = lambda x: isinstance(x, Callable)
try:
basestring
except NameError:
# Python 3.x
basestring = str
class FFI(object):
r'''
The main top-level class that you instantiate once, or once per module.
Example usage:
ffi = FFI()
ffi.cdef("""
int printf(const char *, ...);
""")
C = ffi.dlopen(None) # standard library
-or-
C = ffi.verify() # use a C compiler: verify the decl above is right
C.printf("hello, %s!\n", ffi.new("char[]", "world"))
'''
def __init__(self, backend=None):
"""Create an FFI instance. The 'backend' argument is used to
select a non-default backend, mostly for tests.
"""
if backend is None:
# You need PyPy (>= 2.0 beta), or a CPython (>= 2.6) with
# _cffi_backend.so compiled.
import _cffi_backend as backend
from . import __version__
if backend.__version__ != __version__:
# bad version! Try to be as explicit as possible.
if hasattr(backend, '__file__'):
# CPython
raise Exception("Version mismatch: this is the 'cffi' package version %s, located in %r. When we import the top-level '_cffi_backend' extension module, we get version %s, located in %r. The two versions should be equal; check your installation." % (
__version__, __file__,
backend.__version__, backend.__file__))
else:
# PyPy
raise Exception("Version mismatch: this is the 'cffi' package version %s, located in %r. This interpreter comes with a built-in '_cffi_backend' module, which is version %s. The two versions should be equal; check your installation." % (
__version__, __file__, backend.__version__))
# (If you insist you can also try to pass the option
# 'backend=backend_ctypes.CTypesBackend()', but don't
# rely on it! It's probably not going to work well.)
from . import cparser
self._backend = backend
self._lock = allocate_lock()
self._parser = cparser.Parser()
self._cached_btypes = {}
self._parsed_types = types.ModuleType('parsed_types').__dict__
self._new_types = types.ModuleType('new_types').__dict__
self._function_caches = []
self._libraries = []
self._cdefsources = []
self._included_ffis = []
self._windows_unicode = None
self._init_once_cache = {}
self._cdef_version = None
self._embedding = None
if hasattr(backend, 'set_ffi'):
backend.set_ffi(self)
for name in backend.__dict__:
if name.startswith('RTLD_'):
setattr(self, name, getattr(backend, name))
#
with self._lock:
self.BVoidP = self._get_cached_btype(model.voidp_type)
self.BCharA = self._get_cached_btype(model.char_array_type)
if isinstance(backend, types.ModuleType):
# _cffi_backend: attach these constants to the class
if not hasattr(FFI, 'NULL'):
FFI.NULL = self.cast(self.BVoidP, 0)
FFI.CData, FFI.CType = backend._get_types()
else:
# ctypes backend: attach these constants to the instance
self.NULL = self.cast(self.BVoidP, 0)
self.CData, self.CType = backend._get_types()
self.buffer = backend.buffer
def cdef(self, csource, override=False, packed=False):
"""Parse the given C source. This registers all declared functions,
types, and global variables. The functions and global variables can
then be accessed via either 'ffi.dlopen()' or 'ffi.verify()'.
The types can be used in 'ffi.new()' and other functions.
If 'packed' is specified as True, all structs declared inside this
cdef are packed, i.e. laid out without any field alignment at all.
"""
self._cdef(csource, override=override, packed=packed)
def embedding_api(self, csource, packed=False):
self._cdef(csource, packed=packed, dllexport=True)
if self._embedding is None:
self._embedding = ''
def _cdef(self, csource, override=False, **options):
if not isinstance(csource, str): # unicode, on Python 2
if not isinstance(csource, basestring):
raise TypeError("cdef() argument must be a string")
csource = csource.encode('ascii')
with self._lock:
self._cdef_version = object()
self._parser.parse(csource, override=override, **options)
self._cdefsources.append(csource)
if override:
for cache in self._function_caches:
cache.clear()
finishlist = self._parser._recomplete
if finishlist:
self._parser._recomplete = []
for tp in finishlist:
tp.finish_backend_type(self, finishlist)
def dlopen(self, name, flags=0):
"""Load and return a dynamic library identified by 'name'.
The standard C library can be loaded by passing None.
Note that functions and types declared by 'ffi.cdef()' are not
linked to a particular library, just like C headers; in the
library we only look for the actual (untyped) symbols.
"""
assert isinstance(name, basestring) or name is None
with self._lock:
lib, function_cache = _make_ffi_library(self, name, flags)
self._function_caches.append(function_cache)
self._libraries.append(lib)
return lib
def _typeof_locked(self, cdecl):
# call me with the lock!
key = cdecl
if key in self._parsed_types:
return self._parsed_types[key]
#
if not isinstance(cdecl, str): # unicode, on Python 2
cdecl = cdecl.encode('ascii')
#
type = self._parser.parse_type(cdecl)
really_a_function_type = type.is_raw_function
if really_a_function_type:
type = type.as_function_pointer()
btype = self._get_cached_btype(type)
result = btype, really_a_function_type
self._parsed_types[key] = result
return result
def _typeof(self, cdecl, consider_function_as_funcptr=False):
# string -> ctype object
try:
result = self._parsed_types[cdecl]
except KeyError:
with self._lock:
result = self._typeof_locked(cdecl)
#
btype, really_a_function_type = result
if really_a_function_type and not consider_function_as_funcptr:
raise CDefError("the type %r is a function type, not a "
"pointer-to-function type" % (cdecl,))
return btype
def typeof(self, cdecl):
"""Parse the C type given as a string and return the
corresponding <ctype> object.
It can also be used on 'cdata' instance to get its C type.
"""
if isinstance(cdecl, basestring):
return self._typeof(cdecl)
if isinstance(cdecl, self.CData):
return self._backend.typeof(cdecl)
if isinstance(cdecl, types.BuiltinFunctionType):
res = _builtin_function_type(cdecl)
if res is not None:
return res
if (isinstance(cdecl, types.FunctionType)
and hasattr(cdecl, '_cffi_base_type')):
with self._lock:
return self._get_cached_btype(cdecl._cffi_base_type)
raise TypeError(type(cdecl))
def sizeof(self, cdecl):
"""Return the size in bytes of the argument. It can be a
string naming a C type, or a 'cdata' instance.
"""
if isinstance(cdecl, basestring):
BType = self._typeof(cdecl)
return self._backend.sizeof(BType)
else:
return self._backend.sizeof(cdecl)
def alignof(self, cdecl):
"""Return the natural alignment size in bytes of the C type
given as a string.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._backend.alignof(cdecl)
def offsetof(self, cdecl, *fields_or_indexes):
"""Return the offset of the named field inside the given
structure or array, which must be given as a C type name.
You can give several field names in case of nested structures.
You can also give numeric values which correspond to array
items, in case of an array type.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._typeoffsetof(cdecl, *fields_or_indexes)[1]
def new(self, cdecl, init=None):
"""Allocate an instance according to the specified C type and
return a pointer to it. The specified C type must be either a
pointer or an array: ``new('X *')`` allocates an X and returns
a pointer to it, whereas ``new('X[n]')`` allocates an array of
n X'es and returns an array referencing it (which works
mostly like a pointer, like in C). You can also use
``new('X[]', n)`` to allocate an array of a non-constant
length n.
The memory is initialized following the rules of declaring a
global variable in C: by default it is zero-initialized, but
an explicit initializer can be given which can be used to
fill all or part of the memory.
When the returned <cdata> object goes out of scope, the memory
is freed. In other words the returned <cdata> object has
ownership of the value of type 'cdecl' that it points to. This
means that the raw data can be used as long as this object is
kept alive, but must not be used for a longer time. Be careful
about that when copying the pointer to the memory somewhere
else, e.g. into another structure.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._backend.newp(cdecl, init)
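    # Quick sketch of the ownership rule described above (illustrative):
    #
    #     p = ffi.new("int[10]")   # zero-initialized array of 10 ints
    #     p[0] = 42                # the memory lives as long as 'p' does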
def new_allocator(self, alloc=None, free=None,
should_clear_after_alloc=True):
"""Return a new allocator, i.e. a function that behaves like ffi.new()
but uses the provided low-level 'alloc' and 'free' functions.
'alloc' is called with the size as argument. If it returns NULL, a
MemoryError is raised. 'free' is called with the result of 'alloc'
as argument. Both can be either Python function or directly C
functions. If 'free' is None, then no free function is called.
If both 'alloc' and 'free' are None, the default is used.
If 'should_clear_after_alloc' is set to False, then the memory
returned by 'alloc' is assumed to be already cleared (or you are
fine with garbage); otherwise CFFI will clear it.
"""
compiled_ffi = self._backend.FFI()
allocator = compiled_ffi.new_allocator(alloc, free,
should_clear_after_alloc)
def allocate(cdecl, init=None):
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return allocator(cdecl, init)
return allocate
def cast(self, cdecl, source):
"""Similar to a C cast: returns an instance of the named C
type initialized with the given 'source'. The source is
casted between integers or pointers of any type.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._backend.cast(cdecl, source)
def string(self, cdata, maxlen=-1):
"""Return a Python string (or unicode string) from the 'cdata'.
If 'cdata' is a pointer or array of characters or bytes, returns
the null-terminated string. The returned string extends until
the first null character, or at most 'maxlen' characters. If
'cdata' is an array then 'maxlen' defaults to its length.
If 'cdata' is a pointer or array of wchar_t, returns a unicode
string following the same rules.
If 'cdata' is a single character or byte or a wchar_t, returns
it as a string or unicode string.
If 'cdata' is an enum, returns the value of the enumerator as a
string, or 'NUMBER' if the value is out of range.
"""
return self._backend.string(cdata, maxlen)
def unpack(self, cdata, length):
"""Unpack an array of C data of the given length,
returning a Python string/unicode/list.
If 'cdata' is a pointer to 'char', returns a byte string.
It does not stop at the first null. This is equivalent to:
ffi.buffer(cdata, length)[:]
If 'cdata' is a pointer to 'wchar_t', returns a unicode string.
'length' is measured in wchar_t's; it is not the size in bytes.
If 'cdata' is a pointer to anything else, returns a list of
'length' items. This is a faster equivalent to:
[cdata[i] for i in range(length)]
"""
return self._backend.unpack(cdata, length)
#def buffer(self, cdata, size=-1):
# """Return a read-write buffer object that references the raw C data
# pointed to by the given 'cdata'. The 'cdata' must be a pointer or
# an array. Can be passed to functions expecting a buffer, or directly
# manipulated with:
#
# buf[:] get a copy of it in a regular string, or
# buf[idx] as a single character
# buf[:] = ...
# buf[idx] = ... change the content
# """
# note that 'buffer' is a type, set on this instance by __init__
def from_buffer(self, python_buffer):
"""Return a <cdata 'char[]'> that points to the data of the
given Python object, which must support the buffer interface.
Note that this is not meant to be used on the built-in types
str or unicode (you can build 'char[]' arrays explicitly)
but only on objects containing large quantities of raw data
in some other format, like 'array.array' or numpy arrays.
"""
return self._backend.from_buffer(self.BCharA, python_buffer)
def memmove(self, dest, src, n):
"""ffi.memmove(dest, src, n) copies n bytes of memory from src to dest.
Like the C function memmove(), the memory areas may overlap;
apart from that it behaves like the C function memcpy().
'src' can be any cdata ptr or array, or any Python buffer object.
'dest' can be any cdata ptr or array, or a writable Python buffer
object. The size to copy, 'n', is always measured in bytes.
Unlike other methods, this one supports all Python buffers, including
byte strings and bytearrays, but it still does not support
non-contiguous buffers.
"""
return self._backend.memmove(dest, src, n)
def callback(self, cdecl, python_callable=None, error=None, onerror=None):
"""Return a callback object or a decorator making such a
callback object. 'cdecl' must name a C function pointer type.
The callback invokes the specified 'python_callable' (which may
be provided either directly or via a decorator). Important: the
callback object must be manually kept alive for as long as the
callback may be invoked from the C level.
"""
def callback_decorator_wrap(python_callable):
if not callable(python_callable):
raise TypeError("the 'python_callable' argument "
"is not callable")
return self._backend.callback(cdecl, python_callable,
error, onerror)
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl, consider_function_as_funcptr=True)
if python_callable is None:
return callback_decorator_wrap # decorator mode
else:
return callback_decorator_wrap(python_callable) # direct mode
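    # Decorator-mode sketch (uses only the documented signature; the cdef
    # for a matching function pointer type is assumed):
    #
    #     @ffi.callback("int(int, int)")
    #     def my_add(x, y):        # keep a reference to my_add alive for as
    #         return x + y         # long as C code may still invoke it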
def getctype(self, cdecl, replace_with=''):
"""Return a string giving the C type 'cdecl', which may be itself
a string or a <ctype> object. If 'replace_with' is given, it gives
extra text to append (or insert for more complicated C types), like
a variable name, or '*' to actually get the C type 'pointer-to-cdecl'.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
replace_with = replace_with.strip()
if (replace_with.startswith('*')
and '&[' in self._backend.getcname(cdecl, '&')):
replace_with = '(%s)' % replace_with
elif replace_with and not replace_with[0] in '[(':
replace_with = ' ' + replace_with
return self._backend.getcname(cdecl, replace_with)
def gc(self, cdata, destructor):
"""Return a new cdata object that points to the same
data. Later, when this new cdata object is garbage-collected,
'destructor(old_cdata_object)' will be called.
"""
return self._backend.gcp(cdata, destructor)
def _get_cached_btype(self, type):
assert self._lock.acquire(False) is False
# call me with the lock!
try:
BType = self._cached_btypes[type]
except KeyError:
finishlist = []
BType = type.get_cached_btype(self, finishlist)
for type in finishlist:
type.finish_backend_type(self, finishlist)
return BType
def verify(self, source='', tmpdir=None, **kwargs):
"""Verify that the current ffi signatures compile on this
machine, and return a dynamic library object. The dynamic
library can be used to call functions and access global
variables declared in this 'ffi'. The library is compiled
by the C compiler: it gives you C-level API compatibility
(including calling macros). This is unlike 'ffi.dlopen()',
which requires binary compatibility in the signatures.
"""
from .verifier import Verifier, _caller_dir_pycache
#
# If set_unicode(True) was called, insert the UNICODE and
# _UNICODE macro declarations
if self._windows_unicode:
self._apply_windows_unicode(kwargs)
#
# Set the tmpdir here, and not in Verifier.__init__: it picks
# up the caller's directory, which we want to be the caller of
# ffi.verify(), as opposed to the caller of Verifier().
tmpdir = tmpdir or _caller_dir_pycache()
#
# Make a Verifier() and use it to load the library.
self.verifier = Verifier(self, source, tmpdir, **kwargs)
lib = self.verifier.load_library()
#
# Save the loaded library for keep-alive purposes, even
# if the caller doesn't keep it alive itself (it should).
self._libraries.append(lib)
return lib
def _get_errno(self):
return self._backend.get_errno()
def _set_errno(self, errno):
self._backend.set_errno(errno)
errno = property(_get_errno, _set_errno, None,
"the value of 'errno' from/to the C calls")
def getwinerror(self, code=-1):
return self._backend.getwinerror(code)
def _pointer_to(self, ctype):
with self._lock:
return model.pointer_cache(self, ctype)
def addressof(self, cdata, *fields_or_indexes):
"""Return the address of a <cdata 'struct-or-union'>.
If 'fields_or_indexes' are given, returns the address of that
field or array item in the structure or array, recursively in
case of nested structures.
"""
try:
ctype = self._backend.typeof(cdata)
except TypeError:
if '__addressof__' in type(cdata).__dict__:
return type(cdata).__addressof__(cdata, *fields_or_indexes)
raise
if fields_or_indexes:
ctype, offset = self._typeoffsetof(ctype, *fields_or_indexes)
else:
if ctype.kind == "pointer":
raise TypeError("addressof(pointer)")
offset = 0
ctypeptr = self._pointer_to(ctype)
return self._backend.rawaddressof(ctypeptr, cdata, offset)
def _typeoffsetof(self, ctype, field_or_index, *fields_or_indexes):
ctype, offset = self._backend.typeoffsetof(ctype, field_or_index)
for field1 in fields_or_indexes:
ctype, offset1 = self._backend.typeoffsetof(ctype, field1, 1)
offset += offset1
return ctype, offset
def include(self, ffi_to_include):
"""Includes the typedefs, structs, unions and enums defined
in another FFI instance. Usage is similar to a #include in C,
where a part of the program might include types defined in
another part for its own usage. Note that the include()
method has no effect on functions, constants and global
variables, which must anyway be accessed directly from the
lib object returned by the original FFI instance.
"""
if not isinstance(ffi_to_include, FFI):
raise TypeError("ffi.include() expects an argument that is also of"
" type cffi.FFI, not %r" % (
type(ffi_to_include).__name__,))
if ffi_to_include is self:
raise ValueError("self.include(self)")
with ffi_to_include._lock:
with self._lock:
self._parser.include(ffi_to_include._parser)
self._cdefsources.append('[')
self._cdefsources.extend(ffi_to_include._cdefsources)
self._cdefsources.append(']')
self._included_ffis.append(ffi_to_include)
def new_handle(self, x):
return self._backend.newp_handle(self.BVoidP, x)
def from_handle(self, x):
return self._backend.from_handle(x)
def set_unicode(self, enabled_flag):
"""Windows: if 'enabled_flag' is True, enable the UNICODE and
_UNICODE defines in C, and declare the types like TCHAR and LPTCSTR
to be (pointers to) wchar_t. If 'enabled_flag' is False,
declare these types to be (pointers to) plain 8-bit characters.
This is mostly for backward compatibility; you usually want True.
"""
if self._windows_unicode is not None:
raise ValueError("set_unicode() can only be called once")
enabled_flag = bool(enabled_flag)
if enabled_flag:
self.cdef("typedef wchar_t TBYTE;"
"typedef wchar_t TCHAR;"
"typedef const wchar_t *LPCTSTR;"
"typedef const wchar_t *PCTSTR;"
"typedef wchar_t *LPTSTR;"
"typedef wchar_t *PTSTR;"
"typedef TBYTE *PTBYTE;"
"typedef TCHAR *PTCHAR;")
else:
self.cdef("typedef char TBYTE;"
"typedef char TCHAR;"
"typedef const char *LPCTSTR;"
"typedef const char *PCTSTR;"
"typedef char *LPTSTR;"
"typedef char *PTSTR;"
"typedef TBYTE *PTBYTE;"
"typedef TCHAR *PTCHAR;")
self._windows_unicode = enabled_flag
def _apply_windows_unicode(self, kwds):
defmacros = kwds.get('define_macros', ())
if not isinstance(defmacros, (list, tuple)):
raise TypeError("'define_macros' must be a list or tuple")
defmacros = list(defmacros) + [('UNICODE', '1'),
('_UNICODE', '1')]
kwds['define_macros'] = defmacros
def _apply_embedding_fix(self, kwds):
# must include an argument like "-lpython2.7" for the compiler
def ensure(key, value):
lst = kwds.setdefault(key, [])
if value not in lst:
lst.append(value)
#
if '__pypy__' in sys.builtin_module_names:
import os
if sys.platform == "win32":
# we need 'libpypy-c.lib'. Current distributions of
# pypy (>= 4.1) contain it as 'libs/python27.lib'.
pythonlib = "python27"
if hasattr(sys, 'prefix'):
ensure('library_dirs', os.path.join(sys.prefix, 'libs'))
else:
# we need 'libpypy-c.{so,dylib}', which should be by
# default located in 'sys.prefix/bin' for installed
# systems.
if sys.version_info < (3,):
pythonlib = "pypy-c"
else:
pythonlib = "pypy3-c"
if hasattr(sys, 'prefix'):
ensure('library_dirs', os.path.join(sys.prefix, 'bin'))
# On uninstalled pypy's, the libpypy-c is typically found in
# .../pypy/goal/.
if hasattr(sys, 'prefix'):
ensure('library_dirs', os.path.join(sys.prefix, 'pypy', 'goal'))
else:
if sys.platform == "win32":
template = "python%d%d"
if hasattr(sys, 'gettotalrefcount'):
template += '_d'
else:
try:
import sysconfig
except ImportError: # 2.6
from distutils import sysconfig
template = "python%d.%d"
if sysconfig.get_config_var('DEBUG_EXT'):
template += sysconfig.get_config_var('DEBUG_EXT')
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
if hasattr(sys, 'abiflags'):
pythonlib += sys.abiflags
ensure('libraries', pythonlib)
if sys.platform == "win32":
ensure('extra_link_args', '/MANIFEST')
def set_source(self, module_name, source, source_extension='.c', **kwds):
import os
if hasattr(self, '_assigned_source'):
raise ValueError("set_source() cannot be called several times "
"per ffi object")
if not isinstance(module_name, basestring):
raise TypeError("'module_name' must be a string")
if os.sep in module_name or (os.altsep and os.altsep in module_name):
raise ValueError("'module_name' must not contain '/': use a dotted "
"name to make a 'package.module' location")
self._assigned_source = (str(module_name), source,
source_extension, kwds)
def distutils_extension(self, tmpdir='build', verbose=True):
from distutils.dir_util import mkpath
from .recompiler import recompile
#
if not hasattr(self, '_assigned_source'):
if hasattr(self, 'verifier'): # fallback, 'tmpdir' ignored
return self.verifier.get_extension()
raise ValueError("set_source() must be called before"
" distutils_extension()")
module_name, source, source_extension, kwds = self._assigned_source
if source is None:
raise TypeError("distutils_extension() is only for C extension "
"modules, not for dlopen()-style pure Python "
"modules")
mkpath(tmpdir)
ext, updated = recompile(self, module_name,
source, tmpdir=tmpdir, extradir=tmpdir,
source_extension=source_extension,
call_c_compiler=False, **kwds)
if verbose:
if updated:
sys.stderr.write("regenerated: %r\n" % (ext.sources[0],))
else:
sys.stderr.write("not modified: %r\n" % (ext.sources[0],))
return ext
def emit_c_code(self, filename):
from .recompiler import recompile
#
if not hasattr(self, '_assigned_source'):
raise ValueError("set_source() must be called before emit_c_code()")
module_name, source, source_extension, kwds = self._assigned_source
if source is None:
raise TypeError("emit_c_code() is only for C extension modules, "
"not for dlopen()-style pure Python modules")
recompile(self, module_name, source,
c_file=filename, call_c_compiler=False, **kwds)
def emit_python_code(self, filename):
from .recompiler import recompile
#
if not hasattr(self, '_assigned_source'):
raise ValueError("set_source() must be called before emit_c_code()")
module_name, source, source_extension, kwds = self._assigned_source
if source is not None:
raise TypeError("emit_python_code() is only for dlopen()-style "
"pure Python modules, not for C extension modules")
recompile(self, module_name, source,
c_file=filename, call_c_compiler=False, **kwds)
def compile(self, tmpdir='.', verbose=0, target=None, debug=None):
"""The 'target' argument gives the final file name of the
compiled DLL. Use '*' to force distutils' choice, suitable for
regular CPython C API modules. Use a file name ending in '.*'
to ask for the system's default extension for dynamic libraries
(.so/.dll/.dylib).
The default is '*' when building a non-embedded C API extension,
and (module_name + '.*') when building an embedded library.
"""
from .recompiler import recompile
#
if not hasattr(self, '_assigned_source'):
raise ValueError("set_source() must be called before compile()")
module_name, source, source_extension, kwds = self._assigned_source
return recompile(self, module_name, source, tmpdir=tmpdir,
target=target, source_extension=source_extension,
compiler_verbose=verbose, debug=debug, **kwds)
def init_once(self, func, tag):
# Read _init_once_cache[tag], which is either (False, lock) if
# we're calling the function now in some thread, or (True, result).
# Don't call setdefault() in most cases, to avoid allocating and
# immediately freeing a lock; but still use setdefault() to avoid
# races.
try:
x = self._init_once_cache[tag]
except KeyError:
x = self._init_once_cache.setdefault(tag, (False, allocate_lock()))
# Common case: we got (True, result), so we return the result.
if x[0]:
return x[1]
# Else, it's a lock. Acquire it to serialize the following tests.
with x[1]:
# Read again from _init_once_cache the current status.
x = self._init_once_cache[tag]
if x[0]:
return x[1]
# Call the function and store the result back.
result = func()
self._init_once_cache[tag] = (True, result)
return result
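    # Typical call (sketch; 'open_library' is a hypothetical callable):
    #
    #     handle = ffi.init_once(open_library, "lib-init")
    #
    # Only the first caller actually runs open_library(); concurrent and
    # later callers block on the per-tag lock and get the cached result.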
def embedding_init_code(self, pysource):
if self._embedding:
raise ValueError("embedding_init_code() can only be called once")
# fix 'pysource' before it gets dumped into the C file:
# - remove empty lines at the beginning, so it starts at "line 1"
# - dedent, if all non-empty lines are indented
# - check for SyntaxErrors
import re
match = re.match(r'\s*\n', pysource)
if match:
pysource = pysource[match.end():]
lines = pysource.splitlines() or ['']
prefix = re.match(r'\s*', lines[0]).group()
for i in range(1, len(lines)):
line = lines[i]
if line.rstrip():
while not line.startswith(prefix):
prefix = prefix[:-1]
i = len(prefix)
lines = [line[i:]+'\n' for line in lines]
pysource = ''.join(lines)
#
compile(pysource, "cffi_init", "exec")
#
self._embedding = pysource
def def_extern(self, *args, **kwds):
raise ValueError("ffi.def_extern() is only available on API-mode FFI "
"objects")
def list_types(self):
"""Returns the user type names known to this FFI instance.
This returns a tuple containing three lists of names:
(typedef_names, names_of_structs, names_of_unions)
"""
typedefs = []
structs = []
unions = []
for key in self._parser._declarations:
if key.startswith('typedef '):
typedefs.append(key[8:])
elif key.startswith('struct '):
structs.append(key[7:])
elif key.startswith('union '):
unions.append(key[6:])
typedefs.sort()
structs.sort()
unions.sort()
return (typedefs, structs, unions)
def _load_backend_lib(backend, name, flags):
import os
if name is None:
if sys.platform != "win32":
return backend.load_library(None, flags)
name = "c" # Windows: load_library(None) fails, but this works
# (backward compatibility hack only)
first_error = None
if '.' in name or '/' in name or os.sep in name:
try:
return backend.load_library(name, flags)
except OSError as e:
first_error = e
import ctypes.util
path = ctypes.util.find_library(name)
if path is None:
msg = ("ctypes.util.find_library() did not manage "
"to locate a library called %r" % (name,))
if first_error is not None:
msg = "%s. Additionally, %s" % (first_error, msg)
raise OSError(msg)
return backend.load_library(path, flags)
def _make_ffi_library(ffi, libname, flags):
backend = ffi._backend
backendlib = _load_backend_lib(backend, libname, flags)
#
def accessor_function(name):
key = 'function ' + name
tp, _ = ffi._parser._declarations[key]
BType = ffi._get_cached_btype(tp)
value = backendlib.load_function(BType, name)
library.__dict__[name] = value
#
def accessor_variable(name):
key = 'variable ' + name
tp, _ = ffi._parser._declarations[key]
BType = ffi._get_cached_btype(tp)
read_variable = backendlib.read_variable
write_variable = backendlib.write_variable
setattr(FFILibrary, name, property(
lambda self: read_variable(BType, name),
lambda self, value: write_variable(BType, name, value)))
#
def addressof_var(name):
try:
return addr_variables[name]
except KeyError:
with ffi._lock:
if name not in addr_variables:
key = 'variable ' + name
tp, _ = ffi._parser._declarations[key]
BType = ffi._get_cached_btype(tp)
if BType.kind != 'array':
BType = model.pointer_cache(ffi, BType)
p = backendlib.load_function(BType, name)
addr_variables[name] = p
return addr_variables[name]
#
def accessor_constant(name):
raise NotImplementedError("non-integer constant '%s' cannot be "
"accessed from a dlopen() library" % (name,))
#
def accessor_int_constant(name):
library.__dict__[name] = ffi._parser._int_constants[name]
#
accessors = {}
accessors_version = [False]
addr_variables = {}
#
def update_accessors():
if accessors_version[0] is ffi._cdef_version:
return
#
for key, (tp, _) in ffi._parser._declarations.items():
if not isinstance(tp, model.EnumType):
tag, name = key.split(' ', 1)
if tag == 'function':
accessors[name] = accessor_function
elif tag == 'variable':
accessors[name] = accessor_variable
elif tag == 'constant':
accessors[name] = accessor_constant
else:
for i, enumname in enumerate(tp.enumerators):
def accessor_enum(name, tp=tp, i=i):
tp.check_not_partial()
library.__dict__[name] = tp.enumvalues[i]
accessors[enumname] = accessor_enum
for name in ffi._parser._int_constants:
accessors.setdefault(name, accessor_int_constant)
accessors_version[0] = ffi._cdef_version
#
def make_accessor(name):
with ffi._lock:
if name in library.__dict__ or name in FFILibrary.__dict__:
return # added by another thread while waiting for the lock
if name not in accessors:
update_accessors()
if name not in accessors:
raise AttributeError(name)
accessors[name](name)
#
class FFILibrary(object):
def __getattr__(self, name):
make_accessor(name)
return getattr(self, name)
def __setattr__(self, name, value):
try:
property = getattr(self.__class__, name)
except AttributeError:
make_accessor(name)
setattr(self, name, value)
else:
property.__set__(self, value)
def __dir__(self):
with ffi._lock:
update_accessors()
return accessors.keys()
def __addressof__(self, name):
if name in library.__dict__:
return library.__dict__[name]
if name in FFILibrary.__dict__:
return addressof_var(name)
make_accessor(name)
if name in library.__dict__:
return library.__dict__[name]
if name in FFILibrary.__dict__:
return addressof_var(name)
raise AttributeError("cffi library has no function or "
"global variable named '%s'" % (name,))
#
if libname is not None:
try:
if not isinstance(libname, str): # unicode, on Python 2
libname = libname.encode('utf-8')
FFILibrary.__name__ = 'FFILibrary_%s' % libname
except UnicodeError:
pass
library = FFILibrary()
return library, library.__dict__
def _builtin_function_type(func):
# a hack to make at least ffi.typeof(builtin_function) work,
# if the builtin function was obtained by 'vengine_cpy'.
import sys
try:
module = sys.modules[func.__module__]
ffi = module._cffi_original_ffi
types_of_builtin_funcs = module._cffi_types_of_builtin_funcs
tp = types_of_builtin_funcs[func]
except (KeyError, AttributeError, TypeError):
return None
else:
with ffi._lock:
return ffi._get_cached_btype(tp)
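# End-to-end ABI-mode sketch (illustrative; mirrors the FFI class docstring
# and assumes a POSIX system, where dlopen(None) loads the C library):
#
#     from cffi import FFI
#     ffi = FFI()
#     ffi.cdef("size_t strlen(const char *);")
#     C = ffi.dlopen(None)
#     assert C.strlen(b"hello") == 5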
|
TheBoegl/letsencrypt
|
refs/heads/master
|
letsencrypt-apache/letsencrypt_apache/tests/configurator_test.py
|
1
|
# pylint: disable=too-many-public-methods
"""Test for letsencrypt_apache.configurator."""
import os
import shutil
import socket
import unittest
import mock
from acme import challenges
from letsencrypt import achallenges
from letsencrypt import errors
from letsencrypt.tests import acme_util
from letsencrypt_apache import configurator
from letsencrypt_apache import obj
from letsencrypt_apache.tests import util
class TwoVhost80Test(util.ApacheTest):
"""Test two standard well-configured HTTP vhosts."""
def setUp(self): # pylint: disable=arguments-differ
super(TwoVhost80Test, self).setUp()
self.config = util.get_apache_configurator(
self.config_path, self.vhost_path, self.config_dir, self.work_dir)
self.config = self.mock_deploy_cert(self.config)
self.vh_truth = util.get_vh_truth(
self.temp_dir, "debian_apache_2_4/two_vhost_80")
def mock_deploy_cert(self, config):
"""A test for a mock deploy cert"""
self.config.real_deploy_cert = self.config.deploy_cert
def mocked_deploy_cert(*args, **kwargs):
"""a helper to mock a deployed cert"""
with mock.patch("letsencrypt_apache.configurator.ApacheConfigurator.enable_mod"):
config.real_deploy_cert(*args, **kwargs)
self.config.deploy_cert = mocked_deploy_cert
return self.config
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.config_dir)
shutil.rmtree(self.work_dir)
@mock.patch("letsencrypt_apache.configurator.le_util.exe_exists")
def test_prepare_no_install(self, mock_exe_exists):
mock_exe_exists.return_value = False
self.assertRaises(
errors.NoInstallationError, self.config.prepare)
@mock.patch("letsencrypt_apache.parser.ApacheParser")
@mock.patch("letsencrypt_apache.configurator.le_util.exe_exists")
def test_prepare_version(self, mock_exe_exists, _):
mock_exe_exists.return_value = True
self.config.version = None
self.config.config_test = mock.Mock()
self.config.get_version = mock.Mock(return_value=(1, 1))
self.assertRaises(
errors.NotSupportedError, self.config.prepare)
@mock.patch("letsencrypt_apache.parser.ApacheParser")
@mock.patch("letsencrypt_apache.configurator.le_util.exe_exists")
def test_prepare_old_aug(self, mock_exe_exists, _):
mock_exe_exists.return_value = True
self.config.config_test = mock.Mock()
# pylint: disable=protected-access
self.config._check_aug_version = mock.Mock(return_value=False)
self.assertRaises(
errors.NotSupportedError, self.config.prepare)
def test_add_parser_arguments(self): # pylint: disable=no-self-use
from letsencrypt_apache.configurator import ApacheConfigurator
# Weak test..
ApacheConfigurator.add_parser_arguments(mock.MagicMock())
@mock.patch("zope.component.getUtility")
def test_get_all_names(self, mock_getutility):
mock_getutility.notification = mock.MagicMock(return_value=True)
names = self.config.get_all_names()
self.assertEqual(names, set(
["letsencrypt.demo", "encryption-example.demo", "ip-172-30-0-17"]))
@mock.patch("zope.component.getUtility")
@mock.patch("letsencrypt_apache.configurator.socket.gethostbyaddr")
def test_get_all_names_addrs(self, mock_gethost, mock_getutility):
mock_gethost.side_effect = [("google.com", "", ""), socket.error]
notification = mock.Mock()
notification.notification = mock.Mock(return_value=True)
mock_getutility.return_value = notification
vhost = obj.VirtualHost(
"fp", "ap",
set([obj.Addr(("8.8.8.8", "443")),
obj.Addr(("zombo.com",)),
obj.Addr(("192.168.1.2"))]),
True, False)
self.config.vhosts.append(vhost)
names = self.config.get_all_names()
self.assertEqual(len(names), 5)
self.assertTrue("zombo.com" in names)
self.assertTrue("google.com" in names)
self.assertTrue("letsencrypt.demo" in names)
def test_add_servernames_alias(self):
self.config.parser.add_dir(
self.vh_truth[2].path, "ServerAlias", ["*.le.co"])
# pylint: disable=protected-access
self.config._add_servernames(self.vh_truth[2])
self.assertEqual(
self.vh_truth[2].get_names(), set(["*.le.co", "ip-172-30-0-17"]))
def test_get_virtual_hosts(self):
"""Make sure all vhosts are being properly found.
.. note:: If this test fails and only 1 vhost is found, the problem is
likely in is_enabled. If only 3 are found, it is likely in is_ssl.
"""
vhs = self.config.get_virtual_hosts()
self.assertEqual(len(vhs), 6)
found = 0
for vhost in vhs:
for truth in self.vh_truth:
if vhost == truth:
found += 1
break
else:
raise Exception("Missed: %s" % vhost) # pragma: no cover
self.assertEqual(found, 6)
# Handle case of non-debian layout get_virtual_hosts
with mock.patch(
"letsencrypt_apache.configurator.ApacheConfigurator.conf"
) as mock_conf:
mock_conf.return_value = False
vhs = self.config.get_virtual_hosts()
self.assertEqual(len(vhs), 6)
@mock.patch("letsencrypt_apache.display_ops.select_vhost")
def test_choose_vhost_none_avail(self, mock_select):
mock_select.return_value = None
self.assertRaises(
errors.PluginError, self.config.choose_vhost, "none.com")
@mock.patch("letsencrypt_apache.display_ops.select_vhost")
def test_choose_vhost_select_vhost_ssl(self, mock_select):
mock_select.return_value = self.vh_truth[1]
self.assertEqual(
self.vh_truth[1], self.config.choose_vhost("none.com"))
@mock.patch("letsencrypt_apache.display_ops.select_vhost")
def test_choose_vhost_select_vhost_non_ssl(self, mock_select):
mock_select.return_value = self.vh_truth[0]
chosen_vhost = self.config.choose_vhost("none.com")
self.vh_truth[0].aliases.add("none.com")
self.assertEqual(
self.vh_truth[0].get_names(), chosen_vhost.get_names())
# Make sure we go from HTTP -> HTTPS
self.assertFalse(self.vh_truth[0].ssl)
self.assertTrue(chosen_vhost.ssl)
@mock.patch("letsencrypt_apache.display_ops.select_vhost")
def test_choose_vhost_select_vhost_with_temp(self, mock_select):
mock_select.return_value = self.vh_truth[0]
chosen_vhost = self.config.choose_vhost("none.com", temp=True)
self.assertEqual(self.vh_truth[0], chosen_vhost)
@mock.patch("letsencrypt_apache.display_ops.select_vhost")
def test_choose_vhost_select_vhost_conflicting_non_ssl(self, mock_select):
mock_select.return_value = self.vh_truth[3]
conflicting_vhost = obj.VirtualHost(
"path", "aug_path", set([obj.Addr.fromstring("*:443")]),
True, True)
self.config.vhosts.append(conflicting_vhost)
self.assertRaises(
errors.PluginError, self.config.choose_vhost, "none.com")
def test_find_best_vhost(self):
# pylint: disable=protected-access
self.assertEqual(
self.vh_truth[3], self.config._find_best_vhost("letsencrypt.demo"))
self.assertEqual(
self.vh_truth[0],
self.config._find_best_vhost("encryption-example.demo"))
self.assertEqual(
self.config._find_best_vhost("does-not-exist.com"), None)
def test_find_best_vhost_variety(self):
# pylint: disable=protected-access
ssl_vh = obj.VirtualHost(
"fp", "ap", set([obj.Addr(("*", "443")),
obj.Addr(("zombo.com",))]),
True, False)
self.config.vhosts.append(ssl_vh)
self.assertEqual(self.config._find_best_vhost("zombo.com"), ssl_vh)
def test_find_best_vhost_default(self):
# pylint: disable=protected-access
# Assume only the two default vhosts.
self.config.vhosts = [
vh for vh in self.config.vhosts
if vh.name not in ["letsencrypt.demo", "encryption-example.demo"]
]
self.assertEqual(
self.config._find_best_vhost("example.demo"), self.vh_truth[2])
def test_non_default_vhosts(self):
# pylint: disable=protected-access
self.assertEqual(len(self.config._non_default_vhosts()), 4)
def test_is_site_enabled(self):
"""Test if site is enabled.
.. note:: This test currently fails for hard links
(which may happen if you move dirs incorrectly)
.. warning:: This test does not work when running using the
unittest.main() function. It incorrectly copies symlinks.
"""
self.assertTrue(self.config.is_site_enabled(self.vh_truth[0].filep))
self.assertFalse(self.config.is_site_enabled(self.vh_truth[1].filep))
self.assertTrue(self.config.is_site_enabled(self.vh_truth[2].filep))
self.assertTrue(self.config.is_site_enabled(self.vh_truth[3].filep))
with mock.patch("os.path.isdir") as mock_isdir:
mock_isdir.return_value = False
self.assertRaises(errors.ConfigurationError,
self.config.is_site_enabled,
"irrelevant")
@mock.patch("letsencrypt.le_util.run_script")
@mock.patch("letsencrypt.le_util.exe_exists")
@mock.patch("letsencrypt_apache.parser.subprocess.Popen")
def test_enable_mod(self, mock_popen, mock_exe_exists, mock_run_script):
mock_popen().communicate.return_value = ("Define: DUMP_RUN_CFG", "")
mock_popen().returncode = 0
mock_exe_exists.return_value = True
self.config.enable_mod("ssl")
self.assertTrue("ssl_module" in self.config.parser.modules)
self.assertTrue("mod_ssl.c" in self.config.parser.modules)
self.assertTrue(mock_run_script.called)
def test_enable_mod_unsupported_dirs(self):
shutil.rmtree(os.path.join(self.config.parser.root, "mods-enabled"))
self.assertRaises(
errors.NotSupportedError, self.config.enable_mod, "ssl")
@mock.patch("letsencrypt.le_util.exe_exists")
def test_enable_mod_no_disable(self, mock_exe_exists):
mock_exe_exists.return_value = False
self.assertRaises(
errors.MisconfigurationError, self.config.enable_mod, "ssl")
def test_enable_site(self):
# Default 443 vhost
self.assertFalse(self.vh_truth[1].enabled)
self.config.enable_site(self.vh_truth[1])
self.assertTrue(self.vh_truth[1].enabled)
# Go again to make sure nothing fails
self.config.enable_site(self.vh_truth[1])
def test_enable_site_failure(self):
self.assertRaises(
errors.NotSupportedError,
self.config.enable_site,
obj.VirtualHost("asdf", "afsaf", set(), False, False))
def test_deploy_cert_newssl(self):
self.config = util.get_apache_configurator(
self.config_path, self.vhost_path, self.config_dir,
self.work_dir, version=(2, 4, 16))
self.config.parser.modules.add("ssl_module")
self.config.parser.modules.add("mod_ssl.c")
# Get the default 443 vhost
self.config.assoc["random.demo"] = self.vh_truth[1]
self.config = self.mock_deploy_cert(self.config)
self.config.deploy_cert(
"random.demo", "example/cert.pem", "example/key.pem",
"example/cert_chain.pem", "example/fullchain.pem")
self.config.save()
# Verify ssl_module was enabled.
self.assertTrue(self.vh_truth[1].enabled)
self.assertTrue("ssl_module" in self.config.parser.modules)
loc_cert = self.config.parser.find_dir(
"sslcertificatefile", "example/fullchain.pem",
self.vh_truth[1].path)
loc_key = self.config.parser.find_dir(
"sslcertificateKeyfile", "example/key.pem", self.vh_truth[1].path)
# Verify one directive was found in the correct file
self.assertEqual(len(loc_cert), 1)
self.assertEqual(configurator.get_file_path(loc_cert[0]),
self.vh_truth[1].filep)
self.assertEqual(len(loc_key), 1)
self.assertEqual(configurator.get_file_path(loc_key[0]),
self.vh_truth[1].filep)
def test_deploy_cert_newssl_no_fullchain(self):
self.config = util.get_apache_configurator(
self.config_path, self.vhost_path, self.config_dir,
self.work_dir, version=(2, 4, 16))
self.config = self.mock_deploy_cert(self.config)
self.config.parser.modules.add("ssl_module")
self.config.parser.modules.add("mod_ssl.c")
# Get the default 443 vhost
self.config.assoc["random.demo"] = self.vh_truth[1]
self.assertRaises(errors.PluginError,
lambda: self.config.deploy_cert(
"random.demo", "example/cert.pem",
"example/key.pem"))
def test_deploy_cert_old_apache_no_chain(self):
self.config = util.get_apache_configurator(
self.config_path, self.vhost_path, self.config_dir,
self.work_dir, version=(2, 4, 7))
self.config = self.mock_deploy_cert(self.config)
self.config.parser.modules.add("ssl_module")
self.config.parser.modules.add("mod_ssl.c")
# Get the default 443 vhost
self.config.assoc["random.demo"] = self.vh_truth[1]
self.assertRaises(errors.PluginError,
lambda: self.config.deploy_cert(
"random.demo", "example/cert.pem",
"example/key.pem"))
def test_deploy_cert(self):
self.config.parser.modules.add("ssl_module")
self.config.parser.modules.add("mod_ssl.c")
# Get the default 443 vhost
self.config.assoc["random.demo"] = self.vh_truth[1]
self.config.deploy_cert(
"random.demo",
"example/cert.pem", "example/key.pem", "example/cert_chain.pem")
self.config.save()
# Verify ssl_module was enabled.
self.assertTrue(self.vh_truth[1].enabled)
self.assertTrue("ssl_module" in self.config.parser.modules)
loc_cert = self.config.parser.find_dir(
"sslcertificatefile", "example/cert.pem", self.vh_truth[1].path)
loc_key = self.config.parser.find_dir(
"sslcertificateKeyfile", "example/key.pem", self.vh_truth[1].path)
loc_chain = self.config.parser.find_dir(
"SSLCertificateChainFile", "example/cert_chain.pem",
self.vh_truth[1].path)
# Verify one directive was found in the correct file
self.assertEqual(len(loc_cert), 1)
self.assertEqual(configurator.get_file_path(loc_cert[0]),
self.vh_truth[1].filep)
self.assertEqual(len(loc_key), 1)
self.assertEqual(configurator.get_file_path(loc_key[0]),
self.vh_truth[1].filep)
self.assertEqual(len(loc_chain), 1)
self.assertEqual(configurator.get_file_path(loc_chain[0]),
self.vh_truth[1].filep)
# One more time for chain directive setting
self.config.deploy_cert(
"random.demo",
"two/cert.pem", "two/key.pem", "two/cert_chain.pem")
self.assertTrue(self.config.parser.find_dir(
"SSLCertificateChainFile", "two/cert_chain.pem",
self.vh_truth[1].path))
def test_deploy_cert_invalid_vhost(self):
self.config.parser.modules.add("ssl_module")
mock_find = mock.MagicMock()
mock_find.return_value = []
self.config.parser.find_dir = mock_find
# Get the default 443 vhost
self.config.assoc["random.demo"] = self.vh_truth[1]
self.assertRaises(
errors.PluginError, self.config.deploy_cert, "random.demo",
"example/cert.pem", "example/key.pem", "example/cert_chain.pem")
def test_is_name_vhost(self):
addr = obj.Addr.fromstring("*:80")
self.assertTrue(self.config.is_name_vhost(addr))
self.config.version = (2, 2)
self.assertFalse(self.config.is_name_vhost(addr))
def test_add_name_vhost(self):
self.config.add_name_vhost(obj.Addr.fromstring("*:443"))
self.config.add_name_vhost(obj.Addr.fromstring("*:80"))
self.assertTrue(self.config.parser.find_dir(
"NameVirtualHost", "*:443", exclude=False))
self.assertTrue(self.config.parser.find_dir(
"NameVirtualHost", "*:80"))
def test_prepare_server_https(self):
mock_enable = mock.Mock()
self.config.enable_mod = mock_enable
mock_find = mock.Mock()
mock_add_dir = mock.Mock()
mock_find.return_value = []
# This will test the addition of the Listen directive
self.config.parser.find_dir = mock_find
self.config.parser.add_dir_to_ifmodssl = mock_add_dir
self.config.prepare_server_https("443")
# Changing the order these modules are enabled breaks the reverter
self.assertEqual(mock_enable.call_args_list[0][0][0], "socache_shmcb")
self.assertEqual(mock_enable.call_args[0][0], "ssl")
self.assertEqual(mock_enable.call_args[1], {"temp": False})
self.config.prepare_server_https("8080", temp=True)
# Changing the order these modules are enabled breaks the reverter
self.assertEqual(mock_enable.call_args_list[2][0][0], "socache_shmcb")
self.assertEqual(mock_enable.call_args[0][0], "ssl")
# Enable mod is temporary
self.assertEqual(mock_enable.call_args[1], {"temp": True})
self.assertEqual(mock_add_dir.call_count, 2)
def test_prepare_server_https_named_listen(self):
mock_find = mock.Mock()
mock_find.return_value = ["test1", "test2", "test3"]
mock_get = mock.Mock()
mock_get.side_effect = ["1.2.3.4:80", "[::1]:80", "1.1.1.1:443"]
mock_add_dir = mock.Mock()
mock_enable = mock.Mock()
self.config.parser.find_dir = mock_find
self.config.parser.get_arg = mock_get
self.config.parser.add_dir_to_ifmodssl = mock_add_dir
self.config.enable_mod = mock_enable
# Test Listen statements with a specific IP listed
self.config.prepare_server_https("443")
# Should only be 2 here, as the third interface
# already listens to the correct port
self.assertEqual(mock_add_dir.call_count, 2)
# Check argument to new Listen statements
self.assertEqual(mock_add_dir.call_args_list[0][0][2], ["1.2.3.4:443"])
self.assertEqual(mock_add_dir.call_args_list[1][0][2], ["[::1]:443"])
# Reset return lists and inputs
mock_add_dir.reset_mock()
mock_get.side_effect = ["1.2.3.4:80", "[::1]:80", "1.1.1.1:443"]
# Test
self.config.prepare_server_https("8080", temp=True)
self.assertEqual(mock_add_dir.call_count, 3)
self.assertEqual(mock_add_dir.call_args_list[0][0][2],
["1.2.3.4:8080", "https"])
self.assertEqual(mock_add_dir.call_args_list[1][0][2],
["[::1]:8080", "https"])
self.assertEqual(mock_add_dir.call_args_list[2][0][2],
["1.1.1.1:8080", "https"])
def test_prepare_server_https_mixed_listen(self):
mock_find = mock.Mock()
mock_find.return_value = ["test1", "test2"]
mock_get = mock.Mock()
mock_get.side_effect = ["1.2.3.4:8080", "443"]
mock_add_dir = mock.Mock()
mock_enable = mock.Mock()
self.config.parser.find_dir = mock_find
self.config.parser.get_arg = mock_get
self.config.parser.add_dir_to_ifmodssl = mock_add_dir
self.config.enable_mod = mock_enable
# Test Listen statements with a specific IP listed
self.config.prepare_server_https("443")
# No Listen directives should be added here, since the bare
# "Listen 443" already covers port 443 on every interface
self.assertEqual(mock_add_dir.call_count, 0)
def test_make_vhost_ssl(self):
ssl_vhost = self.config.make_vhost_ssl(self.vh_truth[0])
self.assertEqual(
ssl_vhost.filep,
os.path.join(self.config_path, "sites-available",
"encryption-example-le-ssl.conf"))
self.assertEqual(ssl_vhost.path,
"/files" + ssl_vhost.filep + "/IfModule/VirtualHost")
self.assertEqual(len(ssl_vhost.addrs), 1)
self.assertEqual(set([obj.Addr.fromstring("*:443")]), ssl_vhost.addrs)
self.assertEqual(ssl_vhost.name, "encryption-example.demo")
self.assertTrue(ssl_vhost.ssl)
self.assertFalse(ssl_vhost.enabled)
self.assertTrue(self.config.parser.find_dir(
"SSLCertificateFile", None, ssl_vhost.path, False))
self.assertTrue(self.config.parser.find_dir(
"SSLCertificateKeyFile", None, ssl_vhost.path, False))
self.assertEqual(self.config.is_name_vhost(self.vh_truth[0]),
self.config.is_name_vhost(ssl_vhost))
self.assertEqual(len(self.config.vhosts), 7)
def test_clean_vhost_ssl(self):
# pylint: disable=protected-access
for directive in ["SSLCertificateFile", "SSLCertificateKeyFile",
"SSLCertificateChainFile", "SSLCACertificatePath"]:
for _ in range(10):
self.config.parser.add_dir(self.vh_truth[1].path,
directive, ["bogus"])
self.config.save()
self.config._clean_vhost(self.vh_truth[1])
self.config.save()
loc_cert = self.config.parser.find_dir(
'SSLCertificateFile', None, self.vh_truth[1].path, False)
loc_key = self.config.parser.find_dir(
'SSLCertificateKeyFile', None, self.vh_truth[1].path, False)
loc_chain = self.config.parser.find_dir(
'SSLCertificateChainFile', None, self.vh_truth[1].path, False)
loc_cacert = self.config.parser.find_dir(
'SSLCACertificatePath', None, self.vh_truth[1].path, False)
self.assertEqual(len(loc_cert), 1)
self.assertEqual(len(loc_key), 1)
self.assertEqual(len(loc_chain), 0)
self.assertEqual(len(loc_cacert), 10)
def test_deduplicate_directives(self):
# pylint: disable=protected-access
DIRECTIVE = "Foo"
for _ in range(10):
self.config.parser.add_dir(self.vh_truth[1].path,
DIRECTIVE, ["bar"])
self.config.save()
self.config._deduplicate_directives(self.vh_truth[1].path, [DIRECTIVE])
self.config.save()
self.assertEqual(
len(self.config.parser.find_dir(
DIRECTIVE, None, self.vh_truth[1].path, False)), 1)
def test_remove_directives(self):
# pylint: disable=protected-access
DIRECTIVES = ["Foo", "Bar"]
for directive in DIRECTIVES:
for _ in range(10):
self.config.parser.add_dir(self.vh_truth[1].path,
directive, ["baz"])
self.config.save()
self.config._remove_directives(self.vh_truth[1].path, DIRECTIVES)
self.config.save()
for directive in DIRECTIVES:
self.assertEqual(
len(self.config.parser.find_dir(
directive, None, self.vh_truth[1].path, False)), 0)
def test_make_vhost_ssl_extra_vhs(self):
self.config.aug.match = mock.Mock(return_value=["p1", "p2"])
self.assertRaises(
errors.PluginError, self.config.make_vhost_ssl, self.vh_truth[0])
def test_make_vhost_ssl_bad_write(self):
mock_open = mock.mock_open()
# register_file_creation calls open, so mock it out before patching open
self.config.reverter.register_file_creation = mock.Mock()
mock_open.side_effect = IOError
with mock.patch("__builtin__.open", mock_open):
self.assertRaises(
errors.PluginError,
self.config.make_vhost_ssl, self.vh_truth[0])
def test_get_ssl_vhost_path(self):
# pylint: disable=protected-access
self.assertTrue(
self.config._get_ssl_vhost_path("example_path").endswith(".conf"))
def test_add_name_vhost_if_necessary(self):
# pylint: disable=protected-access
self.config.save = mock.Mock()
self.config.version = (2, 2)
self.config._add_name_vhost_if_necessary(self.vh_truth[0])
self.assertTrue(self.config.save.called)
new_addrs = set()
for addr in self.vh_truth[0].addrs:
new_addrs.add(obj.Addr(("_default_", addr.get_port(),)))
self.vh_truth[0].addrs = new_addrs
self.config._add_name_vhost_if_necessary(self.vh_truth[0])
self.assertEqual(self.config.save.call_count, 2)
@mock.patch("letsencrypt_apache.configurator.tls_sni_01.ApacheTlsSni01.perform")
@mock.patch("letsencrypt_apache.configurator.ApacheConfigurator.restart")
def test_perform(self, mock_restart, mock_perform):
# Only tests functionality specific to configurator.perform
# Note: As more challenges are offered this will have to be expanded
account_key, achall1, achall2 = self.get_achalls()
expected = [
achall1.response(account_key),
achall2.response(account_key),
]
mock_perform.return_value = expected
responses = self.config.perform([achall1, achall2])
self.assertEqual(mock_perform.call_count, 1)
self.assertEqual(responses, expected)
self.assertEqual(mock_restart.call_count, 1)
@mock.patch("letsencrypt_apache.configurator.ApacheConfigurator.restart")
def test_cleanup(self, mock_restart):
_, achall1, achall2 = self.get_achalls()
self.config._chall_out.add(achall1) # pylint: disable=protected-access
self.config._chall_out.add(achall2) # pylint: disable=protected-access
self.config.cleanup([achall1])
self.assertFalse(mock_restart.called)
self.config.cleanup([achall2])
self.assertTrue(mock_restart.called)
@mock.patch("letsencrypt_apache.configurator.ApacheConfigurator.restart")
def test_cleanup_no_errors(self, mock_restart):
_, achall1, achall2 = self.get_achalls()
self.config._chall_out.add(achall1) # pylint: disable=protected-access
self.config.cleanup([achall2])
self.assertFalse(mock_restart.called)
self.config.cleanup([achall1, achall2])
self.assertTrue(mock_restart.called)
@mock.patch("letsencrypt.le_util.run_script")
def test_get_version(self, mock_script):
mock_script.return_value = (
"Server Version: Apache/2.4.2 (Debian)", "")
self.assertEqual(self.config.get_version(), (2, 4, 2))
mock_script.return_value = (
"Server Version: Apache/2 (Linux)", "")
self.assertEqual(self.config.get_version(), (2,))
mock_script.return_value = (
"Server Version: Apache (Debian)", "")
self.assertRaises(errors.PluginError, self.config.get_version)
mock_script.return_value = (
"Server Version: Apache/2.3{0} Apache/2.4.7".format(
os.linesep), "")
self.assertRaises(errors.PluginError, self.config.get_version)
mock_script.side_effect = errors.SubprocessError("Can't find program")
self.assertRaises(errors.PluginError, self.config.get_version)
@mock.patch("letsencrypt_apache.configurator.le_util.run_script")
def test_restart(self, _):
self.config.restart()
@mock.patch("letsencrypt_apache.configurator.le_util.run_script")
def test_restart_bad_process(self, mock_run_script):
mock_run_script.side_effect = [None, errors.SubprocessError]
self.assertRaises(errors.MisconfigurationError, self.config.restart)
@mock.patch("letsencrypt.le_util.run_script")
def test_config_test(self, _):
self.config.config_test()
@mock.patch("letsencrypt.le_util.run_script")
def test_config_test_bad_process(self, mock_run_script):
mock_run_script.side_effect = errors.SubprocessError
self.assertRaises(errors.MisconfigurationError,
self.config.config_test)
def test_get_all_certs_keys(self):
c_k = self.config.get_all_certs_keys()
self.assertEqual(len(c_k), 2)
cert, key, path = next(iter(c_k))
self.assertTrue("cert" in cert)
self.assertTrue("key" in key)
self.assertTrue("default-ssl" in path)
def test_get_all_certs_keys_malformed_conf(self):
self.config.parser.find_dir = mock.Mock(
side_effect=[["path"], [], ["path"], []])
c_k = self.config.get_all_certs_keys()
self.assertFalse(c_k)
def test_more_info(self):
self.assertTrue(self.config.more_info())
def test_get_chall_pref(self):
self.assertTrue(isinstance(self.config.get_chall_pref(""), list))
def test_install_ssl_options_conf(self):
from letsencrypt_apache.configurator import install_ssl_options_conf
path = os.path.join(self.work_dir, "test_it")
install_ssl_options_conf(path)
self.assertTrue(os.path.isfile(path))
# TEST ENHANCEMENTS
def test_supported_enhancements(self):
self.assertTrue(isinstance(self.config.supported_enhancements(), list))
@mock.patch("letsencrypt.le_util.exe_exists")
def test_enhance_unknown_vhost(self, mock_exe):
self.config.parser.modules.add("rewrite_module")
mock_exe.return_value = True
ssl_vh = obj.VirtualHost(
"fp", "ap", set([obj.Addr(("*", "443")),
obj.Addr(("satoshi.com",))]),
True, False)
self.config.vhosts.append(ssl_vh)
self.assertRaises(
errors.PluginError,
self.config.enhance, "satoshi.com", "redirect")
def test_enhance_unknown_enhancement(self):
self.assertRaises(
errors.PluginError,
self.config.enhance, "letsencrypt.demo", "unknown_enhancement")
@mock.patch("letsencrypt.le_util.run_script")
@mock.patch("letsencrypt.le_util.exe_exists")
def test_http_header_hsts(self, mock_exe, _):
self.config.parser.update_runtime_variables = mock.Mock()
self.config.parser.modules.add("mod_ssl.c")
mock_exe.return_value = True
# This will create an ssl vhost for letsencrypt.demo
self.config.enhance("letsencrypt.demo", "ensure-http-header",
"Strict-Transport-Security")
self.assertTrue("headers_module" in self.config.parser.modules)
# Get the ssl vhost for letsencrypt.demo
ssl_vhost = self.config.assoc["letsencrypt.demo"]
# These are not immediately available in find_dir even with save() and
# load(). They must be found in sites-available
hsts_header = self.config.parser.find_dir(
"Header", None, ssl_vhost.path)
# four args to HSTS header
self.assertEqual(len(hsts_header), 4)
def test_http_header_hsts_twice(self):
self.config.parser.modules.add("mod_ssl.c")
# skip the enable mod
self.config.parser.modules.add("headers_module")
# This will create an ssl vhost for encryption-example.demo
self.config.enhance("encryption-example.demo", "ensure-http-header",
"Strict-Transport-Security")
self.assertRaises(
errors.PluginEnhancementAlreadyPresent,
self.config.enhance, "encryption-example.demo",
"ensure-http-header", "Strict-Transport-Security")
@mock.patch("letsencrypt.le_util.run_script")
@mock.patch("letsencrypt.le_util.exe_exists")
def test_http_header_uir(self, mock_exe, _):
self.config.parser.update_runtime_variables = mock.Mock()
self.config.parser.modules.add("mod_ssl.c")
mock_exe.return_value = True
# This will create an ssl vhost for letsencrypt.demo
self.config.enhance("letsencrypt.demo", "ensure-http-header",
"Upgrade-Insecure-Requests")
self.assertTrue("headers_module" in self.config.parser.modules)
# Get the ssl vhost for letsencrypt.demo
ssl_vhost = self.config.assoc["letsencrypt.demo"]
# These are not immediately available in find_dir even with save() and
# load(). They must be found in sites-available
uir_header = self.config.parser.find_dir(
"Header", None, ssl_vhost.path)
# four args to the UIR header
self.assertEqual(len(uir_header), 4)
def test_http_header_uir_twice(self):
self.config.parser.modules.add("mod_ssl.c")
# skip the enable mod
self.config.parser.modules.add("headers_module")
# This will create an ssl vhost for encryption-example.demo
self.config.enhance("encryption-example.demo", "ensure-http-header",
"Upgrade-Insecure-Requests")
self.assertRaises(
errors.PluginEnhancementAlreadyPresent,
self.config.enhance, "encryption-example.demo",
"ensure-http-header", "Upgrade-Insecure-Requests")
@mock.patch("letsencrypt.le_util.run_script")
@mock.patch("letsencrypt.le_util.exe_exists")
def test_redirect_well_formed_http(self, mock_exe, _):
self.config.parser.update_runtime_variables = mock.Mock()
mock_exe.return_value = True
self.config.get_version = mock.Mock(return_value=(2, 2))
# This will create an ssl vhost for letsencrypt.demo
self.config.enhance("letsencrypt.demo", "redirect")
# These are not immediately available in find_dir even with save() and
# load(). They must be found in sites-available
rw_engine = self.config.parser.find_dir(
"RewriteEngine", "on", self.vh_truth[3].path)
rw_rule = self.config.parser.find_dir(
"RewriteRule", None, self.vh_truth[3].path)
self.assertEqual(len(rw_engine), 1)
# three args to rw_rule
self.assertEqual(len(rw_rule), 3)
self.assertTrue(rw_engine[0].startswith(self.vh_truth[3].path))
self.assertTrue(rw_rule[0].startswith(self.vh_truth[3].path))
self.assertTrue("rewrite_module" in self.config.parser.modules)
def test_rewrite_rule_exists(self):
# Skip the enable mod
self.config.parser.modules.add("rewrite_module")
self.config.get_version = mock.Mock(return_value=(2, 3, 9))
self.config.parser.add_dir(
self.vh_truth[3].path, "RewriteRule", ["Unknown"])
# pylint: disable=protected-access
self.assertTrue(self.config._is_rewrite_exists(self.vh_truth[3]))
def test_rewrite_engine_exists(self):
# Skip the enable mod
self.config.parser.modules.add("rewrite_module")
self.config.get_version = mock.Mock(return_value=(2, 3, 9))
self.config.parser.add_dir(
self.vh_truth[3].path, "RewriteEngine", "on")
# pylint: disable=protected-access
self.assertTrue(self.config._is_rewrite_engine_on(self.vh_truth[3]))
@mock.patch("letsencrypt.le_util.run_script")
@mock.patch("letsencrypt.le_util.exe_exists")
def test_redirect_with_existing_rewrite(self, mock_exe, _):
self.config.parser.update_runtime_variables = mock.Mock()
mock_exe.return_value = True
self.config.get_version = mock.Mock(return_value=(2, 2))
# Create a preexisting rewrite rule
self.config.parser.add_dir(
self.vh_truth[3].path, "RewriteRule", ["UnknownPattern",
"UnknownTarget"])
self.config.save()
# This will create an ssl vhost for letsencrypt.demo
self.config.enhance("letsencrypt.demo", "redirect")
# These are not immediately available in find_dir even with save() and
# load(). They must be found in sites-available
rw_engine = self.config.parser.find_dir(
"RewriteEngine", "on", self.vh_truth[3].path)
rw_rule = self.config.parser.find_dir(
"RewriteRule", None, self.vh_truth[3].path)
self.assertEqual(len(rw_engine), 1)
# three args for the new rewrite rule + two args for the preexisting rule
self.assertEqual(len(rw_rule), 5)
self.assertTrue(rw_engine[0].startswith(self.vh_truth[3].path))
self.assertTrue(rw_rule[0].startswith(self.vh_truth[3].path))
self.assertTrue("rewrite_module" in self.config.parser.modules)
def test_redirect_with_conflict(self):
self.config.parser.modules.add("rewrite_module")
ssl_vh = obj.VirtualHost(
"fp", "ap", set([obj.Addr(("*", "443")),
obj.Addr(("zombo.com",))]),
True, False)
# No names, so this vhost should conflict.
# pylint: disable=protected-access
self.assertRaises(
errors.PluginError, self.config._enable_redirect, ssl_vh, "")
def test_redirect_twice(self):
# Skip the enable mod
self.config.parser.modules.add("rewrite_module")
self.config.get_version = mock.Mock(return_value=(2, 3, 9))
self.config.enhance("encryption-example.demo", "redirect")
self.assertRaises(
errors.PluginEnhancementAlreadyPresent,
self.config.enhance, "encryption-example.demo", "redirect")
def test_create_own_redirect(self):
self.config.parser.modules.add("rewrite_module")
self.config.get_version = mock.Mock(return_value=(2, 3, 9))
# For full testing, give the vhost a name and aliases
self.vh_truth[1].name = "default.com"
self.vh_truth[1].aliases = set(["yes.default.com"])
# pylint: disable=protected-access
self.config._enable_redirect(self.vh_truth[1], "")
self.assertEqual(len(self.config.vhosts), 7)
def test_create_own_redirect_for_old_apache_version(self):
self.config.parser.modules.add("rewrite_module")
self.config.get_version = mock.Mock(return_value=(2, 2))
# For full testing, give the vhost a name and aliases
self.vh_truth[1].name = "default.com"
self.vh_truth[1].aliases = set(["yes.default.com"])
# pylint: disable=protected-access
self.config._enable_redirect(self.vh_truth[1], "")
self.assertEqual(len(self.config.vhosts), 7)
def test_sift_line(self):
# pylint: disable=protected-access
small_quoted_target = "RewriteRule ^ \"http://\""
self.assertFalse(self.config._sift_line(small_quoted_target))
https_target = "RewriteRule ^ https://satoshi"
self.assertTrue(self.config._sift_line(https_target))
normal_target = "RewriteRule ^/(.*) http://www.a.com:1234/$1 [L,R]"
self.assertFalse(self.config._sift_line(normal_target))
@mock.patch("letsencrypt_apache.configurator.zope.component.getUtility")
def test_make_vhost_ssl_with_existing_rewrite_rule(self, mock_get_utility):
self.config.parser.modules.add("rewrite_module")
http_vhost = self.vh_truth[0]
self.config.parser.add_dir(
http_vhost.path, "RewriteEngine", "on")
self.config.parser.add_dir(
http_vhost.path, "RewriteRule",
["^",
"https://%{SERVER_NAME}%{REQUEST_URI}",
"[L,QSA,R=permanent]"])
self.config.save()
ssl_vhost = self.config.make_vhost_ssl(self.vh_truth[0])
self.assertTrue(self.config.parser.find_dir(
"RewriteEngine", "on", ssl_vhost.path, False))
conf_text = open(ssl_vhost.filep).read()
commented_rewrite_rule = ("# RewriteRule ^ "
"https://%{SERVER_NAME}%{REQUEST_URI} "
"[L,QSA,R=permanent]")
self.assertTrue(commented_rewrite_rule in conf_text)
mock_get_utility().add_message.assert_called_once_with(mock.ANY,
mock.ANY)
def get_achalls(self):
"""Return testing achallenges."""
account_key = self.rsa512jwk
achall1 = achallenges.KeyAuthorizationAnnotatedChallenge(
challb=acme_util.chall_to_challb(
challenges.TLSSNI01(
token="jIq_Xy1mXGN37tb4L6Xj_es58fW571ZNyXekdZzhh7Q"),
"pending"),
domain="encryption-example.demo", account_key=account_key)
achall2 = achallenges.KeyAuthorizationAnnotatedChallenge(
challb=acme_util.chall_to_challb(
challenges.TLSSNI01(
token="uqnaPzxtrndteOqtrXb0Asl5gOJfWAnnx6QJyvcmlDU"),
"pending"),
domain="letsencrypt.demo", account_key=account_key)
return account_key, achall1, achall2
def test_make_addrs_sni_ready(self):
self.config.version = (2, 2)
self.config.make_addrs_sni_ready(
set([obj.Addr.fromstring("*:443"), obj.Addr.fromstring("*:80")]))
self.assertTrue(self.config.parser.find_dir(
"NameVirtualHost", "*:80", exclude=False))
self.assertTrue(self.config.parser.find_dir(
"NameVirtualHost", "*:443", exclude=False))
def test_aug_version(self):
mock_match = mock.Mock(return_value=["something"])
self.config.aug.match = mock_match
# pylint: disable=protected-access
self.assertEqual(self.config._check_aug_version(),
["something"])
self.config.aug.match.side_effect = RuntimeError
self.assertFalse(self.config._check_aug_version())
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
maximumG/exscript
|
refs/heads/master
|
Exscript/protocols/drivers/zte.py
|
3
|
#
# Copyright (C) 2010-2017 Samuel Abels
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
A driver for devices running the ZTE operating system.
"""
import re
from Exscript.protocols.drivers.driver import Driver
_user_re = [re.compile(r'user ?name:', re.I), re.compile(r'login', re.I)]
_password_re = [re.compile(r'(?:User )?Password:', re.I)]
_prompt_re = [re.compile(
r'[\r\n][\-\w+\.\(\)]+(?:\([^\)]+\))?[>#$] ?$|(?:\(y/n\)\[n\])'),
# Privilege-mode password prompt, used when the enable password
# differs from the login password.
re.compile(r"[\r\n]Password: ?", re.I)]
_error_re = [re.compile(r'%Error'),
re.compile(r'(?:Unrecognized|Incomplete) command', re.I),
re.compile(r'Invalid input', re.I)]
_zte_re = re.compile(r"ZTE", re.I)
class ZteDriver(Driver):
def __init__(self):
Driver.__init__(self, 'zte')
self.user_re = _user_re
self.password_re = _password_re
self.prompt_re = _prompt_re
self.error_re = _error_re
def auto_authorize(self, conn, account, flush, bailout):
conn.send('enable\r\n')
conn.app_authorize(account, flush, bailout)
def check_head_for_os(self, string):
if _zte_re.search(string):
return 90
return 0
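# Hedged illustration (not part of the original driver): the integer
# returned by check_head_for_os is a confidence score that Exscript
# compares across drivers when auto-detecting the remote OS. For example:
#
#   driver = ZteDriver()
#   driver.check_head_for_os('Welcome to ZTE ZXR10 Router')  # -> 90
#   driver.check_head_for_os('Cisco IOS Software')           # -> 0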
|
Laurawly/tvm-1
|
refs/heads/master
|
python/tvm/topi/sparse/__init__.py
|
5
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Sparse operators"""
from __future__ import absolute_import as _abs
from .csrmv import csrmv
from .csrmm import csrmm
from .dense import dense
|
GBhack/PFE-VA_Dev
|
refs/heads/master
|
components/fl/pb.py
|
2
|
"""
pb.py
Functional Level module : PushButton manager
Keeps track of the pushbutton's history
Waits for a TCP request on its own port
When gets a request, responds with True if
the button has been pushed since last request
"""
#!/usr/bin/python3.5
#-*- coding: utf-8 -*-
###Standard imports :
import atexit
import time
from os import path
###Specific imports :
##robotBasics:
#Constants:
from robotBasics.constants.gpiodef import RESET as RESET_GPIO
from robotBasics.constants.connectionSettings import PB as PB_CS
#Classes & Methods:
from robotBasics.sockets.tcp.Server import Server as Server
from robotBasics.logger import robotLogger
###########################################################################
# Environment Setup : #
###########################################################################
#If we are on an actual robot :
if path.isdir("/home/robot"):
ROBOT_ROOT = '/home/robot/'
import Adafruit_BBIO.GPIO as GPIO
elif path.isfile(path.expanduser('~/.robotConf')):
#If we're not on an actual robot, check if we have
#a working environment set for robot debugging:
CONFIG_FILE = open(path.expanduser('~/.robotConf'), 'r')
ROBOT_ROOT = CONFIG_FILE.read().strip()
CONFIG_FILE.close()
import Adafruit_BBIO_SIM.GPIO as GPIO
#Simulator setup
GPIO.pin_association(RESET_GPIO, 'pushbutton\'s state')
GPIO.setup_behavior('print')
else:
ROBOT_ROOT = ''
print('It seems like you are NOT working on an actual robot. \
You should set up a debugging environment before running any code (see documentation)')
#Logging Initialization :
LOGGER = robotLogger("FL > pb", ROBOT_ROOT+'logs/fl/')
###########################################################################
# I/O Initialization : #
###########################################################################
#GPIO setup :
GPIO.setup(RESET_GPIO, GPIO.IN)
###########################################################################
# Functions/Callbacks definition : #
###########################################################################
def pb_update_cb(data, arg):
"""
Callback function for push button status reading :
Triggered when a request is received.
Responds with True if the button has been pushed since the
last request, then resets the button's status.
"""
#Responding the request with the button pushing status
arg["connection"].send([arg["state"]])
#Resetting the button's pushed status
arg["state"] = False
###########################################################################
# SERVERS SET UP AND SETTINGS : #
###########################################################################
#### SERVER CONNECTION :
#Creating the connection object
SERVER = Server(PB_CS, LOGGER)
#Registering the close method to be executed at exit (clean disconnection)
atexit.register(SERVER.close)
#Opening the connection
SERVER.connect()
#### CALLBACKS' ARGUMENT SETUP:
ARGUMENTS = {
"connection" : SERVER,
"state" : False
}
###########################################################################
# RUNNING : #
###########################################################################
#Waiting for requests and linking them to the callback method
SERVER.listen_to_clients(pb_update_cb, ARGUMENTS)
LOOPING = True
while LOOPING:
try:
if not GPIO.input(RESET_GPIO):
while not GPIO.input(RESET_GPIO):
time.sleep(0.1)
ARGUMENTS['state'] = True
time.sleep(0.5)
except (Exception, KeyboardInterrupt):
#Stop looping on any error or on Ctrl+C
LOOPING = False
|
mic4ael/indico
|
refs/heads/master
|
indico/migrations/versions/20191108_1402_39a25a873063_add_editing_file_types_table.py
|
5
|
"""Add editing file_types table
Revision ID: 39a25a873063
Revises: bb522e9f9066
Create Date: 2019-11-08 14:02:33.351292
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql.ddl import CreateSchema, DropSchema
# revision identifiers, used by Alembic.
revision = '39a25a873063'
down_revision = 'bb522e9f9066'
branch_labels = None
depends_on = None
def upgrade():
op.execute(CreateSchema('event_editing'))
op.create_table(
'file_types',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('event_id', sa.Integer(), nullable=False, index=True),
sa.Column('name', sa.String(), nullable=False),
sa.Column('extensions', postgresql.ARRAY(sa.String()), nullable=False),
sa.Column('allow_multiple_files', sa.Boolean(), nullable=False),
sa.Column('required', sa.Boolean(), nullable=False),
sa.Column('publishable', sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(['event_id'], ['events.events.id']),
sa.PrimaryKeyConstraint('id'),
schema='event_editing'
)
op.create_index('ix_uq_file_types_event_id_name_lower', 'file_types', ['event_id', sa.text('lower(name)')],
unique=True, schema='event_editing')
def downgrade():
op.drop_table('file_types', schema='event_editing')
op.execute(DropSchema('event_editing'))
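# Hedged usage sketch (not part of this migration): it can be applied or
# rolled back programmatically through Alembic's command API, assuming a
# valid alembic.ini is available in the working directory.
#
#   from alembic import command
#   from alembic.config import Config
#   command.upgrade(Config('alembic.ini'), '39a25a873063')
#   command.downgrade(Config('alembic.ini'), 'bb522e9f9066')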
|
appleseedhq/gaffer
|
refs/heads/master
|
startup/Gaffer/loopCompatibility.py
|
8
|
##########################################################################
#
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
Gaffer.LoopComputeNode = Gaffer.Loop
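# Hedged compatibility note (not part of the original shim): the alias
# above lets legacy scripts keep constructing the old node name, e.g.:
#
#   import Gaffer
#   node = Gaffer.LoopComputeNode()  # actually creates a Gaffer.Loop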
|
percy-g2/Novathor_xperia_u8500
|
refs/heads/master
|
6.1.1.B.0.253/external/webkit/Tools/Scripts/webkitpy/common/net/testoutputset.py
|
15
|
#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.system.directoryfileset import DirectoryFileSet
from webkitpy.common.system.zipfileset import ZipFileSet
import re
import testoutput
import urllib
class TestOutputSet(object):
def __init__(self, name, platform, zip_file, **kwargs):
self._name = name
self._platform = platform
self._zip_file = zip_file
self._include_expected = kwargs.get('include_expected', True)
@classmethod
def from_zip_url(cls, platform, zip_path):
return TestOutputSet('local zip %s builder' % platform, platform, ZipFileSet(zip_path))
@classmethod
def from_zip(cls, platform, zip):
return TestOutputSet('local zip %s builder' % platform, platform, zip)
@classmethod
def from_zip_map(cls, zip_map):
output_sets = []
for k, v in zip_map.items():
output_sets.append(TestOutputSet.from_zip(k, v))
return AggregateTestOutputSet(output_sets)
@classmethod
def from_path(cls, path, platform=None):
return TestOutputSet('local %s builder' % platform, platform, DirectoryFileSet(path))
def name(self):
return self._name
def set_platform(self, platform):
self._platform = platform
def files(self):
return [self._zip_file.open(filename) for filename in self._zip_file.namelist()]
def _extract_output_files(self, name, exact_match):
name_matcher = re.compile(name)
actual_matcher = re.compile(r'-actual\.')
expected_matcher = re.compile(r'-expected\.')
checksum_files = []
text_files = []
image_files = []
for output_file in self.files():
name_match = name_matcher.search(output_file.name())
actual_match = actual_matcher.search(output_file.name())
expected_match = expected_matcher.search(output_file.name())
if not (name_match and (actual_match or (self._include_expected and expected_match))):
continue
if output_file.name().endswith('.checksum'):
checksum_files.append(output_file)
elif output_file.name().endswith('.txt'):
text_files.append(output_file)
elif output_file.name().endswith('.png'):
image_files.append(output_file)
return (checksum_files, text_files, image_files)
def _extract_file_with_name(self, name, files):
for file in files:
if file.name() == name:
return file
return None
def _make_output_from_image(self, image_file, checksum_files):
checksum_file_name = re.sub(r'\.png', '.checksum', image_file.name())
checksum_file = self._extract_file_with_name(checksum_file_name, checksum_files)
return testoutput.ImageTestOutput(self._platform, image_file, checksum_file)
def outputs_for(self, name, **kwargs):
target_type = kwargs.get('target_type', None)
exact_match = kwargs.get('exact_match', False)
if re.search(r'\.x?html', name):
name = name[:name.rindex('.')]
(checksum_files, text_files, image_files) = self._extract_output_files(name, exact_match)
outputs = [self._make_output_from_image(image_file, checksum_files) for image_file in image_files]
outputs += [testoutput.TextTestOutput(self._platform, text_file) for text_file in text_files]
if exact_match:
outputs = filter(lambda output: output.name() == name, outputs)
outputs = filter(lambda r: target_type in [None, r.type()], outputs)
return outputs
class AggregateTestOutputSet(object):
"""Set of test outputs from a list of builders"""
def __init__(self, builders):
self._builders = builders
def outputs_for(self, name, **kwargs):
return sum([builder.outputs_for(name, **kwargs) for builder in self._builders], [])
def builders(self):
return self._builders
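# Hedged usage sketch (not part of the original module): fetching the
# outputs for one layout test from a local results zip. The platform
# string, zip path and test name are hypothetical.
#
#   outputs = TestOutputSet.from_zip_url('chromium-mac', '/tmp/results.zip')
#   for output in outputs.outputs_for('fast/css/zoom'):
#       print(output.name())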
|
mindnervestech/mnrp
|
refs/heads/master
|
addons/website_hr/models/__init__.py
|
439
|
import hr
|
Gustry/inasafe
|
refs/heads/develop
|
safe/gui/tools/help/needs_manager_help.py
|
3
|
# coding=utf-8
"""Context help for minimum needs manager dialog."""
from safe.utilities.i18n import tr
from safe import messaging as m
from safe.messaging import styles
from safe.utilities.resources import resources_path
SUBSECTION_STYLE = styles.SUBSECTION_LEVEL_3_STYLE
INFO_STYLE = styles.BLUE_LEVEL_4_STYLE
__author__ = 'ismailsunni'
def needs_manager_helps():
"""Help message for Batch Dialog.
.. versionadded:: 3.2.1
:returns: A message object containing helpful information.
:rtype: messaging.message.Message
"""
message = m.Message()
message.add(m.Brand())
message.add(heading())
message.add(content())
return message
def heading():
"""Helper method that returns just the header.
This method was added so that the text could be reused in the
other contexts.
.. versionadded:: 3.2.2
:returns: A heading object.
:rtype: safe.messaging.heading.Heading
"""
message = m.Heading(tr('Minimum needs manager help'), **SUBSECTION_STYLE)
return message
def content():
"""Helper method that returns just the content.
This method was added so that the text could be reused in the
dock_help module.
.. versionadded:: 3.2.2
:returns: A message object without brand element.
:rtype: safe.messaging.message.Message
"""
message = m.Message()
paragraph = m.Paragraph(
m.Image(
'file:///%s/img/screenshots/'
'minimum-needs-screenshot.png' % resources_path()),
style_class='text-center'
)
message.add(paragraph)
message.add(m.Paragraph(tr(
'During and after a disaster, providing for the basic human minimum '
'needs of food, water, hygiene and shelter is an important element of '
'your contingency plan. InaSAFE has a customisable minimum needs '
'system that allows you to define country or region specific '
'requirements for compiling a needs report where the exposure '
'layer represents population.'
)))
message.add(m.Paragraph(tr(
'By default InaSAFE uses minimum needs defined for Indonesia - '
'and ships with additional profiles for the Philippines and Tanzania. '
'You can customise these or add your own region-specific profiles too.'
)))
message.add(m.Paragraph(tr(
'Minimum needs are grouped into regional or linguistic \'profiles\'. '
'The default profile is \'BNPB_en\' - the english profile for the '
'national disaster agency in Indonesia. '
'You will see that this profile defines requirements for displaced '
'persons in terms of Rice, Drinking Water, Clean Water (for bathing '
'etc.), Family Kits (with personal hygiene items) and provision of '
'toilets.'
)))
message.add(m.Paragraph(tr(
'Each item in the profile can be customised or removed. For example '
'selecting the first item in the list and then clicking on the '
'\'pencil\' icon will show the details of how it was defined. '
'If you scroll up and down in the panel you will see that for each '
'item, you can set a name, description, units (in singular, '
'plural and abbreviated forms), specify maxima and minima for the '
'quantity of item allowed, a default and a frequency. You would use '
'the maxima and minima to ensure that disaster managers never '
'allocate amounts that will not be sufficient for human livelihood, '
'and also that will not overtax the logistics operation for those '
'providing humanitarian relief.'
)))
message.add(m.Paragraph(tr(
'The final item in the item configuration is the \'readable '
'sentence\' which bears special discussion. Using a simple system of '
'tokens you can construct a sentence that will be used in the '
'generated needs report.'
)))
message.add(m.Heading(tr('Minimum needs profiles'), **INFO_STYLE))
message.add(m.Paragraph(tr(
'A profile is a collection of resources that define the minimum needs '
'for a particular country or region. Typically a profile should be '
'based on a regional, national or international standard. The '
'actual definition of which resources are needed in a given '
'profile is dependent on the local conditions and customs for the '
'area where the contingency plan is being devised.'
)))
message.add(m.Paragraph(tr(
'For example in the middle east, rice is a staple food whereas in '
'South Africa, maize meal is a staple food and thus the contingency '
'planning should take these localised needs into account.'
)))
message.add(m.Heading(tr('Minimum needs resources'), **INFO_STYLE))
message.add(m.Paragraph(tr(
'Each item in a minimum needs profile is a resource. Each resource '
'is described as a simple natural language sentence e.g.:'
)))
message.add(m.EmphasizedText(tr(
'Each person should be provided with 2.8 kilograms of Rice weekly.'
)))
message.add(m.Paragraph(tr(
'By clicking on a resource entry in the profile window, and then '
'clicking the black pencil icon you will be able to edit the '
'resource using the resource editor. Alternatively you can create a '
'new resource for a profile by clicking on the black + icon in '
'the profile manager. You can also remove any resource from a '
'profile using the - icon in the profile manager.')))
message.add(m.Heading(tr('Resource Editor'), **INFO_STYLE))
message.add(m.Paragraph(tr(
'When switching to edit or add resource mode, the minimum needs '
'manager will be updated to show the resource editor. Each '
'resource is described in terms of:'
)))
bullets = m.BulletedList()
bullets.add(m.Text(
m.ImportantText(tr(
'resource name')),
tr(' - e.g. Rice')))
bullets.add(m.Text(
m.ImportantText(tr(
'a description of the resource')),
tr(' - e.g. Basic food')))
bullets.add(m.Text(
m.ImportantText(tr(
'unit in which the resource is provided')),
tr(' - e.g. kilogram')))
bullets.add(m.Text(
m.ImportantText(tr(
'pluralised form of the units')),
tr(' - e.g. kilograms')))
bullets.add(m.Text(
m.ImportantText(tr(
'abbreviation for the unit')),
tr(' - e.g. kg')))
bullets.add(m.Text(
m.ImportantText(tr(
'the default allocation for the resource')),
tr(' - e.g. 2.8. This number can be overridden on a '
'per-analysis basis')))
bullets.add(m.Text(
m.ImportantText(tr(
'minimum allowed which is used to prevent allocating')),
tr(' - e.g. no drinking water to displaced persons')))
bullets.add(m.ImportantText(tr(
'maximum allowed which is used to set a sensible upper '
'limit for the resource')))
bullets.add(m.ImportantText(tr(
'a readable sentence which is used to compile the '
'sentence describing the resource in reports.')))
message.add(bullets)
message.add(m.Paragraph(tr(
'These parameters are probably all fairly self explanatory, but '
'the readable sentence probably needs further detail. The '
'sentence is compiled using a simple keyword token replacement '
'system. The following tokens can be used:')))
bullets = m.BulletedList()
bullets.add(m.Text('{{ Default }}'))
bullets.add(m.Text('{{ Unit }}'))
bullets.add(m.Text('{{ Units }}'))
bullets.add(m.Text('{{ Unit abbreviation }}'))
bullets.add(m.Text('{{ Resource name }}'))
bullets.add(m.Text('{{ Frequency }}'))
bullets.add(m.Text('{{ Minimum allowed }}'))
bullets.add(m.Text('{{ Maximum allowed }}'))
message.add(bullets)
message.add(m.Paragraph(tr(
'When the token is placed in the sentence it will be replaced with '
'the actual value at report generation time. This contrived example '
'shows a tokenised sentence that includes all possible keywords:'
)))
message.add(m.EmphasizedText(tr(
'A displaced person should be provided with {{ %s }} '
'{{ %s }}/{{ %s }}/{{ %s }} of {{ %s }}. Though no less than {{ %s }} '
'and no more than {{ %s }}. This should be provided {{ %s }}.' % (
'Default',
'Unit',
'Units',
'Unit abbreviation',
'Resource name',
'Minimum allowed',
'Maximum allowed',
'Frequency'
)
)))
message.add(m.Paragraph(tr(
'Would generate a human readable sentence like this:')))
message.add(m.ImportantText(tr(
'A displaced person should be provided with 2.8 kilogram/kilograms/kg '
'of rice. Though no less than 0 and no more than 100. This should '
'be provided daily.'
)))
message.add(m.Paragraph(tr(
'Once you have populated the resource elements, click the Save '
'resource button to return to the profile view. You will see the '
'new resource added in the profile\'s resource list.'
)))
message.add(m.Heading(tr('Managing profiles'), **INFO_STYLE))
message.add(m.Paragraph(tr(
'In addition to the profiles that come as standard with InaSAFE, you '
'can create new ones, either from scratch, or based on an existing '
'one (which you can then modify).'
)))
message.add(m.Paragraph(tr(
'Use the New button to create new profile. When prompted, give your '
'profile a name e.g. \'JakartaProfile\'.'
)))
message.add(m.Paragraph(tr(
'Note: The profile must be saved in your home directory under '
'.qgis2/minimum_needs in order for InaSAFE to successfully detect it.'
)))
message.add(m.Paragraph(tr(
'An alternative way to create a new profile is to use the Save as to '
'clone an existing profile. The clone profile can then be edited '
'according to your specific needs.'
)))
message.add(m.Heading(tr('Active profile'), **INFO_STYLE))
message.add(m.Paragraph(tr(
'It is important to note, that which ever profile you select in the '
'Profile pick list, will be considered active and will be used as '
'the basis for all minimum needs analysis. You need to restart '
'QGIS before the changed profile become active.'
)))
return message
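# Hedged illustration (not InaSAFE's actual implementation): a minimal
# sketch of the token replacement described above; the function name and
# signature are hypothetical.
def _render_readable_sentence(template, values):
    """Replace '{{ Token }}' placeholders in a readable sentence template
    with concrete values, e.g. '{{ Default }}' -> '2.8'."""
    for token, value in values.items():
        template = template.replace('{{ %s }}' % token, str(value))
    return template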
|
barmalei/scalpel
|
refs/heads/master
|
test/gravity/tae/corpora/test_conll.py
|
2
|
from gravity.tae.corpora.conll import CoNLL, CoNLL2002, CoNLL2003, CoNLL2000
from gravity.tae.tokenizer import Token
import unittest, os, re
class TestCoNLL(unittest.TestCase):
def test_conll(self):
self.assertEqual(os.path.exists(CoNLL.home()), True)
def f(): CoNLL.path('ddd')
self.assertRaises(BaseException, f)
def f(): CoNLL2002.path('ddd')
self.assertRaises(BaseException, f)
def f(): CoNLL2003.path('ddd')
self.assertRaises(BaseException, f)
def f(): CoNLL2000.path('ddd')
self.assertRaises(BaseException, f)
def test_conll2000(self):
def f(): CoNLL2000.testa('d')
self.assertRaises(BaseException, f)
def f(): CoNLL2000.testb('d')
self.assertRaises(NotImplementedError, f)
def f(): CoNLL2000.train('d')
self.assertRaises(BaseException, f)
def f(): CoNLL2000.testa('en').baseline()
self.assertRaises(NotImplementedError, f)
c = CoNLL2000.testa("en")
self._test_corpus(c)
c = CoNLL2000.train("en")
self._test_corpus(c)
def test_conll2002(self):
def f(): CoNLL2002.testa('d')
self.assertRaises(BaseException, f)
def f(): CoNLL2002.testb('d')
self.assertRaises(BaseException, f)
def f(): CoNLL2002.train('d')
self.assertRaises(BaseException, f)
c = CoNLL2002.testa("nl")
b = c.baseline()
self.assertEqual(b['phrases'], len([ e for e in c.ne_tokens()]))
self._test_corpuses(CoNLL2002, "nl")
self._test_corpuses(CoNLL2002, "es")
def test_conll2003(self):
c = CoNLL2003.testa("en")
b = c.baseline()
self.assertEqual(b['phrases'], len([ e for e in c.ne_tokens()]))
self._test_corpuses(CoNLL2003, 'en')
def test_eval(self):
c = CoNLL2000.testa("en")
t = [ e for e in c.iob_tokens('SYN') ]
all_t = [e for e in c.tokens() ]
r = c.conlleval(t)
self._test_self_eval_result(r, all_t, t)
self._test_self_eval(CoNLL2002.testa("nl"))
self._test_self_eval(CoNLL2002.testb("nl"))
self._test_self_eval(CoNLL2002.train("nl"))
self._test_self_eval(CoNLL2002.testa("es"))
self._test_self_eval(CoNLL2002.testb("es"))
self._test_self_eval(CoNLL2002.train("es"))
self._test_self_eval(CoNLL2003.testa("en"))
self._test_self_eval(CoNLL2003.testb("en"))
self._test_self_eval(CoNLL2003.train("en"))
def _test_self_eval(self, c):
t = [ e for e in c.ne_tokens() ]
all_t = [e for e in c.tokens() ]
r = c.conlleval(t)
self._test_self_eval_result(r, all_t, t)
for tag in ('LOC', 'MISC', 'ORG', 'PER'):
self.assertEqual(r[tag+'_FB1'], 100.0)
self.assertEqual(r[tag+'_recall'], 100.0)
self.assertEqual(r[tag+'_precision'], 100.0)
def _test_self_eval_result(self, r, all_t, t):
self.assertEqual(r['accuracy'], 100.0)
self.assertEqual(r['FB1'], 100.0)
self.assertEqual(r['recall'], 100.0)
self.assertEqual(r['precision'], 100.0)
self.assertEqual(r['phrases'], r['correct_phrases'])
self.assertEqual(r['tokens'], len(all_t))
self.assertEqual(r['phrases'], len(t))
def _test_corpuses(self, clz, lang):
c = clz.testa(lang)
self._test_corpus(c)
c = clz.testb(lang)
self._test_corpus(c)
c = clz.train(lang)
self._test_corpus(c)
def _test_corpus(self, corpus):
text = corpus.text()
for t in corpus.tokens():
if t[0] != None and text[t[1]:t[1] + t[2]] != t[0]:
raise BaseException("Wrong entity '%s' location (%d, %d)" % (t[0], t[1], t[2]))
tags_names = corpus.tags_names
if 'POS' in tags_names:
for t in corpus.tokens('POS'):
if t[0] != None and text[t[1]:t[1] + t[2]] != t[0]:
raise BaseException("Wrong entity '%s' location (%d, %d)" % (t[0], t[1], t[2]))
if 'NE' in tags_names:
for t in corpus.iob_tokens('NE'):
if t[0] != None and text[t[1]:t[1] + t[2]] != t[0]:
raise BaseException("Wrong entity '%s' location (%d, %d)" % (t[0], t[1], t[2]))
if 'SYN' in tags_names:
for t in corpus.iob_tokens('SYN'):
if t[0] != None and text[t[1]:t[1] + t[2]] != t[0]:
raise BaseException("Wrong entity '%s' location (%d, %d)" % (t[0], t[1], t[2]))
# c, tokens = 0, [e for e in corpus.tokens('NE')]
# for t in ne_tokens:
# c += len(t[0].split(' '))
#
# if c != len(tokens): raise BaseException("Wrong NE entity combining (%d != %d)" % (c, len(tokens)))
if __name__ == '__main__':
unittest.main()
|
nvoron23/statsmodels
|
refs/heads/master
|
statsmodels/graphics/functional.py
|
31
|
"""Module for functional boxplots."""
from statsmodels.compat.python import combinations, range
import numpy as np
from scipy import stats
from scipy.special import factorial
from . import utils
__all__ = ['fboxplot', 'rainbowplot', 'banddepth']
def fboxplot(data, xdata=None, labels=None, depth=None, method='MBD',
             wfactor=1.5, ax=None, plot_opts=None):
"""Plot functional boxplot.
A functional boxplot is the analog of a boxplot for functional data.
Functional data is any type of data that varies over a continuum, i.e.
    curves, probability distributions, seasonal data, etc.
    The data is first ordered; the order statistic used here is `banddepth`.
Plotted are then the median curve, the envelope of the 50% central region,
the maximum non-outlying envelope and the outlier curves.
Parameters
----------
data : sequence of ndarrays or 2-D ndarray
The vectors of functions to create a functional boxplot from. If a
sequence of 1-D arrays, these should all be the same size.
The first axis is the function index, the second axis the one along
which the function is defined. So ``data[0, :]`` is the first
functional curve.
xdata : ndarray, optional
        The independent variable for the data. If not given, it is assumed to
        be an array of integers ``0..N-1``, with N the length of the vectors
        in `data`.
labels : sequence of scalar or str, optional
The labels or identifiers of the curves in `data`. If given, outliers
are labeled in the plot.
depth : ndarray, optional
A 1-D array of band depths for `data`, or equivalent order statistic.
If not given, it will be calculated through `banddepth`.
method : {'MBD', 'BD2'}, optional
The method to use to calculate the band depth. Default is 'MBD'.
wfactor : float, optional
Factor by which the central 50% region is multiplied to find the outer
region (analog of "whiskers" of a classical boxplot).
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
plot_opts : dict, optional
        A dictionary with plotting options. Any of the following can be
        provided; if not present in `plot_opts`, the defaults will be used::
- 'cmap_outliers', a Matplotlib LinearSegmentedColormap instance.
- 'c_inner', valid MPL color. Color of the central 50% region
- 'c_outer', valid MPL color. Color of the non-outlying region
- 'c_median', valid MPL color. Color of the median.
- 'lw_outliers', scalar. Linewidth for drawing outlier curves.
- 'lw_median', scalar. Linewidth for drawing the median curve.
- 'draw_nonout', bool. If True, also draw non-outlying curves.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
depth : ndarray
1-D array containing the calculated band depths of the curves.
ix_depth : ndarray
1-D array of indices needed to order curves (or `depth`) from most to
least central curve.
ix_outliers : ndarray
1-D array of indices of outlying curves in `data`.
See Also
--------
banddepth, rainbowplot
Notes
-----
The median curve is the curve with the highest band depth.
    Outliers are defined as curves that fall outside the band created by
    multiplying the central region by `wfactor`. Note that the range over
    which they fall outside this band doesn't matter; a single data point
    outside the band is enough. If the data is noisy, smoothing may therefore
    be required.
The non-outlying region is defined as the band made up of all the
non-outlying curves.
References
----------
[1] Y. Sun and M.G. Genton, "Functional Boxplots", Journal of Computational
and Graphical Statistics, vol. 20, pp. 1-19, 2011.
    [2] R.J. Hyndman and H.L. Shang, "Rainbow Plots, Bagplots, and Boxplots
        for Functional Data", Journal of Computational and Graphical
        Statistics, vol. 19, pp. 29-45, 2010.
Examples
--------
    Load the El Nino dataset, which consists of 60 years' worth of Pacific
    Ocean sea surface temperature data.
>>> import matplotlib.pyplot as plt
>>> import statsmodels.api as sm
>>> data = sm.datasets.elnino.load()
Create a functional boxplot. We see that the years 1982-83 and 1997-98 are
outliers; these are the years where El Nino (a climate pattern
characterized by warming up of the sea surface and higher air pressures)
occurred with unusual intensity.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> res = sm.graphics.fboxplot(data.raw_data[:, 1:], wfactor=2.58,
... labels=data.raw_data[:, 0].astype(int),
... ax=ax)
>>> ax.set_xlabel("Month of the year")
>>> ax.set_ylabel("Sea surface temperature (C)")
>>> ax.set_xticks(np.arange(13, step=3) - 1)
>>> ax.set_xticklabels(["", "Mar", "Jun", "Sep", "Dec"])
>>> ax.set_xlim([-0.2, 11.2])
>>> plt.show()
.. plot:: plots/graphics_functional_fboxplot.py
"""
    fig, ax = utils.create_mpl_ax(ax)
    if plot_opts is None:
        # Avoid the mutable-default-argument trap: the dict is created (and
        # mutated) per call rather than shared across calls.
        plot_opts = {}
    if plot_opts.get('cmap_outliers') is None:
        from matplotlib.cm import rainbow_r
        plot_opts['cmap_outliers'] = rainbow_r
data = np.asarray(data)
if xdata is None:
xdata = np.arange(data.shape[1])
# Calculate band depth if required.
if depth is None:
if method not in ['MBD', 'BD2']:
raise ValueError("Unknown value for parameter `method`.")
depth = banddepth(data, method=method)
else:
if depth.size != data.shape[0]:
raise ValueError("Provided `depth` array is not of correct size.")
# Inner area is 25%-75% region of band-depth ordered curves.
ix_depth = np.argsort(depth)[::-1]
median_curve = data[ix_depth[0], :]
ix_IQR = data.shape[0] // 2
lower = data[ix_depth[0:ix_IQR], :].min(axis=0)
upper = data[ix_depth[0:ix_IQR], :].max(axis=0)
# Determine region for outlier detection
inner_median = np.median(data[ix_depth[0:ix_IQR], :], axis=0)
lower_fence = inner_median - (inner_median - lower) * wfactor
upper_fence = inner_median + (upper - inner_median) * wfactor
# Find outliers.
ix_outliers = []
ix_nonout = []
    for ii in range(data.shape[0]):
        if (np.any(data[ii, :] > upper_fence) or
                np.any(data[ii, :] < lower_fence)):
            ix_outliers.append(ii)
        else:
            ix_nonout.append(ii)
ix_outliers = np.asarray(ix_outliers)
# Plot envelope of all non-outlying data
lower_nonout = data[ix_nonout, :].min(axis=0)
upper_nonout = data[ix_nonout, :].max(axis=0)
    ax.fill_between(xdata, lower_nonout, upper_nonout,
                    color=plot_opts.get('c_outer', (0.75, 0.75, 0.75)))
    # Plot central 50% region
    ax.fill_between(xdata, lower, upper,
                    color=plot_opts.get('c_inner', (0.5, 0.5, 0.5)))
# Plot median curve
ax.plot(xdata, median_curve, color=plot_opts.get('c_median', 'k'),
lw=plot_opts.get('lw_median', 2))
# Plot outliers
    cmap = plot_opts.get('cmap_outliers')
    for ii, ix in enumerate(ix_outliers):
        label = str(labels[ix]) if labels is not None else None
        # Guard the colormap argument against division by zero when there is
        # exactly one outlier.
        color = cmap(float(ii) / max(len(ix_outliers) - 1, 1))
        ax.plot(xdata, data[ix, :], color=color, label=label,
                lw=plot_opts.get('lw_outliers', 1))
if plot_opts.get('draw_nonout', False):
for ix in ix_nonout:
ax.plot(xdata, data[ix, :], 'k-', lw=0.5)
if labels is not None:
ax.legend()
return fig, depth, ix_depth, ix_outliers
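

# The helper below is an added, self-contained sketch of the outlier-fence
# logic documented in the Notes of `fboxplot`; it is illustrative only (not
# part of the statsmodels API) and typically flags the planted curve at
# index 0.
def _fence_sketch(wfactor=1.5):
    """Flag curves that leave the central envelope inflated by `wfactor`."""
    rng = np.random.RandomState(0)
    data = np.cumsum(rng.randn(20, 50), axis=1)
    data[0] += 40.0                          # plant one obvious outlier
    # Central 50% region: envelope of the most-central half by band depth.
    ix_depth = np.argsort(banddepth(data))[::-1]
    central = data[ix_depth[:data.shape[0] // 2], :]
    lower, upper = central.min(axis=0), central.max(axis=0)
    med = np.median(central, axis=0)
    lower_fence = med - (med - lower) * wfactor
    upper_fence = med + (upper - med) * wfactor
    # A single sample outside the fences is enough to flag a curve.
    return [ii for ii in range(data.shape[0])
            if np.any(data[ii] < lower_fence) or
            np.any(data[ii] > upper_fence)]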
def rainbowplot(data, xdata=None, depth=None, method='MBD', ax=None,
cmap=None):
"""Create a rainbow plot for a set of curves.
A rainbow plot contains line plots of all curves in the dataset, colored in
order of functional depth. The median curve is shown in black.
Parameters
----------
data : sequence of ndarrays or 2-D ndarray
The vectors of functions to create a functional boxplot from. If a
sequence of 1-D arrays, these should all be the same size.
The first axis is the function index, the second axis the one along
which the function is defined. So ``data[0, :]`` is the first
functional curve.
xdata : ndarray, optional
        The independent variable for the data. If not given, it is assumed to
        be an array of integers ``0..N-1``, with N the length of the vectors
        in `data`.
depth : ndarray, optional
A 1-D array of band depths for `data`, or equivalent order statistic.
If not given, it will be calculated through `banddepth`.
method : {'MBD', 'BD2'}, optional
The method to use to calculate the band depth. Default is 'MBD'.
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
cmap : Matplotlib LinearSegmentedColormap instance, optional
The colormap used to color curves with. Default is a rainbow colormap,
with red used for the most central and purple for the least central
curves.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
banddepth, fboxplot
References
----------
    [1] R.J. Hyndman and H.L. Shang, "Rainbow Plots, Bagplots, and Boxplots
        for Functional Data", Journal of Computational and Graphical
        Statistics, vol. 19, pp. 29-45, 2010.
Examples
--------
    Load the El Nino dataset, which consists of 60 years' worth of Pacific
    Ocean sea surface temperature data.
>>> import matplotlib.pyplot as plt
>>> import statsmodels.api as sm
>>> data = sm.datasets.elnino.load()
Create a rainbow plot:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> res = sm.graphics.rainbowplot(data.raw_data[:, 1:], ax=ax)
>>> ax.set_xlabel("Month of the year")
>>> ax.set_ylabel("Sea surface temperature (C)")
>>> ax.set_xticks(np.arange(13, step=3) - 1)
>>> ax.set_xticklabels(["", "Mar", "Jun", "Sep", "Dec"])
>>> ax.set_xlim([-0.2, 11.2])
>>> plt.show()
.. plot:: plots/graphics_functional_rainbowplot.py
"""
fig, ax = utils.create_mpl_ax(ax)
if cmap is None:
from matplotlib.cm import rainbow_r
cmap = rainbow_r
data = np.asarray(data)
if xdata is None:
xdata = np.arange(data.shape[1])
# Calculate band depth if required.
if depth is None:
if method not in ['MBD', 'BD2']:
raise ValueError("Unknown value for parameter `method`.")
depth = banddepth(data, method=method)
else:
if depth.size != data.shape[0]:
raise ValueError("Provided `depth` array is not of correct size.")
ix_depth = np.argsort(depth)[::-1]
# Plot all curves, colored by depth
num_curves = data.shape[0]
for ii in range(num_curves):
ax.plot(xdata, data[ix_depth[ii], :], c=cmap(ii / (num_curves - 1.)))
# Plot the median curve
median_curve = data[ix_depth[0], :]
ax.plot(xdata, median_curve, 'k-', lw=2)
return fig
def banddepth(data, method='MBD'):
"""Calculate the band depth for a set of functional curves.
    Band depth is an order statistic for functional data (see `fboxplot`),
    with a higher band depth indicating larger "centrality".  By analogy with
    scalar data, the functional curve with the highest band depth is called
    the median curve, and the band made up from the first N/2 of N curves is
    the 50% central region.
Parameters
----------
data : ndarray
The vectors of functions to create a functional boxplot from.
The first axis is the function index, the second axis the one along
which the function is defined. So ``data[0, :]`` is the first
functional curve.
method : {'MBD', 'BD2'}, optional
Whether to use the original band depth (with J=2) of [1]_ or the
modified band depth. See Notes for details.
Returns
-------
depth : ndarray
Depth values for functional curves.
Notes
-----
Functional band depth as an order statistic for functional data was
proposed in [1]_ and applied to functional boxplots and bagplots in [2]_.
The method 'BD2' checks for each curve whether it lies completely inside
bands constructed from two curves. All permutations of two curves in the
set of curves are used, and the band depth is normalized to one. Due to
the complete curve having to fall within the band, this method yields a lot
of ties.
The method 'MBD' is similar to 'BD2', but checks the fraction of the curve
falling within the bands. It therefore generates very few ties.
References
----------
.. [1] S. Lopez-Pintado and J. Romo, "On the Concept of Depth for
Functional Data", Journal of the American Statistical Association,
vol. 104, pp. 718-734, 2009.
.. [2] Y. Sun and M.G. Genton, "Functional Boxplots", Journal of
Computational and Graphical Statistics, vol. 20, pp. 1-19, 2011.
"""
def _band2(x1, x2, curve):
xb = np.vstack([x1, x2])
if np.any(curve < xb.min(axis=0)) or np.any(curve > xb.max(axis=0)):
res = 0
else:
res = 1
return res
def _band_mod(x1, x2, curve):
xb = np.vstack([x1, x2])
res = np.logical_and(curve >= xb.min(axis=0),
curve <= xb.max(axis=0))
return np.sum(res) / float(res.size)
if method == 'BD2':
band = _band2
elif method == 'MBD':
band = _band_mod
else:
raise ValueError("Unknown input value for parameter `method`.")
    num = data.shape[0]
    ix = np.arange(num)
    # Number of two-curve bands: C(num, 2) = num! / (2! * (num - 2)!).
    normfactor = factorial(num) / 2. / factorial(num - 2)
    depth = []
    for ii in range(num):
        res = 0
        for ix1, ix2 in combinations(ix, 2):
            res += band(data[ix1, :], data[ix2, :], data[ii, :])
        # Normalize by the number of bands to get the band depth.
        depth.append(float(res) / normfactor)
    return np.asarray(depth)
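

# Added usage sketch (illustrative only, not part of the original module):
# a curve planted far away from an otherwise similar bundle lies in no band
# except those it spans itself, so `banddepth` assigns it the smallest depth.
def _banddepth_sketch():
    rng = np.random.RandomState(42)
    data = rng.randn(10, 30).cumsum(axis=1)
    data[3] += 50.0                          # plant one far-away curve
    depth = banddepth(data, method='MBD')
    assert int(np.argmin(depth)) == 3
    return depth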
|
MalloyPower/parsing-python
|
refs/heads/master
|
front-end/testsuite-python-lib/Python-2.6/Lib/encodings/johab.py
|
816
|
#
# johab.py: Python Unicode Codec for JOHAB
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_kr, codecs
import _multibytecodec as mbc
codec = _codecs_kr.getcodec('johab')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='johab',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
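

# Added round-trip sketch (illustrative only): once the ``encodings`` package
# has registered this module via getregentry(), the codec is addressable by
# name, so Hangul text survives an encode/decode round trip.
def _johab_roundtrip():
    text = u'\uac00\ub098\ub2e4'             # the syllables GA, NA, DA
    data = text.encode('johab')
    assert data.decode('johab') == text
    return data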
|
barbarubra/Don-t-know-What-i-m-doing.
|
refs/heads/master
|
python/src/Lib/test/test_set.py
|
48
|
import unittest
from test import test_support
import gc
import weakref
import operator
import copy
import pickle
import os
from random import randrange, shuffle
import sys
import collections
class PassThru(Exception):
pass
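# check_pass_thru is a generator function: the unreachable ``yield`` below
# makes calling it return a generator whose first next() raises PassThru,
# letting the tests verify that set operations propagate iterator exceptions.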
def check_pass_thru():
raise PassThru
yield 1
class BadCmp:
def __hash__(self):
return 1
def __cmp__(self, other):
raise RuntimeError
class ReprWrapper:
'Used to test self-referential repr() calls'
def __repr__(self):
return repr(self.value)
class HashCountingInt(int):
'int-like object that counts the number of times __hash__ is called'
def __init__(self, *args):
self.hash_count = 0
def __hash__(self):
self.hash_count += 1
return int.__hash__(self)
class TestJointOps(unittest.TestCase):
# Tests common to both set and frozenset
def setUp(self):
self.word = word = 'simsalabim'
self.otherword = 'madagascar'
self.letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.s = self.thetype(word)
self.d = dict.fromkeys(word)
def test_new_or_init(self):
self.assertRaises(TypeError, self.thetype, [], 2)
def test_uniquification(self):
actual = sorted(self.s)
expected = sorted(self.d)
self.assertEqual(actual, expected)
self.assertRaises(PassThru, self.thetype, check_pass_thru())
self.assertRaises(TypeError, self.thetype, [[]])
def test_len(self):
self.assertEqual(len(self.s), len(self.d))
def test_contains(self):
for c in self.letters:
self.assertEqual(c in self.s, c in self.d)
self.assertRaises(TypeError, self.s.__contains__, [[]])
s = self.thetype([frozenset(self.letters)])
self.assert_(self.thetype(self.letters) in s)
def test_union(self):
u = self.s.union(self.otherword)
for c in self.letters:
self.assertEqual(c in u, c in self.d or c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(u), self.thetype)
self.assertRaises(PassThru, self.s.union, check_pass_thru())
self.assertRaises(TypeError, self.s.union, [[]])
for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
self.assertEqual(self.thetype('abcba').union(C('cdc')), set('abcd'))
self.assertEqual(self.thetype('abcba').union(C('efgfe')), set('abcefg'))
self.assertEqual(self.thetype('abcba').union(C('ccb')), set('abc'))
self.assertEqual(self.thetype('abcba').union(C('ef')), set('abcef'))
self.assertEqual(self.thetype('abcba').union(C('ef'), C('fg')), set('abcefg'))
def test_or(self):
i = self.s.union(self.otherword)
self.assertEqual(self.s | set(self.otherword), i)
self.assertEqual(self.s | frozenset(self.otherword), i)
try:
self.s | self.otherword
except TypeError:
pass
else:
self.fail("s|t did not screen-out general iterables")
def test_intersection(self):
i = self.s.intersection(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.thetype)
self.assertRaises(PassThru, self.s.intersection, check_pass_thru())
for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
self.assertEqual(self.thetype('abcba').intersection(C('cdc')), set('cc'))
self.assertEqual(self.thetype('abcba').intersection(C('efgfe')), set(''))
self.assertEqual(self.thetype('abcba').intersection(C('ccb')), set('bc'))
self.assertEqual(self.thetype('abcba').intersection(C('ef')), set(''))
self.assertEqual(self.thetype('abcba').intersection(C('cbcf'), C('bag')), set('b'))
s = self.thetype('abcba')
z = s.intersection()
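        # Note: ``self.thetype == frozenset()`` compares a class with an empty
        # frozenset instance and is therefore always False, so the else branch
        # is always taken; intersection() returns a new object for both types.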
if self.thetype == frozenset():
self.assertEqual(id(s), id(z))
else:
self.assertNotEqual(id(s), id(z))
def test_isdisjoint(self):
def f(s1, s2):
'Pure python equivalent of isdisjoint()'
return not set(s1).intersection(s2)
for larg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
s1 = self.thetype(larg)
for rarg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
s2 = C(rarg)
actual = s1.isdisjoint(s2)
expected = f(s1, s2)
self.assertEqual(actual, expected)
self.assert_(actual is True or actual is False)
def test_and(self):
i = self.s.intersection(self.otherword)
self.assertEqual(self.s & set(self.otherword), i)
self.assertEqual(self.s & frozenset(self.otherword), i)
try:
self.s & self.otherword
except TypeError:
pass
else:
self.fail("s&t did not screen-out general iterables")
def test_difference(self):
i = self.s.difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c not in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.thetype)
self.assertRaises(PassThru, self.s.difference, check_pass_thru())
self.assertRaises(TypeError, self.s.difference, [[]])
for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
self.assertEqual(self.thetype('abcba').difference(C('cdc')), set('ab'))
self.assertEqual(self.thetype('abcba').difference(C('efgfe')), set('abc'))
self.assertEqual(self.thetype('abcba').difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').difference(C('ef')), set('abc'))
self.assertEqual(self.thetype('abcba').difference(), set('abc'))
self.assertEqual(self.thetype('abcba').difference(C('a'), C('b')), set('c'))
def test_sub(self):
i = self.s.difference(self.otherword)
self.assertEqual(self.s - set(self.otherword), i)
self.assertEqual(self.s - frozenset(self.otherword), i)
try:
self.s - self.otherword
except TypeError:
pass
else:
self.fail("s-t did not screen-out general iterables")
def test_symmetric_difference(self):
i = self.s.symmetric_difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, (c in self.d) ^ (c in self.otherword))
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.thetype)
self.assertRaises(PassThru, self.s.symmetric_difference, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
self.assertEqual(self.thetype('abcba').symmetric_difference(C('cdc')), set('abd'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('efgfe')), set('abcefg'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ef')), set('abcef'))
def test_xor(self):
i = self.s.symmetric_difference(self.otherword)
self.assertEqual(self.s ^ set(self.otherword), i)
self.assertEqual(self.s ^ frozenset(self.otherword), i)
try:
self.s ^ self.otherword
except TypeError:
pass
else:
self.fail("s^t did not screen-out general iterables")
def test_equality(self):
self.assertEqual(self.s, set(self.word))
self.assertEqual(self.s, frozenset(self.word))
self.assertEqual(self.s == self.word, False)
self.assertNotEqual(self.s, set(self.otherword))
self.assertNotEqual(self.s, frozenset(self.otherword))
self.assertEqual(self.s != self.word, True)
def test_setOfFrozensets(self):
t = map(frozenset, ['abcdef', 'bcd', 'bdcb', 'fed', 'fedccba'])
s = self.thetype(t)
self.assertEqual(len(s), 3)
def test_compare(self):
self.assertRaises(TypeError, self.s.__cmp__, self.s)
def test_sub_and_super(self):
p, q, r = map(self.thetype, ['ab', 'abcde', 'def'])
self.assert_(p < q)
self.assert_(p <= q)
self.assert_(q <= q)
self.assert_(q > p)
self.assert_(q >= p)
self.failIf(q < r)
self.failIf(q <= r)
self.failIf(q > r)
self.failIf(q >= r)
self.assert_(set('a').issubset('abc'))
self.assert_(set('abc').issuperset('a'))
self.failIf(set('a').issubset('cbs'))
self.failIf(set('cbs').issuperset('a'))
def test_pickling(self):
for i in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(self.s, i)
dup = pickle.loads(p)
self.assertEqual(self.s, dup, "%s != %s" % (self.s, dup))
if type(self.s) not in (set, frozenset):
self.s.x = 10
p = pickle.dumps(self.s)
dup = pickle.loads(p)
self.assertEqual(self.s.x, dup.x)
def test_deepcopy(self):
class Tracer:
def __init__(self, value):
self.value = value
def __hash__(self):
return self.value
def __deepcopy__(self, memo=None):
return Tracer(self.value + 1)
t = Tracer(10)
s = self.thetype([t])
dup = copy.deepcopy(s)
self.assertNotEqual(id(s), id(dup))
for elem in dup:
newt = elem
self.assertNotEqual(id(t), id(newt))
self.assertEqual(t.value + 1, newt.value)
def test_gc(self):
# Create a nest of cycles to exercise overall ref count check
class A:
pass
s = set(A() for i in xrange(1000))
for elem in s:
elem.cycle = s
elem.sub = elem
elem.set = set([elem])
def test_subclass_with_custom_hash(self):
# Bug #1257731
class H(self.thetype):
def __hash__(self):
return int(id(self) & 0x7fffffff)
        s = H()
        f = set()
f.add(s)
self.assert_(s in f)
f.remove(s)
f.add(s)
f.discard(s)
def test_badcmp(self):
s = self.thetype([BadCmp()])
# Detect comparison errors during insertion and lookup
self.assertRaises(RuntimeError, self.thetype, [BadCmp(), BadCmp()])
self.assertRaises(RuntimeError, s.__contains__, BadCmp())
# Detect errors during mutating operations
if hasattr(s, 'add'):
self.assertRaises(RuntimeError, s.add, BadCmp())
self.assertRaises(RuntimeError, s.discard, BadCmp())
self.assertRaises(RuntimeError, s.remove, BadCmp())
def test_cyclical_repr(self):
w = ReprWrapper()
s = self.thetype([w])
w.value = s
name = repr(s).partition('(')[0] # strip class name from repr string
self.assertEqual(repr(s), '%s([%s(...)])' % (name, name))
def test_cyclical_print(self):
w = ReprWrapper()
s = self.thetype([w])
w.value = s
fo = open(test_support.TESTFN, "wb")
try:
print >> fo, s,
fo.close()
fo = open(test_support.TESTFN, "rb")
self.assertEqual(fo.read(), repr(s))
finally:
fo.close()
test_support.unlink(test_support.TESTFN)
def test_do_not_rehash_dict_keys(self):
n = 10
d = dict.fromkeys(map(HashCountingInt, xrange(n)))
self.assertEqual(sum(elem.hash_count for elem in d), n)
s = self.thetype(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
s.difference(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
if hasattr(s, 'symmetric_difference_update'):
s.symmetric_difference_update(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
d2 = dict.fromkeys(set(d))
self.assertEqual(sum(elem.hash_count for elem in d), n)
d3 = dict.fromkeys(frozenset(d))
self.assertEqual(sum(elem.hash_count for elem in d), n)
d3 = dict.fromkeys(frozenset(d), 123)
self.assertEqual(sum(elem.hash_count for elem in d), n)
self.assertEqual(d3, dict.fromkeys(d, 123))
def test_container_iterator(self):
# Bug #3680: tp_traverse was not implemented for set iterator object
class C(object):
pass
obj = C()
ref = weakref.ref(obj)
container = set([obj, 1])
obj.x = iter(container)
del obj, container
gc.collect()
self.assert_(ref() is None, "Cycle was not collected")
class TestSet(TestJointOps):
thetype = set
def test_init(self):
s = self.thetype()
s.__init__(self.word)
self.assertEqual(s, set(self.word))
s.__init__(self.otherword)
self.assertEqual(s, set(self.otherword))
        self.assertRaises(TypeError, s.__init__, s, 2)
        self.assertRaises(TypeError, s.__init__, 1)
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertNotEqual(id(s), id(t))
def test_hash(self):
self.assertRaises(TypeError, hash, self.s)
def test_clear(self):
self.s.clear()
self.assertEqual(self.s, set())
self.assertEqual(len(self.s), 0)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(self.s, dup)
self.assertNotEqual(id(self.s), id(dup))
def test_add(self):
self.s.add('Q')
self.assert_('Q' in self.s)
dup = self.s.copy()
self.s.add('Q')
self.assertEqual(self.s, dup)
self.assertRaises(TypeError, self.s.add, [])
def test_remove(self):
self.s.remove('a')
self.assert_('a' not in self.s)
self.assertRaises(KeyError, self.s.remove, 'Q')
self.assertRaises(TypeError, self.s.remove, [])
s = self.thetype([frozenset(self.word)])
self.assert_(self.thetype(self.word) in s)
s.remove(self.thetype(self.word))
self.assert_(self.thetype(self.word) not in s)
self.assertRaises(KeyError, self.s.remove, self.thetype(self.word))
def test_remove_keyerror_unpacking(self):
# bug: www.python.org/sf/1576657
for v1 in ['Q', (1,)]:
try:
self.s.remove(v1)
except KeyError, e:
v2 = e.args[0]
self.assertEqual(v1, v2)
else:
self.fail()
def test_remove_keyerror_set(self):
key = self.thetype([3, 4])
try:
self.s.remove(key)
except KeyError as e:
self.assert_(e.args[0] is key,
"KeyError should be {0}, not {1}".format(key,
e.args[0]))
else:
self.fail()
def test_discard(self):
self.s.discard('a')
self.assert_('a' not in self.s)
self.s.discard('Q')
self.assertRaises(TypeError, self.s.discard, [])
s = self.thetype([frozenset(self.word)])
self.assert_(self.thetype(self.word) in s)
s.discard(self.thetype(self.word))
self.assert_(self.thetype(self.word) not in s)
s.discard(self.thetype(self.word))
def test_pop(self):
for i in xrange(len(self.s)):
elem = self.s.pop()
self.assert_(elem not in self.s)
self.assertRaises(KeyError, self.s.pop)
def test_update(self):
retval = self.s.update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
self.assert_(c in self.s)
self.assertRaises(PassThru, self.s.update, check_pass_thru())
self.assertRaises(TypeError, self.s.update, [[]])
for p, q in (('cdc', 'abcd'), ('efgfe', 'abcefg'), ('ccb', 'abc'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p)), None)
self.assertEqual(s, set(q))
for p in ('cdc', 'efgfe', 'ccb', 'ef', 'abcda'):
q = 'ahi'
for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p), C(q)), None)
self.assertEqual(s, set(s) | set(p) | set(q))
def test_ior(self):
self.s |= set(self.otherword)
for c in (self.word + self.otherword):
self.assert_(c in self.s)
def test_intersection_update(self):
retval = self.s.intersection_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assert_(c in self.s)
else:
self.assert_(c not in self.s)
self.assertRaises(PassThru, self.s.intersection_update, check_pass_thru())
self.assertRaises(TypeError, self.s.intersection_update, [[]])
for p, q in (('cdc', 'c'), ('efgfe', ''), ('ccb', 'bc'), ('ef', '')):
for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.intersection_update(C(p)), None)
self.assertEqual(s, set(q))
ss = 'abcba'
s = self.thetype(ss)
t = 'cbc'
self.assertEqual(s.intersection_update(C(p), C(t)), None)
self.assertEqual(s, set('abcba')&set(p)&set(t))
def test_iand(self):
self.s &= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assert_(c in self.s)
else:
self.assert_(c not in self.s)
def test_difference_update(self):
retval = self.s.difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assert_(c in self.s)
else:
self.assert_(c not in self.s)
self.assertRaises(PassThru, self.s.difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.difference_update, [[]])
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (('cdc', 'ab'), ('efgfe', 'abc'), ('ccb', 'a'), ('ef', 'abc')):
for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.difference_update(C(p)), None)
self.assertEqual(s, set(q))
s = self.thetype('abcdefghih')
s.difference_update()
self.assertEqual(s, self.thetype('abcdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('aba'))
self.assertEqual(s, self.thetype('cdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('cdc'), C('aba'))
self.assertEqual(s, self.thetype('efghih'))
def test_isub(self):
self.s -= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assert_(c in self.s)
else:
self.assert_(c not in self.s)
def test_symmetric_difference_update(self):
retval = self.s.symmetric_difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assert_(c in self.s)
else:
self.assert_(c not in self.s)
self.assertRaises(PassThru, self.s.symmetric_difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (('cdc', 'abd'), ('efgfe', 'abcefg'), ('ccb', 'a'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.symmetric_difference_update(C(p)), None)
self.assertEqual(s, set(q))
def test_ixor(self):
self.s ^= set(self.otherword)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assert_(c in self.s)
else:
self.assert_(c not in self.s)
def test_inplace_on_self(self):
t = self.s.copy()
t |= t
self.assertEqual(t, self.s)
t &= t
self.assertEqual(t, self.s)
t -= t
self.assertEqual(t, self.thetype())
t = self.s.copy()
t ^= t
self.assertEqual(t, self.thetype())
def test_weakref(self):
s = self.thetype('gallahad')
p = weakref.proxy(s)
self.assertEqual(str(p), str(s))
s = None
self.assertRaises(ReferenceError, str, p)
# C API test only available in a debug build
if hasattr(set, "test_c_api"):
def test_c_api(self):
self.assertEqual(set('abc').test_c_api(), True)
class SetSubclass(set):
pass
class TestSetSubclass(TestSet):
thetype = SetSubclass
class SetSubclassWithKeywordArgs(set):
def __init__(self, iterable=[], newarg=None):
set.__init__(self, iterable)
class TestSetSubclassWithKeywordArgs(TestSet):
def test_keywords_in_subclass(self):
'SF bug #1486663 -- this used to erroneously raise a TypeError'
SetSubclassWithKeywordArgs(newarg=1)
class TestFrozenSet(TestJointOps):
thetype = frozenset
def test_init(self):
s = self.thetype(self.word)
s.__init__(self.otherword)
self.assertEqual(s, set(self.word))
def test_singleton_empty_frozenset(self):
f = frozenset()
efs = [frozenset(), frozenset([]), frozenset(()), frozenset(''),
frozenset(), frozenset([]), frozenset(()), frozenset(''),
frozenset(xrange(0)), frozenset(frozenset()),
frozenset(f), f]
# All of the empty frozensets should have just one id()
self.assertEqual(len(set(map(id, efs))), 1)
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertEqual(id(s), id(t))
def test_hash(self):
self.assertEqual(hash(self.thetype('abcdeb')),
hash(self.thetype('ebecda')))
# make sure that all permutations give the same hash value
n = 100
seq = [randrange(n) for i in xrange(n)]
results = set()
for i in xrange(200):
shuffle(seq)
results.add(hash(self.thetype(seq)))
self.assertEqual(len(results), 1)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(id(self.s), id(dup))
def test_frozen_as_dictkey(self):
seq = range(10) + list('abcdefg') + ['apple']
key1 = self.thetype(seq)
key2 = self.thetype(reversed(seq))
self.assertEqual(key1, key2)
self.assertNotEqual(id(key1), id(key2))
d = {}
d[key1] = 42
self.assertEqual(d[key2], 42)
def test_hash_caching(self):
f = self.thetype('abcdcda')
self.assertEqual(hash(f), hash(f))
def test_hash_effectiveness(self):
n = 13
hashvalues = set()
addhashvalue = hashvalues.add
elemmasks = [(i+1, 1<<i) for i in range(n)]
for i in xrange(2**n):
addhashvalue(hash(frozenset([e for e, m in elemmasks if m&i])))
self.assertEqual(len(hashvalues), 2**n)
class FrozenSetSubclass(frozenset):
pass
class TestFrozenSetSubclass(TestFrozenSet):
thetype = FrozenSetSubclass
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertNotEqual(id(s), id(t))
def test_copy(self):
dup = self.s.copy()
self.assertNotEqual(id(self.s), id(dup))
def test_nested_empty_constructor(self):
s = self.thetype()
t = self.thetype(s)
self.assertEqual(s, t)
def test_singleton_empty_frozenset(self):
Frozenset = self.thetype
f = frozenset()
F = Frozenset()
efs = [Frozenset(), Frozenset([]), Frozenset(()), Frozenset(''),
Frozenset(), Frozenset([]), Frozenset(()), Frozenset(''),
Frozenset(xrange(0)), Frozenset(Frozenset()),
Frozenset(frozenset()), f, F, Frozenset(f), Frozenset(F)]
# All empty frozenset subclass instances should have different ids
self.assertEqual(len(set(map(id, efs))), len(efs))
# Tests taken from test_sets.py =============================================
empty_set = set()
#==============================================================================
class TestBasicOps(unittest.TestCase):
def test_repr(self):
if self.repr is not None:
self.assertEqual(repr(self.set), self.repr)
def test_print(self):
fo = open(test_support.TESTFN, "wb")
try:
print >> fo, self.set,
fo.close()
fo = open(test_support.TESTFN, "rb")
self.assertEqual(fo.read(), repr(self.set))
finally:
fo.close()
test_support.unlink(test_support.TESTFN)
def test_length(self):
self.assertEqual(len(self.set), self.length)
def test_self_equality(self):
self.assertEqual(self.set, self.set)
def test_equivalent_equality(self):
self.assertEqual(self.set, self.dup)
def test_copy(self):
self.assertEqual(self.set.copy(), self.dup)
def test_self_union(self):
result = self.set | self.set
self.assertEqual(result, self.dup)
def test_empty_union(self):
result = self.set | empty_set
self.assertEqual(result, self.dup)
def test_union_empty(self):
result = empty_set | self.set
self.assertEqual(result, self.dup)
def test_self_intersection(self):
result = self.set & self.set
self.assertEqual(result, self.dup)
def test_empty_intersection(self):
result = self.set & empty_set
self.assertEqual(result, empty_set)
def test_intersection_empty(self):
result = empty_set & self.set
self.assertEqual(result, empty_set)
def test_self_isdisjoint(self):
result = self.set.isdisjoint(self.set)
self.assertEqual(result, not self.set)
def test_empty_isdisjoint(self):
result = self.set.isdisjoint(empty_set)
self.assertEqual(result, True)
def test_isdisjoint_empty(self):
result = empty_set.isdisjoint(self.set)
self.assertEqual(result, True)
def test_self_symmetric_difference(self):
result = self.set ^ self.set
self.assertEqual(result, empty_set)
    def test_empty_symmetric_difference(self):
result = self.set ^ empty_set
self.assertEqual(result, self.set)
def test_self_difference(self):
result = self.set - self.set
self.assertEqual(result, empty_set)
def test_empty_difference(self):
result = self.set - empty_set
self.assertEqual(result, self.dup)
def test_empty_difference_rev(self):
result = empty_set - self.set
self.assertEqual(result, empty_set)
def test_iteration(self):
for v in self.set:
self.assert_(v in self.values)
setiter = iter(self.set)
# note: __length_hint__ is an internal undocumented API,
# don't rely on it in your own programs
self.assertEqual(setiter.__length_hint__(), len(self.set))
def test_pickling(self):
p = pickle.dumps(self.set)
copy = pickle.loads(p)
self.assertEqual(self.set, copy,
"%s != %s" % (self.set, copy))
#------------------------------------------------------------------------------
class TestBasicOpsEmpty(TestBasicOps):
def setUp(self):
self.case = "empty set"
self.values = []
self.set = set(self.values)
self.dup = set(self.values)
self.length = 0
self.repr = "set([])"
#------------------------------------------------------------------------------
class TestBasicOpsSingleton(TestBasicOps):
def setUp(self):
self.case = "unit set (number)"
self.values = [3]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "set([3])"
def test_in(self):
self.failUnless(3 in self.set)
def test_not_in(self):
self.failUnless(2 not in self.set)
#------------------------------------------------------------------------------
class TestBasicOpsTuple(TestBasicOps):
def setUp(self):
self.case = "unit set (tuple)"
self.values = [(0, "zero")]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "set([(0, 'zero')])"
def test_in(self):
self.failUnless((0, "zero") in self.set)
def test_not_in(self):
self.failUnless(9 not in self.set)
#------------------------------------------------------------------------------
class TestBasicOpsTriple(TestBasicOps):
def setUp(self):
self.case = "triple set"
self.values = [0, "zero", operator.add]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
self.repr = None
#==============================================================================
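# As with check_pass_thru above, the unreachable ``yield`` makes baditer a
# generator whose first next() raises TypeError; gooditer yields normally.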
def baditer():
raise TypeError
yield True
def gooditer():
yield True
class TestExceptionPropagation(unittest.TestCase):
"""SF 628246: Set constructor should not trap iterator TypeErrors"""
def test_instanceWithException(self):
self.assertRaises(TypeError, set, baditer())
def test_instancesWithoutException(self):
# All of these iterables should load without exception.
set([1,2,3])
set((1,2,3))
set({'one':1, 'two':2, 'three':3})
set(xrange(3))
set('abc')
set(gooditer())
def test_changingSizeWhileIterating(self):
s = set([1,2,3])
try:
for i in s:
s.update([4])
except RuntimeError:
pass
else:
self.fail("no exception when changing size during iteration")
#==============================================================================
class TestSetOfSets(unittest.TestCase):
def test_constructor(self):
inner = frozenset([1])
outer = set([inner])
element = outer.pop()
self.assertEqual(type(element), frozenset)
outer.add(inner) # Rebuild set of sets with .add method
outer.remove(inner)
self.assertEqual(outer, set()) # Verify that remove worked
outer.discard(inner) # Absence of KeyError indicates working fine
#==============================================================================
class TestBinaryOps(unittest.TestCase):
def setUp(self):
self.set = set((2, 4, 6))
def test_eq(self): # SF bug 643115
self.assertEqual(self.set, set({2:1,4:3,6:5}))
def test_union_subset(self):
result = self.set | set([2])
self.assertEqual(result, set((2, 4, 6)))
def test_union_superset(self):
result = self.set | set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_union_overlap(self):
result = self.set | set([3, 4, 5])
self.assertEqual(result, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
result = self.set | set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_intersection_subset(self):
result = self.set & set((2, 4))
self.assertEqual(result, set((2, 4)))
def test_intersection_superset(self):
result = self.set & set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6]))
def test_intersection_overlap(self):
result = self.set & set([3, 4, 5])
self.assertEqual(result, set([4]))
def test_intersection_non_overlap(self):
result = self.set & set([8])
self.assertEqual(result, empty_set)
def test_isdisjoint_subset(self):
result = self.set.isdisjoint(set((2, 4)))
self.assertEqual(result, False)
def test_isdisjoint_superset(self):
result = self.set.isdisjoint(set([2, 4, 6, 8]))
self.assertEqual(result, False)
def test_isdisjoint_overlap(self):
result = self.set.isdisjoint(set([3, 4, 5]))
self.assertEqual(result, False)
def test_isdisjoint_non_overlap(self):
result = self.set.isdisjoint(set([8]))
self.assertEqual(result, True)
def test_sym_difference_subset(self):
result = self.set ^ set((2, 4))
self.assertEqual(result, set([6]))
def test_sym_difference_superset(self):
result = self.set ^ set((2, 4, 6, 8))
self.assertEqual(result, set([8]))
def test_sym_difference_overlap(self):
result = self.set ^ set((3, 4, 5))
self.assertEqual(result, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
result = self.set ^ set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_cmp(self):
a, b = set('a'), set('b')
self.assertRaises(TypeError, cmp, a, b)
# You can view this as a buglet: cmp(a, a) does not raise TypeError,
# because __eq__ is tried before __cmp__, and a.__eq__(a) returns True,
# which Python thinks is good enough to synthesize a cmp() result
# without calling __cmp__.
self.assertEqual(cmp(a, a), 0)
self.assertRaises(TypeError, cmp, a, 12)
self.assertRaises(TypeError, cmp, "abc", a)
#==============================================================================
class TestUpdateOps(unittest.TestCase):
def setUp(self):
self.set = set((2, 4, 6))
def test_union_subset(self):
self.set |= set([2])
self.assertEqual(self.set, set((2, 4, 6)))
def test_union_superset(self):
self.set |= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_overlap(self):
self.set |= set([3, 4, 5])
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
self.set |= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_method_call(self):
self.set.update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_intersection_subset(self):
self.set &= set((2, 4))
self.assertEqual(self.set, set((2, 4)))
def test_intersection_superset(self):
self.set &= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_intersection_overlap(self):
self.set &= set([3, 4, 5])
self.assertEqual(self.set, set([4]))
def test_intersection_non_overlap(self):
self.set &= set([8])
self.assertEqual(self.set, empty_set)
def test_intersection_method_call(self):
self.set.intersection_update(set([3, 4, 5]))
self.assertEqual(self.set, set([4]))
def test_sym_difference_subset(self):
self.set ^= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_sym_difference_superset(self):
self.set ^= set((2, 4, 6, 8))
self.assertEqual(self.set, set([8]))
def test_sym_difference_overlap(self):
self.set ^= set((3, 4, 5))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
self.set ^= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_sym_difference_method_call(self):
self.set.symmetric_difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_difference_subset(self):
self.set -= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_difference_superset(self):
self.set -= set((2, 4, 6, 8))
self.assertEqual(self.set, set([]))
def test_difference_overlap(self):
self.set -= set((3, 4, 5))
self.assertEqual(self.set, set([2, 6]))
def test_difference_non_overlap(self):
self.set -= set([8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_difference_method_call(self):
self.set.difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 6]))
#==============================================================================
class TestMutate(unittest.TestCase):
def setUp(self):
self.values = ["a", "b", "c"]
self.set = set(self.values)
def test_add_present(self):
self.set.add("c")
self.assertEqual(self.set, set("abc"))
def test_add_absent(self):
self.set.add("d")
self.assertEqual(self.set, set("abcd"))
def test_add_until_full(self):
tmp = set()
expected_len = 0
for v in self.values:
tmp.add(v)
expected_len += 1
self.assertEqual(len(tmp), expected_len)
self.assertEqual(tmp, self.set)
def test_remove_present(self):
self.set.remove("b")
self.assertEqual(self.set, set("ac"))
def test_remove_absent(self):
try:
self.set.remove("d")
self.fail("Removing missing element should have raised LookupError")
except LookupError:
pass
def test_remove_until_empty(self):
expected_len = len(self.set)
for v in self.values:
self.set.remove(v)
expected_len -= 1
self.assertEqual(len(self.set), expected_len)
def test_discard_present(self):
self.set.discard("c")
self.assertEqual(self.set, set("ab"))
def test_discard_absent(self):
self.set.discard("d")
self.assertEqual(self.set, set("abc"))
def test_clear(self):
self.set.clear()
self.assertEqual(len(self.set), 0)
def test_pop(self):
popped = {}
while self.set:
popped[self.set.pop()] = None
self.assertEqual(len(popped), len(self.values))
for v in self.values:
self.failUnless(v in popped)
def test_update_empty_tuple(self):
self.set.update(())
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_overlap(self):
self.set.update(("a",))
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_non_overlap(self):
self.set.update(("a", "z"))
self.assertEqual(self.set, set(self.values + ["z"]))
#==============================================================================
class TestSubsets(unittest.TestCase):
case2method = {"<=": "issubset",
">=": "issuperset",
}
reverse = {"==": "==",
"!=": "!=",
"<": ">",
">": "<",
"<=": ">=",
">=": "<=",
}
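    # Each concrete subclass below fixes ``left``, ``right`` and ``cases``
    # (the comparison operators expected to hold); test_issubset then checks
    # the infix spelling, the named-method spelling, and both operand orders.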
def test_issubset(self):
x = self.left
y = self.right
for case in "!=", "==", "<", "<=", ">", ">=":
expected = case in self.cases
# Test the binary infix spelling.
result = eval("x" + case + "y", locals())
self.assertEqual(result, expected)
# Test the "friendly" method-name spelling, if one exists.
if case in TestSubsets.case2method:
method = getattr(x, TestSubsets.case2method[case])
result = method(y)
self.assertEqual(result, expected)
# Now do the same for the operands reversed.
rcase = TestSubsets.reverse[case]
result = eval("y" + rcase + "x", locals())
self.assertEqual(result, expected)
if rcase in TestSubsets.case2method:
method = getattr(y, TestSubsets.case2method[rcase])
result = method(x)
self.assertEqual(result, expected)
#------------------------------------------------------------------------------
class TestSubsetEqualEmpty(TestSubsets):
left = set()
right = set()
name = "both empty"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEqualNonEmpty(TestSubsets):
left = set([1, 2])
right = set([1, 2])
name = "equal pair"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEmptyNonEmpty(TestSubsets):
left = set()
right = set([1, 2])
name = "one empty, one non-empty"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetPartial(TestSubsets):
left = set([1])
right = set([1, 2])
name = "one a non-empty proper subset of other"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetNonOverlap(TestSubsets):
left = set([1])
right = set([2])
name = "neither empty, neither contains"
cases = "!="
#==============================================================================
class TestOnlySetsInBinaryOps(unittest.TestCase):
def test_eq_ne(self):
# Unlike the others, this is testing that == and != *are* allowed.
self.assertEqual(self.other == self.set, False)
self.assertEqual(self.set == self.other, False)
self.assertEqual(self.other != self.set, True)
self.assertEqual(self.set != self.other, True)
def test_ge_gt_le_lt(self):
self.assertRaises(TypeError, lambda: self.set < self.other)
self.assertRaises(TypeError, lambda: self.set <= self.other)
self.assertRaises(TypeError, lambda: self.set > self.other)
self.assertRaises(TypeError, lambda: self.set >= self.other)
self.assertRaises(TypeError, lambda: self.other < self.set)
self.assertRaises(TypeError, lambda: self.other <= self.set)
self.assertRaises(TypeError, lambda: self.other > self.set)
self.assertRaises(TypeError, lambda: self.other >= self.set)
def test_update_operator(self):
try:
self.set |= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_update(self):
if self.otherIsIterable:
self.set.update(self.other)
else:
self.assertRaises(TypeError, self.set.update, self.other)
def test_union(self):
self.assertRaises(TypeError, lambda: self.set | self.other)
self.assertRaises(TypeError, lambda: self.other | self.set)
if self.otherIsIterable:
self.set.union(self.other)
else:
self.assertRaises(TypeError, self.set.union, self.other)
def test_intersection_update_operator(self):
try:
self.set &= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_intersection_update(self):
if self.otherIsIterable:
self.set.intersection_update(self.other)
else:
self.assertRaises(TypeError,
self.set.intersection_update,
self.other)
def test_intersection(self):
self.assertRaises(TypeError, lambda: self.set & self.other)
self.assertRaises(TypeError, lambda: self.other & self.set)
if self.otherIsIterable:
self.set.intersection(self.other)
else:
self.assertRaises(TypeError, self.set.intersection, self.other)
def test_sym_difference_update_operator(self):
try:
self.set ^= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_sym_difference_update(self):
if self.otherIsIterable:
self.set.symmetric_difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.symmetric_difference_update,
self.other)
def test_sym_difference(self):
self.assertRaises(TypeError, lambda: self.set ^ self.other)
self.assertRaises(TypeError, lambda: self.other ^ self.set)
if self.otherIsIterable:
self.set.symmetric_difference(self.other)
else:
self.assertRaises(TypeError, self.set.symmetric_difference, self.other)
def test_difference_update_operator(self):
try:
self.set -= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_difference_update(self):
if self.otherIsIterable:
self.set.difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.difference_update,
self.other)
def test_difference(self):
self.assertRaises(TypeError, lambda: self.set - self.other)
self.assertRaises(TypeError, lambda: self.other - self.set)
if self.otherIsIterable:
self.set.difference(self.other)
else:
self.assertRaises(TypeError, self.set.difference, self.other)
#------------------------------------------------------------------------------
class TestOnlySetsNumeric(TestOnlySetsInBinaryOps):
def setUp(self):
self.set = set((1, 2, 3))
self.other = 19
self.otherIsIterable = False
#------------------------------------------------------------------------------
class TestOnlySetsDict(TestOnlySetsInBinaryOps):
def setUp(self):
self.set = set((1, 2, 3))
self.other = {1:2, 3:4}
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsOperator(TestOnlySetsInBinaryOps):
def setUp(self):
self.set = set((1, 2, 3))
self.other = operator.add
self.otherIsIterable = False
#------------------------------------------------------------------------------
class TestOnlySetsTuple(TestOnlySetsInBinaryOps):
def setUp(self):
self.set = set((1, 2, 3))
self.other = (2, 4, 6)
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsString(TestOnlySetsInBinaryOps):
def setUp(self):
self.set = set((1, 2, 3))
self.other = 'abc'
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsGenerator(TestOnlySetsInBinaryOps):
def setUp(self):
def gen():
for i in xrange(0, 10, 2):
yield i
self.set = set((1, 2, 3))
self.other = gen()
self.otherIsIterable = True
#==============================================================================
class TestCopying(unittest.TestCase):
def test_copy(self):
dup = self.set.copy()
        dup_list = sorted(dup)
        set_list = sorted(self.set)
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.failUnless(dup_list[i] is set_list[i])
def test_deep_copy(self):
dup = copy.deepcopy(self.set)
        dup_list = sorted(dup)
        set_list = sorted(self.set)
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.assertEqual(dup_list[i], set_list[i])
#------------------------------------------------------------------------------
class TestCopyingEmpty(TestCopying):
def setUp(self):
self.set = set()
#------------------------------------------------------------------------------
class TestCopyingSingleton(TestCopying):
def setUp(self):
self.set = set(["hello"])
#------------------------------------------------------------------------------
class TestCopyingTriple(TestCopying):
def setUp(self):
self.set = set(["zero", 0, None])
#------------------------------------------------------------------------------
class TestCopyingTuple(TestCopying):
def setUp(self):
self.set = set([(1, 2)])
#------------------------------------------------------------------------------
class TestCopyingNested(TestCopying):
def setUp(self):
self.set = set([((1, 2), (3, 4))])
#==============================================================================
class TestIdentities(unittest.TestCase):
def setUp(self):
self.a = set('abracadabra')
self.b = set('alacazam')
def test_binopsVsSubsets(self):
a, b = self.a, self.b
self.assert_(a - b < a)
self.assert_(b - a < b)
self.assert_(a & b < a)
self.assert_(a & b < b)
self.assert_(a | b > a)
self.assert_(a | b > b)
self.assert_(a ^ b < a | b)
def test_commutativity(self):
a, b = self.a, self.b
self.assertEqual(a&b, b&a)
self.assertEqual(a|b, b|a)
self.assertEqual(a^b, b^a)
if a != b:
self.assertNotEqual(a-b, b-a)
def test_summations(self):
# check that sums of parts equal the whole
a, b = self.a, self.b
self.assertEqual((a-b)|(a&b)|(b-a), a|b)
self.assertEqual((a&b)|(a^b), a|b)
self.assertEqual(a|(b-a), a|b)
self.assertEqual((a-b)|b, a|b)
self.assertEqual((a-b)|(a&b), a)
self.assertEqual((b-a)|(a&b), b)
self.assertEqual((a-b)|(b-a), a^b)
def test_exclusion(self):
# check that inverse operations show non-overlap
a, b, zero = self.a, self.b, set()
self.assertEqual((a-b)&b, zero)
self.assertEqual((b-a)&a, zero)
self.assertEqual((a&b)&(a^b), zero)
# Tests derived from test_itertools.py =======================================
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing next()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
        3 // 0    # deliberately raises ZeroDivisionError on first next()
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def next(self):
raise StopIteration
from itertools import chain, imap
def L(seqn):
'Test multiple tiers of iterators'
return chain(imap(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_constructor(self):
for cons in (set, frozenset):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(sorted(cons(g(s))), sorted(g(s)))
                self.assertRaises(TypeError, cons, X(s))
                self.assertRaises(TypeError, cons, N(s))
                self.assertRaises(ZeroDivisionError, cons, E(s))
def test_inline_methods(self):
s = set('november')
for data in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5), 'december'):
for meth in (s.union, s.intersection, s.difference, s.symmetric_difference, s.isdisjoint):
for g in (G, I, Ig, L, R):
expected = meth(data)
actual = meth(G(data))
if isinstance(expected, bool):
self.assertEqual(actual, expected)
else:
self.assertEqual(sorted(actual), sorted(expected))
self.assertRaises(TypeError, meth, X(s))
self.assertRaises(TypeError, meth, N(s))
self.assertRaises(ZeroDivisionError, meth, E(s))
def test_inplace_methods(self):
for data in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5), 'december'):
for methname in ('update', 'intersection_update',
'difference_update', 'symmetric_difference_update'):
for g in (G, I, Ig, S, L, R):
s = set('january')
t = s.copy()
getattr(s, methname)(list(g(data)))
getattr(t, methname)(g(data))
self.assertEqual(sorted(s), sorted(t))
self.assertRaises(TypeError, getattr(set('january'), methname), X(data))
self.assertRaises(TypeError, getattr(set('january'), methname), N(data))
self.assertRaises(ZeroDivisionError, getattr(set('january'), methname), E(data))
# Application tests (based on David Eppstein's graph recipes) ===================================
def powerset(U):
"""Generates all subsets of a set or sequence U."""
U = iter(U)
try:
x = frozenset([U.next()])
for S in powerset(U):
yield S
yield S | x
except StopIteration:
yield frozenset()
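# Illustrative check (added example, not part of the original tests):
#   set(powerset('ab')) == set([frozenset(), frozenset('a'),
#                               frozenset('b'), frozenset('ab')])
# i.e. 2**len(U) subsets in total.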
def cube(n):
"""Graph of n-dimensional hypercube."""
singletons = [frozenset([x]) for x in range(n)]
return dict([(x, frozenset([x^s for s in singletons]))
for x in powerset(range(n))])
def linegraph(G):
"""Graph, the vertices of which are edges of G,
with two vertices being adjacent iff the corresponding
edges share a vertex."""
L = {}
for x in G:
for y in G[x]:
nx = [frozenset([x,z]) for z in G[x] if z != y]
ny = [frozenset([y,z]) for z in G[y] if z != x]
L[frozenset([x,y])] = frozenset(nx+ny)
return L
def faces(G):
'Return a set of faces in G, where a face is a set of vertices on that face'
# currently limited to triangles, squares, and pentagons
f = set()
for v1, edges in G.items():
for v2 in edges:
for v3 in G[v2]:
if v1 == v3:
continue
if v1 in G[v3]:
f.add(frozenset([v1, v2, v3]))
else:
for v4 in G[v3]:
if v4 == v2:
continue
if v1 in G[v4]:
f.add(frozenset([v1, v2, v3, v4]))
else:
for v5 in G[v4]:
if v5 == v3 or v5 == v2:
continue
if v1 in G[v5]:
f.add(frozenset([v1, v2, v3, v4, v5]))
return f
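# Illustrative (added example): faces(cube(2)) finds exactly one face, the
# square through all four vertices of the 2-dimensional hypercube.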
class TestGraphs(unittest.TestCase):
def test_cube(self):
g = cube(3) # vert --> {v1, v2, v3}
vertices1 = set(g)
self.assertEqual(len(vertices1), 8) # eight vertices
for edge in g.values():
self.assertEqual(len(edge), 3) # each vertex connects to three edges
vertices2 = set(v for edges in g.values() for v in edges)
self.assertEqual(vertices1, vertices2) # edge vertices in original set
cubefaces = faces(g)
self.assertEqual(len(cubefaces), 6) # six faces
for face in cubefaces:
self.assertEqual(len(face), 4) # each face is a square
def test_cuboctahedron(self):
# http://en.wikipedia.org/wiki/Cuboctahedron
# 8 triangular faces and 6 square faces
# 12 identical vertices each connecting a triangle and square
g = cube(3)
cuboctahedron = linegraph(g) # vert --> {v1, v2, v3, v4}
self.assertEqual(len(cuboctahedron), 12) # twelve vertices
vertices = set(cuboctahedron)
for edges in cuboctahedron.values():
self.assertEqual(len(edges), 4) # each vertex connects to four other vertices
othervertices = set(edge for edges in cuboctahedron.values() for edge in edges)
self.assertEqual(vertices, othervertices) # edge vertices in original set
cubofaces = faces(cuboctahedron)
facesizes = collections.defaultdict(int)
for face in cubofaces:
facesizes[len(face)] += 1
self.assertEqual(facesizes[3], 8) # eight triangular faces
self.assertEqual(facesizes[4], 6) # six square faces
for vertex in cuboctahedron:
edge = vertex # Cuboctahedron vertices are edges in Cube
self.assertEqual(len(edge), 2) # Two cube vertices define an edge
for cubevert in edge:
self.assert_(cubevert in g)
#==============================================================================
def test_main(verbose=None):
from test import test_sets
test_classes = (
TestSet,
TestSetSubclass,
TestSetSubclassWithKeywordArgs,
TestFrozenSet,
TestFrozenSetSubclass,
TestSetOfSets,
TestExceptionPropagation,
TestBasicOpsEmpty,
TestBasicOpsSingleton,
TestBasicOpsTuple,
TestBasicOpsTriple,
TestBinaryOps,
TestUpdateOps,
TestMutate,
TestSubsetEqualEmpty,
TestSubsetEqualNonEmpty,
TestSubsetEmptyNonEmpty,
TestSubsetPartial,
TestSubsetNonOverlap,
TestOnlySetsNumeric,
TestOnlySetsDict,
TestOnlySetsOperator,
TestOnlySetsTuple,
TestOnlySetsString,
TestOnlySetsGenerator,
TestCopyingEmpty,
TestCopyingSingleton,
TestCopyingTriple,
TestCopyingTuple,
TestCopyingNested,
TestIdentities,
TestVariousIteratorArgs,
TestGraphs,
)
test_support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
if __name__ == "__main__":
test_main(verbose=True)
|
2ndQuadrant/ansible
|
refs/heads/master
|
lib/ansible/modules/windows/win_shortcut.py
|
52
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_shortcut
version_added: '2.3'
short_description: Manage shortcuts on Windows
description:
- Create, manage and delete Windows shortcuts
options:
src:
description:
- Executable or URL the shortcut points to.
- The executable needs to be in your PATH, or has to be an absolute
path to the executable.
type: str
description:
description:
- Description for the shortcut.
- This is usually shown when hovering over the icon.
type: str
dest:
description:
- Destination file for the shortcut.
- File name should have a C(.lnk) or C(.url) extension.
type: path
required: yes
arguments:
description:
- Additional arguments for the executable defined in C(src).
- Was originally just C(args) but renamed in Ansible 2.8.
type: str
aliases: [ args ]
directory:
description:
- Working directory for executable defined in C(src).
type: path
icon:
description:
- Icon used for the shortcut.
- File name should have a C(.ico) extension.
- The file name is followed by a comma and the icon index inside the library file (.dll); use 0 for an image file.
type: path
hotkey:
description:
- Key combination for the shortcut.
- This is a combination of one or more modifiers and a key.
- Possible modifiers are Alt, Ctrl, Shift, Ext.
- Possible keys are [A-Z] and [0-9].
type: str
windowstyle:
description:
- Influences how the application is displayed when it is launched.
type: str
choices: [ maximized, minimized, normal ]
state:
description:
- When C(absent), removes the shortcut if it exists.
- When C(present), creates or updates the shortcut.
type: str
choices: [ absent, present ]
default: present
run_as_admin:
description:
- When C(src) is an executable, this can control whether the shortcut will be opened as an administrator or not.
type: bool
default: no
version_added: '2.8'
notes:
- 'The following options can include Windows environment variables: C(args), C(description), C(dest), C(directory), C(icon), C(src)'
- 'Windows has two types of shortcuts: Application and URL shortcuts. URL shortcuts only consist of C(dest) and C(src)'
seealso:
- module: win_file
author:
- Dag Wieers (@dagwieers)
'''
EXAMPLES = r'''
- name: Create an application shortcut on the desktop
win_shortcut:
src: C:\Program Files\Mozilla Firefox\Firefox.exe
dest: C:\Users\Public\Desktop\Mozilla Firefox.lnk
icon: C:\Program Files\Mozilla Firefox\Firefox.exe,0
- name: Create the same shortcut using environment variables
win_shortcut:
description: The Mozilla Firefox web browser
src: '%ProgramFiles%\Mozilla Firefox\Firefox.exe'
dest: '%Public%\Desktop\Mozilla Firefox.lnk'
icon: '%ProgramFiles%\Mozilla Firefox\Firefox.exe,0'
directory: '%ProgramFiles%\Mozilla Firefox'
hotkey: Ctrl+Alt+F
- name: Create an application shortcut for an executable in PATH to your desktop
win_shortcut:
src: cmd.exe
dest: Desktop\Command prompt.lnk
- name: Create an application shortcut for the Ansible website
win_shortcut:
src: '%ProgramFiles%\Google\Chrome\Application\chrome.exe'
dest: '%UserProfile%\Desktop\Ansible website.lnk'
arguments: --new-window https://ansible.com/
directory: '%ProgramFiles%\Google\Chrome\Application'
icon: '%ProgramFiles%\Google\Chrome\Application\chrome.exe,0'
hotkey: Ctrl+Alt+A
- name: Create a URL shortcut for the Ansible website
win_shortcut:
src: https://ansible.com/
dest: '%Public%\Desktop\Ansible website.url'
'''
RETURN = r'''
'''
|
thesgc/shergar
|
refs/heads/master
|
shergar/shergar/__init__.py
|
12133432
| |
haad/ansible-modules-extras
|
refs/heads/devel
|
univention/__init__.py
|
12133432
| |
IEEECS-VIT/IEEECS-VIT-Website
|
refs/heads/master
|
ieeecsvit/__init__.py
|
12133432
| |
langner/cclib
|
refs/heads/master
|
cclib/bridge/cclib2pyquante.py
|
3
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Bridge for using cclib data in PyQuante (http://pyquante.sourceforge.net)."""
import numpy
from cclib.parser.utils import find_package
class MissingAttributeError(Exception):
pass
_found_pyquante2 = find_package("pyquante2")
if _found_pyquante2:
from pyquante2 import molecule
def _check_pyquante():
if not _found_pyquante2:
raise ImportError("You must install `pyquante2` to use this function")
def makepyquante(data):
"""Create a PyQuante Molecule from ccData object."""
_check_pyquante()
# Check required attributes.
required_attrs = {"atomcoords", "atomnos"}
missing = [x for x in required_attrs if not hasattr(data, x)]
if missing:
missing = " ".join(missing)
raise MissingAttributeError(
"Could not create pyquante molecule due to missing attribute: {}".format(missing)
)
# In pyquante2, molecular geometry is specified in a format of:
# [(3,.0000000000, .0000000000, .0000000000), (1, .0000000000, .0000000000,1.629912)]
moldesc = numpy.insert(data.atomcoords[-1], 0, data.atomnos, 1).tolist()
return molecule(
[tuple(x) for x in moldesc],
units="Angstroms",
charge=data.charge,
multiplicity=data.mult,
)
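# Illustrative usage sketch (not part of this module; assumes "water.out"
# is an output file cclib can parse):
#   from cclib.io import ccread
#   mol = makepyquante(ccread("water.out"))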
del find_package
|
miles0411/pm
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/utils/deprecation.py
|
271
|
"""
A module that implements tooling to enable easy warnings about deprecations.
"""
from __future__ import absolute_import
import logging
import warnings
class PipDeprecationWarning(Warning):
pass
class RemovedInPip8Warning(PipDeprecationWarning, PendingDeprecationWarning):
pass
class RemovedInPip9Warning(PipDeprecationWarning, PendingDeprecationWarning):
pass
DEPRECATIONS = [RemovedInPip8Warning, RemovedInPip9Warning]
# Warnings <-> Logging Integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(
message, category, filename, lineno, file, line,
)
else:
if issubclass(category, PipDeprecationWarning):
# We use a specially named logger which will handle all of the
# deprecation messages for pip.
logger = logging.getLogger("pip.deprecations")
# This is purposely using the % formatter here instead of letting
# the logging module handle the interpolation. This is because we
# want it to appear as if someone typed this entire message out.
log_message = "DEPRECATION: %s" % message
# Things that are DeprecationWarnings will be removed in the very
# next version of pip. We want these to be more obvious so we
# use the ERROR logging level, while the PendingDeprecationWarnings
# still have at least 2 versions to go until they are removed,
# so they can just be warnings.
if issubclass(category, DeprecationWarning):
logger.error(log_message)
else:
logger.warning(log_message)
else:
_warnings_showwarning(
message, category, filename, lineno, file, line,
)
def install_warning_logger():
global _warnings_showwarning
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
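# Illustrative usage sketch (not part of this module):
#   install_warning_logger()
#   warnings.simplefilter("default", PipDeprecationWarning)
#   warnings.warn("--foo is deprecated", RemovedInPip8Warning, stacklevel=2)
# The message then reaches the "pip.deprecations" logger as
# "DEPRECATION: --foo is deprecated" at WARNING level, since
# RemovedInPip8Warning is a PendingDeprecationWarning rather than a
# DeprecationWarning.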
|
akurtakov/Pydev
|
refs/heads/master
|
plugins/org.python.pydev.jython/Lib/encodings/iso8859_6.py
|
593
|
""" Python Character Mapping Codec iso8859_6 generated from 'MAPPINGS/ISO8859/8859-6.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-6',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
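# Illustrative round trip (added example; assumes the codec is registered
# as 'iso8859-6'):
#   u'\u0627'.encode('iso8859-6') == '\xc7'   # ARABIC LETTER ALEF <-> 0xC7
#   '\xc7'.decode('iso8859-6') == u'\u0627'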
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\u060c' # 0xAC -> ARABIC COMMA
u'\xad' # 0xAD -> SOFT HYPHEN
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\u061b' # 0xBB -> ARABIC SEMICOLON
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\u061f' # 0xBF -> ARABIC QUESTION MARK
u'\ufffe'
u'\u0621' # 0xC1 -> ARABIC LETTER HAMZA
u'\u0622' # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
u'\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
u'\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
u'\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
u'\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
u'\u0627' # 0xC7 -> ARABIC LETTER ALEF
u'\u0628' # 0xC8 -> ARABIC LETTER BEH
u'\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA
u'\u062a' # 0xCA -> ARABIC LETTER TEH
u'\u062b' # 0xCB -> ARABIC LETTER THEH
u'\u062c' # 0xCC -> ARABIC LETTER JEEM
u'\u062d' # 0xCD -> ARABIC LETTER HAH
u'\u062e' # 0xCE -> ARABIC LETTER KHAH
u'\u062f' # 0xCF -> ARABIC LETTER DAL
u'\u0630' # 0xD0 -> ARABIC LETTER THAL
u'\u0631' # 0xD1 -> ARABIC LETTER REH
u'\u0632' # 0xD2 -> ARABIC LETTER ZAIN
u'\u0633' # 0xD3 -> ARABIC LETTER SEEN
u'\u0634' # 0xD4 -> ARABIC LETTER SHEEN
u'\u0635' # 0xD5 -> ARABIC LETTER SAD
u'\u0636' # 0xD6 -> ARABIC LETTER DAD
u'\u0637' # 0xD7 -> ARABIC LETTER TAH
u'\u0638' # 0xD8 -> ARABIC LETTER ZAH
u'\u0639' # 0xD9 -> ARABIC LETTER AIN
u'\u063a' # 0xDA -> ARABIC LETTER GHAIN
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\u0640' # 0xE0 -> ARABIC TATWEEL
u'\u0641' # 0xE1 -> ARABIC LETTER FEH
u'\u0642' # 0xE2 -> ARABIC LETTER QAF
u'\u0643' # 0xE3 -> ARABIC LETTER KAF
u'\u0644' # 0xE4 -> ARABIC LETTER LAM
u'\u0645' # 0xE5 -> ARABIC LETTER MEEM
u'\u0646' # 0xE6 -> ARABIC LETTER NOON
u'\u0647' # 0xE7 -> ARABIC LETTER HEH
u'\u0648' # 0xE8 -> ARABIC LETTER WAW
u'\u0649' # 0xE9 -> ARABIC LETTER ALEF MAKSURA
u'\u064a' # 0xEA -> ARABIC LETTER YEH
u'\u064b' # 0xEB -> ARABIC FATHATAN
u'\u064c' # 0xEC -> ARABIC DAMMATAN
u'\u064d' # 0xED -> ARABIC KASRATAN
u'\u064e' # 0xEE -> ARABIC FATHA
u'\u064f' # 0xEF -> ARABIC DAMMA
u'\u0650' # 0xF0 -> ARABIC KASRA
u'\u0651' # 0xF1 -> ARABIC SHADDA
u'\u0652' # 0xF2 -> ARABIC SUKUN
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
acsone/account-financial-reporting
|
refs/heads/8.0
|
account_financial_report_horizontal/__openerp__.py
|
12
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2012 Therp BV (<http://therp.nl>),
# Copyright (C) 2013 Agile Business Group sagl
# (<http://www.agilebg.com>) (<lorenzo.battistini@agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Accounting Financial Reports Horizontal",
"version": "8.0.0.3.0",
"author": "Therp BV,Agile Business Group,Odoo Community Association (OCA)",
"category": 'Accounting & Finance',
'website': 'https://github.com/OCA/account-financial-reporting',
'license': 'AGPL-3',
"depends": ["account"],
'data': [
"data/report_paperformat.xml",
"data/ir_actions_report_xml.xml",
"report/report_financial.xml",
],
'demo': [],
'test': [],
'active': False,
}
|
agileblaze/OpenStackTwoFactorAuthentication
|
refs/heads/master
|
horizon/openstack_dashboard/test/integration_tests/pages/admin/system/resource_usage/__init__.py
|
12133432
| |
zkraime/osf.io
|
refs/heads/develop
|
website/addons/citations/__init__.py
|
12133432
| |
QijunPan/ansible
|
refs/heads/devel
|
test/integration/targets/module_utils/module_utils/qux2/__init__.py
|
12133432
| |
patjouk/djangogirls
|
refs/heads/master
|
story/management/__init__.py
|
12133432
| |
muntasirsyed/intellij-community
|
refs/heads/master
|
python/testData/resolve/multiFile/resolveQualifiedSuperClassInPackage/foo/__init__.py
|
12133432
| |
mhbu50/frappe
|
refs/heads/develop
|
frappe/core/doctype/has_domain/__init__.py
|
12133432
| |
ssarangi/numba
|
refs/heads/master
|
numba/__init__.py
|
5
|
"""
Expose top-level symbols that are safe for import *
"""
from __future__ import print_function, division, absolute_import
import re
from . import testing, decorators
from . import errors, special, types, config
# Re-export typeof
from .special import *
from .errors import *
from .pycc.decorators import export, exportmany
# Re-export all type names
from .types import *
# Re export decorators
jit = decorators.jit
autojit = decorators.autojit
njit = decorators.njit
# Re export vectorize decorators
from .npyufunc import vectorize, guvectorize
# Re export from_dtype
from .numpy_support import from_dtype
# Re-export test entrypoint
test = testing.test
# Try to initialize cuda
from . import cuda
__all__ = """
jit
autojit
njit
vectorize
guvectorize
export
exportmany
cuda
from_dtype
""".split() + types.__all__ + special.__all__ + errors.__all__
_min_llvmlite_version = (0, 6, 0)
def _ensure_llvm():
"""
Make sure llvmlite is operational.
"""
import warnings
import llvmlite
# Only look at the major, minor and bugfix version numbers.
# Ignore anything else.
regex = re.compile(r'(\d+)\.(\d+).(\d+)')
m = regex.match(llvmlite.__version__)
if m:
ver = tuple(map(int, m.groups()))
if ver < _min_llvmlite_version:
msg = ("Numba requires at least version %d.%d.%d of llvmlite.\n"
"Installed version is %s.\n"
"Please update llvmlite." %
(_min_llvmlite_version + (llvmlite.__version__,)))
raise ImportError(msg)
else:
# Not matching?
warnings.warn("llvmlite version format not recognized!")
from llvmlite.binding import check_jit_execution
check_jit_execution()
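# Illustrative (added example): llvmlite.__version__ == "0.5.1" parses to
# ver == (0, 5, 1) < _min_llvmlite_version, so the ImportError above is raised.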
_ensure_llvm()
# Process initialization
# Should this be hooked into CPUContext instead?
from .targets.randomimpl import random_init
random_init()
del random_init
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
batxes/4Cin
|
refs/heads/master
|
SHH_INV_models/SHH_INV_models_final_output_0.2_-0.1_10000/SHH_INV_models28974.py
|
4
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((4263, 10043.6, 14168.7), (0.7, 0.7, 0.7), 890.203)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((4269.45, 9472.29, 12096), (0.7, 0.7, 0.7), 792.956)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((4234.24, 8428.87, 10411.5), (0.7, 0.7, 0.7), 856.786)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((4126.23, 6549.2, 11753.1), (0.7, 0.7, 0.7), 963.679)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((5043.4, 5402.95, 10633), (0.7, 0.7, 0.7), 761.442)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((6010.35, 6249.42, 8479.61), (0.7, 0.7, 0.7), 961.183)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((8714.18, 7248.12, 8950.96), (0.7, 0.7, 0.7), 753.151)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((9455.03, 7393.9, 9960.92), (1, 0.7, 0), 1098.07)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((7420.49, 7713.31, 6017.06), (0.7, 0.7, 0.7), 1010.42)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((8203.68, 7568.14, 5441.74), (1, 0.7, 0), 821.043)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((8075.98, 5941.87, 4524.01), (0.7, 0.7, 0.7), 873.876)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((8175.19, 4280.15, 3367.78), (0.7, 0.7, 0.7), 625.532)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((7073.64, 5212.61, 2565.88), (0.7, 0.7, 0.7), 880.474)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((5296.71, 4882.9, 2925.13), (0.7, 0.7, 0.7), 659.161)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((5875.05, 3837.12, 1930.23), (0.7, 0.7, 0.7), 831.745)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((6147.25, 2464.17, -215.037), (0.7, 0.7, 0.7), 803.065)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((7068.54, 3255.94, 1110.9), (0.7, 0.7, 0.7), 610.262)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((8157.48, 3676.52, 2163.59), (0.7, 0.7, 0.7), 741.265)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((7860.36, 2428.37, 2702.35), (0.7, 0.7, 0.7), 748.625)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((6819.75, 2947.74, 1718.55), (0.7, 0.7, 0.7), 677.181)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((6161.56, 4437.31, 2468.49), (0.7, 0.7, 0.7), 616.015)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((6503.46, 3497.11, 2951.46), (0.7, 0.7, 0.7), 653.154)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((7546.84, 4160.96, 2396.09), (0.7, 0.7, 0.7), 595.33)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((8592.37, 4584.47, 3062.53), (0.7, 0.7, 0.7), 627.901)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((7746.72, 3697.02, 4078.29), (0.7, 0.7, 0.7), 663.941)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((6652.91, 3613.64, 3342.17), (0.7, 0.7, 0.7), 663.899)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((7806.2, 2963.8, 2753.02), (0.7, 0.7, 0.7), 644.694)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((6596.92, 3918.44, 2650.09), (0.7, 0.7, 0.7), 896.802)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((5374.55, 3503.51, 2637.14), (0.7, 0.7, 0.7), 576.38)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((4429.35, 4041.2, 2038.84), (0.7, 0.7, 0.7), 635.092)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((5508.67, 4543.7, 3140.16), (0.7, 0.7, 0.7), 651.505)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((5709.17, 4776.09, 4673.17), (0.7, 0.7, 0.7), 718.042)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((6310.76, 5320.8, 3395.74), (0.7, 0.7, 0.7), 726.714)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((6756.42, 5430.06, 1994.26), (0.7, 0.7, 0.7), 673.585)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((7300.15, 4266.62, 1852.09), (0.7, 0.7, 0.7), 598.418)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((7901.46, 5204.48, 725.72), (0.7, 0.7, 0.7), 697.612)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((6086.67, 6017.24, 3056.71), (0.7, 0.7, 0.7), 799.808)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((6289.96, 7697.05, 4736.9), (0.7, 0.7, 0.7), 1132.58)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((5514.28, 7684.79, 4513.95), (0.7, 0.7, 0.7), 1011.94)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((4401.6, 8580.92, 5381.54), (0.7, 0.7, 0.7), 782.592)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((5339.79, 10054.8, 5159.84), (0.7, 0.7, 0.7), 856.575)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((5817.73, 10970.2, 4951.25), (1, 0.7, 0), 706.579)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((5523.2, 9861.23, 5353.91), (0.7, 0.7, 0.7), 1015.96)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((4067.73, 7559.02, 6182.21), (0.7, 0.7, 0.7), 1205.72)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((2190.87, 6530.8, 5796.29), (0.7, 0.7, 0.7), 841.939)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((3062.17, 6300.9, 6948.23), (1, 0.7, 0), 806.999)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((2411.5, 6141.64, 7120.3), (0.7, 0.7, 0.7), 958.856)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((2297.73, 4381.85, 7750.62), (0.7, 0.7, 0.7), 952.892)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((1628.91, 4044.8, 7724.77), (0.7, 0.7, 0.7), 809.284)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((1362.86, 5180.14, 8204.86), (0.7, 0.7, 0.7), 709.159)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((1485.6, 3637.15, 7284.55), (0.7, 0.7, 0.7), 859.832)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((2658.02, 2791.36, 6283.57), (0.7, 0.7, 0.7), 800.866)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((1515.13, 3449.35, 4915.49), (0.7, 0.7, 0.7), 949.508)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((1977.87, 5027.66, 3870.1), (0.7, 0.7, 0.7), 891.98)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((3411.54, 3762.03, 4500.66), (0.7, 0.7, 0.7), 890.034)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((2359.21, 3194.5, 5801.53), (0.7, 0.7, 0.7), 804.165)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((843.467, 3028.97, 5617.89), (0.7, 0.7, 0.7), 826.796)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((860.119, 2652.43, 3805.31), (0.7, 0.7, 0.7), 1085.8)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((2800.64, 4202.33, 4113.37), (0.7, 0.7, 0.7), 906.997)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((1496.15, 3320.42, 5007.05), (0.7, 0.7, 0.7), 708.694)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((1452.61, 2962.96, 6571.66), (0.7, 0.7, 0.7), 780.223)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((861.507, 4147.19, 7423.57), (0.7, 0.7, 0.7), 757.424)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((923.586, 4097.73, 9043.6), (0.7, 0.7, 0.7), 817.574)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((175.474, 5172.81, 9976.66), (0.7, 0.7, 0.7), 782.423)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((1798.74, 5452.9, 9498.41), (0.7, 0.7, 0.7), 906.404)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((679.503, 5063.31, 8127.33), (0.7, 0.7, 0.7), 818.463)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((1399.48, 3661.64, 7915.22), (0.7, 0.7, 0.7), 759.539)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((2649.21, 4007.51, 9057.61), (0.7, 0.7, 0.7), 1088.59)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((532.817, 3878.4, 8857.78), (0.7, 0.7, 0.7), 822.312)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((838.672, 4822.33, 8309.42), (0.7, 0.7, 0.7), 749.81)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((1142.54, 3868.21, 8751.54), (0.7, 0.7, 0.7), 764.488)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
adngdb/socorro
|
refs/heads/master
|
webapp-django/crashstats/supersearch/tests/test_utils.py
|
3
|
import datetime
from nose.tools import eq_
from django.utils.timezone import utc
from crashstats.crashstats.tests.test_views import BaseTestViews
from crashstats.topcrashers.views import get_date_boundaries
class TestDateBoundaries(BaseTestViews):
def test_get_date_boundaries(self):
# Simple test.
start, end = get_date_boundaries({
'date': [
'>2010-03-01T12:12:12',
'<=2010-03-10T00:00:00',
]
})
eq_(
start,
datetime.datetime(2010, 3, 1, 12, 12, 12).replace(tzinfo=utc)
)
eq_(end, datetime.datetime(2010, 3, 10).replace(tzinfo=utc))
# Test with messy dates.
start, end = get_date_boundaries({
'date': [
'>2010-03-01T12:12:12',
'>2009-01-01T12:12:12',
'<2010-03-11T00:00:00',
'<=2010-03-10T00:00:00',
]
})
eq_(
start,
datetime.datetime(2009, 1, 1, 12, 12, 12).replace(tzinfo=utc)
)
eq_(end, datetime.datetime(2010, 3, 11).replace(tzinfo=utc))
|
mnahm5/django-estore
|
refs/heads/master
|
src/newsletter/forms.py
|
43
|
from django import forms
from .models import SignUp
class ContactForm(forms.Form):
full_name = forms.CharField(required=False)
email = forms.EmailField()
message = forms.CharField()
class SignUpForm(forms.ModelForm):
class Meta:
model = SignUp
fields = ['full_name', 'email']
### exclude = ['full_name']
def clean_email(self):
email = self.cleaned_data.get('email')
email_base, provider = email.split("@")
domain, extension = provider.split('.')
# if not domain == 'USC':
# raise forms.ValidationError("Please make sure you use your USC email.")
if not extension == "edu":
raise forms.ValidationError("Please use a valid .EDU email address")
return email
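# Illustrative (added example): 'trojan@usc.edu' splits into email_base
# 'trojan', domain 'usc' and extension 'edu', so it is accepted, while
# 'a@gmail.com' raises ValidationError because its extension is 'com'.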
def clean_full_name(self):
full_name = self.cleaned_data.get('full_name')
#write validation code.
return full_name
|
hustcalm/seabios-hacking
|
refs/heads/master
|
tools/acpi_extract.py
|
4
|
#!/usr/bin/python
# Copyright (C) 2011 Red Hat, Inc., Michael S. Tsirkin <mst@redhat.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
# Process mixed ASL/AML listing (.lst file) produced by iasl -l
# Locate and execute ACPI_EXTRACT directives, output offset info
#
# Documentation of ACPI_EXTRACT_* directive tags:
#
# These directive tags output offset information from AML for BIOS runtime
# table generation.
# Each directive is of the form:
# ACPI_EXTRACT_<TYPE> <array_name> <Operator> (...)
# and causes the extractor to create an array
# named <array_name> with offset, in the generated AML,
# of an object of a given type in the following <Operator>.
#
# A directive must fit on a single code line.
#
# Object type in AML is verified, a mismatch causes a build failure.
#
# Directives and operators currently supported are:
# ACPI_EXTRACT_NAME_DWORD_CONST - extract a Dword Const object from Name()
# ACPI_EXTRACT_NAME_WORD_CONST - extract a Word Const object from Name()
# ACPI_EXTRACT_NAME_BYTE_CONST - extract a Byte Const object from Name()
# ACPI_EXTRACT_METHOD_STRING - extract a NameString from Method()
# ACPI_EXTRACT_NAME_STRING - extract a NameString from Name()
# ACPI_EXTRACT_PROCESSOR_START - start of Processor() block
# ACPI_EXTRACT_PROCESSOR_STRING - extract a NameString from Processor()
# ACPI_EXTRACT_PROCESSOR_END - offset at last byte of Processor() + 1
# ACPI_EXTRACT_PKG_START - start of Package block
#
# ACPI_EXTRACT_ALL_CODE - create an array storing the generated AML bytecode
#
# ACPI_EXTRACT is not allowed anywhere else in code, except in comments.
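# For example (illustrative, array name chosen arbitrarily), the two lines
#     /* ACPI_EXTRACT_NAME_BYTE_CONST ssdt_proc_id */
#     Name(ID, 0xAA)
# make the extractor emit an array "ssdt_proc_id" holding the AML offset of
# the Byte Const 0xAA inside the Name() operator.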
import re
import sys
import fileinput
aml = []
asl = []
output = {}
debug = ""
class asl_line:
line = None
lineno = None
aml_offset = None
def die(diag):
sys.stderr.write("Error: %s; %s\n" % (diag, debug))
sys.exit(1)
#Store an ASL command, matching AML offset, and input line (for debugging)
def add_asl(lineno, line):
l = asl_line()
l.line = line
l.lineno = lineno
l.aml_offset = len(aml)
asl.append(l)
#Store an AML byte sequence
#Verify that offset output by iasl matches # of bytes so far
def add_aml(offset, line):
o = int(offset, 16);
# Sanity check: offset must match size of code so far
if (o != len(aml)):
die("Offset 0x%x != 0x%x" % (o, len(aml)))
# Strip any trailing dots and ASCII dump after "
line = re.sub(r'\s*\.*\s*".*$',"", line)
# Strip trailing whitespace
line = re.sub(r'\s+$',"", line)
# Strip leading whitespace
line = re.sub(r'^\s+',"", line)
# Split on whitespace
code = re.split(r'\s+', line)
for c in code:
# Require a legal hex number, two digits
if (not(re.search(r'^[0-9A-Fa-f][0-9A-Fa-f]$', c))):
die("Unexpected octet %s" % c);
aml.append(int(c, 16));
# Process aml bytecode array, decoding AML
def aml_pkglen_bytes(offset):
# PkgLength can be multibyte. Bits 7-6 give the # of extra bytes.
pkglenbytes = aml[offset] >> 6;
return pkglenbytes + 1
def aml_pkglen(offset):
pkgstart = offset
pkglenbytes = aml_pkglen_bytes(offset)
pkglen = aml[offset] & 0x3F
# If multibyte, first nibble only uses bits 0-3
if ((pkglenbytes > 1) and (pkglen & 0x30)):
die("PkgLen bytes 0x%x but first nibble 0x%x expected 0x0X" %
(pkglen, pkglen))
offset += 1
pkglenbytes -= 1
for i in range(pkglenbytes):
pkglen |= aml[offset + i] << (i * 8 + 4)
if (len(aml) < pkgstart + pkglen):
die("PckgLen 0x%x at offset 0x%x exceeds AML size 0x%x" %
(pkglen, offset, len(aml)))
return pkglen
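# Illustrative (added example): a leading byte 0x2A (top two bits 00) encodes
# PkgLength 0x2A in a single byte; the two bytes 0x48 0x02 (top bits 01, one
# extra byte) encode 0x08 | (0x02 << 4) == 0x28.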
# Given method offset, find its NameString offset
def aml_method_string(offset):
#0x14 MethodOp PkgLength NameString MethodFlags TermList
if (aml[offset] != 0x14):
die( "Method offset 0x%x: expected 0x14 actual 0x%x" %
(offset, aml[offset]));
offset += 1;
pkglenbytes = aml_pkglen_bytes(offset)
offset += pkglenbytes;
return offset;
# Given name offset, find its NameString offset
def aml_name_string(offset):
#0x08 NameOp NameString DataRef
if (aml[offset] != 0x08):
die( "Name offset 0x%x: expected 0x08 actual 0x%x" %
(offset, aml[offset]));
offset += 1
# Block Name Modifier. Skip it.
if (aml[offset] == 0x5c or aml[offset] == 0x5e):
offset += 1
return offset;
# Given data offset, find dword const offset
def aml_data_dword_const(offset):
#0x0C DWordPrefix DWordData
if (aml[offset] != 0x0C):
die( "Name offset 0x%x: expected 0x0C actual 0x%x" %
(offset, aml[offset]));
return offset + 1;
# Given data offset, find word const offset
def aml_data_word_const(offset):
#0x0B WordPrefix WordData
if (aml[offset] != 0x0B):
die( "Name offset 0x%x: expected 0x0B actual 0x%x" %
(offset, aml[offset]));
return offset + 1;
# Given data offset, find byte const offset
def aml_data_byte_const(offset):
#0x0A BytePrefix ByteData
if (aml[offset] != 0x0A):
die( "Name offset 0x%x: expected 0x0A actual 0x%x" %
(offset, aml[offset]));
return offset + 1;
# Given name offset, find dword const offset
def aml_name_dword_const(offset):
return aml_data_dword_const(aml_name_string(offset) + 4)
# Given name offset, find word const offset
def aml_name_word_const(offset):
return aml_data_word_const(aml_name_string(offset) + 4)
# Given name offset, find byte const offset
def aml_name_byte_const(offset):
return aml_data_byte_const(aml_name_string(offset) + 4)
def aml_device_start(offset):
#0x5B 0x82 DeviceOp PkgLength NameString
if ((aml[offset] != 0x5B) or (aml[offset + 1] != 0x82)):
die( "Name offset 0x%x: expected 0x5B 0x82 actual 0x%x 0x%x" %
(offset, aml[offset], aml[offset + 1]));
return offset
def aml_device_string(offset):
#0x5B 0x82 DeviceOp PkgLength NameString
start = aml_device_start(offset)
offset += 2
pkglenbytes = aml_pkglen_bytes(offset)
offset += pkglenbytes
return offset
def aml_device_end(offset):
start = aml_device_start(offset)
offset += 2
pkglenbytes = aml_pkglen_bytes(offset)
pkglen = aml_pkglen(offset)
return offset + pkglen
def aml_processor_start(offset):
#0x5B 0x83 ProcessorOp PkgLength NameString ProcID
if ((aml[offset] != 0x5B) or (aml[offset + 1] != 0x83)):
die( "Name offset 0x%x: expected 0x5B 0x83 actual 0x%x 0x%x" %
(offset, aml[offset], aml[offset + 1]));
return offset
def aml_processor_string(offset):
#0x5B 0x83 ProcessorOp PkgLength NameString ProcID
start = aml_processor_start(offset)
offset += 2
pkglenbytes = aml_pkglen_bytes(offset)
offset += pkglenbytes
return offset
def aml_processor_end(offset):
start = aml_processor_start(offset)
offset += 2
pkglenbytes = aml_pkglen_bytes(offset)
pkglen = aml_pkglen(offset)
return offset + pkglen
def aml_package_start(offset):
offset = aml_name_string(offset) + 4
# 0x12 PkgLength NumElements PackageElementList
if (aml[offset] != 0x12):
die( "Name offset 0x%x: expected 0x12 actual 0x%x" %
(offset, aml[offset]));
offset += 1
return offset + aml_pkglen_bytes(offset) + 1
lineno = 0
for line in fileinput.input():
# Strip trailing newline
line = line.rstrip();
# line number and debug string to output in case of errors
lineno = lineno + 1
debug = "input line %d: %s" % (lineno, line)
#ASL listing: space, then line#, then ...., then code
pasl = re.compile('^\s+([0-9]+)\.\.\.\.\s*')
m = pasl.search(line)
if (m):
add_asl(lineno, pasl.sub("", line));
# AML listing: offset in hex, then ...., then code
paml = re.compile('^([0-9A-Fa-f]+)\.\.\.\.\s*')
m = paml.search(line)
if (m):
add_aml(m.group(1), paml.sub("", line))
# Now go over code
# Track AML offset of a previous non-empty ASL command
prev_aml_offset = -1
for i in range(len(asl)):
debug = "input line %d: %s" % (asl[i].lineno, asl[i].line)
l = asl[i].line
# skip if not an extract directive
a = len(re.findall(r'ACPI_EXTRACT', l))
if (not a):
# If not empty, store AML offset. Will be used for sanity checks
# IASL seems to put {}. at random places in the listing.
# Ignore any non-words for the purpose of this test.
m = re.search(r'\w+', l)
if (m):
prev_aml_offset = asl[i].aml_offset
continue
if (a > 1):
die("Expected at most one ACPI_EXTRACT per line, actual %d" % a)
mext = re.search(r'''
^\s* # leading whitespace
/\*\s* # start C comment
(ACPI_EXTRACT_\w+) # directive: group(1)
\s+ # whitespace separates directive from array name
(\w+) # array name: group(2)
\s*\*/ # end of C comment
\s*$ # trailing whitespace
''', l, re.VERBOSE)
if (not mext):
die("Stray ACPI_EXTRACT in input")
# previous command must have produced some AML,
# otherwise we are in a middle of a block
if (prev_aml_offset == asl[i].aml_offset):
die("ACPI_EXTRACT directive in the middle of a block")
directive = mext.group(1)
array = mext.group(2)
offset = asl[i].aml_offset
if (directive == "ACPI_EXTRACT_ALL_CODE"):
if array in output:
die("%s directive used more than once" % directive)
output[array] = aml
continue
if (directive == "ACPI_EXTRACT_NAME_DWORD_CONST"):
offset = aml_name_dword_const(offset)
elif (directive == "ACPI_EXTRACT_NAME_WORD_CONST"):
offset = aml_name_word_const(offset)
elif (directive == "ACPI_EXTRACT_NAME_BYTE_CONST"):
offset = aml_name_byte_const(offset)
elif (directive == "ACPI_EXTRACT_NAME_STRING"):
offset = aml_name_string(offset)
elif (directive == "ACPI_EXTRACT_METHOD_STRING"):
offset = aml_method_string(offset)
elif (directive == "ACPI_EXTRACT_DEVICE_START"):
offset = aml_device_start(offset)
elif (directive == "ACPI_EXTRACT_DEVICE_STRING"):
offset = aml_device_string(offset)
elif (directive == "ACPI_EXTRACT_DEVICE_END"):
offset = aml_device_end(offset)
elif (directive == "ACPI_EXTRACT_PROCESSOR_START"):
offset = aml_processor_start(offset)
elif (directive == "ACPI_EXTRACT_PROCESSOR_STRING"):
offset = aml_processor_string(offset)
elif (directive == "ACPI_EXTRACT_PROCESSOR_END"):
offset = aml_processor_end(offset)
elif (directive == "ACPI_EXTRACT_PKG_START"):
offset = aml_package_start(offset)
else:
die("Unsupported directive %s" % directive)
if array not in output:
output[array] = []
output[array].append(offset)
debug = "at end of file"
def get_value_type(maxvalue):
#Use type large enough to fit the table
if (maxvalue >= 0x10000):
return "int"
elif (maxvalue >= 0x100):
return "short"
else:
return "char"
# Pretty print output
for array in output.keys():
otype = get_value_type(max(output[array]))
odata = []
for value in output[array]:
odata.append("0x%x" % value)
sys.stdout.write("static unsigned %s %s[] = {\n" % (otype, array))
sys.stdout.write(",\n".join(odata))
sys.stdout.write('\n};\n');
|
devananda/ironic
|
refs/heads/master
|
ironic/dhcp/base.py
|
4
|
# Copyright 2014 Rackspace, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Abstract base class for dhcp providers.
"""
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class BaseDHCP(object):
"""Base class for DHCP provider APIs."""
@abc.abstractmethod
def update_port_dhcp_opts(self, port_id, dhcp_options, token=None):
"""Update one or more DHCP options on the specified port.
:param port_id: designate which port these attributes
will be applied to.
:param dhcp_options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param token: An optional authentication token.
:raises: FailedToUpdateDHCPOptOnPort
"""
@abc.abstractmethod
def update_port_address(self, port_id, address, token=None):
"""Update a port's MAC address.
:param port_id: port id.
:param address: new MAC address.
:param token: An optional authentication token.
:raises: FailedToUpdateMacOnPort
"""
@abc.abstractmethod
def update_dhcp_opts(self, task, options, vifs=None):
"""Send or update the DHCP BOOT options for this node.
:param task: A TaskManager instance.
:param options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param vifs: A dict with keys 'ports' and 'portgroups' and
dicts as values. Each dict has key/value pairs of the form
<ironic UUID>:<neutron port UUID>. e.g.
::
{'ports': {'port.uuid': vif.id},
'portgroups': {'portgroup.uuid': vif.id}}
If the value is None, will get the list of ports/portgroups
from the Ironic port/portgroup objects.
:raises: FailedToUpdateDHCPOptOnPort
"""
@abc.abstractmethod
def get_ip_addresses(self, task):
"""Get IP addresses for all ports/portgroups in `task`.
:param task: A TaskManager instance.
:returns: List of IP addresses associated with
task's ports and portgroups.
"""
def clean_dhcp_opts(self, task):
"""Clean up the DHCP BOOT options for all ports in `task`.
:param task: A TaskManager instance.
:raises: FailedToCleanDHCPOpts
"""
pass
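# A minimal sketch of a concrete provider against the abstract API above.
# The NoneDHCP name and its no-op behavior are hypothetical illustrations,
# not part of the original module.
class NoneDHCP(BaseDHCP):
    """DHCP provider that performs no external calls (illustration only)."""
    def update_port_dhcp_opts(self, port_id, dhcp_options, token=None):
        pass  # a real provider would push dhcp_options to its backend
    def update_port_address(self, port_id, address, token=None):
        pass  # a real provider would update the MAC on the backend port
    def update_dhcp_opts(self, task, options, vifs=None):
        pass  # a real provider would apply the BOOT options for the node
    def get_ip_addresses(self, task):
        return []  # no backend, so no addresses to report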
|
tdeitch/faceoff
|
refs/heads/master
|
lib/merge-js-css-html.py
|
1
|
import sys
import re
# Look for external script and CSS link tags, read in the referenced files,
# and inline their contents into the output file.
# This will probably be fairly brittle (i.e. it will most likely only work for this app).
html=open(sys.argv[1])
js_re=re.compile(r'<script .*?src=[\'"]([^\'"]*?)[\'"].*?</script>')
css_re=re.compile(r'<link .*?href=[\'"]([^\'"]*?)[\'"].*?type=[\'"]text/css[\'"]/?>')
write=sys.stdout.write
for line in html:
m=js_re.match(line)
if m:
js=open(m.group(1)).read()
write('<script type="text/javascript">')
write(js)
write('</script>')
else:
m=css_re.match(line)
if m:
css=open(m.group(1)).read()
write('<style type="text/css">')
write(css)
write('</style>')
else:
write(line.strip())
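# Hypothetical invocation, inferred from the sys.argv[1] input and stdout
# output above:
#   python merge-js-css-html.py index.html > bundled.html
# This inlines every matched <script src=...> and <link ... text/css> file
# into a single self-contained HTML document on standard output.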
|
KristianOellegaard/django-filer
|
refs/heads/develop
|
filer/server/backends/xsendfile.py
|
1
|
#-*- coding: utf-8 -*-
from django.http import HttpResponse
from filer.server.backends.base import ServerBase
class ApacheXSendfileServer(ServerBase):
def serve(self, request, file, **kwargs):
response = HttpResponse()
response['X-Sendfile'] = file.path
self.default_headers(request, response, file=file, **kwargs)
return response
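# A minimal usage sketch (hypothetical wiring, not part of the original
# module): a view delegates the file transfer to Apache via the X-Sendfile
# header instead of streaming bytes through Django.
#
#   server = ApacheXSendfileServer()
#   def download(request, filer_file):
#       return server.serve(request, filer_file)
#
# Apache needs mod_xsendfile enabled (XSendFile On) for the file's path;
# otherwise clients receive an empty response body.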
|
acsone/account-analytic
|
refs/heads/8.0
|
procurement_analytic/models/procurement.py
|
2
|
# -*- coding: utf-8 -*-
# © 2016 Carlos Dauden <carlos.dauden@tecnativa.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import fields, models
class ProcurementOrder(models.Model):
_inherit = 'procurement.order'
account_analytic_id = fields.Many2one(
comodel_name='account.analytic.account', string='Analytic Account',
domain=[('type', '!=', 'view')])
|
AlexS12/PyFME
|
refs/heads/master
|
src/pyfme/aircrafts/tests/test_cessna_310.py
|
2
|
# -*- coding: utf-8 -*-
"""
Python Flight Mechanics Engine (PyFME).
Copyright (c) AeroPython Development Team.
Distributed under the terms of the MIT License.
Created on Sat Jan 9 23:56:51 2016
@author:olrosales@gmail.com
@AeroPython
"""
from numpy.testing import assert_array_almost_equal
from pyfme.aircrafts.cessna_310 import Cessna310
def test_calculate_aero_forces_moments_alpha_beta_zero():
aircraft = Cessna310()
aircraft.q_inf = 0.5 * 1.225 * 100 ** 2
aircraft.alpha = 0
aircraft.beta = 0
aircraft.controls = {'delta_elevator': 0,
'hor_tail_incidence': 0,
'delta_aileron': 0,
'delta_rudder': 0,
'delta_t': 0}
L, D, Y, l, m, n = aircraft._calculate_aero_forces_moments()
assert_array_almost_equal([L, D, Y],
[28679.16845, 2887.832934, 0.],
decimal=4)
assert_array_almost_equal([l, m, n],
[0, 10177.065816, 0],
decimal=4)
|
jun-wan/scilifelab
|
refs/heads/master
|
scilifelab/lims_utils/valitadion_of_LIMS_upgrade.py
|
4
|
#!/usr/bin/env python
"""valitadion_of_LIMS_upgrade.py is a script to compare extraction output from lims stage
server and lims production server. The comparison is based on the objects created to build
documents in the projects database on status db. A recursive function compares all values
in the objects and any differing values or missing keys are logged in a validation log file.
Maya Brandi, Science for Life Laboratory, Stockholm, Sweden.
"""
usage = """
*****Recomended validation procedure:*****
Testing the script:
Test that the script is caching differences by changing something on the
stage server, eg. the value of the sample udf "status_(manual)". for some
project J.Doe_00_00. Then run the script with the -p flagg:
valitadion_of_LIMS_upgrade.py -p J.Doe_00_00
This should give the output:
Lims stage and Lims production are differing for proj J.Doe_00_00: True
Key status_(manual) differing: Lims production gives: Aborted. Lims stage gives In Progress.
Running the validation:
Run valitadion_of_LIMS_upgrade.py with the -a flagg and grep for "True" in
the logfile when the script is finished. It will take some hours to go through
all projects opened after jul 1
If you don't find anything when grepping for True in the log file, no differences
are found for any projects.
If you get output when grepping for True, there are differences. Then read the log
file to find what is differing.
"""
import sys
import os
import codecs
from optparse import OptionParser
from scilifelab.db.statusDB_utils import *
from functions import *
from pprint import pprint
from genologics.lims import *
from genologics.config import BASEURI, USERNAME, PASSWORD
import objectsDB as DB
from datetime import date
import scilifelab.log
lims = Lims(BASEURI, USERNAME, PASSWORD)
lims_stage = Lims('https://genologics-stage.scilifelab.se:8443', USERNAME, PASSWORD)
def comp_obj(stage, prod):
"""compares the two dictionaries obj and dbobj"""
LOG.info('project %s is being handled' % stage['project_name'])
diff = recursive_comp(stage, prod)
LOG.info('Lims stage and Lims production are differing for proj %s: %s' % ( stage['project_name'],diff))
def recursive_comp(stage, prod):
diff = False
keys = list(set(stage.keys() + prod.keys()))
for key in keys:
if not (stage.has_key(key)):
LOG.info('Key %s missing in Lims stage to db object ' % key)
diff = True
elif not prod.has_key(key):
LOG.info('Key %s missing in Lims production to db object ' % key)
diff = True
else:
prod_val = prod[key]
stage_val = stage[key]
if (prod_val != stage_val):
diff = True
if (type(prod_val) is dict) and (type(stage_val) is dict):
diff = diff and recursive_comp(stage_val, prod_val)
else:
LOG.info('Key %s differing: Lims production gives: %s. Lims stage gives %s. ' %( key,prod_val,stage_val))
return diff
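# Worked example (hypothetical data): comparing
#   stage = {'a': 1, 'b': {'c': 2}}  and  prod = {'a': 1, 'b': {'c': 3}}
# recurses into 'b', logs that key 'c' differs (stage 2 vs production 3)
# and returns True.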
def main(proj_name, all_projects, conf, only_closed):
first_of_july = '2013-06-30'
today = date.today()
couch = load_couch_server(conf)
if all_projects:
projects = lims.get_projects()
for proj in projects:
closed = proj.close_date
if not only_closed or (only_closed and closed):
contin = True
else:
contin = False
if contin:
proj_name = proj.name
try:
proj_stage = lims_stage.get_projects(name = proj_name)
if len(proj_stage)==0 :
LOG.warning("""Found no projects on Lims stage with name %s""" % proj_name)
else:
proj_stage = proj_stage[0]
opened = proj.open_date
if opened:
if comp_dates(first_of_july, opened):
obj = DB.ProjectDB(lims, proj.id, None)
obj_stage = DB.ProjectDB(lims_stage, proj.id, None)
print
comp_obj(obj_stage.obj, obj.obj)
else:
LOG.info('Open date missing for project %s' % proj_name)
except:
LOG.info('Failed comparing stage and prod for proj %s' % proj_name)
elif proj_name is not None:
proj = lims.get_projects(name = proj_name)
proj_stage = lims_stage.get_projects(name = proj_name)
if (not proj) or (not proj_stage):
LOG.warning("""Found %s projects on Lims stage, and %s projects
on Lims production with project name %s""" % (str(len(proj_stage)), str(len(proj)), proj_name))
else:
proj = proj[0]
proj_stage = proj_stage[0]
opened = proj.open_date
if opened:
if comp_dates(first_of_july, opened):
cont = 'yes'
else:
cont = raw_input("""The project %s is opened before 2013-07-01.
Do you still want to load the data from lims into statusdb? (yes/no): """ % proj_name)
if cont == 'yes':
obj = DB.ProjectDB(lims, proj.id, None)
obj_stage = DB.ProjectDB(lims_stage, proj.id, None)
comp_obj(obj_stage.obj, obj.obj)
else:
LOG.info('Open date missing for project %s' % proj_name)
if __name__ == '__main__':
parser = OptionParser(usage=usage)
parser.add_option("-p", "--project", dest="project_name", default=None,
help = "eg: M.Uhlen_13_01. Dont use with -a flagg.")
parser.add_option("-a", "--all_projects", dest="all_projects", action="store_true", default=False,
help = "Upload all Lims projects into couchDB. Don't use with -p flagg.")
parser.add_option("-C", "--closed_projects", dest="closed_projects", action="store_true", default=False,
help = "Upload only closed projects. Use with -a flagg.")
parser.add_option("-c", "--conf", dest="conf",
default=os.path.join(os.environ['HOME'],'opt/config/post_process.yaml'),
help = "Config file. Default: ~/opt/config/post_process.yaml")
(options, args) = parser.parse_args()
LOG = scilifelab.log.file_logger('LOG',options.conf ,'validate_LIMS_upgrade.log', 'log_dir_tools')
main(options.project_name, options.all_projects, options.conf, options.closed_projects)
|
aliyun/oss-ftp
|
refs/heads/master
|
python27/win32/Lib/test/test_inspect.py
|
35
|
import re
import sys
import types
import unittest
import inspect
import linecache
import datetime
from UserList import UserList
from UserDict import UserDict
from test.test_support import run_unittest, check_py3k_warnings
with check_py3k_warnings(
("tuple parameter unpacking has been removed", SyntaxWarning),
quiet=True):
from test import inspect_fodder as mod
from test import inspect_fodder2 as mod2
# C module for test_findsource_binary
import unicodedata
# Functions tested in this suite:
# ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode,
# isbuiltin, isroutine, isgenerator, isgeneratorfunction, getmembers,
# getdoc, getfile, getmodule, getsourcefile, getcomments, getsource,
# getclasstree, getargspec, getargvalues, formatargspec, formatargvalues,
# currentframe, stack, trace, isdatadescriptor
# NOTE: There are some additional tests relating to interaction with
# zipimport in the test_zipimport_support test module.
modfile = mod.__file__
if modfile.endswith(('c', 'o')):
modfile = modfile[:-1]
import __builtin__
try:
1 // 0
except:
tb = sys.exc_traceback
git = mod.StupidGit()
class IsTestBase(unittest.TestCase):
predicates = set([inspect.isbuiltin, inspect.isclass, inspect.iscode,
inspect.isframe, inspect.isfunction, inspect.ismethod,
inspect.ismodule, inspect.istraceback,
inspect.isgenerator, inspect.isgeneratorfunction])
def istest(self, predicate, exp):
obj = eval(exp)
self.assertTrue(predicate(obj), '%s(%s)' % (predicate.__name__, exp))
for other in self.predicates - set([predicate]):
if predicate == inspect.isgeneratorfunction and\
other == inspect.isfunction:
continue
self.assertFalse(other(obj), 'not %s(%s)' % (other.__name__, exp))
def generator_function_example(self):
for i in xrange(2):
yield i
class TestPredicates(IsTestBase):
def test_sixteen(self):
count = len(filter(lambda x:x.startswith('is'), dir(inspect)))
# This test is here to remind you to update Doc/library/inspect.rst,
# which claims there are 16 such functions.
expected = 16
err_msg = "There are %d (not %d) is* functions" % (count, expected)
self.assertEqual(count, expected, err_msg)
def test_excluding_predicates(self):
self.istest(inspect.isbuiltin, 'sys.exit')
self.istest(inspect.isbuiltin, '[].append')
self.istest(inspect.iscode, 'mod.spam.func_code')
self.istest(inspect.isframe, 'tb.tb_frame')
self.istest(inspect.isfunction, 'mod.spam')
self.istest(inspect.ismethod, 'mod.StupidGit.abuse')
self.istest(inspect.ismethod, 'git.argue')
self.istest(inspect.ismodule, 'mod')
self.istest(inspect.istraceback, 'tb')
self.istest(inspect.isdatadescriptor, '__builtin__.file.closed')
self.istest(inspect.isdatadescriptor, '__builtin__.file.softspace')
self.istest(inspect.isgenerator, '(x for x in xrange(2))')
self.istest(inspect.isgeneratorfunction, 'generator_function_example')
if hasattr(types, 'GetSetDescriptorType'):
self.istest(inspect.isgetsetdescriptor,
'type(tb.tb_frame).f_locals')
else:
self.assertFalse(inspect.isgetsetdescriptor(type(tb.tb_frame).f_locals))
if hasattr(types, 'MemberDescriptorType'):
self.istest(inspect.ismemberdescriptor, 'datetime.timedelta.days')
else:
self.assertFalse(inspect.ismemberdescriptor(datetime.timedelta.days))
def test_isroutine(self):
self.assertTrue(inspect.isroutine(mod.spam))
self.assertTrue(inspect.isroutine([].count))
def test_isclass(self):
self.istest(inspect.isclass, 'mod.StupidGit')
self.assertTrue(inspect.isclass(list))
class newstyle(object): pass
self.assertTrue(inspect.isclass(newstyle))
class CustomGetattr(object):
def __getattr__(self, attr):
return None
self.assertFalse(inspect.isclass(CustomGetattr()))
def test_get_slot_members(self):
class C(object):
__slots__ = ("a", "b")
x = C()
x.a = 42
members = dict(inspect.getmembers(x))
self.assertIn('a', members)
self.assertNotIn('b', members)
def test_isabstract(self):
from abc import ABCMeta, abstractmethod
class AbstractClassExample(object):
__metaclass__ = ABCMeta
@abstractmethod
def foo(self):
pass
class ClassExample(AbstractClassExample):
def foo(self):
pass
a = ClassExample()
# Test general behaviour.
self.assertTrue(inspect.isabstract(AbstractClassExample))
self.assertFalse(inspect.isabstract(ClassExample))
self.assertFalse(inspect.isabstract(a))
self.assertFalse(inspect.isabstract(int))
self.assertFalse(inspect.isabstract(5))
class TestInterpreterStack(IsTestBase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
git.abuse(7, 8, 9)
def test_abuse_done(self):
self.istest(inspect.istraceback, 'git.ex[2]')
self.istest(inspect.isframe, 'mod.fr')
def test_stack(self):
self.assertTrue(len(mod.st) >= 5)
self.assertEqual(mod.st[0][1:],
(modfile, 16, 'eggs', [' st = inspect.stack()\n'], 0))
self.assertEqual(mod.st[1][1:],
(modfile, 9, 'spam', [' eggs(b + d, c + f)\n'], 0))
self.assertEqual(mod.st[2][1:],
(modfile, 43, 'argue', [' spam(a, b, c)\n'], 0))
self.assertEqual(mod.st[3][1:],
(modfile, 39, 'abuse', [' self.argue(a, b, c)\n'], 0))
def test_trace(self):
self.assertEqual(len(git.tr), 3)
self.assertEqual(git.tr[0][1:], (modfile, 43, 'argue',
[' spam(a, b, c)\n'], 0))
self.assertEqual(git.tr[1][1:], (modfile, 9, 'spam',
[' eggs(b + d, c + f)\n'], 0))
self.assertEqual(git.tr[2][1:], (modfile, 18, 'eggs',
[' q = y // 0\n'], 0))
def test_frame(self):
args, varargs, varkw, locals = inspect.getargvalues(mod.fr)
self.assertEqual(args, ['x', 'y'])
self.assertEqual(varargs, None)
self.assertEqual(varkw, None)
self.assertEqual(locals, {'x': 11, 'p': 11, 'y': 14})
self.assertEqual(inspect.formatargvalues(args, varargs, varkw, locals),
'(x=11, y=14)')
def test_previous_frame(self):
args, varargs, varkw, locals = inspect.getargvalues(mod.fr.f_back)
self.assertEqual(args, ['a', 'b', 'c', 'd', ['e', ['f']]])
self.assertEqual(varargs, 'g')
self.assertEqual(varkw, 'h')
self.assertEqual(inspect.formatargvalues(args, varargs, varkw, locals),
'(a=7, b=8, c=9, d=3, (e=4, (f=5,)), *g=(), **h={})')
class GetSourceBase(unittest.TestCase):
# Subclasses must override.
fodderFile = None
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
with open(inspect.getsourcefile(self.fodderFile)) as fp:
self.source = fp.read()
def sourcerange(self, top, bottom):
lines = self.source.split("\n")
return "\n".join(lines[top-1:bottom]) + "\n"
def assertSourceEqual(self, obj, top, bottom):
self.assertEqual(inspect.getsource(obj),
self.sourcerange(top, bottom))
class TestRetrievingSourceCode(GetSourceBase):
fodderFile = mod
def test_getclasses(self):
classes = inspect.getmembers(mod, inspect.isclass)
self.assertEqual(classes,
[('FesteringGob', mod.FesteringGob),
('MalodorousPervert', mod.MalodorousPervert),
('ParrotDroppings', mod.ParrotDroppings),
('StupidGit', mod.StupidGit),
('Tit', mod.MalodorousPervert),
])
tree = inspect.getclasstree([cls[1] for cls in classes])
self.assertEqual(tree,
[(mod.ParrotDroppings, ()),
[(mod.FesteringGob, (mod.MalodorousPervert,
mod.ParrotDroppings))
],
(mod.StupidGit, ()),
[(mod.MalodorousPervert, (mod.StupidGit,)),
[(mod.FesteringGob, (mod.MalodorousPervert,
mod.ParrotDroppings))
]
]
])
tree = inspect.getclasstree([cls[1] for cls in classes], True)
self.assertEqual(tree,
[(mod.ParrotDroppings, ()),
(mod.StupidGit, ()),
[(mod.MalodorousPervert, (mod.StupidGit,)),
[(mod.FesteringGob, (mod.MalodorousPervert,
mod.ParrotDroppings))
]
]
])
def test_getfunctions(self):
functions = inspect.getmembers(mod, inspect.isfunction)
self.assertEqual(functions, [('eggs', mod.eggs),
('spam', mod.spam)])
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_getdoc(self):
self.assertEqual(inspect.getdoc(mod), 'A module docstring.')
self.assertEqual(inspect.getdoc(mod.StupidGit),
'A longer,\n\nindented\n\ndocstring.')
self.assertEqual(inspect.getdoc(git.abuse),
'Another\n\ndocstring\n\ncontaining\n\ntabs')
def test_cleandoc(self):
self.assertEqual(inspect.cleandoc('An\n indented\n docstring.'),
'An\nindented\ndocstring.')
def test_getcomments(self):
self.assertEqual(inspect.getcomments(mod), '# line 1\n')
self.assertEqual(inspect.getcomments(mod.StupidGit), '# line 20\n')
def test_getmodule(self):
# Check actual module
self.assertEqual(inspect.getmodule(mod), mod)
# Check class (uses __module__ attribute)
self.assertEqual(inspect.getmodule(mod.StupidGit), mod)
# Check a method (no __module__ attribute, falls back to filename)
self.assertEqual(inspect.getmodule(mod.StupidGit.abuse), mod)
# Do it again (check the caching isn't broken)
self.assertEqual(inspect.getmodule(mod.StupidGit.abuse), mod)
# Check a builtin
self.assertEqual(inspect.getmodule(str), sys.modules["__builtin__"])
# Check filename override
self.assertEqual(inspect.getmodule(None, modfile), mod)
def test_getsource(self):
self.assertSourceEqual(git.abuse, 29, 39)
self.assertSourceEqual(mod.StupidGit, 21, 46)
def test_getsourcefile(self):
self.assertEqual(inspect.getsourcefile(mod.spam), modfile)
self.assertEqual(inspect.getsourcefile(git.abuse), modfile)
fn = "_non_existing_filename_used_for_sourcefile_test.py"
co = compile("None", fn, "exec")
self.assertEqual(inspect.getsourcefile(co), None)
linecache.cache[co.co_filename] = (1, None, "None", co.co_filename)
self.assertEqual(inspect.getsourcefile(co), fn)
def test_getfile(self):
self.assertEqual(inspect.getfile(mod.StupidGit), mod.__file__)
def test_getmodule_recursion(self):
from types import ModuleType
name = '__inspect_dummy'
m = sys.modules[name] = ModuleType(name)
m.__file__ = "<string>" # hopefully not a real filename...
m.__loader__ = "dummy" # pretend the filename is understood by a loader
exec "def x(): pass" in m.__dict__
self.assertEqual(inspect.getsourcefile(m.x.func_code), '<string>')
del sys.modules[name]
inspect.getmodule(compile('a=10','','single'))
def test_proceed_with_fake_filename(self):
'''doctest monkeypatches linecache to enable inspection'''
fn, source = '<test>', 'def x(): pass\n'
getlines = linecache.getlines
def monkey(filename, module_globals=None):
if filename == fn:
return source.splitlines(True)
else:
return getlines(filename, module_globals)
linecache.getlines = monkey
try:
ns = {}
exec compile(source, fn, 'single') in ns
inspect.getsource(ns["x"])
finally:
linecache.getlines = getlines
class TestDecorators(GetSourceBase):
fodderFile = mod2
def test_wrapped_decorator(self):
self.assertSourceEqual(mod2.wrapped, 14, 17)
def test_replacing_decorator(self):
self.assertSourceEqual(mod2.gone, 9, 10)
class TestOneliners(GetSourceBase):
fodderFile = mod2
def test_oneline_lambda(self):
# Test inspect.getsource with a one-line lambda function.
self.assertSourceEqual(mod2.oll, 25, 25)
def test_threeline_lambda(self):
# Test inspect.getsource with a three-line lambda function,
# where the second and third lines are _not_ indented.
self.assertSourceEqual(mod2.tll, 28, 30)
def test_twoline_indented_lambda(self):
# Test inspect.getsource with a two-line lambda function,
# where the second line _is_ indented.
self.assertSourceEqual(mod2.tlli, 33, 34)
def test_onelinefunc(self):
# Test inspect.getsource with a regular one-line function.
self.assertSourceEqual(mod2.onelinefunc, 37, 37)
def test_manyargs(self):
# Test inspect.getsource with a regular function where
# the arguments are on two lines and _not_ indented and
# the body on the second line with the last arguments.
self.assertSourceEqual(mod2.manyargs, 40, 41)
def test_twolinefunc(self):
# Test inspect.getsource with a regular function where
# the body is on two lines, following the argument list and
# continued on the next line by a \\.
self.assertSourceEqual(mod2.twolinefunc, 44, 45)
def test_lambda_in_list(self):
# Test inspect.getsource with a one-line lambda function
# defined in a list, indented.
self.assertSourceEqual(mod2.a[1], 49, 49)
def test_anonymous(self):
# Test inspect.getsource with a lambda function defined
# as argument to another function.
self.assertSourceEqual(mod2.anonymous, 55, 55)
class TestBuggyCases(GetSourceBase):
fodderFile = mod2
def test_with_comment(self):
self.assertSourceEqual(mod2.with_comment, 58, 59)
def test_multiline_sig(self):
self.assertSourceEqual(mod2.multiline_sig[0], 63, 64)
def test_nested_class(self):
self.assertSourceEqual(mod2.func69().func71, 71, 72)
def test_one_liner_followed_by_non_name(self):
self.assertSourceEqual(mod2.func77, 77, 77)
def test_one_liner_dedent_non_name(self):
self.assertSourceEqual(mod2.cls82.func83, 83, 83)
def test_with_comment_instead_of_docstring(self):
self.assertSourceEqual(mod2.func88, 88, 90)
def test_method_in_dynamic_class(self):
self.assertSourceEqual(mod2.method_in_dynamic_class, 95, 97)
@unittest.skipIf(
not hasattr(unicodedata, '__file__') or
unicodedata.__file__[-4:] in (".pyc", ".pyo"),
"unicodedata is not an external binary module")
def test_findsource_binary(self):
self.assertRaises(IOError, inspect.getsource, unicodedata)
self.assertRaises(IOError, inspect.findsource, unicodedata)
def test_findsource_code_in_linecache(self):
lines = ["x=1"]
co = compile(lines[0], "_dynamically_created_file", "exec")
self.assertRaises(IOError, inspect.findsource, co)
self.assertRaises(IOError, inspect.getsource, co)
linecache.cache[co.co_filename] = (1, None, lines, co.co_filename)
self.assertEqual(inspect.findsource(co), (lines,0))
self.assertEqual(inspect.getsource(co), lines[0])
def test_findsource_without_filename(self):
for fname in ['', '<string>']:
co = compile('x=1', fname, "exec")
self.assertRaises(IOError, inspect.findsource, co)
self.assertRaises(IOError, inspect.getsource, co)
class _BrokenDataDescriptor(object):
"""
A broken data descriptor. See bug #1785.
"""
def __get__(*args):
raise AssertionError("should not __get__ data descriptors")
def __set__(*args):
raise RuntimeError
def __getattr__(*args):
raise AssertionError("should not __getattr__ data descriptors")
class _BrokenMethodDescriptor(object):
"""
A broken method descriptor. See bug #1785.
"""
def __get__(*args):
raise AssertionError("should not __get__ method descriptors")
def __getattr__(*args):
raise AssertionError("should not __getattr__ method descriptors")
# Helper for testing classify_class_attrs.
def attrs_wo_objs(cls):
return [t[:3] for t in inspect.classify_class_attrs(cls)]
class TestClassesAndFunctions(unittest.TestCase):
def test_classic_mro(self):
# Test classic-class method resolution order.
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
expected = (D, B, A, C)
got = inspect.getmro(D)
self.assertEqual(expected, got)
def test_newstyle_mro(self):
# The same w/ new-class MRO.
class A(object): pass
class B(A): pass
class C(A): pass
class D(B, C): pass
expected = (D, B, C, A, object)
got = inspect.getmro(D)
self.assertEqual(expected, got)
def assertArgSpecEquals(self, routine, args_e, varargs_e = None,
varkw_e = None, defaults_e = None,
formatted = None):
args, varargs, varkw, defaults = inspect.getargspec(routine)
self.assertEqual(args, args_e)
self.assertEqual(varargs, varargs_e)
self.assertEqual(varkw, varkw_e)
self.assertEqual(defaults, defaults_e)
if formatted is not None:
self.assertEqual(inspect.formatargspec(args, varargs, varkw, defaults),
formatted)
def test_getargspec(self):
self.assertArgSpecEquals(mod.eggs, ['x', 'y'], formatted = '(x, y)')
self.assertArgSpecEquals(mod.spam,
['a', 'b', 'c', 'd', ['e', ['f']]],
'g', 'h', (3, (4, (5,))),
'(a, b, c, d=3, (e, (f,))=(4, (5,)), *g, **h)')
def test_getargspec_method(self):
class A(object):
def m(self):
pass
self.assertArgSpecEquals(A.m, ['self'])
def test_getargspec_sublistofone(self):
with check_py3k_warnings(
("tuple parameter unpacking has been removed", SyntaxWarning),
("parenthesized argument names are invalid", SyntaxWarning)):
exec 'def sublistOfOne((foo,)): return 1'
self.assertArgSpecEquals(sublistOfOne, [['foo']])
exec 'def fakeSublistOfOne((foo)): return 1'
self.assertArgSpecEquals(fakeSublistOfOne, ['foo'])
def _classify_test(self, newstyle):
"""Helper for testing that classify_class_attrs finds a bunch of
different kinds of attributes on a given class.
"""
if newstyle:
base = object
else:
class base:
pass
class A(base):
def s(): pass
s = staticmethod(s)
def c(cls): pass
c = classmethod(c)
def getp(self): pass
p = property(getp)
def m(self): pass
def m1(self): pass
datablob = '1'
dd = _BrokenDataDescriptor()
md = _BrokenMethodDescriptor()
attrs = attrs_wo_objs(A)
self.assertIn(('s', 'static method', A), attrs, 'missing static method')
self.assertIn(('c', 'class method', A), attrs, 'missing class method')
self.assertIn(('p', 'property', A), attrs, 'missing property')
self.assertIn(('m', 'method', A), attrs, 'missing plain method')
self.assertIn(('m1', 'method', A), attrs, 'missing plain method')
self.assertIn(('datablob', 'data', A), attrs, 'missing data')
self.assertIn(('md', 'method', A), attrs, 'missing method descriptor')
self.assertIn(('dd', 'data', A), attrs, 'missing data descriptor')
class B(A):
def m(self): pass
attrs = attrs_wo_objs(B)
self.assertIn(('s', 'static method', A), attrs, 'missing static method')
self.assertIn(('c', 'class method', A), attrs, 'missing class method')
self.assertIn(('p', 'property', A), attrs, 'missing property')
self.assertIn(('m', 'method', B), attrs, 'missing plain method')
self.assertIn(('m1', 'method', A), attrs, 'missing plain method')
self.assertIn(('datablob', 'data', A), attrs, 'missing data')
self.assertIn(('md', 'method', A), attrs, 'missing method descriptor')
self.assertIn(('dd', 'data', A), attrs, 'missing data descriptor')
class C(A):
def m(self): pass
def c(self): pass
attrs = attrs_wo_objs(C)
self.assertIn(('s', 'static method', A), attrs, 'missing static method')
self.assertIn(('c', 'method', C), attrs, 'missing plain method')
self.assertIn(('p', 'property', A), attrs, 'missing property')
self.assertIn(('m', 'method', C), attrs, 'missing plain method')
self.assertIn(('m1', 'method', A), attrs, 'missing plain method')
self.assertIn(('datablob', 'data', A), attrs, 'missing data')
self.assertIn(('md', 'method', A), attrs, 'missing method descriptor')
self.assertIn(('dd', 'data', A), attrs, 'missing data descriptor')
class D(B, C):
def m1(self): pass
attrs = attrs_wo_objs(D)
self.assertIn(('s', 'static method', A), attrs, 'missing static method')
if newstyle:
self.assertIn(('c', 'method', C), attrs, 'missing plain method')
else:
self.assertIn(('c', 'class method', A), attrs, 'missing class method')
self.assertIn(('p', 'property', A), attrs, 'missing property')
self.assertIn(('m', 'method', B), attrs, 'missing plain method')
self.assertIn(('m1', 'method', D), attrs, 'missing plain method')
self.assertIn(('datablob', 'data', A), attrs, 'missing data')
self.assertIn(('md', 'method', A), attrs, 'missing method descriptor')
self.assertIn(('dd', 'data', A), attrs, 'missing data descriptor')
def test_classify_oldstyle(self):
"""classify_class_attrs finds static methods, class methods,
properties, normal methods, and data attributes on an old-style
class.
"""
self._classify_test(False)
def test_classify_newstyle(self):
"""Just like test_classify_oldstyle, but for a new-style class.
"""
self._classify_test(True)
def test_classify_builtin_types(self):
# Simple sanity check that all built-in types can have their
# attributes classified.
for name in dir(__builtin__):
builtin = getattr(__builtin__, name)
if isinstance(builtin, type):
inspect.classify_class_attrs(builtin)
def test_getmembers_method(self):
# Old-style classes
class B:
def f(self):
pass
self.assertIn(('f', B.f), inspect.getmembers(B))
# contrary to spec, ismethod() is also True for unbound methods
# (see #1785)
self.assertIn(('f', B.f), inspect.getmembers(B, inspect.ismethod))
b = B()
self.assertIn(('f', b.f), inspect.getmembers(b))
self.assertIn(('f', b.f), inspect.getmembers(b, inspect.ismethod))
# New-style classes
class B(object):
def f(self):
pass
self.assertIn(('f', B.f), inspect.getmembers(B))
self.assertIn(('f', B.f), inspect.getmembers(B, inspect.ismethod))
b = B()
self.assertIn(('f', b.f), inspect.getmembers(b))
self.assertIn(('f', b.f), inspect.getmembers(b, inspect.ismethod))
class TestGetcallargsFunctions(unittest.TestCase):
# tuple parameters are named '.1', '.2', etc.
is_tuplename = re.compile(r'^\.\d+$').match
def assertEqualCallArgs(self, func, call_params_string, locs=None):
locs = dict(locs or {}, func=func)
r1 = eval('func(%s)' % call_params_string, None, locs)
r2 = eval('inspect.getcallargs(func, %s)' % call_params_string, None,
locs)
self.assertEqual(r1, r2)
def assertEqualException(self, func, call_param_string, locs=None):
locs = dict(locs or {}, func=func)
try:
eval('func(%s)' % call_param_string, None, locs)
except Exception, ex1:
pass
else:
self.fail('Exception not raised')
try:
eval('inspect.getcallargs(func, %s)' % call_param_string, None,
locs)
except Exception, ex2:
pass
else:
self.fail('Exception not raised')
self.assertIs(type(ex1), type(ex2))
self.assertEqual(str(ex1), str(ex2))
def makeCallable(self, signature):
"""Create a function that returns its locals(), excluding the
autogenerated '.1', '.2', etc. tuple param names (if any)."""
with check_py3k_warnings(
("tuple parameter unpacking has been removed", SyntaxWarning),
quiet=True):
code = ("lambda %s: dict(i for i in locals().items() "
"if not is_tuplename(i[0]))")
return eval(code % signature, {'is_tuplename' : self.is_tuplename})
def test_plain(self):
f = self.makeCallable('a, b=1')
self.assertEqualCallArgs(f, '2')
self.assertEqualCallArgs(f, '2, 3')
self.assertEqualCallArgs(f, 'a=2')
self.assertEqualCallArgs(f, 'b=3, a=2')
self.assertEqualCallArgs(f, '2, b=3')
# expand *iterable / **mapping
self.assertEqualCallArgs(f, '*(2,)')
self.assertEqualCallArgs(f, '*[2]')
self.assertEqualCallArgs(f, '*(2, 3)')
self.assertEqualCallArgs(f, '*[2, 3]')
self.assertEqualCallArgs(f, '**{"a":2}')
self.assertEqualCallArgs(f, 'b=3, **{"a":2}')
self.assertEqualCallArgs(f, '2, **{"b":3}')
self.assertEqualCallArgs(f, '**{"b":3, "a":2}')
# expand UserList / UserDict
self.assertEqualCallArgs(f, '*UserList([2])')
self.assertEqualCallArgs(f, '*UserList([2, 3])')
self.assertEqualCallArgs(f, '**UserDict(a=2)')
self.assertEqualCallArgs(f, '2, **UserDict(b=3)')
self.assertEqualCallArgs(f, 'b=2, **UserDict(a=3)')
# unicode keyword args
self.assertEqualCallArgs(f, '**{u"a":2}')
self.assertEqualCallArgs(f, 'b=3, **{u"a":2}')
self.assertEqualCallArgs(f, '2, **{u"b":3}')
self.assertEqualCallArgs(f, '**{u"b":3, u"a":2}')
def test_varargs(self):
f = self.makeCallable('a, b=1, *c')
self.assertEqualCallArgs(f, '2')
self.assertEqualCallArgs(f, '2, 3')
self.assertEqualCallArgs(f, '2, 3, 4')
self.assertEqualCallArgs(f, '*(2,3,4)')
self.assertEqualCallArgs(f, '2, *[3,4]')
self.assertEqualCallArgs(f, '2, 3, *UserList([4])')
def test_varkw(self):
f = self.makeCallable('a, b=1, **c')
self.assertEqualCallArgs(f, 'a=2')
self.assertEqualCallArgs(f, '2, b=3, c=4')
self.assertEqualCallArgs(f, 'b=3, a=2, c=4')
self.assertEqualCallArgs(f, 'c=4, **{"a":2, "b":3}')
self.assertEqualCallArgs(f, '2, c=4, **{"b":3}')
self.assertEqualCallArgs(f, 'b=2, **{"a":3, "c":4}')
self.assertEqualCallArgs(f, '**UserDict(a=2, b=3, c=4)')
self.assertEqualCallArgs(f, '2, c=4, **UserDict(b=3)')
self.assertEqualCallArgs(f, 'b=2, **UserDict(a=3, c=4)')
# unicode keyword args
self.assertEqualCallArgs(f, 'c=4, **{u"a":2, u"b":3}')
self.assertEqualCallArgs(f, '2, c=4, **{u"b":3}')
self.assertEqualCallArgs(f, 'b=2, **{u"a":3, u"c":4}')
def test_varkw_only(self):
# issue11256:
f = self.makeCallable('**c')
self.assertEqualCallArgs(f, '')
self.assertEqualCallArgs(f, 'a=1')
self.assertEqualCallArgs(f, 'a=1, b=2')
self.assertEqualCallArgs(f, 'c=3, **{"a": 1, "b": 2}')
self.assertEqualCallArgs(f, '**UserDict(a=1, b=2)')
self.assertEqualCallArgs(f, 'c=3, **UserDict(a=1, b=2)')
def test_tupleargs(self):
f = self.makeCallable('(b,c), (d,(e,f))=(0,[1,2])')
self.assertEqualCallArgs(f, '(2,3)')
self.assertEqualCallArgs(f, '[2,3]')
self.assertEqualCallArgs(f, 'UserList([2,3])')
self.assertEqualCallArgs(f, '(2,3), (4,(5,6))')
self.assertEqualCallArgs(f, '(2,3), (4,[5,6])')
self.assertEqualCallArgs(f, '(2,3), [4,UserList([5,6])]')
def test_multiple_features(self):
f = self.makeCallable('a, b=2, (c,(d,e))=(3,[4,5]), *f, **g')
self.assertEqualCallArgs(f, '2, 3, (4,[5,6]), 7')
self.assertEqualCallArgs(f, '2, 3, *[(4,[5,6]), 7], x=8')
self.assertEqualCallArgs(f, '2, 3, x=8, *[(4,[5,6]), 7]')
self.assertEqualCallArgs(f, '2, x=8, *[3, (4,[5,6]), 7], y=9')
self.assertEqualCallArgs(f, 'x=8, *[2, 3, (4,[5,6])], y=9')
self.assertEqualCallArgs(f, 'x=8, *UserList([2, 3, (4,[5,6])]), '
'**{"y":9, "z":10}')
self.assertEqualCallArgs(f, '2, x=8, *UserList([3, (4,[5,6])]), '
'**UserDict(y=9, z=10)')
def test_errors(self):
f0 = self.makeCallable('')
f1 = self.makeCallable('a, b')
f2 = self.makeCallable('a, b=1')
# f0 takes no arguments
self.assertEqualException(f0, '1')
self.assertEqualException(f0, 'x=1')
self.assertEqualException(f0, '1,x=1')
# f1 takes exactly 2 arguments
self.assertEqualException(f1, '')
self.assertEqualException(f1, '1')
self.assertEqualException(f1, 'a=2')
self.assertEqualException(f1, 'b=3')
# f2 takes at least 1 argument
self.assertEqualException(f2, '')
self.assertEqualException(f2, 'b=3')
for f in f1, f2:
# f1/f2 takes exactly/at most 2 arguments
self.assertEqualException(f, '2, 3, 4')
self.assertEqualException(f, '1, 2, 3, a=1')
self.assertEqualException(f, '2, 3, 4, c=5')
self.assertEqualException(f, '2, 3, 4, a=1, c=5')
# f got an unexpected keyword argument
self.assertEqualException(f, 'c=2')
self.assertEqualException(f, '2, c=3')
self.assertEqualException(f, '2, 3, c=4')
self.assertEqualException(f, '2, c=4, b=3')
self.assertEqualException(f, '**{u"\u03c0\u03b9": 4}')
# f got multiple values for keyword argument
self.assertEqualException(f, '1, a=2')
self.assertEqualException(f, '1, **{"a":2}')
self.assertEqualException(f, '1, 2, b=3')
# XXX: Python inconsistency
# - for functions and bound methods: unexpected keyword 'c'
# - for unbound methods: multiple values for keyword 'a'
#self.assertEqualException(f, '1, c=3, a=2')
f = self.makeCallable('(a,b)=(0,1)')
self.assertEqualException(f, '1')
self.assertEqualException(f, '[1]')
self.assertEqualException(f, '(1,2,3)')
# issue11256:
f3 = self.makeCallable('**c')
self.assertEqualException(f3, '1, 2')
self.assertEqualException(f3, '1, 2, a=1, b=2')
class TestGetcallargsMethods(TestGetcallargsFunctions):
def setUp(self):
class Foo(object):
pass
self.cls = Foo
self.inst = Foo()
def makeCallable(self, signature):
assert 'self' not in signature
mk = super(TestGetcallargsMethods, self).makeCallable
self.cls.method = mk('self, ' + signature)
return self.inst.method
class TestGetcallargsUnboundMethods(TestGetcallargsMethods):
def makeCallable(self, signature):
super(TestGetcallargsUnboundMethods, self).makeCallable(signature)
return self.cls.method
def assertEqualCallArgs(self, func, call_params_string, locs=None):
return super(TestGetcallargsUnboundMethods, self).assertEqualCallArgs(
*self._getAssertEqualParams(func, call_params_string, locs))
def assertEqualException(self, func, call_params_string, locs=None):
return super(TestGetcallargsUnboundMethods, self).assertEqualException(
*self._getAssertEqualParams(func, call_params_string, locs))
def _getAssertEqualParams(self, func, call_params_string, locs=None):
assert 'inst' not in call_params_string
locs = dict(locs or {}, inst=self.inst)
return (func, 'inst,' + call_params_string, locs)
def test_main():
run_unittest(
TestDecorators, TestRetrievingSourceCode, TestOneliners, TestBuggyCases,
TestInterpreterStack, TestClassesAndFunctions, TestPredicates,
TestGetcallargsFunctions, TestGetcallargsMethods,
TestGetcallargsUnboundMethods)
if __name__ == "__main__":
test_main()
|
alexissmirnov/donomo
|
refs/heads/master
|
donomo_archive/deps/reportlab/docs/userguide/ch9_future.py
|
2
|
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/docs/userguide/ch9_future.py
from reportlab.tools.docco.rl_doc_utils import *
heading1("Future Directions")
disc("""We have a very long list of things we plan to do
and what we do first will most likely be inspired by customer
or user interest.
""")
disc("""
We plan to provide a large number of pre-designed Platypus example
document types -- brochure, newsletter, business letter, thesis, memo,
etcetera, to give our users a better boost towards the solutions they
desire.
""")
disc("""
We plan to fully support adding fonts and internationalization, which are
not well supported in the current release.""")
disc("""
We plan to fully support some of the more obscure features of PDF
such as general hyperlinks, which are not yet well supported.
""")
disc("""
We are also open to suggestions. Please let us know what you think
is missing. You can also offer patches or contributions. Please
look to $http://www.reportlab.com$ for the latest mailing list and
contact information.""")
# this comment is a trivial test of SF checkin rights - delete it some time! AR 2001-04-17
|
reingart/gui2py
|
refs/heads/master
|
samples/chat/chat.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"Minimal gui2py CHAT application (to be used as skeleton)"
from __future__ import with_statement # for python 2.5 compatibility
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2016 Mariano Reingart"
__license__ = "LGPL 3.0"
import gui
# --- here goes your event handlers ---
def send(evt):
"Process an outgoing communication"
# get the text written by the user (input textbox control)
msg = ctrl_input.value
# send the message (replace with socket/queue/etc.)
gui.alert(msg, "Message")
# record the message (update the UI)
log(msg)
ctrl_input.value = ""
ctrl_input.set_focus()
def recv(evt=None):
"Process an incoming communication"
# receive the message (replace with socket/queue/etc.)
msg = ""
# record the message (update the UI)
log(msg)
def log(msg):
"Append the message to the output text box control"
ctrl_output.value += msg + "\n"
# --- gui2py designer generated code starts ---
with gui.Window(name='mywin', title=u'gui2py chat', resizable=True,
height='461px', left='168', top='79', width='400px', ):
gui.TextBox(name=u'output', multiline=True, height='403', left='8',
top='10', width='379')
gui.TextBox(name=u'input', height='30', left='11', top='417', width='323')
gui.Button(label=u'\u2192', name=u'send', left='348', top='419',
width='40', default=True, )
# --- gui2py designer generated code ends ---
mywin = gui.get("mywin")
ctrl_input = mywin["input"]
ctrl_output = mywin["output"]
# assign your event handlers:
mywin['send'].onclick = send
if __name__ == "__main__":
# example of calling a GUI function (i.e. from another thread):
gui.call_after(log, "Welcome!")
# basic startup: show windows, activate control and start GUI
mywin.show()
ctrl_input.set_focus()
gui.main_loop()
|
scmsqhn/changhongmall
|
refs/heads/master
|
jieba-0.38/jieba/posseg/__init__.py
|
1
|
from __future__ import absolute_import, unicode_literals
import os
import re
import sys
import jieba
import pickle
from .._compat import *
from .viterbi import viterbi
PROB_START_P = "prob_start.p"
PROB_TRANS_P = "prob_trans.p"
PROB_EMIT_P = "prob_emit.p"
CHAR_STATE_TAB_P = "char_state_tab.p"
re_han_detail = re.compile("([\u4E00-\u9FD5]+)")
re_skip_detail = re.compile("([\.0-9]+|[a-zA-Z0-9]+)")
re_han_internal = re.compile("([\u4E00-\u9FD5a-zA-Z0-9+#&\._]+)")
re_skip_internal = re.compile("(\r\n|\s)")
re_eng = re.compile("[a-zA-Z0-9]+")
re_num = re.compile("[\.0-9]+")
re_eng1 = re.compile('^[a-zA-Z0-9]$', re.U)
def load_model():
# For Jython
start_p = pickle.load(get_module_res("posseg", PROB_START_P))
trans_p = pickle.load(get_module_res("posseg", PROB_TRANS_P))
emit_p = pickle.load(get_module_res("posseg", PROB_EMIT_P))
state = pickle.load(get_module_res("posseg", CHAR_STATE_TAB_P))
return state, start_p, trans_p, emit_p
if sys.platform.startswith("java"):
char_state_tab_P, start_P, trans_P, emit_P = load_model()
else:
from .char_state_tab import P as char_state_tab_P
from .prob_start import P as start_P
from .prob_trans import P as trans_P
from .prob_emit import P as emit_P
class pair(object):
def __init__(self, word, flag):
self.word = word
self.flag = flag
def __unicode__(self):
return '%s/%s' % (self.word, self.flag)
def __repr__(self):
return 'pair(%r, %r)' % (self.word, self.flag)
def __str__(self):
if PY2:
return self.__unicode__().encode(default_encoding)
else:
return self.__unicode__()
def __iter__(self):
return iter((self.word, self.flag))
def __lt__(self, other):
return self.word < other.word
def __eq__(self, other):
return isinstance(other, pair) and self.word == other.word and self.flag == other.flag
def __hash__(self):
return hash(self.word)
def encode(self, arg):
return self.__unicode__().encode(arg)
class POSTokenizer(object):
def __init__(self, tokenizer=None):
self.tokenizer = tokenizer or jieba.Tokenizer()
self.load_word_tag(self.tokenizer.get_dict_file())
def __repr__(self):
return '<POSTokenizer tokenizer=%r>' % self.tokenizer
def __getattr__(self, name):
if name in ('cut_for_search', 'lcut_for_search', 'tokenize'):
# may be possible?
raise NotImplementedError
return getattr(self.tokenizer, name)
def initialize(self, dictionary=None):
self.tokenizer.initialize(dictionary)
self.load_word_tag(self.tokenizer.get_dict_file())
def load_word_tag(self, f):
self.word_tag_tab = {}
f_name = resolve_filename(f)
for lineno, line in enumerate(f, 1):
try:
line = line.strip().decode("utf-8")
if not line:
continue
word, _, tag = line.split(" ")
self.word_tag_tab[word] = tag
except Exception:
raise ValueError(
'invalid POS dictionary entry in %s at Line %s: %s' % (f_name, lineno, line))
f.close()
def makesure_userdict_loaded(self):
if self.tokenizer.user_word_tag_tab:
self.word_tag_tab.update(self.tokenizer.user_word_tag_tab)
self.tokenizer.user_word_tag_tab = {}
def __cut(self, sentence):
prob, pos_list = viterbi(
sentence, char_state_tab_P, start_P, trans_P, emit_P)
begin, nexti = 0, 0
for i, char in enumerate(sentence):
pos = pos_list[i][0]
if pos == 'B':
begin = i
elif pos == 'E':
yield pair(sentence[begin:i + 1], pos_list[i][1])
nexti = i + 1
elif pos == 'S':
yield pair(char, pos_list[i][1])
nexti = i + 1
if nexti < len(sentence):
yield pair(sentence[nexti:], pos_list[nexti][1])
def __cut_detail(self, sentence):
blocks = re_han_detail.split(sentence)
for blk in blocks:
if re_han_detail.match(blk):
for word in self.__cut(blk):
yield word
else:
tmp = re_skip_detail.split(blk)
for x in tmp:
if x:
if re_num.match(x):
yield pair(x, 'm')
elif re_eng.match(x):
yield pair(x, 'eng')
else:
yield pair(x, 'x')
def __cut_DAG_NO_HMM(self, sentence):
DAG = self.tokenizer.get_DAG(sentence)
route = {}
self.tokenizer.calc(sentence, DAG, route)
x = 0
N = len(sentence)
buf = ''
while x < N:
y = route[x][1] + 1
l_word = sentence[x:y]
if re_eng1.match(l_word):
buf += l_word
x = y
else:
if buf:
yield pair(buf, 'eng')
buf = ''
yield pair(l_word, self.word_tag_tab.get(l_word, 'x'))
x = y
if buf:
yield pair(buf, 'eng')
buf = ''
def __cut_DAG(self, sentence):
DAG = self.tokenizer.get_DAG(sentence)
route = {}
self.tokenizer.calc(sentence, DAG, route)
x = 0
buf = ''
N = len(sentence)
while x < N:
y = route[x][1] + 1
l_word = sentence[x:y]
if y - x == 1:
buf += l_word
else:
if buf:
if len(buf) == 1:
yield pair(buf, self.word_tag_tab.get(buf, 'x'))
elif not self.tokenizer.FREQ.get(buf):
recognized = self.__cut_detail(buf)
for t in recognized:
yield t
else:
for elem in buf:
yield pair(elem, self.word_tag_tab.get(elem, 'x'))
buf = ''
yield pair(l_word, self.word_tag_tab.get(l_word, 'x'))
x = y
if buf:
if len(buf) == 1:
yield pair(buf, self.word_tag_tab.get(buf, 'x'))
elif not self.tokenizer.FREQ.get(buf):
recognized = self.__cut_detail(buf)
for t in recognized:
yield t
else:
for elem in buf:
yield pair(elem, self.word_tag_tab.get(elem, 'x'))
def __cut_internal(self, sentence, HMM=True):
self.makesure_userdict_loaded()
sentence = strdecode(sentence)
blocks = re_han_internal.split(sentence)
if HMM:
cut_blk = self.__cut_DAG
else:
cut_blk = self.__cut_DAG_NO_HMM
for blk in blocks:
if re_han_internal.match(blk):
for word in cut_blk(blk):
yield word
else:
tmp = re_skip_internal.split(blk)
for x in tmp:
if re_skip_internal.match(x):
yield pair(x, 'x')
else:
for xx in x:
if re_num.match(xx):
yield pair(xx, 'm')
elif re_eng.match(xx):  # match the single char, consistent with re_num above
yield pair(xx, 'eng')
else:
yield pair(xx, 'x')
def _lcut_internal(self, sentence):
return list(self.__cut_internal(sentence))
def _lcut_internal_no_hmm(self, sentence):
return list(self.__cut_internal(sentence, False))
def cut(self, sentence, HMM=True):
for w in self.__cut_internal(sentence, HMM=HMM):
yield w
def lcut(self, *args, **kwargs):
return list(self.cut(*args, **kwargs))
# default Tokenizer instance
dt = POSTokenizer(jieba.dt)
# global functions
initialize = dt.initialize
def _lcut_internal(s):
return dt._lcut_internal(s)
def _lcut_internal_no_hmm(s):
return dt._lcut_internal_no_hmm(s)
def cut(sentence, HMM=True):
"""
Global `cut` function that supports parallel processing.
Note that this only works using dt; custom POSTokenizer
instances are not supported.
"""
global dt
if jieba.pool is None:
for w in dt.cut(sentence, HMM=HMM):
yield w
else:
parts = strdecode(sentence).splitlines(True)
if HMM:
result = jieba.pool.map(_lcut_internal, parts)
else:
result = jieba.pool.map(_lcut_internal_no_hmm, parts)
for r in result:
for w in r:
yield w
def lcut(sentence, HMM=True):
return list(cut(sentence, HMM))
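# A short usage sketch (standard jieba API; the sample text is illustrative):
#
#   import jieba.posseg as pseg
#   for w in pseg.cut("我爱北京天安门"):
#       print('%s/%s' % (w.word, w.flag))
#
# Each yielded value is a `pair` as defined above; thanks to __iter__ it can
# also be unpacked directly:
#   tagged = [(word, flag) for word, flag in pseg.lcut("我爱北京天安门")]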
|
zturchan/CMPUT410-Lab4
|
refs/heads/master
|
env-lab4/lib/python2.7/re.py
|
4
|
/usr/lib/python2.7/re.py
|
currychou/1
|
refs/heads/master
|
static/Brython3.1.3-20150514-095342/Lib/atexit.py
|
743
|
"""allow programmer to define multiple exit functions to be executedupon normal program termination.
Two public functions, register and unregister, are defined.
"""
class __loader__(object):
pass
def _clear(*args,**kw):
"""_clear() -> None
Clear the list of previously registered exit functions."""
pass
def _run_exitfuncs(*args,**kw):
"""_run_exitfuncs() -> None
Run all registered exit functions."""
pass
def register(*args,**kw):
"""register(func, *args, **kwargs) -> func
Register a function to be executed upon normal program termination
func - function to be called at exit
args - optional arguments to pass to func
kwargs - optional keyword arguments to pass to func
func is returned to facilitate usage as a decorator."""
pass
def unregister(*args,**kw):
"""unregister(func) -> None
Unregister an exit function which was previously registered using
atexit.register
func - function to be unregistered"""
pass
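# A usage sketch following the documented contract above (CPython semantics;
# the goodbye() helper is hypothetical):
#
#   import atexit
#   def goodbye(name, adjective='fond'):
#       print('Goodbye %s, it was %s.' % (name, adjective))
#   atexit.register(goodbye, 'world')   # runs at normal termination
#   atexit.unregister(goodbye)          # and can be removed again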
|
igemsoftware/SYSU-Software2013
|
refs/heads/master
|
project/Python27/Lib/site-packages/win32/Demos/rastest.py
|
17
|
# rastest.py - test/demonstrate the win32ras module.
# Much of the code here contributed by Jethro Wright.
import sys
import os
import win32ras
# Build a little dictionary of RAS states to decent strings.
# eg win32ras.RASCS_OpenPort -> "OpenPort"
stateMap = {}
for name, val in win32ras.__dict__.items():
if name[:6]=="RASCS_":
stateMap[val] = name[6:]
# Use a lock so the callback can tell the main thread when it is finished.
import win32event
callbackEvent = win32event.CreateEvent(None, 0, 0, None)
def Callback( hras, msg, state, error, exterror):
# print "Callback called with ", hras, msg, state, error, exterror
stateName = stateMap.get(state, "Unknown state?")
print "Status is %s (%04lx), error code is %d" % (stateName, state, error)
finished = state in [win32ras.RASCS_Connected]
if finished:
win32event.SetEvent(callbackEvent)
if error != 0 or int( state ) == win32ras.RASCS_Disconnected:
# we know for sure this is a good place to hang up....
print "Detected call failure: %s" % win32ras.GetErrorString( error )
HangUp( hras )
win32event.SetEvent(callbackEvent)
def ShowConnections():
print "All phone-book entries:"
for (name,) in win32ras.EnumEntries():
print " ", name
print "Current Connections:"
for con in win32ras.EnumConnections():
print " ", con
def EditEntry(entryName):
try:
win32ras.EditPhonebookEntry(0,None,entryName)
except win32ras.error, (rc, function, msg):
print "Can not edit/find the RAS entry -", msg
def HangUp( hras ):
# trap potential, irrelevant errors from win32ras....
try:
win32ras.HangUp( hras )
except:
print "Tried to hang up gracefully on error, but didn't work...."
return None
def Connect(entryName, bUseCallback):
if bUseCallback:
theCallback = Callback
win32event.ResetEvent(callbackEvent)
else:
theCallback = None
# in order to *use* the username/password of a particular dun entry, one must
# explicitly get those params under win95....
try:
dp, b = win32ras.GetEntryDialParams( None, entryName )
except:
print "Couldn't find DUN entry: %s" % entryName
else:
hras, rc = win32ras.Dial(None, None, (entryName, "", "", dp[ 3 ], dp[ 4 ], ""),theCallback)
# hras, rc = win32ras.Dial(None, None, (entryName, ),theCallback)
# print hras, rc
if not bUseCallback and rc != 0:
print "Could not dial the RAS connection:", win32ras.GetErrorString(rc)
hras = HangUp( hras )
# don't wait here if there's no need to....
elif bUseCallback and win32event.WaitForSingleObject(callbackEvent, 60000)!=win32event.WAIT_OBJECT_0:
print "Gave up waiting for the process to complete!"
# SDK docs state one must explicitly hang up, even if there's an error....
try:
cs = win32ras.GetConnectStatus( hras )
except:
# on error, attempt a hang up anyway....
hras = HangUp( hras )
else:
if int( cs[ 0 ] ) == win32ras.RASCS_Disconnected:
hras = HangUp( hras )
return hras, rc
def Disconnect( rasEntry ):
# Need to find the entry
name = rasEntry.lower()
for hcon, entryName, devName, devType in win32ras.EnumConnections():
if entryName.lower() == name:
win32ras.HangUp( hcon )
print "Disconnected from", rasEntry
break
else:
print "Could not find an open connection to", entryName
usage = """
Usage: %s [-s] [-l] [-c connection] [-d connection]
-l : List phone-book entries and current connections.
-s : Show status while connecting/disconnecting (uses callbacks)
-c : Connect to the specified phonebook name.
-d : Disconnect from the specified phonebook name.
-e : Edit the specified phonebook entry.
"""
def main():
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], "slc:d:e:")
except getopt.error, why:
print why
print usage % os.path.basename(sys.argv[0])
return
bCallback = 0
if args or not opts:
print usage % os.path.basename(sys.argv[0])
return
for opt, val in opts:
if opt=="-s":
bCallback = 1
if opt=="-l":
ShowConnections()
if opt=="-c":
hras, rc = Connect(val, bCallback)
if hras is not None:
print "hras: 0x%8lx, rc: 0x%04x" % ( hras, rc )
if opt=="-d":
Disconnect(val)
if opt=="-e":
EditEntry(val)
if __name__=='__main__':
main()
|
saurabh6790/test-med-lib
|
refs/heads/master
|
core/doctype/workflow_transition/workflow_transition.py
|
578
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes
class DocType:
def __init__(self, d, dl):
self.doc, self.doclist = d, dl
|
KadVenku/google-diff-match-patch
|
refs/heads/master
|
python2/diff_match_patch_test.py
|
319
|
#!/usr/bin/python2.4
"""Test harness for diff_match_patch.py
Copyright 2006 Google Inc.
http://code.google.com/p/google-diff-match-patch/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import time
import unittest
import diff_match_patch as dmp_module
# Force a module reload. Allows one to edit the DMP module and rerun the tests
# without leaving the Python interpreter.
reload(dmp_module)
class DiffMatchPatchTest(unittest.TestCase):
def setUp(self):
"Test harness for dmp_module."
self.dmp = dmp_module.diff_match_patch()
def diff_rebuildtexts(self, diffs):
# Construct the two texts which made up the diff originally.
text1 = ""
text2 = ""
for x in range(0, len(diffs)):
if diffs[x][0] != dmp_module.diff_match_patch.DIFF_INSERT:
text1 += diffs[x][1]
if diffs[x][0] != dmp_module.diff_match_patch.DIFF_DELETE:
text2 += diffs[x][1]
return (text1, text2)
class DiffTest(DiffMatchPatchTest):
"""DIFF TEST FUNCTIONS"""
def testDiffCommonPrefix(self):
# Detect any common prefix.
# Null case.
self.assertEquals(0, self.dmp.diff_commonPrefix("abc", "xyz"))
# Non-null case.
self.assertEquals(4, self.dmp.diff_commonPrefix("1234abcdef", "1234xyz"))
# Whole case.
self.assertEquals(4, self.dmp.diff_commonPrefix("1234", "1234xyz"))
def testDiffCommonSuffix(self):
# Detect any common suffix.
# Null case.
self.assertEquals(0, self.dmp.diff_commonSuffix("abc", "xyz"))
# Non-null case.
self.assertEquals(4, self.dmp.diff_commonSuffix("abcdef1234", "xyz1234"))
# Whole case.
self.assertEquals(4, self.dmp.diff_commonSuffix("1234", "xyz1234"))
def testDiffCommonOverlap(self):
# Null case.
self.assertEquals(0, self.dmp.diff_commonOverlap("", "abcd"))
# Whole case.
self.assertEquals(3, self.dmp.diff_commonOverlap("abc", "abcd"))
# No overlap.
self.assertEquals(0, self.dmp.diff_commonOverlap("123456", "abcd"))
# Overlap.
self.assertEquals(3, self.dmp.diff_commonOverlap("123456xxx", "xxxabcd"))
# Unicode.
# Some overly clever languages (C#) may treat ligatures as equal to their
# component letters. E.g. U+FB01 == 'fi'
self.assertEquals(0, self.dmp.diff_commonOverlap("fi", u"\ufb01i"))
def testDiffHalfMatch(self):
# Detect a halfmatch.
self.dmp.Diff_Timeout = 1
# No match.
self.assertEquals(None, self.dmp.diff_halfMatch("1234567890", "abcdef"))
self.assertEquals(None, self.dmp.diff_halfMatch("12345", "23"))
# Single Match.
self.assertEquals(("12", "90", "a", "z", "345678"), self.dmp.diff_halfMatch("1234567890", "a345678z"))
self.assertEquals(("a", "z", "12", "90", "345678"), self.dmp.diff_halfMatch("a345678z", "1234567890"))
self.assertEquals(("abc", "z", "1234", "0", "56789"), self.dmp.diff_halfMatch("abc56789z", "1234567890"))
self.assertEquals(("a", "xyz", "1", "7890", "23456"), self.dmp.diff_halfMatch("a23456xyz", "1234567890"))
# Multiple Matches.
self.assertEquals(("12123", "123121", "a", "z", "1234123451234"), self.dmp.diff_halfMatch("121231234123451234123121", "a1234123451234z"))
self.assertEquals(("", "-=-=-=-=-=", "x", "", "x-=-=-=-=-=-=-="), self.dmp.diff_halfMatch("x-=-=-=-=-=-=-=-=-=-=-=-=", "xx-=-=-=-=-=-=-="))
self.assertEquals(("-=-=-=-=-=", "", "", "y", "-=-=-=-=-=-=-=y"), self.dmp.diff_halfMatch("-=-=-=-=-=-=-=-=-=-=-=-=y", "-=-=-=-=-=-=-=yy"))
# Non-optimal halfmatch.
# Optimal diff would be -q+x=H-i+e=lloHe+Hu=llo-Hew+y not -qHillo+x=HelloHe-w+Hulloy
self.assertEquals(("qHillo", "w", "x", "Hulloy", "HelloHe"), self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))
# Optimal no halfmatch.
self.dmp.Diff_Timeout = 0
self.assertEquals(None, self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))
def testDiffLinesToChars(self):
# Convert lines down to characters.
self.assertEquals(("\x01\x02\x01", "\x02\x01\x02", ["", "alpha\n", "beta\n"]), self.dmp.diff_linesToChars("alpha\nbeta\nalpha\n", "beta\nalpha\nbeta\n"))
self.assertEquals(("", "\x01\x02\x03\x03", ["", "alpha\r\n", "beta\r\n", "\r\n"]), self.dmp.diff_linesToChars("", "alpha\r\nbeta\r\n\r\n\r\n"))
self.assertEquals(("\x01", "\x02", ["", "a", "b"]), self.dmp.diff_linesToChars("a", "b"))
# More than 256 to reveal any 8-bit limitations.
n = 300
lineList = []
charList = []
for x in range(1, n + 1):
lineList.append(str(x) + "\n")
charList.append(unichr(x))
self.assertEquals(n, len(lineList))
lines = "".join(lineList)
chars = "".join(charList)
self.assertEquals(n, len(chars))
lineList.insert(0, "")
self.assertEquals((chars, "", lineList), self.dmp.diff_linesToChars(lines, ""))
def testDiffCharsToLines(self):
# Convert chars up to lines.
diffs = [(self.dmp.DIFF_EQUAL, "\x01\x02\x01"), (self.dmp.DIFF_INSERT, "\x02\x01\x02")]
self.dmp.diff_charsToLines(diffs, ["", "alpha\n", "beta\n"])
self.assertEquals([(self.dmp.DIFF_EQUAL, "alpha\nbeta\nalpha\n"), (self.dmp.DIFF_INSERT, "beta\nalpha\nbeta\n")], diffs)
# More than 256 to reveal any 8-bit limitations.
n = 300
lineList = []
charList = []
for x in range(1, n + 1):
lineList.append(str(x) + "\n")
charList.append(unichr(x))
self.assertEquals(n, len(lineList))
lines = "".join(lineList)
chars = "".join(charList)
self.assertEquals(n, len(chars))
lineList.insert(0, "")
diffs = [(self.dmp.DIFF_DELETE, chars)]
self.dmp.diff_charsToLines(diffs, lineList)
self.assertEquals([(self.dmp.DIFF_DELETE, lines)], diffs)
def testDiffCleanupMerge(self):
# Clean up a messy diff.
# Null case.
diffs = []
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([], diffs)
# No change case.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")], diffs)
# Merge equalities.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_EQUAL, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "abc")], diffs)
# Merge deletions.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_DELETE, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc")], diffs)
# Merge insertions.
diffs = [(self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_INSERT, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_INSERT, "abc")], diffs)
# Merge interweave.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "d"), (self.dmp.DIFF_EQUAL, "e"), (self.dmp.DIFF_EQUAL, "f")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_INSERT, "bd"), (self.dmp.DIFF_EQUAL, "ef")], diffs)
# Prefix and suffix detection.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "c")], diffs)
# Prefix and suffix detection with equalities.
diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc"), (self.dmp.DIFF_EQUAL, "y")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "cy")], diffs)
# Slide edit left.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "ba"), (self.dmp.DIFF_EQUAL, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "ac")], diffs)
# Slide edit right.
diffs = [(self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "a")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "ca"), (self.dmp.DIFF_INSERT, "ba")], diffs)
# Slide edit left recursive.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_EQUAL, "x")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "acx")], diffs)
# Slide edit right recursive.
diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "ca"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "a")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "xca"), (self.dmp.DIFF_DELETE, "cba")], diffs)
def testDiffCleanupSemanticLossless(self):
# Slide diffs to match logical boundaries.
# Null case.
diffs = []
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([], diffs)
# Blank lines.
diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\n\r\nBBB"), (self.dmp.DIFF_INSERT, "\r\nDDD\r\n\r\nBBB"), (self.dmp.DIFF_EQUAL, "\r\nEEE")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "AAA\r\n\r\n"), (self.dmp.DIFF_INSERT, "BBB\r\nDDD\r\n\r\n"), (self.dmp.DIFF_EQUAL, "BBB\r\nEEE")], diffs)
# Line boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\nBBB"), (self.dmp.DIFF_INSERT, " DDD\r\nBBB"), (self.dmp.DIFF_EQUAL, " EEE")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "AAA\r\n"), (self.dmp.DIFF_INSERT, "BBB DDD\r\n"), (self.dmp.DIFF_EQUAL, "BBB EEE")], diffs)
# Word boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_INSERT, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_INSERT, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
# Alphanumeric boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The-c"), (self.dmp.DIFF_INSERT, "ow-and-the-c"), (self.dmp.DIFF_EQUAL, "at.")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The-"), (self.dmp.DIFF_INSERT, "cow-and-the-"), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
# Hitting the start.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "ax")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "aax")], diffs)
# Hitting the end.
diffs = [(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "a")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "xaa"), (self.dmp.DIFF_DELETE, "a")], diffs)
# Sentence boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The xxx. The "), (self.dmp.DIFF_INSERT, "zzz. The "), (self.dmp.DIFF_EQUAL, "yyy.")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The xxx."), (self.dmp.DIFF_INSERT, " The zzz."), (self.dmp.DIFF_EQUAL, " The yyy.")], diffs)
def testDiffCleanupSemantic(self):
# Clean up semantically trivial equalities.
# Null case.
diffs = []
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([], diffs)
# No elimination #1.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")], diffs)
# No elimination #2.
diffs = [(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")], diffs)
# Simple elimination.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "c")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "b")], diffs)
# Backpass elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_EQUAL, "cd"), (self.dmp.DIFF_DELETE, "e"), (self.dmp.DIFF_EQUAL, "f"), (self.dmp.DIFF_INSERT, "g")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abcdef"), (self.dmp.DIFF_INSERT, "cdfg")], diffs)
# Multiple eliminations.
diffs = [(self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2"), (self.dmp.DIFF_EQUAL, "_"), (self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "AB_AB"), (self.dmp.DIFF_INSERT, "1A2_1A2")], diffs)
# Word boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_DELETE, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_DELETE, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
# No overlap elimination.
diffs = [(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")], diffs)
# Overlap elimination.
diffs = [(self.dmp.DIFF_DELETE, "abcxxx"), (self.dmp.DIFF_INSERT, "xxxdef")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_INSERT, "def")], diffs)
# Reverse overlap elimination.
diffs = [(self.dmp.DIFF_DELETE, "xxxabc"), (self.dmp.DIFF_INSERT, "defxxx")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_INSERT, "def"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_DELETE, "abc")], diffs)
# Two overlap eliminations.
diffs = [(self.dmp.DIFF_DELETE, "abcd1212"), (self.dmp.DIFF_INSERT, "1212efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A3"), (self.dmp.DIFF_INSERT, "3BC")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abcd"), (self.dmp.DIFF_EQUAL, "1212"), (self.dmp.DIFF_INSERT, "efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A"), (self.dmp.DIFF_EQUAL, "3"), (self.dmp.DIFF_INSERT, "BC")], diffs)
def testDiffCleanupEfficiency(self):
# Clean up operationally trivial equalities.
self.dmp.Diff_EditCost = 4
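# A short equality is folded into the surrounding edits when keeping it
# would cost more than Diff_EditCost characters of edit overhead.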
# Null case.
diffs = []
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([], diffs)
# No elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")], diffs)
# Four-edit elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xyz34")], diffs)
# Three-edit elimination.
diffs = [(self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "xcd"), (self.dmp.DIFF_INSERT, "12x34")], diffs)
# Backpass elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xy"), (self.dmp.DIFF_INSERT, "34"), (self.dmp.DIFF_EQUAL, "z"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "56")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xy34z56")], diffs)
# High cost elimination.
self.dmp.Diff_EditCost = 5
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abwxyzcd"), (self.dmp.DIFF_INSERT, "12wxyz34")], diffs)
self.dmp.Diff_EditCost = 4
def testDiffPrettyHtml(self):
# Pretty print.
diffs = [(self.dmp.DIFF_EQUAL, "a\n"), (self.dmp.DIFF_DELETE, "<B>b</B>"), (self.dmp.DIFF_INSERT, "c&d")]
self.assertEquals("<span>a¶<br></span><del style=\"background:#ffe6e6;\"><B>b</B></del><ins style=\"background:#e6ffe6;\">c&d</ins>", self.dmp.diff_prettyHtml(diffs))
def testDiffText(self):
# Compute the source and destination texts.
diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy")]
self.assertEquals("jumps over the lazy", self.dmp.diff_text1(diffs))
self.assertEquals("jumped over a lazy", self.dmp.diff_text2(diffs))
def testDiffDelta(self):
# Convert a diff into delta string.
diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy"), (self.dmp.DIFF_INSERT, "old dog")]
text1 = self.dmp.diff_text1(diffs)
self.assertEquals("jumps over the lazy", text1)
delta = self.dmp.diff_toDelta(diffs)
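# Delta format: tab-separated operations where "=n" keeps n characters,
# "-n" deletes n characters, and "+text" inserts text (%xx-escaped).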
self.assertEquals("=4\t-1\t+ed\t=6\t-3\t+a\t=5\t+old dog", delta)
# Convert delta string into a diff.
self.assertEquals(diffs, self.dmp.diff_fromDelta(text1, delta))
# Generates error (19 != 20).
try:
self.dmp.diff_fromDelta(text1 + "x", delta)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Generates error (19 != 18).
try:
self.dmp.diff_fromDelta(text1[1:], delta)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Generates error (%c3%xy invalid Unicode).
try:
self.dmp.diff_fromDelta("", "+%c3xy")
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Test deltas with special characters.
diffs = [(self.dmp.DIFF_EQUAL, u"\u0680 \x00 \t %"), (self.dmp.DIFF_DELETE, u"\u0681 \x01 \n ^"), (self.dmp.DIFF_INSERT, u"\u0682 \x02 \\ |")]
text1 = self.dmp.diff_text1(diffs)
self.assertEquals(u"\u0680 \x00 \t %\u0681 \x01 \n ^", text1)
delta = self.dmp.diff_toDelta(diffs)
self.assertEquals("=7\t-7\t+%DA%82 %02 %5C %7C", delta)
# Convert delta string into a diff.
self.assertEquals(diffs, self.dmp.diff_fromDelta(text1, delta))
# Verify pool of unchanged characters.
diffs = [(self.dmp.DIFF_INSERT, "A-Z a-z 0-9 - _ . ! ~ * ' ( ) ; / ? : @ & = + $ , # ")]
text2 = self.dmp.diff_text2(diffs)
self.assertEquals("A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", text2)
delta = self.dmp.diff_toDelta(diffs)
self.assertEquals("+A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", delta)
# Convert delta string into a diff.
self.assertEquals(diffs, self.dmp.diff_fromDelta("", delta))
def testDiffXIndex(self):
# Translate a location in text1 to text2.
self.assertEquals(5, self.dmp.diff_xIndex([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 2))
# Translation on deletion.
self.assertEquals(1, self.dmp.diff_xIndex([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 3))
def testDiffLevenshtein(self):
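# diff_levenshtein sums, over each run of edits between equalities, the
# larger of the inserted and deleted character counts.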
# Levenshtein with trailing equality.
self.assertEquals(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")]))
# Levenshtein with leading equality.
self.assertEquals(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234")]))
# Levenshtein with middle equality.
self.assertEquals(7, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_INSERT, "1234")]))
def testDiffBisect(self):
# Normal.
a = "cat"
b = "map"
# Since the resulting diff hasn't been normalized, it would be ok if
# the insertion and deletion pairs are swapped.
# If the order changes, tweak this test as required.
self.assertEquals([(self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "m"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "t"), (self.dmp.DIFF_INSERT, "p")], self.dmp.diff_bisect(a, b, sys.maxint))
# Timeout.
self.assertEquals([(self.dmp.DIFF_DELETE, "cat"), (self.dmp.DIFF_INSERT, "map")], self.dmp.diff_bisect(a, b, 0))
def testDiffMain(self):
# Perform a trivial diff.
# Null case.
self.assertEquals([], self.dmp.diff_main("", "", False))
# Equality.
self.assertEquals([(self.dmp.DIFF_EQUAL, "abc")], self.dmp.diff_main("abc", "abc", False))
# Simple insertion.
self.assertEquals([(self.dmp.DIFF_EQUAL, "ab"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "ab123c", False))
# Simple deletion.
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "bc")], self.dmp.diff_main("a123bc", "abc", False))
# Two insertions.
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_INSERT, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "a123b456c", False))
# Two deletions.
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("a123b456c", "abc", False))
# Perform a real diff.
# Switch off the timeout.
self.dmp.Diff_Timeout = 0
# Simple cases.
self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b")], self.dmp.diff_main("a", "b", False))
self.assertEquals([(self.dmp.DIFF_DELETE, "Apple"), (self.dmp.DIFF_INSERT, "Banana"), (self.dmp.DIFF_EQUAL, "s are a"), (self.dmp.DIFF_INSERT, "lso"), (self.dmp.DIFF_EQUAL, " fruit.")], self.dmp.diff_main("Apples are a fruit.", "Bananas are also fruit.", False))
self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, u"\u0680"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "\t"), (self.dmp.DIFF_INSERT, "\x00")], self.dmp.diff_main("ax\t", u"\u0680x\x00", False))
# Overlaps.
self.assertEquals([(self.dmp.DIFF_DELETE, "1"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "y"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "2"), (self.dmp.DIFF_INSERT, "xab")], self.dmp.diff_main("1ayb2", "abxab", False))
self.assertEquals([(self.dmp.DIFF_INSERT, "xaxcx"), (self.dmp.DIFF_EQUAL, "abc"), (self.dmp.DIFF_DELETE, "y")], self.dmp.diff_main("abcy", "xaxcxabc", False))
self.assertEquals([(self.dmp.DIFF_DELETE, "ABCD"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "bcd"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "efghijklmnopqrs"), (self.dmp.DIFF_DELETE, "EFGHIJKLMNOefg")], self.dmp.diff_main("ABCDa=bcd=efghijklmnopqrsEFGHIJKLMNOefg", "a-bcd-efghijklmnopqrs", False))
# Large equality.
self.assertEquals([(self.dmp.DIFF_INSERT, " "), (self.dmp.DIFF_EQUAL,"a"), (self.dmp.DIFF_INSERT,"nd"), (self.dmp.DIFF_EQUAL," [[Pennsylvania]]"), (self.dmp.DIFF_DELETE," and [[New")], self.dmp.diff_main("a [[Pennsylvania]] and [[New", " and [[Pennsylvania]]", False))
# Timeout.
self.dmp.Diff_Timeout = 0.1 # 100ms
a = "`Twas brillig, and the slithy toves\nDid gyre and gimble in the wabe:\nAll mimsy were the borogoves,\nAnd the mome raths outgrabe.\n"
b = "I am the very model of a modern major general,\nI've information vegetable, animal, and mineral,\nI know the kings of England, and I quote the fights historical,\nFrom Marathon to Waterloo, in order categorical.\n"
# Increase the text lengths by 1024 times to ensure a timeout.
for x in range(10):
a = a + a
b = b + b
startTime = time.time()
self.dmp.diff_main(a, b)
endTime = time.time()
# Test that we took at least the timeout period.
self.assertTrue(self.dmp.Diff_Timeout <= endTime - startTime)
# Test that we didn't take forever (be forgiving).
# Theoretically this test could fail very occasionally if the
# OS task swaps or locks up for a second at the wrong moment.
self.assertTrue(self.dmp.Diff_Timeout * 2 > endTime - startTime)
self.dmp.Diff_Timeout = 0
# Test the linemode speedup.
# Must be long to pass the 100 char cutoff.
# Simple line-mode.
a = "1234567890\n" * 13
b = "abcdefghij\n" * 13
self.assertEquals(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
# Single line-mode.
a = "1234567890" * 13
b = "abcdefghij" * 13
self.assertEquals(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
# Overlap line-mode.
a = "1234567890\n" * 13
b = "abcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n"
texts_linemode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, True))
texts_textmode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, False))
self.assertEquals(texts_textmode, texts_linemode)
# Test null inputs.
try:
self.dmp.diff_main(None, None)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
class MatchTest(DiffMatchPatchTest):
"""MATCH TEST FUNCTIONS"""
def testMatchAlphabet(self):
# Initialise the bitmasks for Bitap.
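# Each pattern character maps to a bitmask with bit (len - 1 - i) set
# for an occurrence at index i. For "abcaba": 'a' at 0, 3, 5 gives
# 0b100101 = 37; 'b' at 1, 4 gives 0b010010 = 18; 'c' at 2 gives 8.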
self.assertEquals({"a":4, "b":2, "c":1}, self.dmp.match_alphabet("abc"))
self.assertEquals({"a":37, "b":18, "c":8}, self.dmp.match_alphabet("abcaba"))
def testMatchBitap(self):
self.dmp.Match_Distance = 100
self.dmp.Match_Threshold = 0.5
# Exact matches.
self.assertEquals(5, self.dmp.match_bitap("abcdefghijk", "fgh", 5))
self.assertEquals(5, self.dmp.match_bitap("abcdefghijk", "fgh", 0))
# Fuzzy matches.
self.assertEquals(4, self.dmp.match_bitap("abcdefghijk", "efxhi", 0))
self.assertEquals(2, self.dmp.match_bitap("abcdefghijk", "cdefxyhijk", 5))
self.assertEquals(-1, self.dmp.match_bitap("abcdefghijk", "bxy", 1))
# Overflow.
self.assertEquals(2, self.dmp.match_bitap("123456789xx0", "3456789x0", 2))
self.assertEquals(0, self.dmp.match_bitap("abcdef", "xxabc", 4))
self.assertEquals(3, self.dmp.match_bitap("abcdef", "defyy", 4))
self.assertEquals(0, self.dmp.match_bitap("abcdef", "xabcdefy", 0))
# Threshold test.
self.dmp.Match_Threshold = 0.4
self.assertEquals(4, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
self.dmp.Match_Threshold = 0.3
self.assertEquals(-1, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
self.dmp.Match_Threshold = 0.0
self.assertEquals(1, self.dmp.match_bitap("abcdefghijk", "bcdef", 1))
self.dmp.Match_Threshold = 0.5
# Multiple select.
self.assertEquals(0, self.dmp.match_bitap("abcdexyzabcde", "abccde", 3))
self.assertEquals(8, self.dmp.match_bitap("abcdexyzabcde", "abccde", 5))
# Distance test.
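# Match_Distance scales the location penalty: a candidate this many
# characters from the expected location adds 1.0 to its score, so small
# values demand proximity and large values tolerate distant matches.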
self.dmp.Match_Distance = 10 # Strict location.
self.assertEquals(-1, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))
self.assertEquals(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdxxefg", 1))
self.dmp.Match_Distance = 1000 # Loose location.
self.assertEquals(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))
def testMatchMain(self):
# Full match.
# Shortcut matches.
self.assertEquals(0, self.dmp.match_main("abcdef", "abcdef", 1000))
self.assertEquals(-1, self.dmp.match_main("", "abcdef", 1))
self.assertEquals(3, self.dmp.match_main("abcdef", "", 3))
self.assertEquals(3, self.dmp.match_main("abcdef", "de", 3))
self.assertEquals(3, self.dmp.match_main("abcdef", "defy", 4))
self.assertEquals(0, self.dmp.match_main("abcdef", "abcdefy", 0))
# Complex match.
self.dmp.Match_Threshold = 0.7
self.assertEquals(4, self.dmp.match_main("I am the very model of a modern major general.", " that berry ", 5))
self.dmp.Match_Threshold = 0.5
# Test null inputs.
try:
self.dmp.match_main(None, None, 0)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
class PatchTest(DiffMatchPatchTest):
"""PATCH TEST FUNCTIONS"""
def testPatchObj(self):
# Patch Object.
p = dmp_module.patch_obj()
p.start1 = 20
p.start2 = 21
p.length1 = 18
p.length2 = 17
p.diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, "\nlaz")]
strp = str(p)
self.assertEquals("@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n", strp)
def testPatchFromText(self):
self.assertEquals([], self.dmp.patch_fromText(""))
strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n"
self.assertEquals(strp, str(self.dmp.patch_fromText(strp)[0]))
self.assertEquals("@@ -1 +1 @@\n-a\n+b\n", str(self.dmp.patch_fromText("@@ -1 +1 @@\n-a\n+b\n")[0]))
self.assertEquals("@@ -1,3 +0,0 @@\n-abc\n", str(self.dmp.patch_fromText("@@ -1,3 +0,0 @@\n-abc\n")[0]))
self.assertEquals("@@ -0,0 +1,3 @@\n+abc\n", str(self.dmp.patch_fromText("@@ -0,0 +1,3 @@\n+abc\n")[0]))
# Generates error.
try:
self.dmp.patch_fromText("Bad\nPatch\n")
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
def testPatchToText(self):
strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
p = self.dmp.patch_fromText(strp)
self.assertEquals(strp, self.dmp.patch_toText(p))
strp = "@@ -1,9 +1,9 @@\n-f\n+F\n oo+fooba\n@@ -7,9 +7,9 @@\n obar\n-,\n+.\n tes\n"
p = self.dmp.patch_fromText(strp)
self.assertEquals(strp, self.dmp.patch_toText(p))
def testPatchAddContext(self):
self.dmp.Patch_Margin = 4
p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps over the lazy dog.")
self.assertEquals("@@ -17,12 +17,18 @@\n fox \n-jump\n+somersault\n s ov\n", str(p))
# Same, but not enough trailing context.
p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps.")
self.assertEquals("@@ -17,10 +17,16 @@\n fox \n-jump\n+somersault\n s.\n", str(p))
# Same, but not enough leading context.
p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps.")
self.assertEquals("@@ -1,7 +1,8 @@\n Th\n-e\n+at\n qui\n", str(p))
# Same, but with ambiguity.
p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps. The quick brown fox crashes.")
self.assertEquals("@@ -1,27 +1,28 @@\n Th\n-e\n+at\n quick brown fox jumps. \n", str(p))
def testPatchMake(self):
# Null case.
patches = self.dmp.patch_make("", "")
self.assertEquals("", self.dmp.patch_toText(patches))
text1 = "The quick brown fox jumps over the lazy dog."
text2 = "That quick brown fox jumped over a lazy dog."
# Text2+Text1 inputs.
expectedPatch = "@@ -1,8 +1,7 @@\n Th\n-at\n+e\n qui\n@@ -21,17 +21,18 @@\n jump\n-ed\n+s\n over \n-a\n+the\n laz\n"
# The second patch must be "-21,17 +21,18", not "-22,17 +21,18" due to rolling context.
patches = self.dmp.patch_make(text2, text1)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Text2 inputs.
expectedPatch = "@@ -1,11 +1,12 @@\n Th\n-e\n+at\n quick b\n@@ -22,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
patches = self.dmp.patch_make(text1, text2)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Diff input.
diffs = self.dmp.diff_main(text1, text2, False)
patches = self.dmp.patch_make(diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Diff inputs.
patches = self.dmp.patch_make(text1, diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Text2+Diff inputs (deprecated).
patches = self.dmp.patch_make(text1, text2, diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Character encoding.
patches = self.dmp.patch_make("`1234567890-=[]\\;',./", "~!@#$%^&*()_+{}|:\"<>?")
self.assertEquals("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n", self.dmp.patch_toText(patches))
# Character decoding.
diffs = [(self.dmp.DIFF_DELETE, "`1234567890-=[]\\;',./"), (self.dmp.DIFF_INSERT, "~!@#$%^&*()_+{}|:\"<>?")]
self.assertEquals(diffs, self.dmp.patch_fromText("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n")[0].diffs)
# Long string with repeats.
text1 = ""
for x in range(100):
text1 += "abcdef"
text2 = text1 + "123"
expectedPatch = "@@ -573,28 +573,31 @@\n cdefabcdefabcdefabcdefabcdef\n+123\n"
patches = self.dmp.patch_make(text1, text2)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Test null inputs.
try:
self.dmp.patch_make(None, None)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
def testPatchSplitMax(self):
# Assumes that Match_MaxBits is 32.
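# Bitap matching uses one bit per pattern character in a machine word,
# so patch_splitMax breaks patches longer than Match_MaxBits into
# smaller, individually matchable hunks.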
patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz01234567890", "XabXcdXefXghXijXklXmnXopXqrXstXuvXwxXyzX01X23X45X67X89X0")
self.dmp.patch_splitMax(patches)
self.assertEquals("@@ -1,32 +1,46 @@\n+X\n ab\n+X\n cd\n+X\n ef\n+X\n gh\n+X\n ij\n+X\n kl\n+X\n mn\n+X\n op\n+X\n qr\n+X\n st\n+X\n uv\n+X\n wx\n+X\n yz\n+X\n 012345\n@@ -25,13 +39,18 @@\n zX01\n+X\n 23\n+X\n 45\n+X\n 67\n+X\n 89\n+X\n 0\n", self.dmp.patch_toText(patches))
patches = self.dmp.patch_make("abcdef1234567890123456789012345678901234567890123456789012345678901234567890uvwxyz", "abcdefuvwxyz")
oldToText = self.dmp.patch_toText(patches)
self.dmp.patch_splitMax(patches)
self.assertEquals(oldToText, self.dmp.patch_toText(patches))
patches = self.dmp.patch_make("1234567890123456789012345678901234567890123456789012345678901234567890", "abc")
self.dmp.patch_splitMax(patches)
self.assertEquals("@@ -1,32 +1,4 @@\n-1234567890123456789012345678\n 9012\n@@ -29,32 +1,4 @@\n-9012345678901234567890123456\n 7890\n@@ -57,14 +1,3 @@\n-78901234567890\n+abc\n", self.dmp.patch_toText(patches))
patches = self.dmp.patch_make("abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1", "abcdefghij , h : 1 , t : 1 abcdefghij , h : 1 , t : 1 abcdefghij , h : 0 , t : 1")
self.dmp.patch_splitMax(patches)
self.assertEquals("@@ -2,32 +2,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n@@ -29,32 +29,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n", self.dmp.patch_toText(patches))
def testPatchAddPadding(self):
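# patch_addPadding pads both ends of the text with the control
# characters \x01..\x04 (Patch_Margin of them) so patches that touch
# the string edges still have context to match against.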
# Both edges full.
patches = self.dmp.patch_make("", "test")
self.assertEquals("@@ -0,0 +1,4 @@\n+test\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEquals("@@ -1,8 +1,12 @@\n %01%02%03%04\n+test\n %01%02%03%04\n", self.dmp.patch_toText(patches))
# Both edges partial.
patches = self.dmp.patch_make("XY", "XtestY")
self.assertEquals("@@ -1,2 +1,6 @@\n X\n+test\n Y\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEquals("@@ -2,8 +2,12 @@\n %02%03%04X\n+test\n Y%01%02%03\n", self.dmp.patch_toText(patches))
# Both edges none.
patches = self.dmp.patch_make("XXXXYYYY", "XXXXtestYYYY")
self.assertEquals("@@ -1,8 +1,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEquals("@@ -5,8 +5,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))
def testPatchApply(self):
self.dmp.Match_Distance = 1000
self.dmp.Match_Threshold = 0.5
self.dmp.Patch_DeleteThreshold = 0.5
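# Patch_DeleteThreshold sets how closely the text being deleted must
# match the expected text for a large-delete patch to apply
# (0.0 = exact match required, 1.0 = anything goes).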
# Null case.
patches = self.dmp.patch_make("", "")
results = self.dmp.patch_apply(patches, "Hello world.")
self.assertEquals(("Hello world.", []), results)
# Exact match.
patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "That quick brown fox jumped over a lazy dog.")
results = self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
self.assertEquals(("That quick brown fox jumped over a lazy dog.", [True, True]), results)
# Partial match.
results = self.dmp.patch_apply(patches, "The quick red rabbit jumps over the tired tiger.")
self.assertEquals(("That quick red rabbit jumped over a tired tiger.", [True, True]), results)
# Failed match.
results = self.dmp.patch_apply(patches, "I am the very model of a modern major general.")
self.assertEquals(("I am the very model of a modern major general.", [False, False]), results)
# Big delete, small change.
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x123456789012345678901234567890-----++++++++++-----123456789012345678901234567890y")
self.assertEquals(("xabcy", [True, True]), results)
# Big delete, big change 1.
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
self.assertEquals(("xabc12345678901234567890---------------++++++++++---------------12345678901234567890y", [False, True]), results)
# Big delete, big change 2.
self.dmp.Patch_DeleteThreshold = 0.6
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
self.assertEquals(("xabcy", [True, True]), results)
self.dmp.Patch_DeleteThreshold = 0.5
# Compensate for failed patch.
self.dmp.Match_Threshold = 0.0
self.dmp.Match_Distance = 0
patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz--------------------1234567890", "abcXXXXXXXXXXdefghijklmnopqrstuvwxyz--------------------1234567YYYYYYYYYY890")
results = self.dmp.patch_apply(patches, "ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567890")
self.assertEquals(("ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567YYYYYYYYYY890", [False, True]), results)
self.dmp.Match_Threshold = 0.5
self.dmp.Match_Distance = 1000
# No side effects.
patches = self.dmp.patch_make("", "test")
patchstr = self.dmp.patch_toText(patches)
results = self.dmp.patch_apply(patches, "")
self.assertEquals(patchstr, self.dmp.patch_toText(patches))
# No side effects with major delete.
patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "Woof")
patchstr = self.dmp.patch_toText(patches)
self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
self.assertEquals(patchstr, self.dmp.patch_toText(patches))
# Edge exact match.
patches = self.dmp.patch_make("", "test")
results = self.dmp.patch_apply(patches, "")
self.assertEquals(("test", [True]), results)
# Near edge exact match.
patches = self.dmp.patch_make("XY", "XtestY")
results = self.dmp.patch_apply(patches, "XY")
self.assertEquals(("XtestY", [True]), results)
# Edge partial match.
patches = self.dmp.patch_make("y", "y123")
results = self.dmp.patch_apply(patches, "x")
self.assertEquals(("x123", [True]), results)
if __name__ == "__main__":
unittest.main()
|
EvaSDK/sqlalchemy
|
refs/heads/master
|
examples/versioned_history/__init__.py
|
30
|
"""
Illustrates an extension which creates version tables for entities and stores
records for each change. The given extensions generate an anonymous "history" class which
represents historical versions of the target object.
Usage is illustrated via a unit test module ``test_versioning.py``, which can
be run via nose::
cd examples/versioning
nosetests -v
A fragment of example usage, using declarative::
from history_meta import Versioned, versioned_session
Base = declarative_base()
class SomeClass(Versioned, Base):
__tablename__ = 'sometable'
id = Column(Integer, primary_key=True)
name = Column(String(50))
def __eq__(self, other):
assert type(other) is SomeClass and other.id == self.id
Session = sessionmaker(bind=engine)
versioned_session(Session)
sess = Session()
sc = SomeClass(name='sc1')
sess.add(sc)
sess.commit()
sc.name = 'sc1modified'
sess.commit()
assert sc.version == 2
SomeClassHistory = SomeClass.__history_mapper__.class_
assert sess.query(SomeClassHistory).\\
filter(SomeClassHistory.version == 1).\\
all() \\
== [SomeClassHistory(version=1, name='sc1')]
The ``Versioned`` mixin is designed to work with declarative. To use
the extension with classical mappers, the ``_history_mapper`` function
can be applied::
from history_meta import _history_mapper
m = mapper(SomeClass, sometable)
_history_mapper(m)
SomeHistoryClass = SomeClass.__history_mapper__.class_
.. autosource::
"""
|
kjurek/pyTotalActivation
|
refs/heads/master
|
setup.py
|
2
|
import os
from setuptools import setup, find_packages
PACKAGES = find_packages()
# Get version and release info, which is all stored in TotalActivation/version.py
ver_file = os.path.join('TotalActivation', 'version.py')
with open(ver_file) as f:
exec(f.read())
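# Executing version.py injects NAME, VERSION, REQUIRES and the other
# metadata names referenced below into this module's namespace.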
opts = dict(name=NAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url=URL,
download_url=DOWNLOAD_URL,
license=LICENSE,
classifiers=CLASSIFIERS,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
platforms=PLATFORMS,
version=VERSION,
packages=PACKAGES,
package_data=PACKAGE_DATA,
install_requires=REQUIRES,
requires=REQUIRES)
if __name__ == '__main__':
setup(**opts)
|
dnxbjyj/python-basic
|
refs/heads/master
|
gui/wxpython/wxPython-demo-4.0.1/samples/floatcanvas/GroupDemo.py
|
1
|
#!/usr/bin/env python
"""
A small demo of how to use Groups of Objects
"""
import wx
## import the installed version
from wx.lib.floatcanvas import NavCanvas, FloatCanvas
## import a local version
#import sys
#sys.path.append("..")
#from floatcanvas import NavCanvas, FloatCanvas
class DrawFrame(wx.Frame):
"""
A frame used for the FloatCanvas Demo
"""
def __init__(self, *args, **kwargs):
wx.Frame.__init__(self, *args, **kwargs)
self.CreateStatusBar()
# Add the Canvas
NC = NavCanvas.NavCanvas(self,-1,
size = (500,500),
ProjectionFun = None,
Debug = 0,
BackgroundColor = "DARK SLATE BLUE",
)
Canvas = NC.Canvas
self.Canvas = Canvas
self.Canvas.Bind(FloatCanvas.EVT_MOTION, self.OnMove )
Point = (45,40)
## create a few Objects:
C = FloatCanvas.Circle((0, 0), 10, FillColor="Red")
R = FloatCanvas.Rectangle((5, 5),(15, 8), FillColor="Blue")
E = FloatCanvas.Ellipse((1.5, 1.5), (12, 8), FillColor="Purple")
C2 = FloatCanvas.Circle((0, 5), 10, FillColor="cyan")
T = FloatCanvas.Text("Group A", (5.5, 5.5), Position="cc", Size = 16, Weight=wx.BOLD, Family=wx.SWISS)
self.GroupA = FloatCanvas.Group((R,C,E))
self.GroupA.AddObjects((C2,T))
Canvas.AddObject(self.GroupA)
## create another Group of objects
R = FloatCanvas.Rectangle((15, 15),(10, 18), FillColor="orange")
E = FloatCanvas.Ellipse((22, 28), (12, 8), FillColor="yellow")
C = FloatCanvas.Circle((25, 20), 15, FillColor="Green")
C2 = FloatCanvas.Circle((12, 22), 10, FillColor="cyan")
T = FloatCanvas.Text("Group B", (19, 24), Position="cc", Size = 16, Weight=wx.BOLD, Family=wx.SWISS)
self.GroupB = FloatCanvas.Group((R,E,C,C2,T))
Canvas.AddObject(self.GroupB)
self.Groups = {"A":self.GroupA, "B":self.GroupB}
# Add a couple of tools to the Canvas Toolbar
tb = NC.ToolBar
# tb.AddSeparator()
for Group in self.Groups.keys():
Button = wx.Button(tb, wx.ID_ANY, "Hide/Show%s"%Group)
tb.AddControl(Button)
print(Group)
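# Capture the current group via a default argument; a bare closure
# would see only the final value of the loop variable.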
Button.Bind(wx.EVT_BUTTON, lambda evt, group=Group: self.HideGroup(evt, group))
tb.Realize()
self.Show()
Canvas.ZoomToBB()
def OnMove(self, event):
"""
Updates the status bar with the world coordinates of the mouse position
"""
self.SetStatusText("%.2f, %.2f"%tuple(event.Coords))
def HideGroup(self, evt, group=""):
G = self.Groups[group]
G.Visible = not G.Visible
self.Canvas.Draw(Force=True)
app = wx.App(False)
F = DrawFrame(None, title="FloatCanvas Demo App", size=(700,700) )
app.MainLoop()
|
yamt/neutron
|
refs/heads/master
|
quantum/common/__init__.py
|
32
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Somik Behera, Nicira Networks, Inc.
|
RogerioBorba/geonode
|
refs/heads/master
|
geonode/people/tests.py
|
30
|
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.core import mail
from django.contrib.sites.models import Site
class PeopleTest(TestCase):
fixtures = ('people_data.json', 'bobby.json')
def test_forgot_username(self):
url = reverse('forgot_username')
# page renders
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
# and responds for a bad email
response = self.client.post(url, data={
'email': 'foobar@doesnotexist.com'
})
# self.assertContains(response, "No user could be found with that email address.")
admin = get_user_model().objects.get(username='bobby')
response = self.client.post(url, data={
'email': admin.email
})
# and sends a mail for a good one
self.assertEqual(len(mail.outbox), 1)
site = Site.objects.get_current()
# Verify that the subject of the first message is correct.
self.assertEqual(mail.outbox[0].subject, "Your username for " + site.name)
def test_account_email_sync(self):
'''verify we can create an account and modify it keeping emails in sync'''
from geonode.people.models import Profile
email = 'joe@b.ob'
joebob = Profile.objects.create(username='joebob', email=email)
self.assertEqual(joebob.emailaddress_set.get(primary=True).email, email)
email = 'jo@eb.ob'
joebob.email = email
joebob.save()
self.assertEqual(joebob.emailaddress_set.get(primary=True).email, email)
email = joebob.emailaddress_set.get(primary=True)
email.email = 'j@oe.bob'
email.save()
joebob = Profile.objects.get(id=joebob.id)
self.assertEqual(email.email, joebob.email)
|
Juniper/ceilometer
|
refs/heads/master
|
ceilometer/agent/manager.py
|
1
|
#
# Copyright 2012-2013 eNovance <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient.v2_0 import client as ksclient
from oslo_config import cfg
from ceilometer.agent import base
from ceilometer.openstack.common import log
OPTS = [
cfg.StrOpt('partitioning_group_prefix',
default=None,
deprecated_group='central',
help='Work-load partitioning group prefix. Use only if you '
'want to run multiple polling agents with different '
'config files. For each sub-group of the agent '
'pool with the same partitioning_group_prefix a disjoint '
'subset of pollsters should be loaded.'),
]
cfg.CONF.register_opts(OPTS, group='polling')
cfg.CONF.import_group('service_credentials', 'ceilometer.service')
cfg.CONF.import_opt('http_timeout', 'ceilometer.service')
LOG = log.getLogger(__name__)
class AgentManager(base.AgentManager):
def __init__(self, namespaces=None, pollster_list=None):
namespaces = namespaces or ['compute', 'central']
pollster_list = pollster_list or []
super(AgentManager, self).__init__(
namespaces, pollster_list,
group_prefix=cfg.CONF.polling.partitioning_group_prefix)
def interval_task(self, task):
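# Rebuild the keystone client before each polling cycle; if creation
# fails, the exception is stored in self.keystone so pollsters can
# detect that keystone is unavailable.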
try:
self.keystone = ksclient.Client(
username=cfg.CONF.service_credentials.os_username,
password=cfg.CONF.service_credentials.os_password,
tenant_id=cfg.CONF.service_credentials.os_tenant_id,
tenant_name=cfg.CONF.service_credentials.os_tenant_name,
cacert=cfg.CONF.service_credentials.os_cacert,
auth_url=cfg.CONF.service_credentials.os_auth_url,
region_name=cfg.CONF.service_credentials.os_region_name,
insecure=cfg.CONF.service_credentials.insecure,
timeout=cfg.CONF.http_timeout,)
except Exception as e:
self.keystone = e
super(AgentManager, self).interval_task(task)
|