| repo_name (stringlengths 5–100) | ref (stringlengths 12–67) | path (stringlengths 4–244) | copies (stringlengths 1–8) | content (stringlengths 0–1.05M, ⌀) |
|---|---|---|---|---|
| alirizakeles/zato | refs/heads/dsuch-f-gh723-add-exe-agent | code/zato-common/test/__init__.py | 12133432 | |
| figarocorso/mss | refs/heads/master | mss/www/cpserver/management/__init__.py | 12133432 | |
| maryklayne/Funcao | refs/heads/master | sympy/mpmath/libmp/libmpf.py | 23 |
"""
Low-level functions for arbitrary-precision floating-point arithmetic.
"""
__docformat__ = 'plaintext'
import math
from bisect import bisect
import sys
# Importing random is slow
#from random import getrandbits
getrandbits = None
from .backend import (MPZ, MPZ_TYPE, MPZ_ZERO, MPZ_ONE, MPZ_TWO, MPZ_FIVE,
BACKEND, STRICT, HASH_MODULUS, HASH_BITS, gmpy, sage, sage_utils)
from .libintmath import (giant_steps,
trailtable, bctable, lshift, rshift, bitcount, trailing,
sqrt_fixed, numeral, isqrt, isqrt_fast, sqrtrem,
bin_to_radix)
# We don't pickle tuples directly for the following reasons:
# 1: pickle uses str() for ints, which is inefficient when they are large
# 2: pickle doesn't work for gmpy mpzs
# Both problems are solved by using hex()
if BACKEND == 'sage':
def to_pickable(x):
sign, man, exp, bc = x
return sign, hex(man), exp, bc
else:
def to_pickable(x):
sign, man, exp, bc = x
return sign, hex(man)[2:], exp, bc
def from_pickable(x):
sign, man, exp, bc = x
return (sign, MPZ(man, 16), exp, bc)
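# Illustrative round trip (not part of the original module; assumes a
# non-sage backend where MPZ is int, so the '0x' prefix is stripped):
#     to_pickable((0, MPZ(10), -1, 4))  -> (0, 'a', -1, 4)
#     from_pickable((0, 'a', -1, 4))    -> (0, 10, -1, 4)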
class ComplexResult(ValueError):
pass
try:
intern
except NameError:
intern = lambda x: x
# All supported rounding modes
round_nearest = intern('n')
round_floor = intern('f')
round_ceiling = intern('c')
round_up = intern('u')
round_down = intern('d')
round_fast = round_down
def prec_to_dps(n):
"""Return number of accurate decimals that can be represented
with a precision of n bits."""
return max(1, int(round(int(n)/3.3219280948873626)-1))
def dps_to_prec(n):
"""Return the number of bits required to represent n decimals
accurately."""
return max(1, int(round((int(n)+1)*3.3219280948873626)))
def repr_dps(n):
"""Return the number of decimal digits required to represent
a number with n-bit precision so that it can be uniquely
reconstructed from the representation."""
dps = prec_to_dps(n)
if dps == 15:
return 17
return dps + 3
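# Worked examples (illustrative, not part of the original module):
#     prec_to_dps(53) -> 15   # 53-bit doubles give ~15 accurate decimals
#     dps_to_prec(15) -> 53
#     repr_dps(53)    -> 17   # 17 digits needed to round-trip a double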
#----------------------------------------------------------------------------#
# Some commonly needed float values #
#----------------------------------------------------------------------------#
# Regular number format:
# (-1)**sign * mantissa * 2**exponent, plus bitcount of mantissa
fzero = (0, MPZ_ZERO, 0, 0)
fnzero = (1, MPZ_ZERO, 0, 0)
fone = (0, MPZ_ONE, 0, 1)
fnone = (1, MPZ_ONE, 0, 1)
ftwo = (0, MPZ_ONE, 1, 1)
ften = (0, MPZ_FIVE, 1, 3)
fhalf = (0, MPZ_ONE, -1, 1)
# Arbitrary encoding for special numbers: zero mantissa, nonzero exponent
fnan = (0, MPZ_ZERO, -123, -1)
finf = (0, MPZ_ZERO, -456, -2)
fninf = (1, MPZ_ZERO, -789, -3)
# Was 1e1000; this is broken in Python 2.4
math_float_inf = 1e300 * 1e300
#----------------------------------------------------------------------------#
# Rounding #
#----------------------------------------------------------------------------#
# This function can be used to round a mantissa generally. However,
# we will try to do most rounding inline for efficiency.
def round_int(x, n, rnd):
if rnd == round_nearest:
if x >= 0:
t = x >> (n-1)
if t & 1 and ((t & 2) or (x & h_mask[n<300][n])):
return (t>>1)+1
else:
return t>>1
else:
return -round_int(-x, n, rnd)
if rnd == round_floor:
return x >> n
if rnd == round_ceiling:
return -((-x) >> n)
if rnd == round_down:
if x >= 0:
return x >> n
return -((-x) >> n)
if rnd == round_up:
if x >= 0:
return -((-x) >> n)
return x >> n
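# Illustrative examples (not part of the original module); round_int(x, n, rnd)
# rounds x / 2**n to an integer in the direction rnd:
#     round_int(5, 1, round_nearest)  -> 2    # 2.5 ties to even
#     round_int(7, 1, round_nearest)  -> 4    # 3.5 rounds up
#     round_int(-5, 1, round_floor)   -> -3   # floor of -2.5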
# These masks are used to pick out segments of numbers to determine
# which direction to round when rounding to nearest.
class h_mask_big:
def __getitem__(self, n):
return (MPZ_ONE<<(n-1))-1
h_mask_small = [0]+[((MPZ_ONE<<(_-1))-1) for _ in range(1, 300)]
h_mask = [h_mask_big(), h_mask_small]
# The >> operator rounds to floor. shifts_down[rnd][sign]
# tells whether this is the right direction to use, or if the
# number should be negated before shifting
shifts_down = {round_floor:(1,0), round_ceiling:(0,1),
round_down:(1,1), round_up:(0,0)}
#----------------------------------------------------------------------------#
# Normalization of raw mpfs #
#----------------------------------------------------------------------------#
# This function is called almost every time an mpf is created.
# It has been optimized accordingly.
def _normalize(sign, man, exp, bc, prec, rnd):
"""
Create a raw mpf tuple with value (-1)**sign * man * 2**exp and
normalized mantissa. The mantissa is rounded in the specified
direction if its size exceeds the precision. Trailing zero bits
are also stripped from the mantissa to ensure that the
representation is canonical.
Conditions on the input:
* The input must represent a regular (finite) number
* The sign bit must be 0 or 1
* The mantissa must be positive
* The exponent must be an integer
* The bitcount must be exact
If these conditions are not met, use from_man_exp, mpf_pos, or any
of the conversion functions to create normalized raw mpf tuples.
"""
if not man:
return fzero
# Cut mantissa down to size if larger than target precision
n = bc - prec
if n > 0:
if rnd == round_nearest:
t = man >> (n-1)
if t & 1 and ((t & 2) or (man & h_mask[n<300][n])):
man = (t>>1)+1
else:
man = t>>1
elif shifts_down[rnd][sign]:
man >>= n
else:
man = -((-man)>>n)
exp += n
bc = prec
# Strip trailing bits
if not man & 1:
t = trailtable[int(man & 255)]
if not t:
while not man & 255:
man >>= 8
exp += 8
bc -= 8
t = trailtable[int(man & 255)]
man >>= t
exp += t
bc -= t
# Bit count can be wrong if the input mantissa was 1 less than
# a power of 2 and got rounded up, thereby adding an extra bit.
# With trailing bits removed, all powers of two have mantissa 1,
# so this is easy to check for.
if man == 1:
bc = 1
return sign, man, exp, bc
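# Illustrative example (not part of the original module): 24 = 3 * 2**3, so
# normalization strips the three trailing zero bits from the mantissa:
#     _normalize(0, MPZ(24), 0, 5, 53, round_nearest) -> (0, 3, 3, 2)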
def _normalize1(sign, man, exp, bc, prec, rnd):
"""same as normalize, but with the added condition that
man is odd or zero
"""
if not man:
return fzero
if bc <= prec:
return sign, man, exp, bc
n = bc - prec
if rnd == round_nearest:
t = man >> (n-1)
if t & 1 and ((t & 2) or (man & h_mask[n<300][n])):
man = (t>>1)+1
else:
man = t>>1
elif shifts_down[rnd][sign]:
man >>= n
else:
man = -((-man)>>n)
exp += n
bc = prec
# Strip trailing bits
if not man & 1:
t = trailtable[int(man & 255)]
if not t:
while not man & 255:
man >>= 8
exp += 8
bc -= 8
t = trailtable[int(man & 255)]
man >>= t
exp += t
bc -= t
# Bit count can be wrong if the input mantissa was 1 less than
# a power of 2 and got rounded up, thereby adding an extra bit.
# With trailing bits removed, all powers of two have mantissa 1,
# so this is easy to check for.
if man == 1:
bc = 1
return sign, man, exp, bc
try:
_exp_types = (int, long)
except NameError:
_exp_types = (int,)
def strict_normalize(sign, man, exp, bc, prec, rnd):
"""Additional checks on the components of an mpf. Enable tests by setting
the environment variable MPMATH_STRICT to Y."""
assert type(man) == MPZ_TYPE
assert type(bc) in _exp_types
assert type(exp) in _exp_types
assert bc == bitcount(man)
return _normalize(sign, man, exp, bc, prec, rnd)
def strict_normalize1(sign, man, exp, bc, prec, rnd):
"""Additional checks on the components of an mpf. Enable tests by setting
the environment variable MPMATH_STRICT to Y."""
assert type(man) == MPZ_TYPE
assert type(bc) in _exp_types
assert type(exp) in _exp_types
assert bc == bitcount(man)
assert (not man) or (man & 1)
return _normalize1(sign, man, exp, bc, prec, rnd)
if BACKEND == 'gmpy' and '_mpmath_normalize' in dir(gmpy):
_normalize = gmpy._mpmath_normalize
_normalize1 = gmpy._mpmath_normalize
if BACKEND == 'sage':
_normalize = _normalize1 = sage_utils.normalize
if STRICT:
normalize = strict_normalize
normalize1 = strict_normalize1
else:
normalize = _normalize
normalize1 = _normalize1
#----------------------------------------------------------------------------#
# Conversion functions #
#----------------------------------------------------------------------------#
def from_man_exp(man, exp, prec=None, rnd=round_fast):
"""Create raw mpf from (man, exp) pair. The mantissa may be signed.
If no precision is specified, the mantissa is stored exactly."""
man = MPZ(man)
sign = 0
if man < 0:
sign = 1
man = -man
if man < 1024:
bc = bctable[int(man)]
else:
bc = bitcount(man)
if not prec:
if not man:
return fzero
if not man & 1:
if man & 2:
return (sign, man >> 1, exp + 1, bc - 1)
t = trailtable[int(man & 255)]
if not t:
while not man & 255:
man >>= 8
exp += 8
bc -= 8
t = trailtable[int(man & 255)]
man >>= t
exp += t
bc -= t
return (sign, man, exp, bc)
return normalize(sign, man, exp, bc, prec, rnd)
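# Illustrative examples (not part of the original module; assuming the
# Python-int backend where MPZ is int):
#     from_man_exp(10, -1) -> (0, 5, 0, 3)   # 10 * 2**-1 = 5.0, canonicalized
#     from_man_exp(-3, 0)  -> (1, 3, 0, 2)   # sign bit set for -3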
int_cache = dict((n, from_man_exp(n, 0)) for n in range(-10, 257))
if BACKEND == 'gmpy' and '_mpmath_create' in dir(gmpy):
from_man_exp = gmpy._mpmath_create
if BACKEND == 'sage':
from_man_exp = sage_utils.from_man_exp
def from_int(n, prec=0, rnd=round_fast):
"""Create a raw mpf from an integer. If no precision is specified,
the mantissa is stored exactly."""
if not prec:
if n in int_cache:
return int_cache[n]
return from_man_exp(n, 0, prec, rnd)
def to_man_exp(s):
"""Return (man, exp) of a raw mpf. Raise an error if inf/nan."""
sign, man, exp, bc = s
if (not man) and exp:
raise ValueError("mantissa and exponent are undefined for %s" % man)
return man, exp
def to_int(s, rnd=None):
"""Convert a raw mpf to the nearest int. Rounding is done down by
default (same as int(float) in Python), but can be changed. If the
input is inf/nan, an exception is raised."""
sign, man, exp, bc = s
if (not man) and exp:
raise ValueError("cannot convert %s to int" % man)
if exp >= 0:
if sign:
return (-man) << exp
return man << exp
# Make default rounding fast
if not rnd:
if sign:
return -(man >> (-exp))
else:
return man >> (-exp)
if sign:
return round_int(-man, -exp, rnd)
else:
return round_int(man, -exp, rnd)
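# Illustrative examples (not part of the original module):
#     to_int(from_man_exp(7, -1))                -> 3   # 3.5 truncates like int()
#     to_int(from_man_exp(7, -1), round_nearest) -> 4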
def mpf_round_int(s, rnd):
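# Round the raw mpf s to a raw mpf with integer value, in the direction rnd.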
sign, man, exp, bc = s
if (not man) and exp:
return s
if exp >= 0:
return s
mag = exp+bc
if mag < 1:
if rnd == round_ceiling:
if sign: return fzero
else: return fone
elif rnd == round_floor:
if sign: return fnone
else: return fzero
elif rnd == round_nearest:
if mag < 0 or man == MPZ_ONE: return fzero
elif sign: return fnone
else: return fone
else:
raise NotImplementedError
return mpf_pos(s, min(bc, mag), rnd)
def mpf_floor(s, prec=0, rnd=round_fast):
v = mpf_round_int(s, round_floor)
if prec:
v = mpf_pos(v, prec, rnd)
return v
def mpf_ceil(s, prec=0, rnd=round_fast):
v = mpf_round_int(s, round_ceiling)
if prec:
v = mpf_pos(v, prec, rnd)
return v
def mpf_nint(s, prec=0, rnd=round_fast):
v = mpf_round_int(s, round_nearest)
if prec:
v = mpf_pos(v, prec, rnd)
return v
def mpf_frac(s, prec=0, rnd=round_fast):
return mpf_sub(s, mpf_floor(s), prec, rnd)
def from_float(x, prec=53, rnd=round_fast):
"""Create a raw mpf from a Python float, rounding if necessary.
If prec >= 53, the result is guaranteed to represent exactly the
same number as the input. If prec is not specified, use prec=53."""
# frexp only raises an exception for nan on some platforms
if x != x:
return fnan
# in Python 2.5 math.frexp gives an exception for float infinity
# in Python 2.6 it returns (float infinity, 0)
try:
m, e = math.frexp(x)
except:
if x == math_float_inf: return finf
if x == -math_float_inf: return fninf
return fnan
if x == math_float_inf: return finf
if x == -math_float_inf: return fninf
return from_man_exp(int(m*(1<<53)), e-53, prec, rnd)
def to_float(s, strict=False):
"""
Convert a raw mpf to a Python float. The result is exact if the
bitcount of s is <= 53 and no underflow/overflow occurs.
If the number is too large or too small to represent as a regular
float, it will be converted to inf or 0.0. Setting strict=True
forces an OverflowError to be raised instead.
"""
sign, man, exp, bc = s
if not man:
if s == fzero: return 0.0
if s == finf: return math_float_inf
if s == fninf: return -math_float_inf
return math_float_inf/math_float_inf
if sign:
man = -man
try:
if bc < 100:
return math.ldexp(man, exp)
# Try resizing the mantissa. Overflow may still happen here.
n = bc - 53
m = man >> n
return math.ldexp(m, exp + n)
except OverflowError:
if strict:
raise
# Overflow to infinity
if exp + bc > 0:
if sign:
return -math_float_inf
else:
return math_float_inf
# Underflow to zero
return 0.0
def from_rational(p, q, prec, rnd=round_fast):
"""Create a raw mpf from a rational number p/q, round if
necessary."""
return mpf_div(from_int(p), from_int(q), prec, rnd)
def to_rational(s):
"""Convert a raw mpf to a rational number. Return integers (p, q)
such that s = p/q exactly."""
sign, man, exp, bc = s
if sign:
man = -man
if bc == -1:
raise ValueError("cannot convert %s to a rational number" % man)
if exp >= 0:
return man * (1<<exp), 1
else:
return man, 1<<(-exp)
def to_fixed(s, prec):
"""Convert a raw mpf to a fixed-point big integer"""
sign, man, exp, bc = s
offset = exp + prec
if sign:
if offset >= 0: return (-man) << offset
else: return (-man) >> (-offset)
else:
if offset >= 0: return man << offset
else: return man >> (-offset)
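# Illustrative examples (not part of the original module): the result is the
# value of s scaled by 2**prec and truncated to an integer:
#     to_fixed(fhalf, 8)        -> 128      # 0.5 * 2**8
#     to_fixed(from_int(3), 16) -> 196608   # 3 * 2**16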
##############################################################################
##############################################################################
#----------------------------------------------------------------------------#
# Arithmetic operations, etc. #
#----------------------------------------------------------------------------#
def mpf_rand(prec):
"""Return a raw mpf chosen randomly from [0, 1), with prec bits
in the mantissa."""
global getrandbits
if not getrandbits:
import random
getrandbits = random.getrandbits
return from_man_exp(getrandbits(prec), -prec, prec, round_floor)
def mpf_eq(s, t):
"""Test equality of two raw mpfs. This is simply tuple comparison
unless either number is nan, in which case the result is False."""
if not s[1] or not t[1]:
if s == fnan or t == fnan:
return False
return s == t
def mpf_hash(s):
# Duplicate the new hash algorithm introduced in Python 3.2.
if sys.version_info >= (3, 2):
ssign, sman, sexp, sbc = s
# Handle special numbers
if not sman:
if s == fnan: return sys.hash_info.nan
if s == finf: return sys.hash_info.inf
if s == fninf: return -sys.hash_info.inf
h = sman % HASH_MODULUS
if sexp >= 0:
sexp = sexp % HASH_BITS
else:
sexp = HASH_BITS - 1 - ((-1 - sexp) % HASH_BITS)
h = (h << sexp) % HASH_MODULUS
if ssign: h = -h
if h == -1: h = -2
return int(h)
else:
try:
# Try to be compatible with hash values for floats and ints
return hash(to_float(s, strict=1))
except OverflowError:
# We must unfortunately sacrifice compatibility with ints here.
# We could do hash(man << exp) when the exponent is positive, but
# this would cause unreasonable inefficiency for large numbers.
return hash(s)
def mpf_cmp(s, t):
"""Compare the raw mpfs s and t. Return -1 if s < t, 0 if s == t,
and 1 if s > t. (Same convention as Python's cmp() function.)"""
# In principle, a comparison amounts to determining the sign of s-t.
# A full subtraction is relatively slow, however, so we first try to
# look at the components.
ssign, sman, sexp, sbc = s
tsign, tman, texp, tbc = t
# Handle zeros and special numbers
if not sman or not tman:
if s == fzero: return -mpf_sign(t)
if t == fzero: return mpf_sign(s)
if s == t: return 0
# Follow same convention as Python's cmp for float nan
if t == fnan: return 1
if s == finf: return 1
if t == fninf: return 1
return -1
# Different sides of zero
if ssign != tsign:
if not ssign: return 1
return -1
# This reduces to direct integer comparison
if sexp == texp:
if sman == tman:
return 0
if sman > tman:
if ssign: return -1
else: return 1
else:
if ssign: return 1
else: return -1
# Check position of the highest set bit in each number. If
# different, there is certainly an inequality.
a = sbc + sexp
b = tbc + texp
if ssign:
if a < b: return 1
if a > b: return -1
else:
if a < b: return -1
if a > b: return 1
# Both numbers have the same highest bit. Subtract to find
# how the lower bits compare.
delta = mpf_sub(s, t, 5, round_floor)
if delta[0]:
return -1
return 1
def mpf_lt(s, t):
if s == fnan or t == fnan:
return False
return mpf_cmp(s, t) < 0
def mpf_le(s, t):
if s == fnan or t == fnan:
return False
return mpf_cmp(s, t) <= 0
def mpf_gt(s, t):
if s == fnan or t == fnan:
return False
return mpf_cmp(s, t) > 0
def mpf_ge(s, t):
if s == fnan or t == fnan:
return False
return mpf_cmp(s, t) >= 0
def mpf_min_max(seq):
min = max = seq[0]
for x in seq[1:]:
if mpf_lt(x, min): min = x
if mpf_gt(x, max): max = x
return min, max
def mpf_pos(s, prec=0, rnd=round_fast):
"""Calculate 0+s for a raw mpf (i.e., just round s to the specified
precision)."""
if prec:
sign, man, exp, bc = s
if (not man) and exp:
return s
return normalize1(sign, man, exp, bc, prec, rnd)
return s
def mpf_neg(s, prec=None, rnd=round_fast):
"""Negate a raw mpf (return -s), rounding the result to the
specified precision. The prec argument can be omitted to do the
operation exactly."""
sign, man, exp, bc = s
if not man:
if exp:
if s == finf: return fninf
if s == fninf: return finf
return s
if not prec:
return (1-sign, man, exp, bc)
return normalize1(1-sign, man, exp, bc, prec, rnd)
def mpf_abs(s, prec=None, rnd=round_fast):
"""Return abs(s) of the raw mpf s, rounded to the specified
precision. The prec argument can be omitted to generate an
exact result."""
sign, man, exp, bc = s
if (not man) and exp:
if s == fninf:
return finf
return s
if not prec:
if sign:
return (0, man, exp, bc)
return s
return normalize1(0, man, exp, bc, prec, rnd)
def mpf_sign(s):
"""Return -1, 0, or 1 (as a Python int, not a raw mpf) depending on
whether s is negative, zero, or positive. (Nan is taken to give 0.)"""
sign, man, exp, bc = s
if not man:
if s == finf: return 1
if s == fninf: return -1
return 0
return (-1) ** sign
def mpf_add(s, t, prec=0, rnd=round_fast, _sub=0):
"""
Add the two raw mpf values s and t.
With prec=0, no rounding is performed. Note that this can
produce a very large mantissa (potentially too large to fit
in memory) if exponents are far apart.
"""
ssign, sman, sexp, sbc = s
tsign, tman, texp, tbc = t
tsign ^= _sub
# Standard case: two nonzero, regular numbers
if sman and tman:
offset = sexp - texp
if offset:
if offset > 0:
# Outside precision range; only need to perturb
if offset > 100 and prec:
delta = sbc + sexp - tbc - texp
if delta > prec + 4:
offset = prec + 4
sman <<= offset
if tsign == ssign: sman += 1
else: sman -= 1
return normalize1(ssign, sman, sexp-offset,
bitcount(sman), prec, rnd)
# Add
if ssign == tsign:
man = tman + (sman << offset)
# Subtract
else:
if ssign: man = tman - (sman << offset)
else: man = (sman << offset) - tman
if man >= 0:
ssign = 0
else:
man = -man
ssign = 1
bc = bitcount(man)
return normalize1(ssign, man, texp, bc, prec or bc, rnd)
elif offset < 0:
# Outside precision range; only need to perturb
if offset < -100 and prec:
delta = tbc + texp - sbc - sexp
if delta > prec + 4:
offset = prec + 4
tman <<= offset
if ssign == tsign: tman += 1
else: tman -= 1
return normalize1(tsign, tman, texp-offset,
bitcount(tman), prec, rnd)
# Add
if ssign == tsign:
man = sman + (tman << -offset)
# Subtract
else:
if tsign: man = sman - (tman << -offset)
else: man = (tman << -offset) - sman
if man >= 0:
ssign = 0
else:
man = -man
ssign = 1
bc = bitcount(man)
return normalize1(ssign, man, sexp, bc, prec or bc, rnd)
# Equal exponents; no shifting necessary
if ssign == tsign:
man = tman + sman
else:
if ssign: man = tman - sman
else: man = sman - tman
if man >= 0:
ssign = 0
else:
man = -man
ssign = 1
bc = bitcount(man)
return normalize(ssign, man, texp, bc, prec or bc, rnd)
# Handle zeros and special numbers
if _sub:
t = mpf_neg(t)
if not sman:
if sexp:
if s == t or tman or not texp:
return s
return fnan
if tman:
return normalize1(tsign, tman, texp, tbc, prec or tbc, rnd)
return t
if texp:
return t
if sman:
return normalize1(ssign, sman, sexp, sbc, prec or sbc, rnd)
return s
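# Illustrative examples (not part of the original module):
#     mpf_add(fone, fone)  -> (0, 1, 1, 1)    # == ftwo; trailing zero stripped
#     mpf_add(fone, fhalf) -> (0, 3, -1, 2)   # 1.5 = 3 * 2**-1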
def mpf_sub(s, t, prec=0, rnd=round_fast):
"""Return the difference of two raw mpfs, s-t. This function is
simply a wrapper of mpf_add that changes the sign of t."""
return mpf_add(s, t, prec, rnd, 1)
def mpf_sum(xs, prec=0, rnd=round_fast, absolute=False):
"""
Sum a list of mpf values efficiently and accurately
(typically no temporary roundoff occurs). If prec=0,
the final result will not be rounded either.
There may be roundoff error or cancellation if extremely
large exponent differences occur.
With absolute=True, sums the absolute values.
"""
man = 0
exp = 0
max_extra_prec = prec*2 or 1000000 # XXX
special = None
for x in xs:
xsign, xman, xexp, xbc = x
if xman:
if xsign and not absolute:
xman = -xman
delta = xexp - exp
if xexp >= exp:
# x much larger than existing sum?
# first: quick test
if (delta > max_extra_prec) and \
((not man) or delta-bitcount(abs(man)) > max_extra_prec):
man = xman
exp = xexp
else:
man += (xman << delta)
else:
delta = -delta
# x much smaller than existing sum?
if delta-xbc > max_extra_prec:
if not man:
man, exp = xman, xexp
else:
man = (man << delta) + xman
exp = xexp
elif xexp:
if absolute:
x = mpf_abs(x)
special = mpf_add(special or fzero, x, 1)
# Will be inf or nan
if special:
return special
return from_man_exp(man, exp, prec, rnd)
def gmpy_mpf_mul(s, t, prec=0, rnd=round_fast):
"""Multiply two raw mpfs"""
ssign, sman, sexp, sbc = s
tsign, tman, texp, tbc = t
sign = ssign ^ tsign
man = sman*tman
if man:
bc = bitcount(man)
if prec:
return normalize1(sign, man, sexp+texp, bc, prec, rnd)
else:
return (sign, man, sexp+texp, bc)
s_special = (not sman) and sexp
t_special = (not tman) and texp
if not s_special and not t_special:
return fzero
if fnan in (s, t): return fnan
if (not tman) and texp: s, t = t, s
if t == fzero: return fnan
return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
def gmpy_mpf_mul_int(s, n, prec, rnd=round_fast):
"""Multiply by a Python integer."""
sign, man, exp, bc = s
if not man:
return mpf_mul(s, from_int(n), prec, rnd)
if not n:
return fzero
if n < 0:
sign ^= 1
n = -n
man *= n
return normalize(sign, man, exp, bitcount(man), prec, rnd)
def python_mpf_mul(s, t, prec=0, rnd=round_fast):
"""Multiply two raw mpfs"""
ssign, sman, sexp, sbc = s
tsign, tman, texp, tbc = t
sign = ssign ^ tsign
man = sman*tman
if man:
bc = sbc + tbc - 1
bc += int(man>>bc)
if prec:
return normalize1(sign, man, sexp+texp, bc, prec, rnd)
else:
return (sign, man, sexp+texp, bc)
s_special = (not sman) and sexp
t_special = (not tman) and texp
if not s_special and not t_special:
return fzero
if fnan in (s, t): return fnan
if (not tman) and texp: s, t = t, s
if t == fzero: return fnan
return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
def python_mpf_mul_int(s, n, prec, rnd=round_fast):
"""Multiply by a Python integer."""
sign, man, exp, bc = s
if not man:
return mpf_mul(s, from_int(n), prec, rnd)
if not n:
return fzero
if n < 0:
sign ^= 1
n = -n
man *= n
# Generally n will be small
if n < 1024:
bc += bctable[int(n)] - 1
else:
bc += bitcount(n) - 1
bc += int(man>>bc)
return normalize(sign, man, exp, bc, prec, rnd)
if BACKEND == 'gmpy':
mpf_mul = gmpy_mpf_mul
mpf_mul_int = gmpy_mpf_mul_int
else:
mpf_mul = python_mpf_mul
mpf_mul_int = python_mpf_mul_int
def mpf_shift(s, n):
"""Quickly multiply the raw mpf s by 2**n without rounding."""
sign, man, exp, bc = s
if not man:
return s
return sign, man, exp+n, bc
def mpf_frexp(x):
"""Convert x = y*2**n to (y, n) with abs(y) in [0.5, 1) if nonzero"""
sign, man, exp, bc = x
if not man:
if x == fzero:
return (fzero, 0)
else:
raise ValueError
return mpf_shift(x, -bc-exp), bc+exp
def mpf_div(s, t, prec, rnd=round_fast):
"""Floating-point division"""
ssign, sman, sexp, sbc = s
tsign, tman, texp, tbc = t
if not sman or not tman:
if s == fzero:
if t == fzero: raise ZeroDivisionError
if t == fnan: return fnan
return fzero
if t == fzero:
raise ZeroDivisionError
s_special = (not sman) and sexp
t_special = (not tman) and texp
if s_special and t_special:
return fnan
if s == fnan or t == fnan:
return fnan
if not t_special:
if t == fzero:
return fnan
return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
return fzero
sign = ssign ^ tsign
if tman == 1:
return normalize1(sign, sman, sexp-texp, sbc, prec, rnd)
# Same strategy as for addition: if there is a remainder, perturb
# the result a few bits outside the precision range before rounding
extra = prec - sbc + tbc + 5
if extra < 5:
extra = 5
quot, rem = divmod(sman<<extra, tman)
if rem:
quot = (quot<<1) + 1
extra += 1
return normalize1(sign, quot, sexp-texp-extra, bitcount(quot), prec, rnd)
return normalize(sign, quot, sexp-texp-extra, bitcount(quot), prec, rnd)
def mpf_rdiv_int(n, t, prec, rnd=round_fast):
"""Floating-point division n/t with a Python integer as numerator"""
sign, man, exp, bc = t
if not n or not man:
return mpf_div(from_int(n), t, prec, rnd)
if n < 0:
sign ^= 1
n = -n
extra = prec + bc + 5
quot, rem = divmod(n<<extra, man)
if rem:
quot = (quot<<1) + 1
extra += 1
return normalize1(sign, quot, -exp-extra, bitcount(quot), prec, rnd)
return normalize(sign, quot, -exp-extra, bitcount(quot), prec, rnd)
def mpf_mod(s, t, prec, rnd=round_fast):
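# Compute s % t for raw mpfs, following Python's sign convention for the
# modulo of integers, and round the result to prec bits.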
ssign, sman, sexp, sbc = s
tsign, tman, texp, tbc = t
if ((not sman) and sexp) or ((not tman) and texp):
return fnan
# Important special case: do nothing if t is larger
if ssign == tsign and texp > sexp+sbc:
return s
# Another important special case: this allows us to do e.g. x % 1.0
# to find the fractional part of x, and it will work when x is huge.
if tman == 1 and sexp > texp+tbc:
return fzero
base = min(sexp, texp)
sman = (-1)**ssign * sman
tman = (-1)**tsign * tman
man = (sman << (sexp-base)) % (tman << (texp-base))
if man >= 0:
sign = 0
else:
man = -man
sign = 1
return normalize(sign, man, base, bitcount(man), prec, rnd)
reciprocal_rnd = {
round_down : round_up,
round_up : round_down,
round_floor : round_ceiling,
round_ceiling : round_floor,
round_nearest : round_nearest
}
negative_rnd = {
round_down : round_down,
round_up : round_up,
round_floor : round_ceiling,
round_ceiling : round_floor,
round_nearest : round_nearest
}
def mpf_pow_int(s, n, prec, rnd=round_fast):
"""Compute s**n, where s is a raw mpf and n is a Python integer."""
sign, man, exp, bc = s
if (not man) and exp:
if s == finf:
if n > 0: return s
if n == 0: return fnan
return fzero
if s == fninf:
if n > 0: return [finf, fninf][n & 1]
if n == 0: return fnan
return fzero
return fnan
n = int(n)
if n == 0: return fone
if n == 1: return mpf_pos(s, prec, rnd)
if n == 2:
_, man, exp, bc = s
if not man:
return fzero
man = man*man
if man == 1:
return (0, MPZ_ONE, exp+exp, 1)
bc = bc + bc - 2
bc += bctable[int(man>>bc)]
return normalize1(0, man, exp+exp, bc, prec, rnd)
if n == -1: return mpf_div(fone, s, prec, rnd)
if n < 0:
inverse = mpf_pow_int(s, -n, prec+5, reciprocal_rnd[rnd])
return mpf_div(fone, inverse, prec, rnd)
result_sign = sign & n
# Use exact integer power when the exact mantissa is small
if man == 1:
return (result_sign, MPZ_ONE, exp*n, 1)
if bc*n < 1000:
man **= n
return normalize1(result_sign, man, exp*n, bitcount(man), prec, rnd)
# Use directed rounding all the way through to maintain rigorous
# bounds for interval arithmetic
rounds_down = (rnd == round_nearest) or \
shifts_down[rnd][result_sign]
# Now we perform binary exponentiation. Need to estimate precision
# to avoid rounding errors from temporary operations. Roughly log_2(n)
# operations are performed.
workprec = prec + 4*bitcount(n) + 4
_, pm, pe, pbc = fone
while 1:
if n & 1:
pm = pm*man
pe = pe+exp
pbc += bc - 2
pbc = pbc + bctable[int(pm >> pbc)]
if pbc > workprec:
if rounds_down:
pm = pm >> (pbc-workprec)
else:
pm = -((-pm) >> (pbc-workprec))
pe += pbc - workprec
pbc = workprec
n -= 1
if not n:
break
man = man*man
exp = exp+exp
bc = bc + bc - 2
bc = bc + bctable[int(man >> bc)]
if bc > workprec:
if rounds_down:
man = man >> (bc-workprec)
else:
man = -((-man) >> (bc-workprec))
exp += bc - workprec
bc = workprec
n = n // 2
return normalize(result_sign, pm, pe, pbc, prec, rnd)
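# Illustrative examples (not part of the original module):
#     mpf_pow_int(ftwo, 10, 53)       -> (0, 1, 10, 1)   # 2**10 = 1024
#     mpf_pow_int(from_int(3), 3, 53) -> (0, 27, 0, 5)   # 3**3 = 27, exact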
def mpf_perturb(x, eps_sign, prec, rnd):
"""
For nonzero x, calculate x + eps with directed rounding, where
eps is about one unit in the last place at precision prec
(|eps/x| lies between 2**(-prec-1) and 2**(-prec)) and eps has
the given sign (0 for positive, 1 for negative).
With rounding to nearest, this is taken to simply normalize
x to the given precision.
"""
if rnd == round_nearest:
return mpf_pos(x, prec, rnd)
sign, man, exp, bc = x
eps = (eps_sign, MPZ_ONE, exp+bc-prec-1, 1)
if sign:
away = (rnd in (round_down, round_ceiling)) ^ eps_sign
else:
away = (rnd in (round_up, round_ceiling)) ^ eps_sign
if away:
return mpf_add(x, eps, prec, rnd)
else:
return mpf_pos(x, prec, rnd)
#----------------------------------------------------------------------------#
# Radix conversion #
#----------------------------------------------------------------------------#
def to_digits_exp(s, dps):
"""Helper function for representing the floating-point number s as
a decimal with dps digits. Returns (sign, string, exponent) where
sign is '' or '-', string is the digit string, and exponent is
the decimal exponent as an int.
If inexact, the decimal representation is rounded toward zero."""
# Extract sign first so it doesn't mess up the string digit count
if s[0]:
sign = '-'
s = mpf_neg(s)
else:
sign = ''
_sign, man, exp, bc = s
if not man:
return '', '0', 0
bitprec = int(dps * math.log(10,2)) + 10
# Cut down to size
# TODO: account for precision when doing this
exp_from_1 = exp + bc
if abs(exp_from_1) > 3500:
from .libelefun import mpf_ln2, mpf_ln10
# Set b = int(exp * log(2)/log(10))
# If exp is huge, we must use high-precision arithmetic to
# find the nearest power of ten
expprec = bitcount(abs(exp)) + 5
tmp = from_int(exp)
tmp = mpf_mul(tmp, mpf_ln2(expprec))
tmp = mpf_div(tmp, mpf_ln10(expprec), expprec)
b = to_int(tmp)
s = mpf_div(s, mpf_pow_int(ften, b, bitprec), bitprec)
_sign, man, exp, bc = s
exponent = b
else:
exponent = 0
# First, calculate mantissa digits by converting to a binary
# fixed-point number and then converting that number to
# a decimal fixed-point number.
fixprec = max(bitprec - exp - bc, 0)
fixdps = int(fixprec / math.log(10,2) + 0.5)
sf = to_fixed(s, fixprec)
sd = bin_to_radix(sf, fixprec, 10, fixdps)
digits = numeral(sd, base=10, size=dps)
exponent += len(digits) - fixdps - 1
return sign, digits, exponent
def to_str(s, dps, strip_zeros=True, min_fixed=None, max_fixed=None,
show_zero_exponent=False):
"""
Convert a raw mpf to a decimal floating-point literal with at
most `dps` decimal digits in the mantissa (not counting extra zeros
that may be inserted for visual purposes).
The number will be printed in fixed-point format if the position
of the leading digit is strictly between min_fixed
(default = min(-dps/3,-5)) and max_fixed (default = dps).
To force fixed-point format always, set min_fixed = -inf,
max_fixed = +inf. To force floating-point format, set
min_fixed >= max_fixed.
The literal is formatted so that it can be parsed back to a number
by to_str, float() or Decimal().
"""
# Special numbers
if not s[1]:
if s == fzero:
if dps: t = '0.0'
else: t = '.0'
if show_zero_exponent:
t += 'e+0'
return t
if s == finf: return '+inf'
if s == fninf: return '-inf'
if s == fnan: return 'nan'
raise ValueError
if min_fixed is None: min_fixed = min(-(dps//3), -5)
if max_fixed is None: max_fixed = dps
# to_digits_exp rounds to floor.
# This sometimes kills some instances of "...00001"
sign, digits, exponent = to_digits_exp(s, dps+3)
# No digits: show only .0; round exponent to nearest
if not dps:
if digits[0] in '56789':
exponent += 1
digits = ".0"
else:
# Rounding up kills some instances of "...99999"
if len(digits) > dps and digits[dps] in '56789' and \
(dps < 500 or digits[dps-4:dps] == '9999'):
digits2 = str(int(digits[:dps]) + 1)
if len(digits2) > dps:
digits2 = digits2[:dps]
exponent += 1
digits = digits2
else:
digits = digits[:dps]
# Prettify numbers close to unit magnitude
if min_fixed < exponent < max_fixed:
if exponent < 0:
digits = ("0"*int(-exponent)) + digits
split = 1
else:
split = exponent + 1
if split > dps:
digits += "0"*(split-dps)
exponent = 0
else:
split = 1
digits = (digits[:split] + "." + digits[split:])
if strip_zeros:
# Clean up trailing zeros
digits = digits.rstrip('0')
if digits[-1] == ".":
digits += "0"
if exponent == 0 and dps and not show_zero_exponent: return sign + digits
if exponent >= 0: return sign + digits + "e+" + str(exponent)
if exponent < 0: return sign + digits + "e" + str(exponent)
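# Illustrative examples (not part of the original module):
#     to_str(fhalf, 5) -> '0.5'
#     to_str(fnan, 5)  -> 'nan'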
def str_to_man_exp(x, base=10):
"""Helper function for from_str."""
# Verify that the input is a valid float literal
float(x)
# Split into mantissa, exponent
x = x.lower()
parts = x.split('e')
if len(parts) == 1:
exp = 0
else: # == 2
x = parts[0]
exp = int(parts[1])
# Look for radix point in mantissa
parts = x.split('.')
if len(parts) == 2:
a, b = parts[0], parts[1].rstrip('0')
exp -= len(b)
x = a + b
x = MPZ(int(x, base))
return x, exp
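# Illustrative examples (not part of the original module): the result is
# (man, exp) with value man * 10**exp:
#     str_to_man_exp("1.5e3") -> (15, 2)
#     str_to_man_exp("0.25")  -> (25, -2)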
special_str = {'inf':finf, '+inf':finf, '-inf':fninf, 'nan':fnan}
def from_str(x, prec, rnd=round_fast):
"""Create a raw mpf from a decimal literal, rounding in the
specified direction if the input number cannot be represented
exactly as a binary floating-point number with the given number of
bits. The literal syntax accepted is the same as for Python
floats.
TODO: the rounding does not work properly for large exponents.
"""
x = x.strip()
if x in special_str:
return special_str[x]
if '/' in x:
p, q = x.split('/')
return from_rational(int(p), int(q), prec, rnd)
man, exp = str_to_man_exp(x, base=10)
# XXX: appropriate cutoffs & track direction
# note no factors of 5
if abs(exp) > 400:
s = from_int(man, prec+10)
s = mpf_mul(s, mpf_pow_int(ften, exp, prec+10), prec, rnd)
else:
if exp >= 0:
s = from_int(man * 10**exp, prec, rnd)
else:
s = from_rational(man, 10**-exp, prec, rnd)
return s
# Binary string conversion. These are currently mainly used for debugging
# and could use some improvement in the future
def from_bstr(x):
man, exp = str_to_man_exp(x, base=2)
man = MPZ(man)
sign = 0
if man < 0:
man = -man
sign = 1
bc = bitcount(man)
return normalize(sign, man, exp, bc, bc, round_floor)
def to_bstr(x):
sign, man, exp, bc = x
return ['','-'][sign] + numeral(man, size=bitcount(man), base=2) + ("e%i" % exp)
#----------------------------------------------------------------------------#
# Square roots #
#----------------------------------------------------------------------------#
def mpf_sqrt(s, prec, rnd=round_fast):
"""
Compute the square root of a nonnegative mpf value. The
result is correctly rounded.
"""
sign, man, exp, bc = s
if sign:
raise ComplexResult("square root of a negative number")
if not man:
return s
if exp & 1:
exp -= 1
man <<= 1
bc += 1
elif man == 1:
return normalize1(sign, man, exp//2, bc, prec, rnd)
shift = max(4, 2*prec-bc+4)
shift += shift & 1
if rnd in 'fd':
man = isqrt(man<<shift)
else:
man, rem = sqrtrem(man<<shift)
# Perturb up
if rem:
man = (man<<1)+1
shift += 2
return from_man_exp(man, (exp-shift)//2, prec, rnd)
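# Illustrative example (not part of the original module): perfect squares
# whose mantissa reduces to 1 take the fast man == 1 path:
#     mpf_sqrt(from_int(4), 53) -> (0, 1, 1, 1)   # == ftwo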
def mpf_hypot(x, y, prec, rnd=round_fast):
"""Compute the Euclidean norm sqrt(x**2 + y**2) of two raw mpfs
x and y."""
if y == fzero: return mpf_abs(x, prec, rnd)
if x == fzero: return mpf_abs(y, prec, rnd)
hypot2 = mpf_add(mpf_mul(x,x), mpf_mul(y,y), prec+4)
return mpf_sqrt(hypot2, prec, rnd)
if BACKEND == 'sage':
try:
import sage.libs.mpmath.ext_libmp as ext_lib
mpf_add = ext_lib.mpf_add
mpf_sub = ext_lib.mpf_sub
mpf_mul = ext_lib.mpf_mul
mpf_div = ext_lib.mpf_div
mpf_sqrt = ext_lib.mpf_sqrt
except ImportError:
pass
|
| denis-pitul/django | refs/heads/master | tests/messages_tests/test_mixins.py | 281 |
from django.core.urlresolvers import reverse
from django.test import SimpleTestCase, override_settings
from .urls import ContactFormViewWithMsg
@override_settings(ROOT_URLCONF='messages_tests.urls')
class SuccessMessageMixinTests(SimpleTestCase):
def test_set_messages_success(self):
author = {'name': 'John Doe',
'slug': 'success-msg'}
add_url = reverse('add_success_msg')
req = self.client.post(add_url, author)
self.assertIn(ContactFormViewWithMsg.success_message % author,
req.cookies['messages'].value)
|
| crcresearch/osf.io | refs/heads/develop | addons/dataverse/tests/test_client.py | 24 |
import mock
from nose.tools import (
assert_equal, assert_raises, assert_true,
assert_false, assert_in, assert_is, assert_is_none
)
import pytest
import unittest
from dataverse import Connection, Dataverse, DataverseFile, Dataset
from dataverse.exceptions import UnauthorizedError
from addons.dataverse.models import NodeSettings
from addons.dataverse.tests.utils import DataverseAddonTestCase, create_external_account
from framework.exceptions import HTTPError
from addons.dataverse.client import (
_connect, get_files, publish_dataset, get_datasets, get_dataset,
get_dataverses, get_dataverse, connect_from_settings, connect_or_error,
connect_from_settings_or_401,
)
from addons.dataverse import settings
pytestmark = pytest.mark.django_db
class TestClient(DataverseAddonTestCase, unittest.TestCase):
def setUp(self):
super(TestClient, self).setUp()
self.host = 'some.host.url'
self.token = 'some-fancy-api-token-which-is-long'
self.mock_connection = mock.create_autospec(Connection)
self.mock_dataverse = mock.create_autospec(Dataverse)
self.mock_dataset = mock.create_autospec(Dataset)
self.mock_file = mock.create_autospec(DataverseFile)
self.mock_file.dataset = self.mock_dataset
self.mock_dataset.dataverse = self.mock_dataverse
self.mock_dataverse.connection = self.mock_connection
@mock.patch('addons.dataverse.client.Connection')
def test_connect(self, mock_connection):
mock_connection.return_value = mock.create_autospec(Connection)
c = _connect(self.host, self.token)
mock_connection.assert_called_once_with(self.host, self.token)
assert_true(c)
@mock.patch('addons.dataverse.client.Connection')
def test_connect_fail(self, mock_connection):
mock_connection.side_effect = UnauthorizedError()
with assert_raises(UnauthorizedError):
_connect(self.host, self.token)
mock_connection.assert_called_once_with(self.host, self.token)
@mock.patch('addons.dataverse.client.Connection')
def test_connect_or_error(self, mock_connection):
mock_connection.return_value = mock.create_autospec(Connection)
c = connect_or_error(self.host, self.token)
mock_connection.assert_called_once_with(self.host, self.token)
assert_true(c)
@mock.patch('addons.dataverse.client.Connection')
def test_connect_or_error_returns_401_when_client_raises_unauthorized_error(self, mock_connection):
mock_connection.side_effect = UnauthorizedError()
with assert_raises(HTTPError) as cm:
connect_or_error(self.host, self.token)
mock_connection.assert_called_once_with(self.host, self.token)
assert_equal(cm.exception.code, 401)
@mock.patch('addons.dataverse.client._connect')
def test_connect_from_settings(self, mock_connect):
node_settings = NodeSettings()
node_settings.external_account = create_external_account(
self.host, self.token,
)
connection = connect_from_settings(node_settings)
assert_true(connection)
mock_connect.assert_called_once_with(self.host, self.token)
def test_connect_from_settings_none(self):
connection = connect_from_settings(None)
assert_is_none(connection)
@mock.patch('addons.dataverse.client._connect')
def test_connect_from_settings_or_401(self, mock_connect):
node_settings = NodeSettings()
node_settings.external_account = create_external_account(
self.host, self.token,
)
connection = connect_from_settings_or_401(node_settings)
assert_true(connection)
mock_connect.assert_called_once_with(self.host, self.token)
def test_connect_from_settings_or_401_none(self):
connection = connect_from_settings_or_401(None)
assert_is_none(connection)
@mock.patch('addons.dataverse.client.Connection')
def test_connect_from_settings_or_401_forbidden(self, mock_connection):
mock_connection.side_effect = UnauthorizedError()
node_settings = NodeSettings()
node_settings.external_account = create_external_account(
self.host, self.token,
)
with assert_raises(HTTPError) as e:
connect_from_settings_or_401(node_settings)
mock_connection.assert_called_once_with(self.host, self.token)
assert_equal(e.exception.code, 401)
def test_get_files(self):
published = False
get_files(self.mock_dataset, published)
self.mock_dataset.get_files.assert_called_once_with('latest')
def test_get_files_published(self):
published = True
get_files(self.mock_dataset, published)
self.mock_dataset.get_files.assert_called_once_with('latest-published')
def test_publish_dataset(self):
publish_dataset(self.mock_dataset)
self.mock_dataset.publish.assert_called_once_with()
def test_publish_dataset_unpublished_dataverse(self):
type(self.mock_dataverse).is_published = mock.PropertyMock(return_value=False)
with assert_raises(HTTPError) as e:
publish_dataset(self.mock_dataset)
assert_false(self.mock_dataset.publish.called)
assert_equal(e.exception.code, 405)
def test_get_datasets(self):
mock_dataset1 = mock.create_autospec(Dataset)
mock_dataset2 = mock.create_autospec(Dataset)
mock_dataset3 = mock.create_autospec(Dataset)
mock_dataset1.get_state.return_value = 'DRAFT'
mock_dataset2.get_state.return_value = 'RELEASED'
mock_dataset3.get_state.return_value = 'DEACCESSIONED'
self.mock_dataverse.get_datasets.return_value = [
mock_dataset1, mock_dataset2, mock_dataset3
]
datasets = get_datasets(self.mock_dataverse)
assert_is(self.mock_dataverse.get_datasets.assert_called_once_with(timeout=settings.REQUEST_TIMEOUT), None)
assert_in(mock_dataset1, datasets)
assert_in(mock_dataset2, datasets)
assert_in(mock_dataset3, datasets)
def test_get_datasets_no_dataverse(self):
datasets = get_datasets(None)
assert_equal(datasets, [])
def test_get_dataset(self):
self.mock_dataset.get_state.return_value = 'DRAFT'
self.mock_dataverse.get_dataset_by_doi.return_value = self.mock_dataset
s = get_dataset(self.mock_dataverse, 'My hdl')
assert_is(self.mock_dataverse.get_dataset_by_doi.assert_called_once_with('My hdl', timeout=settings.REQUEST_TIMEOUT), None)
assert_equal(s, self.mock_dataset)
@mock.patch('dataverse.dataverse.requests')
def test_get_dataset_calls_patched_timeout_method(self, mock_requests):
# Verify optional timeout parameter is passed to requests by dataverse client.
# https://github.com/IQSS/dataverse-client-python/pull/27
dataverse = Dataverse(mock.Mock(), mock.Mock())
dataverse.connection.auth = 'me'
dataverse.collection.get.return_value = '123'
mock_requests.get.side_effect = Exception('Done Testing')
with assert_raises(Exception) as e:
get_dataset(dataverse, 'My hdl')
assert_is(mock_requests.get.assert_called_once_with('123', auth='me', timeout=settings.REQUEST_TIMEOUT), None)
assert_equal(e.exception.message, 'Done Testing')
def test_get_deaccessioned_dataset(self):
self.mock_dataset.get_state.return_value = 'DEACCESSIONED'
self.mock_dataverse.get_dataset_by_doi.return_value = self.mock_dataset
with assert_raises(HTTPError) as e:
get_dataset(self.mock_dataverse, 'My hdl')
assert_is(self.mock_dataverse.get_dataset_by_doi.assert_called_once_with('My hdl', timeout=settings.REQUEST_TIMEOUT), None)
assert_equal(e.exception.code, 410)
def test_get_bad_dataset(self):
error = UnicodeDecodeError('utf-8', b'', 1, 2, 'jeepers')
self.mock_dataset.get_state.side_effect = error
self.mock_dataverse.get_dataset_by_doi.return_value = self.mock_dataset
with assert_raises(HTTPError) as e:
get_dataset(self.mock_dataverse, 'My hdl')
assert_is(self.mock_dataverse.get_dataset_by_doi.assert_called_once_with('My hdl', timeout=settings.REQUEST_TIMEOUT), None)
assert_equal(e.exception.code, 406)
def test_get_dataverses(self):
published_dv = mock.create_autospec(Dataverse)
unpublished_dv = mock.create_autospec(Dataverse)
type(published_dv).is_published = mock.PropertyMock(return_value=True)
type(unpublished_dv).is_published = mock.PropertyMock(return_value=False)
self.mock_connection.get_dataverses.return_value = [
published_dv, unpublished_dv
]
dvs = get_dataverses(self.mock_connection)
self.mock_connection.get_dataverses.assert_called_once_with()
assert_in(published_dv, dvs)
assert_in(unpublished_dv, dvs)
assert_equal(len(dvs), 2)
def test_get_dataverse(self):
type(self.mock_dataverse).is_published = mock.PropertyMock(return_value=True)
self.mock_connection.get_dataverse.return_value = self.mock_dataverse
d = get_dataverse(self.mock_connection, 'ALIAS')
self.mock_connection.get_dataverse.assert_called_once_with('ALIAS')
assert_equal(d, self.mock_dataverse)
def test_get_unpublished_dataverse(self):
type(self.mock_dataverse).is_published = mock.PropertyMock(return_value=False)
self.mock_connection.get_dataverse.return_value = self.mock_dataverse
d = get_dataverse(self.mock_connection, 'ALIAS')
self.mock_connection.get_dataverse.assert_called_once_with('ALIAS')
assert_equal(d, self.mock_dataverse)
|
| apache/libcloud | refs/heads/trunk | libcloud/compute/drivers/oneandone.py | 7 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
1&1 Cloud Server Compute driver
"""
import json
from libcloud.compute.providers import Provider
from libcloud.common.base import JsonResponse, ConnectionKey
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation, \
Node, NodeAuthPassword, NodeAuthSSHKey
from libcloud.common.types import InvalidCredsError
from libcloud.compute.types import NodeState
from libcloud.utils.py3 import httplib
from libcloud.compute.base import NodeDriver
from time import sleep
API_HOST = 'cloudpanel-api.1and1.com'
API_VERSION = '/v1/'
__all__ = [
'API_HOST',
'API_VERSION',
'OneAndOneResponse',
'OneAndOneConnection',
'OneAndOneNodeDriver'
]
class OneAndOneResponse(JsonResponse):
"""
OneAndOne response parsing.
"""
valid_response_codes = [httplib.OK, httplib.CREATED, httplib.ACCEPTED]
def parse_error(self):
if self.status == httplib.UNAUTHORIZED:
body = self.parse_body()
raise InvalidCredsError(body['message'])
else:
body = self.parse_body()
if 'message' in body:
error = '%s (code: %s)' % (body['message'], self.status)
else:
error = body
return error
def success(self):
return self.status in self.valid_response_codes
class OneAndOneConnection(ConnectionKey):
"""
Connection class for the 1&1 driver
"""
host = API_HOST
api_prefix = API_VERSION
responseCls = OneAndOneResponse
def encode_data(self, data):
return json.dumps(data)
def add_default_headers(self, headers):
"""
Add headers that are necessary for every request
This method adds ``token`` and ``Content-Type`` to the request.
"""
headers['X-Token'] = self.key
headers['Content-Type'] = 'application/json'
return headers
def request(self, action, params=None, data=None, headers=None,
method='GET', raw=False):
"""
Some requests will use the href attribute directly.
If this is not the case, then we should formulate the
url based on the action specified.
If we are using a full url, we need to remove the
host and protocol components.
"""
action = self.api_prefix + action.lstrip('/')
return super(OneAndOneConnection, self). \
request(action=action,
params=params,
data=data,
headers=headers,
method=method,
raw=raw)
class OneAndOneNodeDriver(NodeDriver):
"""
Base OneAndOne node driver.
"""
connectionCls = OneAndOneConnection
name = '1and1'
website = 'http://www.1and1.com'
type = Provider.ONEANDONE
NODE_STATE_MAP = {
'POWERING_ON': NodeState.STARTING,
'POWERING_OFF': NodeState.PENDING,
'POWERED_OFF': NodeState.STOPPING,
'POWERED_ON': NodeState.RUNNING,
'REBOOTING': NodeState.REBOOTING,
'CONFIGURING': NodeState.RECONFIGURING,
'REMOVING': NodeState.UNKNOWN,
'DEPLOYING': NodeState.STARTING,
}
"""
Core Functions
"""
def list_sizes(self):
"""
Lists all sizes
:return: A list of all configurable node sizes.
:rtype: ``list`` of :class:`NodeSize`
"""
sizes = []
fixed_instances = self._list_fixed_instances()
for value in fixed_instances:
node_size = self._to_node_size(value)
sizes.append(node_size)
return sizes
def list_locations(self):
"""
Lists all locations
:return: ``list`` of :class:`NodeLocation`
:rtype: ``list``
"""
datacenters = self.ex_list_datacenters()
locations = []
for values in datacenters:
node_size = self._to_location(values)
locations.append(node_size)
return locations
def list_images(self, image_type=None):
"""
:return: ``list`` of :class: `NodeImage`
:rtype: ``list``
"""
response = self.connection.request(
action='server_appliances',
method='GET'
)
return self._to_images(response.object, image_type)
def get_image(self, image_id):
response = self.connection.request(
action='server_appliances/%s' % image_id,
method='GET'
)
return self._to_image(response.object)
"""
Node functions
"""
def create_node(self,
name,
image,
ex_fixed_instance_size_id,
location=None,
auth=None,
ex_ip=None,
ex_monitoring_policy_id=None,
ex_firewall_policy_id=None,
ex_loadbalancer_id=None,
ex_description=None,
ex_power_on=None):
"""
Creates a node.
:param name: The name of the new node
:type name: `str`
:param image: Image (server appliance) to deploy
:type image: :class:`NodeImage`
:param ex_fixed_instance_size_id:
Fixed instance size ID from list_sizes
:type ex_fixed_instance_size_id: ``str``
:param location: 1&1 data center location
:type location: :class:`NodeLocation`
:param ex_ip: IP address
:type ex_ip: ``str``
:param auth: Authentication for the node: a password
(:class:`NodeAuthPassword`) or an SSH key (:class:`NodeAuthSSHKey`)
:type auth: :class:`NodeAuthPassword` or :class:`NodeAuthSSHKey`
:param ex_monitoring_policy_id:
:type ex_monitoring_policy_id: ``str``
:param ex_firewall_policy_id:
:type ex_firewall_policy_id: ``str``
:param ex_loadbalancer_id:
:type ex_loadbalancer_id: `str`
:param ex_description:
:type ex_description: `str`
:param ex_power_on:
:type ex_power_on: `bool`
:return: Instance of class ``Node``
:rtype: :class:`Node`
"""
body = {
'name': name,
'appliance_id': image.id,
'hardware': {
'fixed_instance_size_id': ex_fixed_instance_size_id
},
}
if location is not None:
body['datacenter_id'] = location.id
if ex_power_on is not None:
body['power_on'] = ex_power_on
if ex_description is not None:
body['description'] = ex_description
if ex_firewall_policy_id is not None:
body['firewall_policy_id'] = ex_firewall_policy_id
if ex_monitoring_policy_id is not None:
body['monitoring_policy_id'] = ex_monitoring_policy_id
if ex_loadbalancer_id is not None:
body['loadbalancer_id'] = ex_loadbalancer_id
if auth is not None:
if isinstance(auth, NodeAuthPassword):
body['password'] = auth.password
elif isinstance(auth, NodeAuthSSHKey):
body['rsa_key'] = auth.pubkey
if ex_ip is not None:
body['ip_id'] = ex_ip
response = self.connection.request(
action='servers',
data=body,
method='POST',
)
return self._to_node(response.object)
def list_nodes(self):
"""
List all nodes.
:return: ``list`` of :class:`Node`
:rtype: ``list``
"""
response = self.connection.request(
action='servers',
method='GET'
)
return self._to_nodes(response.object)
def destroy_node(self, node, ex_keep_ips=False):
"""
Destroys a node.
:param node: The node you wish to destroy.
:type node: :class:`Node`
:param ex_keep_ips: True to keep all IP addresses assigned to the node
:type ex_keep_ips: ``bool``
:return: Instance of class ``Node``
:rtype: :class: `Node`
"""
self.ex_shutdown_server(node.id)
self._wait_for_state(node.id, 'POWERED_OFF')
response = self.connection.request(
action='servers/%s' % node.id,
params={'keep_ips': ex_keep_ips},
method='DELETE'
)
return self._to_node(response.object)
def reboot_node(self, node):
"""
Reboots the node.
:param node: The node you wish to reboot.
:type node: :class:`Node`
:return: Instance of class ``Node``
:rtype: :class: `Node`
"""
shutdown_body = {
"action": "REBOOT",
"method": "HARDWARE"
}
response = self.connection.request(
action='servers/%s/status/action' % node.id,
data=shutdown_body,
method='PUT',
)
return self._to_node(response.object)
"""
Extension functions
"""
def ex_rename_server(self, server_id, name=None, description=None):
"""
Renames the server
:param server_id: ID of the server you want to rename
:param name: New name of the server
:type: ``str``
:param description: New description of the server
:type: ``str``
:return: Instance of class ``Node``
:rtype: :class: `Node`
"""
body = {}
if name is not None:
body["name"] = name
if description is not None:
body["description"] = description
response = self.connection.request(
action='servers/%s' % server_id,
data=body,
method='PUT'
)
return self._to_node(response.object)
def ex_get_server_hardware(self, server_id):
"""
Gets all server hardware
:param server_id: Id of the server
:type: ``str``
:return: Server's hardware
:rtype: ``dict``
"""
response = self.connection.request(
action='servers/%s/hardware' % server_id,
method='GET'
)
return response.object
"""
Hardware operations
"""
def ex_modify_server_hardware(self, server_id,
fixed_instance_size_id=None, vcore=None,
cores_per_processor=None, ram=None):
"""
Modifies server's hardware
:param server_id:
:type: ``str``
:param fixed_instance_size_id: Id of the fixed instance size
:type: ``str``
:param vcore: Virtual cores count
:type: ``int``
:param cores_per_processor: Count of cores per processor
:type: ``int``
:param ram: Amount of ram for the server
:type: ``int``
:return: Instance of class ``Node``
:rtype: :class: `Node`
"""
body = {}
if fixed_instance_size_id is not None:
body['fixed_instance_size_id'] = fixed_instance_size_id
if vcore is not None:
body['vcore'] = vcore
if cores_per_processor is not None:
body['cores_per_processor'] = cores_per_processor
if ram is not None:
body['ram'] = ram
response = self.connection.request(
action='servers/%s/hardware' % server_id,
data=body,
method='PUT'
)
return self._to_node(response.object)
"""
HDD operations
"""
def ex_modify_server_hdd(self, server_id, hdd_id=None, size=None):
"""
Modifies server hard disk drives
:param server_id: Id of the server
:type: ``str``
:param hdd_id: Id of the hard disk
:type: ``str``
:param size: Size of the hard disk
:type: ``str``
:return: Instance of class ``Node``
:rtype: :class: `Node`
"""
body = {}
if size is not None:
body['size'] = size
response = self.connection.request(
action='servers/%s/hardware/hdds/%s' % (server_id, hdd_id),
data=body,
method='PUT'
)
return self._to_node(response.object)
def ex_add_hdd(self, server_id, size, is_main):
"""
Add a hard disk to the server
:param server_id: Id of the server
:type: ``str``
:param size: Size of the new disk
:type: ``str``
:param is_main: Indicates if the disk is going to be the boot disk
:type: ``boolean``
:return: Instance of class ``Node``
:rtype: :class: `Node`
"""
body = {
'size': size,
'is_main': is_main
}
response = self.connection.request(
action='servers/%s/hardware/hdds' % server_id,
data=body,
method='POST'
)
return self._to_node(response.object)
def ex_remove_hdd(self, server_id, hdd_id):
"""
Removes existing hard disk
:param server_id: Id of the server
:type: ``str``
:param hdd_id: Id of the hard disk
:type: ``str``
:return: Instance of class ``Node``
:rtype: :class: `Node`
"""
response = self.connection.request(
action='servers/%s/hardware/hdds/%s' % (server_id, hdd_id),
method='DELETE'
)
return self._to_node(response.object)
"""
Data center operations
"""
def ex_list_datacenters(self):
"""
Lists all data centers
:return: List of data centers
:rtype: ``dict``
"""
response = self.connection.request(
action='datacenters',
method='GET'
)
return response.object
def ex_get_server(self, server_id):
"""
Gets a server
:param server_id: Id of the server to be retrieved
:type: ``str``
:return: Instance of class ``Node``
:rtype: :class: `Node`
"""
response = self.connection.request(
action='servers/%s' % (server_id),
method='GET'
)
return self._to_node(response.object)
def ex_shutdown_server(self, server_id, method='SOFTWARE'):
"""
Shuts down the server
:param server_id: Id of the server to be shut down
:type: ``str``
:param method: Method of shutting down "SOFTWARE" or "HARDWARE"
:return: Instance of class ``Node``
:rtype: :class: `Node`
"""
shutdown_body = {
'action': 'POWER_OFF',
'method': method
}
response = self.connection.request(
action='servers/%s/status/action' % (server_id),
data=shutdown_body,
method='PUT',
)
return self._to_node(response.object)
"""
Image operations
"""
def ex_get_server_image(self, server_id):
"""
Gets server image
:param server_id: Id of the server
:type: ``str``
:return: Server image
:rtype: ``dict``
"""
response = self.connection.request(
action='servers/%s/image' % server_id,
method='GET'
)
return response.object
def ex_reinstall_server_image(self, server_id, image_id, password=None):
"""
Installs a new image on the server
:param server_id: Id of the server
:type: ``str``
:param image_id: Id of the image (Server Appliance)
:type: ``str``
:param password: New password for the server
:return: Instance of class ``Node``
:rtype: :class: `Node`
"""
body = {
'id': image_id,
}
if password is not None:
body['password'] = password
response = self.connection.request(
action='servers/%s/image' % server_id,
data=body,
method='PUT'
)
return self._to_node(response.object)
"""
Server IP operations
"""
def ex_list_server_ips(self, server_id):
"""
Gets all server IP objects
:param server_id: Id of the server
:type: ``str``
:return: List of server IP objects
:rtype: ``list`` of ``dict``
"""
response = self.connection.request(
action='servers/%s/ips' % server_id,
method='GET'
)
return response.object
def ex_get_server_ip(self, server_id, ip_id):
"""
Get a single server IP object
:param server_id: Id of the server
:type: ``str``
:param ip_id: ID of the IP address
:type: ``str``
:return: IP address object
:rtype: ``dict``
"""
response = self.connection.request(
action='servers/%s/ips/%s' % (server_id, ip_id),
method='GET'
)
return response.object
def ex_assign_server_ip(self, server_id, ip_type):
"""
Assigns a new IP address to the server
:param server_id: Id of the server
:type: ``str``
:param ip_type: Type of the IP address [IPV4,IPV6]
:type: ``str``
:return: ``Node`` instance
:rtype: ``Node``
"""
body = {
'type': ip_type
}
response = self.connection.request(
action='servers/%s/ips' % server_id,
data=body,
method='POST'
)
return self._to_node(response.object)
def ex_remove_server_ip(self, server_id, ip_id, keep_ip=None):
"""
Removes an IP address from the server
:param server_id: Id of the server
:type: ``str``
:param ip_id: ID of the IP address
:type: ``str``
        :param keep_ip: Indicates whether the IP address should be kept
        in the Cloud Panel after it is removed from the server
:type: ``boolean``
:return: ``Node`` instance
:rtype: ``Node``
"""
body = {}
if keep_ip is not None:
body['keep_ip'] = keep_ip
response = self.connection.request(
action='servers/%s/ips/%s' % (server_id, ip_id),
data=body,
method='DELETE'
)
return self._to_node(response.object)
def ex_get_server_firewall_policies(self, server_id, ip_id):
"""
        Gets the firewall policy attached to the server's IP
:param server_id: Id of the server
:type: ``str``
:param ip_id: ID of the IP address
:type: ``str``
        :return: Firewall policy object
:rtype: ``dict``
"""
response = self.connection.request(
action='/servers/%s/ips/%s/firewall_policy' % (server_id, ip_id),
method='GET'
)
return response.object
def ex_add_server_firewall_policy(self, server_id, ip_id, firewall_id):
"""
Adds a firewall policy to the server's IP address
:param server_id: Id of the server
:type: ``str``
:param ip_id: ID of the IP address
:type: ``str``
:param firewall_id: ID of the firewall policy
:type: ``str``
:return: ``Node`` instance
:rtype: ``Node``
"""
body = {
'id': firewall_id
}
response = self.connection.request(
action='/servers/%s/ips/%s/firewall_policy' % (server_id, ip_id),
data=body,
method='POST'
)
return self._to_node(response.object)
"""
Firewall Policy operations
"""
def ex_create_firewall_policy(self, name, rules, description=None):
"""
        Creates a firewall policy.
        :param name: Name of the firewall policy
        :type name: ``str``
        :param rules: List of firewall rules
        :type rules: ``list`` of ``dict``
        :param description: Description of the firewall policy
        :type description: ``str``
        :return: Newly created firewall policy
        :rtype: ``dict``
"""
body = {
'name': name
}
if description is not None:
body['description'] = description
if len(rules) == 0:
raise ValueError(
'At least one firewall rule is required.'
)
else:
body['rules'] = rules
response = self.connection.request(
action='firewall_policies',
data=body,
method='POST',
)
return response.object
def ex_list_firewall_policies(self):
""""
List firewall policies
:return: 'dict'
"""
response = self.connection.request(
action='firewall_policies',
method='GET'
)
return response.object
def ex_get_firewall_policy(self, fw_id):
"""
        Gets a firewall policy
        :param fw_id: ID of the firewall policy
        :type fw_id: ``str``
        :return: Firewall policy
        :rtype: ``dict``
"""
response = self.connection.request(
            action='firewall_policies/%s' % fw_id,
method='GET'
)
return response.object
def ex_delete_firewall_policy(self, fw_id):
"""
        Deletes a firewall policy
        :param fw_id: ID of the firewall policy
        :type fw_id: ``str``
        :return: Instance of the firewall policy being deleted
        :rtype: ``dict``
"""
response = self.connection.request(
            action='firewall_policies/%s' % fw_id,
method='DELETE'
)
return response.object
"""
Shared storage operations
"""
def ex_list_shared_storages(self):
"""
        Lists all shared storages
        :return: List of shared storages
        :rtype: ``list`` of ``dict``
"""
response = self.connection.request(
action='shared_storages',
method='GET'
)
return response.object
def ex_get_shared_storage(self, storage_id):
"""
        Gets a shared storage
        :param storage_id: Id of the shared storage
        :type storage_id: ``str``
        :return: Instance of shared storage
        :rtype: ``dict``
"""
response = self.connection.request(
action='shared_storages/%s' % (storage_id),
method='GET'
)
return response.object
def ex_create_shared_storage(self, name, size, datacenter_id=None,
description=None):
"""
        Creates a shared storage
        :param name: Name of the storage
        :param size: Size of the storage
        :param datacenter_id: Datacenter where the storage should be created
        :param description: Description of the storage
        :return: Newly created shared storage
        :rtype: ``dict``
"""
body = {
'name': name,
'size': size,
'datacenter_id': datacenter_id
}
if description is not None:
body['description'] = description
response = self.connection.request(
action='shared_storages',
data=body,
method='POST'
)
return response.object
def ex_delete_shared_storage(self, storage_id):
"""
Removes a shared storage
:param storage_id: Id of the shared storage
:type: ``str``
        :return: Instance of shared storage
        :rtype: ``dict``
"""
response = self.connection.request(
action='shared_storages/%s' % storage_id,
method='DELETE'
)
return response.object
def ex_attach_server_to_shared_storage(self, storage_id,
server_id, rights):
"""
        Attaches a single server to a shared storage
        :param storage_id: Id of the shared storage
        :type storage_id: ``str``
        :param server_id: Id of the server to be attached to the storage
        :type server_id: ``str``
        :param rights: Access rights the server gets on the storage
        :type rights: ``str``
        :return: Instance of shared storage
        :rtype: ``dict``
"""
body = {
            'servers': [
{
'id': server_id,
'rights': rights
}
]
}
response = self.connection.request(
action='shared_storages/%s/servers' % storage_id,
data=body,
method='POST'
)
return response.object
def ex_get_shared_storage_server(self, storage_id, server_id):
"""
        Gets a shared storage's server
        :param storage_id: Id of the shared storage
        :type storage_id: ``str``
        :param server_id: Id of the server
        :type server_id: ``str``
        :return: Instance of the shared storage's server
        :rtype: ``dict``
"""
response = self.connection.request(
action='shared_storages/%s/servers/%s' % (storage_id, server_id),
)
return response.object
def ex_detach_server_from_shared_storage(self, storage_id,
server_id):
"""
Detaches a server from shared storage
:param storage_id: Id of the shared storage
:type: ``str``
:param server_id: Id of the server
:type: ``str``
:return: Instance of shared storage
:rtype: ``dict``
"""
response = self.connection.request(
action='shared_storages/%s/servers/%s' % (storage_id, server_id),
method='DELETE'
)
return response.object
"""
Load Balancers operations
"""
def ex_create_load_balancer(self, name, method, rules,
persistence=None,
persistence_time=None,
health_check_test=None,
health_check_interval=None,
health_check_path=None,
health_check_parser=None,
datacenter_id=None,
description=None):
"""
:param name: Name of the load balancer
:param method: Load balancer method
:param rules: Load balancer rules
:type rules: ``list`` of ``dict``
        :param persistence: Indicates if persistence is set
        :type persistence: ``boolean``
        :param persistence_time: Persistence time
        :type persistence_time: ``int``
        :param health_check_test: Type of the health check test
        :type health_check_test: ``str``
        :param health_check_interval: Interval of the health check
        :type health_check_interval: ``int``
        :param health_check_path: Path used by the health check
        :type health_check_path: ``str``
        :param health_check_parser: Parser used by the health check
        :type health_check_parser: ``str``
        :param datacenter_id: Data center id
        :type datacenter_id: ``str``
        :param description: Description of load balancer
        :type description: ``str``
        :return: Newly created load balancer
        :rtype: ``dict``
"""
body = {
'name': name,
'method': method,
}
        body['rules'] = rules
if persistence is not None:
body['persistence'] = persistence
if persistence_time is not None:
body['persistence_time'] = persistence_time
if health_check_test is not None:
body['health_check_test'] = health_check_test
if health_check_interval is not None:
body['health_check_interval'] = health_check_interval
if health_check_path is not None:
body['health_check_path'] = health_check_path
if health_check_parser is not None:
body['health_check_parser'] = health_check_parser
if datacenter_id is not None:
body['datacenter_id'] = datacenter_id
if description is not None:
body['description'] = description
response = self.connection.request(
action='load_balancers',
data=body,
method='POST'
)
return response.object
def ex_update_load_balancer(self, lb_id, name=None, description=None,
health_check_test=None,
health_check_interval=None,
persistence=None,
persistence_time=None,
method=None):
        """
        Updates a load balancer
        :param lb_id: ID of the load balancer
        :type lb_id: ``str``
        :param name: Name of the load balancer
        :param description: Description of the load balancer
        :param health_check_test: Type of the health check test
        :param health_check_interval: Interval of the health check
        :param persistence: Indicates if persistence is set
        :param persistence_time: Persistence time
        :param method: Load balancer method
        :return: Instance of load balancer
        :rtype: ``dict``
        """
        body = {}
if name is not None:
body['name'] = name
if description is not None:
body['description'] = description
if health_check_test is not None:
body['health_check_test'] = health_check_test
if health_check_interval is not None:
body['health_check_interval'] = health_check_interval
if persistence is not None:
body['persistence'] = persistence
if persistence_time is not None:
body['persistence_time'] = persistence_time
if method is not None:
body['method'] = method
response = self.connection.request(
action='load_balancers/%s' % lb_id,
data=body,
method='PUT'
)
return response.object
def ex_add_servers_to_load_balancer(self, lb_id, server_ips=[]):
"""
Adds server's IP address to load balancer
:param lb_id: Load balancer ID
:type: ``str``
:param server_ips: Array of server IP IDs
:type: ``list`` of ``str``
:return: Instance of load balancer
:rtype: ``dict``
"""
body = {
'server_ips': server_ips,
}
response = self.connection.request(
action='load_balancers/%s/server_ips' % lb_id,
data=body,
method='POST'
)
return response.object
def ex_remove_server_from_load_balancer(self, lb_id, server_ip):
"""
Removes server's IP from load balancer
:param lb_id: Load balancer ID
:type: ``str``
:param server_ip: ID of the server IP
:type: ``str``
:return: Instance of load balancer
:rtype: ``dict``
"""
response = self.connection.request(
action='/load_balancers/%s/server_ips/%s' % (lb_id, server_ip),
method='DELETE'
)
return response.object
def ex_add_load_balancer_rule(self, lb_id, protocol, port_balancer,
port_server, source=None):
"""
Adds a rule to load balancer
        :param lb_id: Load balancer ID
        :type lb_id: ``str``
        :param protocol: Load balancer protocol
        :type protocol: ``str``
        :param port_balancer: Port to be balanced
        :type port_balancer: ``int``
        :param port_server: Server port
        :type port_server: ``int``
        :param source: Source IP address
        :type source: ``str``
:return: Instance of load balancer
:rtype: ``dict``
"""
body = {
'rules': [
{
'protocol': protocol,
'port_balancer': port_balancer,
'port_server': port_server
}
]
}
if source is not None:
body['rules'][0]['source'] = source
response = self.connection.request(
action='/load_balancers/%s/rules' % lb_id,
data=body,
method='POST'
)
return response.object
def ex_remove_load_balancer_rule(self, lb_id, rule_id):
"""
Removes load balancer rule
        :param lb_id: Load balancer ID
        :type lb_id: ``str``
        :param rule_id: Rule ID
        :type rule_id: ``str``
:return: Instance of load balancer
:rtype: ``dict``
"""
response = self.connection.request(
action='/load_balancers/%s/rules/%s' % (lb_id, rule_id),
method='DELETE'
)
return response.object
def ex_list_load_balancers(self):
"""
Lists all load balancers
:return: List of load balancers
:rtype: ``list`` of ``dict``
"""
response = self.connection.request(
action='load_balancers',
method='GET'
)
return response.object
def ex_get_load_balancer(self, lb_id):
"""
Gets a single load balancer
:param lb_id: ID of the load balancer
:type lb_id: ``str``
:return: Instance of load balancer
:rtype: ``dict``
"""
response = self.connection.request(
action='load_balancers/%s' % lb_id,
method='GET'
)
return response.object
def ex_list_load_balancer_server_ips(self, lb_id):
"""
List balanced server IP addresses
:param lb_id: ID of the load balancer
:type lb_id: ``str``
:return: Array of IP address IDs
:rtype: ``dict``
"""
response = self.connection.request(
action='load_balancers/%s/server_ips' % lb_id,
method='GET'
)
return response.object
def ex_get_load_balancer_server_ip(self, lb_id, server_ip):
"""
        Gets a load balanced server IP
:param lb_id: ID of the load balancer
:type lb_id: ``str``
:param server_ip: ID of the server IP
:type server_ip: ``str``
:return: Server IP
:rtype: ``dict``
"""
response = self.connection.request(
action='load_balancers/%s/server_ips/%s' % (lb_id, server_ip),
method='GET'
)
return response.object
def ex_list_load_balancer_rules(self, lb_id):
"""
Lists loadbalancer rules
:param lb_id: ID of the load balancer
:type lb_id: ``str``
:return: Lists of rules
:rtype: ``list`` of ``dict``
"""
response = self.connection.request(
action='load_balancers/%s/rules' % lb_id,
method='GET'
)
return response.object
def ex_get_load_balancer_rule(self, lb_id, rule_id):
"""
Get a load balancer rule
:param lb_id: ID of the load balancer
:type lb_id: ``str``
:param rule_id: Rule ID
:type rule_id: ``str``
:return: A load balancer rule
:rtype: ``dict``
"""
response = self.connection.request(
action='load_balancers/%s/rules/%s' % (lb_id, rule_id),
method='GET'
)
return response.object
def ex_delete_load_balancer(self, lb_id):
"""
        Deletes a load balancer
        :param lb_id: ID of the load balancer
        :type lb_id: ``str``
:return: Instance of load balancer
:rtype: ``dict``
"""
response = self.connection.request(
action='load_balancers/%s' % lb_id,
method='DELETE'
)
return response.object
"""
Public IP operations
"""
def ex_list_public_ips(self):
"""
Lists all public IP addresses
:return: Array of public addresses
:rtype: ``list`` of ``dict``
"""
response = self.connection.request(
action='public_ips',
method='GET'
)
return response.object
def ex_create_public_ip(self, type, reverse_dns=None, datacenter_id=None):
"""
Creates a public IP
:param type: Type of IP (IPV4 or IPV6)
:type type: ``str``
:param reverse_dns: Reverse DNS
:type reverse_dns: ``str``
        :param datacenter_id: Datacenter ID where IP address will be created
:type datacenter_id: ``str``
:return: Instance of Public IP
:rtype: ``dict``
"""
body = {
'type': type
}
if reverse_dns is not None:
body['reverse_dns'] = reverse_dns
if datacenter_id is not None:
body['datacenter_id'] = datacenter_id
response = self.connection.request(
action='public_ips',
data=body,
method='POST'
)
return response.object
def ex_get_public_ip(self, ip_id):
"""
Gets a Public IP
:param ip_id: ID of the IP
:type ip_id: ``str``
:return: Instance of Public IP
:rtype: ``dict``
"""
response = self.connection.request(
action='public_ips/%s' % ip_id,
method='GET'
)
return response.object
def ex_delete_public_ip(self, ip_id):
"""
Deletes a public IP
:param ip_id: ID of public IP
:type ip_id: ``str``
:return: Instance of IP Address
:rtype: ``dict``
"""
response = self.connection.request(
action='public_ips/%s' % ip_id,
method='DELETE'
)
        return response.object
def ex_update_public_ip(self, ip_id, reverse_dns):
"""
Updates a Public IP
:param ip_id: ID of public IP
:type ip_id: ``str``
:param reverse_dns: Reverse DNS
:type reverse_dns: ``str``
:return: Instance of Public IP
:rtype: ``dict``
"""
body = {
'reverse_dns': reverse_dns
}
response = self.connection.request(
action='public_ips/%s' % ip_id,
data=body,
            method='PUT'
)
return response.object
"""
Private Network Operations
"""
def ex_list_private_networks(self):
"""
Lists all private networks
:return: List of private networks
:rtype: ``dict``
"""
response = self.connection.request(
action='private_networks',
method='GET'
)
return response.object
def ex_create_private_network(self, name, description=None,
datacenter_id=None,
network_address=None,
subnet_mask=None):
"""
Creates a private network
:param name: Name of the private network
:type name: ``str``
:param description: Description of the private network
:type description: ``str``
:param datacenter_id: ID of the data center for the private network
:type datacenter_id: ``str``
:param network_address: Network address of the private network
:type network_address: ``str``
:param subnet_mask: Subnet mask of the private network
:type subnet_mask: ``str``
:return: Newly created private network
:rtype: ``dict``
"""
body = {
'name': name
}
if description is not None:
body['description'] = description
if datacenter_id is not None:
body['datacenter_id'] = datacenter_id
if network_address is not None:
body['network_address'] = network_address
if subnet_mask is not None:
            body['subnet_mask'] = subnet_mask
response = self.connection.request(
action='private_networks',
data=body,
method='POST'
)
return response.object
def ex_delete_private_network(self, network_id):
"""
Deletes a private network
:param network_id: Id of the private network
:type network_id: ``str``
:return: Instance of the private network being deleted
:rtype: ``dict``
"""
response = self.connection.request(
            action='private_networks/%s' % network_id,
method='DELETE'
)
return response.object
def ex_update_private_network(self, network_id,
name=None, description=None,
datacenter_id=None,
network_address=None,
subnet_mask=None):
"""
        Updates a private network
        :param network_id: Id of the private network
        :type network_id: ``str``
        :param name: Name of the private network
        :type name: ``str``
:param description: Description of the private network
:type description: ``str``
:param datacenter_id: ID of the data center for the private network
:type datacenter_id: ``str``
:param network_address: Network address of the private network
:type network_address: ``str``
:param subnet_mask: Subnet mask of the private network
:type subnet_mask: ``str``
:return: Instance of private network
:rtype: ``dict``
"""
body = {}
if name is not None:
body['name'] = name
if description is not None:
body['description'] = description
if datacenter_id is not None:
body['datacenter_id'] = datacenter_id
if network_address is not None:
body['network_address'] = network_address
if subnet_mask is not None:
            body['subnet_mask'] = subnet_mask
response = self.connection.request(
            action='private_networks/%s' % network_id,
data=body,
method='PUT'
)
return response.object
def ex_list_private_network_servers(self, network_id):
"""
Lists all private network servers
:param network_id: Private network ID
:type network_id: ``str``
:return: List of private network servers
:rtype: ``dict``
"""
response = self.connection.request(
action='/private_networks/%s/servers' % network_id,
method='GET'
)
return response.object
def ex_add_private_network_server(self, network_id, server_ids):
"""
Add servers to private network
:param network_id: Private Network ID
:type network_id: ``str``
:param server_ids: List of server IDs
:type server_ids: ``list`` of ``str``
:return: List of attached servers
:rtype: ``dict``
"""
body = {
'servers': server_ids
}
response = self.connection.request(
action='/private_networks/%s/servers' % network_id,
data=body,
method='POST'
)
return response.object
def ex_remove_server_from_private_network(self, network_id, server_id):
"""
Removes a server from the private network
:param network_id: Private Network ID
:type network_id: ``str``
:param server_id: Id of the server
:type server_id: ``str``
:return: Instance of the private network
:rtype: ``dict``
"""
response = self.connection.request(
action='/private_networks/%s/servers/%s' % (network_id, server_id),
            method='DELETE'
)
return response.object
"""
Monitoring policy operations
"""
def ex_list_monitoring_policies(self):
"""
Lists all monitoring policies
:return: List of monitoring policies
:rtype: ``dict``
"""
response = self.connection.request(
action='monitoring_policies',
method='GET'
)
return response.object
def ex_create_monitoring_policy(self, name, thresholds,
ports,
processes,
description=None,
email=None,
agent=None,
):
"""
Creates a monitoring policy
:param name: Name for the monitoring policy
:type name: ``str``
:param thresholds: Thresholds for the monitoring policy
:type thresholds: ``dict``
:param ports: Monitoring policies for ports
:type ports: ``list`` of ``dict``
:param processes: Processes to be monitored
:type processes: ``list`` of ``dict``
:param description: Description for the monitoring policy
:type description: ``str``
:param email: Email for notifications
:type email: ``str``
:param agent: Indicates if agent application will be installed
:type agent: ``boolean``
        :return: Newly created instance of monitoring policy
:rtype: ``dict``
"""
body = {
'name': name,
'thresholds': thresholds,
'ports': ports,
'processes': processes
}
if description is not None:
body['description'] = description
if email is not None:
body['email'] = email
if agent is not None:
body['agent'] = agent
response = self.connection.request(
action='monitoring_policies',
data=body,
method='POST'
)
return response.object
def ex_delete_monitoring_policy(self, policy_id):
"""
Deletes a monitoring policy
:param policy_id: Id of the monitoring policy
:type policy_id: ``str``
:return: Instance of the monitoring policy being deleted
:rtype: ``dict``
"""
response = self.connection.request(
            action='monitoring_policies/%s' % policy_id,
method='DELETE'
)
return response.object
def ex_update_monitoring_policy(self, policy_id,
email,
thresholds,
name=None, description=None):
"""
Updates monitoring policy
:param policy_id: Id of the monitoring policy
:type policy_id: ``str``
:param email: Email to send notifications to
:type email: ``str``
:param thresholds: Thresholds for the monitoring policy
:type thresholds: ``dict``
:param name: Name of the monitoring policy
:type name: ``str``
:param description: Description of the monitoring policy
:type description: ``str``
        :return: Instance of the monitoring policy being updated
:rtype: ``dict``
"""
body = {}
if name is not None:
body['name'] = name
if description is not None:
body['description'] = description
if thresholds is not None:
body['thresholds'] = thresholds
if email is not None:
body['email'] = email
response = self.connection.request(
action='monitoring_policies/%s' % policy_id,
data=body,
method='PUT'
)
return response.object
def ex_get_monitoring_policy(self, policy_id):
"""
Fetches a monitoring policy
:param policy_id: Id of the monitoring policy
:type policy_id: ``str``
:return: Instance of a monitoring policy
:rtype: ``dict``
"""
response = self.connection.request(
action='monitoring_policies/%s' % policy_id,
method='GET'
)
return response.object
def ex_get_monitoring_policy_ports(self, policy_id):
"""
Fetches monitoring policy ports
:param policy_id: Id of the monitoring policy
        :type policy_id: ``str``
        :return: Monitoring policy ports
:rtype: ``dict``
"""
response = self.connection.request(
action='monitoring_policies/%s/ports' % policy_id,
method='GET'
)
return response.object
def ex_get_monitoring_policy_port(self, policy_id, port_id):
"""
Fetches monitoring policy port
:param policy_id: Id of the monitoring policy
:type policy_id: ``str``
:param port_id: Id of the port
:type port_id: ``str``
:return: Instance of a monitoring policy
:rtype: ``dict``
"""
response = self.connection.request(
action='monitoring_policies/%s/ports/%s' % (policy_id, port_id),
method='GET'
)
return response.object
def ex_remove_monitoring_policy_port(self, policy_id, port_id):
"""
Removes monitoring policy port
:param policy_id: Id of the monitoring policy
:type policy_id: ``str``
:param port_id: Id of the port
:type port_id: ``str``
:return: Instance of a monitoring policy
:rtype: ``dict``
"""
response = self.connection.request(
action='monitoring_policies/%s/ports/%s' % (policy_id, port_id),
method='DELETE'
)
return response.object
def ex_add_monitoring_policy_ports(self, policy_id, ports):
"""
Add monitoring policy ports
:param policy_id: Id of the monitoring policy
:type policy_id: ``str``
:param ports: List of ports
:type ports: ``dict``
[
{
'protocol':'TCP',
'port':'80',
'alert_if':'RESPONDING',
'email_notification':true
}
]
:return: Instance of a monitoring policy
:rtype: ``dict``
"""
body = {'ports': ports}
response = self.connection.request(
action='monitoring_policies/%s/ports' % policy_id,
data=body,
method='POST'
)
return response.object
def ex_get_monitoring_policy_processes(self, policy_id):
"""
Fetches monitoring policy processes
:param policy_id: Id of the monitoring policy
:type policy_id: ``str``
:return: Instance of a monitoring policy
:rtype: ``dict``
"""
response = self.connection.request(
action='monitoring_policies/%s/processes' % policy_id,
method='GET'
)
return response.object
def ex_get_monitoring_policy_process(self, policy_id, process_id):
"""
Fetches monitoring policy process
:param policy_id: Id of the monitoring policy
:type policy_id: ``str``
:param process_id: Id of the process
:type process_id: ``str``
:return: Instance of a monitoring policy
:rtype: ``dict``
"""
response = self.connection.request(
action='monitoring_policies/%s/processes/%s'
% (policy_id, process_id),
method='GET'
)
return response.object
def ex_remove_monitoring_policy_process(self, policy_id, process_id):
"""
Removes monitoring policy process
:param policy_id: Id of the monitoring policy
:type policy_id: ``str``
:param process_id: Id of the process
:type process_id: ``str``
:return: Instance of a monitoring policy
:rtype: ``dict``
"""
response = self.connection.request(
action='monitoring_policies/%s/processes/%s'
% (policy_id, process_id),
method='DELETE'
)
return response.object
def ex_add_monitoring_policy_processes(self, policy_id, processes):
"""
Add monitoring policy processes
:param policy_id: Id of the monitoring policy
:type policy_id: ``str``
:param processes: List of processes
:type processes: ``list`` of ``dict``
[
{
'process': 'taskmmgr',
'alert_if': 'RUNNING',
'email_notification': true
}
]
:return: Instance of a monitoring policy
:rtype: ``dict``
"""
body = {'processes': processes}
response = self.connection.request(
action='monitoring_policies/%s/processes' % policy_id,
data=body,
method='POST'
)
return response.object
def ex_list_monitoring_policy_servers(self, policy_id):
"""
        Lists all servers that are being monitored by the policy
:param policy_id: Id of the monitoring policy
:type policy_id: ``str``
:return: List of servers being monitored
:rtype: ``list`` of ``dict``
"""
response = self.connection.request(
action='monitoring_policies/%s/servers' % policy_id,
method='GET'
)
return response.object
def ex_add_servers_to_monitoring_policy(self, policy_id, servers):
"""
Adds servers to monitoring policy
:param policy_id: Id of the monitoring policy
:type policy_id: ``str``
:param servers: List of server ID
:type servers: ``list`` of ``str``
:return: Instance of a monitoring policy
:rtype: ``dict``
"""
body = {
'servers': servers
}
response = self.connection.request(
action='monitoring_policies/%s/servers' % policy_id,
data=body,
method='POST'
)
return response.object
def ex_remove_server_from_monitoring_policy(self, policy_id, server_id):
"""
Removes a server from monitoring policy
:param policy_id: Id of the monitoring policy
:type policy_id: ``str``
:param server_id: Id of the server
:type server_id: ``str``
:return: Instance of a monitoring policy
:rtype: ``dict``
"""
response = self.connection.request(
action='monitoring_policies/%s/servers/%s'
% (policy_id, server_id),
method='DELETE'
)
return response.object
"""
Private Functions
"""
def _to_images(self, object, image_type=None):
if image_type is not None:
images = [image for image in object if image['type'] == image_type]
else:
images = [image for image in object]
return [self._to_image(image) for image in images]
def _to_image(self, data):
extra = {
'os_family': data['os_family'],
'os': data['os'],
'os_version': data['os_version'],
'os_architecture': data['os_architecture'],
'os_image_type': data['os_image_type'],
'min_hdd_size': data['min_hdd_size'],
'available_datacenters': data['available_datacenters'],
'licenses': data['licenses'],
'version': data['version'],
'categories': data['categories']
}
return NodeImage(id=data['id'], name=data['name'], driver=self,
extra=extra)
def _to_node_size(self, data):
return NodeSize(
id=data['id'],
name=data['name'],
ram=data['hardware']['ram'],
disk=data['hardware']['hdds'][0]['size'],
bandwidth=None,
price=None,
driver=self.connection.driver,
extra={
'vcores': data['hardware']['vcore'],
'cores_per_processor': data['hardware']['cores_per_processor']}
)
def _to_location(self, location):
return NodeLocation(
id=location['id'],
name=location['country_code'],
country=location['location'],
driver=self.connection.driver
)
def _to_nodes(self, servers):
return [self._to_node(
server) for server in servers]
def _to_node(self, server):
extra = {}
extra['datacenter'] = server['datacenter']
if 'description' in server:
extra['description'] = server['description']
if 'status' in server:
extra['status'] = server['status']
if 'image' in server:
extra['image'] = server['image']
if 'hardware' in server:
extra['hardware'] = server['hardware']
if 'dvd' in server:
extra['dvd'] = server['dvd']
if 'snapshot' in server:
extra['snapshot'] = server['snapshot']
if 'ips' in server:
extra['ips'] = server['ips']
if 'alerts' in server:
extra['alerts'] = server['alerts']
if 'monitoring_policy' in server:
extra['monitoring_policy'] = server['monitoring_policy']
if 'private_networks' in server:
extra['private_networks'] = server['private_networks']
ips = []
        if server.get('ips') is not None:
for ip in server['ips']:
ips.append(ip['ip'])
state = self.NODE_STATE_MAP.get(
server['status']['state'])
return Node(
id=server['id'],
state=state,
name=server['name'],
driver=self.connection.driver,
public_ips=ips,
private_ips=None,
extra=extra
)
def _wait_for_state(self, server_id, state, retries=50):
        for _ in range(retries):
            server = self.ex_get_server(server_id)
            if server.extra['status']['state'] == state:
                return
            sleep(5)
        raise Exception('Retries count reached')
def _list_fixed_instances(self):
response = self.connection.request(
action='/servers/fixed_instance_sizes',
method='GET'
)
return response.object
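# ----------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the driver). It assumes a
# libcloud Provider.ONEANDONE constant and a valid API token; the ids and
# sizes below are placeholders.
#
#   from libcloud.compute.providers import get_driver
#   from libcloud.compute.types import Provider
#
#   driver = get_driver(Provider.ONEANDONE)('my-api-token')
#   node = driver.ex_get_server('server-id')
#   driver.ex_add_hdd(node.id, size=40, is_main=False)
#   driver.ex_shutdown_server(node.id, method='SOFTWARE')
# ----------------------------------------------------------------------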
|
gautam1858/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/string_length_op_test.py
|
22
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for string_length_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import test_util
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class StringLengthOpTest(test.TestCase):
def testStringLength(self):
strings = [[["1", "12"], ["123", "1234"], ["12345", "123456"]]]
with self.cached_session() as sess:
lengths = string_ops.string_length(strings)
values = self.evaluate(lengths)
self.assertAllEqual(values, [[[1, 2], [3, 4], [5, 6]]])
@test_util.run_deprecated_v1
def testUnit(self):
unicode_strings = [u"H\xc3llo", u"\U0001f604"]
utf8_strings = [s.encode("utf-8") for s in unicode_strings]
expected_utf8_byte_lengths = [6, 4]
expected_utf8_char_lengths = [5, 1]
with self.session() as sess:
utf8_byte_lengths = string_ops.string_length(utf8_strings, unit="BYTE")
utf8_char_lengths = string_ops.string_length(
utf8_strings, unit="UTF8_CHAR")
self.assertAllEqual(
self.evaluate(utf8_byte_lengths), expected_utf8_byte_lengths)
self.assertAllEqual(
self.evaluate(utf8_char_lengths), expected_utf8_char_lengths)
with self.assertRaisesRegexp(
ValueError, "Attr 'unit' of 'StringLength' Op passed string 'XYZ' "
'not in: "BYTE", "UTF8_CHAR"'):
string_ops.string_length(utf8_strings, unit="XYZ")
@test_util.run_deprecated_v1
def testLegacyPositionalName(self):
# Code that predates the 'unit' parameter may have used a positional
# argument for the 'name' parameter. Check that we don't break such code.
strings = [[["1", "12"], ["123", "1234"], ["12345", "123456"]]]
lengths = string_ops.string_length(strings, "some_name")
with self.session():
self.assertAllEqual(lengths.eval(), [[[1, 2], [3, 4], [5, 6]]])
if __name__ == "__main__":
test.main()
|
wger-project/wger
|
refs/heads/master
|
wger/gallery/api/serializers.py
|
1
|
# Third Party
from rest_framework import serializers
# wger
from wger.gallery.models import Image
class ImageSerializer(serializers.ModelSerializer):
"""
    Image serializer
"""
class Meta:
model = Image
fields = [
'id',
'date',
'image',
'description',
]
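# Minimal usage sketch (illustrative only): serializing a gallery image
# inside a configured Django project; the queryset and resulting values
# below are placeholders.
#
#   image = Image.objects.first()
#   ImageSerializer(image).data
#   # -> {'id': ..., 'date': ..., 'image': ..., 'description': ...}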
|
swpowell/raintype_python
|
refs/heads/master
|
uw_raintype/__init__.py
|
1
|
# =============================
"""
UW - Rain Type - Python Package that contains functions needed for UW rain
type classification based on Powell et al. (2016, JTECH)
==================================
Top-level package (:mod:`uw_raintype`)
==================================
.. currentmodule:: uw_raintype
"""
|
wilkeraziz/pcfg-sampling
|
refs/heads/master
|
generalisedSampling.py
|
1
|
"""
:Authors: - Iason
"""
from symbol import is_nonterminal
import random
import numpy as np
class GeneralisedSampling(object):
def __init__(self, forest, inside_node, omega=lambda edge: edge.log_prob):
"""
:param forest: an acyclic hypergraph
:param inside_node: a dictionary mapping nodes to their inside weights.
:param omega: a function that returns the weight of an edge.
By default we return the edge's log probability, but omega
can be used in situations where we must compute a function of that weight, for example,
when we want to convert from a semiring to another,
or when we want to compute a uniform probability based on assingments of the slice variables.
"""
self.forest = forest
self.inside_node = inside_node
self.inside_edge = dict() # cache for the inside weight of edges
self.omega = omega
def sample(self, goal='[GOAL]'):
"""
        The generalised sampling algorithm.
"""
# an empty partial derivation
d = []
# Q, a queue of nodes to be visited, starting from [GOAL]
Q = [goal]
while Q:
parent = Q.pop()
# select an edge
edge = self.select(parent)
# add the edge to the partial derivation
d.append(edge)
# queue the non-terminal nodes in the tail of the selected edge
for child in edge.rhs:
if is_nonterminal(child):
Q.append(child)
return d
def get_edge_inside(self, edge):
"""Compute the inside weight of an edge (caching the result)."""
w = self.inside_edge.get(edge, None)
if w is None:
# starting from the edge's own weight
# and including the inside of each child node
# accumulate (log-domain) all contributions
w = sum((self.inside_node[child] for child in edge.rhs), self.omega(edge))
self.inside_edge[edge] = w
return w
def select(self, parent):
"""
        Selects an incoming edge at random with respect to the inside weight distribution.
"""
# self.iq = dict()
incoming = self.forest.get(parent, frozenset())
if not incoming:
raise ValueError('I cannot sample an incoming edge to a terminal node')
# the inside weight of the parent node
ip = self.inside_node[parent]
# select an edge randomly with respect to the distribution of the edges
# threshold for selecting an edge
threshold = np.log(random.uniform(0, np.exp(ip)))
acc = -float("inf")
for e in incoming:
# acc = math.log(math.exp(acc) + math.exp(self.get_edge_inside(e)))
acc = np.logaddexp(acc, self.get_edge_inside(e))
if acc > threshold:
return e
        # if no edge has been returned due to some rare rounding error,
        # return the last edge, since that is the edge closest to the threshold
return e
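# A small, self-contained demo (not part of the sampler) of the same
# log-domain inverse-CDF trick used in ``select``: draw a threshold
# uniformly in [0, total weight) and walk the running (log-space) sum
# until it exceeds the threshold. The weights below are illustrative.
if __name__ == '__main__':
    log_weights = np.log([0.1, 0.3, 0.6])
    total = np.logaddexp.reduce(log_weights)
    threshold = np.log(random.uniform(0, np.exp(total)))
    acc = -float('inf')
    chosen = None
    for i, lw in enumerate(log_weights):
        acc = np.logaddexp(acc, lw)
        if acc > threshold:
            chosen = i
            break
    print('sampled index:', chosen)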
|
JSchwerberg/review
|
refs/heads/master
|
vagrant_resources/cookbooks/python/files/default/get-pip.py
|
2
| null |
40223202/2015cdb_g2
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/_functools.py
|
727
|
def partial(func, *args, **keywords):
def newfunc(*fargs, **fkeywords):
newkeywords = keywords.copy()
newkeywords.update(fkeywords)
return func(*(args + fargs), **newkeywords)
newfunc.func = func
newfunc.args = args
newfunc.keywords = keywords
return newfunc
def reduce(func,iterable,initializer=None):
args = iter(iterable)
if initializer is not None:
res = initializer
else:
res = next(args)
while True:
try:
res = func(res,next(args))
except StopIteration:
return res
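# Brief usage sketch (illustrative only) of the two helpers above.
if __name__ == '__main__':
    add = lambda a, b: a + b
    inc = partial(add, 1)
    print(inc(41))                      # 42
    print(reduce(add, [1, 2, 3, 4]))    # 10
    print(reduce(add, [], 0))           # 0 (initializer is returned)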
|
leowucom/captain
|
refs/heads/master
|
src/learn_english/model/utility.py
|
2
|
# -*- coding:utf-8 -*-
"""
utility function
"""
import platform
import os
import re
import json
import sys
import urllib2
import datetime
import bs4
import requests
from nltk.stem import WordNetLemmatizer
import constants
from datetime import date
import calendar
reload(sys)
sys.setdefaultencoding('utf-8')
REVEAL_ORIGINAL_FORM = WordNetLemmatizer()
def show_notification(title, msg):
if platform.system() == 'Darwin':
strippend_msg = msg.strip()
if strippend_msg == "":
return
command = "osascript -e \'tell app \"System Events\" to display notification \"" + \
strippend_msg.encode('utf-8') + "\" with title \"" + \
title.encode('utf-8') + "\"\'"
os.system(command)
return
def load_json_file(file_name):
try:
f = open(file_name, 'r')
res = json.load(f)
f.close()
return res
except:
return dict()
def write_json_file(file_name, data):
f = open(file_name, 'w')
f.write(json.dumps(data, indent=2))
f.close()
def get_content_of_url(url):
try:
return urllib2.urlopen(url).read()
except:
return ''
def get_raw_content(url, mark):
s = requests.session()
s.keep_alive = False
try:
res = s.get(url)
except:
return
soup = bs4.BeautifulSoup(res.content, 'lxml')
return str(soup.find('div', attrs={'class': mark}))
def extract_info_from_raw(raw_content, mark):
"""
:param raw_content:
:param mark: extract content against mark
:return:
"""
try:
point_one_index = raw_content.index(mark)
except:
return ''
left_bracket_index = raw_content[point_one_index:].index(
'>') + point_one_index
right_bracket_index = raw_content[point_one_index:].index(
'<') + point_one_index
res = raw_content[left_bracket_index + 1:right_bracket_index]
return res
def get_day_of_week():
my_date = date.today()
return calendar.day_name[my_date.weekday()]
def get_word_original_form(word):
word = word.strip().lower()
ori_form = REVEAL_ORIGINAL_FORM.lemmatize(word, pos='n')
if word != ori_form:
return ori_form
else:
ori_form = REVEAL_ORIGINAL_FORM.lemmatize(word, pos='a')
if word != ori_form:
return ori_form
else:
ori_form = REVEAL_ORIGINAL_FORM.lemmatize(word, pos='v')
if word != ori_form:
return ori_form
return word
def get_concatinated_usages(dst_usage, new_usage):
new_usage = new_usage.replace(constants.USAGE_PREFIX, '')
new_usage_lst = new_usage.split('\n')
ok = False
for usage in new_usage_lst:
if dst_usage.find(usage) < 0:
if not usage.endswith('\n') and len(usage) > 0:
usage += '\n'
if not dst_usage.endswith('\n') and len(dst_usage) > 0:
dst_usage += '\n'
dst_usage += get_refined_usages(usage)
ok = True
return dst_usage, ok
def get_refined_usages(raw_usages):
lst = re.compile('[0-9]+\)').split(raw_usages)
if len(lst) == 1:
return constants.USAGE_PREFIX + lst[0]
return constants.USAGE_PREFIX + ('\n' + constants.USAGE_PREFIX).join(lst[1:])
def get_current_minute():
return int(datetime.datetime.now().strftime("%M"))
def get_current_seconds():
return int(datetime.datetime.now().strftime("%S"))
def log2file(content):
append_log('---------------------')
append_log(str(content))
def append_log(content):
with open('log.txt', 'a') as f:
f.write(content + '\n')
# print(get_word_original_form('apples'))
|
skylines-project/skylines
|
refs/heads/master
|
migrations/versions/2dade673f10e_add_qnh_column_to_flight.py
|
1
|
# revision identifiers, used by Alembic.
revision = "2dade673f10e"
down_revision = "1d8eda758ba6"
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column("flights", sa.Column("qnh", sa.Float(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column("flights", "qnh")
### end Alembic commands ###
|
ninapavlich/caledon
|
refs/heads/master
|
celadon/apps/media/migrations/0001_initial.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import imagekit_cropper.fields
import celadon.s3utils
import carbon.atoms.models.media
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('version', models.IntegerField(default=0)),
('created_date', models.DateTimeField(auto_now_add=True, verbose_name='Created Date', null=True)),
('modified_date', models.DateTimeField(auto_now=True, verbose_name='Modified Date', null=True)),
('admin_note', models.TextField(null=True, verbose_name='admin note', blank=True)),
('title', models.CharField(help_text=b'The display title for this object.', max_length=255, null=True, verbose_name='Title', blank=True)),
('slug', models.CharField(help_text=b'Auto-generated page slug for this object.', max_length=255, verbose_name='Slug', db_index=True, blank=True)),
('uuid', models.CharField(help_text=b'UUID generated for object; can be used for short URLs', max_length=255, verbose_name='UUID', blank=True)),
('order', models.IntegerField(default=0, help_text=b'')),
('path', models.CharField(help_text=b'Actual path used based on generated and override path', max_length=255, null=True, verbose_name='path', blank=True)),
('path_generated', models.CharField(help_text=b'The URL path to this page, based on page hierarchy and slug.', max_length=255, null=True, verbose_name='generated path', blank=True)),
('path_override', models.CharField(help_text=b'The URL path to this page, defined absolutely.', max_length=255, null=True, verbose_name='path override', blank=True)),
('hierarchy', models.CharField(null=True, max_length=255, blank=True, help_text=b'Administrative Hierarchy', unique=True, verbose_name='hierarchy')),
('temporary_redirect', models.CharField(help_text=b'Temporarily redirect to a different path', max_length=255, verbose_name='Temporary Redirect', blank=True)),
('permanent_redirect', models.CharField(help_text=b'Permanently redirect to a different path', max_length=255, verbose_name='Permanent Redirect', blank=True)),
('credit', models.CharField(help_text=b'Credit', max_length=255, verbose_name='Credit', blank=True)),
('caption', models.TextField(help_text=b'Caption', verbose_name='Caption', blank=True)),
('clean_filename_on_upload', models.BooleanField(default=True, help_text=b'This removes spaces, special characters, and capitalization from the file name for more consistent naming.', verbose_name='Clean filename on upload')),
('allow_overwrite', models.BooleanField(default=True, help_text=b"Allow file to write over an existing file if the name is the same. If not, we'll automatically add a numerical suffix to ensure file doesn't override existing files.", verbose_name='Allow Overwrite')),
('size', models.BigIntegerField(help_text=b'File size in bytes', null=True, blank=True)),
('display_size', models.CharField(max_length=255, null=True, verbose_name='Display Size', blank=True)),
('alt', models.CharField(help_text=b'Alt text (very important for SEO)', max_length=255, verbose_name='Alt Text', blank=True)),
('use_png', models.BooleanField(default=False, help_text=b'Render image as png instead of jpg when possible', verbose_name=b'Use .PNG (instead of .JPG)')),
('image_width', models.IntegerField(null=True, blank=True)),
('image_height', models.IntegerField(null=True, blank=True)),
('image', models.ImageField(help_text=b'To ensure a precise color replication in image variants, make sure an sRGB color profile has been assigned to each image.', storage=celadon.s3utils._MediaS3BotoStorage(bucket=b'celadon', custom_domain=b'celadon.s3.amazonaws.com', location=b'media'), null=True, upload_to=carbon.atoms.models.media.image_file_name, blank=True)),
('square_crop', imagekit_cropper.fields.ImageCropField(null=True, properties={b'min_height': 600, b'source': b'image', b'format_field': b'get_format', b'crop_field': b'square_crop', b'aspect_ratio': 1, b'upscale': True, b'min_width': 600, b'resize_method': b'fill'}, blank=True)),
('width_1000_fill_crop', imagekit_cropper.fields.ImageCropField(null=True, properties={b'format_field': b'get_format', b'width': 1000, b'resize_method': b'fit', b'height': None, b'source': b'image', b'upscale': True}, blank=True)),
('width_1200_wide_crop', imagekit_cropper.fields.ImageCropField(null=True, properties={b'format_field': b'get_format', b'width': 1200, b'resize_method': b'fill', b'height': 600, b'source': b'image', b'upscale': True, b'crop_field': b'width_1200_wide_crop'}, blank=True)),
('width_1200_fill_crop', imagekit_cropper.fields.ImageCropField(null=True, properties={b'format_field': b'get_format', b'width': 1200, b'resize_method': b'fit', b'height': None, b'source': b'image', b'upscale': True}, blank=True)),
('created_by', models.ForeignKey(related_name='media_image_created_by', on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
('modified_by', models.ForeignKey(related_name='media_image_modified_by', on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Media',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('version', models.IntegerField(default=0)),
('created_date', models.DateTimeField(auto_now_add=True, verbose_name='Created Date', null=True)),
('modified_date', models.DateTimeField(auto_now=True, verbose_name='Modified Date', null=True)),
('admin_note', models.TextField(null=True, verbose_name='admin note', blank=True)),
('title', models.CharField(help_text=b'The display title for this object.', max_length=255, null=True, verbose_name='Title', blank=True)),
('slug', models.CharField(help_text=b'Auto-generated page slug for this object.', max_length=255, verbose_name='Slug', db_index=True, blank=True)),
('uuid', models.CharField(help_text=b'UUID generated for object; can be used for short URLs', max_length=255, verbose_name='UUID', blank=True)),
('order', models.IntegerField(default=0, help_text=b'')),
('path', models.CharField(help_text=b'Actual path used based on generated and override path', max_length=255, null=True, verbose_name='path', blank=True)),
('path_generated', models.CharField(help_text=b'The URL path to this page, based on page hierarchy and slug.', max_length=255, null=True, verbose_name='generated path', blank=True)),
('path_override', models.CharField(help_text=b'The URL path to this page, defined absolutely.', max_length=255, null=True, verbose_name='path override', blank=True)),
('hierarchy', models.CharField(null=True, max_length=255, blank=True, help_text=b'Administrative Hierarchy', unique=True, verbose_name='hierarchy')),
('temporary_redirect', models.CharField(help_text=b'Temporarily redirect to a different path', max_length=255, verbose_name='Temporary Redirect', blank=True)),
('permanent_redirect', models.CharField(help_text=b'Permanently redirect to a different path', max_length=255, verbose_name='Permanent Redirect', blank=True)),
('credit', models.CharField(help_text=b'Credit', max_length=255, verbose_name='Credit', blank=True)),
('caption', models.TextField(help_text=b'Caption', verbose_name='Caption', blank=True)),
('clean_filename_on_upload', models.BooleanField(default=True, help_text=b'This removes spaces, special characters, and capitalization from the file name for more consistent naming.', verbose_name='Clean filename on upload')),
('allow_overwrite', models.BooleanField(default=True, help_text=b"Allow file to write over an existing file if the name is the same. If not, we'll automatically add a numerical suffix to ensure file doesn't override existing files.", verbose_name='Allow Overwrite')),
('size', models.BigIntegerField(help_text=b'File size in bytes', null=True, blank=True)),
('display_size', models.CharField(max_length=255, null=True, verbose_name='Display Size', blank=True)),
('alt', models.CharField(help_text=b'Alt text (very important for SEO)', max_length=255, verbose_name='Alt Text', blank=True)),
('use_png', models.BooleanField(default=False, help_text=b'Render image as png instead of jpg when possible', verbose_name=b'Use .PNG (instead of .JPG)')),
('image_width', models.IntegerField(null=True, blank=True)),
('image_height', models.IntegerField(null=True, blank=True)),
('image', models.ImageField(help_text=b'To ensure a precise color replication in image variants, make sure an sRGB color profile has been assigned to each image.', storage=celadon.s3utils._MediaS3BotoStorage(bucket=b'celadon', custom_domain=b'celadon.s3.amazonaws.com', location=b'media'), null=True, upload_to=carbon.atoms.models.media.image_file_name, blank=True)),
('file', models.FileField(storage=celadon.s3utils._MediaS3BotoStorage(bucket=b'celadon', custom_domain=b'celadon.s3.amazonaws.com', location=b'media'), null=True, upload_to=carbon.atoms.models.media.media_file_name, blank=True)),
('created_by', models.ForeignKey(related_name='media_media_created_by', on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
('modified_by', models.ForeignKey(related_name='media_media_modified_by', on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'abstract': False,
'verbose_name_plural': 'media',
},
),
migrations.CreateModel(
name='MediaTag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('version', models.IntegerField(default=0)),
('created_date', models.DateTimeField(auto_now_add=True, verbose_name='Created Date', null=True)),
('modified_date', models.DateTimeField(auto_now=True, verbose_name='Modified Date', null=True)),
('admin_note', models.TextField(null=True, verbose_name='admin note', blank=True)),
('title', models.CharField(help_text=b'The display title for this object.', max_length=255, null=True, verbose_name='Title', blank=True)),
('slug', models.CharField(help_text=b'Auto-generated page slug for this object.', max_length=255, verbose_name='Slug', db_index=True, blank=True)),
('uuid', models.CharField(help_text=b'UUID generated for object; can be used for short URLs', max_length=255, verbose_name='UUID', blank=True)),
('order', models.IntegerField(default=0, help_text=b'')),
('path', models.CharField(help_text=b'Actual path used based on generated and override path', max_length=255, null=True, verbose_name='path', blank=True)),
('path_generated', models.CharField(help_text=b'The URL path to this page, based on page hierarchy and slug.', max_length=255, null=True, verbose_name='generated path', blank=True)),
('path_override', models.CharField(help_text=b'The URL path to this page, defined absolutely.', max_length=255, null=True, verbose_name='path override', blank=True)),
('hierarchy', models.CharField(null=True, max_length=255, blank=True, help_text=b'Administrative Hierarchy', unique=True, verbose_name='hierarchy')),
('temporary_redirect', models.CharField(help_text=b'Temporarily redirect to a different path', max_length=255, verbose_name='Temporary Redirect', blank=True)),
('permanent_redirect', models.CharField(help_text=b'Permanently redirect to a different path', max_length=255, verbose_name='Permanent Redirect', blank=True)),
('publication_date', models.DateTimeField(null=True, verbose_name='Publication Date', blank=True)),
('publication_status', models.IntegerField(default=10, help_text=b'Current publication status', choices=[(10, 'Draft'), (20, 'Needs Review'), (100, 'Published'), (40, 'Unpublished')])),
('publish_on_date', models.DateTimeField(help_text=b"Object state will be set to 'Published' on this date.", null=True, verbose_name='Publish on Date', blank=True)),
('expire_on_date', models.DateTimeField(help_text=b"Object state will be set to 'Expired' on this date.", null=True, verbose_name='Expire on Date', blank=True)),
('page_meta_description', models.CharField(help_text=b'A short description of the page, used for SEO and not displayed to the user; aim for 150-160 characters.', max_length=2000, verbose_name='Meta Description', blank=True)),
('page_meta_keywords', models.CharField(help_text=b'A short list of keywords of the page, used for SEO and not displayed to the user; aim for 150-160 characters.', max_length=2000, verbose_name='Meta Page Keywords', blank=True)),
('is_searchable', models.BooleanField(default=True, help_text=b'Allow search engines to index this object and display in sitemap.')),
('in_sitemap', models.BooleanField(default=True, help_text=b'Is in sitemap')),
('noindex', models.BooleanField(default=False, help_text=b'Robots noindex')),
('nofollow', models.BooleanField(default=False, help_text=b'Robots nofollow')),
('sitemap_changefreq', models.CharField(default=b'monthly', help_text=b'How frequently does page content update', max_length=255, verbose_name='Sitemap Change Frequency', choices=[(b'never', 'Never'), (b'yearly', 'Yearly'), (b'monthly', 'Monthly'), (b'weekly', 'Weekly'), (b'daily', 'Daily'), (b'hourly', 'Hourly'), (b'always', 'Always')])),
('sitemap_priority', models.CharField(default=b'0.5', max_length=255, blank=True, help_text=b'Sitemap priority', null=True, verbose_name=b'Sitemap Priority')),
('shareable', models.BooleanField(default=False, help_text=b'Show sharing widget')),
('tiny_url', models.CharField(help_text=b'Tiny URL used for social sharing', max_length=255, null=True, verbose_name='tiny url', blank=True)),
('social_share_type', models.CharField(default=b'article', choices=[(b'article', b'Article'), (b'book', b'Book'), (b'profile', b'Profile'), (b'website', b'Website'), (b'video.movie', b'Video - Movie'), (b'video.episode', b'Video - Episode'), (b'video.tv_show', b'Video - TV Show'), (b'video.other', b'Video - Other'), (b'music.song', b'Music - Song'), (b'music.album', b'Music - Album'), (b'music.radio_station', b'Music - Playlist'), (b'music.radio_station', b'Music - Radio Station')], max_length=255, blank=True, null=True, verbose_name=b'Social type')),
('facebook_author_id', models.CharField(help_text=b'Numeric Facebook ID', max_length=255, null=True, verbose_name=b'Facebook Author ID', blank=True)),
('twitter_author_id', models.CharField(help_text=b'Twitter handle, including "@" e.g. @cgpartners', max_length=255, null=True, verbose_name=b'Twitter Admin ID', blank=True)),
('google_author_id', models.CharField(help_text=b'Google author id, e.g. the AUTHOR_ID in https://plus.google.com/AUTHOR_ID/posts', max_length=255, null=True, verbose_name=b'Google Admin ID', blank=True)),
('content', models.TextField(help_text=b'', null=True, verbose_name='content', blank=True)),
('synopsis', models.TextField(help_text=b'', null=True, verbose_name='synopsis', blank=True)),
('created_by', models.ForeignKey(related_name='media_mediatag_created_by', on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
('image', models.ForeignKey(related_name='media_mediatag_images', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='media.Image', help_text=b'Featured image', null=True)),
('modified_by', models.ForeignKey(related_name='media_mediatag_modified_by', on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
('published_by', models.ForeignKey(related_name='media_mediatag_published_by', on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
('social_share_image', models.ForeignKey(related_name='media_mediatag_social_images', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='media.Image', help_text=b'Standards for the social share image vary, but an image at least 300x200px should work well.', null=True)),
('template', models.ForeignKey(blank=True, to='core.Template', help_text=b'Template for view', null=True)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='media',
name='tags',
field=models.ManyToManyField(related_name='media_media_tags', to='media.MediaTag', blank=True),
),
migrations.AddField(
model_name='media',
name='template',
field=models.ForeignKey(blank=True, to='core.Template', help_text=b'Template for view', null=True),
),
migrations.AddField(
model_name='image',
name='tags',
field=models.ManyToManyField(related_name='media_image_tags', to='media.MediaTag', blank=True),
),
migrations.AddField(
model_name='image',
name='template',
field=models.ForeignKey(blank=True, to='core.Template', help_text=b'Template for view', null=True),
),
]
|
frishberg/django
|
refs/heads/master
|
tests/migrations/test_add_many_to_many_field_initial/0002_initial.py
|
65
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
("migrations", "0001_initial"),
]
operations = [
migrations.AddField(
model_name='task',
name='projects',
field=models.ManyToManyField(to='Project'),
),
]
|
ssorgatem/pulsar
|
refs/heads/master
|
pulsar/web/__init__.py
|
4
|
""" The code explicitly related to the Pulsar web server can be found in this
module and its submodules.
"""
|
edivancamargo/three.js
|
refs/heads/master
|
utils/exporters/blender/addons/io_three/exporter/api/__init__.py
|
174
|
import os
import bpy
from . import object as object_, mesh, material, camera, light
from .. import logger
def active_object():
"""
:return: The actively selected object
"""
return bpy.context.scene.objects.active
def batch_mode():
"""
:return: Whether or not the session is interactive
:rtype: bool
"""
return bpy.context.area is None
def data(node):
"""
:param node: name of an object node
:returns: the data block of the node
"""
try:
return bpy.data.objects[node].data
except KeyError:
pass
def init():
"""Initializing the api module. Required first step before
initializing the actual export process.
"""
logger.debug("Initializing API")
object_.clear_mesh_map()
def selected_objects(valid_types=None):
"""Selected objects.
:param valid_types: Filter for valid types (Default value = None)
"""
logger.debug("api.selected_objects(%s)", valid_types)
for node in bpy.context.selected_objects:
if valid_types is None:
yield node.name
elif valid_types is not None and node.type in valid_types:
yield node.name
def set_active_object(obj):
"""Set the object as active in the scene
:param obj:
"""
logger.debug("api.set_active_object(%s)", obj)
bpy.context.scene.objects.active = obj
def scene_name():
"""
:return: name of the current scene
"""
return os.path.basename(bpy.data.filepath)
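# Hedged usage sketch (not part of the original addon): meant to run inside
# Blender's Python, where the bpy context imported above is available; the
# type names 'MESH' and 'LAMP' are ordinary Blender object types chosen only
# for illustration.
if __name__ == "__main__":
    init()
    for name in selected_objects(valid_types=('MESH', 'LAMP')):
        # data() returns the data block (e.g. a Mesh) of the named object
        print(name, data(name))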
|
mmclenna/engine
|
refs/heads/master
|
build/android/pylib/remote/__init__.py
|
1201
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
|
Belxjander/Kirito
|
refs/heads/master
|
Python-3.5.0-main/Lib/distutils/text_file.py
|
16
|
"""text_file
provides the TextFile class, which gives an interface to text files
that (optionally) takes care of stripping comments, ignoring blank
lines, and joining lines with backslashes."""
import sys, os, io
class TextFile:
"""Provides a file-like object that takes care of all the things you
commonly want to do when processing a text file that has some
line-by-line syntax: strip comments (as long as "#" is your
comment character), skip blank lines, join adjacent lines by
escaping the newline (ie. backslash at end of line), strip
leading and/or trailing whitespace. All of these are optional
and independently controllable.
Provides a 'warn()' method so you can generate warning messages that
report physical line number, even if the logical line in question
spans multiple physical lines. Also provides 'unreadline()' for
implementing line-at-a-time lookahead.
Constructor is called as:
TextFile (filename=None, file=None, **options)
It bombs (RuntimeError) if both 'filename' and 'file' are None;
'filename' should be a string, and 'file' a file object (or
something that provides 'readline()' and 'close()' methods). It is
recommended that you supply at least 'filename', so that TextFile
can include it in warning messages. If 'file' is not supplied,
TextFile creates its own using 'io.open()'.
The options are all boolean, and affect the value returned by
'readline()':
strip_comments [default: true]
strip from "#" to end-of-line, as well as any whitespace
leading up to the "#" -- unless it is escaped by a backslash
lstrip_ws [default: false]
strip leading whitespace from each line before returning it
rstrip_ws [default: true]
strip trailing whitespace (including line terminator!) from
each line before returning it
skip_blanks [default: true]
skip lines that are empty *after* stripping comments and
whitespace. (If both lstrip_ws and rstrip_ws are false,
then some lines may consist of solely whitespace: these will
*not* be skipped, even if 'skip_blanks' is true.)
join_lines [default: false]
if a backslash is the last non-newline character on a line
after stripping comments and whitespace, join the following line
to it to form one "logical line"; if N consecutive lines end
with a backslash, then N+1 physical lines will be joined to
form one logical line.
collapse_join [default: false]
strip leading whitespace from lines that are joined to their
predecessor; only matters if (join_lines and not lstrip_ws)
errors [default: 'strict']
error handler used to decode the file content
Note that since 'rstrip_ws' can strip the trailing newline, the
semantics of 'readline()' must differ from those of the builtin file
object's 'readline()' method! In particular, 'readline()' returns
None for end-of-file: an empty string might just be a blank line (or
an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is
not."""
default_options = { 'strip_comments': 1,
'skip_blanks': 1,
'lstrip_ws': 0,
'rstrip_ws': 1,
'join_lines': 0,
'collapse_join': 0,
'errors': 'strict',
}
def __init__(self, filename=None, file=None, **options):
"""Construct a new TextFile object. At least one of 'filename'
(a string) and 'file' (a file-like object) must be supplied.
The keyword argument options are described above and affect
the values returned by 'readline()'."""
if filename is None and file is None:
raise RuntimeError("you must supply either or both of 'filename' and 'file'")
# set values for all options -- either from client option hash
# or fallback to default_options
for opt in self.default_options.keys():
if opt in options:
setattr(self, opt, options[opt])
else:
setattr(self, opt, self.default_options[opt])
# sanity check client option hash
for opt in options.keys():
if opt not in self.default_options:
raise KeyError("invalid TextFile option '%s'" % opt)
if file is None:
self.open(filename)
else:
self.filename = filename
self.file = file
self.current_line = 0 # assuming that file is at BOF!
# 'linebuf' is a stack of lines that will be emptied before we
# actually read from the file; it's only populated by an
# 'unreadline()' operation
self.linebuf = []
def open(self, filename):
"""Open a new file named 'filename'. This overrides both the
'filename' and 'file' arguments to the constructor."""
self.filename = filename
self.file = io.open(self.filename, 'r', errors=self.errors)
self.current_line = 0
def close(self):
"""Close the current file and forget everything we know about it
(filename, current line number)."""
file = self.file
self.file = None
self.filename = None
self.current_line = None
file.close()
def gen_error(self, msg, line=None):
outmsg = []
if line is None:
line = self.current_line
outmsg.append(self.filename + ", ")
if isinstance(line, (list, tuple)):
outmsg.append("lines %d-%d: " % tuple(line))
else:
outmsg.append("line %d: " % line)
outmsg.append(str(msg))
return "".join(outmsg)
def error(self, msg, line=None):
raise ValueError("error: " + self.gen_error(msg, line))
def warn(self, msg, line=None):
"""Print (to stderr) a warning message tied to the current logical
line in the current file. If the current logical line in the
file spans multiple physical lines, the warning refers to the
whole range, eg. "lines 3-5". If 'line' supplied, it overrides
the current line number; it may be a list or tuple to indicate a
range of physical lines, or an integer for a single physical
line."""
sys.stderr.write("warning: " + self.gen_error(msg, line) + "\n")
def readline(self):
"""Read and return a single logical line from the current file (or
from an internal buffer if lines have previously been "unread"
with 'unreadline()'). If the 'join_lines' option is true, this
may involve reading multiple physical lines concatenated into a
single string. Updates the current line number, so calling
'warn()' after 'readline()' emits a warning about the physical
line(s) just read. Returns None on end-of-file, since the empty
string can occur if 'rstrip_ws' is true but 'skip_blanks' is
not."""
# If any "unread" lines are waiting in 'linebuf', return the top
# one. (We don't actually buffer read-ahead data -- lines only
# get put in 'linebuf' if the client explicitly does an
# 'unreadline()'.)
if self.linebuf:
line = self.linebuf[-1]
del self.linebuf[-1]
return line
buildup_line = ''
while True:
# read the line, make it None if EOF
line = self.file.readline()
if line == '':
line = None
if self.strip_comments and line:
# Look for the first "#" in the line. If none, never
# mind. If we find one and it's the first character, or
# is not preceded by "\", then it starts a comment --
# strip the comment, strip whitespace before it, and
# carry on. Otherwise, it's just an escaped "#", so
# unescape it (and any other escaped "#"'s that might be
# lurking in there) and otherwise leave the line alone.
pos = line.find("#")
if pos == -1: # no "#" -- no comments
pass
# It's definitely a comment -- either "#" is the first
# character, or it's elsewhere and unescaped.
elif pos == 0 or line[pos-1] != "\\":
# Have to preserve the trailing newline, because it's
# the job of a later step (rstrip_ws) to remove it --
# and if rstrip_ws is false, we'd better preserve it!
# (NB. this means that if the final line is all comment
# and has no trailing newline, we will think that it's
# EOF; I think that's OK.)
eol = (line[-1] == '\n') and '\n' or ''
line = line[0:pos] + eol
# If all that's left is whitespace, then skip line
# *now*, before we try to join it to 'buildup_line' --
# that way constructs like
# hello \\
# # comment that should be ignored
# there
# result in "hello there".
if line.strip() == "":
continue
else: # it's an escaped "#"
line = line.replace("\\#", "#")
# did previous line end with a backslash? then accumulate
if self.join_lines and buildup_line:
# oops: end of file
if line is None:
self.warn("continuation line immediately precedes "
"end-of-file")
return buildup_line
if self.collapse_join:
line = line.lstrip()
line = buildup_line + line
# careful: pay attention to line number when incrementing it
if isinstance(self.current_line, list):
self.current_line[1] = self.current_line[1] + 1
else:
self.current_line = [self.current_line,
self.current_line + 1]
# just an ordinary line, read it as usual
else:
if line is None: # eof
return None
# still have to be careful about incrementing the line number!
if isinstance(self.current_line, list):
self.current_line = self.current_line[1] + 1
else:
self.current_line = self.current_line + 1
# strip whitespace however the client wants (leading and
# trailing, or one or the other, or neither)
if self.lstrip_ws and self.rstrip_ws:
line = line.strip()
elif self.lstrip_ws:
line = line.lstrip()
elif self.rstrip_ws:
line = line.rstrip()
# blank line (whether we rstrip'ed or not)? skip to next line
# if appropriate
if (line == '' or line == '\n') and self.skip_blanks:
continue
if self.join_lines:
if line[-1] == '\\':
buildup_line = line[:-1]
continue
if line[-2:] == '\\\n':
buildup_line = line[0:-2] + '\n'
continue
# well, I guess there's some actual content there: return it
return line
def readlines(self):
"""Read and return the list of all logical lines remaining in the
current file."""
lines = []
while True:
line = self.readline()
if line is None:
return lines
lines.append(line)
def unreadline(self, line):
"""Push 'line' (a string) onto an internal buffer that will be
checked by future 'readline()' calls. Handy for implementing
a parser with line-at-a-time lookahead."""
self.linebuf.append(line)
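# Hedged usage sketch (not part of the original module): a self-contained
# demonstration of the option semantics documented in the class docstring,
# run against an in-memory file so nothing on disk is assumed.
if __name__ == "__main__":
    demo = io.StringIO(
        "alpha = 1  # trailing comment\n"
        "\n"
        "beta = 2 \\\n"
        "       + 3\n"
    )
    tf = TextFile(filename="<demo>", file=demo, join_lines=1, collapse_join=1)
    # Comments are stripped, the blank line is skipped, and the backslash
    # continuation is joined, so this prints ['alpha = 1', 'beta = 2 + 3'].
    print(tf.readlines())
    tf.close()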
|
wzairix/parameter-framework
|
refs/heads/master
|
test/functional-tests-legacy/PfwTestCase/Types/tEnum.py
|
10
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Enum parameter type testcases.
List of tested functions :
--------------------------
- [setParameter] function
- [getParameter] function
Initial Settings :
------------------
Enum size = 8bits; 5 components :
- max range [-128, 127]
Test cases :
------------
- Enum parameter nominal value = ENUM_NOMINAL : 5
- Enum parameter min value = ENUM_MIN : -128
- Enum parameter max value = ENUM_MAX : 127
- Enum parameter out of bound value = ENUM_OOB : 255
- Enum parameter out of size value = ENUM_OOS : 256
- Enum parameter undefined value = UNDEF
"""
import commands
from Util.PfwUnitTestLib import PfwTestCase
from Util import ACTLogging
log=ACTLogging.Logger()
# Test of type ENUM - 8 bits, range [-128, 127]
class TestCases(PfwTestCase):
def setUp(self):
self.param_name = "/Test/Test/TEST_TYPES/ENUM"
self.filesystem_name="$PFW_RESULT/ENUM"
self.pfw.sendCmd("setTuningMode", "on")
def tearDown(self):
self.pfw.sendCmd("setTuningMode", "off")
def test_Nominal_Case(self):
"""
Testing Enum parameter in nominal case
--------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- ENUM parameter in nominal case = ENUM_NOMINAL
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- ENUM parameter set to ENUM_NOMINAL
- FILESYSTEM set to 0x5
"""
log.D(self.test_Nominal_Case.__doc__)
value = "ENUM_NOMINAL"
filesystem_value="0x5"
log.I("Set parameter %s to %s"%(self.param_name,value))
out,err = self.pfw.sendCmd("setParameter",self.param_name, value)
assert err == None, log.E("setParameter %s %s : %s" % (self.param_name, value, err))
assert out == "Done", log.F("setParameter %s %s - expected : Done : %s" % (self.param_name, value,out))
log.I("Check Enum parameter state")
out, err = self.pfw.sendCmd("getParameter",self.param_name)
assert err == None, log.E("getParameter %s : %s" % (self.param_name, err))
assert out == value, log.F("getParameter %s - expected : %s , found : %s" % (self.param_name,value,out))
log.I("Check filesystem value")
assert commands.getoutput("cat %s"%(self.filesystem_name)) == filesystem_value, log.F("FILESYSTEM : parameter update error for %s"%(self.param_name))
def test_TypeMin(self):
"""
Testing minimal value for Enum parameter
----------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- ENUM parameter in min case = ENUM_MIN
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- ENUM parameter set to ENUM_MIN
- FILESYSTEM set to 0x80
"""
log.D(self.test_TypeMin.__doc__)
value = "ENUM_MIN"
filesystem_value="0x80"
log.I("Set parameter %s to %s"%(self.param_name,value))
out,err = self.pfw.sendCmd("setParameter",self.param_name, value)
assert err == None, log.E("setParameter %s %s : %s" % (self.param_name, value, err))
assert out == "Done", log.F("setParameter %s %s - expected : Done : %s" % (self.param_name, value,out))
log.I("Check Enum parameter state")
out, err = self.pfw.sendCmd("getParameter",self.param_name)
assert err == None, log.E("getParameter %s : %s" % (self.param_name, err))
assert out == value, log.F("getParameter %s - expected : %s , found : %s" % (self.param_name,value,out))
log.I("Check filesystem value")
assert commands.getoutput("cat %s"%(self.filesystem_name)) == filesystem_value, log.F("FILESYSTEM : parameter update error for %s"%(self.param_name))
def test_TypeMax(self):
"""
Testing maximal value for Enum parameter
----------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- ENUM parameter in max case = ENUM_MAX
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- ENUM parameter set to ENUM_MAX
- FILESYSTEM set to 0x7F
"""
log.D(self.test_TypeMax.__doc__)
value = "ENUM_MAX"
filesystem_value="0x7f"
log.I("Set parameter %s to %s"%(self.param_name,value))
out,err = self.pfw.sendCmd("setParameter",self.param_name, value)
assert err == None, log.E("setParameter %s %s : %s" % (self.param_name, value, err))
assert out == "Done", log.F("setParameter %s %s - expected : Done : %s" % (self.param_name, value,out))
log.I("Check Enum parameter state")
out, err = self.pfw.sendCmd("getParameter",self.param_name)
assert err == None, log.E("getParameter %s : %s" % (self.param_name, err))
assert out == value, log.F("getParameter %s - expected : %s , found : %s" % (self.param_name,value,out))
log.I("Check filesystem value")
assert commands.getoutput("cat %s"%(self.filesystem_name)) == filesystem_value, log.F("FILESYSTEM : parameter update error for %s"%(self.param_name))
def test_TypeUndefined(self):
"""
Testing ENUM parameter in undefined reference case
--------------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- ENUM parameter = UNDEF
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- error detected, parameter must not change
- FILESYSTEM must not change
"""
log.D(self.test_TypeUndefined.__doc__)
value = "UNDEF"
log.I("Check parameter %s initial value"%(self.param_name))
init_parameter_value, err=self.pfw.sendCmd("getParameter",self.param_name)
init_filesystem_value=commands.getoutput("cat %s"%(self.filesystem_name))
log.I("Set parameter %s to %s"%(self.param_name,value))
out,err = self.pfw.sendCmd("setParameter",self.param_name, value)
assert err == None, log.E("setParameter %s %s : %s" % (self.param_name, value, err))
assert out != "Done", log.F("Error not detected when setParameter %s %s" % (self.param_name, value))
log.I("Check Enum parameter state")
out, err = self.pfw.sendCmd("getParameter",self.param_name)
assert err == None, log.E("getParameter %s : %s" % (self.param_name, err))
assert out == init_parameter_value, log.F("getParameter %s - expected : %s , found : %s" % (self.param_name,init_parameter_value,out))
log.I("Check filesystem value")
assert commands.getoutput("cat %s"%(self.filesystem_name)) == init_filesystem_value, log.F("FILESYSTEM : parameter update error for %s"%(self.param_name))
def test_TypeOutOfBound(self):
"""
Testing ENUM parameter in out of range case
-------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- ENUM parameter in max case = ENUM_OOB : 255
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- error detected, parameter must not change
- FILESYSTEM must not change
"""
log.D(self.test_TypeOutOfBound.__doc__)
value = "ENUM_OOB"
log.I("Check parameter %s initial value"%(self.param_name))
init_parameter_value, err=self.pfw.sendCmd("getParameter",self.param_name)
init_filesystem_value=commands.getoutput("cat %s"%(self.filesystem_name))
log.I("Set parameter %s to %s"%(self.param_name,value))
out,err = self.pfw.sendCmd("setParameter",self.param_name, value)
assert err == None, log.E("setParameter %s %s : %s" % (self.param_name, value, err))
assert out != "Done", log.F("Error not detected when setParameter %s %s" % (self.param_name, value))
log.I("Check Enum parameter state")
out, err = self.pfw.sendCmd("getParameter",self.param_name)
assert err == None, log.E("getParameter %s : %s" % (self.param_name, err))
assert out == init_parameter_value, log.F("getParameter %s - expected : %s , found : %s" % (self.param_name,init_parameter_value,out))
log.I("Check filesystem value")
assert commands.getoutput("cat %s"%(self.filesystem_name)) == init_filesystem_value, log.F("FILESYSTEM : parameter update error for %s"%(self.param_name))
def test_TypeOutOfSize(self):
"""
Testing ENUM parameter in out of size case
------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- ENUM parameter in max case = ENUM_OOS : 256
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- error detected, parameter must not change
- FILESYSTEM must not change
"""
log.D(self.test_TypeOutOfSize.__doc__)
value = "ENUM_OOS"
log.I("Check parameter %s initial value"%(self.param_name))
init_parameter_value, err=self.pfw.sendCmd("getParameter",self.param_name)
init_filesystem_value=commands.getoutput("cat %s"%(self.filesystem_name))
log.I("Set parameter %s to %s"%(self.param_name,value))
out,err = self.pfw.sendCmd("setParameter",self.param_name, value)
assert err == None, log.E("setParameter %s %s : %s" % (self.param_name, value, err))
assert out != "Done", log.F("Error not detected when setParameter %s %s" % (self.param_name, value))
log.I("Check Enum parameter state")
out, err = self.pfw.sendCmd("getParameter",self.param_name)
assert err == None, log.E("getParameter %s : %s" % (self.param_name, err))
assert out == init_parameter_value, log.F("getParameter %s - expected : %s , found : %s" % (self.param_name,init_parameter_value,out))
log.I("Check filesystem value")
assert commands.getoutput("cat %s"%(self.filesystem_name)) == init_filesystem_value, log.F("FILESYSTEM : parameter update error for %s"%(self.param_name))
|
ericzundel/pants
|
refs/heads/master
|
src/python/pants/engine/legacy/graph.py
|
1
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from twitter.common.collections import OrderedSet
from pants.backend.jvm.targets.jvm_app import Bundle, JvmApp
from pants.base.exceptions import TargetDefinitionException
from pants.base.parse_context import ParseContext
from pants.base.specs import SingleAddress
from pants.build_graph.address import Address
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.build_graph.build_graph import BuildGraph
from pants.build_graph.remote_sources import RemoteSources
from pants.engine.addressable import Addresses, Collection
from pants.engine.fs import Files, FilesDigest, PathGlobs
from pants.engine.legacy.structs import BundleAdaptor, BundlesField, SourcesField, TargetAdaptor
from pants.engine.nodes import Return, State, Throw
from pants.engine.selectors import Select, SelectDependencies, SelectProjection
from pants.source.wrapped_globs import EagerFilesetWithSpec, FilesetRelPathWrapper
from pants.util.dirutil import fast_relpath
from pants.util.objects import datatype
logger = logging.getLogger(__name__)
class _DestWrapper(datatype('DestWrapper', ['target_types'])):
"""A wrapper for dest field of RemoteSources target.
This is only used when instantiating RemoteSources target.
"""
class LegacyBuildGraph(BuildGraph):
"""A directed acyclic graph of Targets and dependencies. Not necessarily connected.
This implementation is backed by a Scheduler that is able to resolve HydratedTargets.
"""
class InvalidCommandLineSpecError(AddressLookupError):
"""Raised when command line spec is not a valid directory"""
def __init__(self, scheduler, engine, symbol_table_cls):
"""Construct a graph given a Scheduler, Engine, and a SymbolTable class.
:param scheduler: A Scheduler that is configured to be able to resolve HydratedTargets.
:param engine: An Engine subclass to execute calls to `inject`.
:param symbol_table_cls: A SymbolTable class used to instantiate Target objects. Must match
the symbol table installed in the scheduler (TODO: see comment in `_instantiate_target`).
"""
self._scheduler = scheduler
self._target_types = self._get_target_types(symbol_table_cls)
self._engine = engine
super(LegacyBuildGraph, self).__init__()
def _get_target_types(self, symbol_table_cls):
aliases = symbol_table_cls.aliases()
target_types = dict(aliases.target_types)
for alias, factory in aliases.target_macro_factories.items():
target_type, = factory.target_types
target_types[alias] = target_type
return target_types
def _index(self, roots):
"""Index from the given roots into the storage provided by the base class.
This is an additive operation: any existing connections involving these nodes are preserved.
"""
all_addresses = set()
new_targets = list()
# Index the ProductGraph.
for node, state in roots.items():
if type(state) is Throw:
trace = 'TODO: restore trace!\n {}'.format(state) #'\n'.join(self._graph.trace(node))
raise AddressLookupError(
'Build graph construction failed for {}:\n{}'.format(node, trace))
elif type(state) is not Return:
State.raise_unrecognized(state)
if type(state.value) is not HydratedTargets:
raise TypeError('Expected roots to hold {}; got: {}'.format(
HydratedTargets, type(state.value)))
# We have a successful HydratedTargets value (for a particular input Spec).
for hydrated_target in state.value.dependencies:
target_adaptor = hydrated_target.adaptor
address = target_adaptor.address
all_addresses.add(address)
if address not in self._target_by_address:
new_targets.append(self._index_target(target_adaptor))
# Once the declared dependencies of all targets are indexed, inject their
# additional "traversable_(dependency_)?specs".
deps_to_inject = OrderedSet()
addresses_to_inject = set()
def inject(target, dep_spec, is_dependency):
address = Address.parse(dep_spec, relative_to=target.address.spec_path)
if not any(address == t.address for t in target.dependencies):
addresses_to_inject.add(address)
if is_dependency:
deps_to_inject.add((target.address, address))
for target in new_targets:
for spec in target.traversable_dependency_specs:
inject(target, spec, is_dependency=True)
for spec in target.traversable_specs:
inject(target, spec, is_dependency=False)
# Inject all addresses, then declare injected dependencies.
self.inject_addresses_closure(addresses_to_inject)
for target_address, dep_address in deps_to_inject:
self.inject_dependency(dependent=target_address, dependency=dep_address)
return all_addresses
def _index_target(self, target_adaptor):
"""Instantiate the given TargetAdaptor, index it in the graph, and return a Target."""
# Instantiate the target.
address = target_adaptor.address
target = self._instantiate_target(target_adaptor)
self._target_by_address[address] = target
# Link its declared dependencies, which will be indexed independently.
self._target_dependencies_by_address[address].update(target_adaptor.dependencies)
for dependency in target_adaptor.dependencies:
self._target_dependees_by_address[dependency].add(address)
return target
def _instantiate_target(self, target_adaptor):
"""Given a TargetAdaptor struct previously parsed from a BUILD file, instantiate a Target.
TODO: This assumes that the SymbolTable used for parsing matches the SymbolTable passed
to this graph. Would be good to make that more explicit, but it might be better to nuke
the Target subclassing pattern instead, and lean further into the "configuration composition"
model explored in the `exp` package.
"""
target_cls = self._target_types[target_adaptor.type_alias]
try:
# Pop dependencies, which were already consumed during construction.
kwargs = target_adaptor.kwargs()
kwargs.pop('dependencies')
# Instantiate.
if target_cls is JvmApp:
return self._instantiate_jvm_app(kwargs)
elif target_cls is RemoteSources:
return self._instantiate_remote_sources(kwargs)
return target_cls(build_graph=self, **kwargs)
except TargetDefinitionException:
raise
except Exception as e:
raise TargetDefinitionException(
target_adaptor.address,
'Failed to instantiate Target with type {}: {}'.format(target_cls, e))
def _instantiate_jvm_app(self, kwargs):
"""For JvmApp target, convert BundleAdaptor to BundleProps."""
parse_context = ParseContext(kwargs['address'].spec_path, dict())
bundleprops_factory = Bundle(parse_context)
kwargs['bundles'] = [
bundleprops_factory.create_bundle_props(bundle)
for bundle in kwargs['bundles']
]
return JvmApp(build_graph=self, **kwargs)
def _instantiate_remote_sources(self, kwargs):
"""For RemoteSources target, convert "dest" field to its real target type."""
kwargs['dest'] = _DestWrapper((self._target_types[kwargs['dest']],))
return RemoteSources(build_graph=self, **kwargs)
def inject_synthetic_target(self,
address,
target_type,
dependencies=None,
derived_from=None,
**kwargs):
target = target_type(name=address.target_name,
address=address,
build_graph=self,
**kwargs)
self.inject_target(target,
dependencies=dependencies,
derived_from=derived_from,
synthetic=True)
def inject_address_closure(self, address):
self.inject_addresses_closure([address])
def inject_addresses_closure(self, addresses):
addresses = set(addresses) - set(self._target_by_address.keys())
if not addresses:
return
for _ in self._inject([SingleAddress(a.spec_path, a.target_name) for a in addresses]):
pass
def inject_specs_closure(self, specs, fail_fast=None):
# Request loading of these specs.
for address in self._inject(specs):
yield address
def resolve_address(self, address):
if not self.contains_address(address):
self.inject_address_closure(address)
return self.get_target(address)
def _inject(self, subjects):
"""Inject Targets into the graph for each of the subjects and yield the resulting addresses."""
logger.debug('Injecting to %s: %s', self, subjects)
request = self._scheduler.execution_request([HydratedTargets, Addresses], subjects)
result = self._engine.execute(request)
if result.error:
raise result.error
# Update the base class indexes for this request.
root_entries = self._scheduler.root_entries(request)
address_entries = {k: v for k, v in root_entries.items() if k[1].product is Addresses}
target_entries = {k: v for k, v in root_entries.items() if k[1].product is HydratedTargets}
self._index(target_entries)
yielded_addresses = set()
for root, state in address_entries.items():
if not state.value:
raise self.InvalidCommandLineSpecError(
'Spec {} does not match any targets.'.format(root.subject))
for address in state.value.dependencies:
if address not in yielded_addresses:
yielded_addresses.add(address)
yield address
class HydratedTarget(datatype('HydratedTarget', ['adaptor', 'dependencies'])):
"""A wrapper for a fully hydrated TargetAdaptor object."""
# TODO: Only used (currently) to represent transitive hydrated targets. Consider renaming.
HydratedTargets = Collection.of(HydratedTarget)
class HydratedField(datatype('HydratedField', ['name', 'value'])):
"""A wrapper for a fully constructed replacement kwarg for a HydratedTarget."""
def hydrate_target(target_adaptor, hydrated_fields):
"""Construct a HydratedTarget from a TargetAdaptor and hydrated versions of its adapted fields."""
# Hydrate the fields of the adaptor and re-construct it.
kwargs = target_adaptor.kwargs()
for field in hydrated_fields:
kwargs[field.name] = field.value
return HydratedTarget(TargetAdaptor(**kwargs), tuple(target_adaptor.dependencies))
def _eager_fileset_with_spec(spec_path, filespec, source_files_digest, excluded_source_files):
excluded = {f.path for f in excluded_source_files.dependencies}
file_tuples = [(fast_relpath(fd.path, spec_path), fd.digest)
for fd in source_files_digest.dependencies
if fd.path not in excluded]
relpath_adjusted_filespec = FilesetRelPathWrapper.to_filespec(filespec['globs'], spec_path)
if 'exclude' in filespec:
relpath_adjusted_filespec['exclude'] = [FilesetRelPathWrapper.to_filespec(e['globs'], spec_path)
for e in filespec['exclude']]
# NB: In order to preserve declared ordering, we record a list of matched files
# independent of the file hash dict.
return EagerFilesetWithSpec(spec_path,
relpath_adjusted_filespec,
files=tuple(f for f, _ in file_tuples),
file_hashes=dict(file_tuples))
def hydrate_sources(sources_field, source_files_digest, excluded_source_files):
"""Given a SourcesField and FilesDigest for its path_globs, create an EagerFilesetWithSpec."""
fileset_with_spec = _eager_fileset_with_spec(sources_field.address.spec_path,
sources_field.filespecs,
source_files_digest,
excluded_source_files)
return HydratedField(sources_field.arg, fileset_with_spec)
def hydrate_bundles(bundles_field, files_digest_list, excluded_files_list):
"""Given a BundlesField and FilesDigest for each of its filesets create a list of BundleAdaptors."""
bundles = []
zipped = zip(bundles_field.bundles,
bundles_field.filespecs_list,
files_digest_list,
excluded_files_list)
for bundle, filespecs, files_digest, excluded_files in zipped:
spec_path = bundles_field.address.spec_path
kwargs = bundle.kwargs()
kwargs['fileset'] = _eager_fileset_with_spec(getattr(bundle, 'rel_path', spec_path),
filespecs,
files_digest,
excluded_files)
bundles.append(BundleAdaptor(**kwargs))
return HydratedField('bundles', bundles)
def create_legacy_graph_tasks(symbol_table_cls):
"""Create tasks to recursively parse the legacy graph."""
symbol_table_constraint = symbol_table_cls.constraint()
return [
# Recursively requests HydratedTargets, which will result in an eager, transitive graph walk.
(HydratedTargets,
[SelectDependencies(HydratedTarget,
Addresses,
field_types=(Address,), transitive=True)],
HydratedTargets),
(HydratedTarget,
[Select(symbol_table_constraint),
SelectDependencies(HydratedField,
symbol_table_constraint,
'field_adaptors',
field_types=(SourcesField, BundlesField,))],
hydrate_target),
(HydratedField,
[Select(SourcesField),
SelectProjection(FilesDigest, PathGlobs, ('path_globs',), SourcesField),
SelectProjection(Files, PathGlobs, ('excluded_path_globs',), SourcesField)],
hydrate_sources),
(HydratedField,
[Select(BundlesField),
SelectDependencies(FilesDigest, BundlesField, 'path_globs_list', field_types=(PathGlobs,)),
SelectDependencies(Files, BundlesField, 'excluded_path_globs_list', field_types=(PathGlobs,))],
hydrate_bundles),
]
|
drabastomek/learningPySpark
|
refs/heads/master
|
Chapter11/additionalCode/utilities/__init__.py
|
1
|
from .geoCalc import geoCalc
__all__ = ['geoCalc','converters']
|
moio/spacewalk
|
refs/heads/master
|
java/scripts/api/usertests.py
|
11
|
#!/usr/bin/python
import xmlrpclib
import unittest
from config import *
from random import randint
class UserTests(RhnTestCase):
def setUp(self):
RhnTestCase.setUp(self)
self.test_user = "TestUser%s" % randint(1, 1000000)
client.user.create(self.session_key, self.test_user, "testpassword",
"Test", "User", "TestUser@example.com")
self.test_group_names = []
self.test_group_ids = []
self.test_group_names.append("Test Group %s" % randint(1, 100000))
self.test_group_names.append("Test Group %s" % randint(1, 100000))
self.test_group_names.append("Test Group %s" % randint(1, 100000))
for group_name in self.test_group_names:
group = client.systemgroup.create(self.session_key, group_name,
"Fake Description")
self.test_group_ids.append(group['id'])
def tearDown(self):
client.user.delete(self.session_key, self.test_user)
for group_name in self.test_group_names:
client.systemgroup.delete(self.session_key, group_name)
RhnTestCase.tearDown(self)
def test_add_assigned_system_groups(self):
groups = client.user.listAssignedSystemGroups(self.session_key,
self.test_user)
self.assertEquals(0, len(groups))
ret = client.user.addAssignedSystemGroups(self.session_key,
self.test_user, self.test_group_ids, False)
self.assertEquals(1, ret)
groups = client.user.listAssignedSystemGroups(self.session_key,
self.test_user)
self.assertEquals(len(self.test_group_ids), len(groups))
def test_add_assigned_system_groups_and_set_default(self):
groups = client.user.listAssignedSystemGroups(self.session_key,
self.test_user)
self.assertEquals(0, len(groups))
groups = client.user.listDefaultSystemGroups(self.session_key,
self.test_user)
self.assertEquals(0, len(groups))
ret = client.user.addAssignedSystemGroups(self.session_key,
self.test_user, self.test_group_ids, True)
self.assertEquals(1, ret)
groups = client.user.listAssignedSystemGroups(self.session_key,
self.test_user)
self.assertEquals(len(self.test_group_ids), len(groups))
groups = client.user.listDefaultSystemGroups(self.session_key,
self.test_user)
self.assertEquals(len(self.test_group_ids), len(groups))
def test_add_assigned_system_group(self):
groups = client.user.listAssignedSystemGroups(self.session_key,
self.test_user)
self.assertEquals(0, len(groups))
ret = client.user.addAssignedSystemGroup(self.session_key,
self.test_user, self.test_group_ids[0], False)
self.assertEquals(1, ret)
groups = client.user.listAssignedSystemGroups(self.session_key,
self.test_user)
self.assertEquals(1, len(groups))
if __name__ == "__main__":
unittest.main()
|
Ecotrust-Canada/terratruth
|
refs/heads/master
|
django_app/registration/management/commands/cleanupregistration.py
|
232
|
"""
A management command which deletes expired accounts (e.g.,
accounts which signed up but never activated) from the database.
Calls ``RegistrationProfile.objects.delete_expired_users()``, which
contains the actual logic for determining which accounts are deleted.
"""
from django.core.management.base import NoArgsCommand
from registration.models import RegistrationProfile
class Command(NoArgsCommand):
help = "Delete expired user registrations from the database"
def handle_noargs(self, **options):
RegistrationProfile.objects.delete_expired_users()
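# Hedged usage note (not part of the original command): the command name is
# derived from this file's name, so it is typically scheduled (e.g. via cron) as
#     python manage.py cleanupregistration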
|
IDSIA/sacred
|
refs/heads/master
|
tests/test_experiment.py
|
1
|
#!/usr/bin/env python
# coding=utf-8
from sacred import Ingredient
"""Global Docstring"""
from mock import patch
import pytest
import sys
from sacred import cli_option
from sacred import host_info_gatherer
from sacred.experiment import Experiment
from sacred.utils import apply_backspaces_and_linefeeds, ConfigAddedError, SacredError
@pytest.fixture
def ex():
return Experiment("ator3000")
def test_main(ex):
@ex.main
def foo():
pass
assert "foo" in ex.commands
assert ex.commands["foo"] == foo
assert ex.default_command == "foo"
def test_automain_imported(ex):
main_called = [False]
with patch.object(sys, "argv", ["test.py"]):
@ex.automain
def foo():
main_called[0] = True
assert "foo" in ex.commands
assert ex.commands["foo"] == foo
assert ex.default_command == "foo"
assert main_called[0] is False
def test_automain_script_runs_main(ex):
global __name__
oldname = __name__
main_called = [False]
try:
__name__ = "__main__"
with patch.object(sys, "argv", ["test.py"]):
@ex.automain
def foo():
main_called[0] = True
assert "foo" in ex.commands
assert ex.commands["foo"] == foo
assert ex.default_command == "foo"
assert main_called[0] is True
finally:
__name__ = oldname
def test_fails_on_unused_config_updates(ex):
@ex.config
def cfg():
a = 1
c = 3
@ex.main
def foo(a, b=2):
return a + b
# normal config updates work
assert ex.run(config_updates={"a": 3}).result == 5
# not in config but used works
assert ex.run(config_updates={"b": 8}).result == 9
# unused but in config updates work
assert ex.run(config_updates={"c": 9}).result == 3
# unused config updates raise
with pytest.raises(ConfigAddedError):
ex.run(config_updates={"d": 3})
def test_fails_on_nested_unused_config_updates(ex):
@ex.config
def cfg():
a = {"b": 1}
d = {"e": 3}
@ex.main
def foo(a):
return a["b"]
# normal config updates work
assert ex.run(config_updates={"a": {"b": 2}}).result == 2
# not in config but parent is works
assert ex.run(config_updates={"a": {"c": 5}}).result == 1
# unused but in config works
assert ex.run(config_updates={"d": {"e": 7}}).result == 1
# unused nested config updates raise
with pytest.raises(ConfigAddedError):
ex.run(config_updates={"d": {"f": 3}})
def test_considers_captured_functions_for_fail_on_unused_config(ex):
@ex.config
def cfg():
a = 1
@ex.capture
def transmogrify(a, b=0):
return a + b
@ex.main
def foo():
return transmogrify()
assert ex.run(config_updates={"a": 7}).result == 7
assert ex.run(config_updates={"b": 3}).result == 4
with pytest.raises(ConfigAddedError):
ex.run(config_updates={"c": 3})
def test_considers_prefix_for_fail_on_unused_config(ex):
@ex.config
def cfg():
a = {"b": 1}
@ex.capture(prefix="a")
def transmogrify(b):
return b
@ex.main
def foo():
return transmogrify()
assert ex.run(config_updates={"a": {"b": 3}}).result == 3
with pytest.raises(ConfigAddedError):
ex.run(config_updates={"b": 5})
with pytest.raises(ConfigAddedError):
ex.run(config_updates={"a": {"c": 5}})
def test_non_existing_prefix_is_treated_as_empty_dict(ex):
@ex.capture(prefix="nonexisting")
def transmogrify(b=10):
return b
@ex.main
def foo():
return transmogrify()
assert ex.run().result == 10
def test_using_a_named_config(ex):
@ex.config
def cfg():
a = 1
@ex.named_config
def ncfg_first():
a = 10
@ex.named_config
def ncfg_second(a):
a = a * 2
@ex.main
def run(a):
return a
assert ex.run().result == 1
assert ex.run(named_configs=["ncfg_first"]).result == 10
assert ex.run(named_configs=["ncfg_first", "ncfg_second"]).result == 20
with pytest.raises(KeyError, match=r".*not in preset for ConfigScope"):
ex.run(named_configs=["ncfg_second", "ncfg_first"])
def test_empty_dict_named_config(ex):
@ex.named_config
def ncfg():
empty_dict = {}
nested_empty_dict = {"k1": {"k2": {}}}
@ex.automain
def main(empty_dict=1, nested_empty_dict=2):
return empty_dict, nested_empty_dict
assert ex.run().result == (1, 2)
assert ex.run(named_configs=["ncfg"]).result == ({}, {"k1": {"k2": {}}})
def test_empty_dict_config_updates(ex):
@ex.config
def cfg():
a = 1
@ex.config
def default():
a = {"b": 1}
@ex.main
def main():
pass
r = ex.run()
assert r.config["a"]["b"] == 1
def test_named_config_and_ingredient():
ing = Ingredient("foo")
@ing.config
def cfg():
a = 10
ex = Experiment(ingredients=[ing])
@ex.config
def default():
b = 20
@ex.named_config
def named():
b = 30
@ex.main
def main():
pass
r = ex.run(named_configs=["named"])
assert r.config["b"] == 30
assert r.config["foo"] == {"a": 10}
def test_captured_out_filter(ex, capsys):
@ex.main
def run_print_mock_progress():
sys.stdout.write("progress 0")
sys.stdout.flush()
for i in range(10):
sys.stdout.write("\b")
sys.stdout.write("{}".format(i))
sys.stdout.flush()
ex.captured_out_filter = apply_backspaces_and_linefeeds
# disable logging and set capture mode to python
options = {"--loglevel": "CRITICAL", "--capture": "sys"}
with capsys.disabled():
assert ex.run(options=options).captured_out == "progress 9"
def test_adding_option_hooks(ex):
@ex.option_hook
def hook(options):
pass
@ex.option_hook
def hook2(options):
pass
assert hook in ex.option_hooks
assert hook2 in ex.option_hooks
def test_option_hooks_without_options_arg_raises(ex):
with pytest.raises(KeyError):
@ex.option_hook
def invalid_hook(wrong_arg_name):
pass
def test_config_hook_updates_config(ex):
@ex.config
def cfg():
a = "hello"
@ex.config_hook
def hook(config, command_name, logger):
config.update({"a": "me"})
return config
@ex.main
def foo():
pass
r = ex.run()
assert r.config["a"] == "me"
def test_info_kwarg_updates_info(ex):
"""Tests that the info kwarg of Experiment.create_run is used to update Run.info"""
@ex.automain
def foo():
pass
run = ex.run(info={"bar": "baz"})
assert "bar" in run.info
def test_info_kwargs_default_behavior(ex):
"""Tests the default behavior of Experiment.create_run when the info kwarg is not specified."""
@ex.automain
def foo(_run):
_run.info["bar"] = "baz"
run = ex.run()
assert "bar" in run.info
def test_fails_on_config_write(ex):
@ex.config
def cfg():
a = "hello"
nested_dict = {"dict": {"dict": 1234, "list": [1, 2, 3, 4]}}
nested_list = [{"a": 42}, (1, 2, 3, 4), [1, 2, 3, 4]]
nested_tuple = ({"a": 42}, (1, 2, 3, 4), [1, 2, 3, 4])
@ex.main
def main(_config, nested_dict, nested_list, nested_tuple):
raises_list = pytest.raises(
SacredError, match="The configuration is read-only in a captured function!"
)
raises_dict = pytest.raises(
SacredError, match="The configuration is read-only in a captured function!"
)
print("in main")
# Test for ReadOnlyDict
with raises_dict:
_config["a"] = "world!"
with raises_dict:
nested_dict["dict"] = "world!"
with raises_dict:
nested_dict["list"] = "world!"
with raises_dict:
nested_dict.clear()
with raises_dict:
nested_dict.update({"a": "world"})
# Test ReadOnlyList
with raises_list:
nested_dict["dict"]["list"][0] = 1
with raises_list:
nested_list[0] = "world!"
with raises_list:
nested_dict.clear()
# Test nested tuple
with raises_dict:
nested_tuple[0]["a"] = "world!"
with raises_list:
nested_tuple[2][0] = 123
ex.run()
def test_add_config_dict_chain(ex):
@ex.config
def config1():
"""This is my demo configuration"""
dictnest_cap = {"key_1": "value_1", "key_2": "value_2"}
@ex.config
def config2():
"""This is my demo configuration"""
dictnest_cap = {"key_2": "update_value_2", "key_3": "value3", "key_4": "value4"}
adict = {"dictnest_dict": {"key_1": "value_1", "key_2": "value_2"}}
ex.add_config(adict)
bdict = {
"dictnest_dict": {
"key_2": "update_value_2",
"key_3": "value3",
"key_4": "value4",
}
}
ex.add_config(bdict)
@ex.automain
def run():
pass
final_config = ex.run().config
assert final_config["dictnest_cap"] == {
"key_1": "value_1",
"key_2": "update_value_2",
"key_3": "value3",
"key_4": "value4",
}
assert final_config["dictnest_cap"] == final_config["dictnest_dict"]
def test_additional_gatherers():
@host_info_gatherer("hello")
def get_hello():
return "hello world"
experiment = Experiment("ator3000", additional_host_info=[get_hello])
@experiment.main
def foo():
pass
experiment.run()
assert experiment.current_run.host_info["hello"] == "hello world"
@pytest.mark.parametrize("command_line_option", ["-w", "--warning"])
def test_additional_cli_options_flag(command_line_option):
executed = [False]
@cli_option("-w", "--warning", is_flag=True)
def dummy_option(args, run):
executed[0] = True
experiment = Experiment("ator3000", additional_cli_options=[dummy_option])
@experiment.main
def foo():
pass
experiment.run_commandline([__file__, command_line_option])
assert executed[0]
@pytest.mark.parametrize("command_line_option", ["-w", "--warning"])
def test_additional_cli_options(command_line_option):
executed = [False]
@cli_option("-w", "--warning")
def dummy_option(args, run):
executed[0] = args
experiment = Experiment("ator3000", additional_cli_options=[dummy_option])
@experiment.main
def foo():
pass
experiment.run_commandline([__file__, command_line_option, "10"])
assert executed[0] == "10"
|
jeandet/meson
|
refs/heads/master
|
mesonbuild/modules/windows.py
|
1
|
# Copyright 2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from .. import mlog
from .. import mesonlib, build
from ..mesonlib import MesonException, extract_as_list
from . import get_include_args
from . import ModuleReturnValue
from . import ExtensionModule
from ..interpreter import CustomTargetHolder
from ..interpreterbase import permittedKwargs, FeatureNewKwargs
from ..dependencies import ExternalProgram
class WindowsModule(ExtensionModule):
def detect_compiler(self, compilers):
for l in ('c', 'cpp'):
if l in compilers:
return compilers[l]
raise MesonException('Resource compilation requires a C or C++ compiler.')
@FeatureNewKwargs('windows.compile_resources', '0.47.0', ['depend_files', 'depends'])
@permittedKwargs({'args', 'include_directories', 'depend_files', 'depends'})
def compile_resources(self, state, args, kwargs):
comp = self.detect_compiler(state.compilers)
extra_args = mesonlib.stringlistify(kwargs.get('args', []))
wrc_depend_files = extract_as_list(kwargs, 'depend_files', pop = True)
wrc_depends = extract_as_list(kwargs, 'depends', pop = True)
for d in wrc_depends:
if isinstance(d, CustomTargetHolder):
extra_args += get_include_args([d.outdir_include()])
inc_dirs = extract_as_list(kwargs, 'include_directories', pop = True)
for incd in inc_dirs:
if not isinstance(incd.held_object, (str, build.IncludeDirs)):
raise MesonException('Resource include dirs should be include_directories().')
extra_args += get_include_args(inc_dirs)
if comp.id == 'msvc':
rescomp = ExternalProgram('rc', silent=True)
res_args = extra_args + ['/nologo', '/fo@OUTPUT@', '@INPUT@']
suffix = 'res'
else:
m = 'Argument {!r} has a space which may not work with windres due to ' \
'a MinGW bug: https://sourceware.org/bugzilla/show_bug.cgi?id=4933'
for arg in extra_args:
if ' ' in arg:
mlog.warning(m.format(arg))
rescomp = None
# FIXME: Does not handle `native: true` executables, see
# https://github.com/mesonbuild/meson/issues/1531
if state.environment.is_cross_build():
# If cross compiling see if windres has been specified in the
# cross file before trying to find it another way.
cross_info = state.environment.cross_info
rescomp = ExternalProgram.from_cross_info(cross_info, 'windres')
if not rescomp or not rescomp.found():
# Pick-up env var WINDRES if set. This is often used for
# specifying an arch-specific windres.
rescomp = ExternalProgram(os.environ.get('WINDRES', 'windres'), silent=True)
res_args = extra_args + ['@INPUT@', '@OUTPUT@']
suffix = 'o'
if not rescomp.found():
raise MesonException('Could not find Windows resource compiler {!r}'
''.format(rescomp.get_path()))
res_targets = []
def add_target(src):
if isinstance(src, list):
for subsrc in src:
add_target(subsrc)
return
if hasattr(src, 'held_object'):
src = src.held_object
if isinstance(src, str):
name_format = 'file {!r}'
name = os.path.join(state.subdir, src)
elif isinstance(src, mesonlib.File):
name_format = 'file {!r}'
name = src.relative_name()
elif isinstance(src, build.CustomTarget):
if len(src.get_outputs()) > 1:
raise MesonException('windows.compile_resources does not accept custom targets with more than 1 output.')
name_format = 'target {!r}'
name = src.get_id()
else:
raise MesonException('Unexpected source type {!r}. windows.compile_resources accepts only strings, files, custom targets, and lists thereof.'.format(src))
# Path separators are not allowed in target names
name = name.replace('/', '_').replace('\\', '_')
res_kwargs = {
'output': name + '_@BASENAME@.' + suffix,
'input': [src],
'command': [rescomp] + res_args,
'depend_files': wrc_depend_files,
'depends': wrc_depends,
}
# instruct binutils windres to generate a preprocessor depfile
if comp.id != 'msvc':
res_kwargs['depfile'] = res_kwargs['output'] + '.d'
res_kwargs['command'] += ['--preprocessor-arg=-MD', '--preprocessor-arg=-MQ@OUTPUT@', '--preprocessor-arg=-MF@DEPFILE@']
res_targets.append(build.CustomTarget('Windows resource for ' + name_format.format(name), state.subdir, state.subproject, res_kwargs))
add_target(args)
return ModuleReturnValue(res_targets, [res_targets])
def initialize(*args, **kwargs):
return WindowsModule(*args, **kwargs)
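# Hedged usage sketch (not part of the module): on the meson.build side the
# permitted kwargs listed above are used roughly as follows; the target and
# file names are hypothetical.
#
#     win = import('windows')
#     res = win.compile_resources('myapp.rc',
#                                 args : ['-DMY_DEFINE'],
#                                 include_directories : inc,
#                                 depend_files : ['myapp.ico'])
#     executable('myapp', 'main.c', res)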
|
kennedyshead/home-assistant
|
refs/heads/dev
|
homeassistant/components/blebox/switch.py
|
21
|
"""BleBox switch implementation."""
from homeassistant.components.switch import SwitchEntity
from . import BleBoxEntity, create_blebox_entities
from .const import BLEBOX_TO_HASS_DEVICE_CLASSES
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up a BleBox switch entity."""
create_blebox_entities(
hass, config_entry, async_add_entities, BleBoxSwitchEntity, "switches"
)
class BleBoxSwitchEntity(BleBoxEntity, SwitchEntity):
"""Representation of a BleBox switch feature."""
@property
def device_class(self):
"""Return the device class."""
return BLEBOX_TO_HASS_DEVICE_CLASSES[self._feature.device_class]
@property
def is_on(self):
"""Return whether switch is on."""
return self._feature.is_on
async def async_turn_on(self, **kwargs):
"""Turn on the switch."""
await self._feature.async_turn_on()
async def async_turn_off(self, **kwargs):
"""Turn off the switch."""
await self._feature.async_turn_off()
|
javierTerry/odoo
|
refs/heads/8.0
|
openerp/addons/test_uninstall/__openerp__.py
|
435
|
# -*- coding: utf-8 -*-
{
'name': 'test-uninstall',
'version': '0.1',
'category': 'Tests',
'description': """A module to test the uninstall feature.""",
'author': 'OpenERP SA',
'maintainer': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['base'],
'data': ['ir.model.access.csv'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
SEL-Columbia/commcare-hq
|
refs/heads/master
|
corehq/apps/reports/commtrack/data_sources.py
|
1
|
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.decorators.memoized import memoized
from corehq.apps.locations.models import Location
from corehq.apps.commtrack.models import Product, SupplyPointCase, StockState
from corehq.apps.domain.models import Domain
from dimagi.utils.couch.loosechange import map_reduce
from corehq.apps.reports.api import ReportDataSource
from datetime import datetime, timedelta
from casexml.apps.stock.models import StockTransaction
from couchforms.models import XFormInstance
from corehq.apps.reports.commtrack.util import get_relevant_supply_point_ids, product_ids_filtered_by_program
from corehq.apps.reports.commtrack.const import STOCK_SECTION_TYPE
from casexml.apps.stock.utils import months_of_stock_remaining, stock_category
from corehq.apps.reports.standard.monitoring import MultiFormDrilldownMixin
from decimal import Decimal
class CommtrackDataSourceMixin(object):
@property
def domain(self):
return self.config.get('domain')
@property
@memoized
def active_location(self):
return Location.get_in_domain(self.domain, self.config.get('location_id'))
@property
@memoized
def active_product(self):
prod_id = self.config.get('product_id')
if prod_id:
return Product.get(prod_id)
@property
@memoized
def program_id(self):
prog_id = self.config.get('program_id')
if prog_id != '':
return prog_id
@property
def start_date(self):
return self.config.get('startdate') or (datetime.now() - timedelta(30)).date()
@property
def end_date(self):
return self.config.get('enddate') or datetime.now().date()
@property
def request(self):
request = self.config.get('request')
if request:
return request
class StockStatusDataSource(ReportDataSource, CommtrackDataSourceMixin):
"""
Config:
domain: The domain to report on.
location_id: ID of location to get data for. Omit for all locations.
product_id: ID of product to get data for. Omit for all products.
aggregate: True to aggregate the indicators by product for the current location.
Data Slugs:
product_name: Name of the product
product_id: ID of the product
location_id: The ID of the current location.
location_lineage: The lineage of the current location.
current_stock: The current stock level
consumption: The current monthly consumption rate
months_remaining: The number of months remaining until stock out
category: The status category. See casexml.apps.stock.models.StockState.stock_category
resupply_quantity_needed: Max amount - current amount
"""
slug = 'agg_stock_status'
SLUG_PRODUCT_NAME = 'product_name'
SLUG_PRODUCT_ID = 'product_id'
SLUG_MONTHS_REMAINING = 'months_remaining'
SLUG_CONSUMPTION = 'consumption'
SLUG_CURRENT_STOCK = 'current_stock'
SLUG_LOCATION_ID = 'location_id'
SLUG_LOCATION_LINEAGE = 'location_lineage'
SLUG_STOCKOUT_SINCE = 'stockout_since'
SLUG_STOCKOUT_DURATION = 'stockout_duration'
SLUG_LAST_REPORTED = 'last_reported'
SLUG_CATEGORY = 'category'
SLUG_RESUPPLY_QUANTITY_NEEDED = 'resupply_quantity_needed'
@property
@memoized
def _slug_attrib_map(self):
@memoized
def product_name(product_id):
return Product.get(product_id).name
@memoized
def supply_point_location(case_id):
return SupplyPointCase.get(case_id).location_[-1]
raw_map = {
self.SLUG_PRODUCT_NAME: lambda s: product_name(s.product_id),
self.SLUG_PRODUCT_ID: 'product_id',
self.SLUG_LOCATION_ID: lambda s: supply_point_location(s.case_id),
# SLUG_LOCATION_LINEAGE: lambda p: list(reversed(p.location_[:-1])),
self.SLUG_CURRENT_STOCK: 'stock_on_hand',
self.SLUG_CONSUMPTION: lambda s: s.get_monthly_consumption(),
self.SLUG_MONTHS_REMAINING: 'months_remaining',
self.SLUG_CATEGORY: 'stock_category',
# SLUG_STOCKOUT_SINCE: 'stocked_out_since',
# SLUG_STOCKOUT_DURATION: 'stockout_duration_in_months',
self.SLUG_LAST_REPORTED: 'last_modified_date',
self.SLUG_RESUPPLY_QUANTITY_NEEDED: 'resupply_quantity_needed',
}
# normalize the slug attrib map so everything is callable
def _normalize_row(slug, function_or_property):
if not callable(function_or_property):
function = lambda s: getattr(s, function_or_property, '')
else:
function = function_or_property
return slug, function
return dict(_normalize_row(k, v) for k, v in raw_map.items())
def slugs(self):
return self._slug_attrib_map.keys()
def filter_by_program(self, stock_states):
return stock_states.filter(
product_id__in=product_ids_filtered_by_program(
self.domain,
self.program_id
)
)
def get_data(self, slugs=None):
sp_ids = get_relevant_supply_point_ids(self.domain, self.active_location)
if len(sp_ids) == 1:
stock_states = StockState.objects.filter(
case_id=sp_ids[0],
section_id=STOCK_SECTION_TYPE,
last_modified_date__lte=self.end_date,
last_modified_date__gte=self.start_date,
)
if self.program_id:
stock_states = self.filter_by_program(stock_states)
return self.leaf_node_data(stock_states)
else:
stock_states = StockState.objects.filter(
case_id__in=sp_ids,
section_id=STOCK_SECTION_TYPE,
last_modified_date__lte=self.end_date,
last_modified_date__gte=self.start_date,
)
if self.program_id:
stock_states = self.filter_by_program(stock_states)
if self.config.get('aggregate'):
return self.aggregated_data(stock_states)
else:
return self.raw_product_states(stock_states, slugs)
def format_decimal(self, d):
# https://docs.python.org/2/library/decimal.html#decimal-faq
if d is not None:
return d.quantize(Decimal(1)) if d == d.to_integral() else d.normalize()
else:
return None
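    # Illustration of the rule above (values hypothetical): Decimal('5.00') equals
    # its integral value, so quantize(Decimal(1)) yields Decimal('5'); Decimal('5.50')
    # does not, so normalize() yields Decimal('5.5'). Whole numbers drop trailing
    # zeros, fractional values keep only their significant digits.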
def leaf_node_data(self, stock_states):
for state in stock_states:
product = Product.get(state.product_id)
yield {
'category': state.stock_category,
'product_id': product._id,
'consumption': state.get_monthly_consumption(),
'months_remaining': state.months_remaining,
'location_id': SupplyPointCase.get(state.case_id).location_id,
'product_name': product.name,
'current_stock': self.format_decimal(state.stock_on_hand),
'location_lineage': None,
'resupply_quantity_needed': state.resupply_quantity_needed
}
def aggregated_data(self, stock_states):
product_aggregation = {}
for state in stock_states:
if state.product_id in product_aggregation:
product = product_aggregation[state.product_id]
product['current_stock'] = self.format_decimal(
product['current_stock'] + state.stock_on_hand
)
consumption = state.get_monthly_consumption()
if product['consumption'] is None:
product['consumption'] = consumption
elif consumption is not None:
product['consumption'] += consumption
product['count'] += 1
product['category'] = stock_category(
product['current_stock'],
product['consumption'],
Domain.get_by_name(self.domain)
)
product['months_remaining'] = months_of_stock_remaining(
product['current_stock'],
product['consumption']
)
else:
product = Product.get(state.product_id)
consumption = state.get_monthly_consumption()
product_aggregation[state.product_id] = {
'product_id': product._id,
'location_id': None,
'product_name': product.name,
'location_lineage': None,
'resupply_quantity_needed': None,
'current_stock': self.format_decimal(state.stock_on_hand),
'count': 1,
'consumption': consumption,
'category': stock_category(
state.stock_on_hand,
consumption,
Domain.get_by_name(self.domain)
),
'months_remaining': months_of_stock_remaining(
state.stock_on_hand,
consumption
)
}
return product_aggregation.values()
def raw_product_states(self, stock_states, slugs):
for state in stock_states:
yield {
slug: f(state) for slug, f in self._slug_attrib_map.items() if not slugs or slug in slugs
}
class StockStatusBySupplyPointDataSource(StockStatusDataSource):
def get_data(self):
data = list(super(StockStatusBySupplyPointDataSource, self).get_data())
products = dict((r['product_id'], r['product_name']) for r in data)
product_ids = sorted(products.keys(), key=lambda e: products[e])
by_supply_point = map_reduce(lambda e: [(e['location_id'],)], data=data, include_docs=True)
locs = dict((loc._id, loc) for loc in Location.view(
'_all_docs',
keys=by_supply_point.keys(),
include_docs=True))
for loc_id, subcases in by_supply_point.iteritems():
loc = locs[loc_id]
by_product = dict((c['product_id'], c) for c in subcases)
rec = {
'name': loc.name,
'type': loc.location_type,
'geo': loc._geopoint,
}
for prod in product_ids:
rec.update(dict(('%s-%s' % (prod, key), by_product.get(prod, {}).get(key)) for key in
('current_stock', 'consumption', 'months_remaining', 'category')))
yield rec
class ReportingStatusDataSource(ReportDataSource, CommtrackDataSourceMixin, MultiFormDrilldownMixin):
"""
Config:
domain: The domain to report on.
location_id: ID of location to get data for. Omit for all locations.
"""
def get_data(self):
# todo: this will probably have to paginate eventually
if self.all_relevant_forms:
sp_ids = get_relevant_supply_point_ids(
self.domain,
self.active_location,
)
supply_points = (SupplyPointCase.wrap(doc) for doc in iter_docs(SupplyPointCase.get_db(), sp_ids))
form_xmlnses = [form['xmlns'] for form in self.all_relevant_forms.values()]
for supply_point in supply_points:
# todo: get locations in bulk
loc = supply_point.location
transactions = StockTransaction.objects.filter(
case_id=supply_point._id,
).exclude(
report__date__lte=self.start_date
).exclude(
report__date__gte=self.end_date
).order_by('-report__date')
matched = False
for trans in transactions:
if XFormInstance.get(trans.report.form_id).xmlns in form_xmlnses:
yield {
'loc_id': loc._id,
'loc_path': loc.path,
'name': loc.name,
'type': loc.location_type,
'reporting_status': 'reporting',
'geo': loc._geopoint,
}
matched = True
break
if not matched:
yield {
'loc_id': loc._id,
'loc_path': loc.path,
'name': loc.name,
'type': loc.location_type,
'reporting_status': 'nonreporting',
'geo': loc._geopoint,
}
|
eudicots/Cactus
|
refs/heads/master
|
cactus/i18n/commands.py
|
2
|
#coding:utf-8
from django.core.management.commands.makemessages import Command as MakeMessagesCommand
from django.core.management.commands.compilemessages import Command as CompileMessagesCommand
from cactus.utils.filesystem import chdir
DEFAULT_COMMAND_KWARGS = {
# Command Options
"verbosity": 3,
"settings": None,
"pythonpath": None,
"traceback": True,
"all": False,
}
DEFAULT_MAKEMESSAGES_KWARGS = {
# MakeMessages Options: Default
"domain": "django",
"extensions": [],
"ignore_patterns": [],
"symlinks": False,
"use_default_ignore_patterns": True,
"no_wrap": False,
"no_location": False,
"no_obsolete": False,
"keep_pot": False
}
def WrappedCommandFactory(wrapped, default_kwargs=None):
# Compose a list of kwargs for future runs
base_kwargs = {}
base_kwargs.update(DEFAULT_COMMAND_KWARGS)
if default_kwargs is not None:
base_kwargs.update(default_kwargs)
class WrappedCommand(object):
"""
Wraps a Django management command
"""
def __init__(self, site):
self.site = site
def execute(self):
kwargs = {"locale": [self.site.locale]}
kwargs.update(base_kwargs)
cmd = wrapped()
with chdir(self.site.path):
cmd.execute(**kwargs) # May raise an exception depending on gettext install.
return WrappedCommand
MessageMaker = WrappedCommandFactory(MakeMessagesCommand, DEFAULT_MAKEMESSAGES_KWARGS)
MessageCompiler = WrappedCommandFactory(CompileMessagesCommand)
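# Hedged usage sketch (not part of the original module): `site` is assumed to be
# a Cactus site object exposing the `path` and `locale` attributes that
# WrappedCommand.execute() reads above.
#
#     MessageMaker(site).execute()       # run Django makemessages inside site.path
#     MessageCompiler(site).execute()    # then compile the resulting .po files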
|
mxOBS/deb-pkg_trusty_chromium-browser
|
refs/heads/master
|
third_party/cython/src/Cython/Compiler/Tests/TestParseTreeTransforms.py
|
89
|
import os
import sys  # used by the version check that guards the debugger tests below
from Cython.Compiler import CmdLine
from Cython.TestUtils import TransformTest
from Cython.Compiler.ParseTreeTransforms import *
from Cython.Compiler.Nodes import *
from Cython.Compiler import Main, Symtab
from Cython.Compiler import DebugFlags  # referenced explicitly in setUp()/tearDown()
class TestNormalizeTree(TransformTest):
def test_parserbehaviour_is_what_we_coded_for(self):
t = self.fragment(u"if x: y").root
self.assertLines(u"""
(root): StatListNode
stats[0]: IfStatNode
if_clauses[0]: IfClauseNode
condition: NameNode
body: ExprStatNode
expr: NameNode
""", self.treetypes(t))
def test_wrap_singlestat(self):
t = self.run_pipeline([NormalizeTree(None)], u"if x: y")
self.assertLines(u"""
(root): StatListNode
stats[0]: IfStatNode
if_clauses[0]: IfClauseNode
condition: NameNode
body: StatListNode
stats[0]: ExprStatNode
expr: NameNode
""", self.treetypes(t))
def test_wrap_multistat(self):
t = self.run_pipeline([NormalizeTree(None)], u"""
if z:
x
y
""")
self.assertLines(u"""
(root): StatListNode
stats[0]: IfStatNode
if_clauses[0]: IfClauseNode
condition: NameNode
body: StatListNode
stats[0]: ExprStatNode
expr: NameNode
stats[1]: ExprStatNode
expr: NameNode
""", self.treetypes(t))
def test_statinexpr(self):
t = self.run_pipeline([NormalizeTree(None)], u"""
a, b = x, y
""")
self.assertLines(u"""
(root): StatListNode
stats[0]: SingleAssignmentNode
lhs: TupleNode
args[0]: NameNode
args[1]: NameNode
rhs: TupleNode
args[0]: NameNode
args[1]: NameNode
""", self.treetypes(t))
def test_wrap_offagain(self):
t = self.run_pipeline([NormalizeTree(None)], u"""
x
y
if z:
x
""")
self.assertLines(u"""
(root): StatListNode
stats[0]: ExprStatNode
expr: NameNode
stats[1]: ExprStatNode
expr: NameNode
stats[2]: IfStatNode
if_clauses[0]: IfClauseNode
condition: NameNode
body: StatListNode
stats[0]: ExprStatNode
expr: NameNode
""", self.treetypes(t))
def test_pass_eliminated(self):
t = self.run_pipeline([NormalizeTree(None)], u"pass")
self.assert_(len(t.stats) == 0)
class TestWithTransform(object): # (TransformTest): # Disabled!
def test_simplified(self):
t = self.run_pipeline([WithTransform(None)], u"""
with x:
y = z ** 3
""")
self.assertCode(u"""
$0_0 = x
$0_2 = $0_0.__exit__
$0_0.__enter__()
$0_1 = True
try:
try:
$1_0 = None
y = z ** 3
except:
$0_1 = False
if (not $0_2($1_0)):
raise
finally:
if $0_1:
$0_2(None, None, None)
""", t)
def test_basic(self):
t = self.run_pipeline([WithTransform(None)], u"""
with x as y:
y = z ** 3
""")
self.assertCode(u"""
$0_0 = x
$0_2 = $0_0.__exit__
$0_3 = $0_0.__enter__()
$0_1 = True
try:
try:
$1_0 = None
y = $0_3
y = z ** 3
except:
$0_1 = False
if (not $0_2($1_0)):
raise
finally:
if $0_1:
$0_2(None, None, None)
""", t)
class TestInterpretCompilerDirectives(TransformTest):
"""
This class tests the parallel directives AST-rewriting and importing.
"""
# Test the parallel directives (c)importing
import_code = u"""
cimport cython.parallel
cimport cython.parallel as par
from cython cimport parallel as par2
from cython cimport parallel
from cython.parallel cimport threadid as tid
from cython.parallel cimport threadavailable as tavail
from cython.parallel cimport prange
"""
expected_directives_dict = {
u'cython.parallel': u'cython.parallel',
u'par': u'cython.parallel',
u'par2': u'cython.parallel',
u'parallel': u'cython.parallel',
u"tid": u"cython.parallel.threadid",
u"tavail": u"cython.parallel.threadavailable",
u"prange": u"cython.parallel.prange",
}
def setUp(self):
super(TestInterpretCompilerDirectives, self).setUp()
compilation_options = Main.CompilationOptions(Main.default_options)
ctx = compilation_options.create_context()
transform = InterpretCompilerDirectives(ctx, ctx.compiler_directives)
transform.module_scope = Symtab.ModuleScope('__main__', None, ctx)
self.pipeline = [transform]
self.debug_exception_on_error = DebugFlags.debug_exception_on_error
def tearDown(self):
DebugFlags.debug_exception_on_error = self.debug_exception_on_error
def test_parallel_directives_cimports(self):
self.run_pipeline(self.pipeline, self.import_code)
parallel_directives = self.pipeline[0].parallel_directives
self.assertEqual(parallel_directives, self.expected_directives_dict)
def test_parallel_directives_imports(self):
self.run_pipeline(self.pipeline,
self.import_code.replace(u'cimport', u'import'))
parallel_directives = self.pipeline[0].parallel_directives
self.assertEqual(parallel_directives, self.expected_directives_dict)
# TODO: Re-enable once they're more robust.
if sys.version_info[:2] >= (2, 5) and False:
from Cython.Debugger import DebugWriter
from Cython.Debugger.Tests.TestLibCython import DebuggerTestCase
else:
# skip test, don't let it inherit unittest.TestCase
DebuggerTestCase = object
class TestDebugTransform(DebuggerTestCase):
def elem_hasattrs(self, elem, attrs):
# we shall supporteth python 2.3 !
return all([attr in elem.attrib for attr in attrs])
def test_debug_info(self):
try:
assert os.path.exists(self.debug_dest)
t = DebugWriter.etree.parse(self.debug_dest)
# the xpath of the standard ElementTree is primitive, don't use
# anything fancy
L = list(t.find('/Module/Globals'))
# assertTrue is retarded, use the normal assert statement
assert L
xml_globals = dict(
[(e.attrib['name'], e.attrib['type']) for e in L])
self.assertEqual(len(L), len(xml_globals))
L = list(t.find('/Module/Functions'))
assert L
xml_funcs = dict([(e.attrib['qualified_name'], e) for e in L])
self.assertEqual(len(L), len(xml_funcs))
# test globals
self.assertEqual('CObject', xml_globals.get('c_var'))
self.assertEqual('PythonObject', xml_globals.get('python_var'))
# test functions
funcnames = ('codefile.spam', 'codefile.ham', 'codefile.eggs',
'codefile.closure', 'codefile.inner')
required_xml_attrs = 'name', 'cname', 'qualified_name'
assert all([f in xml_funcs for f in funcnames])
spam, ham, eggs = [xml_funcs[funcname] for funcname in funcnames]
self.assertEqual(spam.attrib['name'], 'spam')
self.assertNotEqual('spam', spam.attrib['cname'])
assert self.elem_hasattrs(spam, required_xml_attrs)
# test locals of functions
spam_locals = list(spam.find('Locals'))
assert spam_locals
spam_locals.sort(key=lambda e: e.attrib['name'])
names = [e.attrib['name'] for e in spam_locals]
self.assertEqual(list('abcd'), names)
assert self.elem_hasattrs(spam_locals[0], required_xml_attrs)
# test arguments of functions
spam_arguments = list(spam.find('Arguments'))
assert spam_arguments
self.assertEqual(1, len(list(spam_arguments)))
# test step-into functions
step_into = spam.find('StepIntoFunctions')
spam_stepinto = [x.attrib['name'] for x in step_into]
assert spam_stepinto
self.assertEqual(2, len(spam_stepinto))
assert 'puts' in spam_stepinto
assert 'some_c_function' in spam_stepinto
except:
f = open(self.debug_dest)
try:
print(f.read())
finally:
f.close()
raise
if __name__ == "__main__":
import unittest
unittest.main()
|
Simplistix/testfixtures
|
refs/heads/master
|
testfixtures/tests/test_sequencecomparison.py
|
1
|
from testfixtures import SequenceComparison, generator, compare, Subset, Permutation
class TestSequenceComparison(object):
def test_repr(self):
compare(repr(SequenceComparison(1, 2, 3)),
expected='<SequenceComparison(ordered=True, partial=False)>1, 2, 3</>')
def test_repr_long(self):
actual = repr(SequenceComparison('a', 'b', 'c'*1000))[:60]
compare(actual,
expected='\n'
"<SequenceComparison(ordered=True, partial=False)>\n'a',\n 'b'")
def test_repr_after_equal(self):
s = SequenceComparison(1, 2, 3)
assert s == (1, 2, 3)
compare(repr(s), expected='<SequenceComparison(ordered=True, partial=False)>1, 2, 3</>')
def test_equal_list(self):
s = SequenceComparison(1, 2, 3)
assert s == [1, 2, 3]
def test_equal_tuple(self):
s = SequenceComparison(1, 2, 3)
assert s == (1, 2, 3)
def test_equal_nested_unhashable_unordered(self):
s = SequenceComparison({1}, {2}, {2}, ordered=False)
assert s == ({2}, {1}, {2})
def test_equal_nested_unhashable_unordered_partial(self):
s = SequenceComparison({1}, {2}, {2}, ordered=False, partial=True)
assert s == ({2}, {1}, {2}, {3})
def test_equal_generator(self):
s = SequenceComparison(1, 2, 3)
assert s == generator(1, 2, 3)
def test_equal_unordered(self):
s = SequenceComparison(1, 2, 3, ordered=False)
assert s == (1, 3, 2)
def test_equal_partial_unordered(self):
s = SequenceComparison(1, 2, ordered=False, partial=True)
assert s == (2, 1, 4)
def test_equal_partial_ordered(self):
s = SequenceComparison(1, 2, 1, ordered=True, partial=True)
assert s == (1, 1, 2, 1)
def test_equal_ordered_duplicates(self):
s = SequenceComparison(1, 2, 2, ordered=True, partial=True)
assert s == (1, 2, 2, 3)
def test_unequal_bad_type(self):
s = SequenceComparison(1, 3)
assert s != object()
compare(repr(s),
expected="<SequenceComparison(ordered=True, partial=False)(failed)>bad type</>")
def test_unequal_list(self):
s = SequenceComparison(1, 2, 3)
assert s != (1, 2, 4)
compare(repr(s), expected=(
'\n'
'<SequenceComparison(ordered=True, partial=False)(failed)>\n'
'same:\n'
'[1, 2]\n\n'
'expected:\n'
'[3]\n\n'
'actual:\n'
'[4]\n'
'</SequenceComparison(ordered=True, partial=False)>'
))
def test_unequal_same_but_all_wrong_order(self):
s = SequenceComparison(1, 2, 3)
assert s != (3, 1, 2)
compare(repr(s), expected=(
'\n'
'<SequenceComparison(ordered=True, partial=False)(failed)>\n'
'same:\n'
'[]\n\n'
'expected:\n'
'[1, 2, 3]\n\n'
'actual:\n'
'[3, 1, 2]\n'
'</SequenceComparison(ordered=True, partial=False)>'
))
def test_unequal_prefix_match_but_partial_false(self):
s = SequenceComparison(1, 2, partial=False)
assert s != (1, 2, 4)
compare(repr(s), expected=(
'\n'
'<SequenceComparison(ordered=True, partial=False)(failed)>\n'
'same:\n'
'[1, 2]\n\n'
'expected:\n'
'[]\n\n'
'actual:\n'
'[4]\n'
'</SequenceComparison(ordered=True, partial=False)>'
))
def test_unequal_partial_ordered(self):
s = SequenceComparison(1, 3, 5, ordered=True, partial=True, recursive=False)
assert s != (1, 2, 3, 4, 0)
compare(repr(s), expected=(
'\n'
'<SequenceComparison(ordered=True, partial=True)(failed)>\n'
'ignored:\n'
'[2, 4, 0]\n\n'
'same:\n'
'[1, 3]\n\n'
'expected:\n'
'[5]\n\n'
'actual:\n'
'[]\n'
'</SequenceComparison(ordered=True, partial=True)>'
))
def test_unequal_partial_ordered_recursive(self):
s = SequenceComparison(1, 3, 5, ordered=True, partial=True, recursive=True)
assert s != (1, 2, 3, 4, 0)
compare(repr(s), expected=(
'\n'
'<SequenceComparison(ordered=True, partial=True)(failed)>\n'
'ignored:\n'
'[4, 0]\n\n'
'same:\n'
'[1]\n\n'
'expected:\n'
'[3, 5]\n\n'
'actual:\n'
'[2, 3]\n'
'</SequenceComparison(ordered=True, partial=True)>'
))
def test_unequal_partial_ordered_only_one_ignored_recursive(self):
s = SequenceComparison(1, 2, ordered=True, partial=True, recursive=True)
assert s != (2, 1, 4)
compare(repr(s), expected=(
'\n'
'<SequenceComparison(ordered=True, partial=True)(failed)>\n'
'ignored:\n'
'[4]\n\n'
'same:\n'
'[]\n\n'
'expected:\n'
'[1, 2]\n\n'
'actual:\n'
'[2, 1]\n'
'</SequenceComparison(ordered=True, partial=True)>'
))
def test_unequal_full_ordered(self):
s = SequenceComparison(1, 3, 5, ordered=True, partial=False)
assert s != (0, 1, 2, 3, 4)
compare(repr(s), expected=(
'\n'
'<SequenceComparison(ordered=True, partial=False)(failed)>\n'
'same:\n'
'[]\n\n'
'expected:\n'
'[1, 3, 5]\n\n'
'actual:\n'
'[0, 1, 2, 3, 4]\n'
'</SequenceComparison(ordered=True, partial=False)>'
))
def test_unequal_partial_ordered_with_prefix(self):
s = SequenceComparison('a', 'b', 1, 2, ordered=True, partial=True)
assert s != ('a', 'b', 2, 1, 4)
compare(repr(s), expected=(
'\n'
'<SequenceComparison(ordered=True, partial=True)(failed)>\n'
'ignored:\n'
'[2, 4]\n\n'
'same:\n'
"['a', 'b', 1]\n\n"
'expected:\n'
'[2]\n\n'
'actual:\n'
'[]\n'
'</SequenceComparison(ordered=True, partial=True)>'
))
def test_unequal_partial_unordered(self):
s = SequenceComparison(1, 3, ordered=False, partial=True)
assert s != (2, 1, 4)
compare(repr(s), expected=(
'\n'
'<SequenceComparison(ordered=False, partial=True)(failed)>\n'
'ignored:\n'
'[2, 4]\n\n'
'same:\n'
"[1]\n\n"
'in expected but not actual:\n'
"[3]\n"
'</SequenceComparison(ordered=False, partial=True)>'
))
def test_unequal_unordered_duplicates(self):
s = SequenceComparison(2, 1, 2, ordered=False, partial=False)
assert s != (1, 2)
compare(repr(s), expected=(
'\n'
'<SequenceComparison(ordered=False, partial=False)(failed)>\n'
'same:\n'
"[2, 1]\n\n"
'in expected but not actual:\n'
"[2]\n"
'</SequenceComparison(ordered=False, partial=False)>'
))
def test_unequal_partial_unordered_duplicates(self):
s = SequenceComparison(1, 2, 2, ordered=False, partial=True)
assert s != (1, 2)
compare(repr(s), expected=(
'\n'
'<SequenceComparison(ordered=False, partial=True)(failed)>\n'
'same:\n'
"[1, 2]\n\n"
'in expected but not actual:\n'
"[2]\n"
'</SequenceComparison(ordered=False, partial=True)>'
))
def test_unequal_partial_ordered_duplicates(self):
s = SequenceComparison(1, 2, 2, partial=True)
assert s != (1, 2)
compare(repr(s), expected=(
'\n'
'<SequenceComparison(ordered=True, partial=True)(failed)>\n'
'same:\n'
"[1, 2]\n\n"
'expected:\n'
'[2]\n\n'
'actual:\n'
'[]\n'
'</SequenceComparison(ordered=True, partial=True)>'
))
def test_unequal_generator(self):
s = SequenceComparison(1, 3)
assert s != generator(1, 2)
compare(repr(s), expected=(
'\n'
'<SequenceComparison(ordered=True, partial=False)(failed)>\n'
'same:\n'
"[1]\n\n"
'expected:\n'
'[3]\n\n'
'actual:\n'
'[2]\n'
'</SequenceComparison(ordered=True, partial=False)>'
))
def test_unequal_nested(self):
s = SequenceComparison({1: 'a', 2: 'b'}, [1, 2], recursive=False)
assert s != ({2: 'b', 3: 'c'}, [1, 3])
compare(repr(s), expected=(
'\n'
'<SequenceComparison(ordered=True, partial=False)(failed)>\n'
'same:\n'
"[]\n\n"
'expected:\n'
"[{1: 'a', 2: 'b'}, [1, 2]]\n\n"
'actual:\n'
"[{2: 'b', 3: 'c'}, [1, 3]]\n"
'</SequenceComparison(ordered=True, partial=False)>'
))
def test_unequal_nested_recursive(self):
s = SequenceComparison({1: 'a', 2: 'b'}, [1, 2], recursive=True)
assert s != ({2: 'b', 3: 'c'}, [1, 3])
compare(repr(s), expected=(
'\n'
'<SequenceComparison(ordered=True, partial=False)(failed)>\n'
'same:\n'
"[]\n\n"
'expected:\n'
"[{1: 'a', 2: 'b'}, [1, 2]]\n\n"
'actual:\n'
"[{2: 'b', 3: 'c'}, [1, 3]]\n\n"
"While comparing [0]: dict not as expected:\n\n"
"same:\n"
"[2]\n\n"
"in expected but not actual:\n"
"1: 'a'\n\n"
"in actual but not expected:\n"
"3: 'c'\n"
'</SequenceComparison(ordered=True, partial=False)>'
))
def test_unequal_nested_unhashable_unordered(self):
s = SequenceComparison({2: True}, {1: True}, {2: True}, {3: True}, ordered=False)
assert s != ({1: True}, {2: True}, {4: True})
compare(repr(s), expected=(
'\n'
'<SequenceComparison(ordered=False, partial=False)(failed)>\n'
'same:\n'
"[{2: True}, {1: True}]\n\n"
'in expected but not actual:\n'
"[{2: True}, {3: True}]\n\n"
'in actual but not expected:\n'
"[{4: True}]\n"
'</SequenceComparison(ordered=False, partial=False)>'
))
def test_unequal_nested_unhashable_unordered_partial(self):
s = SequenceComparison({2: True}, {1: True}, {2: True}, {3: True},
ordered=False, partial=True)
assert s != ({1: True}, {2: True}, {4: True})
compare(repr(s), expected=(
'\n'
'<SequenceComparison(ordered=False, partial=True)(failed)>\n'
'ignored:\n'
"[{4: True}]\n\n"
'same:\n'
"[{2: True}, {1: True}]\n\n"
'in expected but not actual:\n'
"[{2: True}, {3: True}]\n"
'</SequenceComparison(ordered=False, partial=True)>'
))
def test_unequal_wrong_order(self):
s = SequenceComparison(1, 2, 3)
assert s != (1, 3, 2)
compare(repr(s), expected=(
'\n'
'<SequenceComparison(ordered=True, partial=False)(failed)>\n'
'same:\n'
"[1]\n\n"
'expected:\n'
'[2, 3]\n\n'
'actual:\n'
'[3, 2]\n'
'</SequenceComparison(ordered=True, partial=False)>'
))
def test_partial_nothing_specified(self):
s = SequenceComparison(partial=True)
assert s == {}
def test_partial_wrong_type(self):
s = SequenceComparison(partial=True)
assert s != object()
class TestSubset(object):
def test_equal(self):
assert Subset({1}, {2}) == [{1}, {2}, {3}]
def test_unequal(self):
assert Subset({1}, {2}) != [{1}]
class TestPermutation(object):
def test_equal(self):
assert Permutation({1}, {2}) == [{2}, {1}]
def test_unequal(self):
assert Permutation({1}) != [{2}, {1}]
|
andresgz/django
|
refs/heads/master
|
tests/gis_tests/relatedapp/tests.py
|
39
|
from __future__ import unicode_literals
from django.contrib.gis.db.models import F, Collect, Count, Extent, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.geos import GEOSGeometry, MultiPoint, Point
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import override_settings
from django.utils import timezone
from ..utils import no_oracle
from .models import (
Article, Author, Book, City, DirectoryEntry, Event, Location, Parcel,
)
@skipUnlessDBFeature("gis_enabled")
class RelatedGeoModelTest(TestCase):
fixtures = ['initial']
def test02_select_related(self):
"Testing `select_related` on geographic models (see #7126)."
qs1 = City.objects.order_by('id')
qs2 = City.objects.order_by('id').select_related()
qs3 = City.objects.order_by('id').select_related('location')
# Reference data for what's in the fixtures.
cities = (
('Aurora', 'TX', -97.516111, 33.058333),
('Roswell', 'NM', -104.528056, 33.387222),
('Kecksburg', 'PA', -79.460734, 40.18476),
)
for qs in (qs1, qs2, qs3):
for ref, c in zip(cities, qs):
nm, st, lon, lat = ref
self.assertEqual(nm, c.name)
self.assertEqual(st, c.state)
self.assertEqual(Point(lon, lat), c.location.point)
@skipUnlessDBFeature("has_transform_method")
def test03_transform_related(self):
"Testing the `transform` GeoQuerySet method on related geographic models."
# All the transformations are to state plane coordinate systems using
# US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot).
tol = 0
def check_pnt(ref, pnt):
self.assertAlmostEqual(ref.x, pnt.x, tol)
self.assertAlmostEqual(ref.y, pnt.y, tol)
self.assertEqual(ref.srid, pnt.srid)
# Each city transformed to the SRID of their state plane coordinate system.
transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
)
for name, srid, wkt in transformed:
# Doing this implicitly sets `select_related` to select the location.
# TODO: Fix why this breaks on Oracle.
qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point)
@skipUnlessDBFeature("supports_extent_aggr")
def test_related_extent_aggregate(self):
"Testing the `Extent` aggregate on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Extent('location__point'))
# One for all locations, one that excludes New Mexico (Roswell).
all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
e1 = City.objects.aggregate(Extent('location__point'))['location__point__extent']
e2 = City.objects.exclude(state='NM').aggregate(Extent('location__point'))['location__point__extent']
e3 = aggs['location__point__extent']
# The tolerance value is to four decimal places because of differences
# between the Oracle and PostGIS spatial backends on the extent calculation.
tol = 4
for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
for ref_val, e_val in zip(ref, e):
self.assertAlmostEqual(ref_val, e_val, tol)
@skipUnlessDBFeature("supports_extent_aggr")
def test_related_extent_annotate(self):
"""
Test annotation with Extent GeoAggregate.
"""
cities = City.objects.annotate(points_extent=Extent('location__point')).order_by('name')
tol = 4
self.assertAlmostEqual(
cities[0].points_extent,
(-97.516111, 33.058333, -97.516111, 33.058333),
tol
)
@skipUnlessDBFeature("has_unionagg_method")
def test_related_union_aggregate(self):
"Testing the `Union` aggregate on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Union('location__point'))
# These are the points that are components of the aggregate geographic
# union that is returned. Each point corresponds to a City PK.
p1 = Point(-104.528056, 33.387222)
p2 = Point(-97.516111, 33.058333)
p3 = Point(-79.460734, 40.18476)
p4 = Point(-96.801611, 32.782057)
p5 = Point(-95.363151, 29.763374)
# The second union aggregate is for a union
# query that includes limiting information in the WHERE clause (in other
# words a `.filter()` precedes the call to `.aggregate(Union())`).
ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
ref_u2 = MultiPoint(p2, p3, srid=4326)
u1 = City.objects.aggregate(Union('location__point'))['location__point__union']
u2 = City.objects.exclude(
name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth'),
).aggregate(Union('location__point'))['location__point__union']
u3 = aggs['location__point__union']
self.assertEqual(type(u1), MultiPoint)
self.assertEqual(type(u3), MultiPoint)
# Ordering of points in the result of the union is not defined and
# implementation-dependent (DB backend, GEOS version)
self.assertSetEqual({p.ewkt for p in ref_u1}, {p.ewkt for p in u1})
self.assertSetEqual({p.ewkt for p in ref_u2}, {p.ewkt for p in u2})
self.assertSetEqual({p.ewkt for p in ref_u1}, {p.ewkt for p in u3})
def test05_select_related_fk_to_subclass(self):
"Testing that calling select_related on a query over a model with an FK to a model subclass works"
# Regression test for #9752.
list(DirectoryEntry.objects.all().select_related())
def test06_f_expressions(self):
"Testing F() expressions on GeometryFields."
# Constructing a dummy parcel border and getting the City instance for
# assigning the FK.
b1 = GEOSGeometry(
'POLYGON((-97.501205 33.052520,-97.501205 33.052576,'
'-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))',
srid=4326
)
pcity = City.objects.get(name='Aurora')
# First parcel has incorrect center point that is equal to the City;
# it also has a second border that is different from the first as a
# 100ft buffer around the City.
c1 = pcity.location.point
c2 = c1.transform(2276, clone=True)
b2 = c2.buffer(100)
Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)
# Now creating a second Parcel where the borders are the same, just
# in different coordinate systems. The center points are also the
# same (but in different coordinate systems), and this time they
# actually correspond to the centroid of the border.
c1 = b1.centroid
c2 = c1.transform(2276, clone=True)
Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)
# Should return the second Parcel, which has the center within the
# border.
qs = Parcel.objects.filter(center1__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
if connection.features.supports_transform:
# This time center2 is in a different coordinate system and needs
# to be wrapped in transformation SQL.
qs = Parcel.objects.filter(center2__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
# Should return the first Parcel, which has the center point equal
# to the point in the City ForeignKey.
qs = Parcel.objects.filter(center1=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
if connection.features.supports_transform:
# This time the city column should be wrapped in transformation SQL.
qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
def test07_values(self):
"Testing values() and values_list() and GeoQuerySets."
gqs = Location.objects.all()
gvqs = Location.objects.values()
gvlqs = Location.objects.values_list()
# Incrementing through each of the models, dictionaries, and tuples
# returned by the different types of GeoQuerySets.
for m, d, t in zip(gqs, gvqs, gvlqs):
# The values should be Geometry objects and not raw strings returned
# by the spatial database.
self.assertIsInstance(d['point'], Geometry)
self.assertIsInstance(t[1], Geometry)
self.assertEqual(m.point, d['point'])
self.assertEqual(m.point, t[1])
@override_settings(USE_TZ=True)
def test_07b_values(self):
"Testing values() and values_list() with aware datetime. See #21565."
Event.objects.create(name="foo", when=timezone.now())
list(Event.objects.values_list('when'))
def test08_defer_only(self):
"Testing defer() and only() on Geographic models."
qs = Location.objects.all()
def_qs = Location.objects.defer('point')
for loc, def_loc in zip(qs, def_qs):
self.assertEqual(loc.point, def_loc.point)
def test09_pk_relations(self):
"Ensuring correct primary key column is selected across relations. See #10757."
# The expected ID values -- notice the last two location IDs
# are out of order. Dallas and Houston have location IDs that differ
# from their PKs -- this is done to ensure that the related location
# ID column is selected instead of ID column for the city.
city_ids = (1, 2, 3, 4, 5)
loc_ids = (1, 2, 3, 5, 4)
ids_qs = City.objects.order_by('id').values('id', 'location__id')
for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids):
self.assertEqual(val_dict['id'], c_id)
self.assertEqual(val_dict['location__id'], l_id)
# TODO: fix on Oracle -- qs2 returns an empty result for an unknown reason
@no_oracle
def test10_combine(self):
"Testing the combination of two GeoQuerySets. See #10807."
buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1)
buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1)
qs1 = City.objects.filter(location__point__within=buf1)
qs2 = City.objects.filter(location__point__within=buf2)
combined = qs1 | qs2
names = [c.name for c in combined]
self.assertEqual(2, len(names))
self.assertIn('Aurora', names)
self.assertIn('Kecksburg', names)
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test12a_count(self):
"Testing `Count` aggregate use with the `GeoManager` on geo-fields."
# The City, 'Fort Worth' uses the same location as Dallas.
dallas = City.objects.get(name='Dallas')
# Count annotation should be 2 for the Dallas location now.
loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
self.assertEqual(2, loc.num_cities)
def test12b_count(self):
"Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
# Should only be one author (Trevor Paglen) returned by this query, and
# the annotation should have 3 for the number of books, see #11087.
# Also testing with a values(), see #11489.
qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
self.assertEqual(1, len(qs))
self.assertEqual(3, qs[0].num_books)
self.assertEqual(1, len(vqs))
self.assertEqual(3, vqs[0]['num_books'])
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test13c_count(self):
"Testing `Count` aggregate with `.values()`. See #15305."
qs = Location.objects.filter(id=5).annotate(num_cities=Count('city')).values('id', 'point', 'num_cities')
self.assertEqual(1, len(qs))
self.assertEqual(2, qs[0]['num_cities'])
self.assertIsInstance(qs[0]['point'], GEOSGeometry)
# TODO: The phantom model does appear on Oracle.
@no_oracle
def test13_select_related_null_fk(self):
"Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381."
Book.objects.create(title='Without Author')
b = Book.objects.select_related('author').get(title='Without Author')
# Should be `None`, and not a 'dummy' model.
self.assertIsNone(b.author)
@skipUnlessDBFeature("supports_collect_aggr")
def test_collect(self):
"""
Testing the `Collect` aggregate.
"""
# Reference query:
# SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
# "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
# WHERE "relatedapp_city"."state" = 'TX';
ref_geom = GEOSGeometry(
'MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,'
'-95.363151 29.763374,-96.801611 32.782057)'
)
coll = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']
# Even though Dallas and Ft. Worth share same point, Collect doesn't
# consolidate -- that's why 4 points in MultiPoint.
self.assertEqual(4, len(coll))
self.assertTrue(ref_geom.equals(coll))
def test15_invalid_select_related(self):
"Testing doing select_related on the related name manager of a unique FK. See #13934."
qs = Article.objects.select_related('author__article')
# This triggers TypeError when `get_default_columns` has no `local_only`
# keyword. The TypeError is swallowed if QuerySet is actually
# evaluated as list generation swallows TypeError in CPython.
str(qs.query)
def test16_annotated_date_queryset(self):
"Ensure annotated date querysets work if spatial backend is used. See #14648."
birth_years = [dt.year for dt in
list(Author.objects.annotate(num_books=Count('books')).dates('dob', 'year'))]
birth_years.sort()
self.assertEqual([1950, 1974], birth_years)
# TODO: Related tests for KML, GML, and distance lookups.
|
fuzzy-io/python
|
refs/heads/master
|
fuzzyai/__init__.py
|
12133432
| |
awatts/boto
|
refs/heads/develop
|
boto/pyami/launch_ami.py
|
153
|
#!/usr/bin/env python
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import getopt
import sys
import imp
import time
import boto
usage_string = """
SYNOPSIS
launch_ami.py -a ami_id [-b script_bucket] [-s script_name]
[-m module] [-c class_name] [-r]
[-g group] [-k key_name] [-n num_instances]
[-w] [extra_data]
Where:
ami_id - the id of the AMI you wish to launch
module - The name of the Python module containing the class you
want to run when the instance is started. If you use this
option the Python module must already be stored on the
instance in a location that is on the Python path.
script_file - The name of a local Python module that you would like
to have copied to S3 and then run on the instance
when it is started. The specified module must be
import'able (i.e. in your local Python path). It
will then be copied to the specified bucket in S3
(see the -b option). Once the new instance(s)
start up the script will be copied from S3 and then
run locally on the instance.
class_name - The name of the class to be instantiated within the
module or script file specified.
script_bucket - the name of the bucket in which the script will be
stored
group - the name of the security group the instance will run in
key_name - the name of the keypair to use when launching the AMI
num_instances - how many instances of the AMI to launch (default 1)
input_queue_name - Name of SQS to read input messages from
output_queue_name - Name of SQS to write output messages to
extra_data - additional name-value pairs that will be passed as
userdata to the newly launched instance. These should
be of the form "name=value"
The -r option reloads the Python module to S3 without launching
another instance. This can be useful during debugging to allow
you to test a new version of your script without shutting down
your instance and starting up another one.
The -w option tells the script to run synchronously, meaning to
wait until the instance is actually up and running. It then prints
the IP address and internal and external DNS names before exiting.
"""
def usage():
print(usage_string)
sys.exit()
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], 'a:b:c:g:hi:k:m:n:o:rs:w',
['ami', 'bucket', 'class', 'group', 'help',
'inputqueue', 'keypair', 'module',
'numinstances', 'outputqueue',
'reload', 'script_name', 'wait'])
except:
usage()
params = {'module_name': None,
'script_name': None,
'class_name': None,
'script_bucket': None,
'group': 'default',
'keypair': None,
'ami': None,
'num_instances': 1,
'input_queue_name': None,
'output_queue_name': None}
reload = None
wait = None
for o, a in opts:
if o in ('-a', '--ami'):
params['ami'] = a
if o in ('-b', '--bucket'):
params['script_bucket'] = a
if o in ('-c', '--class'):
params['class_name'] = a
if o in ('-g', '--group'):
params['group'] = a
if o in ('-h', '--help'):
usage()
if o in ('-i', '--inputqueue'):
params['input_queue_name'] = a
if o in ('-k', '--keypair'):
params['keypair'] = a
if o in ('-m', '--module'):
params['module_name'] = a
if o in ('-n', '--num_instances'):
params['num_instances'] = int(a)
if o in ('-o', '--outputqueue'):
params['output_queue_name'] = a
if o in ('-r', '--reload'):
reload = True
if o in ('-s', '--script_name'):  # '--script_name' is the long option registered with getopt above
params['script_name'] = a
if o in ('-w', '--wait'):
wait = True
# check required fields
required = ['ami']
for pname in required:
if not params.get(pname, None):
print('%s is required' % pname)
usage()
if params['script_name']:
# first copy the desired module file to S3 bucket
if reload:
print('Reloading module %s to S3' % params['script_name'])
else:
print('Copying module %s to S3' % params['script_name'])
l = imp.find_module(params['script_name'])
c = boto.connect_s3()
bucket = c.get_bucket(params['script_bucket'])
key = bucket.new_key(params['script_name'] + '.py')
key.set_contents_from_file(l[0])
params['script_md5'] = key.md5
# we have everything we need, now build userdata string
l = []
for k, v in params.items():
if v:
l.append('%s=%s' % (k, v))
c = boto.connect_ec2()
l.append('aws_access_key_id=%s' % c.aws_access_key_id)
l.append('aws_secret_access_key=%s' % c.aws_secret_access_key)
for kv in args:
l.append(kv)
s = '|'.join(l)
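# At this point `s` is a single '|'-delimited userdata string, e.g.
# (hypothetical values):
#   "ami=ami-12345678|group=default|num_instances=1|aws_access_key_id=AKIA..."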
if not reload:
rs = c.get_all_images([params['ami']])
img = rs[0]
r = img.run(user_data=s, key_name=params['keypair'],
security_groups=[params['group']],
max_count=params.get('num_instances', 1))
print('AMI: %s - %s (Started)' % (params['ami'], img.location))
print('Reservation %s contains the following instances:' % r.id)
for i in r.instances:
print('\t%s' % i.id)
if wait:
running = False
while not running:
time.sleep(30)
[i.update() for i in r.instances]
status = [i.state for i in r.instances]
print(status)
if status.count('running') == len(r.instances):
running = True
for i in r.instances:
print('Instance: %s' % i.ami_launch_index)
print('Public DNS Name: %s' % i.public_dns_name)
print('Private DNS Name: %s' % i.private_dns_name)
if __name__ == "__main__":
main()
|
gchp/django
|
refs/heads/master
|
django/contrib/sitemaps/apps.py
|
590
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class SiteMapsConfig(AppConfig):
    name = 'django.contrib.sitemaps'
    verbose_name = _("Site Maps")
|
agiledata/python-gpiozero
|
refs/heads/master
|
gpiozero/input_devices.py
|
1
|
from __future__ import division
from time import sleep, time
from threading import Event
from collections import deque
from RPi import GPIO
from w1thermsensor import W1ThermSensor
from .devices import GPIODeviceError, GPIODevice, GPIOThread
class InputDeviceError(GPIODeviceError):
pass
class InputDevice(GPIODevice):
def __init__(self, pin=None, pull_up=True):
super(InputDevice, self).__init__(pin)
self._pull_up = pull_up
self._edge = (GPIO.RISING, GPIO.FALLING)[pull_up]
if pull_up:
self._active_state = 0
self._inactive_state = 1
GPIO.setup(pin, GPIO.IN, (GPIO.PUD_DOWN, GPIO.PUD_UP)[pull_up])
@property
def pull_up(self):
return self._pull_up
def wait_for_input(self):
GPIO.wait_for_edge(self.pin, self._edge)
def add_callback(self, callback=None, bouncetime=1000):
if callback is None:
raise InputDeviceError('No callback function given')
GPIO.add_event_detect(self.pin, self._edge, callback, bouncetime)
def remove_callback(self):
GPIO.remove_event_detect(self.pin)
class Button(InputDevice):
pass
class MotionSensor(InputDevice):
def __init__(
self, pin=None, queue_len=5, sample_rate=10, threshold=0.5,
partial=False):
super(MotionSensor, self).__init__(pin, pull_up=False)
if queue_len < 1:
raise InputDeviceError('queue_len must be at least one')
self.sample_rate = sample_rate
self.threshold = threshold
self.partial = partial
self._queue = deque(maxlen=queue_len)
self._queue_full = Event()
self._queue_thread = GPIOThread(target=self._fill_queue)
self._queue_thread.start()
@property
def queue_len(self):
return self._queue.maxlen
@property
def value(self):
if not self.partial:
self._queue_full.wait()
try:
return sum(self._queue) / len(self._queue)
except ZeroDivisionError:
# No data == no motion
return 0.0
@property
def motion_detected(self):
return self.value > self.threshold
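    # Worked example (hypothetical samples): with queue_len=5 and a full queue of
    # [True, True, False, False, True], value == 3/5 == 0.6, so motion_detected is
    # True for the default threshold of 0.5.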
def _get_sample_rate(self):
return self._sample_rate
def _set_sample_rate(self, value):
if value <= 0:
raise InputDeviceError('sample_rate must be greater than zero')
self._sample_rate = value
sample_rate = property(_get_sample_rate, _set_sample_rate)
def _get_threshold(self):
return self._threshold
def _set_threshold(self, value):
if value < 0:
raise InputDeviceError('threshold must be zero or more')
self._threshold = value
threshold = property(_get_threshold, _set_threshold)
def _fill_queue(self):
while (
not self._queue_thread.stopping.wait(1 / self.sample_rate) and
len(self._queue) < self._queue.maxlen
):
self._queue.append(self.is_active)
self._queue_full.set()
while not self._queue_thread.stopping.wait(1 / self.sample_rate):
self._queue.append(self.is_active)
class LightSensor(InputDevice):
def __init__(
self, pin=None, queue_len=5, darkness_time=0.01,
threshold=0.1, partial=False):
super(LightSensor, self).__init__(pin, pull_up=False)
if queue_len < 1:
raise InputDeviceError('queue_len must be at least one')
self.darkness_time = darkness_time
self.threshold = threshold
self.partial = partial
self._charged = Event()
GPIO.add_event_detect(self.pin, GPIO.RISING, lambda channel: self._charged.set())
self._queue = deque(maxlen=queue_len)
self._queue_full = Event()
self._queue_thread = GPIOThread(target=self._fill_queue)
self._queue_thread.start()
@property
def queue_len(self):
return self._queue.maxlen
@property
def value(self):
if not self.partial:
self._queue_full.wait()
try:
return 1.0 - (sum(self._queue) / len(self._queue)) / self.darkness_time
except ZeroDivisionError:
# No data == no light
return 0.0
@property
def light_detected(self):
return self.value > self.threshold
def _get_darkness_time(self):
return self._darkness_time
def _set_darkness_time(self, value):
if value <= 0.0:
raise InputDeviceError('darkness_time must be greater than zero')
self._darkness_time = value
# XXX Empty the queue and restart the thread
darkness_time = property(_get_darkness_time, _set_darkness_time)
def _get_threshold(self):
return self._threshold
def _set_threshold(self, value):
if value < 0:
raise InputDeviceError('threshold must be zero or more')
self._threshold = value
threshold = property(_get_threshold, _set_threshold)
def _fill_queue(self):
try:
while (
not self._queue_thread.stopping.is_set() and
len(self._queue) < self._queue.maxlen
):
self._queue.append(self._time_charging())
self._queue_full.set()
while not self._queue_thread.stopping.is_set():
self._queue.append(self._time_charging())
finally:
GPIO.remove_event_detect(self.pin)
def _time_charging(self):
# Drain charge from the capacitor
GPIO.setup(self.pin, GPIO.OUT)
GPIO.output(self.pin, GPIO.LOW)
sleep(0.1)
# Time the charging of the capacitor
start = time()
self._charged.clear()
GPIO.setup(self.pin, GPIO.IN)
self._charged.wait(self.darkness_time)
return min(self.darkness_time, time() - start)
class TemperatureSensor(W1ThermSensor):
@property
def value(self):
return self.get_temperature()
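# Hedged usage sketch (not part of the original module); the BCM pin numbers are
# hypothetical and RPi.GPIO is assumed to be configured for BCM numbering:
#
#     pir = MotionSensor(4)
#     pir.wait_for_input()              # block until the PIR output changes state
#     print(pir.motion_detected)
#
#     ldr = LightSensor(18)             # LDR + capacitor charging circuit
#     print(ldr.value)                  # ~0.0 when dark, approaching 1.0 when bright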
|
Gillu13/scipy
|
refs/heads/master
|
doc/source/tutorial/stats/plots/kde_plot3.py
|
132
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
np.random.seed(12456)
x1 = np.random.normal(size=200) # random data, normal distribution
xs = np.linspace(x1.min()-1, x1.max()+1, 200)
kde1 = stats.gaussian_kde(x1)
kde2 = stats.gaussian_kde(x1, bw_method='silverman')
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_subplot(211)
ax1.plot(x1, np.zeros(x1.shape), 'b+', ms=12) # rug plot
ax1.plot(xs, kde1(xs), 'k-', label="Scott's Rule")
ax1.plot(xs, kde2(xs), 'b-', label="Silverman's Rule")
ax1.plot(xs, stats.norm.pdf(xs), 'r--', label="True PDF")
ax1.set_xlabel('x')
ax1.set_ylabel('Density')
ax1.set_title("Normal (top) and Student's T$_{df=5}$ (bottom) distributions")
ax1.legend(loc=1)
x2 = stats.t.rvs(5, size=200) # random data, T distribution
xs = np.linspace(x2.min() - 1, x2.max() + 1, 200)
kde3 = stats.gaussian_kde(x2)
kde4 = stats.gaussian_kde(x2, bw_method='silverman')
ax2 = fig.add_subplot(212)
ax2.plot(x2, np.zeros(x2.shape), 'b+', ms=12) # rug plot
ax2.plot(xs, kde3(xs), 'k-', label="Scott's Rule")
ax2.plot(xs, kde4(xs), 'b-', label="Silverman's Rule")
ax2.plot(xs, stats.t.pdf(xs, 5), 'r--', label="True PDF")
ax2.set_xlabel('x')
ax2.set_ylabel('Density')
plt.show()
|
tastynoodle/django
|
refs/heads/master
|
django/db/models/fields/subclassing.py
|
81
|
"""
Convenience routines for creating non-trivial Field subclasses, as well as
backwards compatibility utilities.
Add SubfieldBase as the metaclass for your Field subclass, implement
to_python() and the other necessary methods and everything will work
seamlessly.
"""
class SubfieldBase(type):
"""
A metaclass for custom Field subclasses. This ensures the model's attribute
has the descriptor protocol attached to it.
"""
def __new__(cls, name, bases, attrs):
new_class = super(SubfieldBase, cls).__new__(cls, name, bases, attrs)
new_class.contribute_to_class = make_contrib(
new_class, attrs.get('contribute_to_class')
)
return new_class
class Creator(object):
"""
A placeholder class that provides a way to set the attribute on the model.
"""
def __init__(self, field):
self.field = field
def __get__(self, obj, type=None):
if obj is None:
return self
return obj.__dict__[self.field.name]
def __set__(self, obj, value):
obj.__dict__[self.field.name] = self.field.to_python(value)
def make_contrib(superclass, func=None):
"""
Returns a suitable contribute_to_class() method for the Field subclass.
If 'func' is passed in, it is the existing contribute_to_class() method on
the subclass and it is called before anything else. It is assumed in this
case that the existing contribute_to_class() calls all the necessary
superclass methods.
"""
def contribute_to_class(self, cls, name):
if func:
func(self, cls, name)
else:
super(superclass, self).contribute_to_class(cls, name)
setattr(cls, self.name, Creator(self))
return contribute_to_class
|
adaussy/eclipse-monkey-revival
|
refs/heads/master
|
plugins/python/org.eclipse.eclipsemonkey.lang.python/Lib/test/bugs/pr191.py
|
31
|
# PR#191 instances of Java classes missing __class__ attribute
import java.lang.String
s = java.lang.String('s')
s.__class__
|
grakiss888/testapi
|
refs/heads/master
|
opnfv_testapi/tests/unit/conftest.py
|
1
|
from os import path
import pytest
@pytest.fixture
def config_normal():
    return path.join(path.dirname(__file__), 'common/normal.ini')
|
onoga/wm
|
refs/heads/master
|
src/gnue/__init__.py
|
2
|
# GNU Enterprise Common Library - Base Module
#
# Copyright 2001-2006 Free Software Foundation
#
# This file is part of GNU Enterprise
#
# GNU Enterprise is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2, or (at your option) any later version.
#
# GNU Enterprise is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with program; see the file COPYING. If not,
# write to the Free Software Foundation, Inc., 59 Temple Place
# - Suite 330, Boston, MA 02111-1307, USA.
#
# $Id: __init__.py,v 1.1 2007/10/04 18:20:41 oleg Exp $
"""
GNUe base module. All gnue.* modules depend on gnue.common, so "import
gnue.<whatever>" will cause gnue.common.apps to be loaded. This sets up a GNUe
environment.
"""
# Init stuff like _()
import gnue.common.apps as _init
|
denisff/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/difflib.py
|
50
|
#! /usr/bin/env python3
"""
Module difflib -- helpers for computing deltas between objects.
Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
Use SequenceMatcher to return list of the best "good enough" matches.
Function context_diff(a, b):
For two lists of strings, return a delta in context diff format.
Function ndiff(a, b):
Return a delta: the difference between `a` and `b` (lists of strings).
Function restore(delta, which):
Return one of the two sequences that generated an ndiff delta.
Function unified_diff(a, b):
For two lists of strings, return a delta in unified diff format.
Class SequenceMatcher:
A flexible class for comparing pairs of sequences of any type.
Class Differ:
For producing human-readable deltas from sequences of lines of text.
Class HtmlDiff:
For producing HTML side by side comparison with change highlights.
"""
__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
'Differ','IS_CHARACTER_JUNK', 'IS_LINE_JUNK', 'context_diff',
'unified_diff', 'HtmlDiff', 'Match']
import warnings
import heapq
from collections import namedtuple as _namedtuple
Match = _namedtuple('Match', 'a b size')
def _calculate_ratio(matches, length):
if length:
return 2.0 * matches / length
return 1.0
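# For example, SequenceMatcher(None, "abcd", "bcde") finds 3 matching elements
# across a total length of 8, so _calculate_ratio(3, 8) == 0.75 -- the value
# ratio() reports for that pair.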
class SequenceMatcher:
"""
SequenceMatcher is a flexible class for comparing pairs of sequences of
any type, so long as the sequence elements are hashable. The basic
algorithm predates, and is a little fancier than, an algorithm
published in the late 1980's by Ratcliff and Obershelp under the
hyperbolic name "gestalt pattern matching". The basic idea is to find
the longest contiguous matching subsequence that contains no "junk"
elements (R-O doesn't address junk). The same idea is then applied
recursively to the pieces of the sequences to the left and to the right
of the matching subsequence. This does not yield minimal edit
sequences, but does tend to yield matches that "look right" to people.
SequenceMatcher tries to compute a "human-friendly diff" between two
sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
longest *contiguous* & junk-free matching subsequence. That's what
catches people's eyes. The Windows(tm) windiff has another interesting
notion, pairing up elements that appear uniquely in each sequence.
That, and the method here, appear to yield more intuitive difference
reports than does diff. This method appears to be the least vulnerable
to synching up on blocks of "junk lines", though (like blank lines in
ordinary text files, or maybe "<P>" lines in HTML files). That may be
because this is the only method of the 3 that has a *concept* of
"junk" <wink>.
Example, comparing two strings, and considering blanks to be "junk":
>>> s = SequenceMatcher(lambda x: x == " ",
... "private Thread currentThread;",
... "private volatile Thread currentThread;")
>>>
.ratio() returns a float in [0, 1], measuring the "similarity" of the
sequences. As a rule of thumb, a .ratio() value over 0.6 means the
sequences are close matches:
>>> print(round(s.ratio(), 3))
0.866
>>>
If you're only interested in where the sequences match,
.get_matching_blocks() is handy:
>>> for block in s.get_matching_blocks():
... print("a[%d] and b[%d] match for %d elements" % block)
a[0] and b[0] match for 8 elements
a[8] and b[17] match for 21 elements
a[29] and b[38] match for 0 elements
Note that the last tuple returned by .get_matching_blocks() is always a
dummy, (len(a), len(b), 0), and this is the only case in which the last
tuple element (number of elements matched) is 0.
If you want to know how to change the first sequence into the second,
use .get_opcodes():
>>> for opcode in s.get_opcodes():
... print("%6s a[%d:%d] b[%d:%d]" % opcode)
equal a[0:8] b[0:8]
insert a[8:8] b[8:17]
equal a[8:29] b[17:38]
See the Differ class for a fancy human-friendly file differencer, which
uses SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
See also function get_close_matches() in this module, which shows how
simple code building on SequenceMatcher can be used to do useful work.
Timing: Basic R-O is cubic time worst case and quadratic time expected
case. SequenceMatcher is quadratic time for the worst case and has
expected-case behavior dependent in a complicated way on how many
elements the sequences have in common; best case time is linear.
Methods:
__init__(isjunk=None, a='', b='')
Construct a SequenceMatcher.
set_seqs(a, b)
Set the two sequences to be compared.
set_seq1(a)
Set the first sequence to be compared.
set_seq2(b)
Set the second sequence to be compared.
find_longest_match(alo, ahi, blo, bhi)
Find longest matching block in a[alo:ahi] and b[blo:bhi].
get_matching_blocks()
Return list of triples describing matching subsequences.
get_opcodes()
Return list of 5-tuples describing how to turn a into b.
ratio()
Return a measure of the sequences' similarity (float in [0,1]).
quick_ratio()
Return an upper bound on .ratio() relatively quickly.
real_quick_ratio()
Return an upper bound on ratio() very quickly.
"""
def __init__(self, isjunk=None, a='', b='', autojunk=True):
"""Construct a SequenceMatcher.
Optional arg isjunk is None (the default), or a one-argument
function that takes a sequence element and returns true iff the
element is junk. None is equivalent to passing "lambda x: 0", i.e.
no elements are considered to be junk. For example, pass
lambda x: x in " \\t"
if you're comparing lines as sequences of characters, and don't
want to synch up on blanks or hard tabs.
Optional arg a is the first of two sequences to be compared. By
default, an empty string. The elements of a must be hashable. See
also .set_seqs() and .set_seq1().
Optional arg b is the second of two sequences to be compared. By
default, an empty string. The elements of b must be hashable. See
also .set_seqs() and .set_seq2().
Optional arg autojunk should be set to False to disable the
"automatic junk heuristic" that treats popular elements as junk
(see module documentation for more information).
"""
# Members:
# a
# first sequence
# b
# second sequence; differences are computed as "what do
# we need to do to 'a' to change it into 'b'?"
# b2j
# for x in b, b2j[x] is a list of the indices (into b)
# at which x appears; junk and popular elements do not appear
# fullbcount
# for x in b, fullbcount[x] == the number of times x
# appears in b; only materialized if really needed (used
# only for computing quick_ratio())
# matching_blocks
# a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
# ascending & non-overlapping in i and in j; terminated by
# a dummy (len(a), len(b), 0) sentinel
# opcodes
# a list of (tag, i1, i2, j1, j2) tuples, where tag is
# one of
# 'replace' a[i1:i2] should be replaced by b[j1:j2]
# 'delete' a[i1:i2] should be deleted
# 'insert' b[j1:j2] should be inserted
# 'equal' a[i1:i2] == b[j1:j2]
# isjunk
# a user-supplied function taking a sequence element and
# returning true iff the element is "junk" -- this has
# subtle but helpful effects on the algorithm, which I'll
# get around to writing up someday <0.9 wink>.
# DON'T USE! Only __chain_b uses this. Use isbjunk.
# bjunk
# the items in b for which isjunk is True.
# bpopular
# nonjunk items in b treated as junk by the heuristic (if used).
self.isjunk = isjunk
self.a = self.b = None
self.autojunk = autojunk
self.set_seqs(a, b)
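# Editor's note: illustrative sketch, not part of the original module.  The
# autojunk heuristic described above only engages for sequences of 200 or
# more elements, treating any element that fills more than ~1% of the
# positions as junk.  A hypothetical comparison showing how to disable it:
#
#     a = ["x\n"] * 200 + ["payload\n"]
#     b = ["x\n"] * 200 + ["payload\n", "extra\n"]
#     SequenceMatcher(None, a, b).ratio()                  # "x\n" auto-junked
#     SequenceMatcher(None, a, b, autojunk=False).ratio()  # heuristic disabled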
def set_seqs(self, a, b):
"""Set the two sequences to be compared.
>>> s = SequenceMatcher()
>>> s.set_seqs("abcd", "bcde")
>>> s.ratio()
0.75
"""
self.set_seq1(a)
self.set_seq2(b)
def set_seq1(self, a):
"""Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
"""
if a is self.a:
return
self.a = a
self.matching_blocks = self.opcodes = None
def set_seq2(self, b):
"""Set the second sequence to be compared.
The first sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq2("abcd")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq1().
"""
if b is self.b:
return
self.b = b
self.matching_blocks = self.opcodes = None
self.fullbcount = None
self.__chain_b()
# For each element x in b, set b2j[x] to a list of the indices in
# b where x appears; the indices are in increasing order; note that
# the number of times x appears in b is len(b2j[x]) ...
# when self.isjunk is defined, junk elements don't show up in this
# map at all, which stops the central find_longest_match method
# from starting any matching block at a junk element ...
# also creates the fast isbjunk function ...
# b2j also does not contain entries for "popular" elements, meaning
# elements that account for more than 1 + 1% of the total elements, and
# when the sequence is reasonably large (>= 200 elements); this can
# be viewed as an adaptive notion of semi-junk, and yields an enormous
# speedup when, e.g., comparing program files with hundreds of
# instances of "return NULL;" ...
# note that this is only called when b changes; so for cross-product
# kinds of matches, it's best to call set_seq2 once, then set_seq1
# repeatedly
def __chain_b(self):
# Because isjunk is a user-defined (not C) function, and we test
# for junk a LOT, it's important to minimize the number of calls.
# Before the tricks described here, __chain_b was by far the most
# time-consuming routine in the whole module! If anyone sees
# Jim Roskind, thank him again for profile.py -- I never would
# have guessed that.
# The first trick is to build b2j ignoring the possibility
# of junk. I.e., we don't call isjunk at all yet. Throwing
# out the junk later is much cheaper than building b2j "right"
# from the start.
b = self.b
self.b2j = b2j = {}
for i, elt in enumerate(b):
indices = b2j.setdefault(elt, [])
indices.append(i)
# Purge junk elements
self.bjunk = junk = set()
isjunk = self.isjunk
if isjunk:
for elt in b2j.keys():
if isjunk(elt):
junk.add(elt)
for elt in junk: # separate loop avoids separate list of keys
del b2j[elt]
# Purge popular elements that are not junk
self.bpopular = popular = set()
n = len(b)
if self.autojunk and n >= 200:
ntest = n // 100 + 1
for elt, idxs in b2j.items():
if len(idxs) > ntest:
popular.add(elt)
for elt in popular: # ditto; as fast for 1% deletion
del b2j[elt]
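# Editor's note: illustrative sketch, not part of the original module.  For a
# small hypothetical input, the structures built by __chain_b look like this
# (blanks are declared junk, and the sequence is far too short for the
# autojunk/popular heuristic to trigger):
#
#     s = SequenceMatcher(lambda x: x == " ", "xy", "ab ab")
#     s.b2j      -> {'a': [0, 3], 'b': [1, 4]}   # junk keys purged
#     s.bjunk    -> {' '}
#     s.bpopular -> set()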
def isbjunk(self, item):
"Deprecated; use 'item in SequenceMatcher().bjunk'."
warnings.warn("'SequenceMatcher().isbjunk(item)' is deprecated;\n"
"use 'item in SMinstance.bjunk' instead.",
DeprecationWarning, 2)
return item in self.bjunk
def isbpopular(self, item):
"Deprecated; use 'item in SequenceMatcher().bpopular'."
warnings.warn("'SequenceMatcher().isbpopular(item)' is deprecated;\n"
"use 'item in SMinstance.bpopular' instead.",
DeprecationWarning, 2)
return item in self.bpopular
def find_longest_match(self, alo, ahi, blo, bhi):
"""Find longest matching block in a[alo:ahi] and b[blo:bhi].
If isjunk is not defined:
Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
alo <= i <= i+k <= ahi
blo <= j <= j+k <= bhi
and for all (i',j',k') meeting those conditions,
k >= k'
i <= i'
and if i == i', j <= j'
In other words, of all maximal matching blocks, return one that
starts earliest in a, and of all those maximal matching blocks that
start earliest in a, return the one that starts earliest in b.
>>> s = SequenceMatcher(None, " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=0, b=4, size=5)
If isjunk is defined, first the longest matching block is
determined as above, but with the additional restriction that no
junk element appears in the block. Then that block is extended as
far as possible by matching (only) junk elements on both sides. So
the resulting block never matches on junk except as identical junk
happens to be adjacent to an "interesting" match.
Here's the same example as before, but considering blanks to be
junk. That prevents " abcd" from matching the " abcd" at the tail
end of the second sequence directly. Instead only the "abcd" can
match, and matches the leftmost "abcd" in the second sequence:
>>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=1, b=0, size=4)
If no blocks match, return (alo, blo, 0).
>>> s = SequenceMatcher(None, "ab", "c")
>>> s.find_longest_match(0, 2, 0, 1)
Match(a=0, b=0, size=0)
"""
# CAUTION: stripping common prefix or suffix would be incorrect.
# E.g.,
# ab
# acab
# Longest matching block is "ab", but if common prefix is
# stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
# strip, so ends up claiming that ab is changed to acab by
# inserting "ca" in the middle. That's minimal but unintuitive:
# "it's obvious" that someone inserted "ac" at the front.
# Windiff ends up at the same place as diff, but by pairing up
# the unique 'b's and then matching the first two 'a's.
a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.bjunk.__contains__
besti, bestj, bestsize = alo, blo, 0
# find longest junk-free match
# during an iteration of the loop, j2len[j] = length of longest
# junk-free match ending with a[i-1] and b[j]
j2len = {}
nothing = []
for i in range(alo, ahi):
# look at all instances of a[i] in b; note that because
# b2j has no junk keys, the loop is skipped if a[i] is junk
j2lenget = j2len.get
newj2len = {}
for j in b2j.get(a[i], nothing):
# a[i] matches b[j]
if j < blo:
continue
if j >= bhi:
break
k = newj2len[j] = j2lenget(j-1, 0) + 1
if k > bestsize:
besti, bestj, bestsize = i-k+1, j-k+1, k
j2len = newj2len
# Extend the best by non-junk elements on each end. In particular,
# "popular" non-junk elements aren't in b2j, which greatly speeds
# the inner loop above, but also means "the best" match so far
# doesn't contain any junk *or* popular non-junk elements.
while besti > alo and bestj > blo and \
not isbjunk(b[bestj-1]) and \
a[besti-1] == b[bestj-1]:
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
while besti+bestsize < ahi and bestj+bestsize < bhi and \
not isbjunk(b[bestj+bestsize]) and \
a[besti+bestsize] == b[bestj+bestsize]:
bestsize += 1
# Now that we have a wholly interesting match (albeit possibly
# empty!), we may as well suck up the matching junk on each
# side of it too. Can't think of a good reason not to, and it
# saves post-processing the (possibly considerable) expense of
# figuring out what to do with it. In the case of an empty
# interesting match, this is clearly the right thing to do,
# because no other kind of match is possible in the regions.
while besti > alo and bestj > blo and \
isbjunk(b[bestj-1]) and \
a[besti-1] == b[bestj-1]:
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
while besti+bestsize < ahi and bestj+bestsize < bhi and \
isbjunk(b[bestj+bestsize]) and \
a[besti+bestsize] == b[bestj+bestsize]:
bestsize = bestsize + 1
return Match(besti, bestj, bestsize)
def get_matching_blocks(self):
"""Return list of triples describing matching subsequences.
Each triple is of the form (i, j, n), and means that
a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
i and in j. New in Python 2.5, it's also guaranteed that if
(i, j, n) and (i', j', n') are adjacent triples in the list, and
the second is not the last triple in the list, then i+n != i' or
j+n != j'. IOW, adjacent triples never describe adjacent equal
blocks.
The last triple is a dummy, (len(a), len(b), 0), and is the only
triple with n==0.
>>> s = SequenceMatcher(None, "abxcd", "abcd")
>>> list(s.get_matching_blocks())
[Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]
"""
if self.matching_blocks is not None:
return self.matching_blocks
la, lb = len(self.a), len(self.b)
# This is most naturally expressed as a recursive algorithm, but
# at least one user bumped into extreme use cases that exceeded
# the recursion limit on their box. So, now we maintain a list
# ('queue`) of blocks we still need to look at, and append partial
# results to `matching_blocks` in a loop; the matches are sorted
# at the end.
queue = [(0, la, 0, lb)]
matching_blocks = []
while queue:
alo, ahi, blo, bhi = queue.pop()
i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
# a[alo:i] vs b[blo:j] unknown
# a[i:i+k] same as b[j:j+k]
# a[i+k:ahi] vs b[j+k:bhi] unknown
if k: # if k is 0, there was no matching block
matching_blocks.append(x)
if alo < i and blo < j:
queue.append((alo, i, blo, j))
if i+k < ahi and j+k < bhi:
queue.append((i+k, ahi, j+k, bhi))
matching_blocks.sort()
# It's possible that we have adjacent equal blocks in the
# matching_blocks list now. Starting with 2.5, this code was added
# to collapse them.
i1 = j1 = k1 = 0
non_adjacent = []
for i2, j2, k2 in matching_blocks:
# Is this block adjacent to i1, j1, k1?
if i1 + k1 == i2 and j1 + k1 == j2:
# Yes, so collapse them -- this just increases the length of
# the first block by the length of the second, and the first
# block so lengthened remains the block to compare against.
k1 += k2
else:
# Not adjacent. Remember the first block (k1==0 means it's
# the dummy we started with), and make the second block the
# new block to compare against.
if k1:
non_adjacent.append((i1, j1, k1))
i1, j1, k1 = i2, j2, k2
if k1:
non_adjacent.append((i1, j1, k1))
non_adjacent.append( (la, lb, 0) )
self.matching_blocks = non_adjacent
return map(Match._make, self.matching_blocks)
def get_opcodes(self):
"""Return list of 5-tuples describing how to turn a into b.
Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
tuple preceding it, and likewise for j1 == the previous j2.
The tags are strings, with these meanings:
'replace': a[i1:i2] should be replaced by b[j1:j2]
'delete': a[i1:i2] should be deleted.
Note that j1==j2 in this case.
'insert': b[j1:j2] should be inserted at a[i1:i1].
Note that i1==i2 in this case.
'equal': a[i1:i2] == b[j1:j2]
>>> a = "qabxcd"
>>> b = "abycdf"
>>> s = SequenceMatcher(None, a, b)
>>> for tag, i1, i2, j1, j2 in s.get_opcodes():
... print(("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2])))
delete a[0:1] (q) b[0:0] ()
equal a[1:3] (ab) b[0:2] (ab)
replace a[3:4] (x) b[2:3] (y)
equal a[4:6] (cd) b[3:5] (cd)
insert a[6:6] () b[5:6] (f)
"""
if self.opcodes is not None:
return self.opcodes
i = j = 0
self.opcodes = answer = []
for ai, bj, size in self.get_matching_blocks():
# invariant: we've pumped out correct diffs to change
# a[:i] into b[:j], and the next matching block is
# a[ai:ai+size] == b[bj:bj+size]. So we need to pump
# out a diff to change a[i:ai] into b[j:bj], pump out
# the matching block, and move (i,j) beyond the match
tag = ''
if i < ai and j < bj:
tag = 'replace'
elif i < ai:
tag = 'delete'
elif j < bj:
tag = 'insert'
if tag:
answer.append( (tag, i, ai, j, bj) )
i, j = ai+size, bj+size
# the list of matching blocks is terminated by a
# sentinel with size 0
if size:
answer.append( ('equal', ai, i, bj, j) )
return answer
def get_grouped_opcodes(self, n=3):
""" Isolate change clusters by eliminating ranges with no changes.
Return a generator of groups with up to n lines of context.
Each group is in the same format as returned by get_opcodes().
>>> from pprint import pprint
>>> a = list(map(str, range(1,40)))
>>> b = a[:]
>>> b[8:8] = ['i'] # Make an insertion
>>> b[20] += 'x' # Make a replacement
>>> b[23:28] = [] # Make a deletion
>>> b[30] += 'y' # Make another replacement
>>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
[[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
[('equal', 16, 19, 17, 20),
('replace', 19, 20, 20, 21),
('equal', 20, 22, 21, 23),
('delete', 22, 27, 23, 23),
('equal', 27, 30, 23, 26)],
[('equal', 31, 34, 27, 30),
('replace', 34, 35, 30, 31),
('equal', 35, 38, 31, 34)]]
"""
codes = self.get_opcodes()
if not codes:
codes = [("equal", 0, 1, 0, 1)]
# Fixup leading and trailing groups if they show no changes.
if codes[0][0] == 'equal':
tag, i1, i2, j1, j2 = codes[0]
codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
if codes[-1][0] == 'equal':
tag, i1, i2, j1, j2 = codes[-1]
codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
nn = n + n
group = []
for tag, i1, i2, j1, j2 in codes:
# End the current group and start a new one whenever
# there is a large range with no changes.
if tag == 'equal' and i2-i1 > nn:
group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
yield group
group = []
i1, j1 = max(i1, i2-n), max(j1, j2-n)
group.append((tag, i1, i2, j1 ,j2))
if group and not (len(group)==1 and group[0][0] == 'equal'):
yield group
def ratio(self):
"""Return a measure of the sequences' similarity (float in [0,1]).
Where T is the total number of elements in both sequences, and
M is the number of matches, this is 2.0*M / T.
Note that this is 1 if the sequences are identical, and 0 if
they have nothing in common.
.ratio() is expensive to compute if you haven't already computed
.get_matching_blocks() or .get_opcodes(), in which case you may
want to try .quick_ratio() or .real_quick_ratio() first to get an
upper bound.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.quick_ratio()
0.75
>>> s.real_quick_ratio()
1.0
"""
matches = sum(triple[-1] for triple in self.get_matching_blocks())
return _calculate_ratio(matches, len(self.a) + len(self.b))
def quick_ratio(self):
"""Return an upper bound on ratio() relatively quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute.
"""
# viewing a and b as multisets, set matches to the cardinality
# of their intersection; this counts the number of matches
# without regard to order, so is clearly an upper bound
if self.fullbcount is None:
self.fullbcount = fullbcount = {}
for elt in self.b:
fullbcount[elt] = fullbcount.get(elt, 0) + 1
fullbcount = self.fullbcount
# avail[x] is the number of times x appears in 'b' less the
# number of times we've seen it in 'a' so far ... kinda
avail = {}
availhas, matches = avail.__contains__, 0
for elt in self.a:
if availhas(elt):
numb = avail[elt]
else:
numb = fullbcount.get(elt, 0)
avail[elt] = numb - 1
if numb > 0:
matches = matches + 1
return _calculate_ratio(matches, len(self.a) + len(self.b))
def real_quick_ratio(self):
"""Return an upper bound on ratio() very quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute than either .ratio() or .quick_ratio().
"""
la, lb = len(self.a), len(self.b)
# can't have more matches than the number of elements in the
# shorter sequence
return _calculate_ratio(min(la, lb), la + lb)
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
"""Use SequenceMatcher to return list of the best "good enough" matches.
word is a sequence for which close matches are desired (typically a
string).
possibilities is a list of sequences against which to match word
(typically a list of strings).
Optional arg n (default 3) is the maximum number of close matches to
return. n must be > 0.
Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities
that don't score at least that similar to word are ignored.
The best (no more than n) matches among the possibilities are returned
in a list, sorted by similarity score, most similar first.
>>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
['apple', 'ape']
>>> import keyword as _keyword
>>> get_close_matches("wheel", _keyword.kwlist)
['while']
>>> get_close_matches("Apple", _keyword.kwlist)
[]
>>> get_close_matches("accept", _keyword.kwlist)
['except']
"""
if not n > 0:
raise ValueError("n must be > 0: %r" % (n,))
if not 0.0 <= cutoff <= 1.0:
raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))
result = []
s = SequenceMatcher()
s.set_seq2(word)
for x in possibilities:
s.set_seq1(x)
if s.real_quick_ratio() >= cutoff and \
s.quick_ratio() >= cutoff and \
s.ratio() >= cutoff:
result.append((s.ratio(), x))
# Move the best scorers to head of list
result = heapq.nlargest(n, result)
# Strip scores for the best n matches
return [x for score, x in result]
def _count_leading(line, ch):
"""
Return number of `ch` characters at the start of `line`.
Example:
>>> _count_leading(' abc', ' ')
3
"""
i, n = 0, len(line)
while i < n and line[i] == ch:
i += 1
return i
class Differ:
r"""
Differ is a class for comparing sequences of lines of text, and
producing human-readable differences or deltas. Differ uses
SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
Each line of a Differ delta begins with a two-letter code:
'- ' line unique to sequence 1
'+ ' line unique to sequence 2
' ' line common to both sequences
'? ' line not present in either input sequence
Lines beginning with '? ' attempt to guide the eye to intraline
differences, and were not present in either input sequence. These lines
can be confusing if the sequences contain tab characters.
Note that Differ makes no claim to produce a *minimal* diff. To the
contrary, minimal diffs are often counter-intuitive, because they synch
up anywhere possible, sometimes producing accidental matches 100 pages apart.
Restricting synch points to contiguous matches preserves some notion of
locality, at the occasional cost of producing a longer diff.
Example: Comparing two texts.
First we set up the texts, sequences of individual single-line strings
ending with newlines (such sequences can also be obtained from the
`readlines()` method of file-like objects):
>>> text1 = ''' 1. Beautiful is better than ugly.
... 2. Explicit is better than implicit.
... 3. Simple is better than complex.
... 4. Complex is better than complicated.
... '''.splitlines(1)
>>> len(text1)
4
>>> text1[0][-1]
'\n'
>>> text2 = ''' 1. Beautiful is better than ugly.
... 3. Simple is better than complex.
... 4. Complicated is better than complex.
... 5. Flat is better than nested.
... '''.splitlines(1)
Next we instantiate a Differ object:
>>> d = Differ()
Note that when instantiating a Differ object we may pass functions to
filter out line and character 'junk'. See Differ.__init__ for details.
Finally, we compare the two:
>>> result = list(d.compare(text1, text2))
'result' is a list of strings, so let's pretty-print it:
>>> from pprint import pprint as _pprint
>>> _pprint(result)
[' 1. Beautiful is better than ugly.\n',
'- 2. Explicit is better than implicit.\n',
'- 3. Simple is better than complex.\n',
'+ 3. Simple is better than complex.\n',
'? ++\n',
'- 4. Complex is better than complicated.\n',
'? ^ ---- ^\n',
'+ 4. Complicated is better than complex.\n',
'? ++++ ^ ^\n',
'+ 5. Flat is better than nested.\n']
As a single multi-line string it looks like this:
>>> print(''.join(result), end="")
1. Beautiful is better than ugly.
- 2. Explicit is better than implicit.
- 3. Simple is better than complex.
+ 3. Simple is better than complex.
? ++
- 4. Complex is better than complicated.
? ^ ---- ^
+ 4. Complicated is better than complex.
? ++++ ^ ^
+ 5. Flat is better than nested.
Methods:
__init__(linejunk=None, charjunk=None)
Construct a text differencer, with optional filters.
compare(a, b)
Compare two sequences of lines; generate the resulting delta.
"""
def __init__(self, linejunk=None, charjunk=None):
"""
Construct a text differencer, with optional filters.
The two optional keyword parameters are for filter functions:
- `linejunk`: A function that should accept a single string argument,
and return true iff the string is junk. The module-level function
`IS_LINE_JUNK` may be used to filter out lines without visible
characters, except for at most one splat ('#'). It is recommended
to leave linejunk None; as of Python 2.3, the underlying
SequenceMatcher class has grown an adaptive notion of "noise" lines
that's better than any static definition the author has ever been
able to craft.
- `charjunk`: A function that should accept a string of length 1. The
module-level function `IS_CHARACTER_JUNK` may be used to filter out
whitespace characters (a blank or tab; **note**: bad idea to include
newline in this!). Use of IS_CHARACTER_JUNK is recommended.
"""
self.linejunk = linejunk
self.charjunk = charjunk
def compare(self, a, b):
r"""
Compare two sequences of lines; generate the resulting delta.
Each sequence must contain individual single-line strings ending with
newlines. Such sequences can be obtained from the `readlines()` method
of file-like objects. The delta generated also consists of newline-
terminated strings, ready to be printed as-is via the writelines()
method of a file-like object.
Example:
>>> print(''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(1),
... 'ore\ntree\nemu\n'.splitlines(1))),
... end="")
- one
? ^
+ ore
? ^
- two
- three
? -
+ tree
+ emu
"""
cruncher = SequenceMatcher(self.linejunk, a, b)
for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
if tag == 'replace':
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
elif tag == 'delete':
g = self._dump('-', a, alo, ahi)
elif tag == 'insert':
g = self._dump('+', b, blo, bhi)
elif tag == 'equal':
g = self._dump(' ', a, alo, ahi)
else:
raise ValueError('unknown tag %r' % (tag,))
for line in g:
yield line
def _dump(self, tag, x, lo, hi):
"""Generate comparison results for a same-tagged range."""
for i in range(lo, hi):
yield '%s %s' % (tag, x[i])
def _plain_replace(self, a, alo, ahi, b, blo, bhi):
assert alo < ahi and blo < bhi
# dump the shorter block first -- reduces the burden on short-term
# memory if the blocks are of very different sizes
if bhi - blo < ahi - alo:
first = self._dump('+', b, blo, bhi)
second = self._dump('-', a, alo, ahi)
else:
first = self._dump('-', a, alo, ahi)
second = self._dump('+', b, blo, bhi)
for g in first, second:
for line in g:
yield line
def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
r"""
When replacing one block of lines with another, search the blocks
for *similar* lines; the best-matching pair (if any) is used as a
synch point, and intraline difference marking is done on the
similar pair. Lots of work, but often worth it.
Example:
>>> d = Differ()
>>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
... ['abcdefGhijkl\n'], 0, 1)
>>> print(''.join(results), end="")
- abcDefghiJkl
? ^ ^ ^
+ abcdefGhijkl
? ^ ^ ^
"""
# don't synch up unless the lines have a similarity score of at
# least cutoff; best_ratio tracks the best score seen so far
best_ratio, cutoff = 0.74, 0.75
cruncher = SequenceMatcher(self.charjunk)
eqi, eqj = None, None # 1st indices of equal lines (if any)
# search for the pair that matches best without being identical
# (identical lines must be junk lines, & we don't want to synch up
# on junk -- unless we have to)
for j in range(blo, bhi):
bj = b[j]
cruncher.set_seq2(bj)
for i in range(alo, ahi):
ai = a[i]
if ai == bj:
if eqi is None:
eqi, eqj = i, j
continue
cruncher.set_seq1(ai)
# computing similarity is expensive, so use the quick
# upper bounds first -- have seen this speed up messy
# compares by a factor of 3.
# note that ratio() is only expensive to compute the first
# time it's called on a sequence pair; the expensive part
# of the computation is cached by cruncher
if cruncher.real_quick_ratio() > best_ratio and \
cruncher.quick_ratio() > best_ratio and \
cruncher.ratio() > best_ratio:
best_ratio, best_i, best_j = cruncher.ratio(), i, j
if best_ratio < cutoff:
# no non-identical "pretty close" pair
if eqi is None:
# no identical pair either -- treat it as a straight replace
for line in self._plain_replace(a, alo, ahi, b, blo, bhi):
yield line
return
# no close pair, but an identical pair -- synch up on that
best_i, best_j, best_ratio = eqi, eqj, 1.0
else:
# there's a close pair, so forget the identical pair (if any)
eqi = None
# a[best_i] very similar to b[best_j]; eqi is None iff they're not
# identical
# pump out diffs from before the synch point
for line in self._fancy_helper(a, alo, best_i, b, blo, best_j):
yield line
# do intraline marking on the synch pair
aelt, belt = a[best_i], b[best_j]
if eqi is None:
# pump out a '-', '?', '+', '?' quad for the synched lines
atags = btags = ""
cruncher.set_seqs(aelt, belt)
for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
la, lb = ai2 - ai1, bj2 - bj1
if tag == 'replace':
atags += '^' * la
btags += '^' * lb
elif tag == 'delete':
atags += '-' * la
elif tag == 'insert':
btags += '+' * lb
elif tag == 'equal':
atags += ' ' * la
btags += ' ' * lb
else:
raise ValueError('unknown tag %r' % (tag,))
for line in self._qformat(aelt, belt, atags, btags):
yield line
else:
# the synch pair is identical
yield ' ' + aelt
# pump out diffs from after the synch point
for line in self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi):
yield line
def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
g = []
if alo < ahi:
if blo < bhi:
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
else:
g = self._dump('-', a, alo, ahi)
elif blo < bhi:
g = self._dump('+', b, blo, bhi)
for line in g:
yield line
def _qformat(self, aline, bline, atags, btags):
r"""
Format "?" output and deal with leading tabs.
Example:
>>> d = Differ()
>>> results = d._qformat('\tabcDefghiJkl\n', '\tabcdefGhijkl\n',
... ' ^ ^ ^ ', ' ^ ^ ^ ')
>>> for line in results: print(repr(line))
...
'- \tabcDefghiJkl\n'
'? \t ^ ^ ^\n'
'+ \tabcdefGhijkl\n'
'? \t ^ ^ ^\n'
"""
# Can hurt, but will probably help most of the time.
common = min(_count_leading(aline, "\t"),
_count_leading(bline, "\t"))
common = min(common, _count_leading(atags[:common], " "))
common = min(common, _count_leading(btags[:common], " "))
atags = atags[common:].rstrip()
btags = btags[common:].rstrip()
yield "- " + aline
if atags:
yield "? %s%s\n" % ("\t" * common, atags)
yield "+ " + bline
if btags:
yield "? %s%s\n" % ("\t" * common, btags)
# With respect to junk, an earlier version of ndiff simply refused to
# *start* a match with a junk element. The result was cases like this:
# before: private Thread currentThread;
# after: private volatile Thread currentThread;
# If you consider whitespace to be junk, the longest contiguous match
# not starting with junk is "e Thread currentThread". So ndiff reported
# that "e volatil" was inserted between the 't' and the 'e' in "private".
# While an accurate view, to people that's absurd. The current version
# looks for matching blocks that are entirely junk-free, then extends the
# longest one of those as far as possible but only with matching junk.
# So now "currentThread" is matched, then extended to suck up the
# preceding blank; then "private" is matched, and extended to suck up the
# following blank; then "Thread" is matched; and finally ndiff reports
# that "volatile " was inserted before "Thread". The only quibble
# remaining is that perhaps it was really the case that " volatile"
# was inserted after "private". I can live with that <wink>.
import re
def IS_LINE_JUNK(line, pat=re.compile(r"\s*#?\s*$").match):
r"""
Return True for ignorable line: iff `line` is blank or contains a single '#'.
Examples:
>>> IS_LINE_JUNK('\n')
True
>>> IS_LINE_JUNK(' # \n')
True
>>> IS_LINE_JUNK('hello\n')
False
"""
return pat(line) is not None
def IS_CHARACTER_JUNK(ch, ws=" \t"):
r"""
Return True for ignorable character: iff `ch` is a space or tab.
Examples:
>>> IS_CHARACTER_JUNK(' ')
True
>>> IS_CHARACTER_JUNK('\t')
True
>>> IS_CHARACTER_JUNK('\n')
False
>>> IS_CHARACTER_JUNK('x')
False
"""
return ch in ws
########################################################################
### Unified Diff
########################################################################
def _format_range_unified(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if length == 1:
return '{}'.format(beginning)
if not length:
beginning -= 1 # empty ranges begin at line just before the range
return '{},{}'.format(beginning, length)
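# Editor's note: illustrative sketch, not part of the original module.  The
# hypothetical helper below just exercises _format_range_unified to show the
# 1-based "ed"-style ranges that end up in unified_diff's "@@ ... @@" lines.
def _demo_format_range_unified():
    # (start, stop) are 0-based slice bounds; the result is 1-based.
    return [
        _format_range_unified(0, 1),   # '1'    -- a single line
        _format_range_unified(0, 4),   # '1,4'  -- four lines starting at line 1
        _format_range_unified(3, 3),   # '3,0'  -- empty range just before line 4
    ]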
def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
tofiledate='', n=3, lineterm='\n'):
r"""
Compare two sequences of lines; generate the delta as a unified diff.
Unified diffs are a compact way of showing line changes and a few
lines of context. The number of context lines is set by 'n' which
defaults to three.
By default, the diff control lines (those with ---, +++, or @@) are
created with a trailing newline. This is helpful so that inputs
created from file.readlines() result in diffs that are suitable for
file.writelines() since both the inputs and outputs have trailing
newlines.
For inputs that do not have trailing newlines, set the lineterm
argument to "" so that the output will be uniformly newline free.
The unidiff format normally has a header for filenames and modification
times. Any or all of these may be specified using strings for
'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
The modification times are normally expressed in the ISO 8601 format.
Example:
>>> for line in unified_diff('one two three four'.split(),
... 'zero one tree four'.split(), 'Original', 'Current',
... '2005-01-26 23:30:50', '2010-04-02 10:20:52',
... lineterm=''):
... print(line) # doctest: +NORMALIZE_WHITESPACE
--- Original 2005-01-26 23:30:50
+++ Current 2010-04-02 10:20:52
@@ -1,4 +1,4 @@
+zero
one
-two
-three
+tree
four
"""
started = False
for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
if not started:
started = True
fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
todate = '\t{}'.format(tofiledate) if tofiledate else ''
yield '--- {}{}{}'.format(fromfile, fromdate, lineterm)
yield '+++ {}{}{}'.format(tofile, todate, lineterm)
first, last = group[0], group[-1]
file1_range = _format_range_unified(first[1], last[2])
file2_range = _format_range_unified(first[3], last[4])
yield '@@ -{} +{} @@{}'.format(file1_range, file2_range, lineterm)
for tag, i1, i2, j1, j2 in group:
if tag == 'equal':
for line in a[i1:i2]:
yield ' ' + line
continue
if tag in {'replace', 'delete'}:
for line in a[i1:i2]:
yield '-' + line
if tag in {'replace', 'insert'}:
for line in b[j1:j2]:
yield '+' + line
########################################################################
### Context Diff
########################################################################
def _format_range_context(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if not length:
beginning -= 1 # empty ranges begin at line just before the range
if length <= 1:
return '{}'.format(beginning)
return '{},{}'.format(beginning, beginning + length - 1)
# See http://www.unix.org/single_unix_specification/
def context_diff(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
r"""
Compare two sequences of lines; generate the delta as a context diff.
Context diffs are a compact way of showing line changes and a few
lines of context. The number of context lines is set by 'n' which
defaults to three.
By default, the diff control lines (those with *** or ---) are
created with a trailing newline. This is helpful so that inputs
created from file.readlines() result in diffs that are suitable for
file.writelines() since both the inputs and outputs have trailing
newlines.
For inputs that do not have trailing newlines, set the lineterm
argument to "" so that the output will be uniformly newline free.
The context diff format normally has a header for filenames and
modification times. Any or all of these may be specified using
strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
The modification times are normally expressed in the ISO 8601 format.
If not specified, the strings default to blanks.
Example:
>>> print(''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(1),
... 'zero\none\ntree\nfour\n'.splitlines(1), 'Original', 'Current')),
... end="")
*** Original
--- Current
***************
*** 1,4 ****
one
! two
! three
four
--- 1,4 ----
+ zero
one
! tree
four
"""
prefix = dict(insert='+ ', delete='- ', replace='! ', equal=' ')
started = False
for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
if not started:
started = True
fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
todate = '\t{}'.format(tofiledate) if tofiledate else ''
yield '*** {}{}{}'.format(fromfile, fromdate, lineterm)
yield '--- {}{}{}'.format(tofile, todate, lineterm)
first, last = group[0], group[-1]
yield '***************' + lineterm
file1_range = _format_range_context(first[1], last[2])
yield '*** {} ****{}'.format(file1_range, lineterm)
if any(tag in {'replace', 'delete'} for tag, _, _, _, _ in group):
for tag, i1, i2, _, _ in group:
if tag != 'insert':
for line in a[i1:i2]:
yield prefix[tag] + line
file2_range = _format_range_context(first[3], last[4])
yield '--- {} ----{}'.format(file2_range, lineterm)
if any(tag in {'replace', 'insert'} for tag, _, _, _, _ in group):
for tag, _, _, j1, j2 in group:
if tag != 'delete':
for line in b[j1:j2]:
yield prefix[tag] + line
def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK):
r"""
Compare `a` and `b` (lists of strings); return a `Differ`-style delta.
Optional keyword parameters `linejunk` and `charjunk` are for filter
functions (or None):
- linejunk: A function that should accept a single string argument, and
return true iff the string is junk. The default is None, and is
recommended; as of Python 2.3, an adaptive notion of "noise" lines is
used that does a good job on its own.
- charjunk: A function that should accept a string of length 1. The
default is module-level function IS_CHARACTER_JUNK, which filters out
whitespace characters (a blank or tab; note: bad idea to include newline
in this!).
Tools/scripts/ndiff.py is a command-line front-end to this function.
Example:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
... 'ore\ntree\nemu\n'.splitlines(1))
>>> print(''.join(diff), end="")
- one
? ^
+ ore
? ^
- two
- three
? -
+ tree
+ emu
"""
return Differ(linejunk, charjunk).compare(a, b)
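# Editor's note: illustrative sketch, not part of the original module.  It
# assumes restore(), which is defined further down in this file, and shows
# that an ndiff delta is reversible: either input can be recovered from it.
def _demo_ndiff_roundtrip():
    a = 'one\ntwo\nthree\n'.splitlines(1)
    b = 'ore\ntree\nemu\n'.splitlines(1)
    delta = list(ndiff(a, b))
    # restore(delta, 1) rebuilds a; restore(delta, 2) rebuilds b.
    return list(restore(delta, 1)) == a and list(restore(delta, 2)) == b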
def _mdiff(fromlines, tolines, context=None, linejunk=None,
charjunk=IS_CHARACTER_JUNK):
r"""Returns generator yielding marked up from/to side by side differences.
Arguments:
fromlines -- list of text lines to be compared to tolines
tolines -- list of text lines to be compared to fromlines
context -- number of context lines to display on each side of difference,
if None, all from/to text lines will be generated.
linejunk -- passed on to ndiff (see ndiff documentation)
charjunk -- passed on to ndiff (see ndiff documentation)
This function returns an iterator which yields a tuple:
(from line tuple, to line tuple, boolean flag)
from/to line tuple -- (line num, line text)
line num -- integer or None (to indicate a context separation)
line text -- original line text with following markers inserted:
'\0+' -- marks start of added text
'\0-' -- marks start of deleted text
'\0^' -- marks start of changed text
'\1' -- marks end of added/deleted/changed text
boolean flag -- None indicates context separation, True indicates
either "from" or "to" line contains a change, otherwise False.
This function/iterator was originally developed to generate side by side
file difference for making HTML pages (see HtmlDiff class for example
usage).
Note, this function utilizes the ndiff function to generate the side by
side difference markup. Optional ndiff arguments may be passed to this
function and they in turn will be passed to ndiff.
"""
import re
# regular expression for finding intraline change indices
change_re = re.compile(r'(\++|\-+|\^+)')
# create the difference iterator to generate the differences
diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk)
def _make_line(lines, format_key, side, num_lines=[0,0]):
"""Returns line of text with user's change markup and line formatting.
lines -- list of lines from the ndiff generator to produce a line of
text from. When producing the line of text to return, the
lines used are removed from this list.
format_key -- '+' return first line in list with "add" markup around
the entire line.
'-' return first line in list with "delete" markup around
the entire line.
'?' return first line in list with add/delete/change
intraline markup (indices obtained from second line)
None return first line in list with no markup
side -- index into the num_lines list (0=from,1=to)
num_lines -- from/to current line number. This is NOT intended to be a
passed parameter. It is present as a keyword argument to
maintain memory of the current line numbers between calls
of this function.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
num_lines[side] += 1
# Handle case where no user markup is to be added, just return line of
# text with user's line format to allow for usage of the line number.
if format_key is None:
return (num_lines[side],lines.pop(0)[2:])
# Handle case of intraline changes
if format_key == '?':
text, markers = lines.pop(0), lines.pop(0)
# find intraline changes (store change type and indices in tuples)
sub_info = []
def record_sub_info(match_object,sub_info=sub_info):
sub_info.append([match_object.group(1)[0],match_object.span()])
return match_object.group(1)
change_re.sub(record_sub_info,markers)
# process each tuple inserting our special marks that won't be
# noticed by an xml/html escaper.
for key,(begin,end) in sub_info[::-1]:
text = text[0:begin]+'\0'+key+text[begin:end]+'\1'+text[end:]
text = text[2:]
# Handle case of add/delete entire line
else:
text = lines.pop(0)[2:]
# if line of text is just a newline, insert a space so there is
# something for the user to highlight and see.
if not text:
text = ' '
# insert marks that won't be noticed by an xml/html escaper.
text = '\0' + format_key + text + '\1'
# Return line of text, first allow user's line formatter to do its
# thing (such as adding the line number) then replace the special
# marks with the user's change markup.
return (num_lines[side],text)
def _line_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from a
differencing iterator, processes them and yields them. When it can
it yields both a "from" and a "to" line, otherwise it will yield one
or the other. In addition to yielding the lines of from/to text, a
boolean flag is yielded to indicate if the text line(s) have
differences in them.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
lines = []
num_blanks_pending, num_blanks_to_yield = 0, 0
while True:
# Load up next 4 lines so we can look ahead, create strings which
# are a concatenation of the first character of each of the 4 lines
# so we can do some very readable comparisons.
while len(lines) < 4:
try:
lines.append(next(diff_lines_iterator))
except StopIteration:
lines.append('X')
s = ''.join([line[0] for line in lines])
if s.startswith('X'):
# When no more lines, pump out any remaining blank lines so the
# corresponding add/delete lines get a matching blank line so
# all line pairs get yielded at the next level.
num_blanks_to_yield = num_blanks_pending
elif s.startswith('-?+?'):
# simple intraline change
yield _make_line(lines,'?',0), _make_line(lines,'?',1), True
continue
elif s.startswith('--++'):
# in delete block, add block coming: we do NOT want to get
# caught up on blank lines yet, just process the delete line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith(('--?+', '--+', '- ')):
# in delete block and see an intraline change or unchanged line
# coming: yield the delete line and then blanks
from_line,to_line = _make_line(lines,'-',0), None
num_blanks_to_yield,num_blanks_pending = num_blanks_pending-1,0
elif s.startswith('-+?'):
# intraline change
yield _make_line(lines,None,0), _make_line(lines,'?',1), True
continue
elif s.startswith('-?+'):
# intraline change
yield _make_line(lines,'?',0), _make_line(lines,None,1), True
continue
elif s.startswith('-'):
# delete FROM line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith('+--'):
# in add block, delete block coming: we do NOT want to get
# caught up on blank lines yet, just process the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(('+ ', '+-')):
# will be leaving an add block: yield blanks then add line
from_line, to_line = None, _make_line(lines,'+',1)
num_blanks_to_yield,num_blanks_pending = num_blanks_pending+1,0
elif s.startswith('+'):
# inside an add block, yield the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(' '):
# unchanged text, yield it to both sides
yield _make_line(lines[:],None,0),_make_line(lines,None,1),False
continue
# Catch up on the blank lines so when we yield the next from/to
# pair, they are lined up.
while(num_blanks_to_yield < 0):
num_blanks_to_yield += 1
yield None,('','\n'),True
while(num_blanks_to_yield > 0):
num_blanks_to_yield -= 1
yield ('','\n'),None,True
if s.startswith('X'):
    return  # no more lines to process: end the generator
else:
yield from_line,to_line,True
def _line_pair_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from the line
iterator. Its difference from that iterator is that this function
always yields a pair of from/to text lines (with the change
indication). If necessary it will collect single from/to lines
until it has a matching from/to pair to yield.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
line_iterator = _line_iterator()
fromlines,tolines=[],[]
while True:
# Collecting lines of text until we have a from/to pair
while (len(fromlines)==0 or len(tolines)==0):
from_line, to_line, found_diff = next(line_iterator)
if from_line is not None:
fromlines.append((from_line,found_diff))
if to_line is not None:
tolines.append((to_line,found_diff))
# Once we have a pair, remove them from the collection and yield it
from_line, fromDiff = fromlines.pop(0)
to_line, to_diff = tolines.pop(0)
yield (from_line,to_line,fromDiff or to_diff)
# Handle case where user does not want context differencing, just yield
# them up without doing anything else with them.
line_pair_iterator = _line_pair_iterator()
if context is None:
while True:
yield next(line_pair_iterator)
# Handle case where user wants context differencing. We must do some
# storage of lines until we know for sure that they are to be yielded.
else:
context += 1
lines_to_write = 0
while True:
# Store lines up until we find a difference, note use of a
# circular queue because we only need to keep around what
# we need for context.
index, contextLines = 0, [None]*(context)
found_diff = False
while(found_diff is False):
from_line, to_line, found_diff = next(line_pair_iterator)
i = index % context
contextLines[i] = (from_line, to_line, found_diff)
index += 1
# Yield lines that we have collected so far, but first yield
# the user's separator.
if index > context:
yield None, None, None
lines_to_write = context
else:
lines_to_write = index
index = 0
while(lines_to_write):
i = index % context
index += 1
yield contextLines[i]
lines_to_write -= 1
# Now yield the context lines after the change
lines_to_write = context-1
while(lines_to_write):
from_line, to_line, found_diff = next(line_pair_iterator)
# If another change within the context, extend the context
if found_diff:
lines_to_write = context-1
else:
lines_to_write -= 1
yield from_line, to_line, found_diff
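# Editor's note: illustrative sketch, not part of the original module.  It
# peeks at the first two tuples _mdiff yields, to make the
# (from line tuple, to line tuple, flag) format described above concrete;
# itertools.islice is used so the generator is not run to exhaustion.
def _demo_mdiff_tuples():
    from itertools import islice
    fromlines = ['one\n', 'two\n']
    tolines = ['one\n', 'too\n']
    # First tuple:  ((1, 'one\n'), (1, 'one\n'), False)   -- unchanged pair
    # Second tuple: the changed pair, whose texts carry '\0^'...'\1' markers
    #               around the changed characters, with the flag set to True.
    return list(islice(_mdiff(fromlines, tolines), 2))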
_file_template = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type"
content="text/html; charset=ISO-8859-1" />
<title></title>
<style type="text/css">%(styles)s
</style>
</head>
<body>
%(table)s%(legend)s
</body>
</html>"""
_styles = """
table.diff {font-family:Courier; border:medium;}
.diff_header {background-color:#e0e0e0}
td.diff_header {text-align:right}
.diff_next {background-color:#c0c0c0}
.diff_add {background-color:#aaffaa}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}"""
_table_template = """
<table class="diff" id="difflib_chg_%(prefix)s_top"
cellspacing="0" cellpadding="0" rules="groups" >
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
%(header_row)s
<tbody>
%(data_rows)s </tbody>
</table>"""
_legend = """
<table class="diff" summary="Legends">
<tr> <th colspan="2"> Legends </th> </tr>
<tr> <td> <table border="" summary="Colors">
<tr><th> Colors </th> </tr>
<tr><td class="diff_add"> Added </td></tr>
<tr><td class="diff_chg">Changed</td> </tr>
<tr><td class="diff_sub">Deleted</td> </tr>
</table></td>
<td> <table border="" summary="Links">
<tr><th colspan="2"> Links </th> </tr>
<tr><td>(f)irst change</td> </tr>
<tr><td>(n)ext change</td> </tr>
<tr><td>(t)op</td> </tr>
</table></td> </tr>
</table>"""
class HtmlDiff(object):
"""For producing HTML side by side comparison with change highlights.
This class can be used to create an HTML table (or a complete HTML file
containing the table) showing a side by side, line by line comparison
of text with inter-line and intra-line change highlights. The table can
be generated in either full or contextual difference mode.
The following methods are provided for HTML generation:
make_table -- generates HTML for a single side by side table
make_file -- generates complete HTML file with a single side by side table
See tools/scripts/diff.py for an example usage of this class.
"""
_file_template = _file_template
_styles = _styles
_table_template = _table_template
_legend = _legend
_default_prefix = 0
def __init__(self,tabsize=8,wrapcolumn=None,linejunk=None,
charjunk=IS_CHARACTER_JUNK):
"""HtmlDiff instance initializer
Arguments:
tabsize -- tab stop spacing, defaults to 8.
wrapcolumn -- column number where lines are broken and wrapped,
defaults to None where lines are not wrapped.
linejunk,charjunk -- keyword arguments passed into ndiff() (used by
HtmlDiff() to generate the side by side HTML differences). See
ndiff() documentation for argument default values and descriptions.
"""
self._tabsize = tabsize
self._wrapcolumn = wrapcolumn
self._linejunk = linejunk
self._charjunk = charjunk
def make_file(self,fromlines,tolines,fromdesc='',todesc='',context=False,
numlines=5):
"""Returns HTML file of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
the "next" link anchors before the next change (so click of
"next" link jumps to just before the change).
"""
return self._file_template % dict(
styles = self._styles,
legend = self._legend,
table = self.make_table(fromlines,tolines,fromdesc,todesc,
context=context,numlines=numlines))
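# Editor's note: illustrative sketch, not part of the original module, kept
# as a comment so the class body is unchanged.  Typical make_file() usage
# with hypothetical file names:
#
#     d = HtmlDiff(tabsize=4, wrapcolumn=72)
#     with open('before.txt') as f, open('after.txt') as g:
#         html = d.make_file(f.readlines(), g.readlines(),
#                            'before.txt', 'after.txt',
#                            context=True, numlines=3)
#     with open('diff.html', 'w') as out:
#         out.write(html)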
def _tab_newline_replace(self,fromlines,tolines):
"""Returns from/to line lists with tabs expanded and newlines removed.
Instead of tab characters being replaced by the number of spaces
needed to fill in to the next tab stop, this function will fill
the space with tab characters. This is done so that the difference
algorithms can identify changes in a file when tabs are replaced by
spaces and vice versa. At the end of the HTML generation, the tab
characters will be replaced with a nonbreakable space.
"""
def expand_tabs(line):
# hide real spaces
line = line.replace(' ','\0')
# expand tabs into spaces
line = line.expandtabs(self._tabsize)
# replace spaces from expanded tabs back into tab characters
# (we'll replace them with markup after we do differencing)
line = line.replace(' ','\t')
return line.replace('\0',' ').rstrip('\n')
fromlines = [expand_tabs(line) for line in fromlines]
tolines = [expand_tabs(line) for line in tolines]
return fromlines,tolines
def _split_line(self,data_list,line_num,text):
"""Builds list of text lines by splitting text lines at wrap point
This function will determine if the input text line needs to be
wrapped (split) into separate lines. If so, the first wrap point
will be determined and the first line appended to the output
text line list. This function is used recursively to handle
the second part of the split line to further split it.
"""
# if blank line or context separator, just add it to the output list
if not line_num:
data_list.append((line_num,text))
return
# if line text doesn't need wrapping, just add it to the output list
size = len(text)
max = self._wrapcolumn
if (size <= max) or ((size -(text.count('\0')*3)) <= max):
data_list.append((line_num,text))
return
# scan text looking for the wrap point, keeping track if the wrap
# point is inside markers
i = 0
n = 0
mark = ''
while n < max and i < size:
if text[i] == '\0':
i += 1
mark = text[i]
i += 1
elif text[i] == '\1':
i += 1
mark = ''
else:
i += 1
n += 1
# wrap point is inside text, break it up into separate lines
line1 = text[:i]
line2 = text[i:]
# if wrap point is inside markers, place end marker at end of first
# line and start marker at beginning of second line because each
# line will have its own table tag markup around it.
if mark:
line1 = line1 + '\1'
line2 = '\0' + mark + line2
# tack on first line onto the output list
data_list.append((line_num,line1))
# use this routine again to wrap the remaining text
self._split_line(data_list,'>',line2)
def _line_wrapper(self,diffs):
"""Returns iterator that splits (wraps) mdiff text lines"""
# pull from/to data and flags from mdiff iterator
for fromdata,todata,flag in diffs:
# check for context separators and pass them through
if flag is None:
yield fromdata,todata,flag
continue
(fromline,fromtext),(toline,totext) = fromdata,todata
# for each from/to line split it at the wrap column to form
# list of text lines.
fromlist,tolist = [],[]
self._split_line(fromlist,fromline,fromtext)
self._split_line(tolist,toline,totext)
# yield from/to line in pairs inserting blank lines as
# necessary when one side has more wrapped lines
while fromlist or tolist:
if fromlist:
fromdata = fromlist.pop(0)
else:
fromdata = ('',' ')
if tolist:
todata = tolist.pop(0)
else:
todata = ('',' ')
yield fromdata,todata,flag
def _collect_lines(self,diffs):
"""Collects mdiff output into separate lists
Before storing the mdiff from/to data into a list, it is converted
into a single line of text with HTML markup.
"""
fromlist,tolist,flaglist = [],[],[]
# pull from/to data and flags from mdiff style iterator
for fromdata,todata,flag in diffs:
try:
# store HTML markup of the lines into the lists
fromlist.append(self._format_line(0,flag,*fromdata))
tolist.append(self._format_line(1,flag,*todata))
except TypeError:
# exceptions occur for lines where context separators go
fromlist.append(None)
tolist.append(None)
flaglist.append(flag)
return fromlist,tolist,flaglist
def _format_line(self,side,flag,linenum,text):
"""Returns HTML markup of "from" / "to" text lines
side -- 0 or 1 indicating "from" or "to" text
flag -- indicates if difference on line
linenum -- line number (used for line number column)
text -- line text to be marked up
"""
try:
linenum = '%d' % linenum
id = ' id="%s%s"' % (self._prefix[side],linenum)
except TypeError:
# handle blank lines where linenum is '>' or ''
id = ''
# replace those things that would get confused with HTML symbols
text=text.replace("&","&amp;").replace(">","&gt;").replace("<","&lt;")
# make space non-breakable so they don't get compressed or line wrapped
text = text.replace(' ','&nbsp;').rstrip()
return '<td class="diff_header"%s>%s</td><td nowrap="nowrap">%s</td>' \
% (id,linenum,text)
def _make_prefix(self):
"""Create unique anchor prefixes"""
# Generate a unique anchor prefix so multiple tables
# can exist on the same HTML page without conflicts.
fromprefix = "from%d_" % HtmlDiff._default_prefix
toprefix = "to%d_" % HtmlDiff._default_prefix
HtmlDiff._default_prefix += 1
# store prefixes so line format method has access
self._prefix = [fromprefix,toprefix]
def _convert_flags(self,fromlist,tolist,flaglist,context,numlines):
"""Makes list of "next" links"""
# all anchor names will be generated using the unique "to" prefix
toprefix = self._prefix[1]
# process change flags, generating middle column of next anchors/links
next_id = ['']*len(flaglist)
next_href = ['']*len(flaglist)
num_chg, in_change = 0, False
last = 0
for i,flag in enumerate(flaglist):
if flag:
if not in_change:
in_change = True
last = i
# at the beginning of a change, drop an anchor a few lines
# (the context lines) before the change for the previous
# link
i = max([0,i-numlines])
next_id[i] = ' id="difflib_chg_%s_%d"' % (toprefix,num_chg)
# at the beginning of a change, drop a link to the next
# change
num_chg += 1
next_href[last] = '<a href="#difflib_chg_%s_%d">n</a>' % (
toprefix,num_chg)
else:
in_change = False
# check for cases where there is no content to avoid exceptions
if not flaglist:
flaglist = [False]
next_id = ['']
next_href = ['']
last = 0
if context:
fromlist = ['<td></td><td>&nbsp;No Differences Found&nbsp;</td>']
tolist = fromlist
else:
fromlist = tolist = ['<td></td><td>&nbsp;Empty File&nbsp;</td>']
# if not a change on first line, drop a link
if not flaglist[0]:
next_href[0] = '<a href="#difflib_chg_%s_0">f</a>' % toprefix
# redo the last link to link to the top
next_href[last] = '<a href="#difflib_chg_%s_top">t</a>' % (toprefix)
return fromlist,tolist,flaglist,next_href,next_id
def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False,
numlines=5):
"""Returns HTML table of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
the "next" link anchors before the next change (so click of
"next" link jumps to just before the change).
"""
# make unique anchor prefixes so that multiple tables may exist
# on the same page without conflict.
self._make_prefix()
# change tabs to spaces before it gets more difficult after we insert
# markup
fromlines,tolines = self._tab_newline_replace(fromlines,tolines)
# create diffs iterator which generates side by side from/to data
if context:
context_lines = numlines
else:
context_lines = None
diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk,
charjunk=self._charjunk)
# set up iterator to wrap lines that exceed desired width
if self._wrapcolumn:
diffs = self._line_wrapper(diffs)
# collect up from/to lines and flags into lists (also format the lines)
fromlist,tolist,flaglist = self._collect_lines(diffs)
# process change flags, generating middle column of next anchors/links
fromlist,tolist,flaglist,next_href,next_id = self._convert_flags(
fromlist,tolist,flaglist,context,numlines)
s = []
fmt = ' <tr><td class="diff_next"%s>%s</td>%s' + \
'<td class="diff_next">%s</td>%s</tr>\n'
for i in range(len(flaglist)):
if flaglist[i] is None:
# mdiff yields None on separator lines; skip the bogus ones
# generated for the first line
if i > 0:
s.append(' </tbody> \n <tbody>\n')
else:
s.append( fmt % (next_id[i],next_href[i],fromlist[i],
next_href[i],tolist[i]))
if fromdesc or todesc:
header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % fromdesc,
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % todesc)
else:
header_row = ''
table = self._table_template % dict(
data_rows=''.join(s),
header_row=header_row,
prefix=self._prefix[1])
return table.replace('\0+','<span class="diff_add">'). \
replace('\0-','<span class="diff_sub">'). \
replace('\0^','<span class="diff_chg">'). \
replace('\1','</span>'). \
replace('\t','&nbsp;')
del re
def restore(delta, which):
r"""
Generate one of the two sequences that generated a delta.
Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
lines originating from file 1 or 2 (parameter `which`), stripping off line
prefixes.
Examples:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
... 'ore\ntree\nemu\n'.splitlines(1))
>>> diff = list(diff)
>>> print(''.join(restore(diff, 1)), end="")
one
two
three
>>> print(''.join(restore(diff, 2)), end="")
ore
tree
emu
"""
try:
tag = {1: "- ", 2: "+ "}[int(which)]
except KeyError:
raise ValueError('unknown delta choice (must be 1 or 2): %r'
% which)
prefixes = (" ", tag)
for line in delta:
if line[:2] in prefixes:
yield line[2:]
def _test():
import doctest, difflib
return doctest.testmod(difflib)
if __name__ == "__main__":
_test()
|
vertexproject/synapse
|
refs/heads/master
|
synapse/tests/test_cmds_cron.py
|
1
|
import asyncio
import logging
import datetime
from datetime import timezone as tz
from unittest import mock
import synapse.lib.cmdr as s_cmdr
import synapse.lib.provenance as s_provenance
import synapse.tests.utils as s_t_utils
MINSECS = 60
HOURSECS = 60 * MINSECS
DAYSECS = 24 * HOURSECS
logger = logging.getLogger(__name__)
class CmdCronTest(s_t_utils.SynTest):
async def test_cron(self):
MONO_DELT = 1543827303.0
unixtime = datetime.datetime(year=2018, month=12, day=5, hour=7, minute=0, tzinfo=tz.utc).timestamp()
s_provenance.reset()
def timetime():
return unixtime
def looptime():
return unixtime - MONO_DELT
loop = asyncio.get_running_loop()
with mock.patch.object(loop, 'time', looptime), mock.patch('time.time', timetime):
async with self.getTestCoreAndProxy() as (realcore, core):
outp = self.getTestOutp()
async with await s_cmdr.getItemCmdr(core, outp=outp) as cmdr:
async def waitForCron(guid):
'''
Because the wall clock is "frozen" for this test unless we manually advance it, we can't sleep
non-zero amounts. However, we are running in the same asyncio loop as the agenda. Just
sleep(0) in a loop until the cron job is not running anymore
'''
for _ in range(300):
await asyncio.sleep(0)
crons = await core.listCronJobs()
cron = [c for c in crons if c.get('iden') == guid][0]
if not cron['isrunning']:
break
else:
# the cron job didn't finish after *a lot* of sleeps?!
self.true(0)
# Various silliness
await cmdr.runCmdLine('cron')
self.true(outp.expect('Manages cron jobs in a cortex'))
await cmdr.runCmdLine('cron timemachine')
self.true(outp.expect('invalid choice'))
await cmdr.runCmdLine('cron list')
self.true(outp.expect('No cron jobs found'))
await cmdr.runCmdLine('cron ls')
self.true(outp.expect('No cron jobs found'))
outp.clear()
await cmdr.runCmdLine("cron add -M+1,beeroclock {[graph:node='*' :type=m1]}")
self.true(outp.expect('failed to parse parameter'))
await cmdr.runCmdLine("cron add -m nosuchmonth -d=-2 {#foo}")
self.true(outp.expect('failed to parse fixed parameter'))
outp.clear()
await cmdr.runCmdLine("cron add -m 8nosuchmonth -d=-2 {#foo}")
self.true(outp.expect('failed to parse fixed parameter'))
await cmdr.runCmdLine("cron add -d Mon -m +3 {#foo}")
self.true(outp.expect('provide a recurrence value with day of week'))
await cmdr.runCmdLine("cron add -dMon -m June {#foo}")
self.true(outp.expect('fix month or year with day of week'))
await cmdr.runCmdLine("cron add -dMon -m +3 -y +2 {#foo}")
self.true(outp.expect('more than 1 recurrence'))
await cmdr.runCmdLine("cron add --year=2019 {#foo}")
self.true(outp.expect('year may not be a fixed value'))
await cmdr.runCmdLine("cron add {#foo}")
self.true(outp.expect('must provide at least one optional'))
await cmdr.runCmdLine("cron add -H3 -M +4 {#foo}")
self.true(outp.expect('fixed unit may not be larger'))
outp.clear()
await cmdr.runCmdLine('cron add -d Tuesday,1 {#foo}')
self.true(outp.expect('failed to parse day value'))
outp.clear()
await cmdr.runCmdLine('cron add -d Fri,3 {#foo}')
self.true(outp.expect('failed to parse day value'))
outp.clear()
await cmdr.runCmdLine('cron add }')
self.true(outp.expect('BadSyntax'))
# add a mechanism on which we can wait...
await realcore.nodes('$lib.queue.add(foo)')
async def getNextFoo():
return await asyncio.wait_for(realcore.callStorm('''
$foo = $lib.queue.get(foo)
($offs, $retn) = $foo.get()
$foo.cull($offs)
return($retn)
'''), timeout=5)
##################
# Start simple: add a cron job that creates a node every minute
outp.clear()
await cmdr.runCmdLine("cron add -M +1 {$lib.queue.get(foo).put(bar)}")
self.true(outp.expect('Created cron job'))
guid = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]
unixtime += 60
self.eq('bar', await getNextFoo())
await cmdr.runCmdLine('cron list')
self.true(outp.expect('(bar)'))
# Make sure it ran
await cmdr.runCmdLine(f"cron mod {guid[:6]} {{$lib.queue.get(foo).put(baz)}}")
self.true(outp.expect('Modified cron job'))
await cmdr.runCmdLine(f"cron edit xxx {{[graph:node='*' :type=m2]}}")
self.true(outp.expect('does not match'))
await cmdr.runCmdLine(f"cron mod xxx yyy")
self.true(outp.expect('expected second argument to start with {'))
# Make sure the old one didn't run and the new query ran
unixtime += 60
self.eq('baz', await getNextFoo())
outp.clear()
# Delete the job
await cmdr.runCmdLine(f"cron del {guid}")
self.true(outp.expect('Deleted cron job'))
await cmdr.runCmdLine(f"cron del xxx")
self.true(outp.expect('does not match'))
await cmdr.runCmdLine(f"cron rm xxx")
self.true(outp.expect('does not match'))
# Make sure deleted job didn't run
unixtime += 60
await asyncio.sleep(0)
self.eq(0, await realcore.callStorm('return($lib.queue.get(foo).size())'))
# Test fixed minute, i.e. every hour at 17 past
unixtime = datetime.datetime(year=2018, month=12, day=5, hour=7, minute=10,
tzinfo=tz.utc).timestamp()
await cmdr.runCmdLine("cron add -M 17 {$lib.queue.get(foo).put(faz)}")
guid = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]
unixtime += 7 * MINSECS
self.eq('faz', await getNextFoo())
await cmdr.runCmdLine(f"cron del {guid}")
##################
# Test day increment
await cmdr.runCmdLine("cron add -d +2 {$lib.queue.get(foo).put(d1)}")
self.true(outp.expect('Created cron job'))
guid1 = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]
unixtime += DAYSECS
# Make sure it *didn't* run
await asyncio.sleep(0)
self.eq(0, await realcore.callStorm('return($lib.queue.get(foo).size())'))
unixtime += DAYSECS
self.eq('d1', await getNextFoo())
unixtime += DAYSECS * 2
outp.clear()
self.eq('d1', await getNextFoo())
await cmdr.runCmdLine(f"cron del {guid1}")
outp.expect('Deleted cron job')
##################
# Test fixed day of week: every Monday and Thursday at 3am
unixtime = datetime.datetime(year=2018, month=12, day=11, hour=7, minute=10,
tzinfo=tz.utc).timestamp() # A Tuesday
outp.clear()
await cmdr.runCmdLine("cron add -H 3 -d Mon,Thursday {$lib.queue.get(foo).put(d2)}")
self.true(outp.expect('Created cron job'))
guid2 = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]
unixtime = datetime.datetime(year=2018, month=12, day=12, hour=3, minute=10,
tzinfo=tz.utc).timestamp() # Now Wednesday
outp.clear()
await cmdr.runCmdLine(f'cron stat {guid2}')
self.true(outp.expect('last start time: Never'))
unixtime = datetime.datetime(year=2018, month=12, day=13, hour=3, minute=10,
tzinfo=tz.utc).timestamp() # Now Thursday
self.eq('d2', await getNextFoo())
outp.clear()
await cmdr.runCmdLine(f'cron stat {guid2}')
self.true(outp.expect('last start time: 2018'))
self.true(outp.expect('dayofweek 0'))
outp.clear()
await cmdr.runCmdLine(f"cron del {guid2}")
outp.expect('Deleted cron job')
await cmdr.runCmdLine("cron add -H 3 -d Noday {[graph:node='*' :type=d2]}")
self.true(outp.expect('failed to parse day value "Noday"'))
##################
# Test fixed day of month: second-to-last day of month
await cmdr.runCmdLine("cron add -d-2 -mDec {$lib.queue.get(foo).put(d3)}")
guid = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]
unixtime = datetime.datetime(year=2018, month=12, day=29, hour=0, minute=0,
tzinfo=tz.utc).timestamp() # Now Saturday
unixtime += DAYSECS
self.eq('d3', await getNextFoo())
outp.clear()
await cmdr.runCmdLine(f"cron del {guid}")
outp.expect('Deleted cron job')
##################
# Test month increment
outp.clear()
await cmdr.runCmdLine("cron add -m +2 -d=4 {$lib.queue.get(foo).put(month1)}")
guid = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]
self.true(outp.expect('Created cron job'))
unixtime = datetime.datetime(year=2019, month=2, day=4, hour=0, minute=0,
tzinfo=tz.utc).timestamp() # Now Monday
self.eq('month1', await getNextFoo())
outp.clear()
await cmdr.runCmdLine(f"cron del {guid}")
outp.expect('Deleted cron job')
##################
# Test year increment
outp.clear()
await cmdr.runCmdLine("cron add -y +2 {$lib.queue.get(foo).put(year1)}")
guid2 = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]
self.true(outp.expect('Created cron job'))
unixtime = datetime.datetime(year=2021, month=1, day=1, hour=0, minute=0,
tzinfo=tz.utc).timestamp() # Now Friday
self.eq('year1', await getNextFoo())
outp.clear()
await cmdr.runCmdLine(f'cron stat {guid2[:6]}')
self.true(outp.expect("{'month': 1, 'hour': 0, 'minute': 0, 'dayofmonth': 1}"))
outp.clear()
await cmdr.runCmdLine(f"cron del {guid2}")
outp.expect('Deleted cron job')
# Make sure second-to-last day works for February
outp.clear()
await cmdr.runCmdLine("cron add -m February -d=-2 {$lib.queue.get(foo).put(year2)}")
self.true(outp.expect('Created cron job'))
unixtime = datetime.datetime(year=2021, month=2, day=27, hour=0, minute=0,
tzinfo=tz.utc).timestamp() # Now Saturday
self.eq('year2', await getNextFoo())
##################
# Test 'at' command
outp.clear()
await cmdr.runCmdLine('at')
self.true(outp.expect('Adds a non-recurring'))
await cmdr.runCmdLine('at --not-a-real-flag')
self.true(outp.expect('the following arguments'))
await cmdr.runCmdLine('at {#foo} {#bar}')
self.true(outp.expect('only a single query'))
await cmdr.runCmdLine('at {#foo}')
self.true(outp.expect('at least'))
await cmdr.runCmdLine('at +1')
self.true(outp.expect('missing unit'))
await cmdr.runCmdLine('at +1parsec')
self.true(outp.expect('Trouble parsing'))
await cmdr.runCmdLine('at +1day')
self.true(outp.expect('Missing query'))
await cmdr.runCmdLine("at +5 minutes {$lib.queue.get(foo).put(at1)}")
unixtime += 5 * MINSECS
self.eq('at1', await getNextFoo())
await cmdr.runCmdLine("at +1 day +7 days {$lib.queue.get(foo).put(at2)}")
guid = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]
unixtime += DAYSECS
await waitForCron(guid)
self.eq('at2', await getNextFoo())
unixtime += 6 * DAYSECS + 1
self.eq('at2', await getNextFoo())
await cmdr.runCmdLine("at 202104170415 {$lib.queue.get(foo).put(at3)}")
unixtime = datetime.datetime(year=2021, month=4, day=17, hour=4, minute=15,
tzinfo=tz.utc).timestamp() # Now Saturday
self.eq('at3', await getNextFoo())
outp.clear()
##################
# Test 'stat' command
await cmdr.runCmdLine(f'cron stat xxx')
self.true(outp.expect('provided iden does not match any'))
await cmdr.runCmdLine(f'cron stat {guid[:6]}')
self.true(outp.expect('last result: finished successfully with 0 nodes'))
self.true(outp.expect('entries: <None>'))
##################
# Test 'enable' 'disable' commands
await cmdr.runCmdLine(f'cron enable xxx')
self.true(outp.expect('provided iden does not match any'))
outp.clear()
await cmdr.runCmdLine(f'cron disable xxx')
self.true(outp.expect('provided iden does not match any'))
outp.clear()
await cmdr.runCmdLine(f'cron disable {guid[:6]}')
await cmdr.runCmdLine(f'cron stat {guid[:6]}')
self.true(outp.expect(f'enabled: N'))
outp.clear()
await cmdr.runCmdLine(f'cron enable {guid[:6]}')
await cmdr.runCmdLine(f'cron stat {guid[:6]}')
self.true(outp.expect(f'enabled: Y'))
outp.clear()
###################
# Delete an expired at job
outp.clear()
await cmdr.runCmdLine(f"cron del {guid}")
self.true(outp.expect('Deleted cron job'))
##################
# Test the aliases
outp.clear()
await cmdr.runCmdLine('cron add --hourly 15 {#bar}')
guid = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]
await cmdr.runCmdLine(f'cron stat {guid[:6]}')
self.true(outp.expect("{'minute': 15}"))
outp.clear()
await cmdr.runCmdLine('cron add --daily 05:47 {#bar}')
guid = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]
await cmdr.runCmdLine(f'cron stat {guid[:6]}')
self.true(outp.expect("{'hour': 5, 'minute': 47"))
outp.clear()
await cmdr.runCmdLine('cron add --monthly=-1:12:30 {#bar}')
guid = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]
await cmdr.runCmdLine(f'cron stat {guid[:6]}')
self.true(outp.expect("{'hour': 12, 'minute': 30, 'dayofmonth': -1}"))
outp.clear()
await cmdr.runCmdLine('cron add --yearly 04:17:12:30 {#bar}')
guid = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]
await cmdr.runCmdLine(f'cron stat {guid[:6]}')
self.true(outp.expect("{'month': 4, 'hour': 12, 'minute': 30, 'dayofmonth': 17}"))
outp.clear()
await cmdr.runCmdLine('cron add --yearly 04:17:12 {#bar}')
self.true(outp.expect('Failed to parse parameter'))
outp.clear()
await cmdr.runCmdLine('cron add --daily xx:xx {#bar}')
self.true(outp.expect('Failed to parse ..ly parameter'))
outp.clear()
await cmdr.runCmdLine('cron add --hourly 1 -M 17 {#bar}')
self.true(outp.expect('may not use both'))
# Test manipulating cron jobs as another user
bond = await realcore.auth.addUser('bond')
async with realcore.getLocalProxy(user='bond') as tcore:
toutp = self.getTestOutp()
tcmdr = await s_cmdr.getItemCmdr(tcore, outp=toutp)
await tcmdr.runCmdLine('cron list')
self.true(toutp.expect('No cron jobs found'))
toutp.clear()
await tcmdr.runCmdLine(f'cron disable {guid[:6]}')
self.true(toutp.expect('provided iden does not match'))
toutp.clear()
await tcmdr.runCmdLine(f'cron enable {guid[:6]}')
self.true(toutp.expect('provided iden does not match'))
toutp.clear()
await tcmdr.runCmdLine(f'cron edit {guid[:6]} {{#foo}}')
self.true(toutp.expect('provided iden does not match'))
toutp.clear()
await tcmdr.runCmdLine(f'cron del {guid[:6]}')
self.true(toutp.expect('provided iden does not match'))
# Give explicit perm
await core.addUserRule(bond.iden, (True, ('cron', 'get')))
toutp.clear()
await tcmdr.runCmdLine('cron list')
self.true(toutp.expect('root'))
await core.addUserRule(bond.iden, (True, ('cron', 'set')))
toutp.clear()
await tcmdr.runCmdLine(f'cron disable {guid[:6]}')
self.true(toutp.expect('Disabled cron job'))
toutp.clear()
await tcmdr.runCmdLine(f'cron enable {guid[:6]}')
self.true(toutp.expect('Enabled cron job'))
toutp.clear()
await tcmdr.runCmdLine(f'cron edit {guid[:6]} {{#foo}}')
self.true(toutp.expect('Modified cron job'))
await core.addUserRule(bond.iden, (True, ('cron', 'del')))
toutp.clear()
await tcmdr.runCmdLine(f'cron del {guid[:6]}')
self.true(toutp.expect('Deleted cron job'))
|
stanlyxiang/incubator-hawq
|
refs/heads/master
|
tools/bin/gppylib/gpcoverage.py
|
12
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This file is a wrapper around figleaf and will start/stop coverage as
needed. It also includes a method for generating the HTML reports.
"""
import os
import random
import figleaf
import pickle
from glob import glob
from gppylib import gplog
from gppylib.commands.base import Command, LOCAL, REMOTE, ExecutionContext, RemoteExecutionContext, WorkerPool
from gppylib.commands.unix import RemoveFiles, Scp
from gppylib.operations import Operation
from gppylib.operations.unix import ListFiles, ListRemoteFiles, MakeDir
logger = gplog.get_default_logger()
COVERAGE_FILENAME = 'cover.out'
#------------------------------------------------------------------------------
class GpWriteFigleafCoverageHtml(Command):
"""Command to write out figleaf html reports to disk based on the
coverage information that has been collected."""
def __init__(self,name,filename, directory,ctxt=LOCAL,remoteHost=None):
gphome = os.getenv("GPHOME", None)
if not gphome:
raise Exception('GPHOME environment variable not set.')
cmdStr = "%s -d %s %s" % (os.path.normpath(gphome + '/lib/python/figleaf/figleaf2html'), directory, filename)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
@staticmethod
def local(name, coverfile, directory):
cmd = GpWriteFigleafCoverageHtml(name, coverfile, directory)
cmd.run(validateAfter=True)
#------------------------------------------------------------------------------
# TODO: We should not allow this class to be instantiated. It offers static
# functionality, and its exposed methods should reflect that.
class GpFigleafCoverage:
"""
Distributed code coverage, built atop figleaf.
Figleaf code coverage is a two-phase process: recording and reporting. Recording simply involves
starting and stopping instrumentation. This results in a pickled data file in a designated location
on disk. (The distributed adaptation here of figleaf relies on this point.) Lastly, we invoke
figleaf2html via the Command above to produce html from the recorded data.
Like figleaf, GpFigleafCoverage is a two-phase process: enabling recording and enabling reporting.
To enable recording, gppylib must be *reactive* to coverage requests; in other words, the entry points to gppylib
must invoke GpFigleafCoverage. Currently, there are two such entry points: gppylib.mainUtils.simple_main and
sbin/gpoperation.py. Moreover, gppylib must be *proactive* to propagate requests to subprocesses or remote processes.
This is accomplished below by hooking gppylib.commands.base.ExecutionContext, and its inherited classes, in order
to propagate a few key environment variables needed below: USE_FIGLEAF, FIGLEAF_DIR, and FIGLEAF_PID.
To enable reporting, we must aggregate the data that the various python interpreters across subprocesses
and remote processes have generated. This Operation relies on knowledge of how figleaf's resulting data is stored
on disk. For more detail, see FinalizeCoverage below.
It will help to explain how recording and reporting come together. GpFigleafCoverage recording is expected to produce,
and its reporting is dependent upon, the following directory structure:
<base>/*.out,*.html - Global coverage data, aggregated across multiple runs
<base>/<pid>/*.out,*.html - Coverage data pertaining to <pid>, where <pid> is the
process id of the originating python program, on the master
<base>/<pid>/<comp>/*.out,*.html - Coverage data pertaining to some subprocess or remote process
that is invoked as a subcomponent of the overall program given by <pid>
For clarity, the rest of the code will adopt the following coding convention:
base_dir := <base>
pid_dir := <base>/<pid>
comp_dir := <base>/<pid>/<comp>
"""
# TODO: change directory structure to something more human-readable
# How about <base>/<program_name><pid>/<program_name><rand>/*.out,*.html ?
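# Typical lifecycle (illustrative sketch, assuming USE_FIGLEAF is set in the
# environment):
#   cov = GpFigleafCoverage()
#   cov.start()              # begins figleaf recording, propagates env vars
#   ...run the utility...
#   cov.stop()               # writes the pickled coverage data to disk
#   cov.generate_report()    # runs figleaf2html and aggregates results
#   cov.delete_files()       # optional cleanup of coverage artifacts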
def __init__(self):
try:
self.directory = os.getenv('FIGLEAF_DIR', None)
if self.directory is None:
self.directory = os.path.normpath(os.path.expanduser("~") + '/.figleaf')
self.my_pid = str(os.getpid())
self.main_pid = os.getenv('FIGLEAF_PID', self.my_pid)
randstring = ''.join(random.choice('0123456789') for x in range(20))
self.filename = os.path.join(self.directory, self.main_pid, randstring, COVERAGE_FILENAME)
self.running = False
logger.debug('Code coverage file name: %s' % self.filename)
except Exception, e:
logger.exception('Error initializing code coverage')
def start(self):
"""Starts coverage collection if the environment variable USE_FIGLEAF is set."""
try:
if os.getenv('USE_FIGLEAF', None):
logger.info('Code coverage will be generated')
MakeDir(os.path.dirname(self.filename)).run()
self.running = True
ExecutionContext.propagate_env_map.update({'FIGLEAF_DIR': os.getenv('FIGLEAF_DIR', self.directory),
'USE_FIGLEAF': 1,
'FIGLEAF_PID': self.main_pid })
figleaf.start()
except Exception, e:
logger.error('Error starting code coverage: %s' % e)
def stop(self):
"""Stops code coverage."""
try:
if self.running:
logger.info('Stopping code coverage')
figleaf.stop()
figleaf.write_coverage(self.filename)
self.running = False
for k in ['FIGLEAF_DIR', 'USE_FIGLEAF', 'FIGLEAF_PID']:
del ExecutionContext.propagate_env_map[k]
except Exception, e:
logger.error('Error stopping code coverage: %s' % e)
def generate_report(self):
"""Generates the html reports and puts them in the directory specified."""
if os.getenv('USE_FIGLEAF', None):
try:
directory = os.path.dirname(self.filename)
logger.info('Generating code coverage HTML reports to %s' % directory)
GpWriteFigleafCoverageHtml.local('Generate HTML', self.filename, directory)
if self.main_pid == self.my_pid:
FinalizeCoverage(trail = RemoteExecutionContext.trail,
pid = self.main_pid,
base_dir = self.directory).run()
except Exception, e:
logger.exception('Error generating HTML code coverage reports.')
def delete_files(self):
"""Deletes code coverage files."""
if os.getenv('USE_FIGLEAF', None):
logger.info('Deleting coverage files...')
try:
RemoveFiles.local('Remove coverage file', self.filename)
directory = os.path.dirname(self.filename)
RemoveFiles.local('Remove html files', directory + '/*.html')
except:
logger.error('Failed to clean up coverage files')
# The coverage tool to use
#if os.getenv('USE_FIGLEAF', None):
GP_COVERAGE_CLASS=GpFigleafCoverage
#else:
# GP_COVERAGE_CLASS=<some other coverage class>
#------------------------------------------------------------------------------
class GpCoverage(GP_COVERAGE_CLASS):
"""Class the controls code coverage. Right now this inherits from
GpFigleafCoverage, but in the future we may find a better code coverage
tool and switch to that. With this class, we can do that without
touching any of the management utilities or modules."""
pass
#------------------------------------------------------------------------------
class FinalizeCoverage(Operation):
"""
This aggregates coverage data from across the cluster for the current process (which is soon to complete).
Then, we update the global coverage data that persists from run to run at <base_dir>/*.out,*.html.
"""
def __init__(self, trail, pid, base_dir):
self.trail = trail
self.pid = pid
self.base_dir = base_dir
def execute(self):
pid_dir = os.path.join(self.base_dir, self.pid)
# update the pid-level coverage statistics, which reside within pid_dir
# this requires: collect coverage data, merge data, save, and generate html
CollectCoverage(trail = self.trail, pid_dir = pid_dir).run()
partial_coverages = LoadPartialCoverages(pid_dir = pid_dir).run()
cumulative_coverage = {}
for partial_coverage in partial_coverages:
MergeCoverage(input = partial_coverage, output = cumulative_coverage).run()
SaveCoverage(obj = cumulative_coverage,
path = os.path.join(pid_dir, COVERAGE_FILENAME)).run()
GpWriteFigleafCoverageHtml.local('Generate HTML', os.path.join(pid_dir, COVERAGE_FILENAME), pid_dir)
# update the global coverage statistics, which reside within self.base_dir
overall_coverage = LoadCoverage(os.path.join(self.base_dir, COVERAGE_FILENAME)).run()
MergeCoverage(input = cumulative_coverage, output = overall_coverage).run()
SaveCoverage(obj = overall_coverage,
path = os.path.join(self.base_dir, COVERAGE_FILENAME)).run()
GpWriteFigleafCoverageHtml.local('Generate HTML', os.path.join(self.base_dir, COVERAGE_FILENAME), self.base_dir)
#------------------------------------------------------------------------------
class CollectCoverage(Operation):
"""
Simply copy over <base>/<pid>/<comp> dirs back to the master. This may
be an unnecessary step IF <base> is an NFS mount.
"""
def __init__(self, trail, pid_dir):
self.trail = trail
self.pid_dir = pid_dir
def execute(self):
pool = WorkerPool()
given = set(ListFiles(self.pid_dir).run())
try:
for host in self.trail:
available = ListRemoteFiles(self.pid_dir, host).run()
to_copy = [dir for dir in available if dir not in given]
for dir in to_copy:
comp_dir = os.path.join(self.pid_dir, dir)
pool.addCommand(Scp('collect coverage',
srcFile = comp_dir,
srcHost = host,
dstFile = comp_dir,
recursive = True))
finally:
pool.join()
#------------------------------------------------------------------------------
class LoadCoverage(Operation):
""" Unpickles and returns an object residing at a current path """
def __init__(self, path):
self.path = path
def execute(self):
try:
with open(self.path, 'r') as f:
obj = pickle.load(f)
return obj
except (IOError, OSError):
logger.exception('Failed to un-pickle coverage off disk.')
return {}
#------------------------------------------------------------------------------
class SaveCoverage(Operation):
""" Pickles a given object to disk at a designated path """
def __init__(self, path, obj):
self.path = path
self.obj = obj
def execute(self):
with open(self.path, 'w') as f:
pickle.dump(self.obj, f)
#------------------------------------------------------------------------------
class LoadPartialCoverages(Operation):
""" Returns an array of unpickled coverage objects from <base>/<pid>/*/<COVERAGE_FILENAME> """
def __init__(self, pid_dir):
self.pid_dir = pid_dir
def execute(self):
coverage_files = glob(os.path.join(self.pid_dir, '*', COVERAGE_FILENAME))
return [LoadCoverage(path).run() for path in coverage_files]
#------------------------------------------------------------------------------
# TODO: Support a parallel merge? Or would there be no point with the Python GIL?
class MergeCoverage(Operation):
"""
Figleaf coverage data is pickled on disk as a dict of filenames to sets of numbers,
where each number denotes a covered line number.
e.g. { "gparray.py" : set(0, 1, 2, ...),
"operations/dump.py" : set(175, 13, 208, ...),
... }
Here, we merge such an input dict into an output dict. As such, we'll be able to pickle
the result back to disk and invoke figleaf2html to get consolidated html reports.
"""
def __init__(self, input, output):
self.input, self.output = input, output
def execute(self):
for filename in self.input:
if filename not in self.output:
self.output[filename] = self.input[filename]
else:
self.output[filename] |= self.input[filename] # set union
|
sanjeevtripurari/hue
|
refs/heads/master
|
desktop/core/src/desktop/tests.py
|
4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import subprocess
import sys
import time
import desktop
import desktop.conf
import desktop.urls
import desktop.views as views
import proxy.conf
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
from nose.tools import assert_true, assert_false, assert_equal, assert_not_equal, assert_raises, nottest
from django.conf import settings
from django.conf.urls import patterns, url
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.db.models import query, CharField, SmallIntegerField
from beeswax.conf import HIVE_SERVER_HOST
from pig.models import PigScript
from useradmin.models import GroupPermission
from desktop.appmanager import DESKTOP_APPS
from desktop.lib import django_mako
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.paginator import Paginator
from desktop.lib.conf import validate_path
from desktop.lib.django_util import TruncatingModel
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.test_utils import grant_access
from desktop.models import Document, Document2, get_data_link
from desktop.views import check_config, home
def setup_test_environment():
"""
Sets up mako to signal template rendering.
"""
django_mako.render_to_string = django_mako.render_to_string_test
setup_test_environment.__test__ = False
def teardown_test_environment():
"""
This method is called by nose_runner when
the tests all finish. This helps track
down when tests aren't cleaning up after
themselves and leaving threads hanging around.
"""
import threading
import desktop.lib.thread_util
# We should shut down all relevant threads by test completion.
threads = list(threading.enumerate())
if len(threads) > 1:
desktop.lib.thread_util.dump_traceback()
assert 1 == len(threads), threads
django_mako.render_to_string = django_mako.render_to_string_normal
teardown_test_environment.__test__ = False
def test_home():
c = make_logged_in_client(username="test_home", groupname="test_home", recreate=True, is_superuser=False)
user = User.objects.get(username="test_home")
response = c.get(reverse(home))
assert_equal(["notmine", "trash", "mine", "history"], json.loads(response.context['json_tags']).keys())
assert_equal(200, response.status_code)
script, created = PigScript.objects.get_or_create(owner=user)
doc = Document.objects.link(script, owner=script.owner, name='test_home')
response = c.get(reverse(home))
assert_true(str(doc.id) in json.loads(response.context['json_documents']))
response = c.get(reverse(home))
tags = json.loads(response.context['json_tags'])
assert_equal([doc.id], tags['mine'][0]['docs'], tags)
assert_equal([], tags['trash']['docs'], tags)
assert_equal([], tags['history']['docs'], tags)
doc.send_to_trash()
response = c.get(reverse(home))
tags = json.loads(response.context['json_tags'])
assert_equal([], tags['mine'][0]['docs'], tags)
assert_equal([doc.id], tags['trash']['docs'], tags)
assert_equal([], tags['history']['docs'], tags)
doc.restore_from_trash()
response = c.get(reverse(home))
tags = json.loads(response.context['json_tags'])
assert_equal([doc.id], tags['mine'][0]['docs'], tags)
assert_equal([], tags['trash']['docs'], tags)
assert_equal([], tags['history']['docs'], tags)
doc.add_to_history()
response = c.get(reverse(home))
tags = json.loads(response.context['json_tags'])
assert_equal([], tags['mine'][0]['docs'], tags)
assert_equal([], tags['trash']['docs'], tags)
assert_equal([], tags['history']['docs'], tags) # We currently don't fetch [doc.id]
def test_skip_wizard():
c = make_logged_in_client() # is_superuser
response = c.get('/', follow=True)
assert_true(['admin_wizard.mako' in _template.filename for _template in response.templates], [_template.filename for _template in response.templates])
c.cookies['hueLandingPage'] = 'home'
response = c.get('/', follow=True)
assert_true(['home.mako' in _template.filename for _template in response.templates], [_template.filename for _template in response.templates])
c.cookies['hueLandingPage'] = ''
response = c.get('/', follow=True)
assert_true(['admin_wizard.mako' in _template.filename for _template in response.templates], [_template.filename for _template in response.templates])
c = make_logged_in_client(username="test_skip_wizard", password="test_skip_wizard", is_superuser=False)
response = c.get('/', follow=True)
assert_true(['home.mako' in _template.filename for _template in response.templates], [_template.filename for _template in response.templates])
c.cookies['hueLandingPage'] = 'home'
response = c.get('/', follow=True)
assert_true(['home.mako' in _template.filename for _template in response.templates], [_template.filename for _template in response.templates])
c.cookies['hueLandingPage'] = ''
response = c.get('/', follow=True)
assert_true(['home.mako' in _template.filename for _template in response.templates], [_template.filename for _template in response.templates])
def test_log_view():
c = make_logged_in_client()
URL = reverse(views.log_view)
LOG = logging.getLogger(__name__)
LOG.warn('une voix m’a réveillé')
# UnicodeDecodeError: 'ascii' codec can't decode byte... should not happen
response = c.get(URL)
assert_equal(200, response.status_code)
c = make_logged_in_client()
URL = reverse(views.log_view)
LOG = logging.getLogger(__name__)
LOG.warn('Got response: PK\x03\x04\n\x00\x00\x08\x00\x00\xad\x0cN?\x00\x00\x00\x00')
# DjangoUnicodeDecodeError: 'utf8' codec can't decode byte 0xad in position 75: invalid start byte... should not happen
response = c.get(URL)
assert_equal(200, response.status_code)
def test_download_log_view():
c = make_logged_in_client()
URL = reverse(views.download_log_view)
LOG = logging.getLogger(__name__)
LOG.warn(u'une voix m’a réveillé')
# UnicodeDecodeError: 'ascii' codec can't decode byte... should not happen
response = c.get(URL)
assert_equal("application/zip", response.get('Content-Type', ''))
def test_dump_config():
c = make_logged_in_client()
CANARY = "abracadabra"
# Depending on the order of the conf.initialize() in settings, the set_for_testing() are not seen in the global settings variable
clear = HIVE_SERVER_HOST.set_for_testing(CANARY)
response1 = c.get(reverse('desktop.views.dump_config'))
assert_true(CANARY in response1.content, response1.content)
response2 = c.get(reverse('desktop.views.dump_config'), dict(private="true"))
assert_true(CANARY in response2.content)
# There are more private variables...
assert_true(len(response1.content) < len(response2.content))
clear()
CANARY = "(localhost|127\.0\.0\.1):(50030|50070|50060|50075)"
clear = proxy.conf.WHITELIST.set_for_testing(CANARY)
response1 = c.get(reverse('desktop.views.dump_config'))
assert_true(CANARY in response1.content)
clear()
# Malformed port per HUE-674
CANARY = "asdfoijaoidfjaosdjffjfjaoojosjfiojdosjoidjfoa"
clear = HIVE_SERVER_HOST.set_for_testing(CANARY)
response1 = c.get(reverse('desktop.views.dump_config'))
assert_true(CANARY in response1.content, response1.content)
clear()
CANARY = '/tmp/spacé.dat'
finish = proxy.conf.WHITELIST.set_for_testing(CANARY)
try:
response = c.get(reverse('desktop.views.dump_config'))
assert_true(CANARY in response.content, response.content)
finally:
finish()
# Not showing some passwords
response = c.get(reverse('desktop.views.dump_config'))
assert_false('bind_password' in response.content)
# Login as someone else
client_not_me = make_logged_in_client(username='not_me', is_superuser=False, groupname='test')
grant_access("not_me", "test", "desktop")
response = client_not_me.get(reverse('desktop.views.dump_config'))
assert_true("You must be a superuser" in response.content, response.content)
os.environ["HUE_CONF_DIR"] = "/tmp/test_hue_conf_dir"
resp = c.get(reverse('desktop.views.dump_config'))
del os.environ["HUE_CONF_DIR"]
assert_true('/tmp/test_hue_conf_dir' in resp.content, resp)
def test_prefs():
c = make_logged_in_client()
# Get everything
response = c.get('/desktop/prefs/')
assert_equal('{}', response.content)
# Set and get
response = c.get('/desktop/prefs/foo', dict(set="bar"))
assert_equal('true', response.content)
response = c.get('/desktop/prefs/foo')
assert_equal('"bar"', response.content)
# Reset (use post this time)
c.post('/desktop/prefs/foo', dict(set="baz"))
response = c.get('/desktop/prefs/foo')
assert_equal('"baz"', response.content)
# Check multiple values
c.post('/desktop/prefs/elephant', dict(set="room"))
response = c.get('/desktop/prefs/')
assert_true("baz" in response.content)
assert_true("room" in response.content)
# Delete everything
c.get('/desktop/prefs/elephant', dict(delete=""))
c.get('/desktop/prefs/foo', dict(delete=""))
response = c.get('/desktop/prefs/')
assert_equal('{}', response.content)
# Check non-existent value
response = c.get('/desktop/prefs/doesNotExist')
assert_equal('null', response.content)
def test_status_bar():
"""
Subs out the status_bar_views registry with temporary examples.
Tests handling of errors on view functions.
"""
backup = views._status_bar_views
views._status_bar_views = []
c = make_logged_in_client()
views.register_status_bar_view(lambda _: HttpResponse("foo", status=200))
views.register_status_bar_view(lambda _: HttpResponse("bar"))
views.register_status_bar_view(lambda _: None)
def f(r):
raise Exception()
views.register_status_bar_view(f)
response = c.get("/desktop/status_bar")
assert_equal("foobar", response.content)
views._status_bar_views = backup
def test_paginator():
"""
Test that the paginator works with partial list.
"""
def assert_page(page, data, start, end):
assert_equal(page.object_list, data)
assert_equal(page.start_index(), start)
assert_equal(page.end_index(), end)
# First page 1-20
obj = range(20)
pgn = Paginator(obj, per_page=20, total=25)
assert_page(pgn.page(1), obj, 1, 20)
# Second page 21-25
obj = range(5)
pgn = Paginator(obj, per_page=20, total=25)
assert_page(pgn.page(2), obj, 21, 25)
# Handle extra data on first page (22 items with a page size of 20)
obj = range(22)
pgn = Paginator(obj, per_page=20, total=25)
assert_page(pgn.page(1), range(20), 1, 20)
# Handle extra data on second page (22 items with a page size of 20)
obj = range(22)
pgn = Paginator(obj, per_page=20, total=25)
assert_page(pgn.page(2), range(5), 21, 25)
# Handle total < len(obj). Only works for QuerySet.
obj = query.QuerySet()
obj._result_cache = range(10)
pgn = Paginator(obj, per_page=10, total=9)
assert_page(pgn.page(1), range(10), 1, 10)
# Still works with a normal complete list
obj = range(25)
pgn = Paginator(obj, per_page=20)
assert_page(pgn.page(1), range(20), 1, 20)
assert_page(pgn.page(2), range(20, 25), 21, 25)
def test_thread_dump():
c = make_logged_in_client()
response = c.get("/desktop/debug/threads")
assert_true("test_thread_dump" in response.content)
def test_truncating_model():
class TinyModel(TruncatingModel):
short_field = CharField(max_length=10)
non_string_field = SmallIntegerField()
a = TinyModel()
a.short_field = 'a' * 9 # One less than its max length
assert_true(a.short_field == 'a' * 9, 'Short-enough field does not get truncated')
a.short_field = 'a' * 11 # One more than its max_length
assert_true(a.short_field == 'a' * 10, 'Too-long field gets truncated')
a.non_string_field = 10**10
assert_true(a.non_string_field == 10**10, 'non-string fields are not truncated')
def test_error_handling():
raise SkipTest
restore_django_debug = desktop.conf.DJANGO_DEBUG_MODE.set_for_testing(False)
restore_500_debug = desktop.conf.HTTP_500_DEBUG_MODE.set_for_testing(False)
exc_msg = "error_raising_view: Test earráid handling"
def error_raising_view(request, *args, **kwargs):
raise Exception(exc_msg)
def popup_exception_view(request, *args, **kwargs):
raise PopupException(exc_msg, title="earráid", detail=exc_msg)
# Add an error view
error_url_pat = patterns('',
url('^500_internal_error$', error_raising_view),
url('^popup_exception$', popup_exception_view))
desktop.urls.urlpatterns.extend(error_url_pat)
try:
def store_exc_info(*args, **kwargs):
pass
# Disable the test client's exception forwarding
c = make_logged_in_client()
c.store_exc_info = store_exc_info
response = c.get('/500_internal_error')
assert_true(any(["500.mako" in _template.filename for _template in response.templates]))
assert_true('Thank you for your patience' in response.content)
assert_true(exc_msg not in response.content)
# Now test the 500 handler with backtrace
desktop.conf.HTTP_500_DEBUG_MODE.set_for_testing(True)
response = c.get('/500_internal_error')
assert_equal(response.template.name, 'Technical 500 template')
assert_true(exc_msg in response.content)
# PopupException
response = c.get('/popup_exception')
assert_true(any(["popup_error.mako" in _template.filename for _template in response.templates]))
assert_true(exc_msg in response.content)
finally:
# Restore the world
for i in error_url_pat:
desktop.urls.urlpatterns.remove(i)
restore_django_debug()
restore_500_debug()
def test_desktop_permissions():
USERNAME = 'test_core_permissions'
GROUPNAME = 'default'
desktop.conf.REDIRECT_WHITELIST.set_for_testing('^\/.*$,^http:\/\/testserver\/.*$')
c = make_logged_in_client(USERNAME, groupname=GROUPNAME, recreate=True, is_superuser=False)
# Access to the basic works
assert_equal(200, c.get('/accounts/login/', follow=True).status_code)
assert_equal(200, c.get('/accounts/logout', follow=True).status_code)
assert_equal(200, c.get('/home', follow=True).status_code)
def test_app_permissions():
USERNAME = 'test_app_permissions'
GROUPNAME = 'impala_only'
desktop.conf.REDIRECT_WHITELIST.set_for_testing('^\/.*$,^http:\/\/testserver\/.*$')
c = make_logged_in_client(USERNAME, groupname=GROUPNAME, recreate=True, is_superuser=False)
# Reset all perms
GroupPermission.objects.filter(group__name=GROUPNAME).delete()
def check_app(status_code, app_name):
if app_name in DESKTOP_APPS:
assert_equal(
status_code,
c.get('/' + app_name, follow=True).status_code,
'status_code=%s app_name=%s' % (status_code, app_name))
# Access to nothing
check_app(401, 'beeswax')
check_app(401, 'impala')
check_app(401, 'hbase')
# Add access to beeswax
grant_access(USERNAME, GROUPNAME, "beeswax")
check_app(200, 'beeswax')
check_app(401, 'impala')
check_app(401, 'hbase')
# Add access to hbase
grant_access(USERNAME, GROUPNAME, "hbase")
check_app(200, 'beeswax')
check_app(401, 'impala')
check_app(200, 'hbase')
# Reset all perms
GroupPermission.objects.filter(group__name=GROUPNAME).delete()
check_app(401, 'beeswax')
check_app(401, 'impala')
check_app(401, 'hbase')
# Test only impala perm
grant_access(USERNAME, GROUPNAME, "impala")
check_app(401, 'beeswax')
check_app(200, 'impala')
check_app(401, 'hbase')
def test_error_handling_failure():
# Change rewrite_user to call has_hue_permission
# Try to get filebrowser page
# test for default 500 page
# Restore rewrite_user
import desktop.auth.backend
c = make_logged_in_client()
restore_django_debug = desktop.conf.DJANGO_DEBUG_MODE.set_for_testing(False)
restore_500_debug = desktop.conf.HTTP_500_DEBUG_MODE.set_for_testing(False)
original_rewrite_user = desktop.auth.backend.rewrite_user
def rewrite_user(user):
user = original_rewrite_user(user)
delattr(user, 'has_hue_permission')
return user
original_rewrite_user = desktop.auth.backend.rewrite_user
desktop.auth.backend.rewrite_user = rewrite_user
try:
# Make sure we are showing default 500.html page.
# See django.test.client#L246
assert_raises(AttributeError, c.get, reverse('desktop.views.dump_config'))
finally:
# Restore the world
restore_django_debug()
restore_500_debug()
desktop.auth.backend.rewrite_user = original_rewrite_user
def test_404_handling():
view_name = '/the-view-that-is-not-there'
c = make_logged_in_client()
response = c.get(view_name)
assert_true(any(['404.mako' in _template.filename for _template in response.templates]), response.templates)
assert_true('Not Found' in response.content)
assert_true(view_name in response.content)
class RecordingHandler(logging.Handler):
def __init__(self, *args, **kwargs):
logging.Handler.__init__(self, *args, **kwargs)
self.records = []
def emit(self, r):
self.records.append(r)
def test_log_event():
c = make_logged_in_client()
root = logging.getLogger("desktop.views.log_frontend_event")
handler = RecordingHandler()
root.addHandler(handler)
c.get("/desktop/log_frontend_event?level=info&message=foo")
assert_equal("INFO", handler.records[-1].levelname)
assert_equal("Untrusted log event from user test: foo", handler.records[-1].message)
assert_equal("desktop.views.log_frontend_event", handler.records[-1].name)
c.get("/desktop/log_frontend_event?level=error&message=foo2")
assert_equal("ERROR", handler.records[-1].levelname)
assert_equal("Untrusted log event from user test: foo2", handler.records[-1].message)
c.get("/desktop/log_frontend_event?message=foo3")
assert_equal("INFO", handler.records[-1].levelname)
assert_equal("Untrusted log event from user test: foo3", handler.records[-1].message)
c.post("/desktop/log_frontend_event", {
"message": "01234567" * 1024})
assert_equal("INFO", handler.records[-1].levelname)
assert_equal("Untrusted log event from user test: " + "01234567"*(1024/8),
handler.records[-1].message)
root.removeHandler(handler)
def test_validate_path():
reset = desktop.conf.SSL_PRIVATE_KEY.set_for_testing('/')
assert_equal([], validate_path(desktop.conf.SSL_PRIVATE_KEY, is_dir=True))
reset()
reset = desktop.conf.SSL_PRIVATE_KEY.set_for_testing('/tmm/does_not_exist')
assert_not_equal([], validate_path(desktop.conf.SSL_PRIVATE_KEY, is_dir=True))
reset()
@attr('requires_hadoop')
def test_config_check():
reset = (
desktop.conf.SECRET_KEY.set_for_testing(''),
desktop.conf.SECRET_KEY_SCRIPT.set_for_testing(present=False),
desktop.conf.SSL_CERTIFICATE.set_for_testing('foobar'),
desktop.conf.SSL_PRIVATE_KEY.set_for_testing(''),
desktop.conf.DEFAULT_SITE_ENCODING.set_for_testing('klingon')
)
try:
cli = make_logged_in_client()
resp = cli.get('/desktop/debug/check_config')
assert_true('Secret key should be configured' in resp.content, resp)
assert_true('desktop.ssl_certificate' in resp.content, resp)
assert_true('Path does not exist' in resp.content, resp)
assert_true('SSL private key file should be set' in resp.content, resp)
assert_true('klingon' in resp.content, resp)
assert_true('Encoding not supported' in resp.content, resp)
# Set HUE_CONF_DIR and make sure check_config returns appropriate conf
os.environ["HUE_CONF_DIR"] = "/tmp/test_hue_conf_dir"
resp = cli.get('/desktop/debug/check_config')
del os.environ["HUE_CONF_DIR"]
assert_true('/tmp/test_hue_conf_dir' in resp.content, resp)
finally:
for old_conf in reset:
old_conf()
def test_last_access_time():
c = make_logged_in_client(username="access_test")
c.post('/accounts/login/')
login = desktop.auth.views.get_current_users()
before_access_time = time.time()
response = c.get('/home')
after_access_time = time.time()
access = desktop.auth.views.get_current_users()
user = response.context['user']
login_time = login[user]['time']
access_time = access[user]['time']
# Check that 'last_access_time' is later than login time
assert_true(login_time < access_time)
# Check that 'last_access_time' is in between the timestamps before and after the last access path
assert_true(before_access_time < access_time)
assert_true(access_time < after_access_time)
def test_ui_customizations():
custom_banner = 'test ui customization'
reset = (
desktop.conf.CUSTOM.BANNER_TOP_HTML.set_for_testing(custom_banner),
)
try:
c = make_logged_in_client()
resp = c.get('/about', follow=True)
assert_true(custom_banner in resp.content, resp)
finally:
for old_conf in reset:
old_conf()
@attr('requires_hadoop')
def test_check_config_ajax():
c = make_logged_in_client()
response = c.get(reverse(check_config))
assert_true("misconfiguration" in response.content, response.content)
def test_cx_Oracle():
"""
Tests that cx_Oracle (external dependency) is built correctly.
"""
if 'ORACLE_HOME' not in os.environ and 'ORACLE_INSTANTCLIENT_HOME' not in os.environ:
raise SkipTest
try:
import cx_Oracle
return
except ImportError, ex:
if "No module named" in ex.message:
assert_true(False, "cx_Oracle skipped its build. This happens if "
"env var ORACLE_HOME or ORACLE_INSTANTCLIENT_HOME is not defined. "
"So ignore this test failure if your build does not need to work "
"with an oracle backend.")
class TestStrictRedirection():
def setUp(self):
self.client = make_logged_in_client()
self.user = dict(username="test", password="test")
desktop.conf.REDIRECT_WHITELIST.set_for_testing('^\/.*$,^http:\/\/example.com\/.*$')
def test_redirection_blocked(self):
# Redirection with code 301 should be handled properly
# Redirection with Status code 301 example reference: http://www.somacon.com/p145.php
self._test_redirection(redirection_url='http://www.somacon.com/color/html_css_table_border_styles.php',
expected_status_code=403)
# Redirection with code 302 should be handled properly
self._test_redirection(redirection_url='http://www.google.com',
expected_status_code=403)
def test_redirection_allowed(self):
# Redirection to the host where Hue is running should be OK.
self._test_redirection(redirection_url='/', expected_status_code=302)
self._test_redirection(redirection_url='/pig', expected_status_code=302)
self._test_redirection(redirection_url='http://testserver/', expected_status_code=302)
self._test_redirection(redirection_url='https://testserver/', expected_status_code=302, **{
'SERVER_PORT': '443',
'wsgi.url_scheme': 'https',
})
self._test_redirection(redirection_url='http://example.com/', expected_status_code=302)
def _test_redirection(self, redirection_url, expected_status_code, **kwargs):
self.client.get('/accounts/logout', **kwargs)
response = self.client.post('/accounts/login/?next=' + redirection_url, self.user, **kwargs)
assert_equal(expected_status_code, response.status_code)
if expected_status_code == 403:
error_msg = 'Redirect to ' + redirection_url + ' is not allowed.'
assert_true(error_msg in response.content, response.content)
class BaseTestPasswordConfig(object):
SCRIPT = '%s -c "print \'\\n password from script \\n\'"' % sys.executable
def get_config_password(self):
raise NotImplementedError
def get_config_password_script(self):
raise NotImplementedError
def get_password(self):
raise NotImplementedError
def test_read_password_from_script(self):
self._run_test_read_password_from_script_with(present=False)
self._run_test_read_password_from_script_with(data=None)
self._run_test_read_password_from_script_with(data='')
def _run_test_read_password_from_script_with(self, **kwargs):
resets = [
self.get_config_password().set_for_testing(**kwargs),
self.get_config_password_script().set_for_testing(self.SCRIPT),
]
try:
assert_equal(self.get_password(), ' password from script ', 'kwargs: %s' % kwargs)
finally:
for reset in resets:
reset()
def test_config_password_overrides_script_password(self):
resets = [
self.get_config_password().set_for_testing(' password from config '),
self.get_config_password_script().set_for_testing(self.SCRIPT),
]
try:
assert_equal(self.get_password(), ' password from config ')
finally:
for reset in resets:
reset()
def test_password_script_raises_exception(self):
resets = [
self.get_config_password().set_for_testing(present=False),
self.get_config_password_script().set_for_testing(
'%s -c "import sys; sys.exit(1)"' % sys.executable
),
]
try:
assert_raises(subprocess.CalledProcessError, self.get_password)
finally:
for reset in resets:
reset()
resets = [
self.get_config_password().set_for_testing(present=False),
self.get_config_password_script().set_for_testing('/does-not-exist'),
]
try:
assert_raises(subprocess.CalledProcessError, self.get_password)
finally:
for reset in resets:
reset()
class TestSecretKeyConfig(BaseTestPasswordConfig):
def get_config_password(self):
return desktop.conf.SECRET_KEY
def get_config_password_script(self):
return desktop.conf.SECRET_KEY_SCRIPT
def get_password(self):
return desktop.conf.get_secret_key()
class TestDatabasePasswordConfig(BaseTestPasswordConfig):
def get_config_password(self):
return desktop.conf.DATABASE.PASSWORD
def get_config_password_script(self):
return desktop.conf.DATABASE.PASSWORD_SCRIPT
def get_password(self):
return desktop.conf.get_database_password()
class TestLDAPPasswordConfig(BaseTestPasswordConfig):
def get_config_password(self):
return desktop.conf.LDAP_PASSWORD
def get_config_password_script(self):
return desktop.conf.LDAP_PASSWORD_SCRIPT
def get_password(self):
return desktop.conf.get_ldap_password()
class TestLDAPBindPasswordConfig(BaseTestPasswordConfig):
def setup(self):
self.finish = desktop.conf.LDAP.LDAP_SERVERS.set_for_testing({'test': {}})
def teardown(self):
self.finish()
def get_config_password(self):
return desktop.conf.LDAP.LDAP_SERVERS['test'].BIND_PASSWORD
def get_config_password_script(self):
return desktop.conf.LDAP.LDAP_SERVERS['test'].BIND_PASSWORD_SCRIPT
def get_password(self):
return desktop.conf.get_ldap_bind_password(desktop.conf.LDAP.LDAP_SERVERS['test'])
class TestSMTPPasswordConfig(BaseTestPasswordConfig):
def get_config_password(self):
return desktop.conf.SMTP.PASSWORD
def get_config_password_script(self):
return desktop.conf.SMTP.PASSWORD_SCRIPT
def get_password(self):
return desktop.conf.get_smtp_password()
class TestDocument(object):
def setUp(self):
make_logged_in_client(username="original_owner", groupname="test_doc", recreate=True, is_superuser=False)
self.user = User.objects.get(username="original_owner")
make_logged_in_client(username="copy_owner", groupname="test_doc", recreate=True, is_superuser=False)
self.copy_user = User.objects.get(username="copy_owner")
# Get count of existing Document objects
self.doc2_count = Document2.objects.count()
self.doc1_count = Document.objects.count()
self.document2 = Document2.objects.create(name='Test Document2',
type='search-dashboard',
owner=self.user,
description='Test Document2')
self.document = Document.objects.link(content_object=self.document2,
owner=self.user,
name='Test Document',
description='Test Document',
extra='test')
self.document.save()
self.document2.doc.add(self.document)
def tearDown(self):
# Get any Doc2 objects that were created and delete them; Doc1 child objects will be deleted in turn
test_docs = Document2.objects.filter(name__contains='Test Document2')
test_docs.delete()
def test_document_create(self):
assert_equal(Document2.objects.count(), self.doc2_count + 1)
assert_equal(Document.objects.count(), self.doc1_count + 1)
assert_equal(Document2.objects.get(name='Test Document2').id, self.document2.id)
assert_equal(Document.objects.get(name='Test Document').id, self.document.id)
def test_document_copy(self):
name = 'Test Document2 Copy'
doc2 = self.document2.copy(name=name, owner=self.copy_user, description=self.document2.description)
# Test that copying a Document2 object creates another object
assert_equal(Document2.objects.count(), self.doc2_count + 2)
assert_equal(Document.objects.count(), self.doc1_count + 1)
# Test that the content object is not pointing to the same object
assert_not_equal(self.document2.doc, doc2.doc)
# Test that the owner is attributed to the new user
assert_equal(doc2.owner, self.copy_user)
# Test that copying enables attribute overrides
assert_equal(Document2.objects.filter(name=name).count(), 1)
assert_equal(doc2.description, self.document2.description)
doc = self.document.copy(doc2, name=name, owner=self.copy_user, description=self.document2.description)
# Test that copying a Document object creates another Document2 and Document object
assert_equal(Document2.objects.count(), self.doc2_count + 2)
assert_equal(Document.objects.count(), self.doc1_count + 2)
# Test that the content object is not pointing to the same object
assert_not_equal(self.document.content_object, doc.content_object)
# Test that the owner is attributed to the new user
assert_equal(doc.owner, self.copy_user)
# Test that copying enables attribute overrides
assert_equal(Document.objects.filter(name=name).count(), 1)
assert_equal(doc.description, self.document.description)
def test_session_secure_cookie():
resets = [
desktop.conf.SSL_CERTIFICATE.set_for_testing('cert.pem'),
desktop.conf.SSL_PRIVATE_KEY.set_for_testing('key.pem'),
desktop.conf.SESSION.SECURE.set_for_testing(False),
]
try:
assert_true(desktop.conf.is_https_enabled())
assert_false(desktop.conf.SESSION.SECURE.get())
finally:
for reset in resets:
reset()
resets = [
desktop.conf.SSL_CERTIFICATE.set_for_testing('cert.pem'),
desktop.conf.SSL_PRIVATE_KEY.set_for_testing('key.pem'),
desktop.conf.SESSION.SECURE.set_for_testing(True),
]
try:
assert_true(desktop.conf.is_https_enabled())
assert_true(desktop.conf.SESSION.SECURE.get())
finally:
for reset in resets:
reset()
resets = [
desktop.conf.SSL_CERTIFICATE.set_for_testing('cert.pem'),
desktop.conf.SSL_PRIVATE_KEY.set_for_testing('key.pem'),
desktop.conf.SESSION.SECURE.set_for_testing(present=False),
]
try:
assert_true(desktop.conf.is_https_enabled())
assert_true(desktop.conf.SESSION.SECURE.get())
finally:
for reset in resets:
reset()
resets = [
desktop.conf.SSL_CERTIFICATE.set_for_testing(present=None),
desktop.conf.SSL_PRIVATE_KEY.set_for_testing(present=None),
desktop.conf.SESSION.SECURE.set_for_testing(present=False),
]
try:
assert_false(desktop.conf.is_https_enabled())
assert_false(desktop.conf.SESSION.SECURE.get())
finally:
for reset in resets:
reset()
def test_get_data_link():
assert_equal(None, get_data_link({}))
assert_equal('gethue.com', get_data_link({'type': 'link', 'link': 'gethue.com'}))
assert_equal('/hbase/#Cluster/document_demo/query/20150527', get_data_link({'type': 'hbase', 'table': 'document_demo', 'row_key': '20150527'}))
assert_equal('/hbase/#Cluster/document_demo/query/20150527[f1]', get_data_link({'type': 'hbase', 'table': 'document_demo', 'row_key': '20150527', 'fam': 'f1'}))
assert_equal('/hbase/#Cluster/document_demo/query/20150527[f1:c1]', get_data_link({'type': 'hbase', 'table': 'document_demo', 'row_key': '20150527', 'fam': 'f1', 'col': 'c1'}))
assert_equal('/filebrowser/view=/data/hue/1', get_data_link({'type': 'hdfs', 'path': '/data/hue/1'}))
assert_equal('/metastore/table/default/sample_07', get_data_link({'type': 'hive', 'database': 'default', 'table': 'sample_07'}))
|
SNU-Sigma/rosbridge_suite
|
refs/heads/develop
|
rosbridge_library/src/rosbridge_library/capability.py
|
12
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from rosbridge_library.internal.exceptions import InvalidArgumentException
from rosbridge_library.internal.exceptions import MissingArgumentException
class Capability:
""" Handles the operation-specific logic of a rosbridge message
May define one or more opcodes to handle, for example 'publish' or
'call_service'
Each connected client receives its own capability instance, which are
managed by the client's own protocol instance.
Protocol.send() is available to send messages back to the client.
"""
def __init__(self, protocol):
""" Abstract class constructor. All capabilities require a handle to
the containing protocol.
Keyword arguments:
protocol -- the protocol instance for this capability instance
"""
self.protocol = protocol
def handle_message(self, message):
""" Handle an incoming message.
Called by the protocol after having already checked the message op code
Keyword arguments:
message -- the incoming message, deserialized into a dictionary
"""
pass
def finish(self):
""" Notify this capability that the client is finished and that it's
time to free up resources. """
pass
def basic_type_check(self, msg, types_info):
""" Performs basic typechecking on fields in msg.
Keyword arguments:
msg -- a message, deserialized into a dictionary
types_info -- a list of tuples (mandatory, fieldname, fieldtypes) where
mandatory - boolean, is the field mandatory
fieldname - the name of the field in the message
fieldtypes - the expected python type of the field, or a tuple of acceptable types
Throws:
MissingArgumentException -- if a field is mandatory but not present in
the message
InvalidArgumentException -- if a field is present but not of the type
specified by fieldtype
"""
for mandatory, fieldname, fieldtypes in types_info:
if mandatory and fieldname not in msg:
raise MissingArgumentException("Expected a %s field but none was found." % fieldname)
elif fieldname in msg:
if not isinstance(fieldtypes, tuple):
fieldtypes = (fieldtypes, )
valid = False
for typ in fieldtypes:
if isinstance(msg[fieldname], typ):
valid = True
if not valid:
raise InvalidArgumentException("Expected field %s to be one of %s. Invalid value: %s" % (fieldname, fieldtypes, msg[fieldname]))
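# --- Hedged illustrative sketch (not part of the original rosbridge source) ---
# A minimal concrete capability might look like the class below. The opcode
# name 'echo' and the reply dictionary are assumptions made purely for
# illustration; opcode registration itself is handled by the concrete
# Protocol implementation, which is not shown here.
class ExampleEchoCapability(Capability):
    """ Hypothetical capability that echoes a 'msg' string back to the client. """
    def __init__(self, protocol):
        Capability.__init__(self, protocol)
    def handle_message(self, message):
        # 'msg' is mandatory and must be a str; basic_type_check above raises
        # MissingArgumentException or InvalidArgumentException otherwise.
        self.basic_type_check(message, [(True, "msg", str)])
        # Protocol.send() delivers the reply to this capability's client.
        self.protocol.send({"op": "echo", "msg": message["msg"]})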
|
plotly/python-api
|
refs/heads/master
|
packages/python/plotly/plotly/validators/choropleth/colorbar/title/font/_family.py
|
1
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="family",
parent_name="choropleth.colorbar.title.font",
**kwargs
):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
no_blank=kwargs.pop("no_blank", True),
role=kwargs.pop("role", "style"),
strict=kwargs.pop("strict", True),
**kwargs
)
|
gjr80/weewx
|
refs/heads/master
|
bin/weewx/drivers/wmr100.py
|
3
|
#
# Copyright (c) 2009-2015 Tom Keffer <tkeffer@gmail.com>
#
# See the file LICENSE.txt for your full rights.
#
"""Classees and functions for interfacing with an Oregon Scientific WMR100
station. The WMRS200 reportedly works with this driver (NOT the WMR200, which
is a different beast).
The wind sensor reports wind speed, wind direction, and wind gust. It does
not report wind gust direction.
WMR89:
- data logger
- up to 3 channels
- protocol 3 sensors
- THGN800, PRCR800, WTG800
WMR86:
- no data logger
- protocol 3 sensors
- THGR800, WGR800, PCR800, UVN800
The following references were useful for figuring out the WMR protocol:
From Per Ejeklint:
https://github.com/ejeklint/WLoggerDaemon/blob/master/Station_protocol.md
From Rainer Finkeldeh:
http://www.bashewa.com/wmr200-protocol.php
The WMR driver for the wfrog weather system:
http://code.google.com/p/wfrog/source/browse/trunk/wfdriver/station/wmrs200.py
Unfortunately, there is no documentation for PyUSB v0.4, so you have to back
it out of the source code, available at:
https://pyusb.svn.sourceforge.net/svnroot/pyusb/branches/0.4/pyusb.c
"""
from __future__ import absolute_import
from __future__ import print_function
import logging
import time
import operator
from functools import reduce
import usb
import weewx.drivers
import weewx.wxformulas
import weeutil.weeutil
log = logging.getLogger(__name__)
DRIVER_NAME = 'WMR100'
DRIVER_VERSION = "3.5.0"
def loader(config_dict, engine): # @UnusedVariable
return WMR100(**config_dict[DRIVER_NAME])
def confeditor_loader():
return WMR100ConfEditor()
class WMR100(weewx.drivers.AbstractDevice):
"""Driver for the WMR100 station."""
DEFAULT_MAP = {
'pressure': 'pressure',
'windSpeed': 'wind_speed',
'windDir': 'wind_dir',
'windGust': 'wind_gust',
'windBatteryStatus': 'battery_status_wind',
'inTemp': 'temperature_0',
'outTemp': 'temperature_1',
'extraTemp1': 'temperature_2',
'extraTemp2': 'temperature_3',
'extraTemp3': 'temperature_4',
'extraTemp4': 'temperature_5',
'extraTemp5': 'temperature_6',
'extraTemp6': 'temperature_7',
'extraTemp7': 'temperature_8',
'inHumidity': 'humidity_0',
'outHumidity': 'humidity_1',
'extraHumid1': 'humidity_2',
'extraHumid2': 'humidity_3',
'extraHumid3': 'humidity_4',
'extraHumid4': 'humidity_5',
'extraHumid5': 'humidity_6',
'extraHumid6': 'humidity_7',
'extraHumid7': 'humidity_8',
'inTempBatteryStatus': 'battery_status_0',
'outTempBatteryStatus': 'battery_status_1',
'extraBatteryStatus1': 'battery_status_2',
'extraBatteryStatus2': 'battery_status_3',
'extraBatteryStatus3': 'battery_status_4',
'extraBatteryStatus4': 'battery_status_5',
'extraBatteryStatus5': 'battery_status_6',
'extraBatteryStatus6': 'battery_status_7',
'extraBatteryStatus7': 'battery_status_8',
'rain': 'rain',
'rainTotal': 'rain_total',
'rainRate': 'rain_rate',
'hourRain': 'rain_hour',
'rain24': 'rain_24',
'rainBatteryStatus': 'battery_status_rain',
'UV': 'uv',
'uvBatteryStatus': 'battery_status_uv'}
def __init__(self, **stn_dict):
"""Initialize an object of type WMR100.
NAMED ARGUMENTS:
model: Which station model is this?
[Optional. Default is 'WMR100']
timeout: How long to wait, in seconds, before giving up on a response
from the USB port.
[Optional. Default is 15 seconds]
wait_before_retry: How long to wait before retrying.
[Optional. Default is 5 seconds]
max_tries: How many times to try before giving up.
[Optional. Default is 3]
vendor_id: The USB vendor ID for the WMR
[Optional. Default is 0x0fde]
product_id: The USB product ID for the WMR
[Optional. Default is 0xca01]
interface: The USB interface
[Optional. Default is 0]
IN_endpoint: The IN USB endpoint used by the WMR.
[Optional. Default is usb.ENDPOINT_IN + 1]
"""
log.info('Driver version is %s' % DRIVER_VERSION)
self.model = stn_dict.get('model', 'WMR100')
# TODO: Consider putting these in the driver loader instead:
self.record_generation = stn_dict.get('record_generation', 'software')
self.timeout = float(stn_dict.get('timeout', 15.0))
self.wait_before_retry = float(stn_dict.get('wait_before_retry', 5.0))
self.max_tries = int(stn_dict.get('max_tries', 3))
self.vendor_id = int(stn_dict.get('vendor_id', '0x0fde'), 0)
self.product_id = int(stn_dict.get('product_id', '0xca01'), 0)
self.interface = int(stn_dict.get('interface', 0))
self.IN_endpoint = int(stn_dict.get('IN_endpoint', usb.ENDPOINT_IN + 1))
self.sensor_map = dict(self.DEFAULT_MAP)
if 'sensor_map' in stn_dict:
self.sensor_map.update(stn_dict['sensor_map'])
log.info('Sensor map is %s' % self.sensor_map)
self.last_rain_total = None
self.devh = None
self.openPort()
def openPort(self):
dev = self._findDevice()
if not dev:
log.error("Unable to find USB device (0x%04x, 0x%04x)"
% (self.vendor_id, self.product_id))
raise weewx.WeeWxIOError("Unable to find USB device")
self.devh = dev.open()
# Detach any old claimed interfaces
try:
self.devh.detachKernelDriver(self.interface)
except usb.USBError:
pass
try:
self.devh.claimInterface(self.interface)
except usb.USBError as e:
self.closePort()
log.error("Unable to claim USB interface: %s" % e)
raise weewx.WeeWxIOError(e)
def closePort(self):
try:
self.devh.releaseInterface()
except usb.USBError:
pass
try:
self.devh.detachKernelDriver(self.interface)
except usb.USBError:
pass
def genLoopPackets(self):
"""Generator function that continuously returns loop packets"""
# Get a stream of raw packets, then convert them, depending on the
# observation type.
for _packet in self.genPackets():
try:
_packet_type = _packet[1]
if _packet_type in WMR100._dispatch_dict:
# get the observations from the packet
_raw = WMR100._dispatch_dict[_packet_type](self, _packet)
if _raw is not None:
# map the packet labels to schema fields
_record = dict()
for k in self.sensor_map:
if self.sensor_map[k] in _raw:
_record[k] = _raw[self.sensor_map[k]]
# if there are any observations, add time and units
if _record:
for k in ['dateTime', 'usUnits']:
_record[k] = _raw[k]
yield _record
except IndexError:
log.error("Malformed packet: %s" % _packet)
def genPackets(self):
"""Generate measurement packets. These are 8 to 17 byte long packets containing
the raw measurement data.
For a pretty good summary of what's in these packets see
https://github.com/ejeklint/WLoggerDaemon/blob/master/Station_protocol.md
"""
# Wrap the byte generator function in GenWithPeek so we
# can peek at the next byte in the stream. The result, the variable
# genBytes, will be a generator function.
genBytes = weeutil.weeutil.GenWithPeek(self._genBytes_raw())
# Start by throwing away any partial packets:
for ibyte in genBytes:
if genBytes.peek() != 0xff:
break
buff = []
# March through the bytes generated by the generator function genBytes:
for ibyte in genBytes:
# If both this byte and the next one are 0xff, then we are at the end of a record
if ibyte == 0xff and genBytes.peek() == 0xff:
# We are at the end of a packet.
# Compute its checksum. This can throw an exception if the packet is empty.
try:
computed_checksum = reduce(operator.iadd, buff[:-2])
except TypeError as e:
log.debug("Exception while calculating checksum: %s" % e)
else:
actual_checksum = (buff[-1] << 8) + buff[-2]
if computed_checksum == actual_checksum:
# Looks good. Yield the packet
yield buff
else:
log.debug("Bad checksum on buffer of length %d" % len(buff))
# Throw away the next character (which will be 0xff):
next(genBytes)
# Start with a fresh buffer
buff = []
else:
buff.append(ibyte)
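        # Hedged worked example (not part of the original comments): the
        # checksum is little-endian, so a packet whose last two bytes are
        # 0x34 0x12 carries actual_checksum = (0x12 << 8) + 0x34 = 0x1234,
        # which must equal the plain sum of all preceding bytes for the
        # packet to be yielded.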
@property
def hardware_name(self):
return self.model
#===============================================================================
# USB functions
#===============================================================================
def _findDevice(self):
"""Find the given vendor and product IDs on the USB bus"""
for bus in usb.busses():
for dev in bus.devices:
if dev.idVendor == self.vendor_id and dev.idProduct == self.product_id:
return dev
def _genBytes_raw(self):
"""Generates a sequence of bytes from the WMR USB reports."""
try:
# Only need to be sent after a reset or power failure of the station:
self.devh.controlMsg(usb.TYPE_CLASS + usb.RECIP_INTERFACE, # requestType
0x0000009, # request
[0x20,0x00,0x08,0x01,0x00,0x00,0x00,0x00], # buffer
0x0000200, # value
0x0000000, # index
1000) # timeout
except usb.USBError as e:
log.error("Unable to send USB control message: %s" % e)
# Convert to a Weewx error:
raise weewx.WakeupError(e)
nerrors = 0
while True:
try:
# Continually loop, retrieving "USB reports". They are 8 bytes long each.
report = self.devh.interruptRead(self.IN_endpoint,
8, # bytes to read
int(self.timeout * 1000))
# While the report is 8 bytes long, only a smaller, variable portion of it
# has measurement data. This amount is given by byte zero. Return each
# byte, starting with byte one:
for i in range(1, report[0] + 1):
yield report[i]
nerrors = 0
except (IndexError, usb.USBError) as e:
log.debug("Bad USB report received: %s" % e)
nerrors += 1
if nerrors > self.max_tries:
log.error("Max retries exceeded while fetching USB reports")
raise weewx.RetriesExceeded("Max retries exceeded while fetching USB reports")
time.sleep(self.wait_before_retry)
# =========================================================================
# LOOP packet decoding functions
#==========================================================================
def _rain_packet(self, packet):
# NB: in my experiments with the WMR100, it registers in increments of
# 0.04 inches. Per Ejeklint's notes have you divide the packet values
# by 10, but this would result in a 0.4 inch bucket --- too big. So,
# I'm dividing by 100.
_record = {
'rain_rate' : ((packet[3] << 8) + packet[2]) / 100.0,
'rain_hour' : ((packet[5] << 8) + packet[4]) / 100.0,
'rain_24' : ((packet[7] << 8) + packet[6]) / 100.0,
'rain_total' : ((packet[9] << 8) + packet[8]) / 100.0,
'battery_status_rain': packet[0] >> 4,
'dateTime': int(time.time() + 0.5),
'usUnits': weewx.US}
# Because the WMR does not offer anything like bucket tips, we must
# calculate it by looking for the change in total rain. Of course, this
# won't work for the very first rain packet.
_record['rain'] = weewx.wxformulas.calculate_rain(
_record['rain_total'], self.last_rain_total)
self.last_rain_total = _record['rain_total']
return _record
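    # Hedged worked example (not in the original comments): if the previous
    # rain_total was 1.20 in and this packet reports 1.24 in, calculate_rain()
    # yields rain = 0.04 in -- one increment at the 0.04 inch resolution noted
    # above. The very first rain packet yields None, since there is no previous
    # total to diff against.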
def _temperature_packet(self, packet):
_record = {'dateTime': int(time.time() + 0.5),
'usUnits': weewx.METRIC}
# Per Ejeklint's notes don't mention what to do if temperature is
# negative. I think the following is correct. Also, from experience, we
# know that the WMR has problems measuring dewpoint at temperatures
# below about 20F. So ignore dewpoint and let weewx calculate it.
T = (((packet[4] & 0x7f) << 8) + packet[3]) / 10.0
if packet[4] & 0x80:
T = -T
R = float(packet[5])
channel = packet[2] & 0x0f
_record['temperature_%d' % channel] = T
_record['humidity_%d' % channel] = R
_record['battery_status_%d' % channel] = (packet[0] & 0x40) >> 6
return _record
def _temperatureonly_packet(self, packet):
# function added by fstuyk to manage temperature-only sensor THWR800
_record = {'dateTime': int(time.time() + 0.5),
'usUnits': weewx.METRIC}
# Per Ejeklint's notes don't mention what to do if temperature is
# negative. I think the following is correct.
T = (((packet[4] & 0x7f) << 8) + packet[3])/10.0
if packet[4] & 0x80:
T = -T
channel = packet[2] & 0x0f
_record['temperature_%d' % channel] = T
_record['battery_status_%d' % channel] = (packet[0] & 0x40) >> 6
return _record
def _pressure_packet(self, packet):
# Although the WMR100 emits SLP, not all consoles in the series
# (notably, the WMRS200) allow the user to set altitude. So we
# record only the station pressure (raw gauge pressure).
SP = float(((packet[3] & 0x0f) << 8) + packet[2])
_record = {'pressure': SP,
'dateTime': int(time.time() + 0.5),
'usUnits': weewx.METRIC}
return _record
def _uv_packet(self, packet):
_record = {'uv': float(packet[3]),
'battery_status_uv': packet[0] >> 4,
'dateTime': int(time.time() + 0.5),
'usUnits': weewx.METRIC}
return _record
def _wind_packet(self, packet):
"""Decode a wind packet. Wind speed will be in kph"""
_record = {
'wind_speed': ((packet[6] << 4) + ((packet[5]) >> 4)) / 10.0,
'wind_gust': (((packet[5] & 0x0f) << 8) + packet[4]) / 10.0,
'wind_dir': (packet[2] & 0x0f) * 360.0 / 16.0,
'battery_status_wind': (packet[0] >> 4),
'dateTime': int(time.time() + 0.5),
'usUnits': weewx.METRICWX}
# Sometimes the station emits a wind gust that is less than the
# average wind. If this happens, ignore it.
if _record['wind_gust'] < _record['wind_speed']:
_record['wind_gust'] = None
return _record
def _clock_packet(self, packet):
"""The clock packet is not used by weewx. However, the last time is
saved in case getTime() is called."""
tt = (2000 + packet[8], packet[7], packet[6], packet[5], packet[4], 0, 0, 0, -1)
self.last_time = time.mktime(tt)
return None
# Dictionary that maps a measurement code, to a function that can decode it
_dispatch_dict = {0x41: _rain_packet,
0x42: _temperature_packet,
0x46: _pressure_packet,
0x47: _uv_packet,
0x48: _wind_packet,
0x60: _clock_packet,
0x44: _temperatureonly_packet}
class WMR100ConfEditor(weewx.drivers.AbstractConfEditor):
@property
def default_stanza(self):
return """
[WMR100]
# This section is for the Oregon Scientific WMR100
# The driver to use
driver = weewx.drivers.wmr100
# The station model, e.g., WMR100, WMR100N, WMRS200
model = WMR100
"""
def modify_config(self, config_dict):
print("""
Setting rainRate calculation to hardware.""")
config_dict.setdefault('StdWXCalculate', {})
config_dict['StdWXCalculate'].setdefault('Calculations', {})
config_dict['StdWXCalculate']['Calculations']['rainRate'] = 'hardware'
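# --- Hedged illustrative usage sketch (not part of the original driver) ---
# Constructing the driver directly with a couple of overridden options might
# look like the following; the specific values are hypothetical and only show
# the call shape implied by WMR100.__init__ above (openPort() will of course
# require the actual USB hardware to be present):
#
#   station = WMR100(model='WMR100N',
#                    timeout=20.0,
#                    sensor_map={'extraTemp1': 'temperature_3'})
#   for packet in station.genLoopPackets():
#       print(packet)   # a dict of mapped observations plus dateTime/usUnits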
|
ministryofjustice/PyGithub
|
refs/heads/master
|
github/tests/IssueEvent.py
|
39
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import Framework
import datetime
class IssueEvent(Framework.TestCase):
def setUp(self):
Framework.TestCase.setUp(self)
self.event = self.g.get_user().get_repo("PyGithub").get_issues_event(16348656)
def testAttributes(self):
self.assertEqual(self.event.actor.login, "jacquev6")
self.assertEqual(self.event.commit_id, "ed866fc43833802ab553e5ff8581c81bb00dd433")
self.assertEqual(self.event.created_at, datetime.datetime(2012, 5, 27, 7, 29, 25))
self.assertEqual(self.event.event, "referenced")
self.assertEqual(self.event.id, 16348656)
self.assertEqual(self.event.issue.number, 30)
self.assertEqual(self.event.url, "https://api.github.com/repos/jacquev6/PyGithub/issues/events/16348656")
|
AlexChernov/rdo_studio
|
refs/heads/dev
|
thirdparty/scintilla/qt/ScintillaEdit/WidgetGen.py
|
6
|
#!/usr/bin/env python
# WidgetGen.py - regenerate the ScintillaWidgetCpp.cpp and ScintillaWidgetCpp.h files
# Check that API includes all gtkscintilla2 functions
import sys
import os
import getopt
scintillaDirectory = "../.."
scintillaIncludeDirectory = os.path.join(scintillaDirectory, "include")
sys.path.append(scintillaIncludeDirectory)
import Face
def Contains(s,sub):
return s.find(sub) != -1
def underscoreName(s):
# Name conversion fixes to match gtkscintilla2
irregular = ['WS', 'EOL', 'AutoC', 'KeyWords', 'BackSpace', 'UnIndents', 'RE', 'RGBA']
for word in irregular:
replacement = word[0] + word[1:].lower()
s = s.replace(word, replacement)
out = ""
for c in s:
if c.isupper():
if out:
out += "_"
out += c.lower()
else:
out += c
return out
def normalisedName(s, options, role=None):
if options["qtStyle"]:
if role == "get":
s = s.replace("Get", "")
return s[0].lower() + s[1:]
else:
return underscoreName(s)
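# Hedged worked examples (not part of the original file) of the two naming
# styles produced above, obtained by tracing underscoreName/normalisedName:
#   normalisedName("GetUseTabs", {"qtStyle": True}, role="get")  -> "useTabs"
#   normalisedName("GetUseTabs", {"qtStyle": False})             -> "get_use_tabs"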
typeAliases = {
"position": "int",
"colour": "int",
"keymod": "int",
"string": "const char *",
"stringresult": "const char *",
"cells": "const char *",
}
def cppAlias(s):
if s in typeAliases:
return typeAliases[s]
else:
return s
understoodTypes = ["", "void", "int", "bool", "position",
"colour", "keymod", "string", "stringresult", "cells"]
def checkTypes(name, v):
understandAllTypes = True
if v["ReturnType"] not in understoodTypes:
#~ print("Do not understand", v["ReturnType"], "for", name)
understandAllTypes = False
if v["Param1Type"] not in understoodTypes:
#~ print("Do not understand", v["Param1Type"], "for", name)
understandAllTypes = False
if v["Param2Type"] not in understoodTypes:
#~ print("Do not understand", v["Param2Type"], "for", name)
understandAllTypes = False
return understandAllTypes
def arguments(v, stringResult, options):
ret = ""
p1Type = cppAlias(v["Param1Type"])
if p1Type:
ret = ret + p1Type + " " + normalisedName(v["Param1Name"], options)
p2Type = cppAlias(v["Param2Type"])
if p2Type and not stringResult:
if p1Type:
ret = ret + ", "
ret = ret + p2Type + " " + normalisedName(v["Param2Name"], options)
return ret
def printPyFile(f,out, options):
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
feat = v["FeatureType"]
if feat in ["val"]:
out.write(name + "=" + v["Value"] + "\n")
if feat in ["evt"]:
out.write("SCN_" + name.upper() + "=" + v["Value"] + "\n")
def printHFile(f,out, options):
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
feat = v["FeatureType"]
if feat in ["fun", "get", "set"]:
if checkTypes(name, v):
constDeclarator = " const" if feat == "get" else ""
returnType = cppAlias(v["ReturnType"])
stringResult = v["Param2Type"] == "stringresult"
if stringResult:
returnType = "QByteArray"
out.write("\t" + returnType + " " + normalisedName(name, options, feat) + "(")
out.write(arguments(v, stringResult, options))
out.write(")" + constDeclarator + ";\n")
def methodNames(f, options):
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
feat = v["FeatureType"]
if feat in ["fun", "get", "set"]:
if checkTypes(name, v):
yield normalisedName(name, options)
def printCPPFile(f,out, options):
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
feat = v["FeatureType"]
if feat in ["fun", "get", "set"]:
if checkTypes(name, v):
constDeclarator = " const" if feat == "get" else ""
featureDefineName = "SCI_" + name.upper()
returnType = cppAlias(v["ReturnType"])
stringResult = v["Param2Type"] == "stringresult"
if stringResult:
returnType = "QByteArray"
returnStatement = ""
if returnType != "void":
returnStatement = "return "
out.write(returnType + " ScintillaEdit::" + normalisedName(name, options, feat) + "(")
out.write(arguments(v, stringResult, options))
out.write(")" + constDeclarator + " {\n")
if stringResult:
out.write(" " + returnStatement + "TextReturner(" + featureDefineName + ", ")
if "*" in cppAlias(v["Param1Type"]):
out.write("(uptr_t)")
if v["Param1Name"]:
out.write(normalisedName(v["Param1Name"], options))
else:
out.write("0")
out.write(");\n")
else:
out.write(" " + returnStatement + "send(" + featureDefineName + ", ")
if "*" in cppAlias(v["Param1Type"]):
out.write("(uptr_t)")
if v["Param1Name"]:
out.write(normalisedName(v["Param1Name"], options))
else:
out.write("0")
out.write(", ")
if "*" in cppAlias(v["Param2Type"]):
out.write("(sptr_t)")
if v["Param2Name"]:
out.write(normalisedName(v["Param2Name"], options))
else:
out.write("0")
out.write(");\n")
out.write("}\n")
out.write("\n")
def CopyWithInsertion(input, output, genfn, definition, options):
copying = 1
for line in input.readlines():
if copying:
output.write(line)
if "/* ++Autogenerated" in line or "# ++Autogenerated" in line or "<!-- ++Autogenerated" in line:
copying = 0
genfn(definition, output, options)
# ~~ form needed as XML comments can not contain --
if "/* --Autogenerated" in line or "# --Autogenerated" in line or "<!-- ~~Autogenerated" in line:
copying = 1
output.write(line)
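# A hedged illustration (the wording after the keywords is hypothetical): a
# C++ template processed by CopyWithInsertion might contain
#     /* ++Autogenerated -- section regenerated by WidgetGen.py */
#     /* --Autogenerated -- end of generated section */
# The opening marker line is copied through, the output of genfn() is written
# directly after it, and the original lines up to the closing marker are
# dropped; the closing marker itself is copied back so the section can be
# regenerated again later.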
def contents(filename):
with open(filename, "U") as f:
t = f.read()
return t
def Generate(templateFile, destinationFile, genfn, definition, options):
inText = contents(templateFile)
try:
currentText = contents(destinationFile)
except IOError:
currentText = ""
tempname = "WidgetGen.tmp"
with open(tempname, "w") as out:
with open(templateFile, "U") as hfile:
CopyWithInsertion(hfile, out, genfn, definition, options)
outText = contents(tempname)
if currentText == outText:
os.unlink(tempname)
else:
try:
os.unlink(destinationFile)
except OSError:
# Will see failure if file does not yet exist
pass
os.rename(tempname, destinationFile)
def gtkNames():
# The full path on my machine: should be altered for anyone else
p = "C:/Users/Neil/Downloads/wingide-source-4.0.1-1/wingide-source-4.0.1-1/external/gtkscintilla2/gtkscintilla.c"
with open(p) as f:
for l in f.readlines():
if "gtk_scintilla_" in l:
name = l.split()[1][14:]
if '(' in name:
name = name.split('(')[0]
yield name
def usage():
print("WidgetGen.py [-c|--clean][-h|--help][-u|--underscore-names]")
print("")
print("Generate full APIs for ScintillaEdit class and ScintillaConstants.py.")
print("")
print("options:")
print("")
print("-c --clean remove all generated code from files")
print("-h --help display this text")
print("-u --underscore-names use method_names consistent with GTK+ standards")
def readInterface(cleanGenerated):
f = Face.Face()
if not cleanGenerated:
f.ReadFromFile("../../include/Scintilla.iface")
return f
def main(argv):
# Using local path for gtkscintilla2 so don't default to checking
checkGTK = False
cleanGenerated = False
qtStyleInterface = True
# The --gtk-check option checks for full coverage of the gtkscintilla2 API but
# depends on a particular directory so is not mentioned in --help.
opts, args = getopt.getopt(argv, "hcgu", ["help", "clean", "gtk-check", "underscore-names"])
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-c", "--clean"):
cleanGenerated = True
elif opt in ("-g", "--gtk-check"):
checkGTK = True
elif opt in ("-u", "--underscore-names"):
qtStyleInterface = False
options = {"qtStyle": qtStyleInterface}
f = readInterface(cleanGenerated)
try:
Generate("ScintillaEdit.cpp.template", "ScintillaEdit.cpp", printCPPFile, f, options)
Generate("ScintillaEdit.h.template", "ScintillaEdit.h", printHFile, f, options)
Generate("../ScintillaEditPy/ScintillaConstants.py.template",
"../ScintillaEditPy/ScintillaConstants.py",
printPyFile, f, options)
if checkGTK:
names = set(methodNames(f))
#~ print("\n".join(names))
namesGtk = set(gtkNames())
for name in namesGtk:
if name not in names:
print(name, "not found in Qt version")
for name in names:
if name not in namesGtk:
print(name, "not found in GTK+ version")
except:
raise
if cleanGenerated:
for file in ["ScintillaEdit.cpp", "ScintillaEdit.h", "../ScintillaEditPy/ScintillaConstants.py"]:
try:
os.remove(file)
except OSError:
pass
if __name__ == "__main__":
main(sys.argv[1:])
|
feer56/Kitsune1
|
refs/heads/master
|
migrations/159-topics-migration.py
|
6
|
from django.utils.encoding import smart_str
from taggit.models import Tag
from kitsune.wiki.models import Document
from migrations.models import Topic
tags_to_migrate = {
# '<source tag>': '<destination tag>',
'sync': 'sync',
'general': 'general',
'recovery-key': 'recovery-key',
'privacy-security': 'privacy-and-security',
'marketplace': 'marketplace',
'download-and-install': 'download-and-install',
'privacy-and-security': 'privacy-and-security',
'getting-started': 'getting-started',
'customize': 'customize',
'addons': 'addons',
'settings': 'settings',
'controls': 'controls',
'flash': 'flash',
'search': 'search',
'add-ons': 'addons',
'tabs': 'tabs',
'bookmarks': 'bookmarks',
'tips': 'tips',
'ios': 'ios',
'websites': 'websites',
'persona': 'persona',
'error-messages': 'error-messages',
'diagnostics': 'diagnostics',
'cookies': 'cookies',
'accessibility': 'accessibility',
'migrate': 'migrate',
'android': 'android',
'history': 'history',
'slowness-or-hanging': 'slowness-or-hanging',
'crashing': 'crashing',
'malware': 'malware',
'slowness-and-hanging': 'slowness-or-hanging',
'hanging-and-slowness': 'slowness-or-hanging',
'profiles': 'profiles',
'versions': 'versions',
'download': 'download',
'dignostics': 'diagnostics',
'browserid': 'browserid',
'passwords': 'passwords',
'profile': 'profiles',
'security-and-privacy': 'privacy-and-security',
'diagnostic': 'diagnostics',
}
def run():
# Get all the tags to migrate.
tags = Tag.objects.filter(slug__in=tags_to_migrate.keys())
# For each tag, get the document and add a topic for it.
for tag in tags:
try:
destination_tag = Tag.objects.get(slug=tags_to_migrate[tag.slug])
except Tag.DoesNotExist:
print 'Skipped tag %s' % tag
continue
# Get or create the topic.
topic, created = Topic.objects.get_or_create(
title=destination_tag.name,
slug=destination_tag.slug,
display_order=0,
visible=True)
if created:
print 'Created new topic "%s"' % smart_str(topic.slug)
# Assign the topic to all the documents tagged with tag.
for doc in Document.objects.filter(tags__slug=tag.slug):
doc.topics.add(topic)
print 'Added topic "%s" to document "%s"' % (
smart_str(topic.slug), smart_str(doc.title))
print 'Done!'
|
maestrotf/pymepps-streaming
|
refs/heads/master
|
pymeppsstream/submodules/graphmanager.py
|
1
|
#!/bin/env python
# -*- coding: utf-8 -*-
#
# Created on 20.02.17
#
# Created for pymepps-streaming
#
# @author: Tobias Sebastian Finn, tobias.sebastian.finn@studium.uni-hamburg.de
#
# Copyright (C) {2017} {Tobias Sebastian Finn}
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# System modules
import collections
import glob
import os
import pickle
import re
# External modules
# Internal modules
from pymeppsstream.socket_module import SocketModule
class GraphManager(SocketModule):
"""
This class manages the graph which should be executed. The graph nodes
are registered to this manager. Registered nodes are serialized with
pickle and only the path is saved. Streams are handled in a special way:
they are kept online in a suspended mode and woken up after a defined
amount of time. This is an implementation of SocketModule.
Parameters
----------
config_path : str or None, optional
The path to the configuration file based on yaml. A default
configuration is loaded, such that this configuration file needs only to
overwrite changed parameters. If this is None, only the default
configuration is used. Default is None.
base_config_path : str or None, optional
The path to the base configuration file based on yaml. A default
configuration is loaded, such that this configuration file needs only to
overwrite changed parameters. If this is None, only the default
configuration is used. Default is None.
Attributes
----------
nodes : dict of str
The node list. The node name is the key and the serialization path
is the value. The nodes names have to be unique. If this is empty
there is no node registered.
streams : dict of str
The streams list. The node name is the key and the stream instance
is the value. The stream names have to be unique. If this is empty
there is no stream registered.
child_nodes : dict of str
A dict with the name of the parent node as key and names of the
child nodes as value. This dict is used to map the output of the
parent node to the child nodes.
"""
def __init__(self, config_path=None, base_config_path=None):
super().__init__(config_path, base_config_path)
self.nodes = {}
self.streams = {}
self.child_nodes = {}
self._socket_methods = {
self.base_config['port_child_node'] : self._request_child_nodes,
self.base_config['port_stream'] : self._request_streams
}
def serialize_node(self, node, prefix=''):
"""
Method to serialize a given node into pickle_path defined by
base_config. The serialization is done with pickle.dump.
Parameters
----------
node : Node or Stream
The node which should be serialized.
prefix : str, optional
The name prefix. Standard prefixes are n for Node and s for Stream.
If this is empty there is no prefix. Default is ''.
Returns
-------
pk_path : str
The path to the serialized object. The path is composed with
pickle_path, prefix, node class name and node name.
"""
self.logger.debug('Trying to serialize node {0:s}'.format(node.name))
pk_path = os.path.join(
self.base_config['pickle_path'],
'{0:s}{1:s}_{2:s}.pk'.format(
prefix, node.__class__.__name__, node.name)
)
self.logger.debug('Serialize the node to {0:s}'.format(pk_path))
pickle.dump(node, open(pk_path, 'wb'))
self.logger.debug(
'Serialized the node {0:s} to {1:s}'.format(node.name, pk_path))
return pk_path
def register_node(self, node):
"""
Method to register a node to this graph manager.
1) The node is serialized.
2) The node is registered to nodes dict.
3) The node is registered to child node dict.
Returns
-------
self
"""
self.logger.debug('Trying to register the node {0:s}'.format(node.name))
if node.name not in self.nodes:
self.logger.debug(
'The node {0:s} is not registered yet'.format(node.name))
self.init_instance_logger(node)
node_path = self.serialize_node(node, prefix='n')
self.logger.debug(
'The node {0:s} is serialized now add the node to the node '
'dict'.format(node.name))
self.nodes[node.name] = node_path
if isinstance(node.dependancy, collections.Iterable):
self.logger.debug(
'The node {0:s} has dependancies'.format(node.name))
for dep in node.dependancy:
self.logger.debug(
'Register node {0:s} to child node dict with '
'dependancy {1:s}'.format(node.name, dep))
self._add_child_node(dep, node)
else:
self.logger.debug(
'The node {0:s} has a single dependancy to {1:s}'.format(
node.name, node.dependancy))
self._add_child_node(node.dependancy, node)
self.logger.info(
'Registered the node {0:s} to the nodes list'.format(node.name))
return self
else:
self.logger.info(
'The node {0:s} is already in the nodes list'.format(str(node)))
return self
def _add_child_node(self, dep, node):
"""
Method to set a given node as child node of given dependency.
Parameters
----------
dep : str
The name of the parent node.
node : str
The name of the child node
"""
self.logger.debug(
'Adding the node {0:s} to the dependency of {1:s}'.format(
node, dep))
try:
self.child_nodes[str(dep)].append(str(node))
except KeyError:
self.child_nodes[str(dep)] = [str(node),]
def register_stream(self, stream):
"""
Method to register a stream. The stream is registered to the streams
list attribute and after this its serialized.
Parameters
----------
stream : Stream instance
The stream which should be registered and serialized.
Returns
-------
self
"""
self.logger.debug(
'Trying to register stream {0:s} to the stream list'.format(
stream.name))
serialized_path = self.serialize_node(stream, 's')
self.logger.debug(
'Serialized the stream {0:s}, now trying to register the '
'stream'.format(stream.name))
if stream.name not in self.streams:
self.init_instance_logger(stream)
self.logger.debug(
'The stream {0:s} is not registered yet, register the '
'stream'.format(stream.name))
self.streams[stream.name] = serialized_path
return self
def check_folder_add_new(self):
"""
Method to check the pickle path and add new registered streams and
nodes.
Returns
-------
self
"""
self.logger.debug(
'Checking if new serialized node and stream files are within '
'{0:s}'.format(self.base_config['pickle_path']))
found_files = glob.glob(
os.path.join(self.base_config['pickle_path'], '*.pk'))
for file in found_files:
self.logger.debug('The file {0:s} was found'.format(file))
file_name = os.path.split(file)[-1]
cleaned_file_name = re.sub(r'^[n|s]', '', file_name).split('.')[0]
self.logger.debug(
'The cleaned file name of {0:s} is {1:s}'.format(
file, cleaned_file_name))
file_parts = cleaned_file_name.split('_')
node_name = '{0:s}({1:s})'.format(
'_'.join(file_parts[1:]), file_parts[0])
self.logger.debug(
'The name of file {0:s} is {1:s}'.format(file, node_name))
if (node_name not in self.nodes) and \
(node_name not in self.streams):
self.logger.debug(
'The node {0:s} was not found within stream and node list, '
'trying to register the node'.format(node_name))
node = pickle.load(open(file, 'rb'))
self.logger.debug(
'Deserialized the node {0:s} now remove the node '
'file'.format(node.name))
os.remove(file)
self.logger.debug(
'Trying to register the node {0:s}'.format(node.name))
if file_name[0]=='s':
self.register_stream(node)
else:
self.register_node(node)
return self
def _request_child_nodes(self, connection):
self.logger.debug('Got a request event for child nodes')
data = connection.recv(1048576)
if data:
self.logger.info('Got request from child nodes connection')
dependency = pickle.loads(data)
self.logger.info(
'Got successor request for {0:s} from connection'.format(
dependency))
self.check_folder_add_new()
self.logger.debug(
'Checked for new nodes, now get the child nodes of dependency '
'{0:s}'.format(dependency))
child_nodes = self.child_nodes[dependency]
path_dict = {}
self.logger.debug('Got child nodes for dependency {0:s}'.format(
dependency))
for node in child_nodes:
self.logger.debug(
'Add node {0:s} to child node dict for return of child '
'node request'.format(node))
path_dict[node] = self.nodes[node]
serialized_obj = pickle.dumps({dependency: path_dict})
self.logger.debug(
'Serialized the child node dict to send results back')
connection.sendall(serialized_obj)
self.logger.info(
'Sent successors for {0:s} to connection'.format(
dependency))
else:
self.logger.debug(
'Got no data from connection, close the connection')
self.close_socket(connection)
def _request_streams(self, connection):
self.logger.debug('Got a request event for streams')
data = connection.recv(1048576)
if data:
self.logger.info('Got stream request from connection')
self.check_folder_add_new()
serialized_obj = pickle.dumps(self.streams)
self.logger.debug(
'Serialized the stream, now sending the streams via the '
'connection')
connection.sendall(serialized_obj)
self.logger.debug('Sent streams to connection')
else:
self.logger.debug('Got no request for data, close the connection')
self.close_socket(connection)
def create_clients(self):
self.logger.debug('Set an empty clients dict')
self.clients = {}
def create_servers(self):
self.logger.debug(
'Create the servers for child node and stream request')
child_node = self.create_socket_server(
self.base_config['port_child_node'])
self.logger.debug('Created server for child node request')
stream = self.create_socket_server(
self.base_config['port_stream'])
self.logger.debug('Created server for stream request')
self.servers = {'successor': child_node, 'stream': stream}
self.logger.debug('Created the servers successfully')
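# --- Hedged worked example (not part of the original module) ---
# With a hypothetical base_config['pickle_path'] of '/tmp/pymepps', a node of
# class Node named 'wind_speed' is serialized by serialize_node(node,
# prefix='n') to '/tmp/pymepps/nNode_wind_speed.pk'; check_folder_add_new()
# later strips the leading 'n'/'s', splits on '_' and rebuilds the registry
# key as 'wind_speed(Node)'.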
|
40223234/2015cd_midterm2
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/_functools.py
|
727
|
def partial(func, *args, **keywords):
def newfunc(*fargs, **fkeywords):
newkeywords = keywords.copy()
newkeywords.update(fkeywords)
return func(*(args + fargs), **newkeywords)
newfunc.func = func
newfunc.args = args
newfunc.keywords = keywords
return newfunc
def reduce(func,iterable,initializer=None):
args = iter(iterable)
if initializer is not None:
res = initializer
else:
res = next(args)
while True:
try:
res = func(res,next(args))
except StopIteration:
return res
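# --- Hedged usage sketch appended for illustration (not in the original file) ---
if __name__ == "__main__":
    # partial() pre-binds positional and keyword arguments of func:
    def add(a, b, c=0):
        return a + b + c
    add_ten = partial(add, 10, c=1)
    assert add_ten(5) == 16          # add(10, 5, c=1)
    # reduce() folds an iterable with a binary function, left to right:
    assert reduce(lambda x, y: x + y, [1, 2, 3, 4]) == 10
    # With an initializer the fold starts from that value instead:
    assert reduce(lambda x, y: x + y, [1, 2, 3, 4], 100) == 110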
|
lmtim/sample-code
|
refs/heads/master
|
sample-code/examples/python/android_simple.py
|
36
|
import os
from time import sleep
import unittest
from appium import webdriver
# Returns abs path relative to this file and not cwd
PATH = lambda p: os.path.abspath(
os.path.join(os.path.dirname(__file__), p)
)
class SimpleAndroidTests(unittest.TestCase):
def setUp(self):
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '4.2'
desired_caps['deviceName'] = 'Android Emulator'
desired_caps['app'] = PATH(
'../../../sample-code/apps/ApiDemos/bin/ApiDemos-debug.apk'
)
self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
def tearDown(self):
# end the session
self.driver.quit()
def test_find_elements(self):
el = self.driver.find_element_by_accessibility_id('Graphics')
el.click()
el = self.driver.find_element_by_accessibility_id('Arcs')
self.assertIsNotNone(el)
self.driver.back()
el = self.driver.find_element_by_accessibility_id("App")
self.assertIsNotNone(el)
els = self.driver.find_elements_by_android_uiautomator("new UiSelector().clickable(true)")
self.assertGreaterEqual(12, len(els))
self.driver.find_element_by_android_uiautomator('text("API Demos")')
def test_simple_actions(self):
el = self.driver.find_element_by_accessibility_id('Graphics')
el.click()
el = self.driver.find_element_by_accessibility_id('Arcs')
el.click()
self.driver.find_element_by_android_uiautomator('new UiSelector().text("Graphics/Arcs")')
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(SimpleAndroidTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
garciasolero/oauthlib
|
refs/heads/develop
|
oauthlib/oauth2/rfc6749/endpoints/authorization.py
|
71
|
# -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
from __future__ import absolute_import, unicode_literals
import logging
from oauthlib.common import Request
from .base import BaseEndpoint, catch_errors_and_unavailability
log = logging.getLogger(__name__)
class AuthorizationEndpoint(BaseEndpoint):
"""Authorization endpoint - used by the client to obtain authorization
from the resource owner via user-agent redirection.
The authorization endpoint is used to interact with the resource
owner and obtain an authorization grant. The authorization server
MUST first verify the identity of the resource owner. The way in
which the authorization server authenticates the resource owner (e.g.
username and password login, session cookies) is beyond the scope of
this specification.
The endpoint URI MAY include an "application/x-www-form-urlencoded"
formatted (per `Appendix B`_) query component,
which MUST be retained when adding additional query parameters. The
endpoint URI MUST NOT include a fragment component::
https://example.com/path?query=component # OK
https://example.com/path?query=component#fragment # Not OK
Since requests to the authorization endpoint result in user
authentication and the transmission of clear-text credentials (in the
HTTP response), the authorization server MUST require the use of TLS
as described in Section 1.6 when sending requests to the
authorization endpoint::
# We will deny any request which URI schema is not with https
The authorization server MUST support the use of the HTTP "GET"
method [RFC2616] for the authorization endpoint, and MAY support the
use of the "POST" method as well::
# HTTP method is currently not enforced
Parameters sent without a value MUST be treated as if they were
omitted from the request. The authorization server MUST ignore
unrecognized request parameters. Request and response parameters
MUST NOT be included more than once::
# Enforced through the design of oauthlib.common.Request
.. _`Appendix B`: http://tools.ietf.org/html/rfc6749#appendix-B
"""
def __init__(self, default_response_type, default_token_type,
response_types):
BaseEndpoint.__init__(self)
self._response_types = response_types
self._default_response_type = default_response_type
self._default_token_type = default_token_type
@property
def response_types(self):
return self._response_types
@property
def default_response_type(self):
return self._default_response_type
@property
def default_response_type_handler(self):
return self.response_types.get(self.default_response_type)
@property
def default_token_type(self):
return self._default_token_type
@catch_errors_and_unavailability
def create_authorization_response(self, uri, http_method='GET', body=None,
headers=None, scopes=None, credentials=None):
"""Extract response_type and route to the designated handler."""
request = Request(
uri, http_method=http_method, body=body, headers=headers)
request.scopes = scopes
# TODO: decide whether this should be a required argument
request.user = None # TODO: explain this in docs
for k, v in (credentials or {}).items():
setattr(request, k, v)
response_type_handler = self.response_types.get(
request.response_type, self.default_response_type_handler)
log.debug('Dispatching response_type %s request to %r.',
request.response_type, response_type_handler)
return response_type_handler.create_authorization_response(
request, self.default_token_type)
@catch_errors_and_unavailability
def validate_authorization_request(self, uri, http_method='GET', body=None,
headers=None):
"""Extract response_type and route to the designated handler."""
request = Request(
uri, http_method=http_method, body=body, headers=headers)
request.scopes = None
response_type_handler = self.response_types.get(
request.response_type, self.default_response_type_handler)
return response_type_handler.validate_authorization_request(request)
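# --- Hedged illustrative sketch (not part of the original module) ---
# Wiring up the endpoint requires a mapping from response_type strings to
# grant handlers; the 'code_grant' and 'bearer_token' objects below are
# hypothetical placeholders standing in for concrete grant/token classes:
#
#   endpoint = AuthorizationEndpoint(default_response_type='code',
#                                    default_token_type=bearer_token,
#                                    response_types={'code': code_grant})
#   endpoint.validate_authorization_request(
#       'https://example.com/authorize?response_type=code&client_id=abc')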
|
DavidNorman/tensorflow
|
refs/heads/master
|
tensorflow/python/tools/strip_unused_lib.py
|
23
|
# pylint: disable=g-bad-file-header
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to remove unneeded nodes from a GraphDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from google.protobuf import text_format
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile
def strip_unused(input_graph_def, input_node_names, output_node_names,
placeholder_type_enum):
"""Removes unused nodes from a GraphDef.
Args:
input_graph_def: A graph with nodes we want to prune.
input_node_names: A list of the nodes we use as inputs.
output_node_names: A list of the output nodes.
placeholder_type_enum: The AttrValue enum for the placeholder data type, or
a list that specifies one value per input node name.
Returns:
A `GraphDef` with all unnecessary ops removed.
Raises:
ValueError: If any element in `input_node_names` refers to a tensor instead
of an operation.
KeyError: If any element in `input_node_names` is not found in the graph.
"""
for name in input_node_names:
if ":" in name:
raise ValueError("Name '%s' appears to refer to a Tensor, "
"not a Operation." % name)
# Here we replace the nodes we're going to override as inputs with
# placeholders so that any unused nodes that are inputs to them are
# automatically stripped out by extract_sub_graph().
not_found = {name for name in input_node_names}
inputs_replaced_graph_def = graph_pb2.GraphDef()
for node in input_graph_def.node:
if node.name in input_node_names:
not_found.remove(node.name)
placeholder_node = node_def_pb2.NodeDef()
placeholder_node.op = "Placeholder"
placeholder_node.name = node.name
if isinstance(placeholder_type_enum, list):
input_node_index = input_node_names.index(node.name)
placeholder_node.attr["dtype"].CopyFrom(
attr_value_pb2.AttrValue(type=placeholder_type_enum[
input_node_index]))
else:
placeholder_node.attr["dtype"].CopyFrom(
attr_value_pb2.AttrValue(type=placeholder_type_enum))
if "_output_shapes" in node.attr:
placeholder_node.attr["_output_shapes"].CopyFrom(node.attr[
"_output_shapes"])
if "shape" in node.attr:
placeholder_node.attr["shape"].CopyFrom(node.attr["shape"])
inputs_replaced_graph_def.node.extend([placeholder_node])
else:
inputs_replaced_graph_def.node.extend([copy.deepcopy(node)])
if not_found:
raise KeyError("The following input nodes were not found: %s" % not_found)
output_graph_def = graph_util.extract_sub_graph(inputs_replaced_graph_def,
output_node_names)
return output_graph_def
def strip_unused_from_files(input_graph, input_binary, output_graph,
output_binary, input_node_names, output_node_names,
placeholder_type_enum):
"""Removes unused nodes from a graph file."""
if not gfile.Exists(input_graph):
print("Input graph file '" + input_graph + "' does not exist!")
return -1
if not output_node_names:
print("You need to supply the name of a node to --output_node_names.")
return -1
input_graph_def = graph_pb2.GraphDef()
mode = "rb" if input_binary else "r"
with gfile.GFile(input_graph, mode) as f:
if input_binary:
input_graph_def.ParseFromString(f.read())
else:
text_format.Merge(f.read(), input_graph_def)
output_graph_def = strip_unused(input_graph_def,
input_node_names.split(","),
output_node_names.split(","),
placeholder_type_enum)
if output_binary:
with gfile.GFile(output_graph, "wb") as f:
f.write(output_graph_def.SerializeToString())
else:
with gfile.GFile(output_graph, "w") as f:
f.write(text_format.MessageToString(output_graph_def))
print("%d ops in the final graph." % len(output_graph_def.node))
|
ArthurGarnier/SickRage
|
refs/heads/master
|
lib/rtorrent/tracker.py
|
173
|
# Copyright (c) 2013 Chris Lucas, <chris@chrisjlucas.com>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# from rtorrent.rpc import Method
import rtorrent.rpc
from rtorrent.common import safe_repr
Method = rtorrent.rpc.Method
class Tracker:
"""Represents an individual tracker within a L{Torrent} instance."""
def __init__(self, _rt_obj, info_hash, **kwargs):
self._rt_obj = _rt_obj
self.info_hash = info_hash # : info hash for the torrent using this tracker
for k in kwargs.keys():
setattr(self, k, kwargs.get(k, None))
# for clarity's sake...
self.index = self.group # : position of tracker within the torrent's tracker list
self.rpc_id = "{0}:t{1}".format(
self.info_hash, self.index) # : unique id to pass to rTorrent
def __repr__(self):
return safe_repr("Tracker(index={0}, url=\"{1}\")",
self.index, self.url)
def enable(self):
"""Alias for set_enabled("yes")"""
self.set_enabled("yes")
def disable(self):
"""Alias for set_enabled("no")"""
self.set_enabled("no")
def update(self):
"""Refresh tracker data
        @note: All fields are stored as attributes of self.
@return: None
"""
multicall = rtorrent.rpc.Multicall(self)
retriever_methods = [m for m in methods
if m.is_retriever() and m.is_available(self._rt_obj)]
for method in retriever_methods:
multicall.add(method, self.rpc_id)
multicall.call()
methods = [
# RETRIEVERS
Method(Tracker, 'is_enabled', 't.is_enabled', boolean=True),
Method(Tracker, 'get_id', 't.get_id'),
Method(Tracker, 'get_scrape_incomplete', 't.get_scrape_incomplete'),
Method(Tracker, 'is_open', 't.is_open', boolean=True),
Method(Tracker, 'get_min_interval', 't.get_min_interval'),
Method(Tracker, 'get_scrape_downloaded', 't.get_scrape_downloaded'),
Method(Tracker, 'get_group', 't.get_group'),
Method(Tracker, 'get_scrape_time_last', 't.get_scrape_time_last'),
Method(Tracker, 'get_type', 't.get_type'),
Method(Tracker, 'get_normal_interval', 't.get_normal_interval'),
Method(Tracker, 'get_url', 't.get_url'),
Method(Tracker, 'get_scrape_complete', 't.get_scrape_complete',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_activity_time_last', 't.activity_time_last',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_activity_time_next', 't.activity_time_next',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_failed_time_last', 't.failed_time_last',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_failed_time_next', 't.failed_time_next',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_success_time_last', 't.success_time_last',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_success_time_next', 't.success_time_next',
min_version=(0, 8, 9),
),
Method(Tracker, 'can_scrape', 't.can_scrape',
min_version=(0, 9, 1),
boolean=True
),
Method(Tracker, 'get_failed_counter', 't.failed_counter',
min_version=(0, 8, 9)
),
Method(Tracker, 'get_scrape_counter', 't.scrape_counter',
min_version=(0, 8, 9)
),
Method(Tracker, 'get_success_counter', 't.success_counter',
min_version=(0, 8, 9)
),
Method(Tracker, 'is_usable', 't.is_usable',
min_version=(0, 9, 1),
boolean=True
),
Method(Tracker, 'is_busy', 't.is_busy',
min_version=(0, 9, 1),
boolean=True
),
Method(Tracker, 'is_extra_tracker', 't.is_extra_tracker',
min_version=(0, 9, 1),
boolean=True,
),
Method(Tracker, "get_latest_sum_peers", "t.latest_sum_peers",
min_version=(0, 9, 0)
),
Method(Tracker, "get_latest_new_peers", "t.latest_new_peers",
min_version=(0, 9, 0)
),
# MODIFIERS
Method(Tracker, 'set_enabled', 't.set_enabled'),
]
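# A minimal usage sketch (not part of the library): ``tracker`` is assumed to be a Tracker
# obtained elsewhere, e.g. from a Torrent's tracker list; the attribute names populated by
# update() are derived from the retriever method names above.
def _example_refresh_and_enable(tracker):
    """Refresh a tracker's fields from rTorrent and re-enable it."""
    tracker.update()   # runs every available retriever and stores the results as attributes
    tracker.enable()   # alias for set_enabled("yes")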
|
GabrielBrascher/cloudstack
|
refs/heads/master
|
test/integration/smoke/test_portable_publicip.py
|
6
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from nose.plugins.attrib import attr
class TestPortablePublicIPRange(cloudstackTestCase):
"""
This test validates functionality where
- admin can provision a portable public ip range
- list provisioned portable public ip range
- delete provisioned portable public ip range
"""
@classmethod
def setUpClass(cls):
testClient = super(TestPortablePublicIPRange, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.services = testClient.getParsedTestDataConfig()
# Get Zone, Domain
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
# Create Account
cls.account = Account.create(
cls.apiclient,
cls.services["account"],
domainid=cls.domain.id
)
cls._cleanup = [
cls.account,
]
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["basic", "advanced", "portablepublicip"], required_hardware="false")
def test_createPortablePublicIPRange(self):
""" Test to create a portable public ip range
"""
self.debug("attempting to create a portable Public IP range")
self.portable_ip_range = PortablePublicIpRange.create(
self.apiclient,
self.services
)
self.debug("attempting to verify portable Public IP range is created")
        list_portable_ip_range_response = PortablePublicIpRange.list(
self.apiclient,
id=self.portable_ip_range.id
)
self.portable_ip_range.delete(self.apiclient)
return
class TestPortablePublicIPAcquire(cloudstackTestCase):
"""
This test validates functionality where
- admin has provisioned a portable public ip range
- user can acquire portable ip from the provisioned ip range
"""
@classmethod
def setUpClass(cls):
testClient = super(TestPortablePublicIPAcquire, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.services = testClient.getParsedTestDataConfig()
# Get Zone, Domain
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
# Create Account
cls.account = Account.create(
cls.apiclient,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["network"]["zoneid"] = cls.zone.id
cls.network_offering = NetworkOffering.create(
cls.apiclient,
cls.services["network_offering"],
)
# Enable Network offering
cls.network_offering.update(cls.apiclient, state='Enabled')
cls.services["network"]["networkoffering"] = cls.network_offering.id
cls.account_network = Network.create(
cls.apiclient,
cls.services["network"],
cls.account.name,
cls.account.domainid
)
cls._cleanup = [
cls.account_network,
cls.network_offering,
cls.account
]
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["advanced", "portablepublicip"], required_hardware="false")
def test_createPortablePublicIPAcquire(self):
""" Test to acquire a provisioned public ip range
"""
self.debug("attempting to create a portable Public IP range")
self.portable_ip_range = PortablePublicIpRange.create(
self.apiclient,
self.services
)
ip_address = PublicIPAddress.create(self.apiclient, self.account.name,
self.zone.id, self.account.domainid, isportable=True)
ip_address.delete(self.apiclient)
self.portable_ip_range.delete(self.apiclient)
return
|
tdtrask/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/openstack/os_user_role.py
|
25
|
#!/usr/bin/python
# Copyright (c) 2016 IBM
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_user_role
short_description: Associate OpenStack Identity users and roles
extends_documentation_fragment: openstack
author: "Monty Taylor (@emonty), David Shrewsbury (@Shrews)"
version_added: "2.1"
description:
- Grant and revoke roles in either project or domain context for
OpenStack Identity Users.
options:
role:
description:
- Name or ID for the role.
required: true
user:
description:
- Name or ID for the user. If I(user) is not specified, then
I(group) is required. Both may not be specified.
required: false
default: null
group:
description:
- Name or ID for the group. Valid only with keystone version 3.
If I(group) is not specified, then I(user) is required. Both
may not be specified.
required: false
default: null
project:
description:
- Name or ID of the project to scope the role association to.
If you are using keystone version 2, then this value is required.
required: false
default: null
domain:
description:
- ID of the domain to scope the role association to. Valid only with
keystone version 3, and required if I(project) is not specified.
required: false
default: null
state:
description:
- Should the roles be present or absent on the user.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Grant an admin role on the user admin in the project project1
- os_user_role:
cloud: mycloud
user: admin
role: admin
project: project1
# Revoke the admin role from the user barney in the newyork domain
- os_user_role:
cloud: mycloud
state: absent
user: barney
role: admin
domain: newyork
'''
RETURN = '''
#
'''
from distutils.version import StrictVersion
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs
def _system_state_change(state, assignment):
if state == 'present' and not assignment:
return True
elif state == 'absent' and assignment:
return True
return False
def _build_kwargs(user, group, project, domain):
kwargs = {}
if user:
kwargs['user'] = user
if group:
kwargs['group'] = group
if project:
kwargs['project'] = project
if domain:
kwargs['domain'] = domain
return kwargs
def main():
argument_spec = openstack_full_argument_spec(
role=dict(required=True),
user=dict(required=False),
group=dict(required=False),
project=dict(required=False),
domain=dict(required=False),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
required_one_of=[
['user', 'group']
])
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
# role grant/revoke API introduced in 1.5.0
if not HAS_SHADE or (StrictVersion(shade.__version__) < StrictVersion('1.5.0')):
module.fail_json(msg='shade 1.5.0 or higher is required for this module')
role = module.params.pop('role')
user = module.params.pop('user')
group = module.params.pop('group')
project = module.params.pop('project')
domain = module.params.pop('domain')
state = module.params.pop('state')
try:
cloud = shade.operator_cloud(**module.params)
filters = {}
r = cloud.get_role(role)
if r is None:
module.fail_json(msg="Role %s is not valid" % role)
filters['role'] = r['id']
if user:
u = cloud.get_user(user)
if u is None:
module.fail_json(msg="User %s is not valid" % user)
filters['user'] = u['id']
if group:
g = cloud.get_group(group)
if g is None:
module.fail_json(msg="Group %s is not valid" % group)
filters['group'] = g['id']
if domain:
d = cloud.get_domain(domain)
if d is None:
module.fail_json(msg="Domain %s is not valid" % domain)
filters['domain'] = d['id']
if project:
if domain:
p = cloud.get_project(project, domain_id=filters['domain'])
else:
p = cloud.get_project(project)
if p is None:
module.fail_json(msg="Project %s is not valid" % project)
filters['project'] = p['id']
assignment = cloud.list_role_assignments(filters=filters)
if module.check_mode:
module.exit_json(changed=_system_state_change(state, assignment))
changed = False
if state == 'present':
if not assignment:
kwargs = _build_kwargs(user, group, project, domain)
cloud.grant_role(role, **kwargs)
changed = True
elif state == 'absent':
if assignment:
kwargs = _build_kwargs(user, group, project, domain)
cloud.revoke_role(role, **kwargs)
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
defance/edx-platform
|
refs/heads/master
|
common/djangoapps/heartbeat/views.py
|
199
|
from xmodule.modulestore.django import modulestore
from dogapi import dog_stats_api
from util.json_request import JsonResponse
from django.db import connection
from django.db.utils import DatabaseError
from xmodule.exceptions import HeartbeatFailure
@dog_stats_api.timed('edxapp.heartbeat')
def heartbeat(request):
"""
    Simple view that a load balancer can check to verify that the app is up. Returns a JSON doc
    of service id: status or message. If the status for any service is anything other than True,
    it returns HTTP code 503 (Service Unavailable); otherwise, it returns 200.
"""
    # This refactoring merely delegates to the default modulestore (which, if it's a mixed
    # modulestore, will delegate to all configured modulestores) and to a quick test of SQL. A
    # later refactoring may allow any service to register itself as participating in the
    # heartbeat. It's important that all implementations do as little as possible but give a
    # sound determination that they are ready.
try:
output = modulestore().heartbeat()
except HeartbeatFailure as fail:
return JsonResponse({fail.service: unicode(fail)}, status=503)
cursor = connection.cursor()
try:
cursor.execute("SELECT CURRENT_DATE")
cursor.fetchone()
output['SQL'] = True
except DatabaseError as fail:
return JsonResponse({'SQL': unicode(fail)}, status=503)
return JsonResponse(output)
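# A minimal usage sketch (not part of the view): assumes Django's test client is available and
# that the heartbeat view is routed at '/heartbeat', which is deployment-specific. A 200
# response means every service reported True; a 503 means at least one check failed and the
# body carries its error message.
def _example_probe_heartbeat():
    from django.test import Client
    response = Client().get('/heartbeat')
    return response.status_code, response.content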
|
bregman-arie/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/panos/panos_interface.py
|
18
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_interface
short_description: configure data-port network interface for DHCP
description:
- Configure data-port (DP) network interface for DHCP. By default DP interfaces are static.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python can be obtained from PyPi U(https://pypi.python.org/pypi/pan-python)
notes:
- Checkmode is not supported.
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device being configured.
required: true
username:
description:
- Username credentials to use for auth.
default: "admin"
password:
description:
- Password credentials to use for auth.
required: true
if_name:
description:
- Name of the interface to configure.
required: true
zone_name:
description: >
            Name of the zone for the interface. If the zone does not exist, it is created; but if the zone
            exists and is not of the layer3 type, the operation will fail.
required: true
create_default_route:
description:
            - Whether or not to add a default route using the router learned via DHCP.
default: "false"
commit:
description:
- Commit if changed
default: true
'''
EXAMPLES = '''
- name: enable DHCP client on ethernet1/1 in zone public
interface:
password: "admin"
ip_address: "192.168.1.1"
if_name: "ethernet1/1"
zone_name: "public"
create_default_route: "yes"
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
try:
import pan.xapi
from pan.xapi import PanXapiError
HAS_LIB = True
except ImportError:
HAS_LIB = False
_IF_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
"/network/interface/ethernet/entry[@name='%s']"
_ZONE_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
"/vsys/entry/zone/entry"
_ZONE_XPATH_QUERY = _ZONE_XPATH + "[network/layer3/member/text()='%s']"
_ZONE_XPATH_IF = _ZONE_XPATH + "[@name='%s']/network/layer3/member[text()='%s']"
_VR_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
"/network/virtual-router/entry"
def add_dhcp_if(xapi, if_name, zone_name, create_default_route):
    if_xml = [
        '<entry name="%s">',
        '<layer3>',
        '<dhcp-client>',
        '<create-default-route>%s</create-default-route>',
        '</dhcp-client>',
        '</layer3>',
        '</entry>'
    ]
cdr = 'yes'
if not create_default_route:
cdr = 'no'
if_xml = (''.join(if_xml)) % (if_name, cdr)
xapi.edit(xpath=_IF_XPATH % if_name, element=if_xml)
xapi.set(xpath=_ZONE_XPATH + "[@name='%s']/network/layer3" % zone_name,
element='<member>%s</member>' % if_name)
xapi.set(xpath=_VR_XPATH + "[@name='default']/interface",
element='<member>%s</member>' % if_name)
return True
def if_exists(xapi, if_name):
xpath = _IF_XPATH % if_name
xapi.get(xpath=xpath)
network = xapi.element_root.find('.//layer3')
return (network is not None)
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(required=True, no_log=True),
username=dict(default='admin'),
if_name=dict(required=True),
zone_name=dict(required=True),
create_default_route=dict(type='bool', default=False),
commit=dict(type='bool', default=True)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_LIB:
module.fail_json(msg='pan-python is required for this module')
ip_address = module.params["ip_address"]
password = module.params["password"]
username = module.params['username']
xapi = pan.xapi.PanXapi(
hostname=ip_address,
api_username=username,
api_password=password
)
if_name = module.params['if_name']
zone_name = module.params['zone_name']
create_default_route = module.params['create_default_route']
commit = module.params['commit']
ifexists = if_exists(xapi, if_name)
if ifexists:
module.exit_json(changed=False, msg="interface exists, not changed")
try:
changed = add_dhcp_if(xapi, if_name, zone_name, create_default_route)
except PanXapiError as exc:
module.fail_json(msg=to_native(exc))
if changed and commit:
xapi.commit(cmd="<commit></commit>", sync=True, interval=1)
module.exit_json(changed=changed, msg="okey dokey")
if __name__ == '__main__':
main()
|
zerkrx/zerkbox
|
refs/heads/develop
|
lib/youtube_dl/extractor/dreisat.py
|
40
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
xpath_text,
determine_ext,
qualities,
float_or_none,
ExtractorError,
)
class DreiSatIE(InfoExtractor):
IE_NAME = '3sat'
_VALID_URL = r'(?:https?://)?(?:www\.)?3sat\.de/mediathek/(?:index\.php|mediathek\.php)?\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)$'
_TESTS = [
{
'url': 'http://www.3sat.de/mediathek/index.php?mode=play&obj=45918',
'md5': 'be37228896d30a88f315b638900a026e',
'info_dict': {
'id': '45918',
'ext': 'mp4',
'title': 'Waidmannsheil',
'description': 'md5:cce00ca1d70e21425e72c86a98a56817',
'uploader': 'SCHWEIZWEIT',
'uploader_id': '100000210',
'upload_date': '20140913'
},
'params': {
'skip_download': True, # m3u8 downloads
}
},
{
'url': 'http://www.3sat.de/mediathek/mediathek.php?mode=play&obj=51066',
'only_matching': True,
},
]
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
param_groups = {}
for param_group in smil.findall(self._xpath_ns('./head/paramGroup', namespace)):
group_id = param_group.attrib.get(self._xpath_ns('id', 'http://www.w3.org/XML/1998/namespace'))
params = {}
for param in param_group:
params[param.get('name')] = param.get('value')
param_groups[group_id] = params
formats = []
for video in smil.findall(self._xpath_ns('.//video', namespace)):
src = video.get('src')
if not src:
continue
bitrate = float_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
group_id = video.get('paramGroup')
param_group = param_groups[group_id]
for proto in param_group['protocols'].split(','):
formats.append({
'url': '%s://%s' % (proto, param_group['host']),
'app': param_group['app'],
'play_path': src,
'ext': 'flv',
'format_id': '%s-%d' % (proto, bitrate),
'tbr': bitrate,
})
self._sort_formats(formats)
return formats
def extract_from_xml_url(self, video_id, xml_url):
doc = self._download_xml(
xml_url, video_id,
note='Downloading video info',
errnote='Failed to download video info')
status_code = doc.find('./status/statuscode')
if status_code is not None and status_code.text != 'ok':
code = status_code.text
if code == 'notVisibleAnymore':
message = 'Video %s is not available' % video_id
else:
message = '%s returned error: %s' % (self.IE_NAME, code)
raise ExtractorError(message, expected=True)
title = doc.find('.//information/title').text
description = xpath_text(doc, './/information/detail', 'description')
duration = int_or_none(xpath_text(doc, './/details/lengthSec', 'duration'))
uploader = xpath_text(doc, './/details/originChannelTitle', 'uploader')
uploader_id = xpath_text(doc, './/details/originChannelId', 'uploader id')
upload_date = unified_strdate(xpath_text(doc, './/details/airtime', 'upload date'))
def xml_to_thumbnails(fnode):
thumbnails = []
for node in fnode:
thumbnail_url = node.text
if not thumbnail_url:
continue
thumbnail = {
'url': thumbnail_url,
}
if 'key' in node.attrib:
m = re.match('^([0-9]+)x([0-9]+)$', node.attrib['key'])
if m:
thumbnail['width'] = int(m.group(1))
thumbnail['height'] = int(m.group(2))
thumbnails.append(thumbnail)
return thumbnails
thumbnails = xml_to_thumbnails(doc.findall('.//teaserimages/teaserimage'))
format_nodes = doc.findall('.//formitaeten/formitaet')
quality = qualities(['veryhigh', 'high', 'med', 'low'])
def get_quality(elem):
return quality(xpath_text(elem, 'quality'))
format_nodes.sort(key=get_quality)
format_ids = []
formats = []
for fnode in format_nodes:
video_url = fnode.find('url').text
is_available = 'http://www.metafilegenerator' not in video_url
if not is_available:
continue
format_id = fnode.attrib['basetype']
quality = xpath_text(fnode, './quality', 'quality')
format_m = re.match(r'''(?x)
(?P<vcodec>[^_]+)_(?P<acodec>[^_]+)_(?P<container>[^_]+)_
(?P<proto>[^_]+)_(?P<index>[^_]+)_(?P<indexproto>[^_]+)
''', format_id)
ext = determine_ext(video_url, None) or format_m.group('container')
if ext not in ('smil', 'f4m', 'm3u8'):
format_id = format_id + '-' + quality
if format_id in format_ids:
continue
if ext == 'meta':
continue
elif ext == 'smil':
formats.extend(self._extract_smil_formats(
video_url, video_id, fatal=False))
elif ext == 'm3u8':
# the certificates are misconfigured (see
# https://github.com/rg3/youtube-dl/issues/8665)
if video_url.startswith('https://'):
continue
formats.extend(self._extract_m3u8_formats(
video_url, video_id, 'mp4', m3u8_id=format_id, fatal=False))
elif ext == 'f4m':
formats.extend(self._extract_f4m_formats(
video_url, video_id, f4m_id=format_id, fatal=False))
else:
proto = format_m.group('proto').lower()
abr = int_or_none(xpath_text(fnode, './audioBitrate', 'abr'), 1000)
vbr = int_or_none(xpath_text(fnode, './videoBitrate', 'vbr'), 1000)
width = int_or_none(xpath_text(fnode, './width', 'width'))
height = int_or_none(xpath_text(fnode, './height', 'height'))
filesize = int_or_none(xpath_text(fnode, './filesize', 'filesize'))
                format_note = None
formats.append({
'format_id': format_id,
'url': video_url,
'ext': ext,
'acodec': format_m.group('acodec'),
'vcodec': format_m.group('vcodec'),
'abr': abr,
'vbr': vbr,
'width': width,
'height': height,
'filesize': filesize,
'format_note': format_note,
'protocol': proto,
'_available': is_available,
})
format_ids.append(format_id)
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'duration': duration,
'thumbnails': thumbnails,
'uploader': uploader,
'uploader_id': uploader_id,
'upload_date': upload_date,
'formats': formats,
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
details_url = 'http://www.3sat.de/mediathek/xmlservice/web/beitragsDetails?ak=web&id=%s' % video_id
return self.extract_from_xml_url(video_id, details_url)
|
Azure/azure-sdk-for-python
|
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
|
sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2020_03_01/operations/_iot_hub_resource_operations.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class IotHubResourceOperations(object):
"""IotHubResourceOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.iothub.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.IotHubDescription"
"""Get the non-security related metadata of an IoT hub.
Get the non-security related metadata of an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IotHubDescription, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2020_03_01.models.IotHubDescription
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
iot_hub_description, # type: "_models.IotHubDescription"
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.IotHubDescription"
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(iot_hub_description, 'IotHubDescription')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
resource_name, # type: str
iot_hub_description, # type: "_models.IotHubDescription"
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.IotHubDescription"]
"""Create or update the metadata of an IoT hub.
        Create or update the metadata of an IoT hub. The usual pattern to modify a property is to
retrieve the IoT hub metadata and security metadata, and then combine them with the modified
values in a new body to update the IoT hub. If certain properties are missing in the JSON,
updating IoT Hub may cause these values to fallback to default, which may lead to unexpected
behavior.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param iot_hub_description: The IoT hub metadata and security metadata.
:type iot_hub_description: ~azure.mgmt.iothub.v2020_03_01.models.IotHubDescription
:param if_match: ETag of the IoT Hub. Do not specify for creating a brand new IoT Hub. Required
to update an existing IoT Hub.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either IotHubDescription or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.iothub.v2020_03_01.models.IotHubDescription]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescription"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
iot_hub_description=iot_hub_description,
if_match=if_match,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
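    # Usage sketch (assumptions: ``iothub_client`` is an already-constructed management client
    # exposing this operation group as ``iot_hub_resource``, and the resource names are
    # placeholders). begin_create_or_update() returns an LROPoller; ``.result()`` blocks until
    # the hub has been created or updated:
    #
    #     poller = iothub_client.iot_hub_resource.begin_create_or_update(
    #         "my-resource-group", "my-iot-hub", iot_hub_description)
    #     hub = poller.result()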
def _update_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
iot_hub_tags, # type: "_models.TagsResource"
**kwargs # type: Any
):
# type: (...) -> "_models.IotHubDescription"
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(iot_hub_tags, 'TagsResource')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
def begin_update(
self,
resource_group_name, # type: str
resource_name, # type: str
iot_hub_tags, # type: "_models.TagsResource"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.IotHubDescription"]
"""Update an existing IoT Hubs tags.
Update an existing IoT Hub tags. to update other fields use the CreateOrUpdate method.
:param resource_group_name: Resource group identifier.
:type resource_group_name: str
:param resource_name: Name of iot hub to update.
:type resource_name: str
:param iot_hub_tags: Updated tag information to set into the iot hub instance.
:type iot_hub_tags: ~azure.mgmt.iothub.v2020_03_01.models.TagsResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either IotHubDescription or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.iothub.v2020_03_01.models.IotHubDescription]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescription"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
iot_hub_tags=iot_hub_tags,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional[Union["_models.IotHubDescription", "_models.ErrorDetails"]]
cls = kwargs.pop('cls', None) # type: ClsType[Optional[Union["_models.IotHubDescription", "_models.ErrorDetails"]]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if response.status_code == 404:
deserialized = self._deserialize('ErrorDetails', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[Union["_models.IotHubDescription", "_models.ErrorDetails"]]
"""Delete an IoT hub.
Delete an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either IotHubDescription or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.iothub.v2020_03_01.models.IotHubDescription]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[Union["_models.IotHubDescription", "_models.ErrorDetails"]]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
def list_by_subscription(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.IotHubDescriptionListResult"]
"""Get all the IoT hubs in a subscription.
Get all the IoT hubs in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IotHubDescriptionListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2020_03_01.models.IotHubDescriptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescriptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('IotHubDescriptionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Devices/IotHubs'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.IotHubDescriptionListResult"]
"""Get all the IoT hubs in a resource group.
Get all the IoT hubs in a resource group.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IotHubDescriptionListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2020_03_01.models.IotHubDescriptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescriptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('IotHubDescriptionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs'} # type: ignore
def get_stats(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.RegistryStatistics"
"""Get the statistics from an IoT hub.
Get the statistics from an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RegistryStatistics, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2020_03_01.models.RegistryStatistics
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegistryStatistics"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get_stats.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('RegistryStatistics', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/IotHubStats'} # type: ignore
def get_valid_skus(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.IotHubSkuDescriptionListResult"]
"""Get the list of valid SKUs for an IoT hub.
Get the list of valid SKUs for an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IotHubSkuDescriptionListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2020_03_01.models.IotHubSkuDescriptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubSkuDescriptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
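            # On the first call there is no next_link, so the full ARM URL is built below;
            # later pages reuse the service-supplied nextLink unchanged.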
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_valid_skus.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
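            # ItemPaged expects a (continuation token, iterable) pair; a falsy next_link ends the paging loop.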
deserialized = self._deserialize('IotHubSkuDescriptionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
get_valid_skus.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/skus'} # type: ignore
def list_event_hub_consumer_groups(
self,
resource_group_name, # type: str
resource_name, # type: str
event_hub_endpoint_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.EventHubConsumerGroupsListResult"]
"""Get a list of the consumer groups in the Event Hub-compatible device-to-cloud endpoint in an IoT hub.
Get a list of the consumer groups in the Event Hub-compatible device-to-cloud endpoint in an
IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint.
:type event_hub_endpoint_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EventHubConsumerGroupsListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2020_03_01.models.EventHubConsumerGroupsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EventHubConsumerGroupsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_event_hub_consumer_groups.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'eventHubEndpointName': self._serialize.url("event_hub_endpoint_name", event_hub_endpoint_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('EventHubConsumerGroupsListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_event_hub_consumer_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups'} # type: ignore
def get_event_hub_consumer_group(
self,
resource_group_name, # type: str
resource_name, # type: str
event_hub_endpoint_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.EventHubConsumerGroupInfo"
"""Get a consumer group from the Event Hub-compatible device-to-cloud endpoint for an IoT hub.
Get a consumer group from the Event Hub-compatible device-to-cloud endpoint for an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint in the IoT hub.
:type event_hub_endpoint_name: str
:param name: The name of the consumer group to retrieve.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: EventHubConsumerGroupInfo, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2020_03_01.models.EventHubConsumerGroupInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EventHubConsumerGroupInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get_event_hub_consumer_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'eventHubEndpointName': self._serialize.url("event_hub_endpoint_name", event_hub_endpoint_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('EventHubConsumerGroupInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_event_hub_consumer_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}'} # type: ignore
def create_event_hub_consumer_group(
self,
resource_group_name, # type: str
resource_name, # type: str
event_hub_endpoint_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.EventHubConsumerGroupInfo"
"""Add a consumer group to an Event Hub-compatible endpoint in an IoT hub.
Add a consumer group to an Event Hub-compatible endpoint in an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint in the IoT hub.
:type event_hub_endpoint_name: str
:param name: The name of the consumer group to add.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: EventHubConsumerGroupInfo, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2020_03_01.models.EventHubConsumerGroupInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EventHubConsumerGroupInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.create_event_hub_consumer_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'eventHubEndpointName': self._serialize.url("event_hub_endpoint_name", event_hub_endpoint_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('EventHubConsumerGroupInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_event_hub_consumer_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}'} # type: ignore
def delete_event_hub_consumer_group(
self,
resource_group_name, # type: str
resource_name, # type: str
event_hub_endpoint_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete a consumer group from an Event Hub-compatible endpoint in an IoT hub.
Delete a consumer group from an Event Hub-compatible endpoint in an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint in the IoT hub.
:type event_hub_endpoint_name: str
:param name: The name of the consumer group to delete.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.delete_event_hub_consumer_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'eventHubEndpointName': self._serialize.url("event_hub_endpoint_name", event_hub_endpoint_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_event_hub_consumer_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}'} # type: ignore
def list_jobs(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.JobResponseListResult"]
"""Get a list of all the jobs in an IoT hub. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry.
Get a list of all the jobs in an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either JobResponseListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2020_03_01.models.JobResponseListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResponseListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_jobs.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('JobResponseListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_jobs.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/jobs'} # type: ignore
def get_job(
self,
resource_group_name, # type: str
resource_name, # type: str
job_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.JobResponse"
"""Get the details of a job from an IoT hub. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry.
Get the details of a job from an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param job_id: The job identifier.
:type job_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: JobResponse, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2020_03_01.models.JobResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get_job.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'jobId': self._serialize.url("job_id", job_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('JobResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_job.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/jobs/{jobId}'} # type: ignore
def get_quota_metrics(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.IotHubQuotaMetricInfoListResult"]
"""Get the quota metrics for an IoT hub.
Get the quota metrics for an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IotHubQuotaMetricInfoListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2020_03_01.models.IotHubQuotaMetricInfoListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubQuotaMetricInfoListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_quota_metrics.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('IotHubQuotaMetricInfoListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
get_quota_metrics.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/quotaMetrics'} # type: ignore
def get_endpoint_health(
self,
resource_group_name, # type: str
iot_hub_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.EndpointHealthDataListResult"]
"""Get the health for routing endpoints.
Get the health for routing endpoints.
        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param iot_hub_name: The name of the IoT hub.
        :type iot_hub_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EndpointHealthDataListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2020_03_01.models.EndpointHealthDataListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EndpointHealthDataListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_endpoint_health.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'iotHubName': self._serialize.url("iot_hub_name", iot_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('EndpointHealthDataListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
get_endpoint_health.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{iotHubName}/routingEndpointsHealth'} # type: ignore
def check_name_availability(
self,
operation_inputs, # type: "_models.OperationInputs"
**kwargs # type: Any
):
# type: (...) -> "_models.IotHubNameAvailabilityInfo"
"""Check if an IoT hub name is available.
Check if an IoT hub name is available.
:param operation_inputs: Set the name parameter in the OperationInputs structure to the name of
the IoT hub to check.
:type operation_inputs: ~azure.mgmt.iothub.v2020_03_01.models.OperationInputs
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IotHubNameAvailabilityInfo, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2020_03_01.models.IotHubNameAvailabilityInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubNameAvailabilityInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.check_name_availability.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(operation_inputs, 'OperationInputs')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('IotHubNameAvailabilityInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Devices/checkNameAvailability'} # type: ignore
def test_all_routes(
self,
iot_hub_name, # type: str
resource_group_name, # type: str
input, # type: "_models.TestAllRoutesInput"
**kwargs # type: Any
):
# type: (...) -> "_models.TestAllRoutesResult"
"""Test all routes.
Test all routes configured in this Iot Hub.
:param iot_hub_name: IotHub to be tested.
:type iot_hub_name: str
:param resource_group_name: resource group which Iot Hub belongs to.
:type resource_group_name: str
:param input: Input for testing all routes.
:type input: ~azure.mgmt.iothub.v2020_03_01.models.TestAllRoutesInput
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TestAllRoutesResult, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2020_03_01.models.TestAllRoutesResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TestAllRoutesResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.test_all_routes.metadata['url'] # type: ignore
path_format_arguments = {
'iotHubName': self._serialize.url("iot_hub_name", iot_hub_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(input, 'TestAllRoutesInput')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('TestAllRoutesResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
test_all_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{iotHubName}/routing/routes/$testall'} # type: ignore
def test_route(
self,
iot_hub_name, # type: str
resource_group_name, # type: str
input, # type: "_models.TestRouteInput"
**kwargs # type: Any
):
# type: (...) -> "_models.TestRouteResult"
"""Test the new route.
Test the new route for this Iot Hub.
:param iot_hub_name: IotHub to be tested.
:type iot_hub_name: str
:param resource_group_name: resource group which Iot Hub belongs to.
:type resource_group_name: str
:param input: Route that needs to be tested.
:type input: ~azure.mgmt.iothub.v2020_03_01.models.TestRouteInput
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TestRouteResult, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2020_03_01.models.TestRouteResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TestRouteResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.test_route.metadata['url'] # type: ignore
path_format_arguments = {
'iotHubName': self._serialize.url("iot_hub_name", iot_hub_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(input, 'TestRouteInput')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('TestRouteResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
test_route.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{iotHubName}/routing/routes/$testnew'} # type: ignore
def list_keys(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SharedAccessSignatureAuthorizationRuleListResult"]
"""Get the security metadata for an IoT hub. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security.
Get the security metadata for an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SharedAccessSignatureAuthorizationRuleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2020_03_01.models.SharedAccessSignatureAuthorizationRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedAccessSignatureAuthorizationRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_keys.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
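                    # listkeys is an ARM POST action, so the first page is requested with POST;
                    # nextLink pages in the else branch below are fetched with GET.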
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SharedAccessSignatureAuthorizationRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/listkeys'} # type: ignore
def get_keys_for_key_name(
self,
resource_group_name, # type: str
resource_name, # type: str
key_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.SharedAccessSignatureAuthorizationRule"
"""Get a shared access policy by name from an IoT hub. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security.
Get a shared access policy by name from an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param key_name: The name of the shared access policy.
:type key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SharedAccessSignatureAuthorizationRule, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2020_03_01.models.SharedAccessSignatureAuthorizationRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedAccessSignatureAuthorizationRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get_keys_for_key_name.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'keyName': self._serialize.url("key_name", key_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SharedAccessSignatureAuthorizationRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_keys_for_key_name.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/IotHubKeys/{keyName}/listkeys'} # type: ignore
def export_devices(
self,
resource_group_name, # type: str
resource_name, # type: str
export_devices_parameters, # type: "_models.ExportDevicesRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.JobResponse"
"""Exports all the device identities in the IoT hub identity registry to an Azure Storage blob container. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities.
Exports all the device identities in the IoT hub identity registry to an Azure Storage blob
container. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param export_devices_parameters: The parameters that specify the export devices operation.
:type export_devices_parameters: ~azure.mgmt.iothub.v2020_03_01.models.ExportDevicesRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: JobResponse, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2020_03_01.models.JobResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.export_devices.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(export_devices_parameters, 'ExportDevicesRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('JobResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/exportDevices'} # type: ignore
def import_devices(
self,
resource_group_name, # type: str
resource_name, # type: str
import_devices_parameters, # type: "_models.ImportDevicesRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.JobResponse"
"""Import, update, or delete device identities in the IoT hub identity registry from a blob. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities.
Import, update, or delete device identities in the IoT hub identity registry from a blob. For
more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param import_devices_parameters: The parameters that specify the import devices operation.
:type import_devices_parameters: ~azure.mgmt.iothub.v2020_03_01.models.ImportDevicesRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: JobResponse, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2020_03_01.models.JobResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.import_devices.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(import_devices_parameters, 'ImportDevicesRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('JobResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
import_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/importDevices'} # type: ignore
|
postlund/home-assistant
|
refs/heads/dev
|
homeassistant/components/russound_rio/media_player.py
|
2
|
"""Support for Russound multizone controllers using RIO Protocol."""
import logging
from russound_rio import Russound
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerDevice
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
EVENT_HOMEASSISTANT_STOP,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
SUPPORT_RUSSOUND = (
SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_SET
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_SELECT_SOURCE
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_PORT, default=9621): cv.port,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Russound RIO platform."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
russ = Russound(hass.loop, host, port)
await russ.connect()
# Discover sources and zones
sources = await russ.enumerate_sources()
valid_zones = await russ.enumerate_zones()
devices = []
for zone_id, name in valid_zones:
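        # Subscribe to push updates for this zone so the entity does not need polling.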
await russ.watch_zone(zone_id)
dev = RussoundZoneDevice(russ, zone_id, name, sources)
devices.append(dev)
@callback
def on_stop(event):
"""Shutdown cleanly when hass stops."""
hass.loop.create_task(russ.close())
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, on_stop)
async_add_entities(devices)
class RussoundZoneDevice(MediaPlayerDevice):
"""Representation of a Russound Zone."""
def __init__(self, russ, zone_id, name, sources):
"""Initialize the zone device."""
super().__init__()
self._name = name
self._russ = russ
self._zone_id = zone_id
self._sources = sources
def _zone_var(self, name, default=None):
return self._russ.get_cached_zone_variable(self._zone_id, name, default)
def _source_var(self, name, default=None):
current = int(self._zone_var("currentsource", 0))
if current:
return self._russ.get_cached_source_variable(current, name, default)
return default
def _source_na_var(self, name):
"""Will replace invalid values with None."""
current = int(self._zone_var("currentsource", 0))
if current:
value = self._russ.get_cached_source_variable(current, name, None)
if value in (None, "", "------"):
return None
return value
return None
def _zone_callback_handler(self, zone_id, *args):
if zone_id == self._zone_id:
self.schedule_update_ha_state()
def _source_callback_handler(self, source_id, *args):
current = int(self._zone_var("currentsource", 0))
if source_id == current:
self.schedule_update_ha_state()
async def async_added_to_hass(self):
"""Register callback handlers."""
self._russ.add_zone_callback(self._zone_callback_handler)
self._russ.add_source_callback(self._source_callback_handler)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the zone."""
return self._zone_var("name", self._name)
@property
def state(self):
"""Return the state of the device."""
status = self._zone_var("status", "OFF")
if status == "ON":
return STATE_ON
if status == "OFF":
return STATE_OFF
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_RUSSOUND
@property
def source(self):
"""Get the currently selected source."""
return self._source_na_var("name")
@property
def source_list(self):
"""Return a list of available input sources."""
return [x[1] for x in self._sources]
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_title(self):
"""Title of current playing media."""
return self._source_na_var("songname")
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return self._source_na_var("artistname")
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self._source_na_var("albumname")
@property
def media_image_url(self):
"""Image url of current playing media."""
return self._source_na_var("coverarturl")
@property
def volume_level(self):
"""Volume level of the media player (0..1).
Value is returned based on a range (0..50).
Therefore float divide by 50 to get to the required range.
"""
return float(self._zone_var("volume", 0)) / 50.0
async def async_turn_off(self):
"""Turn off the zone."""
await self._russ.send_zone_event(self._zone_id, "ZoneOff")
async def async_turn_on(self):
"""Turn on the zone."""
await self._russ.send_zone_event(self._zone_id, "ZoneOn")
async def async_set_volume_level(self, volume):
"""Set the volume level."""
rvol = int(volume * 50.0)
await self._russ.send_zone_event(self._zone_id, "KeyPress", "Volume", rvol)
async def async_select_source(self, source):
"""Select the source input for this zone."""
for source_id, name in self._sources:
if name.lower() != source.lower():
continue
await self._russ.send_zone_event(self._zone_id, "SelectSource", source_id)
break
|
Matrixeigs/Optimization
|
refs/heads/master
|
energy_hub/bidding_strategy/__init__.py
|
1
|
"""
The bidding strategy strategy for energy hubs
This function is to provide
1) a deterministic day-ahead bidding strategy for hybrid AC/DC multiple micro-grids
2) a stochastic bidding strategy
3) start-up and shut-down of resources
4) decomposition algorithm, Benders decomposition
"""
import sys
sys.path.append('/home/matrix/PycharmProjects/Optimization/solvers')
|
songfj/calibre
|
refs/heads/master
|
src/cherrypy/wsgiserver/__init__.py
|
238
|
__all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer',
'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile',
'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert',
'WorkerThread', 'ThreadPool', 'SSLAdapter',
'CherryPyWSGIServer',
'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0',
'WSGIPathInfoDispatcher', 'get_ssl_adapter_class']
import sys
if sys.version_info < (3, 0):
from wsgiserver2 import *
else:
# Le sigh. Boo for backward-incompatible syntax.
exec('from .wsgiserver3 import *')
|
shenlong3030/asv-django-guestbook
|
refs/heads/master
|
djangoappengine/main/main.py
|
11
|
import os
import sys
# Add parent folder to sys.path, so we can import boot.
# App Engine causes main.py to be reloaded if an exception gets raised
# on the first request of a main.py instance, so don't add project_dir multiple
# times.
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
if project_dir not in sys.path or sys.path.index(project_dir) > 0:
while project_dir in sys.path:
sys.path.remove(project_dir)
sys.path.insert(0, project_dir)
for path in sys.path[:]:
if path != project_dir and os.path.isdir(os.path.join(path, 'django')):
sys.path.remove(path)
break
# Remove the standard version of Django.
if 'django' in sys.modules and sys.modules['django'].VERSION < (1, 2):
for k in [k for k in sys.modules
              if k.startswith('django.') or k == 'django']:
del sys.modules[k]
from djangoappengine.boot import setup_env, setup_logging, env_ext
setup_env()
from django.core.handlers.wsgi import WSGIHandler
from google.appengine.ext.webapp.util import run_wsgi_app
from django.conf import settings
def log_traceback(*args, **kwargs):
import logging
logging.exception('Exception in request:')
from django.core import signals
signals.got_request_exception.connect(log_traceback)
def real_main():
# Reset path and environment variables
global path_backup
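    # path_backup does not exist on the first request; the except branch below creates it
    # from the current sys.path so later requests can restore it.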
try:
sys.path = path_backup[:]
except:
path_backup = sys.path[:]
os.environ.update(env_ext)
setup_logging()
# Create a Django application for WSGI.
application = WSGIHandler()
# Run the WSGI CGI handler with that application.
run_wsgi_app(application)
def profile_main():
import logging, cProfile, pstats, random, StringIO
only_forced_profile = getattr(settings, 'ONLY_FORCED_PROFILE', False)
profile_percentage = getattr(settings, 'PROFILE_PERCENTAGE', None)
if (only_forced_profile and
            'profile=forced' not in os.environ.get('QUERY_STRING', '')) or \
(not only_forced_profile and profile_percentage and
float(profile_percentage) / 100.0 <= random.random()):
return real_main()
prof = cProfile.Profile()
prof = prof.runctx('real_main()', globals(), locals())
stream = StringIO.StringIO()
stats = pstats.Stats(prof, stream=stream)
sort_by = getattr(settings, 'SORT_PROFILE_RESULTS_BY', 'time')
if not isinstance(sort_by, (list, tuple)):
sort_by = (sort_by,)
stats.sort_stats(*sort_by)
restrictions = []
profile_pattern = getattr(settings, 'PROFILE_PATTERN', None)
if profile_pattern:
restrictions.append(profile_pattern)
max_results = getattr(settings, 'MAX_PROFILE_RESULTS', 80)
if max_results and max_results != 'all':
restrictions.append(max_results)
stats.print_stats(*restrictions)
extra_output = getattr(settings, 'EXTRA_PROFILE_OUTPUT', None) or ()
    if not isinstance(extra_output, (list, tuple)):
extra_output = (extra_output,)
if 'callees' in extra_output:
stats.print_callees()
if 'callers' in extra_output:
stats.print_callers()
logging.info('Profile data:\n%s', stream.getvalue())
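    # Illustrative settings sketch (hedged): the names below are the ones read
    # via getattr() in this function and in the ENABLE_PROFILER check further
    # down; the values are hypothetical examples, not project defaults.
    #
    #     ENABLE_PROFILER = True
    #     ONLY_FORCED_PROFILE = False
    #     PROFILE_PERCENTAGE = 10               # profile roughly 10% of requests
    #     SORT_PROFILE_RESULTS_BY = 'cumulative'
    #     PROFILE_PATTERN = 'views'
    #     MAX_PROFILE_RESULTS = 80
    #     EXTRA_PROFILE_OUTPUT = ('callees', 'callers')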
main = getattr(settings, 'ENABLE_PROFILER', False) and profile_main or real_main
if __name__ == '__main__':
main()
|
openstack/oslo.vmware
|
refs/heads/master
|
oslo_vmware/tests/test_rw_handles.py
|
1
|
# Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for read and write handles for image transfer."""
import ssl
from unittest import mock
import requests
from oslo_vmware import exceptions
from oslo_vmware import rw_handles
from oslo_vmware.tests import base
from oslo_vmware import vim_util
class FileHandleTest(base.TestCase):
"""Tests for FileHandle."""
def test_close(self):
file_handle = mock.Mock()
vmw_http_file = rw_handles.FileHandle(file_handle)
vmw_http_file.close()
file_handle.close.assert_called_once_with()
@mock.patch('urllib3.connection.HTTPConnection')
def test_create_connection_http(self, http_conn):
conn = mock.Mock()
http_conn.return_value = conn
handle = rw_handles.FileHandle(None)
ret = handle._create_connection('http://localhost/foo?q=bar', 'GET')
self.assertEqual(conn, ret)
conn.putrequest.assert_called_once_with('GET', '/foo?q=bar')
@mock.patch('urllib3.connection.HTTPSConnection')
def test_create_connection_https(self, https_conn):
conn = mock.Mock()
https_conn.return_value = conn
handle = rw_handles.FileHandle(None)
ret = handle._create_connection('https://localhost/foo?q=bar', 'GET')
self.assertEqual(conn, ret)
ca_store = requests.certs.where()
conn.set_cert.assert_called_once_with(
ca_certs=ca_store, cert_reqs=ssl.CERT_NONE,
assert_fingerprint=None)
conn.putrequest.assert_called_once_with('GET', '/foo?q=bar')
@mock.patch('urllib3.connection.HTTPSConnection')
def test_create_connection_https_with_cacerts(self, https_conn):
conn = mock.Mock()
https_conn.return_value = conn
handle = rw_handles.FileHandle(None)
ret = handle._create_connection('https://localhost/foo?q=bar', 'GET',
cacerts=True)
self.assertEqual(conn, ret)
ca_store = requests.certs.where()
conn.set_cert.assert_called_once_with(
ca_certs=ca_store, cert_reqs=ssl.CERT_REQUIRED,
assert_fingerprint=None)
@mock.patch('urllib3.connection.HTTPSConnection')
def test_create_connection_https_with_ssl_thumbprint(self, https_conn):
conn = mock.Mock()
https_conn.return_value = conn
handle = rw_handles.FileHandle(None)
cacerts = mock.sentinel.cacerts
thumbprint = mock.sentinel.thumbprint
ret = handle._create_connection('https://localhost/foo?q=bar', 'GET',
cacerts=cacerts,
ssl_thumbprint=thumbprint)
self.assertEqual(conn, ret)
conn.set_cert.assert_called_once_with(
ca_certs=cacerts, cert_reqs=None, assert_fingerprint=thumbprint)
class FileWriteHandleTest(base.TestCase):
"""Tests for FileWriteHandle."""
def setUp(self):
super(FileWriteHandleTest, self).setUp()
vim_cookie = mock.Mock()
vim_cookie.name = 'name'
vim_cookie.value = 'value'
self._conn = mock.Mock()
patcher = mock.patch(
'urllib3.connection.HTTPConnection')
self.addCleanup(patcher.stop)
HTTPConnectionMock = patcher.start()
HTTPConnectionMock.return_value = self._conn
self.vmw_http_write_file = rw_handles.FileWriteHandle(
'10.1.2.3', 443, 'dc-0', 'ds-0', [vim_cookie], '1.vmdk', 100,
'http')
def test_write(self):
self.vmw_http_write_file.write(None)
self._conn.send.assert_called_once_with(None)
def test_close(self):
self.vmw_http_write_file.close()
self._conn.getresponse.assert_called_once_with()
self._conn.close.assert_called_once_with()
class VmdkHandleTest(base.TestCase):
"""Tests for VmdkHandle."""
def test_find_vmdk_url(self):
device_url_0 = mock.Mock()
device_url_0.disk = False
device_url_1 = mock.Mock()
device_url_1.disk = True
device_url_1.url = 'https://*/ds1/vm1.vmdk'
device_url_1.sslThumbprint = '11:22:33:44:55'
lease_info = mock.Mock()
lease_info.deviceUrl = [device_url_0, device_url_1]
host = '10.1.2.3'
port = 443
exp_url = 'https://%s:%d/ds1/vm1.vmdk' % (host, port)
vmw_http_file = rw_handles.VmdkHandle(None, None, None, None)
url, thumbprint = vmw_http_file._find_vmdk_url(lease_info, host, port)
self.assertEqual(exp_url, url)
self.assertEqual('11:22:33:44:55', thumbprint)
def test_update_progress(self):
session = mock.Mock()
lease = mock.Mock()
handle = rw_handles.VmdkHandle(session, lease, 'fake-url', None)
handle._get_progress = mock.Mock(return_value=50)
handle.update_progress()
session.invoke_api.assert_called_once_with(session.vim,
'HttpNfcLeaseProgress',
lease, percent=50)
def test_update_progress_with_error(self):
session = mock.Mock()
handle = rw_handles.VmdkHandle(session, None, 'fake-url', None)
handle._get_progress = mock.Mock(return_value=0)
session.invoke_api.side_effect = exceptions.VimException(None)
self.assertRaises(exceptions.VimException, handle.update_progress)
def test_fileno(self):
session = mock.Mock()
handle = rw_handles.VmdkHandle(session, None, 'fake-url', None)
self.assertRaises(IOError, handle.fileno)
def test_release_lease_incomplete_transfer(self):
session = mock.Mock()
handle = rw_handles.VmdkHandle(session, None, 'fake-url', None)
handle._get_progress = mock.Mock(return_value=99)
session.invoke_api = mock.Mock()
handle._release_lease()
session.invoke_api.assert_called_with(handle._session.vim,
'HttpNfcLeaseAbort',
handle._lease)
class VmdkWriteHandleTest(base.TestCase):
"""Tests for VmdkWriteHandle."""
def setUp(self):
super(VmdkWriteHandleTest, self).setUp()
self._conn = mock.Mock()
patcher = mock.patch(
'urllib3.connection.HTTPConnection')
self.addCleanup(patcher.stop)
HTTPConnectionMock = patcher.start()
HTTPConnectionMock.return_value = self._conn
def _create_mock_session(self, disk=True, progress=-1):
device_url = mock.Mock()
device_url.disk = disk
device_url.url = 'http://*/ds/disk1.vmdk'
lease_info = mock.Mock()
lease_info.deviceUrl = [device_url]
session = mock.Mock()
def session_invoke_api_side_effect(module, method, *args, **kwargs):
if module == session.vim:
if method == 'ImportVApp':
return mock.Mock()
elif method == 'HttpNfcLeaseProgress':
self.assertEqual(progress, kwargs['percent'])
return
return lease_info
session.invoke_api.side_effect = session_invoke_api_side_effect
vim_cookie = mock.Mock()
vim_cookie.name = 'name'
vim_cookie.value = 'value'
session.vim.client.cookiejar = [vim_cookie]
return session
def test_init_failure(self):
session = self._create_mock_session(False)
self.assertRaises(exceptions.VimException,
rw_handles.VmdkWriteHandle,
session,
'10.1.2.3',
443,
'rp-1',
'folder-1',
None,
100)
def test_write(self):
session = self._create_mock_session()
handle = rw_handles.VmdkWriteHandle(session, '10.1.2.3', 443,
'rp-1', 'folder-1', None,
100)
data = [1] * 10
handle.write(data)
self.assertEqual(len(data), handle._bytes_written)
self._conn.putrequest.assert_called_once_with('PUT', '/ds/disk1.vmdk')
self._conn.send.assert_called_once_with(data)
def test_tell(self):
session = self._create_mock_session()
handle = rw_handles.VmdkWriteHandle(session, '10.1.2.3', 443,
'rp-1', 'folder-1', None,
100)
data = [1] * 10
handle.write(data)
self.assertEqual(len(data), handle._bytes_written)
self.assertEqual(len(data), handle.tell())
def test_write_post(self):
session = self._create_mock_session()
handle = rw_handles.VmdkWriteHandle(session, '10.1.2.3', 443,
'rp-1', 'folder-1', None,
100, http_method='POST')
data = [1] * 10
handle.write(data)
self.assertEqual(len(data), handle._bytes_written)
self._conn.putrequest.assert_called_once_with('POST', '/ds/disk1.vmdk')
self._conn.send.assert_called_once_with(data)
def test_update_progress(self):
vmdk_size = 100
data_size = 10
session = self._create_mock_session(True, 10)
handle = rw_handles.VmdkWriteHandle(session, '10.1.2.3', 443,
'rp-1', 'folder-1', None,
vmdk_size)
handle.write([1] * data_size)
handle.update_progress()
def test_close(self):
session = self._create_mock_session()
handle = rw_handles.VmdkWriteHandle(session, '10.1.2.3', 443,
'rp-1', 'folder-1', None,
100)
def session_invoke_api_side_effect(module, method, *args, **kwargs):
if module == vim_util and method == 'get_object_property':
return 'ready'
self.assertEqual(session.vim, module)
self.assertEqual('HttpNfcLeaseComplete', method)
session.invoke_api = mock.Mock(
side_effect=session_invoke_api_side_effect)
handle._get_progress = mock.Mock(return_value=100)
handle.close()
self.assertEqual(2, session.invoke_api.call_count)
def test_get_vm_incomplete_transfer(self):
session = self._create_mock_session()
handle = rw_handles.VmdkWriteHandle(session, '10.1.2.3', 443, 'rp-1',
'folder-1', None, 100)
handle._get_progress = mock.Mock(return_value=99)
session.invoke_api = mock.Mock()
self.assertRaises(exceptions.ImageTransferException,
handle.get_imported_vm)
class VmdkReadHandleTest(base.TestCase):
"""Tests for VmdkReadHandle."""
def setUp(self):
super(VmdkReadHandleTest, self).setUp()
def _mock_connection(self, read_data='fake-data'):
self._resp = mock.Mock()
self._resp.read.return_value = read_data
self._conn = mock.Mock()
self._conn.getresponse.return_value = self._resp
patcher = mock.patch(
'urllib3.connection.HTTPConnection')
self.addCleanup(patcher.stop)
HTTPConnectionMock = patcher.start()
HTTPConnectionMock.return_value = self._conn
def _create_mock_session(self, disk=True, progress=-1,
read_data='fake-data'):
self._mock_connection(read_data=read_data)
device_url = mock.Mock()
device_url.disk = disk
device_url.url = 'http://*/ds/disk1.vmdk'
lease_info = mock.Mock()
lease_info.deviceUrl = [device_url]
session = mock.Mock()
def session_invoke_api_side_effect(module, method, *args, **kwargs):
if module == session.vim:
if method == 'ExportVm':
return mock.Mock()
elif method == 'HttpNfcLeaseProgress':
self.assertEqual(progress, kwargs['percent'])
return
return lease_info
session.invoke_api.side_effect = session_invoke_api_side_effect
vim_cookie = mock.Mock()
vim_cookie.name = 'name'
vim_cookie.value = 'value'
session.vim.client.cookiejar = [vim_cookie]
return session
def test_init_failure(self):
session = self._create_mock_session(False)
self.assertRaises(exceptions.VimException,
rw_handles.VmdkReadHandle,
session,
'10.1.2.3',
443,
'vm-1',
'[ds] disk1.vmdk',
100)
def test_read(self):
chunk_size = rw_handles.READ_CHUNKSIZE
session = self._create_mock_session()
handle = rw_handles.VmdkReadHandle(session, '10.1.2.3', 443,
'vm-1', '[ds] disk1.vmdk',
chunk_size * 10)
fake_data = 'fake-data'
data = handle.read(chunk_size)
self.assertEqual(fake_data, data)
self.assertEqual(len(fake_data), handle._bytes_read)
def test_read_small(self):
read_data = 'fake'
session = self._create_mock_session(read_data=read_data)
read_size = len(read_data)
handle = rw_handles.VmdkReadHandle(session, '10.1.2.3', 443,
'vm-1', '[ds] disk1.vmdk',
read_size * 10)
handle.read(read_size)
self.assertEqual(read_size, handle._bytes_read)
def test_tell(self):
chunk_size = rw_handles.READ_CHUNKSIZE
session = self._create_mock_session()
handle = rw_handles.VmdkReadHandle(session, '10.1.2.3', 443,
'vm-1', '[ds] disk1.vmdk',
chunk_size * 10)
data = handle.read(chunk_size)
self.assertEqual(len(data), handle.tell())
def test_update_progress(self):
chunk_size = len('fake-data')
vmdk_size = chunk_size * 10
session = self._create_mock_session(True, 10)
handle = rw_handles.VmdkReadHandle(session, '10.1.2.3', 443,
'vm-1', '[ds] disk1.vmdk',
vmdk_size)
data = handle.read(chunk_size)
handle.update_progress()
self.assertEqual('fake-data', data)
def test_close(self):
session = self._create_mock_session()
handle = rw_handles.VmdkReadHandle(session, '10.1.2.3', 443,
'vm-1', '[ds] disk1.vmdk',
100)
def session_invoke_api_side_effect(module, method, *args, **kwargs):
if module == vim_util and method == 'get_object_property':
return 'ready'
self.assertEqual(session.vim, module)
self.assertEqual('HttpNfcLeaseComplete', method)
session.invoke_api = mock.Mock(
side_effect=session_invoke_api_side_effect)
handle._get_progress = mock.Mock(return_value=100)
handle.close()
self.assertEqual(2, session.invoke_api.call_count)
def test_close_with_error(self):
session = self._create_mock_session()
handle = rw_handles.VmdkReadHandle(session, '10.1.2.3', 443,
'vm-1', '[ds] disk1.vmdk',
100)
session.invoke_api.side_effect = exceptions.VimException(None)
self.assertRaises(exceptions.VimException, handle.close)
self._resp.close.assert_called_once_with()
class ImageReadHandleTest(base.TestCase):
"""Tests for ImageReadHandle."""
def test_read(self):
max_items = 10
item = [1] * 10
class ImageReadIterator(object):
def __init__(self):
self.num_items = 0
def __iter__(self):
return self
def __next__(self):
if (self.num_items < max_items):
self.num_items += 1
return item
raise StopIteration
next = __next__
handle = rw_handles.ImageReadHandle(ImageReadIterator())
for _ in range(0, max_items):
self.assertEqual(item, handle.read(10))
self.assertFalse(handle.read(10))
|
hydrospanner/DForurm
|
refs/heads/master
|
DForurm/env/Lib/site-packages/django/contrib/admin/views/main.py
|
49
|
import sys
from collections import OrderedDict
from django.contrib.admin import FieldListFilter
from django.contrib.admin.exceptions import (
DisallowedModelAdminLookup, DisallowedModelAdminToField,
)
from django.contrib.admin.options import (
IS_POPUP_VAR, TO_FIELD_VAR, IncorrectLookupParameters,
)
from django.contrib.admin.utils import (
get_fields_from_path, lookup_needs_distinct, prepare_lookup_value, quote,
)
from django.core.exceptions import (
FieldDoesNotExist, ImproperlyConfigured, SuspiciousOperation,
)
from django.core.paginator import InvalidPage
from django.db import models
from django.urls import reverse
from django.utils import six
from django.utils.encoding import force_text
from django.utils.http import urlencode
from django.utils.translation import ugettext
# Changelist settings
ALL_VAR = 'all'
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
ERROR_FLAG = 'e'
IGNORED_PARAMS = (
ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR, TO_FIELD_VAR)
class ChangeList(object):
def __init__(self, request, model, list_display, list_display_links,
list_filter, date_hierarchy, search_fields, list_select_related,
list_per_page, list_max_show_all, list_editable, model_admin):
self.model = model
self.opts = model._meta
self.lookup_opts = self.opts
self.root_queryset = model_admin.get_queryset(request)
self.list_display = list_display
self.list_display_links = list_display_links
self.list_filter = list_filter
self.date_hierarchy = date_hierarchy
self.search_fields = search_fields
self.list_select_related = list_select_related
self.list_per_page = list_per_page
self.list_max_show_all = list_max_show_all
self.model_admin = model_admin
self.preserved_filters = model_admin.get_preserved_filters(request)
# Get search parameters from the query string.
try:
self.page_num = int(request.GET.get(PAGE_VAR, 0))
except ValueError:
self.page_num = 0
self.show_all = ALL_VAR in request.GET
self.is_popup = IS_POPUP_VAR in request.GET
to_field = request.GET.get(TO_FIELD_VAR)
if to_field and not model_admin.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
self.to_field = to_field
self.params = dict(request.GET.items())
if PAGE_VAR in self.params:
del self.params[PAGE_VAR]
if ERROR_FLAG in self.params:
del self.params[ERROR_FLAG]
if self.is_popup:
self.list_editable = ()
else:
self.list_editable = list_editable
self.query = request.GET.get(SEARCH_VAR, '')
self.queryset = self.get_queryset(request)
self.get_results(request)
if self.is_popup:
title = ugettext('Select %s')
else:
title = ugettext('Select %s to change')
self.title = title % force_text(self.opts.verbose_name)
self.pk_attname = self.lookup_opts.pk.attname
def get_filters_params(self, params=None):
"""
Returns all params except IGNORED_PARAMS
"""
if not params:
params = self.params
lookup_params = params.copy() # a dictionary of the query string
# Remove all the parameters that are globally and systematically
# ignored.
for ignored in IGNORED_PARAMS:
if ignored in lookup_params:
del lookup_params[ignored]
return lookup_params
def get_filters(self, request):
lookup_params = self.get_filters_params()
use_distinct = False
for key, value in lookup_params.items():
if not self.model_admin.lookup_allowed(key, value):
raise DisallowedModelAdminLookup("Filtering by %s not allowed" % key)
filter_specs = []
if self.list_filter:
for list_filter in self.list_filter:
if callable(list_filter):
# This is simply a custom list filter class.
spec = list_filter(request, lookup_params, self.model, self.model_admin)
else:
field_path = None
if isinstance(list_filter, (tuple, list)):
# This is a custom FieldListFilter class for a given field.
field, field_list_filter_class = list_filter
else:
# This is simply a field name, so use the default
# FieldListFilter class that has been registered for
# the type of the given field.
field, field_list_filter_class = list_filter, FieldListFilter.create
if not isinstance(field, models.Field):
field_path = field
field = get_fields_from_path(self.model, field_path)[-1]
lookup_params_count = len(lookup_params)
spec = field_list_filter_class(
field, request, lookup_params,
self.model, self.model_admin, field_path=field_path
)
# field_list_filter_class removes any lookup_params it
# processes. If that happened, check if distinct() is
# needed to remove duplicate results.
if lookup_params_count > len(lookup_params):
use_distinct = use_distinct or lookup_needs_distinct(self.lookup_opts, field_path)
if spec and spec.has_output():
filter_specs.append(spec)
# At this point, all the parameters used by the various ListFilters
# have been removed from lookup_params, which now only contains other
# parameters passed via the query string. We now loop through the
# remaining parameters both to ensure that all the parameters are valid
# fields and to determine if at least one of them needs distinct(). If
# the lookup parameters aren't real fields, then bail out.
try:
for key, value in lookup_params.items():
lookup_params[key] = prepare_lookup_value(key, value)
use_distinct = use_distinct or lookup_needs_distinct(self.lookup_opts, key)
return filter_specs, bool(filter_specs), lookup_params, use_distinct
except FieldDoesNotExist as e:
six.reraise(IncorrectLookupParameters, IncorrectLookupParameters(e), sys.exc_info()[2])
def get_query_string(self, new_params=None, remove=None):
if new_params is None:
new_params = {}
if remove is None:
remove = []
p = self.params.copy()
for r in remove:
for k in list(p):
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return '?%s' % urlencode(sorted(p.items()))
def get_results(self, request):
paginator = self.model_admin.get_paginator(request, self.queryset, self.list_per_page)
# Get the number of objects, with admin filters applied.
result_count = paginator.count
# Get the total number of objects, with no admin filters applied.
if self.model_admin.show_full_result_count:
full_result_count = self.root_queryset.count()
else:
full_result_count = None
can_show_all = result_count <= self.list_max_show_all
multi_page = result_count > self.list_per_page
# Get the list of objects to display on this page.
if (self.show_all and can_show_all) or not multi_page:
result_list = self.queryset._clone()
else:
try:
result_list = paginator.page(self.page_num + 1).object_list
except InvalidPage:
raise IncorrectLookupParameters
self.result_count = result_count
self.show_full_result_count = self.model_admin.show_full_result_count
# Admin actions are shown if there is at least one entry
# or if entries are not counted because show_full_result_count is disabled
self.show_admin_actions = not self.show_full_result_count or bool(full_result_count)
self.full_result_count = full_result_count
self.result_list = result_list
self.can_show_all = can_show_all
self.multi_page = multi_page
self.paginator = paginator
def _get_default_ordering(self):
ordering = []
if self.model_admin.ordering:
ordering = self.model_admin.ordering
elif self.lookup_opts.ordering:
ordering = self.lookup_opts.ordering
return ordering
def get_ordering_field(self, field_name):
"""
Returns the proper model field name corresponding to the given
field_name to use for ordering. field_name may either be the name of a
proper model field or the name of a method (on the admin or model) or a
callable with the 'admin_order_field' attribute. Returns None if no
proper model field name can be matched.
"""
try:
field = self.lookup_opts.get_field(field_name)
return field.name
except FieldDoesNotExist:
# See whether field_name is a name of a non-field
# that allows sorting.
if callable(field_name):
attr = field_name
elif hasattr(self.model_admin, field_name):
attr = getattr(self.model_admin, field_name)
else:
attr = getattr(self.model, field_name)
return getattr(attr, 'admin_order_field', None)
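    # Illustrative sketch (hypothetical ModelAdmin, not part of this module):
    # a non-field entry in list_display can still be sortable by exposing an
    # 'admin_order_field' attribute on the callable, which is exactly what the
    # getattr() fallback above picks up:
    #
    #     class BookAdmin(admin.ModelAdmin):
    #         list_display = ('title', 'author_name')
    #         def author_name(self, obj):
    #             return obj.author.name
    #         author_name.admin_order_field = 'author__name'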
def get_ordering(self, request, queryset):
"""
Returns the list of ordering fields for the change list.
First we check the get_ordering() method in model admin, then we check
the object's default ordering. Then, any manually-specified ordering
from the query string overrides anything. Finally, a deterministic
order is guaranteed by ensuring the primary key is used as the last
ordering field.
"""
params = self.params
ordering = list(self.model_admin.get_ordering(request) or self._get_default_ordering())
if ORDER_VAR in params:
# Clear ordering and used params
ordering = []
order_params = params[ORDER_VAR].split('.')
for p in order_params:
try:
none, pfx, idx = p.rpartition('-')
field_name = self.list_display[int(idx)]
order_field = self.get_ordering_field(field_name)
if not order_field:
continue # No 'admin_order_field', skip it
# reverse order if order_field has already "-" as prefix
if order_field.startswith('-') and pfx == "-":
ordering.append(order_field[1:])
else:
ordering.append(pfx + order_field)
except (IndexError, ValueError):
continue # Invalid ordering specified, skip it.
# Add the given query's ordering fields, if any.
ordering.extend(queryset.query.order_by)
# Ensure that the primary key is systematically present in the list of
# ordering fields so we can guarantee a deterministic order across all
# database backends.
pk_name = self.lookup_opts.pk.name
if not (set(ordering) & {'pk', '-pk', pk_name, '-' + pk_name}):
# The two sets do not intersect, meaning the pk isn't present. So
# we add it.
ordering.append('-pk')
return ordering
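    # Illustrative sketch: with a query string of '?o=2.-1', ORDER_VAR holds
    # '2.-1', so the loop above orders by list_display column 2 ascending and
    # then by column 1 descending (the '-' prefix is split off via rpartition).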
def get_ordering_field_columns(self):
"""
Returns an OrderedDict of ordering field column numbers and asc/desc
"""
# We must cope with more than one column having the same underlying sort
# field, so we base things on column numbers.
ordering = self._get_default_ordering()
ordering_fields = OrderedDict()
if ORDER_VAR not in self.params:
# for ordering specified on ModelAdmin or model Meta, we don't know
# the right column numbers absolutely, because there might be more
# than one column associated with that ordering, so we guess.
for field in ordering:
if field.startswith('-'):
field = field[1:]
order_type = 'desc'
else:
order_type = 'asc'
for index, attr in enumerate(self.list_display):
if self.get_ordering_field(attr) == field:
ordering_fields[index] = order_type
break
else:
for p in self.params[ORDER_VAR].split('.'):
none, pfx, idx = p.rpartition('-')
try:
idx = int(idx)
except ValueError:
continue # skip it
ordering_fields[idx] = 'desc' if pfx == '-' else 'asc'
return ordering_fields
def get_queryset(self, request):
# First, we collect all the declared list filters.
(self.filter_specs, self.has_filters, remaining_lookup_params,
filters_use_distinct) = self.get_filters(request)
# Then, we let every list filter modify the queryset to its liking.
qs = self.root_queryset
for filter_spec in self.filter_specs:
new_qs = filter_spec.queryset(request, qs)
if new_qs is not None:
qs = new_qs
try:
# Finally, we apply the remaining lookup parameters from the query
# string (i.e. those that haven't already been processed by the
# filters).
qs = qs.filter(**remaining_lookup_params)
except (SuspiciousOperation, ImproperlyConfigured):
# Allow certain types of errors to be re-raised as-is so that the
# caller can treat them in a special way.
raise
except Exception as e:
# Every other error is caught with a naked except, because we don't
# have any other way of validating lookup parameters. They might be
# invalid if the keyword arguments are incorrect, or if the values
# are not in the correct type, so we might get FieldError,
# ValueError, ValidationError, or ?.
raise IncorrectLookupParameters(e)
if not qs.query.select_related:
qs = self.apply_select_related(qs)
# Set ordering.
ordering = self.get_ordering(request, qs)
qs = qs.order_by(*ordering)
# Apply search results
qs, search_use_distinct = self.model_admin.get_search_results(request, qs, self.query)
# Remove duplicates from results, if necessary
if filters_use_distinct | search_use_distinct:
return qs.distinct()
else:
return qs
def apply_select_related(self, qs):
if self.list_select_related is True:
return qs.select_related()
if self.list_select_related is False:
if self.has_related_field_in_list_display():
return qs.select_related()
if self.list_select_related:
return qs.select_related(*self.list_select_related)
return qs
def has_related_field_in_list_display(self):
for field_name in self.list_display:
try:
field = self.lookup_opts.get_field(field_name)
except FieldDoesNotExist:
pass
else:
if isinstance(field.remote_field, models.ManyToOneRel):
# <FK>_id field names don't require a join.
if field_name == field.get_attname():
continue
return True
return False
def url_for_result(self, result):
pk = getattr(result, self.pk_attname)
return reverse('admin:%s_%s_change' % (self.opts.app_label,
self.opts.model_name),
args=(quote(pk),),
current_app=self.model_admin.admin_site.name)
|
40223149/2015springcda
|
refs/heads/master
|
static/Brython3.1.0-20150301-090019/Lib/atexit.py
|
743
|
"""allow programmer to define multiple exit functions to be executedupon normal program termination.
Two public functions, register and unregister, are defined.
"""
class __loader__(object):
pass
def _clear(*args,**kw):
"""_clear() -> None
Clear the list of previously registered exit functions."""
pass
def _run_exitfuncs(*args,**kw):
"""_run_exitfuncs() -> None
Run all registered exit functions."""
pass
def register(*args,**kw):
"""register(func, *args, **kwargs) -> func
Register a function to be executed upon normal program termination
func - function to be called at exit
args - optional arguments to pass to func
kwargs - optional keyword arguments to pass to func
func is returned to facilitate usage as a decorator."""
pass
def unregister(*args,**kw):
"""unregister(func) -> None
    Unregister an exit function which was previously registered using
atexit.register
func - function to be unregistered"""
pass
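# Usage sketch (hedged): these Brython stubs mirror CPython's atexit API, which
# is typically used either directly or as a decorator, e.g.
#
#     import atexit
#
#     @atexit.register
#     def goodbye():
#         print("exiting")
#
#     atexit.register(print, "bye", "now")  # extra args are forwarded to func
#
# Note that because the stubs above are no-ops returning None, the decorator
# form would not behave as in CPython if actually executed here.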
|
GenericStudent/home-assistant
|
refs/heads/dev
|
tests/components/canary/conftest.py
|
5
|
"""Define fixtures available for all tests."""
from canary.api import Api
from pytest import fixture
from tests.async_mock import MagicMock, patch
@fixture
def canary(hass):
"""Mock the CanaryApi for easier testing."""
with patch.object(Api, "login", return_value=True), patch(
"homeassistant.components.canary.Api"
) as mock_canary:
instance = mock_canary.return_value = Api(
"test-username",
"test-password",
1,
)
instance.login = MagicMock(return_value=True)
instance.get_entries = MagicMock(return_value=[])
instance.get_locations = MagicMock(return_value=[])
instance.get_location = MagicMock(return_value=None)
instance.get_modes = MagicMock(return_value=[])
instance.get_readings = MagicMock(return_value=[])
instance.get_latest_readings = MagicMock(return_value=[])
instance.set_location_mode = MagicMock(return_value=None)
yield mock_canary
@fixture
def canary_config_flow(hass):
"""Mock the CanaryApi for easier config flow testing."""
with patch.object(Api, "login", return_value=True), patch(
"homeassistant.components.canary.config_flow.Api"
) as mock_canary:
instance = mock_canary.return_value = Api(
"test-username",
"test-password",
1,
)
instance.login = MagicMock(return_value=True)
instance.get_entries = MagicMock(return_value=[])
instance.get_locations = MagicMock(return_value=[])
instance.get_location = MagicMock(return_value=None)
instance.get_modes = MagicMock(return_value=[])
instance.get_readings = MagicMock(return_value=[])
instance.get_latest_readings = MagicMock(return_value=[])
instance.set_location_mode = MagicMock(return_value=None)
yield mock_canary
|
bkjones/django-taxonomy
|
refs/heads/master
|
taxonomy/__init__.py
|
1
|
__author__ = 'Brian K. Jones'
__version__ = (0, 0, 1)
|
jeroenj/CouchPotatoServer
|
refs/heads/master
|
libs/oauthlib/oauth2/draft25/__init__.py
|
112
|
"""
oauthlib.oauth2.draft_25
~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for signing and checking OAuth 2.0 draft 25 requests.
"""
class Client(object):
pass
class Server(object):
pass
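# Illustrative sketch (not implemented by these stubs): OAuth 2.0 bearer tokens
# are usually attached to a request as an HTTP header, e.g.
#
#     Authorization: Bearer mF_9.B5f-4.1JqM
#
# A concrete Client here would add such a header when signing requests, and a
# concrete Server would validate it when checking them.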
|
inercia/evy
|
refs/heads/develop
|
tests/test_fork.py
|
1
|
#
# Evy - a concurrent networking library for Python
#
# Unless otherwise noted, the files in Evy are under the following MIT license:
#
# Copyright (c) 2012, Alvaro Saurin
# Copyright (c) 2008-2010, Eventlet Contributors (see AUTHORS)
# Copyright (c) 2007-2010, Linden Research, Inc.
# Copyright (c) 2005-2006, Bob Ippolito
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from tests.test_patcher import ProcessBase
class ForkTest(ProcessBase):
def test_simple (self):
newmod = '''
import evy
import os
import sys
import signal
from evy import sleep
from evy.timeout import Timeout
mydir = %r
signal_file = os.path.join(mydir, "output.txt")
pid = os.fork()
if (pid != 0):
Timeout(10)
try:
port = None
while True:
try:
contents = open(signal_file, "rb").read()
port = int(contents.split()[0])
break
except (IOError, IndexError, ValueError, TypeError):
sleep(0.1)
connect(('127.0.0.1', port))
while True:
try:
contents = open(signal_file, "rb").read()
result = contents.split()[1]
break
except (IOError, IndexError):
sleep(0.1)
print 'result', result
finally:
os.kill(pid, signal.SIGTERM)
else:
try:
s = listen(('', 0))
fd = open(signal_file, "wb")
fd.write(str(s.getsockname()[1]))
fd.write("\\n")
fd.flush()
s.accept()
fd.write("done")
fd.flush()
finally:
fd.close()
'''
self.write_to_tempfile("newmod", newmod % self.tempdir)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(lines[0], "result done", output)
|
InfraBox/infrabox
|
refs/heads/master
|
src/api/settings.py
|
1
|
import os
from flask import jsonify
from flask_restplus import Resource
from pyinfraboxutils.ibrestplus import api
settings_ns = api.namespace('api/v1/settings',
description="Api settings")
@settings_ns.route('/')
class Settings(Resource):
def get(self):
github_enabled = os.environ['INFRABOX_GITHUB_ENABLED'] == 'true'
o = {
'INFRABOX_GITHUB_ENABLED': github_enabled,
'INFRABOX_GERRIT_ENABLED': os.environ['INFRABOX_GERRIT_ENABLED'] == 'true',
'INFRABOX_ACCOUNT_SIGNUP_ENABLED': os.environ['INFRABOX_ACCOUNT_SIGNUP_ENABLED'] == 'true',
'INFRABOX_ACCOUNT_LDAP_ENABLED': os.environ['INFRABOX_ACCOUNT_LDAP_ENABLED'] == 'true',
'INFRABOX_ROOT_URL': os.environ['INFRABOX_ROOT_URL'],
'INFRABOX_GENERAL_REPORT_ISSUE_URL': os.environ['INFRABOX_GENERAL_REPORT_ISSUE_URL']
}
if github_enabled:
o['INFRABOX_GITHUB_LOGIN_ENABLED'] = os.environ['INFRABOX_GITHUB_LOGIN_ENABLED'] == 'true'
return jsonify(o)
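# Illustrative response sketch (hedged): the keys are the ones assembled above;
# the values, including the URLs, are hypothetical and depend on the
# environment variables of the deployment.
#
#     {
#         "INFRABOX_GITHUB_ENABLED": true,
#         "INFRABOX_GITHUB_LOGIN_ENABLED": true,
#         "INFRABOX_GERRIT_ENABLED": false,
#         "INFRABOX_ACCOUNT_SIGNUP_ENABLED": true,
#         "INFRABOX_ACCOUNT_LDAP_ENABLED": false,
#         "INFRABOX_ROOT_URL": "https://infrabox.example.com",
#         "INFRABOX_GENERAL_REPORT_ISSUE_URL": "https://example.com/report-issue"
#     }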
|
ariel17/poppurri
|
refs/heads/master
|
poppurri/common/tests.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Description: TODO
"""
__author__ = "Ariel Gerardo Rios (ariel.gerardo.rios@gmail.com)"
from django.test import TestCase
# Create your tests here.
# vim: ai ts=4 sts=4 et sw=4 ft=python
|
si618/pi-time
|
refs/heads/master
|
node_modules/grunt-pylint/tasks/lib/pylint/test/functional/undefined_variable.py
|
4
|
"""Test warnings about access to undefined variables."""
# pylint: disable=too-few-public-methods, no-init, no-self-use, old-style-class,print-statement
DEFINED = 1
if DEFINED != 1:
if DEFINED in (unknown, DEFINED): # [undefined-variable]
DEFINED += 1
def in_method(var):
"""method doc"""
var = nomoreknown # [undefined-variable]
assert var
DEFINED = {DEFINED:__revision__} # [undefined-variable]
# +1:[undefined-variable]
DEFINED[__revision__] = OTHER = 'move this is astroid test'
OTHER += '$'
def bad_default(var, default=unknown2): # [undefined-variable]
"""function with defaut arg's value set to an unexistant name"""
print var, default
print xxxx # [undefined-variable]
augvar += 1 # [undefined-variable,unused-variable]
del vardel # [undefined-variable]
LMBD = lambda x, y=doesnotexist: x+y # [undefined-variable]
LMBD2 = lambda x, y: x+z # [undefined-variable]
try:
POUET # don't catch me
except NameError:
POUET = 'something'
try:
POUETT # don't catch me
except Exception: # pylint:disable = broad-except
POUETT = 'something'
try:
POUETTT # don't catch me
except: # pylint:disable = bare-except
POUETTT = 'something'
print POUET, POUETT, POUETTT
try:
PLOUF # [used-before-assignment]
except ValueError:
PLOUF = 'something'
print PLOUF
def if_branch_test(something):
"""hop"""
if something == 0:
if xxx == 1: # [used-before-assignment]
pass
else:
print xxx
xxx = 3
def decorator(arg):
"""Decorator with one argument."""
return lambda: list(arg)
@decorator(arg=[i * 2 for i in range(15)])
def func1():
"""A function with a decorator that contains a listcomp."""
@decorator(arg=(i * 2 for i in range(15)))
def func2():
"""A function with a decorator that contains a genexpr."""
@decorator(lambda x: x > 0)
def main():
"""A function with a decorator that contains a lambda."""
# Test shared scope.
def test_arguments(arg=TestClass): # [used-before-assignment]
""" TestClass isn't defined yet. """
return arg
class TestClass(Ancestor): # [used-before-assignment]
""" contains another class, which uses an undefined ancestor. """
class MissingAncestor(Ancestor1): # [used-before-assignment]
""" no op """
def test1(self):
""" It should trigger here, because the two classes
have the same scope.
"""
class UsingBeforeDefinition(Empty): # [used-before-assignment]
""" uses Empty before definition """
class Empty(object):
""" no op """
return UsingBeforeDefinition
def test(self):
""" Ancestor isn't defined yet, but we don't care. """
class MissingAncestor1(Ancestor):
""" no op """
return MissingAncestor1
class Self(object):
""" Detect when using the same name inside the class scope. """
obj = Self # [undefined-variable]
class Self1(object):
""" No error should be raised here. """
def test(self):
""" empty """
return Self1
class Ancestor(object):
""" No op """
class Ancestor1(object):
""" No op """
|
jendap/tensorflow
|
refs/heads/master
|
tensorflow/python/training/training_ops_test.py
|
21
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.learning.training_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.framework.test_util import TensorFlowTestCase
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import training_ops
class TrainingOpsTest(TensorFlowTestCase):
def _toType(self, dtype):
if dtype == np.float16:
return dtypes.float16
elif dtype == np.float32:
return dtypes.float32
elif dtype == np.float64:
return dtypes.float64
elif dtype == np.int32:
return dtypes.int32
elif dtype == np.int64:
return dtypes.int64
else:
assert False, (dtype)
def _testTypes(self, x, alpha, delta, use_gpu=None):
self.setUp()
with self.session(use_gpu=use_gpu):
var = variables.VariableV1(x)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
apply_sgd = training_ops.apply_gradient_descent(var, alpha, delta)
out = self.evaluate(apply_sgd)
self.assertShapeEqual(out, apply_sgd)
self.assertAllCloseAccordingToType(x - alpha * delta, out)
@test_util.run_v1_only("b/120545219")
def testApplyGradientDescent(self):
for (dtype, use_gpu) in itertools.product(
[np.float16, np.float32, np.float64], [False, True]):
x = np.arange(100).astype(dtype)
alpha = np.array(2.0).astype(dtype)
delta = np.arange(100).astype(dtype)
self._testTypes(x, alpha, delta, use_gpu)
def _testTypesForAdagrad(self, x, y, lr, grad, use_gpu=None):
self.setUp()
with self.session(use_gpu=use_gpu):
var = variables.VariableV1(x)
accum = variables.VariableV1(y)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
apply_adagrad = training_ops.apply_adagrad(var, accum, lr, grad)
out = self.evaluate(apply_adagrad)
self.assertShapeEqual(out, apply_adagrad)
self.assertAllCloseAccordingToType(x - lr * grad * (y + grad * grad)**
(-0.5), out)
self.assertAllCloseAccordingToType(y + grad * grad, self.evaluate(accum))
def _testTypesForFtrl(self,
x,
y,
z,
lr,
grad,
use_gpu=None,
l1=0.0,
l2=0.0,
lr_power=-0.5):
self.setUp()
with self.session(use_gpu=use_gpu):
var = variables.VariableV1(x)
accum = variables.VariableV1(y)
linear = variables.VariableV1(z)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
apply_ftrl = training_ops.apply_ftrl(var, accum, linear, grad, lr, l1, l2,
lr_power)
out = self.evaluate(apply_ftrl)
self.assertShapeEqual(out, apply_ftrl)
accum_update = y + grad * grad
linear_update = z + grad - (accum_update**(-lr_power) - y**
(-lr_power)) / lr * x
quadratic = 1.0 / (accum_update**(lr_power) * lr) + 2 * l2
expected_out = np.array([(
np.sign(linear_update[i]) * l1 - linear_update[i]) / (quadratic[i]) if
np.abs(linear_update[i]) > l1 else 0.0
for i in range(linear_update.size)])
self.assertAllCloseAccordingToType(accum_update, self.evaluate(accum))
if x.dtype == np.float16:
# The calculations here really are not very precise in float16.
self.assertAllClose(
linear_update, self.evaluate(linear), rtol=2e-2, atol=2e-2)
self.assertAllClose(expected_out, out, rtol=2e-2, atol=2e-2)
elif x.dtype == np.float32:
        # The calculations here are not sufficiently precise in float32.
self.assertAllClose(
linear_update, self.evaluate(linear), rtol=1e-5, atol=1e-5)
self.assertAllClose(expected_out, out, rtol=1e-5, atol=1e-5)
else:
self.assertAllClose(linear_update, self.evaluate(linear))
self.assertAllClose(expected_out, out)
@test_util.run_v1_only("b/120545219")
def testApplyAdagrad(self):
for (dtype, use_gpu) in itertools.product(
[np.float16, np.float32, np.float64], [False, True]):
x = np.arange(100).astype(dtype)
y = np.arange(1, 101).astype(dtype)
lr = np.array(2.0).astype(dtype)
grad = np.arange(100).astype(dtype)
self._testTypesForAdagrad(x, y, lr, grad, use_gpu)
@test_util.run_v1_only("b/120545219")
def testApplyFtrl(self):
for dtype in [np.float16, np.float32, np.float64]:
x = np.arange(100).astype(dtype)
y = np.arange(1, 101).astype(dtype)
z = np.arange(102, 202).astype(dtype)
lr = np.array(2.0).astype(dtype)
l1 = np.array(3.0).astype(dtype)
l2 = np.array(4.0).astype(dtype)
grad = np.arange(100).astype(dtype)
self._testTypesForFtrl(x, y, z, lr, grad, use_gpu=False, l1=l1, l2=l2)
def _testTypesForSparseAdagrad(self, x, y, lr, grad, indices):
self.setUp()
with self.session(use_gpu=False):
var = variables.VariableV1(x)
accum = variables.VariableV1(y)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
sparse_apply_adagrad = training_ops.sparse_apply_adagrad(
var, accum, lr, grad,
constant_op.constant(indices, self._toType(indices.dtype)))
out = self.evaluate(sparse_apply_adagrad)
self.assertShapeEqual(out, sparse_apply_adagrad)
for (i, index) in enumerate(indices):
self.assertAllCloseAccordingToType(
x[index] - lr * grad[i] * (y[index] + grad[i] * grad[i])**(-0.5),
self.evaluate(var)[index])
self.assertAllCloseAccordingToType(y[index] + grad[i] * grad[i],
self.evaluate(accum)[index])
def _testTypesForSparseFtrl(self,
x,
y,
z,
lr,
grad,
indices,
l1=0.0,
l2=0.0,
lr_power=-0.5):
self.setUp()
with self.session(use_gpu=False):
var = variables.VariableV1(x)
accum = variables.VariableV1(y)
linear = variables.VariableV1(z)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(x, self.evaluate(var))
sparse_apply_ftrl = training_ops.sparse_apply_ftrl(
var,
accum,
linear,
grad,
constant_op.constant(indices, self._toType(indices.dtype)),
lr,
l1,
l2,
lr_power=lr_power)
out = self.evaluate(sparse_apply_ftrl)
self.assertShapeEqual(out, sparse_apply_ftrl)
for (i, index) in enumerate(indices):
self.assertAllCloseAccordingToType(
x[index] - lr * grad[i] * (y[index] + grad[i] * grad[i])**
(lr_power),
self.evaluate(var)[index])
self.assertAllCloseAccordingToType(y[index] + grad[i] * grad[i],
self.evaluate(accum)[index])
@test_util.run_v1_only("b/120545219")
def testSparseApplyAdagrad(self):
for (dtype, index_type) in itertools.product(
[np.float16, np.float32, np.float64], [np.int32, np.int64]):
x_val = [np.arange(10), np.arange(10, 20), np.arange(20, 30)]
y_val = [np.arange(1, 11), np.arange(11, 21), np.arange(21, 31)]
x = np.array(x_val).astype(dtype)
y = np.array(y_val).astype(dtype)
lr = np.array(2.0).astype(dtype)
grad_val = [np.arange(10), np.arange(10)]
grad = np.array(grad_val).astype(dtype)
indices = np.array([0, 2]).astype(index_type)
self._testTypesForSparseAdagrad(x, y, lr, grad, indices)
@test_util.run_v1_only("b/120545219")
def testSparseApplyAdagradDim1(self):
for (dtype, index_type) in itertools.product(
[np.float16, np.float32, np.float64], [np.int32, np.int64]):
x_val = [[1.0], [2.0], [3.0]]
y_val = [[4.0], [5.0], [6.0]]
x = np.array(x_val).astype(dtype)
y = np.array(y_val).astype(dtype)
lr = np.array(2.0).astype(dtype)
grad_val = [[1.5], [2.5]]
grad = np.array(grad_val).astype(dtype)
indices = np.array([0, 2]).astype(index_type)
self._testTypesForSparseAdagrad(x, y, lr, grad, indices)
@test_util.run_v1_only("b/120545219")
def testSparseApplyFtrlDim1(self):
for (dtype, index_type) in itertools.product(
[np.float16, np.float32, np.float64], [np.int32, np.int64]):
x_val = [[0.0], [0.0], [0.0]]
y_val = [[4.0], [5.0], [6.0]]
z_val = [[0.0], [0.0], [0.0]]
x = np.array(x_val).astype(dtype)
y = np.array(y_val).astype(dtype)
z = np.array(z_val).astype(dtype)
lr = np.array(2.0).astype(dtype)
grad_val = [[1.5], [2.5]]
grad = np.array(grad_val).astype(dtype)
indices = np.array([0, 2]).astype(index_type)
self._testTypesForSparseFtrl(x, y, z, lr, grad, indices)
@test_util.run_v1_only("b/120545219")
def testApplyAdam(self):
for dtype, use_gpu in itertools.product(
[np.float16, np.float32, np.float64], [False, True]):
var = np.arange(100).astype(dtype)
m = np.arange(1, 101).astype(dtype)
v = np.arange(101, 201).astype(dtype)
grad = np.arange(100).astype(dtype)
self._testTypesForAdam(var, m, v, grad, use_gpu)
def _testTypesForAdam(self, var, m, v, grad, use_gpu):
self.setUp()
with self.session(use_gpu=use_gpu):
var_t = variables.VariableV1(var)
m_t = variables.VariableV1(m)
v_t = variables.VariableV1(v)
t = 1
beta1 = np.array(0.9, dtype=var.dtype)
beta2 = np.array(0.999, dtype=var.dtype)
beta1_power = beta1**t
beta2_power = beta2**t
lr = np.array(0.001, dtype=var.dtype)
epsilon = np.array(1e-8, dtype=var.dtype)
beta1_t = constant_op.constant(beta1, self._toType(var.dtype), [])
beta2_t = constant_op.constant(beta2, self._toType(var.dtype), [])
beta1_power_t = variables.VariableV1(beta1_power)
beta2_power_t = variables.VariableV1(beta2_power)
lr_t = constant_op.constant(lr, self._toType(var.dtype), [])
epsilon_t = constant_op.constant(epsilon, self._toType(var.dtype), [])
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(var, self.evaluate(var_t))
new_var, _, _ = self._adamUpdateNumpy(var, grad, t, m, v, lr, beta1,
beta2, epsilon)
apply_adam = training_ops.apply_adam(var_t, m_t, v_t, beta1_power_t,
beta2_power_t, lr_t, beta1_t,
beta2_t, epsilon_t, grad)
out = self.evaluate(apply_adam)
self.assertShapeEqual(out, apply_adam)
self.assertAllCloseAccordingToType(new_var, out)
def _adamUpdateNumpy(self, param, g_t, t, m, v, alpha, beta1, beta2, epsilon):
alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
return param_t, m_t, v_t
if __name__ == '__main__':
googletest.main()
|
jrrembert/django
|
refs/heads/master
|
django/contrib/syndication/__init__.py
|
808
|
default_app_config = 'django.contrib.syndication.apps.SyndicationConfig'
|
tralamazza/micropython
|
refs/heads/master
|
tests/cpydiff/types_str_endswith.py
|
30
|
"""
categories: Types,str
description: Start/end indices such as str.endswith(s, start) not implemented
cause: Unknown
workaround: Unknown
"""
print('abc'.endswith('c', 1))
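# Possible workaround sketch (hedged; the upstream entry lists the workaround
# as unknown): slice the string first, then call endswith() without the extra
# index argument:
#     print('abc'[1:].endswith('c'))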
|
ric2b/Vivaldi-browser
|
refs/heads/master
|
chromium/tools/binary_size/generate_official_build_report.py
|
1
|
#!/usr/bin/python
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities for generating Supersize HTML Reports for official builds."""
import argparse
import json
import logging
import os
import re
import subprocess
import sys
import urllib2
_SRC_ROOT = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
sys.path.append(os.path.join(_SRC_ROOT, 'build', 'android', 'gyp'))
from util import build_utils
_REPORTS_BASE_URL = 'gs://chrome-supersize/official_builds'
_REPORTS_JSON_GS_URL = os.path.join(_REPORTS_BASE_URL, 'reports.json')
_REPORTS_GS_URL = os.path.join(_REPORTS_BASE_URL, 'reports')
_SUPERSIZE_PATH = os.path.join(_SRC_ROOT, 'tools', 'binary_size', 'supersize')
def _FetchReferenceVersion(new_version_str, platform):
all_versions = json.loads(
urllib2.urlopen('http://omahaproxy.appspot.com/history.json').read())
# Filter out versions newer than the last branch point.
new_version_major = new_version_str.split('.')[0]
versions = (e['version'] for e in all_versions if e['os'] == platform)
prev_versions = (e for e in versions if not e.startswith(new_version_major))
return max(prev_versions, key=lambda x: tuple(int(y) for y in x.split('.')))
def _FetchSizeFileForVersion(gs_url, version, gs_size_path, output_path):
gs_path = '{}/{}/{}'.format(gs_url, version, gs_size_path)
cmd = ['gsutil.py', 'cp', gs_path, output_path]
subprocess.check_call(cmd)
def _CreateReports(report_path, diff_report_path, ref_size_path, size_path):
subprocess.check_call(
[_SUPERSIZE_PATH, 'html_report', size_path, report_path])
subprocess.check_call([
_SUPERSIZE_PATH, 'html_report', '--diff-with', ref_size_path, size_path,
diff_report_path
])
def _WriteReportsJson(out):
output = subprocess.check_output(['gsutil.py', 'ls', '-R', _REPORTS_GS_URL])
reports = []
report_re = re.compile(
_REPORTS_GS_URL +
r'/(?P<cpu>\S+)/(?P<apk>\S+)/(?P<path>report_(?P<version>[^_]+)\.ndjson)')
for line in output.splitlines():
m = report_re.search(line)
if m:
meta = {
'cpu': m.group('cpu'),
'version': m.group('version'),
'apk': m.group('apk'),
'path': m.group('path')
}
diff_re = re.compile(
r'{}/{}/(?P<path>report_(?P<version>\S+)_{}.ndjson)'.format(
meta['cpu'], meta['apk'], meta['version']))
m = diff_re.search(output)
if not m:
raise Exception('Missing diff report for {}'.format(str(meta)))
meta['diff_path'] = m.group('path')
meta['reference_version'] = m.group('version')
reports.append(meta)
return json.dump({'pushed': reports}, out)
def _UploadReports(reports_json_path, base_url, *ndjson_paths):
for path in ndjson_paths:
dst = os.path.join(base_url, os.path.basename(path))
cmd = ['gsutil.py', 'cp', '-a', 'public-read', path, dst]
logging.warning(' '.join(cmd))
subprocess.check_call(cmd)
with open(reports_json_path, 'w') as f:
_WriteReportsJson(f)
cmd = [
'gsutil.py', 'cp', '-a', 'public-read', reports_json_path,
_REPORTS_JSON_GS_URL
]
logging.warning(' '.join(cmd))
subprocess.check_call(cmd)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--version',
required=True,
help='Official build version to generate report for (ex. "72.0.3626.7").')
parser.add_argument(
'--size-path',
required=True,
help='Path to .size file for the given version.')
parser.add_argument(
'--gs-size-url',
required=True,
help='Bucket url that contains the .size files.')
parser.add_argument(
'--gs-size-path',
required=True,
help='Path within bucket to a .size file (full path looks like '
'GS_SIZE_URL/VERSION/GS_SIZE_PATH) used to locate reference .size file.')
parser.add_argument(
'--arch', required=True, help='Compiler architecture of build.')
parser.add_argument(
'--platform',
required=True,
help='OS corresponding to those used by omahaproxy.',
choices=['android', 'webview'])
args = parser.parse_args()
with build_utils.TempDir() as tmp_dir:
ref_version = _FetchReferenceVersion(args.version, args.platform)
logging.warning('Found reference version name: %s', ref_version)
ref_size_path = os.path.join(tmp_dir, ref_version) + '.size'
report_path = os.path.join(tmp_dir, 'report_{}.ndjson'.format(args.version))
diff_report_path = os.path.join(
tmp_dir, 'report_{}_{}.ndjson'.format(ref_version, args.version))
reports_json_path = os.path.join(tmp_dir, 'reports.json')
report_basename = os.path.splitext(os.path.basename(args.size_path))[0]
# Maintain name through transition to bundles.
report_basename = report_basename.replace('.minimal.apks', '.apk')
reports_base_url = os.path.join(_REPORTS_GS_URL, args.arch, report_basename)
_FetchSizeFileForVersion(args.gs_size_url, ref_version, args.gs_size_path,
ref_size_path)
_CreateReports(report_path, diff_report_path, ref_size_path, args.size_path)
_UploadReports(reports_json_path, reports_base_url, report_path,
diff_report_path)
if __name__ == '__main__':
main()
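# Example invocation sketch (hedged): the flag names come from the argparse
# definitions above; the concrete values, including the bucket URL, are
# hypothetical.
#
#   generate_official_build_report.py \
#       --version 72.0.3626.7 \
#       --size-path out/Release/ChromePublic.minimal.apks.size \
#       --gs-size-url gs://example-bucket/official_builds/sizes \
#       --gs-size-path ChromePublic.minimal.apks.size \
#       --arch arm32 \
#       --platform android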
|
lukauskas/scipy
|
refs/heads/master
|
scipy/weave/setup.py
|
76
|
#!/usr/bin/env python
from __future__ import absolute_import, print_function
from os.path import join
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('weave',parent_package,top_path)
config.add_data_dir('tests')
config.add_data_dir('scxx')
config.add_data_dir(join('blitz','blitz'))
config.add_data_dir('doc')
config.add_data_dir('examples')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
from .weave_version import weave_version
setup(version=weave_version,
description="Tools for inlining C/C++ in Python",
author="Eric Jones",
author_email="eric@enthought.com",
licence="SciPy License (BSD Style)",
url='http://www.scipy.org',
**configuration(top_path='').todict())
|
EduPepperPDTesting/pepper2013-testing
|
refs/heads/www0
|
lms/djangoapps/courseware/tests/modulestore_config.py
|
5
|
"""
Define test configuration for modulestores.
"""
from xmodule.modulestore.tests.django_utils import xml_store_config, \
mongo_store_config, draft_mongo_store_config,\
mixed_store_config
from django.conf import settings
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
TEST_DATA_XML_MODULESTORE = xml_store_config(TEST_DATA_DIR)
TEST_DATA_MONGO_MODULESTORE = mongo_store_config(TEST_DATA_DIR)
TEST_DATA_DRAFT_MONGO_MODULESTORE = draft_mongo_store_config(TEST_DATA_DIR)
# Map all XML course fixtures so they are accessible through
# the MixedModuleStore
MAPPINGS = {
'edX/toy/2012_Fall': 'xml',
'edX/toy/TT_2012_Fall': 'xml',
'edX/test_end/2012_Fall': 'xml',
'edX/test_about_blob_end_date/2012_Fall': 'xml',
'edX/graded/2012_Fall': 'xml',
'edX/open_ended/2012_Fall': 'xml',
}
TEST_DATA_MIXED_MODULESTORE = mixed_store_config(TEST_DATA_DIR, MAPPINGS)
|
deliveryhero/dhh-system-engineering
|
refs/heads/master
|
terraform/aws/modules/lambda_kubernetes_deployer/patch_deployment.py
|
1
|
# -*- coding: utf-8 -*-
"""
Kubernetes Deployment Patcher
=============================
Install requirements:
* `python3 -m venv pdep && source pdep/bin/activate`
* `pip install -r requirements.txt`
Required environment variables:
Name | Example
----------------------------| -------
AWS_ECR_REPOSITORY_BASE | 2222222222.dkr.ecr.eu-west-1.amazonaws.com
KUBE_CONFIG_NAME | cluster1.k8s.yourdomain.com
"""
__author__ = "Daniel König"
__copyright__ = "Copyright 2017, Delivery Hero AG"
__version__ = "1.0.1"
__maintainer__ = "Daniel König"
__email__ = "daniel.koenig@deliveryhero.com"
import sys
sys.path.insert(0, "python-packages")
import yaml
import kubernetes
import os
def get_kube_client(kube_config_name):
kube_config_file = 'kube_config'
kubernetes_data = {}
with open(kube_config_file, 'r') as stream:
data = yaml.load(stream)
for item in data['clusters']:
if item['name'] == kube_config_name:
kubernetes_data['certificate-authority-data'] = item['cluster']['certificate-authority-data']
kubernetes_data['server'] = item['cluster']['server']
for item in data['users']:
if item['name'] == kube_config_name:
kubernetes_data['client-certificate-data'] = item['user']['client-certificate-data']
kubernetes_data['client-key-data'] = item['user']['client-key-data']
conf = {'apiVersion': 'v1',
'clusters': [
{'name': 'kube',
'cluster': {'certificate-authority-data': kubernetes_data['certificate-authority-data'],
'server': kubernetes_data['server']}}
],
'users': [
{'name': 'superuser',
'user': {'client-certificate-data': kubernetes_data['client-certificate-data'],
'client-key-data': kubernetes_data['client-key-data']}}
],
'contexts': [
{'context': {'cluster': 'kube',
'user': 'superuser'},
'name': 'ctx'}
],
'current-context': 'ctx'}
client_config = kubernetes.client.ConfigurationObject()
kubernetes.config.kube_config.KubeConfigLoader(
config_dict=conf,
client_configuration=client_config).load_and_set()
return kubernetes.client.ApiClient(config=client_config)
def patch_deployment(client, name, new_image):
api = kubernetes.client.AppsV1beta1Api(api_client=client)
deployments = api.list_deployment_for_all_namespaces(label_selector='name={name}'.format(name=name), timeout_seconds=5)
if not len(deployments.items):
raise Exception('Could not patch deployment (no deployment with name "{name}" found).'.format(name=name))
deployment = deployments.items[0]
deployment.spec.template.spec.containers[0].image = new_image
deployment.spec.strategy = None
api.patch_namespaced_deployment_with_http_info(name, deployment.metadata.namespace, deployment)
def get_deployment_pods(client, name):
api = kubernetes.client.CoreV1Api(api_client=client)
return api.list_pod_for_all_namespaces(label_selector="name={name}".format(name=name), timeout_seconds=5).items
def delete_pods(client, name):
api = kubernetes.client.CoreV1Api(api_client=client)
    for pod in get_deployment_pods(client, name):
response = api.delete_namespaced_pod_with_http_info(pod.metadata.name,
pod.metadata.namespace,
kubernetes.client.V1DeleteOptions())
print(response)
def _env():
env, miss = {}, {}
for var in ('AWS_ECR_REPOSITORY_BASE',
'KUBE_CONFIG_NAME'):
value = os.getenv(var)
        if value:
            env[var] = value
        else:
            miss[var] = value
if miss:
raise Exception('Missing required environment variables: {}'.format(
', '.join(miss.keys())))
return env
def lambda_handler(event, context=None):
if not event:
raise Exception('No event data')
try:
image_tag = event['detail']['requestParameters']['imageTag']
app_name = event['detail']['requestParameters']['repositoryName']
user_name = event['detail']['userIdentity']['userName']
except KeyError:
raise Exception('Required information missing in event: {0}'.format(event))
except Exception as e:
raise Exception('Error parsing event: {0}'.format(repr(e)))
env = _env()
client = get_kube_client(env['KUBE_CONFIG_NAME'])
new_image = '{repository_base}/{image_name}:{image_tag}'.format(
repository_base=env['AWS_ECR_REPOSITORY_BASE'],
image_name=app_name,
image_tag=image_tag)
patch_deployment(client, app_name, new_image)
message = 'Kubernetes deployment {app_name} updated with new image {image} by {user}'.format(
app_name=app_name, image=new_image, user=user_name)
print(message)
return message
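# --- Local invocation sketch (not part of the deployed Lambda) ---
# A minimal, hedged example of the ECR PutImage event shape that
# lambda_handler() expects; the repository name, tag and user below are
# made-up placeholder values. Running this still requires the
# AWS_ECR_REPOSITORY_BASE / KUBE_CONFIG_NAME variables and the kube_config
# file described in the module docstring.
if __name__ == '__main__':
    sample_event = {
        'detail': {
            'requestParameters': {
                'imageTag': 'v1.2.3',            # hypothetical image tag
                'repositoryName': 'my-service',  # hypothetical ECR repository
            },
            'userIdentity': {
                'userName': 'ci-user',           # hypothetical IAM user
            },
        },
    }
    print(lambda_handler(sample_event))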
|
modeswitch/barrelfish
|
refs/heads/default
|
tools/harness/machines/eth.py
|
6
|
##########################################################################
# Copyright (c) 2009-2011, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
##########################################################################
import sys, os, signal, time, getpass, subprocess, socket, pty
import debug, machines, eth_machinedata
from machines import Machine, MachineLockedError
TFTP_PATH='/home/netos/tftpboot'
TOOLS_PATH='/home/netos/tools/bin'
RACKBOOT=os.path.join(TOOLS_PATH, 'rackboot.sh')
RACKPOWER=os.path.join(TOOLS_PATH, 'rackpower')
class ETHMachine(Machine):
_eth_machines = eth_machinedata.machines
def __init__(self, options):
super(ETHMachine, self).__init__(options)
self.lockprocess = None
self.masterfd = None
def get_bootarch(self):
b = self._eth_machines[self.name]['bootarch']
assert(b in self.get_buildarchs())
return b
def get_machine_name(self):
return self._eth_machines[self.name]['machine_name']
def get_buildarchs(self):
return self._eth_machines[self.name]['buildarchs']
def get_ncores(self):
return self._eth_machines[self.name]['ncores']
def get_cores_per_socket(self):
return self._eth_machines[self.name]['cores_per_socket']
def get_tickrate(self):
return self._eth_machines[self.name]['tickrate']
def get_perfcount_type(self):
return self._eth_machines[self.name]['perfcount_type']
def get_kernel_args(self):
return self._eth_machines[self.name].get('kernel_args')
def get_boot_timeout(self):
return self._eth_machines[self.name].get('boot_timeout')
def get_hostname(self):
return self.get_machine_name() + '.in.barrelfish.org'
def get_ip(self):
return socket.gethostbyname(self.get_hostname())
def get_tftp_dir(self):
user = getpass.getuser()
return os.path.join(TFTP_PATH, user, self.name + "_harness")
def _write_menu_lst(self, data, path):
debug.verbose('writing %s' % path)
debug.debug(data)
f = open(path, 'w')
f.write(data)
f.close()
def _set_menu_lst(self, relpath):
ip_menu_name = os.path.join(TFTP_PATH, "menu.lst." + self.get_ip())
debug.verbose('relinking %s to %s' % (ip_menu_name, relpath))
os.remove(ip_menu_name)
os.symlink(relpath, ip_menu_name)
def set_bootmodules(self, modules):
fullpath = os.path.join(self.get_tftp_dir(), 'menu.lst')
relpath = os.path.relpath(fullpath, TFTP_PATH)
tftppath = '/' + os.path.relpath(self.get_tftp_dir(), TFTP_PATH)
self._write_menu_lst(modules.get_menu_data(tftppath), fullpath)
self._set_menu_lst(relpath)
def lock(self):
"""Use conserver to lock the machine."""
# find out current status of console
debug.verbose('executing "console -i %s" to check state' %
self.get_machine_name())
proc = subprocess.Popen(["console", "-i", self.get_machine_name()],
stdout=subprocess.PIPE)
line = proc.communicate()[0]
assert(proc.returncode == 0)
# check that nobody else has it open for writing
myuser = getpass.getuser()
parts = line.strip().split(':')
conname, child, contype, details, users, state = parts[:6]
if users:
for userinfo in users.split(','):
mode, username, host, port = userinfo.split('@')[:4]
if 'w' in mode and username != myuser:
raise MachineLockedError # Machine is not free
# run a console in the background to 'hold' the lock and read output
debug.verbose('starting "console %s"' % self.get_machine_name())
# run on a PTY to work around terminal mangling code in console
(self.masterfd, slavefd) = pty.openpty()
self.lockprocess = subprocess.Popen(["console", self.get_machine_name()],
close_fds=True,
stdout=slavefd, stdin=slavefd)
os.close(slavefd)
# XXX: open in binary mode with no buffering
# otherwise select.select() may block when there is data in the buffer
self.console_out = os.fdopen(self.masterfd, 'rb', 0)
def unlock(self):
if self.lockprocess is None:
return # noop
debug.verbose('quitting console process (%d)' % self.lockprocess.pid)
# os.kill(self.lockprocess.pid, signal.SIGTERM)
os.write(self.masterfd, "\x05c.")
self.lockprocess.wait()
self.lockprocess = None
self.masterfd = None
def __rackboot(self, args):
debug.checkcmd([RACKBOOT] + args + [self.get_machine_name()])
def setup(self):
self.__rackboot(["-b", "-n"])
def __rackpower(self, arg):
try:
debug.checkcmd([RACKPOWER, arg, self.get_machine_name()])
except subprocess.CalledProcessError:
debug.warning("rackpower %s %s failed" %
(arg, self.get_machine_name()))
def reboot(self):
self.__rackpower('-r')
def shutdown(self):
self.__rackpower('-d')
def get_output(self):
return self.console_out
for n in sorted(ETHMachine._eth_machines.keys()):
class TmpMachine(ETHMachine):
name = n
machines.add_machine(TmpMachine)
|
NERC-CEH/jules-jasmin
|
refs/heads/master
|
majic/joj/tests/functional/test_dap_client.py
|
1
|
"""
# Majic
# Copyright (C) 2014 CEH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import urllib
import datetime
from decorator import decorator
from hamcrest import *
from pylons import config
from joj.services.dap_client.dap_client_factory import DapClientFactory
from joj.tests import TestController
@decorator
def skip_if_thredds_down(func, self, *args, **kwargs):
"""
Decorator to skip a method if the THREDDS server is not running
:param func:
:param args:
:param kwargs:
:return:
"""
if self.is_thredds_available:
return func(self, *args, **kwargs)
else:
raise self.skipTest("Couldn't connect to THREDDS Server")
class BaseDapClientTest(TestController):
def __init__(self, *args, **kwargs):
super(BaseDapClientTest, self).__init__(*args, **kwargs)
try:
code = urllib.urlopen(config['thredds.server_url']).getcode()
self.is_thredds_available = (code == 200 or code == 302)
except:
self.is_thredds_available = False
self.dap_client_factory = DapClientFactory()
@classmethod
def setUpClass(cls):
config['run_in_test_mode'] = "false" # Deactivate 'test mode' so we can make real calls to THREDDS
assert_that(config['run_in_test_mode'], is_("false"))
# noinspection PyArgumentList
class TestBaseDapClientOnWatchData(BaseDapClientTest):
@skip_if_thredds_down
def setUp(self):
test_dataset = "/dodsC/model_runs/data/WATCH_2D/driving/PSurf_WFD/PSurf_WFD_190101.nc"
url = config['thredds.server_url'] + test_dataset
self.dap_client = self.dap_client_factory.get_dap_client(url)
def test_GIVEN_dataset_WHEN_get_longname_THEN_longname_returned(self):
longname = self.dap_client.get_longname()
assert_that(longname, is_("Surface pressure"))
def test_GIVEN_dataset_WHEN_get_data_range_THEN_data_range_returned(self):
range = self.dap_client.get_data_range()
assert_that(range, is_([47567.066, 106457.453]))
def test_GIVEN_dataset_WHEN_get_variable_units_THEN_units_correctly_returned(self):
units = self.dap_client.get_variable_units()
assert_that(units, is_("Pa"))
def test_GIVEN_datetime_exactly_matches_a_datapoint_WHEN_get_time_immediately_after_THEN_that_time_returned(self):
time = datetime.datetime(1901, 1, 1)
closest_time = self.dap_client.get_time_immediately_after(time)
assert_that(closest_time, is_(time))
def test_GIVEN_datetime_not_a_datapoint_WHEN_get_time_immediately_after_THEN_next_time_returned(self):
time = datetime.datetime(1901, 1, 1, 13, 14, 15)
expected_time = datetime.datetime(1901, 1, 1, 15, 0, 0)
closest_time = self.dap_client.get_time_immediately_after(time)
assert_that(closest_time, is_(expected_time))
def test_GIVEN_datetime_before_first_datapoint_WHEN_get_time_immediately_after_THEN_first_time_returned(self):
#The first data point in this set is at 1901-01-01 00:00
time = datetime.datetime(1800, 1, 1)
expected_time = datetime.datetime(1901, 1, 1)
closest_time = self.dap_client.get_time_immediately_after(time)
assert_that(closest_time, is_(expected_time))
def test_GIVEN_datetime_after_last_datapoint_WHEN_get_time_immediately_after_THEN_None_returned(self):
#The last data point in this set is at 1901-01-31 12:00
time = datetime.datetime(2000, 1, 1)
closest_time = self.dap_client.get_time_immediately_after(time)
assert_that(closest_time, is_(None))
def test_GIVEN_datetime_exactly_matches_a_datapoint_WHEN_get_time_immediately_before_THEN_that_time_returned(self):
time = datetime.datetime(1901, 1, 5)
closest_time = self.dap_client.get_time_immediately_before(time)
assert_that(closest_time, is_(time))
def test_GIVEN_datetime_not_a_datapoint_WHEN_get_time_immediately_before_THEN_previous_time_returned(self):
time = datetime.datetime(1901, 1, 1, 13, 14, 15)
expected_time = datetime.datetime(1901, 1, 1, 12, 0, 0)
closest_time = self.dap_client.get_time_immediately_before(time)
assert_that(closest_time, is_(expected_time))
def test_GIVEN_datetime_before_first_datapoint_WHEN_get_time_immediately_before_THEN_None_returned(self):
#The first data point in this set is at 1901-01-01 00:00
time = datetime.datetime(1800, 1, 1)
closest_time = self.dap_client.get_time_immediately_before(time)
assert_that(closest_time, is_(None))
def test_GIVEN_datetime_after_last_datapoint_WHEN_get_time_immediately_before_THEN_None_returned(self):
#The last data point in this set is at 1901-01-31 12:00
time = datetime.datetime(2000, 1, 1)
expected_time = datetime.datetime(1901, 1, 31, 21, 0, 0)
closest_time = self.dap_client.get_time_immediately_before(time)
assert_that(closest_time, is_(expected_time))
def test_GIVEN_lat_lon_match_datapoint_exactly_WHEN_get_closest_lat_lon_THEN_that_datapoint_returned(self):
lat, lon = 51.75, -0.25
returned_lat, returned_lon = self.dap_client.get_closest_lat_lon(lat, lon)
assert_that(returned_lat, is_(51.75))
assert_that(returned_lon, is_(-0.25))
def test_GIVEN_lat_lon_not_a_datapoint_WHEN_get_closest_lat_lon_THEN_closest_datapoint_returned(self):
lat, lon = 51.99, -0.31
returned_lat, returned_lon = self.dap_client.get_closest_lat_lon(lat, lon)
assert_that(returned_lat, is_(51.75))
assert_that(returned_lon, is_(-0.25))
def test_GIVEN_lat_lon_outside_of_grid_WHEN_get_closest_lat_lon_THEN_closest_datapoint_in_grid_returned(self):
lat, lon = 90, 360
returned_lat, returned_lon = self.dap_client.get_closest_lat_lon(lat, lon)
assert_that(returned_lat, is_(83.75))
assert_that(returned_lon, is_(179.75))
def test_GIVEN_location_and_time_in_grid_WHEN_get_data_at_THEN_correct_data_returned(self):
lat, lon = 51.75, -0.25
time = datetime.datetime(1901, 1, 1)
lat_index, lon_index = self.dap_client.get_lat_lon_index(lat, lon)
time_index = self.dap_client.get_time_index(time)
data = self.dap_client.get_data_at(lat_index, lon_index, time_index)
assert_that(data, is_(102080.1875))
def test_GIVEN_location_outside_grid_WHEN_get_data_at_THEN_missing_value_returned(self):
lat, lon = 90, 360
time = datetime.datetime(1901, 1, 1)
lat_index, lon_index = self.dap_client.get_lat_lon_index(lat, lon)
time_index = self.dap_client.get_time_index(time)
data = self.dap_client.get_data_at(lat_index, lon_index, time_index)
assert_that(data, is_(-9999.99))
def test_GIVEN_time_outside_range_WHEN_get_data_at_THEN_closest_value_returned(self):
lat, lon = 51.75, -0.25
time = datetime.datetime(1066, 1, 1)
lat_index, lon_index = self.dap_client.get_lat_lon_index(lat, lon)
time_index = self.dap_client.get_time_index(time)
data = self.dap_client.get_data_at(lat_index, lon_index, time_index)
assert_that(data, is_(102080.1875))
def test_GIVEN_already_got_data_at_a_point_WHEN_get_data_at_different_point_THEN_new_data_returned(self):
# Testing that the cache is updated if we have moved lat / lon but not time.
lat, lon = 51.75, -0.25
time = datetime.datetime(1901, 1, 1)
lat_index, lon_index = self.dap_client.get_lat_lon_index(lat, lon)
time_index = self.dap_client.get_time_index(time)
data = self.dap_client.get_data_at(lat_index, lon_index, time_index)
assert_that(data, is_(102080.1875))
lat, lon = 41.75, -0.25
time = datetime.datetime(1901, 1, 1)
lat_index, lon_index = self.dap_client.get_lat_lon_index(lat, lon)
time_index = self.dap_client.get_time_index(time)
data = self.dap_client.get_data_at(lat_index, lon_index, time_index)
assert_that(data, is_(97743.3984375))
def test_GIVEN_nothing_WHEN_get_timestamps_THEN_timestamps_returned(self):
timestamps = self.dap_client.get_timestamps()
assert_that(len(timestamps), is_(248))
start_time = datetime.datetime(1901, 1, 1)
end_time = datetime.datetime(1901, 1, 31, 21, 0)
assert_that(timestamps[0], is_(start_time))
assert_that(timestamps[-1], is_(end_time))
# noinspection PyArgumentList
class TestBaseDapClientOnCHESSData(BaseDapClientTest):
@skip_if_thredds_down
def setUp(self):
test_dataset = "/dodsC/model_runs/data/CHESS/driving/chess_dtr_copy.ncml"
url = config['thredds.server_url'] + test_dataset
self.dap_client = self.dap_client_factory.get_dap_client(url)
def test_GIVEN_dataset_WHEN_get_longname_THEN_longname_returned(self):
longname = self.dap_client.get_longname()
assert_that(longname, is_("Daily temperature range"))
def test_GIVEN_dataset_WHEN_get_variable_units_THEN_units_correctly_returned(self):
units = self.dap_client.get_variable_units()
assert_that(units, is_("K"))
def test_GIVEN_datetime_exactly_matches_a_datapoint_WHEN_get_time_immediately_after_THEN_that_time_returned(self):
time = datetime.datetime(1961, 1, 1)
closest_time = self.dap_client.get_time_immediately_after(time)
assert_that(closest_time, is_(time))
def test_GIVEN_datetime_not_a_datapoint_WHEN_get_time_immediately_after_THEN_next_time_returned(self):
time = datetime.datetime(1961, 1, 1, 13, 14, 15)
expected_time = datetime.datetime(1961, 1, 2, 0, 0, 0)
closest_time = self.dap_client.get_time_immediately_after(time)
assert_that(closest_time, is_(expected_time))
def test_GIVEN_datetime_before_first_datapoint_WHEN_get_time_immediately_after_THEN_first_time_returned(self):
#The first data point in this set is at 1901-01-01 00:00
time = datetime.datetime(1800, 1, 1)
expected_time = datetime.datetime(1961, 1, 1)
closest_time = self.dap_client.get_time_immediately_after(time)
assert_that(closest_time, is_(expected_time))
def test_GIVEN_datetime_after_last_datapoint_WHEN_get_time_immediately_after_THEN_None_returned(self):
#The last data point in this set is at 1901-01-31 12:00
time = datetime.datetime(2000, 1, 1)
closest_time = self.dap_client.get_time_immediately_after(time)
assert_that(closest_time, is_(None))
def test_GIVEN_datetime_exactly_matches_a_datapoint_WHEN_get_time_immediately_before_THEN_that_time_returned(self):
time = datetime.datetime(1961, 1, 5)
closest_time = self.dap_client.get_time_immediately_before(time)
assert_that(closest_time, is_(time))
def test_GIVEN_lat_lon_match_datapoint_exactly_WHEN_get_closest_lat_lon_THEN_that_datapoint_returned(self):
lat, lon = 50.273550469933824, -6.211878376550269
returned_lat, returned_lon = self.dap_client.get_closest_lat_lon(lat, lon)
assert_that(returned_lat, is_(50.273550469933824))
assert_that(returned_lon, is_(-6.211878376550269))
def test_GIVEN_lat_lon_not_a_datapoint_WHEN_get_closest_lat_lon_THEN_closest_datapoint_returned(self):
lat, lon = 50.2738, -6.2117
returned_lat, returned_lon = self.dap_client.get_closest_lat_lon(lat, lon)
assert_that(returned_lat, is_(50.273550469933824))
assert_that(returned_lon, is_(-6.211878376550269))
def test_GIVEN_lat_lon_outside_of_grid_WHEN_get_closest_lat_lon_THEN_closest_datapoint_in_grid_returned(self):
lat, lon = 0, -40
returned_lat, returned_lon = self.dap_client.get_closest_lat_lon(lat, lon)
assert_that(returned_lat, is_(49.76680723189604))
assert_that(returned_lon, is_(-7.557159842082696))
def test_GIVEN_location_and_time_in_grid_WHEN_get_data_at_THEN_correct_data_returned(self):
# point at (60, 200)
lat, lon = 50.405754059495266, -4.815923234749663
time = datetime.datetime(1961, 1, 1)
lat_index, lon_index = self.dap_client.get_lat_lon_index(lat, lon)
time_index = self.dap_client.get_time_index(time)
data = self.dap_client.get_data_at(lat_index, lon_index, time_index)
assert_that(data, close_to(5.2, 0.001))
def test_GIVEN_location_outside_grid_WHEN_get_data_at_THEN_missing_value_returned(self):
lat, lon = 90, 360
time = datetime.datetime(1961, 1, 1)
lat_index, lon_index = self.dap_client.get_lat_lon_index(lat, lon)
time_index = self.dap_client.get_time_index(time)
data = self.dap_client.get_data_at(lat_index, lon_index, time_index)
assert_that(data, close_to(-99999.0, 0.001))
def test_GIVEN_time_outside_range_WHEN_get_data_at_THEN_closest_value_returned(self):
lat, lon = 50.405754059495266, -4.815923234749663
time = datetime.datetime(1066, 1, 1)
lat_index, lon_index = self.dap_client.get_lat_lon_index(lat, lon)
time_index = self.dap_client.get_time_index(time)
data = self.dap_client.get_data_at(lat_index, lon_index, time_index)
assert_that(data, close_to(5.2, 0.001))
# noinspection PyArgumentList
class TestGraphingDapClient(BaseDapClientTest):
@skip_if_thredds_down
def setUp(self):
test_dataset = "/dodsC/model_runs/data/WATCH_2D/driving/PSurf_WFD/PSurf_WFD_190101.nc"
url = config['thredds.server_url'] + test_dataset
self.dap_client = self.dap_client_factory.get_graphing_dap_client(url)
def test_GIVEN_data_at_latlon_WHEN_get_graph_data_THEN_correct_data_dictionary_returned(self):
lat, lon = 51.75, -0.25 # 215, 359
time = datetime.datetime(1901, 1, 1)
data = self.dap_client.get_graph_data(lat, lon, time)
assert_that(data['lat'], is_(lat))
assert_that(data['lon'], is_(lon))
assert_that(data['label'], is_("Surface pressure (Pa)"))
assert_that(data['xmin'], is_(-2177452800000.0)) # Milliseconds since the UNIX epoch
assert_that(data['xmax'], is_(-2174785200000.0)) # Milliseconds since the UNIX epoch
assert_that(data['ymin'], is_(98193.515625))
assert_that(data['ymax'], is_(102379.203125))
assert_that(len(data['data']), is_(248))
assert_that(data['data'][0], is_([-2177452800000, 102080.1875]))
assert_that(data['data'][247], is_([-2174785200000, 99755.59375]))
assert_that(data['data'][123], is_([-2176124400000, 99523.140625]))
def test_GIVEN_data_at_latlon_but_not_time_WHEN_get_graph_data_THEN_correct_data_dictionary_returned_as_if_time_is_at_origin(self):
lat, lon = 51.75, -0.25 # 215, 359
time = None
data = self.dap_client.get_graph_data(lat, lon, time)
assert_that(data['lat'], is_(lat))
assert_that(data['lon'], is_(lon))
assert_that(data['label'], is_("Surface pressure (Pa)"))
assert_that(data['xmin'], is_(-2177452800000.0)) # Milliseconds since the UNIX epoch
assert_that(data['xmax'], is_(-2174785200000.0)) # Milliseconds since the UNIX epoch
assert_that(data['ymin'], is_(98193.515625))
assert_that(data['ymax'], is_(102379.203125))
assert_that(len(data['data']), is_(248))
assert_that(data['data'][0], is_([-2177452800000, 102080.1875]))
assert_that(data['data'][247], is_([-2174785200000, 99755.59375]))
assert_that(data['data'][123], is_([-2176124400000, 99523.140625]))
def test_GIVEN_missing_values_at_latlon_WHEN_get_graph_data_THEN_nones_returned(self):
lat, lon = 50, -30 # Sea
time = datetime.datetime(1901, 1, 1)
data = self.dap_client.get_graph_data(lat, lon, time)
assert_that(data['lat'], is_(lat))
assert_that(data['lon'], is_(lon))
assert_that(len(data['data']), is_(248))
for datum in data['data']:
assert_that(datum[1], is_(None))
def test_GIVEN_latlon_outside_grid_WHEN_get_graph_data_THEN_nones_returned(self):
lat, lon = 90, -32.25 # Out of grid (North of greenland)
time = datetime.datetime(1901, 1, 1)
data = self.dap_client.get_graph_data(lat, lon, time)
assert_that(data['lat'], is_(lat))
assert_that(data['lon'], is_(lon))
assert_that(len(data['data']), is_(248))
for datum in data['data']:
assert_that(datum[1], is_(None))
def test_GIVEN_npoints_and_time_WHEN_get_graph_data_THEN_subset_of_points_returned(self):
lat, lon = 51.75, -0.25 # 215, 359
        time = datetime.datetime(1901, 1, 15)
data = self.dap_client.get_graph_data(lat, lon, time, npoints=10)
assert_that(len(data['data']), is_(10))
data_at_time = [self.dap_client._get_millis_since_epoch(3888000.0), 99770.203125]
assert_that(data['data'][4], is_(data_at_time))
data_at_start = [self.dap_client._get_millis_since_epoch(3844800.0), 99984.34375]
assert_that(data['data'][0], is_(data_at_start))
data_at_end = [self.dap_client._get_millis_since_epoch(3942000.0), 100408.203125]
assert_that(data['data'][9], is_(data_at_end))
def test_GIVEN_npoints_and_time_at_start_of_data_WHEN_get_graph_data_THEN_subset_of_points_returned(self):
lat, lon = 51.75, -0.25 # 215, 359
        time = datetime.datetime(1901, 1, 1)
data = self.dap_client.get_graph_data(lat, lon, time, npoints=10)
assert_that(len(data['data']), is_(6))
data_at_start = [self.dap_client._get_millis_since_epoch(2678400.0), 102080.1875]
assert_that(data['data'][0], is_(data_at_start))
data_at_end = [self.dap_client._get_millis_since_epoch(2732400.0), 101583.6875]
assert_that(data['data'][5], is_(data_at_end))
def test_GIVEN_npoints_and_time_at_end_of_data_WHEN_get_graph_data_THEN_subset_of_points_returned(self):
lat, lon = 51.75, -0.25 # 215, 359
        time = datetime.datetime(1901, 1, 31, 21, 0)
data = self.dap_client.get_graph_data(lat, lon, time, npoints=10)
assert_that(len(data['data']), is_(5))
data_at_start = [self.dap_client._get_millis_since_epoch(5302800.0), 99889.390625]
assert_that(data['data'][0], is_(data_at_start))
data_at_end = [self.dap_client._get_millis_since_epoch(5346000.0), 99755.59375]
assert_that(data['data'][4], is_(data_at_end))
# noinspection PyArgumentList
class TestLandCoverDapClient(BaseDapClientTest):
@skip_if_thredds_down
def setUp(self):
test_dataset = "/dodsC/model_runs/data/WATCH_2D/ancils/frac_igbp_watch_0p5deg_capUM6.6_2D.nc"
url = config['thredds.server_url'] + test_dataset
self.dap_client = self.dap_client_factory.get_land_cover_dap_client(url, key='frac')
def test_GIVEN_location_WHEN_get_fractional_cover_THEN_fractional_cover_returned(self):
lat, lon = 51.75, -0.25 # 215, 359
expected_cover = [0.010219395160675049, 0.0, 0.4611019790172577, 0.034402914345264435,
0.0, 0.36932751536369324, 0.0, 0.12494818866252899, 0.0]
returned_cover = self.dap_client.get_fractional_cover(lat, lon)
assert_that(returned_cover, is_(expected_cover))
def test_GIVEN_location_outside_range_WHEN_get_fractional_cover_THEN_missing_values_returned(self):
lat, lon = 90, 360 # 215, 359
expected_cover = 9 * [-9999.99]
returned_cover = self.dap_client.get_fractional_cover(lat, lon)
assert_that(returned_cover, is_(expected_cover))
# noinspection PyArgumentList
class TestSoilPropsDapClient(BaseDapClientTest):
@skip_if_thredds_down
def setUp(self):
test_dataset = "/dodsC/model_runs/data/WATCH_2D/ancils/soil_igbp_bc_watch_0p5deg_capUM6.6_2D.nc"
url = config['thredds.server_url'] + test_dataset
self.dap_client = self.dap_client_factory.get_soil_properties_dap_client(url)
def test_GIVEN_location_WHEN_get_soil_properties_THEN_soil_properties_returned(self):
lat, lon = 51.75, -0.25 # 215, 359
expected_soil_props = {
'albsoil': 0.1389661282300949,
'bexp': 8.749199867248535,
'csoil': 12.100000381469727,
'hcap': 1105759.25,
'hcon': 0.21882377564907074,
'satcon': 0.0035789860412478447,
'sathh': 0.1827763468027115,
'vcrit': 0.3086773455142975,
'vsat': 0.43060800433158875,
'vwilt': 0.1995505690574646
}
#def get_soil_properties(self, lat, lon, var_names_in_file, use_file_list, const_vals):
var_names = expected_soil_props.keys()
use_file = 10 * [True]
const_vals = 10 * [None]
returned_soil_props = self.dap_client.get_soil_properties(lat, lon, var_names, use_file, const_vals)
assert_that(len(returned_soil_props), is_(10))
assert_that(returned_soil_props, has_entries(expected_soil_props))
|
brianwc/courtlistener
|
refs/heads/master
|
cl/celery.py
|
3
|
from __future__ import absolute_import
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cl.settings')
from django.conf import settings
app = Celery('cl')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
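# --- Hedged example (not part of the original file) ---
# Tasks registered against `app`, or declared with @shared_task in any of
# the INSTALLED_APPS packages, are what autodiscover_tasks() above picks up.
# The debug task below mirrors the one from Celery's Django project template.
@app.task(bind=True)
def debug_task(self):
    # Print the task request context when invoked, e.g. via
    # debug_task.delay() from a Django shell.
    print('Request: {0!r}'.format(self.request))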
|
lxneng/incubator-airflow
|
refs/heads/master
|
tests/operators/test_email_operator.py
|
18
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function, unicode_literals
import datetime
import mock
import unittest
from airflow import configuration, DAG
from airflow.operators.email_operator import EmailOperator
from airflow.utils import timezone
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
END_DATE = timezone.datetime(2016, 1, 2)
INTERVAL = datetime.timedelta(hours=12)
FROZEN_NOW = timezone.datetime(2016, 1, 2, 12, 1, 1)
send_email_test = mock.Mock()
class TestEmailOperator(unittest.TestCase):
def setUp(self):
super(TestEmailOperator, self).setUp()
configuration.load_test_config()
self.dag = DAG(
'test_dag',
default_args={
'owner': 'airflow',
'start_date': DEFAULT_DATE},
schedule_interval=INTERVAL)
self.addCleanup(self.dag.clear)
def _run_as_operator(self, **kwargs):
task = EmailOperator(
to='airflow@example.com',
subject='Test Run',
html_content='The quick brown fox jumps over the lazy dog',
task_id='task',
dag=self.dag,
**kwargs)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
def test_execute(self):
configuration.conf.set('email', 'EMAIL_BACKEND',
'tests.operators.test_email_operator.send_email_test')
self._run_as_operator()
send_email_test.assert_called_once()
|
chromium/chromium
|
refs/heads/master
|
third_party/chromevox/third_party/closure-library/closure/bin/build/source.py
|
166
|
# Copyright 2009 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scans a source JS file for its provided and required namespaces.
Simple class to scan a JavaScript file and express its dependencies.
"""
__author__ = 'nnaze@google.com'
import re
_BASE_REGEX_STRING = r'^\s*goog\.%s\(\s*[\'"](.+)[\'"]\s*\)'
_PROVIDE_REGEX = re.compile(_BASE_REGEX_STRING % 'provide')
_REQUIRES_REGEX = re.compile(_BASE_REGEX_STRING % 'require')
class Source(object):
"""Scans a JavaScript source for its provided and required namespaces."""
# Matches a "/* ... */" comment.
# Note: We can't definitively distinguish a "/*" in a string literal without a
# state machine tokenizer. We'll assume that a line starting with whitespace
# and "/*" is a comment.
_COMMENT_REGEX = re.compile(
r"""
^\s* # Start of a new line and whitespace
/\* # Opening "/*"
.*? # Non greedy match of any characters (including newlines)
\*/ # Closing "*/""",
re.MULTILINE | re.DOTALL | re.VERBOSE)
def __init__(self, source):
"""Initialize a source.
Args:
source: str, The JavaScript source.
"""
self.provides = set()
self.requires = set()
self._source = source
self._ScanSource()
def GetSource(self):
"""Get the source as a string."""
return self._source
@classmethod
def _StripComments(cls, source):
return cls._COMMENT_REGEX.sub('', source)
@classmethod
def _HasProvideGoogFlag(cls, source):
"""Determines whether the @provideGoog flag is in a comment."""
for comment_content in cls._COMMENT_REGEX.findall(source):
if '@provideGoog' in comment_content:
return True
return False
def _ScanSource(self):
"""Fill in provides and requires by scanning the source."""
stripped_source = self._StripComments(self.GetSource())
source_lines = stripped_source.splitlines()
for line in source_lines:
match = _PROVIDE_REGEX.match(line)
if match:
self.provides.add(match.group(1))
match = _REQUIRES_REGEX.match(line)
if match:
self.requires.add(match.group(1))
# Closure's base file implicitly provides 'goog'.
# This is indicated with the @provideGoog flag.
if self._HasProvideGoogFlag(self.GetSource()):
if len(self.provides) or len(self.requires):
raise Exception(
'Base file should not provide or require namespaces.')
self.provides.add('goog')
def GetFileContents(path):
"""Get a file's contents as a string.
Args:
path: str, Path to file.
Returns:
str, Contents of file.
Raises:
IOError: An error occurred opening or reading the file.
"""
fileobj = open(path)
try:
return fileobj.read()
finally:
fileobj.close()
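# --- Usage sketch (not part of the original Closure build tools) ---
# A small, self-contained demonstration of how Source extracts
# goog.provide()/goog.require() namespaces; the JavaScript snippet is
# made up purely for illustration.
if __name__ == '__main__':
  _EXAMPLE_JS = (
      "/* Comments are stripped before scanning. */\n"
      "goog.provide('example.app');\n"
      "goog.require('goog.dom');\n"
      "goog.require('goog.events');\n")
  example_source = Source(_EXAMPLE_JS)
  print('provides: %s' % sorted(example_source.provides))  # ['example.app']
  print('requires: %s' % sorted(example_source.requires))  # ['goog.dom', 'goog.events']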
|
jwlawson/tensorflow
|
refs/heads/master
|
tensorflow/contrib/learn/python/learn/monitors_test.py
|
40
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Monitors tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import shutil
import tempfile
import time
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import testing
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import estimators
from tensorflow.python.client import session as session_lib
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import training_util
class _MyEveryN(learn.monitors.EveryN):
def __init__(self, every_n_steps=100, first_n_steps=1):
super(_MyEveryN, self).__init__(
every_n_steps=every_n_steps, first_n_steps=first_n_steps)
self._steps_begun = []
self._steps_ended = []
self._post_steps = []
@property
def steps_begun(self):
return self._steps_begun
@property
def steps_ended(self):
return self._steps_ended
@property
def post_steps(self):
return self._post_steps
def every_n_step_begin(self, step):
super(_MyEveryN, self).every_n_step_begin(step)
self._steps_begun.append(step)
return []
def every_n_step_end(self, step, outputs):
super(_MyEveryN, self).every_n_step_end(step, outputs)
self._steps_ended.append(step)
return False
def every_n_post_step(self, step, session):
super(_MyEveryN, self).every_n_post_step(step, session)
self._post_steps.append(step)
return False
class MonitorsTest(test.TestCase):
"""Monitors tests."""
def setUp(self):
# Mock out logging calls so we can verify whether correct tensors are being
# monitored.
self._actual_log = logging.info
def mockLog(*args, **kwargs): # pylint: disable=invalid-name
self.logged_message = args
self._actual_log(*args, **kwargs)
logging.info = mockLog
def tearDown(self):
logging.info = self._actual_log
def _run_monitor(self,
monitor,
num_epochs=3,
num_steps_per_epoch=10,
pass_max_steps=True):
if pass_max_steps:
max_steps = num_epochs * num_steps_per_epoch - 1
else:
max_steps = None
monitor.begin(max_steps=max_steps)
for epoch in xrange(num_epochs):
monitor.epoch_begin(epoch)
should_stop = False
step = epoch * num_steps_per_epoch
next_epoch_step = step + num_steps_per_epoch
while (not should_stop) and (step < next_epoch_step):
tensors = monitor.step_begin(step)
output = ops.get_default_session().run(tensors) if tensors else {}
output = dict(
zip([t.name if isinstance(t, ops.Tensor) else t for t in tensors],
output))
should_stop = monitor.step_end(step=step, output=output)
monitor.post_step(step=step, session=None)
step += 1
monitor.epoch_end(epoch)
monitor.end()
def test_base_monitor(self):
with ops.Graph().as_default() as g, self.test_session(g):
self._run_monitor(learn.monitors.BaseMonitor())
def test_every_0(self):
monitor = _MyEveryN(every_n_steps=0, first_n_steps=-1)
with ops.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
expected_steps = list(range(30))
self.assertAllEqual(expected_steps, monitor.steps_begun)
self.assertAllEqual(expected_steps, monitor.steps_ended)
self.assertAllEqual(expected_steps, monitor.post_steps)
def test_every_1(self):
monitor = _MyEveryN(every_n_steps=1, first_n_steps=-1)
with ops.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
expected_steps = list(range(1, 30))
self.assertEqual(expected_steps, monitor.steps_begun)
self.assertEqual(expected_steps, monitor.steps_ended)
self.assertEqual(expected_steps, monitor.post_steps)
def test_every_2(self):
monitor = _MyEveryN(every_n_steps=2, first_n_steps=-1)
with ops.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
expected_steps = list(range(2, 29, 2)) + [29]
self.assertEqual(expected_steps, monitor.steps_begun)
self.assertEqual(expected_steps, monitor.steps_ended)
self.assertEqual(expected_steps, monitor.post_steps)
def test_every_8(self):
monitor = _MyEveryN(every_n_steps=8, first_n_steps=2)
with ops.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
expected_steps = [0, 1, 2, 10, 18, 26, 29]
self.assertEqual(expected_steps, monitor.steps_begun)
self.assertEqual(expected_steps, monitor.steps_ended)
self.assertEqual(expected_steps, monitor.post_steps)
def test_every_8_no_max_steps(self):
monitor = _MyEveryN(every_n_steps=8, first_n_steps=2)
with ops.Graph().as_default() as g, self.test_session(g):
self._run_monitor(
monitor, num_epochs=3, num_steps_per_epoch=10, pass_max_steps=False)
begin_end_steps = [0, 1, 2, 10, 18, 26]
post_steps = [0, 1, 2, 10, 18, 26, 29]
self.assertEqual(begin_end_steps, monitor.steps_begun)
self.assertEqual(begin_end_steps, monitor.steps_ended)
self.assertEqual(post_steps, monitor.post_steps)
def test_every_8_recovered_after_step_begin(self):
monitor = _MyEveryN(every_n_steps=8)
with ops.Graph().as_default() as g, self.test_session(g):
for step in [8, 16]:
monitor.step_begin(step)
monitor.step_begin(step)
monitor.step_end(step, output=None)
monitor.post_step(step, session=None)
      # It should call begin again, since end was not called
self.assertEqual([8, 8, 16, 16], monitor.steps_begun)
self.assertEqual([8, 16], monitor.steps_ended)
self.assertEqual([8, 16], monitor.post_steps)
def test_every_8_recovered_after_step_end(self):
monitor = _MyEveryN(every_n_steps=8)
with ops.Graph().as_default() as g, self.test_session(g):
for step in [8, 16]:
monitor.step_begin(step)
monitor.step_end(step, output=None)
monitor.post_step(step, session=None)
monitor.step_begin(step)
monitor.step_end(step, output=None)
monitor.post_step(step, session=None)
# It should not call begin twice since end was called
self.assertEqual([8, 16], monitor.steps_begun)
self.assertEqual([8, 16], monitor.steps_ended)
self.assertEqual([8, 16], monitor.post_steps)
def test_every_8_call_post_step_at_the_end(self):
monitor = _MyEveryN(every_n_steps=8)
with ops.Graph().as_default() as g, self.test_session(g):
monitor.begin()
for step in [8, 16]:
monitor.step_begin(step)
monitor.step_end(step, output=None)
monitor.post_step(step, session=None)
monitor.step_begin(19)
monitor.step_end(19, output=None)
monitor.post_step(19, session=None)
monitor.end(session=None)
# It should not call begin twice since end was called
self.assertEqual([8, 16], monitor.steps_begun)
self.assertEqual([8, 16], monitor.steps_ended)
self.assertEqual([8, 16, 19], monitor.post_steps)
def test_every_8_call_post_step_should_not_be_called_twice(self):
monitor = _MyEveryN(every_n_steps=8)
with ops.Graph().as_default() as g, self.test_session(g):
monitor.begin()
for step in [8, 16]:
monitor.step_begin(step)
monitor.step_end(step, output=None)
monitor.post_step(step, session=None)
monitor.step_begin(16)
monitor.step_end(16, output=None)
monitor.post_step(16, session=None)
monitor.end(session=None)
# It should not call begin twice since end was called
self.assertEqual([8, 16], monitor.steps_begun)
self.assertEqual([8, 16], monitor.steps_ended)
self.assertEqual([8, 16], monitor.post_steps)
def test_print(self):
with ops.Graph().as_default() as g, self.test_session(g):
t = constant_op.constant(42.0, name='foo')
self._run_monitor(learn.monitors.PrintTensor(tensor_names=[t.name]))
self.assertRegexpMatches(str(self.logged_message), t.name)
def test_logging_trainable(self):
with ops.Graph().as_default() as g, self.test_session(g):
var = variables.Variable(constant_op.constant(42.0), name='foo')
var.initializer.run()
cof = constant_op.constant(1.0)
loss = math_ops.subtract(
math_ops.multiply(var, cof), constant_op.constant(1.0))
train_step = gradient_descent.GradientDescentOptimizer(0.5).minimize(loss)
ops.get_default_session().run(train_step)
self._run_monitor(learn.monitors.LoggingTrainable('foo'))
self.assertRegexpMatches(str(self.logged_message), var.name)
def test_summary_saver(self):
with ops.Graph().as_default() as g, self.test_session(g):
log_dir = 'log/dir'
summary_writer = testing.FakeSummaryWriter(log_dir, g)
var = variables.Variable(0.0)
var.initializer.run()
tensor = state_ops.assign_add(var, 1.0)
summary_op = summary.scalar('my_summary', tensor)
self._run_monitor(
learn.monitors.SummarySaver(
summary_op=summary_op,
save_steps=8,
summary_writer=summary_writer),
num_epochs=3,
num_steps_per_epoch=10)
summary_writer.assert_summaries(
test_case=self,
expected_logdir=log_dir,
expected_graph=g,
expected_summaries={
0: {
'my_summary': 1.0
},
1: {
'my_summary': 2.0
},
9: {
'my_summary': 3.0
},
17: {
'my_summary': 4.0
},
25: {
'my_summary': 5.0
},
29: {
'my_summary': 6.0
},
})
def _assert_validation_monitor(self,
monitor,
expected_early_stopped=False,
expected_best_step=None,
expected_best_value=None,
expected_best_metrics=None):
self.assertEqual(expected_early_stopped, monitor.early_stopped)
self.assertEqual(expected_best_step, monitor.best_step)
self.assertEqual(expected_best_value, monitor.best_value)
self.assertEqual(expected_best_metrics, monitor.best_metrics)
def test_validation_monitor_no_estimator(self):
monitor = learn.monitors.ValidationMonitor(
x=constant_op.constant(2.0), every_n_steps=0)
self._assert_validation_monitor(monitor)
with ops.Graph().as_default() as g, self.test_session(g):
with self.assertRaisesRegexp(ValueError, 'set_estimator'):
self._run_monitor(monitor)
@test.mock.patch.object(estimators, 'Estimator', autospec=True)
@test.mock.patch.object(saver, 'latest_checkpoint')
def test_validation_monitor_no_ckpt(self, mock_latest_checkpoint,
mock_estimator_class):
estimator = mock_estimator_class()
model_dir = 'model/dir'
estimator.model_dir = model_dir
mock_latest_checkpoint.return_value = None
# Do nothing with no checkpoint.
monitor = learn.monitors.ValidationMonitor(
x=constant_op.constant(2.0), every_n_steps=0)
self._assert_validation_monitor(monitor)
monitor.set_estimator(estimator)
with ops.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor)
self._assert_validation_monitor(monitor)
mock_latest_checkpoint.assert_called_with(model_dir)
@test.mock.patch.object(estimators, 'Estimator', autospec=True)
@test.mock.patch.object(saver, 'latest_checkpoint')
def test_validation_monitor_no_early_stopping_rounds(self,
mock_latest_checkpoint,
mock_estimator_class):
estimator = mock_estimator_class()
model_dir = 'model/dir'
estimator.model_dir = model_dir
estimator.evaluate.return_value = {}
mock_latest_checkpoint.return_value = '%s/ckpt' % model_dir
# Do nothing with early_stopping_rounds=None.
monitor = learn.monitors.ValidationMonitor(
x=constant_op.constant(2.0), every_n_steps=0)
self._assert_validation_monitor(monitor)
monitor.set_estimator(estimator)
with ops.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor)
self._assert_validation_monitor(monitor)
@test.mock.patch.object(estimators, 'Estimator', autospec=True)
@test.mock.patch.object(saver, 'latest_checkpoint')
def test_validation_monitor_invalid_metric(self, mock_latest_checkpoint,
mock_estimator_class):
estimator = mock_estimator_class()
model_dir = 'model/dir'
estimator.model_dir = model_dir
estimator.evaluate.return_value = {}
mock_latest_checkpoint.return_value = '%s/ckpt' % model_dir
# Fail for missing metric.
monitor = learn.monitors.ValidationMonitor(
x=constant_op.constant(2.0), every_n_steps=0, early_stopping_rounds=1)
self._assert_validation_monitor(monitor)
monitor.set_estimator(estimator)
with ops.Graph().as_default() as g, self.test_session(g):
with self.assertRaisesRegexp(ValueError, 'missing from outputs'):
self._run_monitor(monitor, num_epochs=1, num_steps_per_epoch=1)
@test.mock.patch.object(estimators, 'Estimator', autospec=True)
@test.mock.patch.object(saver, 'latest_checkpoint')
def test_validation_monitor(self, mock_latest_checkpoint,
mock_estimator_class):
estimator = mock_estimator_class()
model_dir = 'model/dir'
estimator.model_dir = model_dir
validation_outputs = {'loss': None, 'auc': None}
estimator.evaluate.return_value = validation_outputs
monitor = learn.monitors.ValidationMonitor(
x=constant_op.constant(2.0), every_n_steps=0, early_stopping_rounds=2)
self._assert_validation_monitor(monitor)
monitor.set_estimator(estimator)
with ops.Graph().as_default() as g, self.test_session(g):
monitor.begin(max_steps=100)
monitor.epoch_begin(epoch=0)
self.assertEqual(0, estimator.evaluate.call_count)
# Step 0, initial loss.
step = 0
mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
validation_outputs['loss'] = 42.0
validation_outputs['auc'] = 0.5
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertFalse(monitor.step_end(step=step, output={}))
self.assertEqual(1, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor, expected_best_step=0, expected_best_value=42.0,
expected_best_metrics={'loss': 42.0, 'auc': 0.5})
monitor.post_step(step=step, session=None)
# Step 1, same checkpoint, no eval.
step = 1
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertFalse(monitor.step_end(step=step, output={}))
self.assertEqual(1, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor, expected_best_step=0, expected_best_value=42.0,
expected_best_metrics={'loss': 42.0, 'auc': 0.5})
monitor.post_step(step=step, session=None)
# Step 2, lower loss.
step = 2
mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
validation_outputs['loss'] = 40.0
validation_outputs['auc'] = 0.6
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertFalse(monitor.step_end(step=step, output={}))
self.assertEqual(2, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor, expected_best_step=2, expected_best_value=40.0,
expected_best_metrics={'loss': 40.0, 'auc': 0.6})
monitor.post_step(step=step, session=None)
# Step 3, higher loss.
step = 3
mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
validation_outputs['loss'] = 44.0
validation_outputs['auc'] = 0.7
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertFalse(monitor.step_end(step=step, output={}))
self.assertEqual(3, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor, expected_best_step=2, expected_best_value=40.0,
expected_best_metrics={'loss': 40.0, 'auc': 0.6})
monitor.post_step(step=step, session=None)
# Step 4, higher loss for 2 steps, early stopping.
step = 4
mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
validation_outputs['loss'] = 43.0
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertTrue(monitor.step_end(step=step, output={}))
self.assertEqual(4, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor,
expected_early_stopped=True,
expected_best_step=2,
expected_best_value=40.0,
expected_best_metrics={'loss': 40.0, 'auc': 0.6})
monitor.post_step(step=step, session=None)
monitor.epoch_end(epoch=0)
monitor.end()
@test.mock.patch.object(saver, 'latest_checkpoint')
def test_validation_monitor_with_core_estimator(self, mock_latest_checkpoint):
estimator = test.mock.Mock(spec=core_estimator.Estimator)
model_dir = 'model/dir'
estimator.model_dir = model_dir
validation_outputs = {'loss': None, 'auc': None}
estimator.evaluate.return_value = validation_outputs
monitor = learn.monitors.ValidationMonitor(
input_fn=lambda: constant_op.constant(2.0),
every_n_steps=0, early_stopping_rounds=2)
self._assert_validation_monitor(monitor)
monitor.set_estimator(estimator)
with ops.Graph().as_default() as g, self.test_session(g):
monitor.begin(max_steps=100)
monitor.epoch_begin(epoch=0)
self.assertEqual(0, estimator.evaluate.call_count)
# Step 0, initial loss.
step = 0
mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
validation_outputs['loss'] = 42.0
validation_outputs['auc'] = 0.5
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertFalse(monitor.step_end(step=step, output={}))
self.assertEqual(1, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor, expected_best_step=0, expected_best_value=42.0,
expected_best_metrics={'loss': 42.0, 'auc': 0.5})
monitor.post_step(step=step, session=None)
@test.mock.patch.object(saver, 'latest_checkpoint')
def test_validation_monitor_fail_with_core_estimator_and_metrics(
self, mock_latest_checkpoint):
estimator = test.mock.Mock(spec=core_estimator.Estimator)
model_dir = 'model/dir'
estimator.model_dir = model_dir
validation_outputs = {'loss': None}
estimator.evaluate.return_value = validation_outputs
monitor = learn.monitors.ValidationMonitor(
input_fn=lambda: constant_op.constant(2.0),
metrics=constant_op.constant(2.0),
every_n_steps=0, early_stopping_rounds=2)
monitor.set_estimator(estimator)
with ops.Graph().as_default() as g, self.test_session(g):
monitor.begin(max_steps=100)
monitor.epoch_begin(epoch=0)
with self.assertRaisesRegexp(
ValueError,
'tf.estimator.Estimator does not support .* metrics'):
step = 0
mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
validation_outputs['loss'] = 42.0
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertFalse(monitor.step_end(step=step, output={}))
def test_graph_dump(self):
monitor0 = learn.monitors.GraphDump()
monitor1 = learn.monitors.GraphDump()
with ops.Graph().as_default() as g, self.test_session(g):
const_var = variables.Variable(42.0, name='my_const')
counter_var = variables.Variable(0.0, name='my_counter')
assign_add = state_ops.assign_add(counter_var, 1.0, name='my_assign_add')
variables.global_variables_initializer().run()
self._run_monitor(monitor0, num_epochs=3, num_steps_per_epoch=10)
self.assertEqual({
step: {
const_var.name: 42.0,
counter_var.name: step + 1.0,
assign_add.name: step + 1.0,
}
for step in xrange(30)
}, monitor0.data)
self._run_monitor(monitor1, num_epochs=3, num_steps_per_epoch=10)
self.assertEqual({
step: {
const_var.name: 42.0,
counter_var.name: step + 31.0,
assign_add.name: step + 31.0,
}
for step in xrange(30)
}, monitor1.data)
for step in xrange(30):
matched, non_matched = monitor1.compare(monitor0, step=step)
self.assertEqual([const_var.name], matched)
self.assertEqual({
assign_add.name: (step + 31.0, step + 1.0),
counter_var.name: (step + 31.0, step + 1.0),
}, non_matched)
matched, non_matched = monitor0.compare(monitor1, step=step)
self.assertEqual([const_var.name], matched)
self.assertEqual({
assign_add.name: (step + 1.0, step + 31.0),
counter_var.name: (step + 1.0, step + 31.0),
}, non_matched)
def test_capture_variable(self):
monitor = learn.monitors.CaptureVariable(
var_name='my_assign_add:0', every_n=8, first_n=2)
with ops.Graph().as_default() as g, self.test_session(g):
var = variables.Variable(0.0, name='my_var')
var.initializer.run()
state_ops.assign_add(var, 1.0, name='my_assign_add')
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
self.assertEqual({
0: 1.0,
1: 2.0,
2: 3.0,
10: 4.0,
18: 5.0,
26: 6.0,
29: 7.0,
}, monitor.values)
class StopAtStepTest(test.TestCase):
def test_raise_in_both_last_step_and_num_steps(self):
with self.assertRaises(ValueError):
learn.monitors.StopAtStep(num_steps=10, last_step=20)
def test_stop_based_on_last_step(self):
m = learn.monitors.StopAtStep(last_step=10)
m.step_begin(5)
self.assertFalse(m.step_end(5, None))
m.step_begin(9)
self.assertFalse(m.step_end(9, None))
m.step_begin(10)
self.assertTrue(m.step_end(10, None))
m.step_begin(11)
self.assertTrue(m.step_end(11, None))
def test_stop_based_on_num_step(self):
m = learn.monitors.StopAtStep(num_steps=10)
m.step_begin(5)
self.assertFalse(m.step_end(5, None))
m.step_begin(13)
self.assertFalse(m.step_end(13, None))
m.step_begin(14)
self.assertTrue(m.step_end(14, None))
m.step_begin(15)
self.assertTrue(m.step_end(15, None))
class CheckpointSaverTest(test.TestCase):
def setUp(self):
self.model_dir = tempfile.mkdtemp()
self.graph = ops.Graph()
with self.graph.as_default():
self.scaffold = monitored_session.Scaffold()
self.global_step = training_util.get_or_create_global_step()
self.train_op = state_ops.assign_add(self.global_step, 1)
def tearDown(self):
shutil.rmtree(self.model_dir, ignore_errors=True)
def _run(self, monitor, step, train_op, sess):
monitor.step_begin(step)
sess.run(train_op)
monitor.post_step(step, sess)
def test_raise_in_both_secs_and_steps(self):
with self.assertRaises(ValueError):
learn.monitors.CheckpointSaver(
self.model_dir, save_secs=10, save_steps=20)
def test_raise_in_none_secs_and_steps(self):
with self.assertRaises(ValueError):
learn.monitors.CheckpointSaver(self.model_dir)
def test_save_secs_saves_in_first_step(self):
with self.graph.as_default():
monitor = learn.monitors.CheckpointSaver(
self.model_dir, save_secs=2, scaffold=self.scaffold)
monitor.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
self._run(monitor, 1, self.train_op, sess)
self.assertEqual(1,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
# TODO(gunan): Reenable this test after b/32446874 is fixed.
def disabled_test_save_secs_saves_periodically(self):
with self.graph.as_default():
monitor = learn.monitors.CheckpointSaver(
self.model_dir, save_secs=2, scaffold=self.scaffold)
monitor.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
self._run(monitor, 1, self.train_op, sess)
self._run(monitor, 2, self.train_op, sess)
# Not saved
self.assertEqual(1,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
time.sleep(2.5)
self._run(monitor, 3, self.train_op, sess)
# saved
self.assertEqual(3,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
self._run(monitor, 4, self.train_op, sess)
self._run(monitor, 5, self.train_op, sess)
# Not saved
self.assertEqual(3,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
time.sleep(2.5)
self._run(monitor, 6, self.train_op, sess)
# saved
self.assertEqual(6,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
def test_save_steps_saves_in_first_step(self):
with self.graph.as_default():
monitor = learn.monitors.CheckpointSaver(
self.model_dir, save_steps=2, scaffold=self.scaffold)
monitor.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
self._run(monitor, 1, self.train_op, sess)
self.assertEqual(1,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
def test_save_steps_saves_periodically(self):
with self.graph.as_default():
monitor = learn.monitors.CheckpointSaver(
self.model_dir, save_steps=2, scaffold=self.scaffold)
monitor.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
self._run(monitor, 1, self.train_op, sess)
self._run(monitor, 2, self.train_op, sess)
# Not saved
self.assertEqual(1,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
self._run(monitor, 3, self.train_op, sess)
# saved
self.assertEqual(3,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
self._run(monitor, 4, self.train_op, sess)
# Not saved
self.assertEqual(3,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
self._run(monitor, 5, self.train_op, sess)
# saved
self.assertEqual(5,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
def test_save_saves_at_end(self):
with self.graph.as_default():
monitor = learn.monitors.CheckpointSaver(
self.model_dir, save_secs=2, scaffold=self.scaffold)
monitor.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
self._run(monitor, 1, self.train_op, sess)
self._run(monitor, 2, self.train_op, sess)
monitor.end(sess)
self.assertEqual(2,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
class FakeMonitor(learn.monitors.BaseMonitor):
def __init__(self):
learn.monitors.BaseMonitor.__init__(self)
self.should_stop = False
self.requested_tensors = []
self.call_counter = collections.Counter()
self.last_begin_step = None
self.last_end_step = None
self.last_post_step = None
def begin(self, max_steps):
self.call_counter['begin'] += 1
def end(self, session):
self.call_counter['end'] += 1
def step_begin(self, step):
self.call_counter['step_begin'] += 1
self.last_begin_step = step
return self.requested_tensors
def step_end(self, step, output):
self.call_counter['step_end'] += 1
self.last_end_step = step
self.output = output
return self.should_stop
def post_step(self, step, session):
self.call_counter['post_step'] += 1
self.last_post_step = step
self.session = session
class RunHookAdapterForMonitorsTest(test.TestCase):
def test_calls_and_steps(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
global_step_tensor = training_util.create_global_step()
inc_5 = state_ops.assign_add(global_step_tensor, 5)
mock_mon = FakeMonitor()
mock_mon2 = FakeMonitor()
hook = learn.monitors.RunHookAdapterForMonitors([mock_mon, mock_mon2])
hook.begin()
for mon in [mock_mon, mock_mon2]:
self.assertEqual(mon.call_counter['begin'], 1)
sess.run(variables.global_variables_initializer())
sess.run(global_step_tensor.assign(10))
mon_sess = monitored_session._HookedSession(sess=sess, hooks=[hook])
mon_sess.run(inc_5)
for mon in [mock_mon, mock_mon2]:
self.assertEqual(mon.output, {})
self.assertEqual(mon.last_begin_step, 11)
self.assertEqual(mon.last_end_step, 11)
self.assertEqual(mon.last_post_step, 11)
self.assertEqual(mon.call_counter['step_end'], 1)
self.assertEqual(mon.call_counter['step_begin'], 1)
self.assertEqual(mon.call_counter['post_step'], 1)
mon_sess.run(inc_5)
for mon in [mock_mon, mock_mon2]:
self.assertEqual(mon.output, {})
self.assertEqual(mon.last_begin_step, 16)
self.assertEqual(mon.last_end_step, 16)
self.assertEqual(mon.last_post_step, 16)
self.assertEqual(mon.call_counter['step_end'], 2)
self.assertEqual(mon.call_counter['step_begin'], 2)
self.assertEqual(mon.call_counter['post_step'], 2)
hook.end(sess)
for mon in [mock_mon, mock_mon2]:
self.assertEqual(mon.call_counter['end'], 1)
def test_requests(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
training_util.create_global_step()
mock_mon = FakeMonitor()
mock_mon2 = FakeMonitor()
hook = learn.monitors.RunHookAdapterForMonitors([mock_mon, mock_mon2])
hook.begin()
mon_sess = monitored_session._HookedSession(sess=sess, hooks=[hook])
a_tensor = constant_op.constant([0], name='a_tensor')
constant_op.constant([5], name='another_tensor')
constant_op.constant([10], name='third_tensor')
mock_mon.requested_tensors = ['another_tensor']
mock_mon2.requested_tensors = ['third_tensor']
sess.run(variables.global_variables_initializer())
output = mon_sess.run(a_tensor)
self.assertEqual(output, [0])
self.assertEqual(mock_mon.output['another_tensor'], [5])
self.assertEqual(mock_mon2.output['third_tensor'], [10])
if __name__ == '__main__':
test.main()
|
tdtrask/ansible
|
refs/heads/devel
|
lib/ansible/plugins/action/ce_config.py
|
89
|
#
# Copyright 2015 Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.ce import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._task.args.get('src'):
try:
self._handle_template()
except ValueError as exc:
return dict(failed=True, msg=to_text(exc))
result = super(ActionModule, self).run(tmp, task_vars)
if self._task.args.get('backup') and result.get('__backup__'):
# User requested backup and no error occurred in module.
# NOTE: If there is a parameter error, the __backup__ key may not be in results.
filepath = self._write_backup(task_vars['inventory_hostname'],
result['__backup__'])
result['backup_path'] = filepath
# strip out any keys that have two leading and two trailing
# underscore characters
for key in list(result.keys()):
if PRIVATE_KEYS_RE.match(key):
del result[key]
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
open(filename, 'w').write(contents)
return filename
def _handle_template(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
if os.path.isabs(src) or urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'templates', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('path specified in src not found')
try:
with open(source, 'r') as f:
template_data = to_text(f.read())
except IOError:
raise ValueError('unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
if hasattr(self._task, "_block"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
|
eric-haibin-lin/mxnet
|
refs/heads/master
|
example/reinforcement-learning/parallel_actor_critic/config.py
|
52
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
class Config(object):
def __init__(self, args):
# Default training settings
self.ctx = mx.gpu(0) if args.gpu else mx.cpu()
self.init_func = mx.init.Xavier(rnd_type='uniform', factor_type="in",
magnitude=1)
self.learning_rate = 1e-3
self.update_rule = "adam"
self.grad_clip = True
self.clip_magnitude = 40
# Default model settings
self.hidden_size = 200
self.gamma = 0.99
self.lambda_ = 1.0
self.vf_wt = 0.5 # Weight of value function term in the loss
self.entropy_wt = 0.01 # Weight of entropy term in the loss
self.num_envs = 16
self.t_max = 50
# Override defaults with values from `args`.
for arg in self.__dict__:
if arg in args.__dict__:
self.__setattr__(arg, args.__dict__[arg])
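# --- Illustrative usage sketch (not part of the original file) ---
# A minimal example of how Config picks up overrides from parsed command-line
# arguments: any attribute name shared with `args` replaces the default set
# above. The argparse flags below are assumptions added for demonstration only.
if __name__ == '__main__':
    import argparse
    demo_parser = argparse.ArgumentParser()
    demo_parser.add_argument('--gpu', action='store_true')
    demo_parser.add_argument('--learning_rate', type=float, default=5e-4)
    demo_args = demo_parser.parse_args([])  # empty argv: use the flag defaults
    demo_config = Config(demo_args)
    # learning_rate is overridden by the parsed value (5e-4); t_max keeps its default (50).
    print("ctx=%s lr=%s t_max=%s" % (demo_config.ctx, demo_config.learning_rate, demo_config.t_max))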
|
YueLinHo/Subversion
|
refs/heads/master
|
tools/dev/mlpatch.py
|
7
|
#!/usr/bin/env python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
# mlpatch.py: Run with no arguments for usage
import sys, os
import sgmllib
try:
# Python >=3.0
from html.entities import entitydefs
from urllib.request import urlopen as urllib_request_urlopen
except ImportError:
# Python <3.0
from htmlentitydefs import entitydefs
from urllib2 import urlopen as urllib_request_urlopen
import fileinput
CHUNKSIZE = 8 * 1024
class MyParser(sgmllib.SGMLParser):
def __init__(self):
self.baseclass = sgmllib.SGMLParser
self.baseclass.__init__(self)
self.entitydefs = entitydefs
self.entitydefs["nbsp"] = " "
self.inbody = False
self.complete_line = False
self.discard_gathered()
def discard_gathered(self):
self.gather_data = False
self.gathered_data = ""
def noop(self):
pass
def out(self, data):
sys.stdout.write(data)
def handle_starttag(self, tag, method, attrs):
if not self.inbody: return
self.baseclass.handle_starttag(self, tag, method, attrs)
def handle_endtag(self, tag, method):
if not self.inbody: return
self.baseclass.handle_endtag(self, tag, method)
def handle_data(self, data):
if not self.inbody: return
data = data.replace('\n','')
if len(data) == 0: return
if self.gather_data:
self.gathered_data += data
else:
if self.complete_line:
if data[0] in ('+', '-', ' ', '#') \
or data.startswith("Index:") \
or data.startswith("@@ ") \
or data.startswith("======"):
# Real new line
self.out('\n')
else:
# Presume that we are wrapped
self.out(' ')
self.complete_line = False
self.out(data)
def handle_charref(self, ref):
if not self.inbody: return
self.baseclass.handle_charref(self, ref)
def handle_entityref(self, ref):
if not self.inbody: return
self.baseclass.handle_entityref(self, ref)
def handle_comment(self, comment):
if comment == ' body="start" ':
self.inbody = True
elif comment == ' body="end" ':
self.inbody = False
def handle_decl(self, data):
if not self.inbody: return
print("DECL: " + data)
def unknown_starttag(self, tag, attrs):
if not self.inbody: return
print("UNKTAG: %s %s" % (tag, attrs))
def unknown_endtag(self, tag):
if not self.inbody: return
print("UNKTAG: /%s" % (tag))
def do_br(self, attrs):
self.complete_line = True
def do_p(self, attrs):
if self.complete_line:
self.out('\n')
self.out(' ')
self.complete_line = True
def start_a(self, attrs):
self.gather_data = True
def end_a(self):
self.out(self.gathered_data.replace('_at_', '@'))
self.discard_gathered()
def close(self):
if self.complete_line:
self.out('\n')
self.baseclass.close(self)
def main():
if len(sys.argv) == 1:
sys.stderr.write(
"usage: mlpatch.py dev|users year month msgno > foobar.patch\n" +
"example: mlpatch.py dev 2005 01 0001 > issue-XXXX.patch\n" +
"""
Very annoyingly, the http://svn.haxx.se/ subversion mailing list archives
mangle inline patches, and provide no raw message download facility
(other than for an entire month's email as an mbox).
So, I wrote this script to demangle them. It's not perfect, as it has to
guess about whitespace, but it does an acceptable job.\n""")
sys.exit(0)
elif len(sys.argv) != 5:
sys.stderr.write("error: mlpatch.py: Bad parameters - run with no "
+ "parameters for usage\n")
sys.exit(1)
else:
list, year, month, msgno = sys.argv[1:]
url = "http://svn.haxx.se/" \
+ "%(list)s/archive-%(year)s-%(month)s/%(msgno)s.shtml" % locals()
print("MsgUrl: " + url)
msgfile = urllib_request_urlopen(url)
p = MyParser()
buffer = msgfile.read(CHUNKSIZE)
while buffer:
p.feed(buffer)
buffer = msgfile.read(CHUNKSIZE)
p.close()
msgfile.close()
if __name__ == '__main__':
main()
|
partofthething/home-assistant
|
refs/heads/dev
|
homeassistant/components/flunearyou/__init__.py
|
4
|
"""The flunearyou component."""
import asyncio
from datetime import timedelta
from functools import partial
from pyflunearyou import Client
from pyflunearyou.errors import FluNearYouError
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
CATEGORY_CDC_REPORT,
CATEGORY_USER_REPORT,
DATA_COORDINATOR,
DOMAIN,
LOGGER,
)
DEFAULT_UPDATE_INTERVAL = timedelta(minutes=30)
CONFIG_SCHEMA = cv.deprecated(DOMAIN)
PLATFORMS = ["sensor"]
async def async_setup(hass, config):
"""Set up the Flu Near You component."""
hass.data[DOMAIN] = {DATA_COORDINATOR: {}}
return True
async def async_setup_entry(hass, config_entry):
"""Set up Flu Near You as config entry."""
hass.data[DOMAIN][DATA_COORDINATOR][config_entry.entry_id] = {}
websession = aiohttp_client.async_get_clientsession(hass)
client = Client(websession)
latitude = config_entry.data.get(CONF_LATITUDE, hass.config.latitude)
longitude = config_entry.data.get(CONF_LONGITUDE, hass.config.longitude)
async def async_update(api_category):
"""Get updated date from the API based on category."""
try:
if api_category == CATEGORY_CDC_REPORT:
return await client.cdc_reports.status_by_coordinates(
latitude, longitude
)
return await client.user_reports.status_by_coordinates(latitude, longitude)
except FluNearYouError as err:
raise UpdateFailed(err) from err
data_init_tasks = []
for api_category in [CATEGORY_CDC_REPORT, CATEGORY_USER_REPORT]:
coordinator = hass.data[DOMAIN][DATA_COORDINATOR][config_entry.entry_id][
api_category
] = DataUpdateCoordinator(
hass,
LOGGER,
name=f"{api_category} ({latitude}, {longitude})",
update_interval=DEFAULT_UPDATE_INTERVAL,
update_method=partial(async_update, api_category),
)
data_init_tasks.append(coordinator.async_refresh())
await asyncio.gather(*data_init_tasks)
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, platform)
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload an Flu Near You config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(config_entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN][DATA_COORDINATOR].pop(config_entry.entry_id)
return unload_ok
|
jonesgithub/zulip
|
refs/heads/master
|
zerver/management/commands/email-mirror.py
|
115
|
#!/usr/bin/python
"""
Forward messages sent to the configured email gateway to Zulip.
For zulip.com, messages to that address go to the Inbox of emailgateway@zulip.com.
Zulip voyager configurations will differ.
Messages meant for Zulip have a special recipient form of
<stream name>+<regenerable stream token>@streams.zulip.com
This pattern is configurable via the EMAIL_GATEWAY_PATTERN settings.py
variable.
This script can be used via two mechanisms:
1) Run this in a cronjob every N minutes if you have configured Zulip to poll
an external IMAP mailbox for messages. The script will then connect to
your IMAP server and batch-process all messages.
We extract and validate the target stream from information in the
recipient address and retrieve, forward, and archive the message.
2) Alternatively, configure your MTA to execute this script on message
receipt with the contents of the message piped to standard input. The
script will queue the message for processing. In this mode of invocation,
you should pass the destination email address in the ORIGINAL_RECIPIENT
environment variable.
In Postfix, you can express that via an /etc/aliases entry like this:
|/usr/bin/python /home/zulip/deployments/current/manage.py email-mirror
"""
from __future__ import absolute_import
import email
import os
from email.header import decode_header
import logging
import re
import sys
import posix
from django.conf import settings
from django.core.management.base import BaseCommand
from zerver.lib.actions import decode_email_address
from zerver.lib.notifications import convert_html_to_markdown
from zerver.lib.upload import upload_message_image
from zerver.lib.queue import queue_json_publish
from zerver.models import Stream, get_user_profile_by_email, UserProfile
from zerver.lib.email_mirror import logger, process_message, \
extract_and_validate, ZulipEmailForwardError, \
mark_missed_message_address_as_used, is_missed_message_address
from twisted.internet import protocol, reactor, ssl
from twisted.mail import imap4
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../../api"))
import zulip
## Setup ##
log_format = "%(asctime)s: %(message)s"
logging.basicConfig(format=log_format)
formatter = logging.Formatter(log_format)
file_handler = logging.FileHandler(settings.EMAIL_MIRROR_LOG_PATH)
file_handler.setFormatter(formatter)
logger.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
## IMAP callbacks ##
def logout(result, proto):
# Log out.
return proto.logout()
def delete(result, proto):
# Close the connection, which also processes any flags that were
# set on messages.
return proto.close().addCallback(logout, proto)
def fetch(result, proto, mailboxes):
if not result:
return proto.logout()
message_uids = result.keys()
# Make sure we forward the messages in time-order.
message_uids.sort()
for uid in message_uids:
message = email.message_from_string(result[uid]["RFC822"])
process_message(message)
# Delete the processed messages from the Inbox.
message_set = ",".join([result[key]["UID"] for key in message_uids])
d = proto.addFlags(message_set, ["\\Deleted"], uid=True, silent=False)
d.addCallback(delete, proto)
return d
def examine_mailbox(result, proto, mailbox):
# Fetch messages from a particular mailbox.
return proto.fetchMessage("1:*", uid=True).addCallback(fetch, proto, mailbox)
def select_mailbox(result, proto):
# Select which mailbox we care about.
mbox = filter(lambda x: settings.EMAIL_GATEWAY_IMAP_FOLDER in x[2], result)[0][2]
return proto.select(mbox).addCallback(examine_mailbox, proto, result)
def list_mailboxes(res, proto):
# List all of the mailboxes for this account.
return proto.list("","*").addCallback(select_mailbox, proto)
def connected(proto):
d = proto.login(settings.EMAIL_GATEWAY_LOGIN, settings.EMAIL_GATEWAY_PASSWORD)
d.addCallback(list_mailboxes, proto)
d.addErrback(login_failed)
return d
def login_failed(failure):
return failure
def done(_):
reactor.callLater(0, reactor.stop)
def main():
imap_client = protocol.ClientCreator(reactor, imap4.IMAP4Client)
d = imap_client.connectSSL(settings.EMAIL_GATEWAY_IMAP_SERVER, settings.EMAIL_GATEWAY_IMAP_PORT, ssl.ClientContextFactory())
d.addCallbacks(connected, login_failed)
d.addBoth(done)
class Command(BaseCommand):
help = __doc__
def add_arguments(self, parser):
parser.add_argument('recipient', metavar='<recipient>', type=str, nargs='?', default=None,
help="original recipient")
def handle(self, *args, **options):
rcpt_to = os.environ.get("ORIGINAL_RECIPIENT", options['recipient'])
if rcpt_to is not None:
if is_missed_message_address(rcpt_to):
try:
mark_missed_message_address_as_used(rcpt_to)
except ZulipEmailForwardError:
print "5.1.1 Bad destination mailbox address: Bad or expired missed message address."
exit(posix.EX_NOUSER)
else:
try:
extract_and_validate(rcpt_to)
except ZulipEmailForwardError:
print "5.1.1 Bad destination mailbox address: Please use the address specified in your Streams page."
exit(posix.EX_NOUSER)
# Read in the message, at most 25MiB. This is the limit enforced by
# Gmail, which we use here as a decent metric.
message = sys.stdin.read(25*1024*1024)
if len(sys.stdin.read(1)) != 0:
# We're not at EOF, reject large mail.
print "5.3.4 Message too big for system: Max size is 25MiB"
exit(posix.EX_DATAERR)
queue_json_publish(
"email_mirror",
{
"message": message,
"rcpt_to": rcpt_to
},
lambda x: None
)
else:
# We're probably running from cron, try to batch-process mail
if (not settings.EMAIL_GATEWAY_BOT or not settings.EMAIL_GATEWAY_LOGIN or
not settings.EMAIL_GATEWAY_PASSWORD or not settings.EMAIL_GATEWAY_IMAP_SERVER or
not settings.EMAIL_GATEWAY_IMAP_PORT or not settings.EMAIL_GATEWAY_IMAP_FOLDER):
print "Please configure the Email Mirror Gateway in your local_settings.py, or specify $ORIGINAL_RECIPIENT if piping a single mail."
exit(1)
reactor.callLater(0, main)
reactor.run()
|
sportarchive/CloudProcessingEngine-Decider
|
refs/heads/master
|
pydecider/plan.py
|
1
|
from __future__ import (
absolute_import,
division,
print_function
)
import logging
from .step import Step
from .activity import Activity
from .schema import SchemaValidator
_LOGGER = logging.getLogger(__name__)
class Plan(object):
"""Workflow plan.
"""
_DATA_SCHEMA = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'type': 'object',
'properties': {
'name': {
'type': 'string',
},
'version': {
'type': 'string',
},
'default_execution_start_to_close_timeout': {
'type': 'string',
},
'default_task_start_to_close_timeout': {
'type': 'string',
},
'input_spec': {
'oneOf': [
{'type': 'null'},
{'$ref': '#/definitions/input_spec'},
],
},
'activities': {
'type': 'array',
'minItems': 1,
'items': {
'type': 'object'
}
},
'steps': {
'type': 'array',
'minItems': 1,
'items': {
'type': 'object'
}
}
},
'additionalProperties': False,
'definitions': {
'input_spec': {
'$ref': 'http://json-schema.org/draft-04/schema#',
},
},
}
__slots__ = ('name',
'version',
'default_execution_start_to_close_timeout',
'default_task_start_to_close_timeout',
'steps',
'activities',
'_input_validator',
'__weakref__')
def __init__(self, name, version,
default_execution_start_to_close_timeout,
default_task_start_to_close_timeout,
input_spec=None, steps=(), activities=()):
self.name = name
self.version = version
self.default_execution_start_to_close_timeout = default_execution_start_to_close_timeout
self.default_task_start_to_close_timeout = default_task_start_to_close_timeout
self.steps = list(steps)
self.activities = dict(activities)
self._input_validator = SchemaValidator(input_spec=input_spec)
def check_input(self, plan_input):
return self._input_validator.validate(plan_input)
@classmethod
def from_data(cls, plan_data):
"""Define a plan from a dictionary of attributes.
"""
validator = SchemaValidator(cls._DATA_SCHEMA)
validator.validate(plan_data)
activities = {
activity_data['name']: Activity.from_data(activity_data)
for activity_data in plan_data['activities']
}
steps = []
for step_data in plan_data['steps']:
step = Step.from_data(step_data, activities)
steps.append(step)
plan = cls(
name=plan_data['name'],
version=plan_data['version'],
default_execution_start_to_close_timeout=plan_data['default_execution_start_to_close_timeout'],
default_task_start_to_close_timeout=plan_data['default_task_start_to_close_timeout'],
input_spec=plan_data.get('input_spec', None),
steps=steps,
activities=activities,
)
_LOGGER.info('Loaded plan %s(steps:%d activities:%d)',
plan, len(steps), len(activities))
return plan
def __repr__(self):
return 'Plan(name={name})'.format(name=self.name)
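# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of constructing a Plan directly, bypassing from_data() so
# no Activity or Step definitions are required. Every value below is made up
# for demonstration only.
if __name__ == '__main__':
    demo_plan = Plan(
        name='demo-workflow',
        version='1.0',
        default_execution_start_to_close_timeout='3600',
        default_task_start_to_close_timeout='300',
        input_spec=None,  # no input schema defined in this sketch
        steps=(),
        activities=(),
    )
    print(demo_plan)  # __repr__ gives: Plan(name=demo-workflow)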
|
rockyzhang/zhangyanhit-python-for-android-mips
|
refs/heads/master
|
python3-alpha/python3-src/Lib/test/test_pkgimport.py
|
57
|
import os
import sys
import shutil
import string
import random
import tempfile
import unittest
from imp import cache_from_source
from test.support import run_unittest
class TestImport(unittest.TestCase):
def __init__(self, *args, **kw):
self.package_name = 'PACKAGE_'
while self.package_name in sys.modules:
self.package_name += random.choice(string.ascii_letters)
self.module_name = self.package_name + '.foo'
unittest.TestCase.__init__(self, *args, **kw)
def remove_modules(self):
for module_name in (self.package_name, self.module_name):
if module_name in sys.modules:
del sys.modules[module_name]
def setUp(self):
self.test_dir = tempfile.mkdtemp()
sys.path.append(self.test_dir)
self.package_dir = os.path.join(self.test_dir,
self.package_name)
os.mkdir(self.package_dir)
open(os.path.join(self.package_dir, '__init__.py'), 'w').close()
self.module_path = os.path.join(self.package_dir, 'foo.py')
def tearDown(self):
shutil.rmtree(self.test_dir)
self.assertNotEqual(sys.path.count(self.test_dir), 0)
sys.path.remove(self.test_dir)
self.remove_modules()
def rewrite_file(self, contents):
compiled_path = cache_from_source(self.module_path)
if os.path.exists(compiled_path):
os.remove(compiled_path)
with open(self.module_path, 'w') as f:
f.write(contents)
def test_package_import__semantics(self):
# Generate a couple of broken modules to try importing.
# ...try loading the module when there's a SyntaxError
self.rewrite_file('for')
try: __import__(self.module_name)
except SyntaxError: pass
else: raise RuntimeError('Failed to induce SyntaxError') # self.fail()?
self.assertNotIn(self.module_name, sys.modules)
self.assertFalse(hasattr(sys.modules[self.package_name], 'foo'))
# ...make up a variable name that isn't bound in __builtins__
var = 'a'
while var in dir(__builtins__):
var += random.choice(string.ascii_letters)
# ...make a module that just contains that
self.rewrite_file(var)
try: __import__(self.module_name)
except NameError: pass
else: raise RuntimeError('Failed to induce NameError.')
# ...now change the module so that the NameError doesn't
# happen
self.rewrite_file('%s = 1' % var)
module = __import__(self.module_name).foo
self.assertEqual(getattr(module, var), 1)
def test_main():
run_unittest(TestImport)
if __name__ == "__main__":
test_main()
|
IlyaDjurin/django-shop
|
refs/heads/master
|
orders/admin.py
|
1
|
from django.contrib import admin
from .models import Order, OrderItem
class OrderItemInline(admin.TabularInline):
model = OrderItem
raw_id_fields = ['product']
class OrderAdmin(admin.ModelAdmin):
list_display = ['id', 'first_name', 'last_name', 'email', 'address',
'postal_code', 'city', 'paid', 'created',
'updated']
list_filter = ['paid', 'created', 'updated']
inlines = [OrderItemInline]
admin.site.register(Order, OrderAdmin)
|