repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
xadahiya/django
django/contrib/gis/gdal/feature.py
439
4153
from django.contrib.gis.gdal.base import GDALBase from django.contrib.gis.gdal.error import GDALException, OGRIndexError from django.contrib.gis.gdal.field import Field from django.contrib.gis.gdal.geometries import OGRGeometry, OGRGeomType from django.contrib.gis.gdal.prototypes import ds as capi, geom as geom_api from django.utils import six from django.utils.encoding import force_bytes, force_text from django.utils.six.moves import range # For more information, see the OGR C API source code: # http://www.gdal.org/ogr/ogr__api_8h.html # # The OGR_F_* routines are relevant here. class Feature(GDALBase): """ This class that wraps an OGR Feature, needs to be instantiated from a Layer object. """ def __init__(self, feat, layer): """ Initializes Feature from a pointer and its Layer object. """ if not feat: raise GDALException('Cannot create OGR Feature, invalid pointer given.') self.ptr = feat self._layer = layer def __del__(self): "Releases a reference to this object." if self._ptr and capi: capi.destroy_feature(self._ptr) def __getitem__(self, index): """ Gets the Field object at the specified index, which may be either an integer or the Field's string label. Note that the Field object is not the field's _value_ -- use the `get` method instead to retrieve the value (e.g. an integer) instead of a Field instance. """ if isinstance(index, six.string_types): i = self.index(index) else: if index < 0 or index > self.num_fields: raise OGRIndexError('index out of range') i = index return Field(self, i) def __iter__(self): "Iterates over each field in the Feature." for i in range(self.num_fields): yield self[i] def __len__(self): "Returns the count of fields in this feature." return self.num_fields def __str__(self): "The string name of the feature." return 'Feature FID %d in Layer<%s>' % (self.fid, self.layer_name) def __eq__(self, other): "Does equivalence testing on the features." 
return bool(capi.feature_equal(self.ptr, other._ptr)) # #### Feature Properties #### @property def encoding(self): return self._layer._ds.encoding @property def fid(self): "Returns the feature identifier." return capi.get_fid(self.ptr) @property def layer_name(self): "Returns the name of the layer for the feature." name = capi.get_feat_name(self._layer._ldefn) return force_text(name, self.encoding, strings_only=True) @property def num_fields(self): "Returns the number of fields in the Feature." return capi.get_feat_field_count(self.ptr) @property def fields(self): "Returns a list of fields in the Feature." return [capi.get_field_name(capi.get_field_defn(self._layer._ldefn, i)) for i in range(self.num_fields)] @property def geom(self): "Returns the OGR Geometry for this Feature." # Retrieving the geometry pointer for the feature. geom_ptr = capi.get_feat_geom_ref(self.ptr) return OGRGeometry(geom_api.clone_geom(geom_ptr)) @property def geom_type(self): "Returns the OGR Geometry Type for this Feture." return OGRGeomType(capi.get_fd_geom_type(self._layer._ldefn)) # #### Feature Methods #### def get(self, field): """ Returns the value of the field, instead of an instance of the Field object. May take a string of the field name or a Field object as parameters. """ field_name = getattr(field, 'name', field) return self[field_name].value def index(self, field_name): "Returns the index of the given field name." i = capi.get_field_index(self.ptr, force_bytes(field_name)) if i < 0: raise OGRIndexError('invalid OFT field name given: "%s"' % field_name) return i
bsd-3-clause
zinsword/p2pool
SOAPpy/Errors.py
294
3002
""" ################################################################################ # # SOAPpy - Cayce Ullman (cayce@actzero.com) # Brian Matthews (blm@actzero.com) # Gregory Warnes (Gregory.R.Warnes@Pfizer.com) # Christopher Blunck (blunck@gst.com) # ################################################################################ # Copyright (c) 2003, Pfizer # Copyright (c) 2001, Cayce Ullman. # Copyright (c) 2001, Brian Matthews. # # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # Neither the name of actzero, inc. nor the names of its contributors may # be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# ################################################################################ """ ident = '$Id: Errors.py 921 2005-02-15 16:32:23Z warnes $' from version import __version__ import exceptions ################################################################################ # Exceptions ################################################################################ class Error(exceptions.Exception): def __init__(self, msg): self.msg = msg def __str__(self): return "<Error : %s>" % self.msg __repr__ = __str__ def __call__(self): return (msg,) class RecursionError(Error): pass class UnknownTypeError(Error): pass class HTTPError(Error): # indicates an HTTP protocol error def __init__(self, code, msg): self.code = code self.msg = msg def __str__(self): return "<HTTPError %s %s>" % (self.code, self.msg) __repr__ = __str__ def __call___(self): return (self.code, self.msg, ) class UnderflowError(exceptions.ArithmeticError): pass
gpl-3.0
MalloyPower/parsing-python
front-end/testsuite-python-lib/Python-2.4/Lib/test/test_long.py
10
18747
from test.test_support import verify, verbose, TestFailed, fcmp from string import join from random import random, randint # SHIFT should match the value in longintrepr.h for best testing. SHIFT = 15 BASE = 2 ** SHIFT MASK = BASE - 1 KARATSUBA_CUTOFF = 70 # from longobject.c # Max number of base BASE digits to use in test cases. Doubling # this will more than double the runtime. MAXDIGITS = 15 # build some special values special = map(long, [0, 1, 2, BASE, BASE >> 1]) special.append(0x5555555555555555L) special.append(0xaaaaaaaaaaaaaaaaL) # some solid strings of one bits p2 = 4L # 0 and 1 already added for i in range(2*SHIFT): special.append(p2 - 1) p2 = p2 << 1 del p2 # add complements & negations special = special + map(lambda x: ~x, special) + \ map(lambda x: -x, special) # ------------------------------------------------------------ utilities # Use check instead of assert so the test still does something # under -O. def check(ok, *args): if not ok: raise TestFailed, join(map(str, args), " ") # Get quasi-random long consisting of ndigits digits (in base BASE). # quasi == the most-significant digit will not be 0, and the number # is constructed to contain long strings of 0 and 1 bits. These are # more likely than random bits to provoke digit-boundary errors. # The sign of the number is also random. def getran(ndigits): verify(ndigits > 0) nbits_hi = ndigits * SHIFT nbits_lo = nbits_hi - SHIFT + 1 answer = 0L nbits = 0 r = int(random() * (SHIFT * 2)) | 1 # force 1 bits to start while nbits < nbits_lo: bits = (r >> 1) + 1 bits = min(bits, nbits_hi - nbits) verify(1 <= bits <= SHIFT) nbits = nbits + bits answer = answer << bits if r & 1: answer = answer | ((1 << bits) - 1) r = int(random() * (SHIFT * 2)) verify(nbits_lo <= nbits <= nbits_hi) if random() < 0.5: answer = -answer return answer # Get random long consisting of ndigits random digits (relative to base # BASE). The sign bit is also random. 
def getran2(ndigits): answer = 0L for i in range(ndigits): answer = (answer << SHIFT) | randint(0, MASK) if random() < 0.5: answer = -answer return answer # --------------------------------------------------------------- divmod def test_division_2(x, y): q, r = divmod(x, y) q2, r2 = x//y, x%y pab, pba = x*y, y*x check(pab == pba, "multiplication does not commute for", x, y) check(q == q2, "divmod returns different quotient than / for", x, y) check(r == r2, "divmod returns different mod than % for", x, y) check(x == q*y + r, "x != q*y + r after divmod on", x, y) if y > 0: check(0 <= r < y, "bad mod from divmod on", x, y) else: check(y < r <= 0, "bad mod from divmod on", x, y) def test_division(maxdigits=MAXDIGITS): if verbose: print "long / * % divmod" digits = range(1, maxdigits+1) + range(KARATSUBA_CUTOFF, KARATSUBA_CUTOFF + 14) digits.append(KARATSUBA_CUTOFF * 3) for lenx in digits: x = getran(lenx) for leny in digits: y = getran(leny) or 1L test_division_2(x, y) # ------------------------------------------------------------ karatsuba def test_karatsuba(): if verbose: print "Karatsuba" digits = range(1, 5) + range(KARATSUBA_CUTOFF, KARATSUBA_CUTOFF + 10) digits.extend([KARATSUBA_CUTOFF * 10, KARATSUBA_CUTOFF * 100]) bits = [digit * SHIFT for digit in digits] # Test products of long strings of 1 bits -- (2**x-1)*(2**y-1) == # 2**(x+y) - 2**x - 2**y + 1, so the proper result is easy to check. 
for abits in bits: a = (1L << abits) - 1 for bbits in bits: if bbits < abits: continue b = (1L << bbits) - 1 x = a * b y = ((1L << (abits + bbits)) - (1L << abits) - (1L << bbits) + 1) check(x == y, "bad result for", a, "*", b, x, y) # -------------------------------------------------------------- ~ & | ^ def test_bitop_identities_1(x): check(x & 0 == 0, "x & 0 != 0 for", x) check(x | 0 == x, "x | 0 != x for", x) check(x ^ 0 == x, "x ^ 0 != x for", x) check(x & -1 == x, "x & -1 != x for", x) check(x | -1 == -1, "x | -1 != -1 for", x) check(x ^ -1 == ~x, "x ^ -1 != ~x for", x) check(x == ~~x, "x != ~~x for", x) check(x & x == x, "x & x != x for", x) check(x | x == x, "x | x != x for", x) check(x ^ x == 0, "x ^ x != 0 for", x) check(x & ~x == 0, "x & ~x != 0 for", x) check(x | ~x == -1, "x | ~x != -1 for", x) check(x ^ ~x == -1, "x ^ ~x != -1 for", x) check(-x == 1 + ~x == ~(x-1), "not -x == 1 + ~x == ~(x-1) for", x) for n in range(2*SHIFT): p2 = 2L ** n check(x << n >> n == x, "x << n >> n != x for", x, n) check(x // p2 == x >> n, "x // p2 != x >> n for x n p2", x, n, p2) check(x * p2 == x << n, "x * p2 != x << n for x n p2", x, n, p2) check(x & -p2 == x >> n << n == x & ~(p2 - 1), "not x & -p2 == x >> n << n == x & ~(p2 - 1) for x n p2", x, n, p2) def test_bitop_identities_2(x, y): check(x & y == y & x, "x & y != y & x for", x, y) check(x | y == y | x, "x | y != y | x for", x, y) check(x ^ y == y ^ x, "x ^ y != y ^ x for", x, y) check(x ^ y ^ x == y, "x ^ y ^ x != y for", x, y) check(x & y == ~(~x | ~y), "x & y != ~(~x | ~y) for", x, y) check(x | y == ~(~x & ~y), "x | y != ~(~x & ~y) for", x, y) check(x ^ y == (x | y) & ~(x & y), "x ^ y != (x | y) & ~(x & y) for", x, y) check(x ^ y == (x & ~y) | (~x & y), "x ^ y == (x & ~y) | (~x & y) for", x, y) check(x ^ y == (x | y) & (~x | ~y), "x ^ y == (x | y) & (~x | ~y) for", x, y) def test_bitop_identities_3(x, y, z): check((x & y) & z == x & (y & z), "(x & y) & z != x & (y & z) for", x, y, z) check((x | y) | z == x | (y | 
z), "(x | y) | z != x | (y | z) for", x, y, z) check((x ^ y) ^ z == x ^ (y ^ z), "(x ^ y) ^ z != x ^ (y ^ z) for", x, y, z) check(x & (y | z) == (x & y) | (x & z), "x & (y | z) != (x & y) | (x & z) for", x, y, z) check(x | (y & z) == (x | y) & (x | z), "x | (y & z) != (x | y) & (x | z) for", x, y, z) def test_bitop_identities(maxdigits=MAXDIGITS): if verbose: print "long bit-operation identities" for x in special: test_bitop_identities_1(x) digits = range(1, maxdigits+1) for lenx in digits: x = getran(lenx) test_bitop_identities_1(x) for leny in digits: y = getran(leny) test_bitop_identities_2(x, y) test_bitop_identities_3(x, y, getran((lenx + leny)//2)) # ------------------------------------------------- hex oct repr str atol def slow_format(x, base): if (x, base) == (0, 8): # this is an oddball! return "0L" digits = [] sign = 0 if x < 0: sign, x = 1, -x while x: x, r = divmod(x, base) digits.append(int(r)) digits.reverse() digits = digits or [0] return '-'[:sign] + \ {8: '0', 10: '', 16: '0x'}[base] + \ join(map(lambda i: "0123456789ABCDEF"[i], digits), '') + \ "L" def test_format_1(x): from string import atol for base, mapper in (8, oct), (10, repr), (16, hex): got = mapper(x) expected = slow_format(x, base) check(got == expected, mapper.__name__, "returned", got, "but expected", expected, "for", x) check(atol(got, 0) == x, 'atol("%s", 0) !=' % got, x) # str() has to be checked a little differently since there's no # trailing "L" got = str(x) expected = slow_format(x, 10)[:-1] check(got == expected, mapper.__name__, "returned", got, "but expected", expected, "for", x) def test_format(maxdigits=MAXDIGITS): if verbose: print "long str/hex/oct/atol" for x in special: test_format_1(x) for i in range(10): for lenx in range(1, maxdigits+1): x = getran(lenx) test_format_1(x) # ----------------------------------------------------------------- misc def test_misc(maxdigits=MAXDIGITS): if verbose: print "long miscellaneous operations" import sys # check the extremes in 
int<->long conversion hugepos = sys.maxint hugeneg = -hugepos - 1 hugepos_aslong = long(hugepos) hugeneg_aslong = long(hugeneg) check(hugepos == hugepos_aslong, "long(sys.maxint) != sys.maxint") check(hugeneg == hugeneg_aslong, "long(-sys.maxint-1) != -sys.maxint-1") # long -> int should not fail for hugepos_aslong or hugeneg_aslong try: check(int(hugepos_aslong) == hugepos, "converting sys.maxint to long and back to int fails") except OverflowError: raise TestFailed, "int(long(sys.maxint)) overflowed!" try: check(int(hugeneg_aslong) == hugeneg, "converting -sys.maxint-1 to long and back to int fails") except OverflowError: raise TestFailed, "int(long(-sys.maxint-1)) overflowed!" # but long -> int should overflow for hugepos+1 and hugeneg-1 x = hugepos_aslong + 1 try: y = int(x) except OverflowError: raise TestFailed, "int(long(sys.maxint) + 1) mustn't overflow" if not isinstance(y, long): raise TestFailed("int(long(sys.maxint) + 1) should have returned long") x = hugeneg_aslong - 1 try: y = int(x) except OverflowError: raise TestFailed, "int(long(-sys.maxint-1) - 1) mustn't overflow" if not isinstance(y, long): raise TestFailed("int(long(-sys.maxint-1) - 1) should have returned long") class long2(long): pass x = long2(1L<<100) y = int(x) if type(y) is not long: raise TestFailed("overflowing int conversion must return long not long subtype") # ----------------------------------- tests of auto int->long conversion def test_auto_overflow(): import math, sys if verbose: print "auto-convert int->long on overflow" special = [0, 1, 2, 3, sys.maxint-1, sys.maxint, sys.maxint+1] sqrt = int(math.sqrt(sys.maxint)) special.extend([sqrt-1, sqrt, sqrt+1]) special.extend([-i for i in special]) def checkit(*args): # Heavy use of nested scopes here! 
verify(got == expected, "for %r expected %r got %r" % (args, expected, got)) for x in special: longx = long(x) expected = -longx got = -x checkit('-', x) for y in special: longy = long(y) expected = longx + longy got = x + y checkit(x, '+', y) expected = longx - longy got = x - y checkit(x, '-', y) expected = longx * longy got = x * y checkit(x, '*', y) if y: expected = longx / longy got = x / y checkit(x, '/', y) expected = longx // longy got = x // y checkit(x, '//', y) expected = divmod(longx, longy) got = divmod(longx, longy) checkit(x, 'divmod', y) if abs(y) < 5 and not (x == 0 and y < 0): expected = longx ** longy got = x ** y checkit(x, '**', y) for z in special: if z != 0 : if y >= 0: expected = pow(longx, longy, long(z)) got = pow(x, y, z) checkit('pow', x, y, '%', z) else: try: pow(longx, longy, long(z)) except TypeError: pass else: raise TestFailed("pow%r should have raised " "TypeError" % ((longx, longy, long(z)),)) # ---------------------------------------- tests of long->float overflow def test_float_overflow(): import math if verbose: print "long->float overflow" for x in -2.0, -1.0, 0.0, 1.0, 2.0: verify(float(long(x)) == x) shuge = '12345' * 120 huge = 1L << 30000 mhuge = -huge namespace = {'huge': huge, 'mhuge': mhuge, 'shuge': shuge, 'math': math} for test in ["float(huge)", "float(mhuge)", "complex(huge)", "complex(mhuge)", "complex(huge, 1)", "complex(mhuge, 1)", "complex(1, huge)", "complex(1, mhuge)", "1. + huge", "huge + 1.", "1. + mhuge", "mhuge + 1.", "1. - huge", "huge - 1.", "1. - mhuge", "mhuge - 1.", "1. * huge", "huge * 1.", "1. * mhuge", "mhuge * 1.", "1. // huge", "huge // 1.", "1. // mhuge", "mhuge // 1.", "1. / huge", "huge / 1.", "1. / mhuge", "mhuge / 1.", "1. ** huge", "huge ** 1.", "1. 
** mhuge", "mhuge ** 1.", "math.sin(huge)", "math.sin(mhuge)", "math.sqrt(huge)", "math.sqrt(mhuge)", # should do better "math.floor(huge)", "math.floor(mhuge)"]: try: eval(test, namespace) except OverflowError: pass else: raise TestFailed("expected OverflowError from %s" % test) # XXX Perhaps float(shuge) can raise OverflowError on some box? # The comparison should not. if float(shuge) == int(shuge): raise TestFailed("float(shuge) should not equal int(shuge)") # ---------------------------------------------- test huge log and log10 def test_logs(): import math if verbose: print "log and log10" LOG10E = math.log10(math.e) for exp in range(10) + [100, 1000, 10000]: value = 10 ** exp log10 = math.log10(value) verify(fcmp(log10, exp) == 0) # log10(value) == exp, so log(value) == log10(value)/log10(e) == # exp/LOG10E expected = exp / LOG10E log = math.log(value) verify(fcmp(log, expected) == 0) for bad in -(1L << 10000), -2L, 0L: try: math.log(bad) raise TestFailed("expected ValueError from log(<= 0)") except ValueError: pass try: math.log10(bad) raise TestFailed("expected ValueError from log10(<= 0)") except ValueError: pass # ----------------------------------------------- test mixed comparisons def test_mixed_compares(): import math import sys if verbose: print "mixed comparisons" # We're mostly concerned with that mixing floats and longs does the # right stuff, even when longs are too large to fit in a float. # The safest way to check the results is to use an entirely different # method, which we do here via a skeletal rational class (which # represents all Python ints, longs and floats exactly). class Rat: def __init__(self, value): if isinstance(value, (int, long)): self.n = value self.d = 1 elif isinstance(value, float): # Convert to exact rational equivalent. 
f, e = math.frexp(abs(value)) assert f == 0 or 0.5 <= f < 1.0 # |value| = f * 2**e exactly # Suck up CHUNK bits at a time; 28 is enough so that we suck # up all bits in 2 iterations for all known binary double- # precision formats, and small enough to fit in an int. CHUNK = 28 top = 0 # invariant: |value| = (top + f) * 2**e exactly while f: f = math.ldexp(f, CHUNK) digit = int(f) assert digit >> CHUNK == 0 top = (top << CHUNK) | digit f -= digit assert 0.0 <= f < 1.0 e -= CHUNK # Now |value| = top * 2**e exactly. if e >= 0: n = top << e d = 1 else: n = top d = 1 << -e if value < 0: n = -n self.n = n self.d = d assert float(n) / float(d) == value else: raise TypeError("can't deal with %r" % val) def __cmp__(self, other): if not isinstance(other, Rat): other = Rat(other) return cmp(self.n * other.d, self.d * other.n) cases = [0, 0.001, 0.99, 1.0, 1.5, 1e20, 1e200] # 2**48 is an important boundary in the internals. 2**53 is an # important boundary for IEEE double precision. for t in 2.0**48, 2.0**50, 2.0**53: cases.extend([t - 1.0, t - 0.3, t, t + 0.3, t + 1.0, long(t-1), long(t), long(t+1)]) cases.extend([0, 1, 2, sys.maxint, float(sys.maxint)]) # 1L<<20000 should exceed all double formats. long(1e200) is to # check that we get equality with 1e200 above. 
t = long(1e200) cases.extend([0L, 1L, 2L, 1L << 20000, t-1, t, t+1]) cases.extend([-x for x in cases]) for x in cases: Rx = Rat(x) for y in cases: Ry = Rat(y) Rcmp = cmp(Rx, Ry) xycmp = cmp(x, y) if Rcmp != xycmp: raise TestFailed('%r %r %d %d' % (x, y, Rcmp, xycmp)) if (x == y) != (Rcmp == 0): raise TestFailed('%r == %r %d' % (x, y, Rcmp)) if (x != y) != (Rcmp != 0): raise TestFailed('%r != %r %d' % (x, y, Rcmp)) if (x < y) != (Rcmp < 0): raise TestFailed('%r < %r %d' % (x, y, Rcmp)) if (x <= y) != (Rcmp <= 0): raise TestFailed('%r <= %r %d' % (x, y, Rcmp)) if (x > y) != (Rcmp > 0): raise TestFailed('%r > %r %d' % (x, y, Rcmp)) if (x >= y) != (Rcmp >= 0): raise TestFailed('%r >= %r %d' % (x, y, Rcmp)) # ---------------------------------------------------------------- do it test_division() test_karatsuba() test_bitop_identities() test_format() test_misc() test_auto_overflow() test_float_overflow() test_logs() test_mixed_compares()
mit
lpramuk/robottelo
tests/foreman/cli/test_gpgkey.py
1
40852
# -*- encoding: utf-8 -*- """Test class for GPG Key CLI :Requirement: Gpgkey :CaseAutomation: Automated :CaseLevel: Component :CaseComponent: GPGKeys :TestType: Functional :CaseImportance: High :Upstream: No """ from tempfile import mkstemp from fauxfactory import gen_alphanumeric from fauxfactory import gen_choice from fauxfactory import gen_integer from fauxfactory import gen_string from robottelo import ssh from robottelo.cli.base import CLIReturnCodeError from robottelo.cli.factory import CLIFactoryError from robottelo.cli.factory import make_gpg_key from robottelo.cli.factory import make_org from robottelo.cli.factory import make_product from robottelo.cli.factory import make_repository from robottelo.cli.gpgkey import GPGKey from robottelo.cli.org import Org from robottelo.cli.product import Product from robottelo.cli.repository import Repository from robottelo.constants import DEFAULT_ORG from robottelo.constants import VALID_GPG_KEY_FILE from robottelo.datafactory import invalid_values_list from robottelo.datafactory import valid_data_list from robottelo.decorators import stubbed from robottelo.decorators import tier1 from robottelo.decorators import tier2 from robottelo.decorators import upgrade from robottelo.helpers import get_data_file from robottelo.test import CLITestCase VALID_GPG_KEY_FILE_PATH = get_data_file(VALID_GPG_KEY_FILE) def create_gpg_key_file(content=None): """Creates a fake GPG Key file and returns its path or None if an error happens. 
""" (_, key_filename) = mkstemp(text=True) if not content: content = gen_alphanumeric(gen_integer(20, 50)) with open(key_filename, "w") as gpg_key_file: gpg_key_file.write(content) return key_filename return None class TestGPGKey(CLITestCase): """Tests for GPG Keys via Hammer CLI""" search_key = 'name' @classmethod def setUpClass(cls): """Create a shared organization for all tests to avoid generating hundreds of organizations """ super(TestGPGKey, cls).setUpClass() cls.org = make_org(cached=True) # Bug verification @tier1 def test_verify_redmine_4272(self): """gpg info should display key content :id: 2c6176ca-34dd-4d52-930d-6e79da6b0c15 :expectedresults: gpg info should display key content :CaseImportance: Critical """ # Setup a new key file content = gen_alphanumeric() gpg_key = create_gpg_key_file(content=content) self.assertIsNotNone(gpg_key, 'GPG Key file must be created') gpg_key = make_gpg_key( {'key': gpg_key, 'name': gen_string('alpha'), 'organization-id': self.org['id']} ) self.assertEqual(gpg_key['content'], content) @tier1 def test_positive_get_info_by_name(self): """Create single gpg key and get its info by name :id: be418cf8-8a90-46db-9e8c-8ff349c98401 :expectedresults: specific information for GPG key matches the creation name :CaseImportance: Critical """ name = gen_string('utf8') gpg_key = make_gpg_key( {'key': VALID_GPG_KEY_FILE_PATH, 'name': name, 'organization-id': self.org['id']} ) gpg_key = GPGKey.info({'name': gpg_key['name'], 'organization-id': self.org['id']}) self.assertEqual(gpg_key['name'], name) # Positive Create @tier1 def test_positive_create_with_default_org(self): """Create gpg key with valid name and valid gpg key via file import using the default created organization :id: c64d4959-e53e-44c0-82da-dc4dd4c89733 :expectedresults: gpg key is created :CaseImportance: Critical """ org = Org.info({'name': DEFAULT_ORG}) for name in valid_data_list(): with self.subTest(name): gpg_key = make_gpg_key( {'key': VALID_GPG_KEY_FILE_PATH, 'name': 
name, 'organization-id': org['id']} ) # Can we find the new object? result = GPGKey.exists( {'organization-id': org['id']}, (self.search_key, gpg_key[self.search_key]) ) self.assertEqual(gpg_key[self.search_key], result[self.search_key]) @tier1 def test_positive_create_with_custom_org(self): """Create gpg key with valid name and valid gpg key via file import using a new organization :id: f1bcf748-0890-4b54-8f30-2df4924c80b3 :expectedresults: gpg key is created :CaseImportance: Critical """ for name in valid_data_list(): with self.subTest(name): gpg_key = make_gpg_key( { 'key': VALID_GPG_KEY_FILE_PATH, 'name': name, 'organization-id': self.org['id'], } ) # Can we find the new object? result = GPGKey.exists( {'organization-id': self.org['id']}, (self.search_key, gpg_key[self.search_key]), ) self.assertEqual(gpg_key[self.search_key], result[self.search_key]) # Negative Create @tier1 def test_negative_create_with_same_name(self): """Create gpg key with valid name and valid gpg key via file import then try to create new one with same name :id: 3f1423da-bcc1-4320-8b9b-260784eb123c :expectedresults: gpg key is not created :CaseImportance: Critical """ name = gen_string('alphanumeric') gpg_key = make_gpg_key({'name': name, 'organization-id': self.org['id']}) # Can we find the new object? 
result = GPGKey.exists( {'organization-id': self.org['id']}, (self.search_key, gpg_key[self.search_key]) ) self.assertEqual(gpg_key[self.search_key], result[self.search_key]) # Try to create a gpg key with the same name with self.assertRaises(CLIFactoryError): make_gpg_key({'name': name, 'organization-id': self.org['id']}) @tier1 def test_negative_create_with_no_gpg_key(self): """Create gpg key with valid name and no gpg key :id: 9440a1a0-eb0d-445e-88d3-3139c2b1d17a :expectedresults: gpg key is not created :CaseImportance: Critical """ for name in valid_data_list(): with self.subTest(name): with self.assertRaises(CLIReturnCodeError): GPGKey.create({'name': name, 'organization-id': self.org['id']}) @tier1 def test_negative_create_with_invalid_name(self): """Create gpg key with invalid name and valid gpg key via file import :id: 93160f88-b653-42a9-b44f-9b2ba56f38d9 :expectedresults: gpg key is not created :CaseImportance: Critical """ for name in invalid_values_list(): with self.subTest(name): with self.assertRaises(CLIFactoryError): # factory will provide a valid key make_gpg_key({'name': name, 'organization-id': self.org['id']}) # Positive Delete @tier1 @upgrade def test_positive_delete(self): """Create gpg key with valid name and valid gpg key via file import then delete it :id: 5bf72e5c-767a-4321-8781-a5cea9474421 :expectedresults: gpg key is deleted :CaseImportance: Critical """ for name in valid_data_list(): with self.subTest(name): gpg_key = make_gpg_key({'name': name, 'organization-id': self.org['id']}) result = GPGKey.exists( {'organization-id': self.org['id']}, (self.search_key, gpg_key[self.search_key]), ) self.assertEqual(gpg_key[self.search_key], result[self.search_key]) GPGKey.delete({'name': name, 'organization-id': self.org['id']}) result = GPGKey.exists( {'organization-id': self.org['id']}, (self.search_key, gpg_key[self.search_key]), ) self.assertEqual(len(result), 0) # Positive Update @tier1 def test_positive_update_name(self): """Create gpg key 
with valid name and valid gpg key via file import then update its name :id: e18d7cd8-2757-4134-9ed9-7eb68f2872e2 :expectedresults: gpg key is updated :CaseImportance: Critical """ gpg_key = make_gpg_key({'organization-id': self.org['id']}) for new_name in valid_data_list(): with self.subTest(new_name): GPGKey.update( { 'name': gpg_key['name'], 'new-name': new_name, 'organization-id': self.org['id'], } ) gpg_key = GPGKey.info({'name': new_name, 'organization-id': self.org['id']}) @tier1 def test_positive_update_key(self): """Create gpg key with valid name and valid gpg key via file import then update its gpg key file :id: 58a8ed14-adfc-4046-af63-59a7008ff4d7 :expectedresults: gpg key is updated :CaseImportance: Critical """ gpg_key = make_gpg_key({'organization-id': self.org['id']}) content = gen_alphanumeric(gen_integer(20, 50)) self.assertNotEqual(gpg_key['content'], content) local_key = create_gpg_key_file(content) self.assertIsNotNone(local_key, 'GPG Key file must be created') key = '/tmp/%s' % gen_alphanumeric() ssh.upload_file(local_file=local_key, remote_file=key) GPGKey.update({'key': key, 'name': gpg_key['name'], 'organization-id': self.org['id']}) gpg_key = GPGKey.info({'name': gpg_key['name'], 'organization-id': self.org['id']}) self.assertEqual(gpg_key['content'], content) # Negative Update @tier1 def test_negative_update_name(self): """Create gpg key with valid name and valid gpg key via file import then fail to update its name :id: 938d2925-c82c-43b6-8dfc-29c42eca7424 :expectedresults: gpg key is not updated :CaseImportance: Critical """ gpg_key = make_gpg_key({'organization-id': self.org['id']}) for new_name in invalid_values_list(): with self.subTest(new_name): with self.assertRaises(CLIReturnCodeError): GPGKey.update( { 'name': gpg_key['name'], 'new-name': new_name, 'organization-id': self.org['id'], } ) # Product association @tier2 def test_positive_add_empty_product(self): """Create gpg key with valid name and valid gpg key via file import then 
associate it with empty (no repos) custom product :id: b7477c2f-586c-4593-96c0-1fbc532ce8bf :expectedresults: gpg key is associated with product :CaseLevel: Integration """ gpg_key = make_gpg_key({'organization-id': self.org['id']}) product = make_product({'gpg-key-id': gpg_key['id'], 'organization-id': self.org['id']}) self.assertEqual(product['gpg']['gpg-key'], gpg_key['name']) @tier2 def test_positive_add_product_with_repo(self): """Create gpg key with valid name and valid gpg key via file import then associate it with custom product that has one repository :id: 5529a852-9ef6-48f8-b2bc-2bbf463657dd :expectedresults: gpg key is associated with product as well as with the repository :CaseLevel: Integration """ product = make_product({'organization-id': self.org['id']}) repo = make_repository({'product-id': product['id']}) gpg_key = make_gpg_key({'organization-id': self.org['id']}) Product.update( {'gpg-key': gpg_key['name'], 'id': product['id'], 'organization-id': self.org['id']} ) product = Product.info({'id': product['id'], 'organization-id': self.org['id']}) repo = Repository.info({'id': repo['id']}) self.assertEqual(product['gpg']['gpg-key-id'], gpg_key['id']) self.assertEqual(repo['gpg-key']['id'], gpg_key['id']) @tier2 def test_positive_add_product_with_repos(self): """Create gpg key with valid name and valid gpg key via file import then associate it with custom product that has more than one repository :id: b05c5223-44d5-4a48-9d99-18ca351c84a5 :expectedresults: gpg key is associated with product as well as with the repositories :CaseLevel: Integration """ product = make_product({'organization-id': self.org['id']}) repos = [make_repository({'product-id': product['id']}) for _ in range(gen_integer(2, 5))] gpg_key = make_gpg_key({'organization-id': self.org['id']}) Product.update( {'gpg-key': gpg_key['name'], 'id': product['id'], 'organization-id': self.org['id']} ) product = Product.info({'id': product['id'], 'organization-id': self.org['id']}) 
self.assertEqual(product['gpg']['gpg-key-id'], gpg_key['id']) for repo in repos: repo = Repository.info({'id': repo['id']}) self.assertEqual(repo['gpg-key']['id'], gpg_key['id']) @stubbed() @tier2 def test_positive_add_product_using_repo_discovery(self): """Create gpg key with valid name and valid gpg key via file import then associate it with custom product using Repo discovery method :id: fb12db0f-583f-49f4-9d8f-d19f2d5550ee :expectedresults: gpg key is associated with product but not the repositories :CaseAutomation: notautomated :CaseLevel: Integration """ @tier2 def test_positive_add_repo_from_product_with_repo(self): """Create gpg key with valid name and valid gpg key via file import then associate it to repository from custom product that has one repository :id: 1427f145-9faf-41ef-ae42-dc91d61ce1f6 :expectedresults: gpg key is associated with the repository but not with the product :CaseLevel: Integration """ product = make_product({'organization-id': self.org['id']}) repo = make_repository({'product-id': product['id']}) gpg_key = make_gpg_key({'organization-id': self.org['id']}) Repository.update( {'gpg-key-id': gpg_key['id'], 'id': repo['id'], 'organization-id': self.org['id']} ) product = Product.info({'id': product['id'], 'organization-id': self.org['id']}) repo = Repository.info({'id': repo['id']}) self.assertEqual(repo['gpg-key']['id'], gpg_key['id']) self.assertNotEqual(product['gpg'].get('gpg-key-id'), gpg_key['id']) @tier2 def test_positive_add_repo_from_product_with_repos(self): """Create gpg key via file import and associate with custom repo GPGKey should contain valid name and valid key and should be associated to one repository from custom product. Make sure custom product should have more than one repository. 
:id: 9796f6f0-e688-4f14-89ec-447feb4e4911 :expectedresults: gpg key is associated with the repository :CaseLevel: Integration """ product = make_product({'organization-id': self.org['id']}) repos = [make_repository({'product-id': product['id']}) for _ in range(gen_integer(2, 5))] gpg_key = make_gpg_key({'organization-id': self.org['id']}) Repository.update( {'gpg-key': gpg_key['name'], 'id': repos[0]['id'], 'organization-id': self.org['id']} ) product = Product.info({'id': product['id'], 'organization-id': self.org['id']}) self.assertNotEqual(product['gpg'].get('gpg-key-id'), gpg_key['id']) # First repo should have a valid gpg key assigned repo = Repository.info({'id': repos.pop(0)['id']}) self.assertEqual(repo['gpg-key']['id'], gpg_key['id']) # The rest of repos should not for repo in repos: repo = Repository.info({'id': repo['id']}) self.assertNotEqual(repo['gpg-key'].get('id'), gpg_key['id']) @stubbed() @tier2 def test_positive_add_repos_using_repo_discovery(self): """Create gpg key with valid name and valid gpg key via file import then associate it to repos from custom product using Repo discovery method :id: 1e91871c-0298-4cd0-b63b-f02d02622259 :expectedresults: gpg key is associated with product and all the repositories :CaseAutomation: notautomated :CaseLevel: Integration """ @tier2 def test_positive_update_key_for_empty_product(self): """Create gpg key with valid name and valid gpg key via file import then associate it with empty (no repos) custom product then update the key :id: c0c84c45-21fc-4940-9d52-00babb807ec7 :expectedresults: gpg key is associated with product before/after update :CaseLevel: Integration """ # Create a product and a gpg key product = make_product({'organization-id': self.org['id']}) gpg_key = make_gpg_key({'organization-id': self.org['id']}) # Associate gpg key with a product Product.update( {'gpg-key': gpg_key['name'], 'id': product['id'], 'organization-id': self.org['id']} ) # Verify gpg key was associated product = 
Product.info({'id': product['id'], 'organization-id': self.org['id']}) self.assertEqual(product['gpg']['gpg-key'], gpg_key['name']) # Update the gpg key new_name = gen_choice(valid_data_list()) GPGKey.update( {'name': gpg_key['name'], 'new-name': new_name, 'organization-id': self.org['id']} ) # Verify changes are reflected in the gpg key gpg_key = GPGKey.info({'id': gpg_key['id'], 'organization-id': self.org['id']}) self.assertEqual(gpg_key['name'], new_name) # Verify changes are reflected in the product product = Product.info({'id': product['id'], 'organization-id': self.org['id']}) self.assertEqual(product['gpg']['gpg-key'], new_name) @tier2 def test_positive_update_key_for_product_with_repo(self): """Create gpg key with valid name and valid gpg key via file import then associate it with custom product that has one repository then update the key :id: 3fb550a7-507e-4988-beb6-35bdfc2e99a8 :expectedresults: gpg key is associated with product before/after update as well as with the repository :CaseLevel: Integration """ # Create a product and a gpg key product = make_product({'organization-id': self.org['id']}) gpg_key = make_gpg_key({'organization-id': self.org['id']}) # Create a repository and assign it to the product repo = make_repository({'product-id': product['id']}) # Associate gpg key with a product Product.update( {'gpg-key': gpg_key['name'], 'id': product['id'], 'organization-id': self.org['id']} ) # Verify gpg key was associated product = Product.info({'id': product['id'], 'organization-id': self.org['id']}) repo = Repository.info({'id': repo['id']}) self.assertEqual(product['gpg']['gpg-key'], gpg_key['name']) self.assertEqual(repo['gpg-key'].get('name'), gpg_key['name']) # Update the gpg key new_name = gen_choice(valid_data_list()) GPGKey.update( {'name': gpg_key['name'], 'new-name': new_name, 'organization-id': self.org['id']} ) # Verify changes are reflected in the gpg key gpg_key = GPGKey.info({'id': gpg_key['id'], 'organization-id': self.org['id']}) 
self.assertEqual(gpg_key['name'], new_name) # Verify changes are reflected in the product product = Product.info({'id': product['id'], 'organization-id': self.org['id']}) self.assertEqual(product['gpg']['gpg-key'], new_name) # Verify changes are reflected in the repository repo = Repository.info({'id': repo['id']}) self.assertEqual(repo['gpg-key'].get('id'), gpg_key['id']) @tier2 def test_positive_update_key_for_product_with_repos(self): """Create gpg key with valid name and valid gpg key via file import then associate it with custom product that has more than one repository then update the key :id: a95eb51b-4b6b-4c04-bb4d-cbe600431850 :expectedresults: gpg key is associated with product before/after update as well as with the repositories :CaseLevel: Integration """ # Create a product and a gpg key product = make_product({'organization-id': self.org['id']}) gpg_key = make_gpg_key({'organization-id': self.org['id']}) # Create repositories and assign them to the product repos = [make_repository({'product-id': product['id']}) for _ in range(gen_integer(2, 5))] # Associate gpg key with a product Product.update( {'gpg-key': gpg_key['name'], 'id': product['id'], 'organization-id': self.org['id']} ) # Verify gpg key was associated product = Product.info({'id': product['id'], 'organization-id': self.org['id']}) self.assertEqual(product['gpg']['gpg-key'], gpg_key['name']) for repo in repos: repo = Repository.info({'id': repo['id']}) self.assertEqual(repo['gpg-key'].get('name'), gpg_key['name']) # Update the gpg key new_name = gen_choice(valid_data_list()) GPGKey.update( {'name': gpg_key['name'], 'new-name': new_name, 'organization-id': self.org['id']} ) # Verify changes are reflected in the gpg key gpg_key = GPGKey.info({'id': gpg_key['id'], 'organization-id': self.org['id']}) self.assertEqual(gpg_key['name'], new_name) # Verify changes are reflected in the product product = Product.info({'id': product['id'], 'organization-id': self.org['id']}) 
self.assertEqual(product['gpg']['gpg-key'], new_name) # Verify changes are reflected in the repositories for repo in repos: repo = Repository.info({'id': repo['id']}) self.assertEqual(repo['gpg-key'].get('name'), new_name) @stubbed() @tier2 def test_positive_update_key_for_product_using_repo_discovery(self): """Create gpg key with valid name and valid gpg key via file import then associate it with custom product using Repo discovery method then update the key :id: 8092bd11-75f3-4657-9309-d327498e7d52 :expectedresults: gpg key is associated with product before/after update but not the repositories :CaseAutomation: notautomated :CaseLevel: Integration """ @tier2 def test_positive_update_key_for_repo_from_product_with_repo(self): """Create gpg key with valid name and valid gpg key via file import then associate it to repository from custom product that has one repository then update the key :id: 549e2e1e-fd10-4487-a3a5-fdee9b8cfc48 :expectedresults: gpg key is associated with the repository before/after update, but not with the product :CaseLevel: Integration """ # Create a product and a gpg key product = make_product({'organization-id': self.org['id']}) gpg_key = make_gpg_key({'organization-id': self.org['id']}) # Create repository, assign product and gpg-key repo = make_repository({'gpg-key-id': gpg_key['id'], 'product-id': product['id']}) # Verify gpg key was associated self.assertEqual(repo['gpg-key'].get('name'), gpg_key['name']) # Update the gpg key new_name = gen_choice(valid_data_list()) GPGKey.update( {'name': gpg_key['name'], 'new-name': new_name, 'organization-id': self.org['id']} ) # Verify changes are reflected in the gpg key gpg_key = GPGKey.info({'id': gpg_key['id'], 'organization-id': self.org['id']}) self.assertEqual(gpg_key['name'], new_name) # Verify changes are reflected in the repositories repo = Repository.info({'id': repo['id']}) self.assertEqual(repo['gpg-key'].get('name'), new_name) # Verify gpg key wasn't added to the product product = 
Product.info({'id': product['id'], 'organization-id': self.org['id']}) self.assertNotEqual(product['gpg']['gpg-key'], new_name) @tier2 def test_positive_update_key_for_repo_from_product_with_repos(self): """Create gpg key with valid name and valid gpg key via file import then associate it to repository from custom product that has more than one repository then update the key :id: 773a9141-9f04-40ba-b3df-4b6d80db25a6 :expectedresults: gpg key is associated with a single repository before/after update and not associated with product or other repositories :CaseLevel: Integration """ # Create a product and a gpg key product = make_product({'organization-id': self.org['id']}) gpg_key = make_gpg_key({'organization-id': self.org['id']}) # Create repositories and assign them to the product repos = [make_repository({'product-id': product['id']}) for _ in range(gen_integer(2, 5))] # Associate gpg key with a single repository Repository.update( {'gpg-key': gpg_key['name'], 'id': repos[0]['id'], 'organization-id': self.org['id']} ) # Verify gpg key was associated repos[0] = Repository.info({'id': repos[0]['id']}) self.assertEqual(repos[0]['gpg-key']['name'], gpg_key['name']) # Update the gpg key new_name = gen_choice(valid_data_list()) GPGKey.update( {'name': gpg_key['name'], 'new-name': new_name, 'organization-id': self.org['id']} ) # Verify changes are reflected in the gpg key gpg_key = GPGKey.info({'id': gpg_key['id'], 'organization-id': self.org['id']}) self.assertEqual(gpg_key['name'], new_name) # Verify changes are reflected in the associated repository repos[0] = Repository.info({'id': repos[0]['id']}) self.assertEqual(repos[0]['gpg-key'].get('name'), new_name) # Verify changes are not reflected in the product product = Product.info({'id': product['id'], 'organization-id': self.org['id']}) self.assertNotEqual(product['gpg']['gpg-key'], new_name) # Verify changes are not reflected in the rest of repositories for repo in repos[1:]: repo = Repository.info({'id': 
repo['id']}) self.assertNotEqual(repo['gpg-key'].get('name'), new_name) @stubbed() @tier2 def test_positive_update_key_for_repos_using_repo_discovery(self): """Create gpg key with valid name and valid gpg key via file import then associate it to repos from custom product using Repo discovery method then update the key :id: 21dfd9b0-3de9-4876-aeea-c856adb5ed98 :expectedresults: gpg key is associated with product and all repositories before/after update :CaseAutomation: notautomated :CaseLevel: Integration """ @tier2 def test_positive_delete_key_for_empty_product(self): """Create gpg key with valid name and valid gpg key via file import then associate it with empty (no repos) custom product then delete it :id: da76cada-5ccf-47e1-8c12-24f30c41c8b6 :expectedresults: gpg key is associated with product during creation but removed from product after deletion :CaseLevel: Integration """ # Create a product and a gpg key gpg_key = make_gpg_key({'organization-id': self.org['id']}) product = make_product({'gpg-key-id': gpg_key['id'], 'organization-id': self.org['id']}) # Verify gpg key was associated self.assertEqual(product['gpg']['gpg-key'], gpg_key['name']) # Delete the gpg key GPGKey.delete({'name': gpg_key['name'], 'organization-id': self.org['id']}) # Verify gpg key was actually deleted with self.assertRaises(CLIReturnCodeError): GPGKey.info({'id': gpg_key['id'], 'organization-id': self.org['id']}) # Verify gpg key was disassociated from the product product = Product.info({'id': product['id'], 'organization-id': self.org['id']}) self.assertNotEqual(product['gpg']['gpg-key'], gpg_key['name']) @tier2 @upgrade def test_positive_delete_key_for_product_with_repo(self): """Create gpg key with valid name and valid gpg key via file import then associate it with custom product that has one repository then delete it :id: a5d4ea02-f015-4026-b4dc-7365eaf00049 :expectedresults: gpg key is associated with product but and its repository during creation but removed from product and 
repository after deletion :CaseLevel: Integration """ # Create product, repository and gpg key product = make_product({'organization-id': self.org['id']}) repo = make_repository({'product-id': product['id']}) gpg_key = make_gpg_key({'organization-id': self.org['id']}) # Associate gpg key with a product Product.update( {'gpg-key': gpg_key['name'], 'id': product['id'], 'organization-id': self.org['id']} ) # Verify gpg key was associated both with product and its repository product = Product.info({'id': product['id'], 'organization-id': self.org['id']}) repo = Repository.info({'id': repo['id']}) self.assertEqual(product['gpg']['gpg-key'], gpg_key['name']) self.assertEqual(repo['gpg-key'].get('name'), gpg_key['name']) # Delete the gpg key GPGKey.delete({'name': gpg_key['name'], 'organization-id': self.org['id']}) # Verify gpg key was actually deleted with self.assertRaises(CLIReturnCodeError): GPGKey.info({'id': gpg_key['id'], 'organization-id': self.org['id']}) # Verify gpg key was disassociated from the product and its repository product = Product.info({'id': product['id'], 'organization-id': self.org['id']}) repo = Repository.info({'id': repo['id']}) self.assertNotEqual(product['gpg']['gpg-key'], gpg_key['name']) self.assertNotEqual(repo['gpg-key'].get('name'), gpg_key['name']) @tier2 def test_positive_delete_key_for_product_with_repos(self): """Create gpg key with valid name and valid gpg key via file import then associate it with custom product that has more than one repository then delete it :id: f92d4643-1892-4f95-ae6b-fcea8e726946 :expectedresults: gpg key is associated with product and its repositories during creation but removed from the product and the repositories after deletion :CaseLevel: Integration """ # Create product, repositories and gpg key product = make_product({'organization-id': self.org['id']}) repos = [make_repository({'product-id': product['id']}) for _ in range(gen_integer(2, 5))] gpg_key = make_gpg_key({'organization-id': self.org['id']}) # 
Associate gpg key with a product Product.update( {'gpg-key': gpg_key['name'], 'id': product['id'], 'organization-id': self.org['id']} ) # Verify gpg key was associated with product and its repositories product = Product.info({'id': product['id'], 'organization-id': self.org['id']}) self.assertEqual(product['gpg']['gpg-key'], gpg_key['name']) for repo in repos: repo = Repository.info({'id': repo['id']}) self.assertEqual(repo['gpg-key'].get('name'), gpg_key['name']) # Delete the gpg key GPGKey.delete({'name': gpg_key['name'], 'organization-id': self.org['id']}) # Verify gpg key was actually deleted with self.assertRaises(CLIReturnCodeError): GPGKey.info({'id': gpg_key['id'], 'organization-id': self.org['id']}) # Verify gpg key was disassociated from the product and its # repositories product = Product.info({'id': product['id'], 'organization-id': self.org['id']}) self.assertNotEqual(product['gpg']['gpg-key'], gpg_key['name']) for repo in repos: repo = Repository.info({'id': repo['id']}) self.assertNotEqual(repo['gpg-key'].get('name'), gpg_key['name']) @stubbed() @tier2 def test_positive_delete_key_for_product_using_repo_discovery(self): """Create gpg key with valid name and valid gpg key via file import then associate it with custom product using Repo discovery method then delete it :id: f8492db8-12f3-4d32-833a-f177734e2253 :expectedresults: gpg key is associated with product but not the repositories during creation but removed from product after deletion :CaseAutomation: notautomated :CaseLevel: Integration """ @tier2 def test_positive_delete_key_for_repo_from_product_with_repo(self): """Create gpg key with valid name and valid gpg key via file import then associate it to repository from custom product that has one repository then delete the key :id: 3658e04d-fc63-499f-a22d-b512941cc96b :expectedresults: gpg key is associated with the single repository but not the product during creation and was removed from repository after deletion :CaseLevel: Integration """ # 
Create product, repository and gpg key product = make_product({'organization-id': self.org['id']}) repo = make_repository({'product-id': product['id']}) gpg_key = make_gpg_key({'organization-id': self.org['id']}) # Associate gpg key with a repository Repository.update( {'gpg-key': gpg_key['name'], 'id': repo['id'], 'organization-id': self.org['id']} ) # Verify gpg key was associated with the repository but not with the # product product = Product.info({'id': product['id'], 'organization-id': self.org['id']}) repo = Repository.info({'id': repo['id']}) self.assertNotEqual(product['gpg']['gpg-key'], gpg_key['name']) self.assertEqual(repo['gpg-key'].get('name'), gpg_key['name']) # Delete the gpg key GPGKey.delete({'name': gpg_key['name'], 'organization-id': self.org['id']}) # Verify gpg key was actually deleted with self.assertRaises(CLIReturnCodeError): GPGKey.info({'id': gpg_key['id'], 'organization-id': self.org['id']}) # Verify gpg key was disassociated from the repository repo = Repository.info({'id': repo['id']}) self.assertNotEqual(repo['gpg-key'].get('name'), gpg_key['name']) @tier2 def test_positive_delete_key_for_repo_from_product_with_repos(self): """Create gpg key with valid name and valid gpg key via file import then associate it to repository from custom product that has more than one repository then delete the key :id: e7ed4ed9-ecfe-4954-b806-cdd0668e8822 :expectedresults: gpg key is associated with a single repository but not the product during creation and removed from repository after deletion :CaseLevel: Integration """ # Create product, repositories and gpg key product = make_product({'organization-id': self.org['id']}) repos = [] for _ in range(gen_integer(2, 5)): repos.append(make_repository({'product-id': product['id']})) gpg_key = make_gpg_key({'organization-id': self.org['id']}) # Associate gpg key with a repository Repository.update( {'gpg-key': gpg_key['name'], 'id': repos[0]['id'], 'organization-id': self.org['id']} ) # Verify gpg key was 
associated with the repository repos[0] = Repository.info({'id': repos[0]['id']}) self.assertEqual(repos[0]['gpg-key']['name'], gpg_key['name']) # Delete the gpg key GPGKey.delete({'name': gpg_key['name'], 'organization-id': self.org['id']}) # Verify gpg key was actually deleted with self.assertRaises(CLIReturnCodeError): GPGKey.info({'id': gpg_key['id'], 'organization-id': self.org['id']}) # Verify gpg key is not associated with any repository or the product # itself product = Product.info({'id': product['id'], 'organization-id': self.org['id']}) self.assertNotEqual(product['gpg']['gpg-key'], gpg_key['name']) for repo in repos: repo = Repository.info({'id': repo['id']}) self.assertNotEqual(repo['gpg-key'].get('name'), gpg_key['name']) @stubbed() @tier2 def test_positive_delete_key_for_repos_using_repo_discovery(self): """Create gpg key with valid name and valid gpg key via file import then associate it to repos from custom product using Repo discovery method then delete the key :id: 8ae226c6-f27c-4fb5-94f2-89792cccda0b :expectedresults: gpg key is associated with product and all repositories during creation but removed from product and all repositories after deletion :CaseAutomation: notautomated :CaseLevel: Integration """ # Content @stubbed() @tier2 def test_positive_consume_content_using_repo(self): """Hosts can install packages using gpg key associated with single custom repository :id: 39357649-4c60-4c82-9114-a43dfef81e5b :expectedresults: host can install package from custom repository :CaseAutomation: notautomated :CaseLevel: Integration """ @stubbed() @tier2 @upgrade def test_positive_consume_content_using_repos(self): """Hosts can install packages using gpg key associated with multiple custom repositories :id: fedd6fa2-e28b-468b-8e15-802b52970bb9 :expectedresults: host can install package from custom repositories :CaseAutomation: notautomated :CaseLevel: Integration """ @stubbed() @tier2 def 
test_positive_consume_content_using_repos_and_different_keys(self): """Hosts can install packages using different gpg keys associated with multiple custom repositories :id: ac908aee-0928-4f81-a98b-b60d46b10c90 :expectedresults: host can install package from custom repositories :CaseAutomation: notautomated :CaseLevel: Integration """ # Miscelaneous @tier1 def test_positive_list(self): """Create gpg key and list it :id: 5da535b3-1728-4edf-bd33-3822c4427ef3 :expectedresults: gpg key is displayed/listed :CaseImportance: Critical """ gpg_key = make_gpg_key({'key': VALID_GPG_KEY_FILE_PATH, 'organization-id': self.org['id']}) gpg_keys_list = GPGKey.list({'organization-id': self.org['id']}) self.assertIn(gpg_key['id'], [gpg['id'] for gpg in gpg_keys_list]) @tier1 def test_positive_search(self): """Create gpg key and search/find it :id: 9ef15add-b067-4134-b930-aaeda18bddfa :expectedresults: gpg key can be found :CaseImportance: Critical """ for name in valid_data_list(): with self.subTest(name): gpg_key = make_gpg_key( { 'key': VALID_GPG_KEY_FILE_PATH, 'name': name, 'organization-id': self.org['id'], } ) # Can we find the new object? result = GPGKey.exists( {'organization-id': self.org['id']}, search=('name', gpg_key['name']) ) self.assertEqual(gpg_key['name'], result['name'])
gpl-3.0
fhoring/autorest
src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/ModelFlattening/autorestresourceflatteningtestservice/models/base_product.py
8
1269
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class BaseProduct(Model):
    """The product documentation.

    :param product_id: Unique identifier representing a specific product for
     a given latitude & longitude. For example, uberX in San Francisco will
     have a different product_id than uberX in Los Angeles.
    :type product_id: str
    :param description: Description of product.
    :type description: str
    """

    # msrest validation rules: product_id must be supplied on serialization.
    _validation = {
        'product_id': {'required': True},
    }

    # Maps python attribute names to their wire-format keys and types.
    _attribute_map = {
        'product_id': {'key': 'base_product_id', 'type': 'str'},
        'description': {'key': 'base_product_description', 'type': 'str'},
    }

    def __init__(self, product_id, description=None):
        # Store both fields; description is optional and defaults to None.
        self.product_id, self.description = product_id, description
mit
aforalee/keystone
keystone/tests/unit/common/test_ldap.py
7
22334
# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import tempfile import uuid import fixtures import ldap.dn import mock from oslo_config import cfg from testtools import matchers from keystone.common import driver_hints from keystone.common import ldap as ks_ldap from keystone.common.ldap import core as common_ldap_core from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit import fakeldap CONF = cfg.CONF class DnCompareTest(unit.BaseTestCase): """Tests for the DN comparison functions in keystone.common.ldap.core.""" def test_prep(self): # prep_case_insensitive returns the string with spaces at the front and # end if it's already lowercase and no insignificant characters. value = 'lowercase value' self.assertEqual(value, ks_ldap.prep_case_insensitive(value)) def test_prep_lowercase(self): # prep_case_insensitive returns the string with spaces at the front and # end and lowercases the value. value = 'UPPERCASE VALUE' exp_value = value.lower() self.assertEqual(exp_value, ks_ldap.prep_case_insensitive(value)) def test_prep_insignificant(self): # prep_case_insensitive remove insignificant spaces. value = 'before after' exp_value = 'before after' self.assertEqual(exp_value, ks_ldap.prep_case_insensitive(value)) def test_prep_insignificant_pre_post(self): # prep_case_insensitive remove insignificant spaces. 
value = ' value ' exp_value = 'value' self.assertEqual(exp_value, ks_ldap.prep_case_insensitive(value)) def test_ava_equal_same(self): # is_ava_value_equal returns True if the two values are the same. value = 'val1' self.assertTrue(ks_ldap.is_ava_value_equal('cn', value, value)) def test_ava_equal_complex(self): # is_ava_value_equal returns True if the two values are the same using # a value that's got different capitalization and insignificant chars. val1 = 'before after' val2 = ' BEFORE afTer ' self.assertTrue(ks_ldap.is_ava_value_equal('cn', val1, val2)) def test_ava_different(self): # is_ava_value_equal returns False if the values aren't the same. self.assertFalse(ks_ldap.is_ava_value_equal('cn', 'val1', 'val2')) def test_rdn_same(self): # is_rdn_equal returns True if the two values are the same. rdn = ldap.dn.str2dn('cn=val1')[0] self.assertTrue(ks_ldap.is_rdn_equal(rdn, rdn)) def test_rdn_diff_length(self): # is_rdn_equal returns False if the RDNs have a different number of # AVAs. rdn1 = ldap.dn.str2dn('cn=cn1')[0] rdn2 = ldap.dn.str2dn('cn=cn1+ou=ou1')[0] self.assertFalse(ks_ldap.is_rdn_equal(rdn1, rdn2)) def test_rdn_multi_ava_same_order(self): # is_rdn_equal returns True if the RDNs have the same number of AVAs # and the values are the same. rdn1 = ldap.dn.str2dn('cn=cn1+ou=ou1')[0] rdn2 = ldap.dn.str2dn('cn=CN1+ou=OU1')[0] self.assertTrue(ks_ldap.is_rdn_equal(rdn1, rdn2)) def test_rdn_multi_ava_diff_order(self): # is_rdn_equal returns True if the RDNs have the same number of AVAs # and the values are the same, even if in a different order rdn1 = ldap.dn.str2dn('cn=cn1+ou=ou1')[0] rdn2 = ldap.dn.str2dn('ou=OU1+cn=CN1')[0] self.assertTrue(ks_ldap.is_rdn_equal(rdn1, rdn2)) def test_rdn_multi_ava_diff_type(self): # is_rdn_equal returns False if the RDNs have the same number of AVAs # and the attribute types are different. 
rdn1 = ldap.dn.str2dn('cn=cn1+ou=ou1')[0] rdn2 = ldap.dn.str2dn('cn=cn1+sn=sn1')[0] self.assertFalse(ks_ldap.is_rdn_equal(rdn1, rdn2)) def test_rdn_attr_type_case_diff(self): # is_rdn_equal returns True for same RDNs even when attr type case is # different. rdn1 = ldap.dn.str2dn('cn=cn1')[0] rdn2 = ldap.dn.str2dn('CN=cn1')[0] self.assertTrue(ks_ldap.is_rdn_equal(rdn1, rdn2)) def test_rdn_attr_type_alias(self): # is_rdn_equal returns False for same RDNs even when attr type alias is # used. Note that this is a limitation since an LDAP server should # consider them equal. rdn1 = ldap.dn.str2dn('cn=cn1')[0] rdn2 = ldap.dn.str2dn('2.5.4.3=cn1')[0] self.assertFalse(ks_ldap.is_rdn_equal(rdn1, rdn2)) def test_dn_same(self): # is_dn_equal returns True if the DNs are the same. dn = 'cn=Babs Jansen,ou=OpenStack' self.assertTrue(ks_ldap.is_dn_equal(dn, dn)) def test_dn_equal_unicode(self): # is_dn_equal can accept unicode dn = u'cn=fäké,ou=OpenStack' self.assertTrue(ks_ldap.is_dn_equal(dn, dn)) def test_dn_diff_length(self): # is_dn_equal returns False if the DNs don't have the same number of # RDNs dn1 = 'cn=Babs Jansen,ou=OpenStack' dn2 = 'cn=Babs Jansen,ou=OpenStack,dc=example.com' self.assertFalse(ks_ldap.is_dn_equal(dn1, dn2)) def test_dn_equal_rdns(self): # is_dn_equal returns True if the DNs have the same number of RDNs # and each RDN is the same. dn1 = 'cn=Babs Jansen,ou=OpenStack+cn=OpenSource' dn2 = 'CN=Babs Jansen,cn=OpenSource+ou=OpenStack' self.assertTrue(ks_ldap.is_dn_equal(dn1, dn2)) def test_dn_parsed_dns(self): # is_dn_equal can also accept parsed DNs. dn_str1 = ldap.dn.str2dn('cn=Babs Jansen,ou=OpenStack+cn=OpenSource') dn_str2 = ldap.dn.str2dn('CN=Babs Jansen,cn=OpenSource+ou=OpenStack') self.assertTrue(ks_ldap.is_dn_equal(dn_str1, dn_str2)) def test_startswith_under_child(self): # dn_startswith returns True if descendant_dn is a child of dn. 
child = 'cn=Babs Jansen,ou=OpenStack' parent = 'ou=OpenStack' self.assertTrue(ks_ldap.dn_startswith(child, parent)) def test_startswith_parent(self): # dn_startswith returns False if descendant_dn is a parent of dn. child = 'cn=Babs Jansen,ou=OpenStack' parent = 'ou=OpenStack' self.assertFalse(ks_ldap.dn_startswith(parent, child)) def test_startswith_same(self): # dn_startswith returns False if DNs are the same. dn = 'cn=Babs Jansen,ou=OpenStack' self.assertFalse(ks_ldap.dn_startswith(dn, dn)) def test_startswith_not_parent(self): # dn_startswith returns False if descendant_dn is not under the dn child = 'cn=Babs Jansen,ou=OpenStack' parent = 'dc=example.com' self.assertFalse(ks_ldap.dn_startswith(child, parent)) def test_startswith_descendant(self): # dn_startswith returns True if descendant_dn is a descendant of dn. descendant = 'cn=Babs Jansen,ou=Keystone,ou=OpenStack,dc=example.com' dn = 'ou=OpenStack,dc=example.com' self.assertTrue(ks_ldap.dn_startswith(descendant, dn)) descendant = 'uid=12345,ou=Users,dc=example,dc=com' dn = 'ou=Users,dc=example,dc=com' self.assertTrue(ks_ldap.dn_startswith(descendant, dn)) def test_startswith_parsed_dns(self): # dn_startswith also accepts parsed DNs. descendant = ldap.dn.str2dn('cn=Babs Jansen,ou=OpenStack') dn = ldap.dn.str2dn('ou=OpenStack') self.assertTrue(ks_ldap.dn_startswith(descendant, dn)) def test_startswith_unicode(self): # dn_startswith accepts unicode. 
child = u'cn=cn=fäké,ou=OpenStäck' parent = 'ou=OpenStäck' self.assertTrue(ks_ldap.dn_startswith(child, parent)) class LDAPDeleteTreeTest(unit.TestCase): def setUp(self): super(LDAPDeleteTreeTest, self).setUp() ks_ldap.register_handler('fake://', fakeldap.FakeLdapNoSubtreeDelete) self.load_backends() self.load_fixtures(default_fixtures) self.addCleanup(self.clear_database) self.addCleanup(common_ldap_core._HANDLERS.clear) def clear_database(self): for shelf in fakeldap.FakeShelves: fakeldap.FakeShelves[shelf].clear() def config_overrides(self): super(LDAPDeleteTreeTest, self).config_overrides() self.config_fixture.config(group='identity', driver='ldap') def config_files(self): config_files = super(LDAPDeleteTreeTest, self).config_files() config_files.append(unit.dirs.tests_conf('backend_ldap.conf')) return config_files def test_deleteTree(self): """Test manually deleting a tree. Few LDAP servers support CONTROL_DELETETREE. This test exercises the alternate code paths in BaseLdap.deleteTree. 
""" conn = self.identity_api.user.get_connection() id_attr = self.identity_api.user.id_attr objclass = self.identity_api.user.object_class.lower() tree_dn = self.identity_api.user.tree_dn def create_entry(name, parent_dn=None): if not parent_dn: parent_dn = tree_dn dn = '%s=%s,%s' % (id_attr, name, parent_dn) attrs = [('objectclass', [objclass, 'ldapsubentry']), (id_attr, [name])] conn.add_s(dn, attrs) return dn # create 3 entries like this: # cn=base # cn=child,cn=base # cn=grandchild,cn=child,cn=base # then attempt to deleteTree(cn=base) base_id = 'base' base_dn = create_entry(base_id) child_dn = create_entry('child', base_dn) grandchild_dn = create_entry('grandchild', child_dn) # verify that the three entries were created scope = ldap.SCOPE_SUBTREE filt = '(|(objectclass=*)(objectclass=ldapsubentry))' entries = conn.search_s(base_dn, scope, filt, attrlist=common_ldap_core.DN_ONLY) self.assertThat(entries, matchers.HasLength(3)) sort_ents = sorted([e[0] for e in entries], key=len, reverse=True) self.assertEqual([grandchild_dn, child_dn, base_dn], sort_ents) # verify that a non-leaf node can't be deleted directly by the # LDAP server self.assertRaises(ldap.NOT_ALLOWED_ON_NONLEAF, conn.delete_s, base_dn) self.assertRaises(ldap.NOT_ALLOWED_ON_NONLEAF, conn.delete_s, child_dn) # call our deleteTree implementation self.identity_api.user.deleteTree(base_id) self.assertRaises(ldap.NO_SUCH_OBJECT, conn.search_s, base_dn, ldap.SCOPE_BASE) self.assertRaises(ldap.NO_SUCH_OBJECT, conn.search_s, child_dn, ldap.SCOPE_BASE) self.assertRaises(ldap.NO_SUCH_OBJECT, conn.search_s, grandchild_dn, ldap.SCOPE_BASE) class SslTlsTest(unit.TestCase): """Tests for the SSL/TLS functionality in keystone.common.ldap.core.""" @mock.patch.object(ks_ldap.core.KeystoneLDAPHandler, 'simple_bind_s') @mock.patch.object(ldap.ldapobject.LDAPObject, 'start_tls_s') def _init_ldap_connection(self, config, mock_ldap_one, mock_ldap_two): # Attempt to connect to initialize python-ldap. 
base_ldap = ks_ldap.BaseLdap(config) base_ldap.get_connection() def test_certfile_trust_tls(self): # We need this to actually exist, so we create a tempfile. (handle, certfile) = tempfile.mkstemp() self.addCleanup(os.unlink, certfile) self.addCleanup(os.close, handle) self.config_fixture.config(group='ldap', url='ldap://localhost', use_tls=True, tls_cacertfile=certfile) self._init_ldap_connection(CONF) # Ensure the cert trust option is set. self.assertEqual(certfile, ldap.get_option(ldap.OPT_X_TLS_CACERTFILE)) def test_certdir_trust_tls(self): # We need this to actually exist, so we create a tempdir. certdir = self.useFixture(fixtures.TempDir()).path self.config_fixture.config(group='ldap', url='ldap://localhost', use_tls=True, tls_cacertdir=certdir) self._init_ldap_connection(CONF) # Ensure the cert trust option is set. self.assertEqual(certdir, ldap.get_option(ldap.OPT_X_TLS_CACERTDIR)) def test_certfile_trust_ldaps(self): # We need this to actually exist, so we create a tempfile. (handle, certfile) = tempfile.mkstemp() self.addCleanup(os.unlink, certfile) self.addCleanup(os.close, handle) self.config_fixture.config(group='ldap', url='ldaps://localhost', use_tls=False, tls_cacertfile=certfile) self._init_ldap_connection(CONF) # Ensure the cert trust option is set. self.assertEqual(certfile, ldap.get_option(ldap.OPT_X_TLS_CACERTFILE)) def test_certdir_trust_ldaps(self): # We need this to actually exist, so we create a tempdir. certdir = self.useFixture(fixtures.TempDir()).path self.config_fixture.config(group='ldap', url='ldaps://localhost', use_tls=False, tls_cacertdir=certdir) self._init_ldap_connection(CONF) # Ensure the cert trust option is set. 
self.assertEqual(certdir, ldap.get_option(ldap.OPT_X_TLS_CACERTDIR)) class LDAPPagedResultsTest(unit.TestCase): """Tests the paged results functionality in keystone.common.ldap.core.""" def setUp(self): super(LDAPPagedResultsTest, self).setUp() self.clear_database() ks_ldap.register_handler('fake://', fakeldap.FakeLdap) self.addCleanup(common_ldap_core._HANDLERS.clear) self.load_backends() self.load_fixtures(default_fixtures) def clear_database(self): for shelf in fakeldap.FakeShelves: fakeldap.FakeShelves[shelf].clear() def config_overrides(self): super(LDAPPagedResultsTest, self).config_overrides() self.config_fixture.config(group='identity', driver='ldap') def config_files(self): config_files = super(LDAPPagedResultsTest, self).config_files() config_files.append(unit.dirs.tests_conf('backend_ldap.conf')) return config_files @mock.patch.object(fakeldap.FakeLdap, 'search_ext') @mock.patch.object(fakeldap.FakeLdap, 'result3') def test_paged_results_control_api(self, mock_result3, mock_search_ext): mock_result3.return_value = ('', [], 1, []) self.config_fixture.config(group='ldap', page_size=1) conn = self.identity_api.user.get_connection() conn._paged_search_s('dc=example,dc=test', ldap.SCOPE_SUBTREE, 'objectclass=*') class CommonLdapTestCase(unit.BaseTestCase): """These test cases call functions in keystone.common.ldap.""" def test_binary_attribute_values(self): result = [( 'cn=junk,dc=example,dc=com', { 'cn': ['junk'], 'sn': [uuid.uuid4().hex], 'mail': [uuid.uuid4().hex], 'binary_attr': ['\x00\xFF\x00\xFF'] } ), ] py_result = ks_ldap.convert_ldap_result(result) # The attribute containing the binary value should # not be present in the converted result. 
self.assertNotIn('binary_attr', py_result[0][1]) def test_utf8_conversion(self): value_unicode = u'fäké1' value_utf8 = value_unicode.encode('utf-8') result_utf8 = ks_ldap.utf8_encode(value_unicode) self.assertEqual(value_utf8, result_utf8) result_utf8 = ks_ldap.utf8_encode(value_utf8) self.assertEqual(value_utf8, result_utf8) result_unicode = ks_ldap.utf8_decode(value_utf8) self.assertEqual(value_unicode, result_unicode) result_unicode = ks_ldap.utf8_decode(value_unicode) self.assertEqual(value_unicode, result_unicode) self.assertRaises(TypeError, ks_ldap.utf8_encode, 100) result_unicode = ks_ldap.utf8_decode(100) self.assertEqual(u'100', result_unicode) def test_user_id_begins_with_0(self): user_id = '0123456' result = [( 'cn=dummy,dc=example,dc=com', { 'user_id': [user_id], 'enabled': ['TRUE'] } ), ] py_result = ks_ldap.convert_ldap_result(result) # The user id should be 0123456, and the enabled # flag should be True self.assertIs(py_result[0][1]['enabled'][0], True) self.assertEqual(user_id, py_result[0][1]['user_id'][0]) def test_user_id_begins_with_0_and_enabled_bit_mask(self): user_id = '0123456' bitmask = '225' expected_bitmask = 225 result = [( 'cn=dummy,dc=example,dc=com', { 'user_id': [user_id], 'enabled': [bitmask] } ), ] py_result = ks_ldap.convert_ldap_result(result) # The user id should be 0123456, and the enabled # flag should be 225 self.assertEqual(expected_bitmask, py_result[0][1]['enabled'][0]) self.assertEqual(user_id, py_result[0][1]['user_id'][0]) def test_user_id_and_bitmask_begins_with_0(self): user_id = '0123456' bitmask = '0225' expected_bitmask = 225 result = [( 'cn=dummy,dc=example,dc=com', { 'user_id': [user_id], 'enabled': [bitmask] } ), ] py_result = ks_ldap.convert_ldap_result(result) # The user id should be 0123456, and the enabled # flag should be 225, the 0 is dropped. 
self.assertEqual(expected_bitmask, py_result[0][1]['enabled'][0]) self.assertEqual(user_id, py_result[0][1]['user_id'][0]) def test_user_id_and_user_name_with_boolean_string(self): boolean_strings = ['TRUE', 'FALSE', 'true', 'false', 'True', 'False', 'TrUe' 'FaLse'] for user_name in boolean_strings: user_id = uuid.uuid4().hex result = [( 'cn=dummy,dc=example,dc=com', { 'user_id': [user_id], 'user_name': [user_name] } ), ] py_result = ks_ldap.convert_ldap_result(result) # The user name should still be a string value. self.assertEqual(user_name, py_result[0][1]['user_name'][0]) class LDAPFilterQueryCompositionTest(unit.TestCase): """These test cases test LDAP filter generation.""" def setUp(self): super(LDAPFilterQueryCompositionTest, self).setUp() self.base_ldap = ks_ldap.BaseLdap(self.config_fixture.conf) # The tests need an attribute mapping to use. self.attribute_name = uuid.uuid4().hex self.filter_attribute_name = uuid.uuid4().hex self.base_ldap.attribute_mapping = { self.attribute_name: self.filter_attribute_name } def test_return_query_with_no_hints(self): hints = driver_hints.Hints() # NOTE: doesn't have to be a real query, we just need to make sure the # same string is returned if there are no hints. 
query = uuid.uuid4().hex self.assertEqual(query, self.base_ldap.filter_query(hints=hints, query=query)) # make sure the default query is an empty string self.assertEqual('', self.base_ldap.filter_query(hints=hints)) def test_filter_with_empty_query_and_hints_set(self): hints = driver_hints.Hints() username = uuid.uuid4().hex hints.add_filter(name=self.attribute_name, value=username, comparator='equals', case_sensitive=False) expected_ldap_filter = '(&(%s=%s))' % ( self.filter_attribute_name, username) self.assertEqual(expected_ldap_filter, self.base_ldap.filter_query(hints=hints)) def test_filter_with_both_query_and_hints_set(self): hints = driver_hints.Hints() # NOTE: doesn't have to be a real query, we just need to make sure the # filter string is concatenated correctly query = uuid.uuid4().hex username = uuid.uuid4().hex expected_result = '(&%(query)s(%(user_name_attr)s=%(username)s))' % ( {'query': query, 'user_name_attr': self.filter_attribute_name, 'username': username}) hints.add_filter(self.attribute_name, username) self.assertEqual(expected_result, self.base_ldap.filter_query(hints=hints, query=query)) def test_filter_with_hints_and_query_is_none(self): hints = driver_hints.Hints() username = uuid.uuid4().hex hints.add_filter(name=self.attribute_name, value=username, comparator='equals', case_sensitive=False) expected_ldap_filter = '(&(%s=%s))' % ( self.filter_attribute_name, username) self.assertEqual(expected_ldap_filter, self.base_ldap.filter_query(hints=hints, query=None))
apache-2.0
YYWen0o0/python-frame-django
tests/check_framework/tests.py
10
11670
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.utils.six import StringIO import sys from django.apps import apps from django.conf import settings from django.core import checks from django.core.checks import Error, Warning from django.core.checks.registry import CheckRegistry from django.core.checks.compatibility.django_1_6_0 import check_1_6_compatibility from django.core.checks.compatibility.django_1_7_0 import check_1_7_compatibility from django.core.management.base import CommandError from django.core.management import call_command from django.db.models.fields import NOT_PROVIDED from django.test import TestCase from django.test.utils import override_settings, override_system_checks from django.utils.encoding import force_text from .models import SimpleModel, Book class DummyObj(object): def __repr__(self): return "obj" class SystemCheckFrameworkTests(TestCase): def test_register_and_run_checks(self): calls = [0] registry = CheckRegistry() @registry.register() def f(**kwargs): calls[0] += 1 return [1, 2, 3] errors = registry.run_checks() self.assertEqual(errors, [1, 2, 3]) self.assertEqual(calls[0], 1) class MessageTests(TestCase): def test_printing(self): e = Error("Message", hint="Hint", obj=DummyObj()) expected = "obj: Message\n\tHINT: Hint" self.assertEqual(force_text(e), expected) def test_printing_no_hint(self): e = Error("Message", hint=None, obj=DummyObj()) expected = "obj: Message" self.assertEqual(force_text(e), expected) def test_printing_no_object(self): e = Error("Message", hint="Hint", obj=None) expected = "?: Message\n\tHINT: Hint" self.assertEqual(force_text(e), expected) def test_printing_with_given_id(self): e = Error("Message", hint="Hint", obj=DummyObj(), id="ID") expected = "obj: (ID) Message\n\tHINT: Hint" self.assertEqual(force_text(e), expected) def test_printing_field_error(self): field = SimpleModel._meta.get_field('field') e = Error("Error", hint=None, obj=field) expected = 
"check_framework.SimpleModel.field: Error" self.assertEqual(force_text(e), expected) def test_printing_model_error(self): e = Error("Error", hint=None, obj=SimpleModel) expected = "check_framework.SimpleModel: Error" self.assertEqual(force_text(e), expected) def test_printing_manager_error(self): manager = SimpleModel.manager e = Error("Error", hint=None, obj=manager) expected = "check_framework.SimpleModel.manager: Error" self.assertEqual(force_text(e), expected) class Django_1_6_0_CompatibilityChecks(TestCase): @override_settings(TEST_RUNNER='django.test.runner.DiscoverRunner') def test_test_runner_new_default(self): errors = check_1_6_compatibility() self.assertEqual(errors, []) @override_settings(TEST_RUNNER='myapp.test.CustomRunner') def test_test_runner_overriden(self): errors = check_1_6_compatibility() self.assertEqual(errors, []) def test_test_runner_not_set_explicitly(self): # If TEST_RUNNER was set explicitly, temporarily pretend it wasn't test_runner_overridden = False if 'TEST_RUNNER' in settings._wrapped._explicit_settings: test_runner_overridden = True settings._wrapped._explicit_settings.remove('TEST_RUNNER') # We remove some settings to make this look like a project generated under Django 1.5. settings._wrapped._explicit_settings.add('MANAGERS') settings._wrapped._explicit_settings.add('ADMINS') try: errors = check_1_6_compatibility() expected = [ checks.Warning( "Some project unittests may not execute as expected.", hint=("Django 1.6 introduced a new default test runner. It looks like " "this project was generated using Django 1.5 or earlier. You should " "ensure your tests are all running & behaving as expected. 
See " "https://docs.djangoproject.com/en/dev/releases/1.6/#new-test-runner " "for more information."), obj=None, id='1_6.W001', ) ] self.assertEqual(errors, expected) finally: # Restore settings value if test_runner_overridden: settings._wrapped._explicit_settings.add('TEST_RUNNER') settings._wrapped._explicit_settings.remove('MANAGERS') settings._wrapped._explicit_settings.remove('ADMINS') def test_boolean_field_default_value(self): with self.settings(TEST_RUNNER='myapp.test.CustomRunnner'): # We patch the field's default value to trigger the warning boolean_field = Book._meta.get_field('is_published') old_default = boolean_field.default try: boolean_field.default = NOT_PROVIDED errors = check_1_6_compatibility() expected = [ checks.Warning( 'BooleanField does not have a default value.', hint=('Django 1.6 changed the default value of BooleanField from False to None. ' 'See https://docs.djangoproject.com/en/1.6/ref/models/fields/#booleanfield ' 'for more information.'), obj=boolean_field, id='1_6.W002', ) ] self.assertEqual(errors, expected) finally: # Restore the ``default`` boolean_field.default = old_default class Django_1_7_0_CompatibilityChecks(TestCase): @override_settings(MIDDLEWARE_CLASSES=('django.contrib.sessions.middleware.SessionMiddleware',)) def test_middleware_classes_overridden(self): errors = check_1_7_compatibility() self.assertEqual(errors, []) def test_middleware_classes_not_set_explicitly(self): # If MIDDLEWARE_CLASSES was set explicitly, temporarily pretend it wasn't middleware_classes_overridden = False if 'MIDDLEWARE_CLASSES' in settings._wrapped._explicit_settings: middleware_classes_overridden = True settings._wrapped._explicit_settings.remove('MIDDLEWARE_CLASSES') try: errors = check_1_7_compatibility() expected = [ checks.Warning( "MIDDLEWARE_CLASSES is not set.", hint=("Django 1.7 changed the global defaults for the MIDDLEWARE_CLASSES. 
" "django.contrib.sessions.middleware.SessionMiddleware, " "django.contrib.auth.middleware.AuthenticationMiddleware, and " "django.contrib.messages.middleware.MessageMiddleware were removed from the defaults. " "If your project needs these middleware then you should configure this setting."), obj=None, id='1_7.W001', ) ] self.assertEqual(errors, expected) finally: # Restore settings value if middleware_classes_overridden: settings._wrapped._explicit_settings.add('MIDDLEWARE_CLASSES') def simple_system_check(**kwargs): simple_system_check.kwargs = kwargs return [] def tagged_system_check(**kwargs): tagged_system_check.kwargs = kwargs return [] tagged_system_check.tags = ['simpletag'] class CheckCommandTests(TestCase): def setUp(self): simple_system_check.kwargs = None tagged_system_check.kwargs = None self.old_stdout, self.old_stderr = sys.stdout, sys.stderr sys.stdout, sys.stderr = StringIO(), StringIO() def tearDown(self): sys.stdout, sys.stderr = self.old_stdout, self.old_stderr @override_system_checks([simple_system_check, tagged_system_check]) def test_simple_call(self): call_command('check') self.assertEqual(simple_system_check.kwargs, {'app_configs': None}) self.assertEqual(tagged_system_check.kwargs, {'app_configs': None}) @override_system_checks([simple_system_check, tagged_system_check]) def test_given_app(self): call_command('check', 'auth', 'admin') auth_config = apps.get_app_config('auth') admin_config = apps.get_app_config('admin') self.assertEqual(simple_system_check.kwargs, {'app_configs': [auth_config, admin_config]}) self.assertEqual(tagged_system_check.kwargs, {'app_configs': [auth_config, admin_config]}) @override_system_checks([simple_system_check, tagged_system_check]) def test_given_tag(self): call_command('check', tags=['simpletag']) self.assertEqual(simple_system_check.kwargs, None) self.assertEqual(tagged_system_check.kwargs, {'app_configs': None}) @override_system_checks([simple_system_check, tagged_system_check]) def 
test_invalid_tag(self): self.assertRaises(CommandError, call_command, 'check', tags=['missingtag']) @override_system_checks([simple_system_check]) def test_list_tags_empty(self): call_command('check', list_tags=True) self.assertEqual('\n', sys.stdout.getvalue()) @override_system_checks([tagged_system_check]) def test_list_tags(self): call_command('check', list_tags=True) self.assertEqual('simpletag\n', sys.stdout.getvalue()) def custom_error_system_check(app_configs, **kwargs): return [ Error( 'Error', hint=None, id='myerrorcheck.E001', ) ] def custom_warning_system_check(app_configs, **kwargs): return [ Warning( 'Warning', hint=None, id='mywarningcheck.E001', ) ] class SilencingCheckTests(TestCase): def setUp(self): self.old_stdout, self.old_stderr = sys.stdout, sys.stderr self.stdout, self.stderr = StringIO(), StringIO() sys.stdout, sys.stderr = self.stdout, self.stderr def tearDown(self): sys.stdout, sys.stderr = self.old_stdout, self.old_stderr @override_settings(SILENCED_SYSTEM_CHECKS=['myerrorcheck.E001']) @override_system_checks([custom_error_system_check]) def test_silenced_error(self): out = StringIO() err = StringIO() try: call_command('check', stdout=out, stderr=err) except CommandError: self.fail("The mycheck.E001 check should be silenced.") self.assertEqual(out.getvalue(), '') self.assertEqual( err.getvalue(), 'System check identified some issues:\n\n' 'ERRORS:\n' '?: (myerrorcheck.E001) Error\n\n' 'System check identified 1 issue (0 silenced).\n' ) @override_settings(SILENCED_SYSTEM_CHECKS=['mywarningcheck.E001']) @override_system_checks([custom_warning_system_check]) def test_silenced_warning(self): out = StringIO() err = StringIO() try: call_command('check', stdout=out, stderr=err) except CommandError: self.fail("The mycheck.E001 check should be silenced.") self.assertEqual(out.getvalue(), 'System check identified no issues (1 silenced).\n') self.assertEqual(err.getvalue(), '')
bsd-3-clause
hrishioa/Navo
Raspi-Code/Lib/venv/lib/python2.7/site-packages/pip/_vendor/progress/spinner.py
404
1341
# -*- coding: utf-8 -*- # Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from __future__ import unicode_literals from . import Infinite from .helpers import WriteMixin class Spinner(WriteMixin, Infinite): message = '' phases = ('-', '\\', '|', '/') hide_cursor = True def update(self): i = self.index % len(self.phases) self.write(self.phases[i]) class PieSpinner(Spinner): phases = ['◷', '◶', '◵', '◴'] class MoonSpinner(Spinner): phases = ['◑', '◒', '◐', '◓'] class LineSpinner(Spinner): phases = ['⎺', '⎻', '⎼', '⎽', '⎼', '⎻']
gpl-2.0
Matt-Deacalion/django
tests/model_formsets_regress/tests.py
173
20725
from __future__ import unicode_literals from django import forms from django.forms.formsets import DELETION_FIELD_NAME, BaseFormSet from django.forms.models import ( BaseModelFormSet, inlineformset_factory, modelform_factory, modelformset_factory, ) from django.forms.utils import ErrorDict, ErrorList from django.test import TestCase from django.utils import six from .models import ( Host, Manager, Network, ProfileNetwork, Restaurant, User, UserProfile, UserSite, ) class InlineFormsetTests(TestCase): def test_formset_over_to_field(self): "A formset over a ForeignKey with a to_field can be saved. Regression for #10243" Form = modelform_factory(User, fields="__all__") FormSet = inlineformset_factory(User, UserSite, fields="__all__") # Instantiate the Form and FormSet to prove # you can create a form with no data form = Form() form_set = FormSet(instance=User()) # Now create a new User and UserSite instance data = { 'serial': '1', 'username': 'apollo13', 'usersite_set-TOTAL_FORMS': '1', 'usersite_set-INITIAL_FORMS': '0', 'usersite_set-MAX_NUM_FORMS': '0', 'usersite_set-0-data': '10', 'usersite_set-0-user': 'apollo13' } user = User() form = Form(data) if form.is_valid(): user = form.save() else: self.fail('Errors found on form:%s' % form_set) form_set = FormSet(data, instance=user) if form_set.is_valid(): form_set.save() usersite = UserSite.objects.all().values() self.assertEqual(usersite[0]['data'], 10) self.assertEqual(usersite[0]['user_id'], 'apollo13') else: self.fail('Errors found on formset:%s' % form_set.errors) # Now update the UserSite instance data = { 'usersite_set-TOTAL_FORMS': '1', 'usersite_set-INITIAL_FORMS': '1', 'usersite_set-MAX_NUM_FORMS': '0', 'usersite_set-0-id': six.text_type(usersite[0]['id']), 'usersite_set-0-data': '11', 'usersite_set-0-user': 'apollo13' } form_set = FormSet(data, instance=user) if form_set.is_valid(): form_set.save() usersite = UserSite.objects.all().values() self.assertEqual(usersite[0]['data'], 11) 
self.assertEqual(usersite[0]['user_id'], 'apollo13') else: self.fail('Errors found on formset:%s' % form_set.errors) # Now add a new UserSite instance data = { 'usersite_set-TOTAL_FORMS': '2', 'usersite_set-INITIAL_FORMS': '1', 'usersite_set-MAX_NUM_FORMS': '0', 'usersite_set-0-id': six.text_type(usersite[0]['id']), 'usersite_set-0-data': '11', 'usersite_set-0-user': 'apollo13', 'usersite_set-1-data': '42', 'usersite_set-1-user': 'apollo13' } form_set = FormSet(data, instance=user) if form_set.is_valid(): form_set.save() usersite = UserSite.objects.all().values().order_by('data') self.assertEqual(usersite[0]['data'], 11) self.assertEqual(usersite[0]['user_id'], 'apollo13') self.assertEqual(usersite[1]['data'], 42) self.assertEqual(usersite[1]['user_id'], 'apollo13') else: self.fail('Errors found on formset:%s' % form_set.errors) def test_formset_over_inherited_model(self): "A formset over a ForeignKey with a to_field can be saved. Regression for #11120" Form = modelform_factory(Restaurant, fields="__all__") FormSet = inlineformset_factory(Restaurant, Manager, fields="__all__") # Instantiate the Form and FormSet to prove # you can create a form with no data form = Form() form_set = FormSet(instance=Restaurant()) # Now create a new Restaurant and Manager instance data = { 'name': "Guido's House of Pasta", 'manager_set-TOTAL_FORMS': '1', 'manager_set-INITIAL_FORMS': '0', 'manager_set-MAX_NUM_FORMS': '0', 'manager_set-0-name': 'Guido Van Rossum' } restaurant = User() form = Form(data) if form.is_valid(): restaurant = form.save() else: self.fail('Errors found on form:%s' % form_set) form_set = FormSet(data, instance=restaurant) if form_set.is_valid(): form_set.save() manager = Manager.objects.all().values() self.assertEqual(manager[0]['name'], 'Guido Van Rossum') else: self.fail('Errors found on formset:%s' % form_set.errors) # Now update the Manager instance data = { 'manager_set-TOTAL_FORMS': '1', 'manager_set-INITIAL_FORMS': '1', 'manager_set-MAX_NUM_FORMS': '0', 
'manager_set-0-id': six.text_type(manager[0]['id']), 'manager_set-0-name': 'Terry Gilliam' } form_set = FormSet(data, instance=restaurant) if form_set.is_valid(): form_set.save() manager = Manager.objects.all().values() self.assertEqual(manager[0]['name'], 'Terry Gilliam') else: self.fail('Errors found on formset:%s' % form_set.errors) # Now add a new Manager instance data = { 'manager_set-TOTAL_FORMS': '2', 'manager_set-INITIAL_FORMS': '1', 'manager_set-MAX_NUM_FORMS': '0', 'manager_set-0-id': six.text_type(manager[0]['id']), 'manager_set-0-name': 'Terry Gilliam', 'manager_set-1-name': 'John Cleese' } form_set = FormSet(data, instance=restaurant) if form_set.is_valid(): form_set.save() manager = Manager.objects.all().values().order_by('name') self.assertEqual(manager[0]['name'], 'John Cleese') self.assertEqual(manager[1]['name'], 'Terry Gilliam') else: self.fail('Errors found on formset:%s' % form_set.errors) def test_inline_model_with_to_field(self): """ #13794 --- An inline model with a to_field of a formset with instance has working relations. """ FormSet = inlineformset_factory(User, UserSite, exclude=('is_superuser',)) user = User.objects.create(username="guido", serial=1337) UserSite.objects.create(user=user, data=10) formset = FormSet(instance=user) # Testing the inline model's relation self.assertEqual(formset[0].instance.user_id, "guido") def test_inline_model_with_to_field_to_rel(self): """ #13794 --- An inline model with a to_field to a related field of a formset with instance has working relations. 
""" FormSet = inlineformset_factory(UserProfile, ProfileNetwork, exclude=[]) user = User.objects.create(username="guido", serial=1337, pk=1) self.assertEqual(user.pk, 1) profile = UserProfile.objects.create(user=user, about="about", pk=2) self.assertEqual(profile.pk, 2) ProfileNetwork.objects.create(profile=profile, network=10, identifier=10) formset = FormSet(instance=profile) # Testing the inline model's relation self.assertEqual(formset[0].instance.profile_id, 1) def test_formset_with_none_instance(self): "A formset with instance=None can be created. Regression for #11872" Form = modelform_factory(User, fields="__all__") FormSet = inlineformset_factory(User, UserSite, fields="__all__") # Instantiate the Form and FormSet to prove # you can create a formset with an instance of None Form(instance=None) FormSet(instance=None) def test_empty_fields_on_modelformset(self): """ No fields passed to modelformset_factory() should result in no fields on returned forms except for the id (#14119). """ UserFormSet = modelformset_factory(User, fields=()) formset = UserFormSet() for form in formset.forms: self.assertIn('id', form.fields) self.assertEqual(len(form.fields), 1) def test_save_as_new_with_new_inlines(self): """ Existing and new inlines are saved with save_as_new. Regression for #14938. 
""" efnet = Network.objects.create(name="EFNet") host1 = Host.objects.create(hostname="irc.he.net", network=efnet) HostFormSet = inlineformset_factory(Network, Host, fields="__all__") # Add a new host, modify previous host, and save-as-new data = { 'host_set-TOTAL_FORMS': '2', 'host_set-INITIAL_FORMS': '1', 'host_set-MAX_NUM_FORMS': '0', 'host_set-0-id': six.text_type(host1.id), 'host_set-0-hostname': 'tranquility.hub.dal.net', 'host_set-1-hostname': 'matrix.de.eu.dal.net' } # To save a formset as new, it needs a new hub instance dalnet = Network.objects.create(name="DALnet") formset = HostFormSet(data, instance=dalnet, save_as_new=True) self.assertTrue(formset.is_valid()) formset.save() self.assertQuerysetEqual( dalnet.host_set.order_by("hostname"), ["<Host: matrix.de.eu.dal.net>", "<Host: tranquility.hub.dal.net>"] ) def test_initial_data(self): user = User.objects.create(username="bibi", serial=1) UserSite.objects.create(user=user, data=7) FormSet = inlineformset_factory(User, UserSite, extra=2, fields="__all__") formset = FormSet(instance=user, initial=[{'data': 41}, {'data': 42}]) self.assertEqual(formset.forms[0].initial['data'], 7) self.assertEqual(formset.extra_forms[0].initial['data'], 41) self.assertIn('value="42"', formset.extra_forms[1].as_p()) class FormsetTests(TestCase): def test_error_class(self): ''' Test the type of Formset and Form error attributes ''' Formset = modelformset_factory(User, fields="__all__") data = { 'form-TOTAL_FORMS': '2', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '0', 'form-0-id': '', 'form-0-username': 'apollo13', 'form-0-serial': '1', 'form-1-id': '', 'form-1-username': 'apollo13', 'form-1-serial': '2', } formset = Formset(data) # check if the returned error classes are correct # note: formset.errors returns a list as documented self.assertIsInstance(formset.errors, list) self.assertIsInstance(formset.non_form_errors(), ErrorList) for form in formset.forms: self.assertIsInstance(form.errors, ErrorDict) 
self.assertIsInstance(form.non_field_errors(), ErrorList) def test_initial_data(self): User.objects.create(username="bibi", serial=1) Formset = modelformset_factory(User, fields="__all__", extra=2) formset = Formset(initial=[{'username': 'apollo11'}, {'username': 'apollo12'}]) self.assertEqual(formset.forms[0].initial['username'], "bibi") self.assertEqual(formset.extra_forms[0].initial['username'], "apollo11") self.assertIn('value="apollo12"', formset.extra_forms[1].as_p()) def test_extraneous_query_is_not_run(self): Formset = modelformset_factory(Network, fields="__all__") data = {'test-TOTAL_FORMS': '1', 'test-INITIAL_FORMS': '0', 'test-MAX_NUM_FORMS': '', 'test-0-name': 'Random Place', } with self.assertNumQueries(1): formset = Formset(data, prefix="test") formset.save() class CustomWidget(forms.widgets.TextInput): pass class UserSiteForm(forms.ModelForm): class Meta: model = UserSite fields = "__all__" widgets = { 'id': CustomWidget, 'data': CustomWidget, } localized_fields = ('data',) class Callback(object): def __init__(self): self.log = [] def __call__(self, db_field, **kwargs): self.log.append((db_field, kwargs)) return db_field.formfield(**kwargs) class FormfieldCallbackTests(TestCase): """ Regression for #13095 and #17683: Using base forms with widgets defined in Meta should not raise errors and BaseModelForm should respect the specified pk widget. 
""" def test_inlineformset_factory_default(self): Formset = inlineformset_factory(User, UserSite, form=UserSiteForm, fields="__all__") form = Formset().forms[0] self.assertIsInstance(form['id'].field.widget, CustomWidget) self.assertIsInstance(form['data'].field.widget, CustomWidget) self.assertFalse(form.fields['id'].localize) self.assertTrue(form.fields['data'].localize) def test_modelformset_factory_default(self): Formset = modelformset_factory(UserSite, form=UserSiteForm) form = Formset().forms[0] self.assertIsInstance(form['id'].field.widget, CustomWidget) self.assertIsInstance(form['data'].field.widget, CustomWidget) self.assertFalse(form.fields['id'].localize) self.assertTrue(form.fields['data'].localize) def assertCallbackCalled(self, callback): id_field, user_field, data_field = UserSite._meta.fields expected_log = [ (id_field, {'widget': CustomWidget}), (user_field, {}), (data_field, {'widget': CustomWidget, 'localize': True}), ] self.assertEqual(callback.log, expected_log) def test_inlineformset_custom_callback(self): callback = Callback() inlineformset_factory(User, UserSite, form=UserSiteForm, formfield_callback=callback, fields="__all__") self.assertCallbackCalled(callback) def test_modelformset_custom_callback(self): callback = Callback() modelformset_factory(UserSite, form=UserSiteForm, formfield_callback=callback) self.assertCallbackCalled(callback) class BaseCustomDeleteFormSet(BaseFormSet): """ A formset mix-in that lets a form decide if it's to be deleted. Works for BaseFormSets. Also works for ModelFormSets with #14099 fixed. form.should_delete() is called. The formset delete field is also suppressed. 
""" def add_fields(self, form, index): super(BaseCustomDeleteFormSet, self).add_fields(form, index) self.can_delete = True if DELETION_FIELD_NAME in form.fields: del form.fields[DELETION_FIELD_NAME] def _should_delete_form(self, form): return hasattr(form, 'should_delete') and form.should_delete() class FormfieldShouldDeleteFormTests(TestCase): """ Regression for #14099: BaseModelFormSet should use ModelFormSet method _should_delete_form """ class BaseCustomDeleteModelFormSet(BaseModelFormSet, BaseCustomDeleteFormSet): """ Model FormSet with CustomDelete MixIn """ class CustomDeleteUserForm(forms.ModelForm): """ A model form with a 'should_delete' method """ class Meta: model = User fields = "__all__" def should_delete(self): """ delete form if odd PK """ return self.instance.pk % 2 != 0 NormalFormset = modelformset_factory(User, form=CustomDeleteUserForm, can_delete=True) DeleteFormset = modelformset_factory(User, form=CustomDeleteUserForm, formset=BaseCustomDeleteModelFormSet) data = { 'form-TOTAL_FORMS': '4', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '4', 'form-0-username': 'John', 'form-0-serial': '1', 'form-1-username': 'Paul', 'form-1-serial': '2', 'form-2-username': 'George', 'form-2-serial': '3', 'form-3-username': 'Ringo', 'form-3-serial': '5', } delete_all_ids = { 'form-0-DELETE': '1', 'form-1-DELETE': '1', 'form-2-DELETE': '1', 'form-3-DELETE': '1', } def test_init_database(self): """ Add test data to database via formset """ formset = self.NormalFormset(self.data) self.assertTrue(formset.is_valid()) self.assertEqual(len(formset.save()), 4) def test_no_delete(self): """ Verify base formset doesn't modify database """ # reload database self.test_init_database() # pass standard data dict & see none updated data = dict(self.data) data['form-INITIAL_FORMS'] = 4 data.update({ 'form-%d-id' % i: user.pk for i, user in enumerate(User.objects.all()) }) formset = self.NormalFormset(data, queryset=User.objects.all()) self.assertTrue(formset.is_valid()) 
self.assertEqual(len(formset.save()), 0) self.assertEqual(len(User.objects.all()), 4) def test_all_delete(self): """ Verify base formset honors DELETE field """ # reload database self.test_init_database() # create data dict with all fields marked for deletion data = dict(self.data) data['form-INITIAL_FORMS'] = 4 data.update({ 'form-%d-id' % i: user.pk for i, user in enumerate(User.objects.all()) }) data.update(self.delete_all_ids) formset = self.NormalFormset(data, queryset=User.objects.all()) self.assertTrue(formset.is_valid()) self.assertEqual(len(formset.save()), 0) self.assertEqual(len(User.objects.all()), 0) def test_custom_delete(self): """ Verify DeleteFormset ignores DELETE field and uses form method """ # reload database self.test_init_database() # Create formset with custom Delete function # create data dict with all fields marked for deletion data = dict(self.data) data['form-INITIAL_FORMS'] = 4 data.update({ 'form-%d-id' % i: user.pk for i, user in enumerate(User.objects.all()) }) data.update(self.delete_all_ids) formset = self.DeleteFormset(data, queryset=User.objects.all()) # verify two were deleted self.assertTrue(formset.is_valid()) self.assertEqual(len(formset.save()), 0) self.assertEqual(len(User.objects.all()), 2) # verify no "odd" PKs left odd_ids = [user.pk for user in User.objects.all() if user.pk % 2] self.assertEqual(len(odd_ids), 0) class RedeleteTests(TestCase): def test_resubmit(self): u = User.objects.create(username='foo', serial=1) us = UserSite.objects.create(user=u, data=7) formset_cls = inlineformset_factory(User, UserSite, fields="__all__") data = { 'serial': '1', 'username': 'foo', 'usersite_set-TOTAL_FORMS': '1', 'usersite_set-INITIAL_FORMS': '1', 'usersite_set-MAX_NUM_FORMS': '1', 'usersite_set-0-id': six.text_type(us.pk), 'usersite_set-0-data': '7', 'usersite_set-0-user': 'foo', 'usersite_set-0-DELETE': '1' } formset = formset_cls(data, instance=u) self.assertTrue(formset.is_valid()) formset.save() 
self.assertEqual(UserSite.objects.count(), 0) formset = formset_cls(data, instance=u) # Even if the "us" object isn't in the DB any more, the form # validates. self.assertTrue(formset.is_valid()) formset.save() self.assertEqual(UserSite.objects.count(), 0) def test_delete_already_deleted(self): u = User.objects.create(username='foo', serial=1) us = UserSite.objects.create(user=u, data=7) formset_cls = inlineformset_factory(User, UserSite, fields="__all__") data = { 'serial': '1', 'username': 'foo', 'usersite_set-TOTAL_FORMS': '1', 'usersite_set-INITIAL_FORMS': '1', 'usersite_set-MAX_NUM_FORMS': '1', 'usersite_set-0-id': six.text_type(us.pk), 'usersite_set-0-data': '7', 'usersite_set-0-user': 'foo', 'usersite_set-0-DELETE': '1' } formset = formset_cls(data, instance=u) us.delete() self.assertTrue(formset.is_valid()) formset.save() self.assertEqual(UserSite.objects.count(), 0)
bsd-3-clause
muellni/vupradio_os
support/scripts/gen-manual-lists.py
65
20516
## gen-manual-lists.py ## ## This script generates the following Buildroot manual appendices: ## - the package tables (one for the target, the other for host tools); ## - the deprecated items. ## ## Author(s): ## - Samuel Martin <s.martin49@gmail.com> ## ## Copyright (C) 2013 Samuel Martin ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program; if not, write to the Free Software ## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ## ## Note about python2. ## ## This script can currently only be run using python2 interpreter due to ## its kconfiglib dependency (which is not yet python3 friendly). from __future__ import print_function from __future__ import unicode_literals import os import re import sys import datetime from argparse import ArgumentParser try: import kconfiglib except ImportError: message = """ Could not find the module 'kconfiglib' in the PYTHONPATH: """ message += "\n".join([" {0}".format(path) for path in sys.path]) message += """ Make sure the Kconfiglib directory is in the PYTHONPATH, then relaunch the script. You can get kconfiglib from: https://github.com/ulfalizer/Kconfiglib """ sys.stderr.write(message) raise def get_symbol_subset(root, filter_func): """ Return a generator of kconfig items. 
:param root_item: Root item of the generated subset of items :param filter_func: Filter function """ if hasattr(root, "get_items"): get_items = root.get_items elif hasattr(root, "get_top_level_items"): get_items = root.get_top_level_items else: message = "The symbol does not contain any subset of symbols" raise Exception(message) for item in get_items(): if item.is_symbol(): if not filter_func(item): continue yield item elif item.is_menu() or item.is_choice(): for i in get_symbol_subset(item, filter_func): yield i def get_symbol_parents(item, root=None, enable_choice=False): """ Return the list of the item's parents. The last item of the list is the closest parent, the first the furthest. :param item: Item from which the the parent list is generated :param root: Root item stopping the search (not included in the parent list) :param enable_choice: Flag enabling choices to appear in the parent list """ parent = item.get_parent() parents = [] while parent and parent != root: if parent.is_menu(): parents.append(parent.get_title()) elif enable_choice and parent.is_choice(): parents.append(parent.prompts[0][0]) parent = parent.get_parent() if isinstance(root, kconfiglib.Menu) or \ (enable_choice and isinstance(root, kconfiglib.Choice)): parents.append("") # Dummy empty parent to get a leading arrow -> parents.reverse() return parents def format_asciidoc_table(root, get_label_func, filter_func=lambda x: True, format_func=lambda x: x, enable_choice=False, sorted=True, item_label=None): """ Return the asciidoc formatted table of the items and their location. 
:param root: Root item of the item subset :param get_label_func: Item's label getter function :param filter_func: Filter function to apply on the item subset :param format_func: Function to format a symbol and the table header :param enable_choice: Enable choices to appear as part of the item's location :param sorted: Flag to alphabetically sort the table """ lines = [] for item in get_symbol_subset(root, filter_func): lines.append(format_func(what="symbol", symbol=item, root=root, get_label_func=get_label_func, enable_choice=enable_choice)) if sorted: lines.sort(key=lambda x: x.lower()) table = ":halign: center\n\n" width, columns = format_func(what="layout") table = "[width=\"{0}\",cols=\"{1}\",options=\"header\"]\n".format(width, columns) table += "|===================================================\n" table += format_func(what="header", header=item_label, root=root) table += "\n" + "".join(lines) + "\n" table += "|===================================================\n" return table class Buildroot: """ Buildroot configuration object. """ root_config = "Config.in" package_dirname = "package" package_prefixes = ["BR2_PACKAGE_", "BR2_PACKAGE_HOST_"] re_pkg_prefix = re.compile(r"^(" + "|".join(package_prefixes) + ").*") deprecated_symbol = "BR2_DEPRECATED" list_in = """\ // // Automatically generated list for Buildroot manual. 
// {table} """ list_info = { 'target-packages': { 'filename': "package-list", 'root_menu': "Target packages", 'filter': "_is_real_package", 'format': "_format_symbol_prompt_location", 'sorted': True, }, 'host-packages': { 'filename': "host-package-list", 'root_menu': "Host utilities", 'filter': "_is_real_package", 'format': "_format_symbol_prompt", 'sorted': True, }, 'virtual-packages': { 'filename': "virtual-package-list", 'root_menu': "Target packages", 'filter': "_is_virtual_package", 'format': "_format_symbol_virtual", 'sorted': True, }, 'deprecated': { 'filename': "deprecated-list", 'root_menu': None, 'filter': "_is_deprecated", 'format': "_format_symbol_prompt_location", 'sorted': False, }, } def __init__(self): self.base_dir = os.environ.get("TOPDIR") self.output_dir = os.environ.get("O") self.package_dir = os.path.join(self.base_dir, self.package_dirname) # The kconfiglib requires an environment variable named "srctree" to # load the configuration, so set it. os.environ.update({'srctree': self.base_dir}) self.config = kconfiglib.Config(os.path.join(self.base_dir, self.root_config)) self._deprecated = self.config.get_symbol(self.deprecated_symbol) self.gen_date = datetime.datetime.utcnow() self.br_version_full = os.environ.get("BR2_VERSION_FULL") if self.br_version_full and self.br_version_full.endswith("-git"): self.br_version_full = self.br_version_full[:-4] if not self.br_version_full: self.br_version_full = "undefined" def _get_package_symbols(self, package_name): """ Return a tuple containing the target and host package symbol. """ symbols = re.sub("[-+.]", "_", package_name) symbols = symbols.upper() symbols = tuple([prefix + symbols for prefix in self.package_prefixes]) return symbols def _is_deprecated(self, symbol): """ Return True if the symbol is marked as deprecated, otherwise False. 
""" # This also catches BR2_DEPRECATED_SINCE_xxxx_xx return bool([ symbol for x in symbol.get_referenced_symbols() if x.get_name().startswith(self._deprecated.get_name()) ]) def _is_package(self, symbol, type='real'): """ Return True if the symbol is a package or a host package, otherwise False. :param symbol: The symbol to check :param type: Limit to 'real' or 'virtual' types of packages, with 'real' being the default. Note: only 'real' is (implictly) handled for now """ if not symbol.is_symbol(): return False if type == 'real' and not symbol.prompts: return False if type == 'virtual' and symbol.prompts: return False if not self.re_pkg_prefix.match(symbol.get_name()): return False pkg_name = self._get_pkg_name(symbol) pattern = "^(HOST_)?" + pkg_name + "$" pattern = re.sub("_", ".", pattern) pattern = re.compile(pattern, re.IGNORECASE) # Here, we cannot just check for the location of the Config.in because # of the "virtual" package. # # So, to check that a symbol is a package (not a package option or # anything else), we check for the existence of the package *.mk file. # # By the way, to actually check for a package, we should grep all *.mk # files for the following regex: # "\$\(eval \$\((host-)?(generic|autotools|cmake)-package\)\)" # # Implementation details: # # * The package list is generated from the *.mk file existence, the # first time this function is called. Despite the memory consumption, # this list is stored because the execution time of this script is # noticeably shorter than rescanning the package sub-tree for each # symbol. 
if not hasattr(self, "_package_list"): pkg_list = [] for _, _, files in os.walk(self.package_dir): for file_ in (f for f in files if f.endswith(".mk")): pkg_list.append(re.sub(r"(.*?)\.mk", r"\1", file_)) setattr(self, "_package_list", pkg_list) for pkg in getattr(self, "_package_list"): if type == 'real': if pattern.match(pkg) and not self._exists_virt_symbol(pkg): return True if type == 'virtual': if pattern.match('has_' + pkg): return True return False def _is_real_package(self, symbol): return self._is_package(symbol, 'real') def _is_virtual_package(self, symbol): return self._is_package(symbol, 'virtual') def _exists_virt_symbol(self, pkg_name): """ Return True if a symbol exists that defines the package as a virtual package, False otherwise :param pkg_name: The name of the package, for which to check if a symbol exists defining it as a virtual package """ virt_pattern = "BR2_PACKAGE_HAS_" + pkg_name + "$" virt_pattern = re.sub("_", ".", virt_pattern) virt_pattern = re.compile(virt_pattern, re.IGNORECASE) for sym in self.config: if virt_pattern.match(sym.get_name()): return True return False def _get_pkg_name(self, symbol): """ Return the package name of the specified symbol. :param symbol: The symbol to get the package name of """ return re.sub("BR2_PACKAGE_(HOST_)?(.*)", r"\2", symbol.get_name()) def _get_symbol_label(self, symbol, mark_deprecated=True): """ Return the label (a.k.a. prompt text) of the symbol. 
:param symbol: The symbol :param mark_deprecated: Append a 'deprecated' to the label """ label = symbol.prompts[0][0] if self._is_deprecated(symbol) and mark_deprecated: label += " *(deprecated)*" return label def _format_symbol_prompt(self, what=None, symbol=None, root=None, enable_choice=False, header=None, get_label_func=lambda x: x): if what == "layout": return ( "30%", "^1" ) if what == "header": return "| {0:<40}\n".format(header) if what == "symbol": return "| {0:<40}\n".format(get_label_func(symbol)) message = "Invalid argument 'what': '%s'\n" % str(what) message += "Allowed values are: 'layout', 'header' and 'symbol'" raise Exception(message) def _format_symbol_prompt_location(self, what=None, symbol=None, root=None, enable_choice=False, header=None, get_label_func=lambda x: x): if what == "layout": return ( "100%", "^1,4" ) if what == "header": if hasattr(root, "get_title"): loc_label = get_symbol_parents(root, None, enable_choice=enable_choice) loc_label += [root.get_title(), "..."] else: loc_label = ["Location"] return "| {0:<40} <| {1}\n".format(header, " -> ".join(loc_label)) if what == "symbol": parents = get_symbol_parents(symbol, root, enable_choice) return "| {0:<40} <| {1}\n".format(get_label_func(symbol), " -> ".join(parents)) message = "Invalid argument 'what': '%s'\n" % str(what) message += "Allowed values are: 'layout', 'header' and 'symbol'" raise Exception(message) def _format_symbol_virtual(self, what=None, symbol=None, root=None, enable_choice=False, header=None, get_label_func=lambda x: "?"): def _symbol_is_legacy(symbol): selects = [ s.get_name() for s in symbol.get_selected_symbols() ] return ("BR2_LEGACY" in selects) def _get_parent_package(sym): if self._is_real_package(sym): return None # Trim the symbol name from its last component (separated with # underscores), until we either find a symbol which is a real # package, or until we have no component (i.e. 
just 'BR2') name = sym.get_name() while name != "BR2": name = name.rsplit("_", 1)[0] s = self.config.get_symbol(name) if s is None: continue if self._is_real_package(s): return s return None def _get_providers(symbol): providers = list() for sym in self.config: if not sym.is_symbol(): continue if _symbol_is_legacy(sym): continue selects = sym.get_selected_symbols() if not selects: continue for s in selects: if s == symbol: if sym.prompts: l = self._get_symbol_label(sym,False) parent_pkg = _get_parent_package(sym) if parent_pkg is not None: l = self._get_symbol_label(parent_pkg, False) \ + " (w/ " + l + ")" providers.append(l) else: providers.extend(_get_providers(sym)) return providers if what == "layout": return ( "100%", "^1,4,4" ) if what == "header": return "| {0:<20} <| {1:<32} <| Providers\n".format("Virtual packages", "Symbols") if what == "symbol": pkg = re.sub(r"^BR2_PACKAGE_HAS_(.+)$", r"\1", symbol.get_name()) providers = _get_providers(symbol) return "| {0:<20} <| {1:<32} <| {2}\n".format(pkg.lower(), '+' + symbol.get_name() + '+', ", ".join(providers)) message = "Invalid argument 'what': '%s'\n" % str(what) message += "Allowed values are: 'layout', 'header' and 'symbol'" raise Exception(message) def print_list(self, list_type, enable_choice=True, enable_deprecated=True, dry_run=False, output=None): """ Print the requested list. If not dry run, then the list is automatically written in its own file. :param list_type: The list type to be generated :param enable_choice: Flag enabling choices to appear in the list :param enable_deprecated: Flag enabling deprecated items to appear in the package lists :param dry_run: Dry run (print the list in stdout instead of writing the list file """ def _get_menu(title): """ Return the first symbol menu matching the given title. 
""" menus = self.config.get_menus() menu = [m for m in menus if m.get_title().lower() == title.lower()] if not menu: message = "No such menu: '{0}'".format(title) raise Exception(message) return menu[0] list_config = self.list_info[list_type] root_title = list_config.get('root_menu') if root_title: root_item = _get_menu(root_title) else: root_item = self.config filter_ = getattr(self, list_config.get('filter')) filter_func = lambda x: filter_(x) format_func = getattr(self, list_config.get('format')) if not enable_deprecated and list_type != "deprecated": filter_func = lambda x: filter_(x) and not self._is_deprecated(x) mark_depr = list_type != "deprecated" get_label = lambda x: self._get_symbol_label(x, mark_depr) item_label = "Features" if list_type == "deprecated" else "Packages" table = format_asciidoc_table(root_item, get_label, filter_func=filter_func, format_func=format_func, enable_choice=enable_choice, sorted=list_config.get('sorted'), item_label=item_label) content = self.list_in.format(table=table) if dry_run: print(content) return if not output: output_dir = self.output_dir if not output_dir: print("Warning: Undefined output directory.") print("\tUse source directory as output location.") output_dir = self.base_dir output = os.path.join(output_dir, list_config.get('filename') + ".txt") if not os.path.exists(os.path.dirname(output)): os.makedirs(os.path.dirname(output)) print("Writing the {0} list in:\n\t{1}".format(list_type, output)) with open(output, 'w') as fout: fout.write(content) if __name__ == '__main__': list_types = ['target-packages', 'host-packages', 'virtual-packages', 'deprecated'] parser = ArgumentParser() parser.add_argument("list_type", nargs="?", choices=list_types, help="""\ Generate the given list (generate all lists if unspecified)""") parser.add_argument("-n", "--dry-run", dest="dry_run", action='store_true', help="Output the generated list to stdout") parser.add_argument("--output-target", dest="output_target", help="Output target 
package file") parser.add_argument("--output-host", dest="output_host", help="Output host package file") parser.add_argument("--output-virtual", dest="output_virtual", help="Output virtual package file") parser.add_argument("--output-deprecated", dest="output_deprecated", help="Output deprecated file") args = parser.parse_args() lists = [args.list_type] if args.list_type else list_types buildroot = Buildroot() for list_name in lists: output = getattr(args, "output_" + list_name.split("-", 1)[0]) buildroot.print_list(list_name, dry_run=args.dry_run, output=output)
gpl-2.0
oscarolar/odoo
addons/hr_timesheet_invoice/report/report_analytic.py
299
5164
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields,osv from openerp import tools from openerp.addons.decimal_precision import decimal_precision as dp class report_analytic_account_close(osv.osv): _name = "report.analytic.account.close" _description = "Analytic account to close" _auto = False _columns = { 'name': fields.many2one('account.analytic.account', 'Analytic account', readonly=True), 'state': fields.char('Status', readonly=True), 'partner_id': fields.many2one('res.partner', 'Partner', readonly=True), 'quantity': fields.float('Quantity', readonly=True), 'quantity_max': fields.float('Max. 
Quantity', readonly=True), 'balance': fields.float('Balance', readonly=True), 'date_deadline': fields.date('Deadline', readonly=True), } def init(self, cr): tools.drop_view_if_exists(cr, 'report_analytic_account_close') cr.execute(""" create or replace view report_analytic_account_close as ( select a.id as id, a.id as name, a.state as state, sum(l.unit_amount) as quantity, sum(l.amount) as balance, a.partner_id as partner_id, a.quantity_max as quantity_max, a.date as date_deadline from account_analytic_line l right join account_analytic_account a on (l.account_id=a.id) group by a.id,a.state, a.quantity_max,a.date,a.partner_id having (a.quantity_max>0 and (sum(l.unit_amount)>=a.quantity_max)) or a.date <= current_date )""") class report_account_analytic_line_to_invoice(osv.osv): _name = "report.account.analytic.line.to.invoice" _description = "Analytic lines to invoice report" _auto = False _columns = { 'name': fields.char('Year', required=False, readonly=True), 'product_id':fields.many2one('product.product', 'Product', readonly=True), 'account_id':fields.many2one('account.analytic.account', 'Analytic account', readonly=True), 'product_uom_id':fields.many2one('product.uom', 'Unit of Measure', readonly=True), 'unit_amount': fields.float('Units', readonly=True), 'sale_price': fields.float('Sale price', readonly=True, digits_compute=dp.get_precision('Product Price')), 'amount': fields.float('Amount', readonly=True, digits_compute=dp.get_precision('Account')), 'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'), ('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True), } _order = 'name desc, product_id asc, account_id asc' def init(self, cr): tools.drop_view_if_exists(cr, 'report_account_analytic_line_to_invoice') cr.execute(""" CREATE OR REPLACE VIEW report_account_analytic_line_to_invoice AS ( SELECT 
DISTINCT(to_char(l.date,'MM')) as month, to_char(l.date, 'YYYY') as name, MIN(l.id) AS id, l.product_id, l.account_id, SUM(l.amount) AS amount, SUM(l.unit_amount*t.list_price) AS sale_price, SUM(l.unit_amount) AS unit_amount, l.product_uom_id FROM account_analytic_line l left join product_product p on (l.product_id=p.id) left join product_template t on (p.product_tmpl_id=t.id) WHERE (invoice_id IS NULL) and (to_invoice IS NOT NULL) GROUP BY to_char(l.date, 'YYYY'), to_char(l.date,'MM'), product_id, product_uom_id, account_id ) """) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
ISeaTeL/ISeaTeL_Cup_Site
users/views.py
1
1367
from django.shortcuts import render, redirect from users.forms import * from django.contrib.auth import authenticate, login, logout # Create your views here. def create(request): if request.method == 'POST': user_form = UserCreationForm(request.POST) if user_form.is_valid(): user = user_form.save() user.backend = 'django.contrib.auth.backends.ModelBackend' login(request, user) return redirect('/') else: return render(request, 'user.html', {'form': user_form}) return render(request, 'user.html', {'form': UserCreationForm()}) def logout_view(request): logout(request) return redirect('/') def login_view(request): if request.user.is_authenticated(): return redirect('/') if request.method == 'POST': user_form = AuthenticationForm(data=request.POST) if user_form.is_valid(): user = authenticate(username=user_form.cleaned_data['username'], password=user_form.cleaned_data['password']) print user user.backend = 'django.contrib.auth.backends.ModelBackend' login(request, user) return redirect('/') else: return render(request, 'user.html', {'form': user_form}) return render(request, 'user.html', {'form': AuthenticationForm()})
mit
gxx/lettuce
tests/integration/lib/Django-1.3/tests/regressiontests/many_to_one_regress/tests.py
92
4466
from django.db import models from django.test import TestCase from models import First, Second, Third, Parent, Child, Category, Record, Relation class ManyToOneRegressionTests(TestCase): def test_object_creation(self): Third.objects.create(id='3', name='An example') parent = Parent(name='fred') parent.save() Child.objects.create(name='bam-bam', parent=parent) def test_fk_assignment_and_related_object_cache(self): # Tests of ForeignKey assignment and the related-object cache (see #6886). p = Parent.objects.create(name="Parent") c = Child.objects.create(name="Child", parent=p) # Look up the object again so that we get a "fresh" object. c = Child.objects.get(name="Child") p = c.parent # Accessing the related object again returns the exactly same object. self.assertTrue(c.parent is p) # But if we kill the cache, we get a new object. del c._parent_cache self.assertFalse(c.parent is p) # Assigning a new object results in that object getting cached immediately. p2 = Parent.objects.create(name="Parent 2") c.parent = p2 self.assertTrue(c.parent is p2) # Assigning None succeeds if field is null=True. p.bestchild = None self.assertTrue(p.bestchild is None) # bestchild should still be None after saving. p.save() self.assertTrue(p.bestchild is None) # bestchild should still be None after fetching the object again. p = Parent.objects.get(name="Parent") self.assertTrue(p.bestchild is None) # Assigning None fails: Child.parent is null=False. self.assertRaises(ValueError, setattr, c, "parent", None) # You also can't assign an object of the wrong type here self.assertRaises(ValueError, setattr, c, "parent", First(id=1, second=1)) # Nor can you explicitly assign None to Child.parent during object # creation (regression for #9649). self.assertRaises(ValueError, Child, name='xyzzy', parent=None) self.assertRaises(ValueError, Child.objects.create, name='xyzzy', parent=None) # Creation using keyword argument should cache the related object. 
p = Parent.objects.get(name="Parent") c = Child(parent=p) self.assertTrue(c.parent is p) # Creation using keyword argument and unsaved related instance (#8070). p = Parent() c = Child(parent=p) self.assertTrue(c.parent is p) # Creation using attname keyword argument and an id will cause the # related object to be fetched. p = Parent.objects.get(name="Parent") c = Child(parent_id=p.id) self.assertFalse(c.parent is p) self.assertEqual(c.parent, p) def test_multiple_foreignkeys(self): # Test of multiple ForeignKeys to the same model (bug #7125). c1 = Category.objects.create(name='First') c2 = Category.objects.create(name='Second') c3 = Category.objects.create(name='Third') r1 = Record.objects.create(category=c1) r2 = Record.objects.create(category=c1) r3 = Record.objects.create(category=c2) r4 = Record.objects.create(category=c2) r5 = Record.objects.create(category=c3) r = Relation.objects.create(left=r1, right=r2) r = Relation.objects.create(left=r3, right=r4) r = Relation.objects.create(left=r1, right=r3) r = Relation.objects.create(left=r5, right=r2) r = Relation.objects.create(left=r3, right=r2) q1 = Relation.objects.filter(left__category__name__in=['First'], right__category__name__in=['Second']) self.assertQuerysetEqual(q1, ["<Relation: First - Second>"]) q2 = Category.objects.filter(record__left_set__right__category__name='Second').order_by('name') self.assertQuerysetEqual(q2, ["<Category: First>", "<Category: Second>"]) p = Parent.objects.create(name="Parent") c = Child.objects.create(name="Child", parent=p) self.assertRaises(ValueError, Child.objects.create, name="Grandchild", parent=c) def test_fk_instantiation_outside_model(self): # Regression for #12190 -- Should be able to instantiate a FK outside # of a model, and interrogate its related field. cat = models.ForeignKey(Category) self.assertEqual('id', cat.rel.get_related_field().name)
gpl-3.0
wavemind/gcb17ml
tools/verify.py
1
62911
# Copyright 2013 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # @author: psimakov@google.com (Pavel Simakov) """Enforces schema and verifies course files for referential integrity. Use this script to verify referential integrity of your course definition files before you import them into the production instance of Google AppEngine. Here is how to use the script: - prepare your course files - edit the data/unit.csv file - edit the data/lesson.csv file - edit the assets/js/activity-*.*.js files - edit the assets/js/assessment-*.js files - run the script from a command line by navigating to the root directory of the app and then typing "python tools/verify.py" - review the report printed to the console for errors and warnings Good luck! """ import csv import json import os import re from StringIO import StringIO import sys BOOLEAN = object() STRING = object() FLOAT = object() INTEGER = object() CORRECT = object() REGEX = object() INTEGER_OR_INTEGER_LIST = object() SCHEMA = { 'assessment': { 'assessmentName': STRING, 'preamble': STRING, 'checkAnswers': BOOLEAN, 'questionsList': [{ 'questionHTML': STRING, 'lesson': STRING, 'choices': [STRING, CORRECT], # The fractional score for each choice in this question, if it is # multiple-choice. Each of these values should be between 0.0 and # 1.0, inclusive. 'choiceScores': [FLOAT], # The weight given to the entire question. 
'weight': INTEGER, 'multiLine': BOOLEAN, 'correctAnswerNumeric': FLOAT, 'correctAnswerString': STRING, 'correctAnswerRegex': REGEX}] }, 'activity': [ STRING, { 'questionType': 'multiple choice', 'questionHTML': STRING, 'choices': [[STRING, BOOLEAN, STRING]] }, { 'questionType': 'multiple choice group', 'questionGroupHTML': STRING, 'questionsList': [{ 'questionHTML': STRING, 'choices': [STRING], 'correctIndex': INTEGER_OR_INTEGER_LIST, 'multiSelect': BOOLEAN}], 'allCorrectMinCount': INTEGER, 'allCorrectOutput': STRING, 'someIncorrectOutput': STRING }, { 'questionType': 'freetext', 'questionHTML': STRING, 'correctAnswerRegex': REGEX, 'correctAnswerOutput': STRING, 'incorrectAnswerOutput': STRING, 'showAnswerOutput': STRING, 'showAnswerPrompt': STRING, 'outputHeight': STRING }]} UNIT_TYPE_UNIT = 'U' UNIT_TYPE_LINK = 'O' UNIT_TYPE_ASSESSMENT = 'A' UNIT_TYPE_CUSTOM = 'X' UNIT_TYPES = [UNIT_TYPE_UNIT, UNIT_TYPE_LINK, UNIT_TYPE_ASSESSMENT, UNIT_TYPE_CUSTOM] UNIT_TYPE_NAMES = { UNIT_TYPE_UNIT: 'Unit', UNIT_TYPE_LINK: 'Link', UNIT_TYPE_ASSESSMENT: 'Assessment', UNIT_TYPE_CUSTOM: 'Custom Unit'} UNITS_HEADER = ( 'id,type,unit_id,title,release_date,now_available') LESSONS_HEADER = ( 'unit_id,unit_title,lesson_id,lesson_title,lesson_activity,' 'lesson_activity_name,lesson_notes,lesson_video_id,lesson_objectives') UNIT_CSV_TO_DB_CONVERTER = { 'id': None, 'type': ('type', unicode), 'unit_id': ('unit_id', unicode), 'title': ('title', unicode), 'release_date': ('release_date', unicode), 'now_available': ('now_available', lambda value: value == 'True') } LESSON_CSV_TO_DB_CONVERTER = { 'unit_id': ('unit_id', int), # Field 'unit_title' is a duplicate of Unit.title. We enforce that both # values are the same and ignore this value altogether. 
'unit_title': None, 'lesson_id': ('lesson_id', int), 'lesson_title': ('title', unicode), 'lesson_activity': ('activity', lambda value: value == 'yes'), 'lesson_activity_name': ('activity_title', unicode), 'lesson_video_id': ('video', unicode), 'lesson_objectives': ('objectives', unicode), 'lesson_notes': ('notes', unicode) } # pylint: disable=anomalous-backslash-in-string NO_VERIFY_TAG_NAME_OPEN = '<gcb-no-verify>\s*\n' # pylint: enable=anomalous-backslash-in-string NO_VERIFY_TAG_NAME_CLOSE = '</gcb-no-verify>' OUTPUT_FINE_LOG = False OUTPUT_DEBUG_LOG = False class Term(object): def __init__(self, term_type, value=None): self.term_type = term_type self.value = value def __eq__(self, other): if type(other) is not Term: return False else: return ((self.term_type == other.term_type) and (self.value == other.value)) class SchemaException(Exception): """A class to represent a schema error.""" def format_primitive_value_name(self, name): if name == REGEX: return 'REGEX(...)' if name == CORRECT: return 'CORRECT(...)' if name == BOOLEAN: return 'BOOLEAN' return name def format_primitive_type_name(self, name): """Formats a name for a primitive type.""" if name == BOOLEAN: return 'BOOLEAN' if name == REGEX: return 'REGEX(...)' if name == CORRECT: return 'CORRECT(...)' if name == STRING or isinstance(name, basestring): return 'STRING' if name == FLOAT: return 'FLOAT' if name == INTEGER_OR_INTEGER_LIST: return 'INTEGER_OR_INTEGER_LIST' if name == INTEGER: return 'INTEGER' if isinstance(name, dict): return '{...}' if isinstance(name, list): return '[...]' return 'Unknown type name \'%s\'' % name.__class__.__name__ def format_type_names(self, names): if isinstance(names, list): captions = [] for name in names: captions.append(self.format_primitive_type_name(name)) return captions else: return self.format_primitive_type_name(names) def __init__(self, message, value=None, types=None, path=None): prefix = '' if path: prefix = 'Error at %s\n' % path if types is not None: if value: 
message = prefix + message % ( self.format_primitive_value_name(value), self.format_type_names(types)) else: message = prefix + message % self.format_type_names(types) else: if value: message = prefix + ( message % self.format_primitive_value_name(value)) else: message = prefix + message super(SchemaException, self).__init__(message) class Context(object): """"A class that manages a stack of traversal contexts.""" def __init__(self): self.parent = None self.path = ['/'] def new(self, names): """"Derives a new context from the current one.""" context = Context() context.parent = self context.path = list(self.path) if names: if isinstance(names, list): for name in names: if name: context.path.append('/' + '%s' % name) else: context.path.append('/' + '%s' % names) return context def format_path(self): """Formats the canonical name of this context.""" return ''.join(self.path) class SchemaHelper(object): """A class that knows how to apply the schema.""" def __init__(self): self.type_stats = {} def visit_element(self, atype, value, context, is_terminal=True): """Callback for each schema element being traversed.""" if atype in self.type_stats: count = self.type_stats[atype] else: count = 0 self.type_stats[atype] = count + 1 if is_terminal: self.parse_log.append(' TERMINAL: %s %s = %s' % ( atype, context.format_path(), value)) else: self.parse_log.append(' NON-TERMINAL: %s %s' % ( atype, context.format_path())) def extract_all_terms_to_depth(self, key, values, type_map): """Walks schema type map recursively to depth.""" # Walks schema type map recursively to depth and creates a list of all # possible {key: value} pairs. The latter is a list of all non-terminal # and terminal terms allowed in the schema. The list of terms from this # method can be bound to an execution context for evaluating whether a # given instance's map complies with the schema. 
if key: type_map.update({key: key}) if values == REGEX: type_map.update({'regex': lambda x: Term(REGEX, x)}) return if values == CORRECT: type_map.update({'correct': lambda x: Term(CORRECT, x)}) return if values == BOOLEAN: type_map.update( {'true': Term(BOOLEAN, True), 'false': Term(BOOLEAN, False)}) return if values == STRING or values == INTEGER: return if isinstance(values, dict): for new_key, new_value in values.items(): self.extract_all_terms_to_depth(new_key, new_value, type_map) return if isinstance(values, list): for new_value in values: self.extract_all_terms_to_depth(None, new_value, type_map) return def find_selectors(self, type_map): """Finds all type selectors.""" # Finds all elements in the type map where both a key and a value are # strings. These elements are used to find one specific type map among # several alternative type maps. selector = {} for akey, avalue in type_map.items(): if isinstance(akey, basestring) and isinstance(avalue, basestring): selector.update({akey: avalue}) return selector def find_compatible_dict(self, value_map, type_map, unused_context): """Find the type map most compatible with the value map.""" # A value map is considered compatible with a type map when former # contains the same key names and the value types as the type map. 
# special case when we have just one type; check name and type are the # same if len(type_map) == 1: for value_key in value_map.keys(): for key in type_map[0].keys(): if value_key == key: return key, type_map[0] raise SchemaException( "Expected: '%s'\nfound: %s", type_map[0].keys()[0], value_map) # case when we have several types to choose from for adict in type_map: dict_selector = self.find_selectors(adict) for akey, avalue in dict_selector.items(): if value_map[akey] == avalue: return akey, adict return None, None def check_single_value_matches_type(self, value, atype, context): """Checks if a single value matches a specific (primitive) type.""" if atype == BOOLEAN: if isinstance(value, bool) or value.term_type == BOOLEAN: self.visit_element('BOOLEAN', value, context) return True else: raise SchemaException( 'Expected: \'true\' or \'false\'\nfound: %s', value) if isinstance(atype, basestring): if isinstance(value, basestring): self.visit_element('str', value, context) return True else: raise SchemaException('Expected: \'string\'\nfound: %s', value) if atype == STRING: if isinstance(value, basestring): self.visit_element('STRING', value, context) return True else: raise SchemaException('Expected: \'string\'\nfound: %s', value) if atype == REGEX and value.term_type == REGEX: self.visit_element('REGEX', value, context) return True if atype == CORRECT and value.term_type == CORRECT: self.visit_element('CORRECT', value, context) return True if atype == FLOAT: if is_number(value): self.visit_element('NUMBER', value, context) return True else: raise SchemaException('Expected: \'number\'\nfound: %s', value) if atype == INTEGER_OR_INTEGER_LIST: if is_integer(value): self.visit_element('INTEGER', value, context) return True if is_integer_list(value): self.visit_element('INTEGER_OR_INTEGER_LIST', value, context) return True raise SchemaException( 'Expected: \'integer\' or ' '\'array of integer\'\nfound: %s', value, path=context.format_path()) if atype == INTEGER: if 
is_integer(value): self.visit_element('INTEGER', value, context) return True else: raise SchemaException( 'Expected: \'integer\'\nfound: %s', value, path=context.format_path()) raise SchemaException( 'Unexpected value \'%s\'\n' 'for type %s', value, atype, path=context.format_path()) def check_value_list_matches_type(self, value, atype, context): """Checks if all items in value list match a specific type.""" for value_item in value: found = False for atype_item in atype: if isinstance(atype_item, list): for atype_item_item in atype_item: if self.does_value_match_type( value_item, atype_item_item, context): found = True break else: if self.does_value_match_type( value_item, atype_item, context): found = True break if not found: raise SchemaException( 'Expected: \'%s\'\nfound: %s', atype, value) return True def check_value_matches_type(self, value, atype, context): """Checks if single value or a list of values match a specific type.""" if isinstance(atype, list) and isinstance(value, list): return self.check_value_list_matches_type(value, atype, context) else: return self.check_single_value_matches_type(value, atype, context) def does_value_match_type(self, value, atype, context): """Same as other method, but does not throw an exception.""" try: return self.check_value_matches_type(value, atype, context) except SchemaException: return False def does_value_match_one_of_types(self, value, types, context): """Checks if a value matches to one of the types in the list.""" type_names = None if isinstance(types, list): type_names = types if type_names: for i in range(0, len(type_names)): if self.does_value_match_type(value, type_names[i], context): return True return False def does_value_match_map_of_type(self, value, types, context): """Checks if value matches any variation of {...} type.""" # find all possible map types maps = [] for atype in types: if isinstance(atype, dict): maps.append(atype) if not maps and isinstance(types, dict): maps.append(types) # check if the 
structure of value matches one of the maps if isinstance(value, dict): aname, adict = self.find_compatible_dict(value, maps, context) if adict: self.visit_element( 'dict', value, context.new(aname), is_terminal=False) for akey, avalue in value.items(): if akey not in adict: raise SchemaException( 'Unknown term \'%s\'', akey, path=context.format_path()) self.check_value_of_valid_type( avalue, adict[akey], context.new([aname, akey])) return True raise SchemaException( 'The value:\n %s\n' 'is incompatible with expected type(s):\n %s', value, types, path=context.format_path()) return False def format_name_with_index(self, alist, aindex): """A function to format a context name with an array element index.""" if len(alist) == 1: return '' else: return '[%s]' % aindex def does_value_match_list_of_types_in_order( self, value, types, context, target): """Iterates the value and types in given order and checks for match.""" all_values_are_lists = True for avalue in value: if not isinstance(avalue, list): all_values_are_lists = False if all_values_are_lists: for i in range(0, len(value)): self.check_value_of_valid_type(value[i], types, context.new( self.format_name_with_index(value, i)), in_order=True) else: if len(target) != len(value): raise SchemaException( 'Expected: \'%s\' values\n' + 'found: %s.' 
% value, len(target), path=context.format_path()) for i in range(0, len(value)): self.check_value_of_valid_type(value[i], target[i], context.new( self.format_name_with_index(value, i))) return True def does_value_match_list_of_types_any_order(self, value, types, context, lists): """Iterates the value and types, checks if they match in any order.""" target = lists if not target: if not isinstance(types, list): raise SchemaException( 'Unsupported type %s', None, types, path=context.format_path()) target = types for i in range(0, len(value)): found = False for atarget in target: try: self.check_value_of_valid_type( value[i], atarget, context.new(self.format_name_with_index(value, i))) found = True break except SchemaException as unused_e: continue if not found: raise SchemaException( 'The value:\n %s\n' 'is incompatible with expected type(s):\n %s', value, types, path=context.format_path()) return True def does_value_match_list_of_type(self, value, types, context, in_order): """Checks if a value matches a variation of [...] type.""" # Extra argument controls whether matching must be done in a specific # or in any order. A specific order is demanded by [[...]]] construct, # i.e. [[STRING, INTEGER, BOOLEAN]], while sub elements inside {...} and # [...] can be matched in any order. 
# prepare a list of list types lists = [] for atype in types: if isinstance(atype, list): lists.append(atype) if len(lists) > 1: raise SchemaException( 'Unable to validate types with multiple alternative ' 'lists %s', None, types, path=context.format_path()) if isinstance(value, list): if len(lists) > 1: raise SchemaException( 'Allowed at most one list\nfound: %s.', None, types, path=context.format_path()) # determine if list is in order or not as hinted by double array # [[..]]; [STRING, NUMBER] is in any order, but [[STRING, NUMBER]] # demands order ordered = len(lists) == 1 and isinstance(types, list) if in_order or ordered: return self.does_value_match_list_of_types_in_order( value, types, context, lists[0]) else: return self.does_value_match_list_of_types_any_order( value, types, context, lists) return False def check_value_of_valid_type(self, value, types, context, in_order=None): """Check if a value matches any of the given types.""" if not (isinstance(types, list) or isinstance(types, dict)): self.check_value_matches_type(value, types, context) return if (self.does_value_match_list_of_type(value, types, context, in_order) or self.does_value_match_map_of_type(value, types, context) or self.does_value_match_one_of_types(value, types, context)): return raise SchemaException( 'Unknown type %s', value, path=context.format_path()) def check_instances_match_schema(self, values, types, name): """Recursively decompose 'values' to see if they match schema types.""" self.parse_log = [] context = Context().new(name) self.parse_log.append(' ROOT %s' % context.format_path()) # pylint: disable=protected-access values_class = values.__class__ # pylint: enable=protected-access # handle {..} containers if isinstance(types, dict): if not isinstance(values, dict): raise SchemaException( 'Error at \'/\': expected {...}, found %s' % ( values_class.__name__)) self.check_value_of_valid_type(values, types, context.new([])) return # handle [...] 
containers if isinstance(types, list): if not isinstance(values, list): raise SchemaException( 'Error at \'/\': expected [...], found %s' % ( values_class.__name__)) for i in range(0, len(values)): self.check_value_of_valid_type( values[i], types, context.new('[%s]' % i)) return raise SchemaException( 'Expected an array or a dictionary.', None, path=context.format_path()) def escape_quote(value): return unicode(value).replace('\'', r'\'') class Unit(object): """A class to represent a Unit.""" def __init__(self): self.id = 0 self.type = '' self.unit_id = '' self.title = '' self.release_date = '' self.now_available = False def list_properties(self, name, output): """Outputs all properties of the unit.""" output.append('%s[\'id\'] = %s;' % (name, self.id)) output.append('%s[\'type\'] = \'%s\';' % ( name, escape_quote(self.type))) output.append('%s[\'unit_id\'] = \'%s\';' % ( name, escape_quote(self.unit_id))) output.append('%s[\'title\'] = \'%s\';' % ( name, escape_quote(self.title))) output.append('%s[\'release_date\'] = \'%s\';' % ( name, escape_quote(self.release_date))) output.append('%s[\'now_available\'] = %s;' % ( name, str(self.now_available).lower())) class Lesson(object): """A class to represent a Lesson.""" def __init__(self): self.unit_id = 0 self.unit_title = '' self.lesson_id = 0 self.lesson_title = '' self.lesson_activity = '' self.lesson_activity_name = '' self.lesson_notes = '' self.lesson_video_id = '' self.lesson_objectives = '' def list_properties(self, name, output): """Outputs all properties of the lesson.""" activity = 'false' if self.lesson_activity == 'yes': activity = 'true' output.append('%s[\'unit_id\'] = %s;' % (name, self.unit_id)) output.append('%s[\'unit_title\'] = \'%s\';' % ( name, escape_quote(self.unit_title))) output.append('%s[\'lesson_id\'] = %s;' % (name, self.lesson_id)) output.append('%s[\'lesson_title\'] = \'%s\';' % ( name, escape_quote(self.lesson_title))) output.append('%s[\'lesson_activity\'] = %s;' % (name, activity)) 
output.append('%s[\'lesson_activity_name\'] = \'%s\';' % ( name, escape_quote(self.lesson_activity_name))) output.append('%s[\'lesson_notes\'] = \'%s\';' % ( name, escape_quote(self.lesson_notes))) output.append('%s[\'lesson_video_id\'] = \'%s\';' % ( name, escape_quote(self.lesson_video_id))) output.append('%s[\'lesson_objectives\'] = \'%s\';' % ( name, escape_quote(self.lesson_objectives))) def to_id_string(self): return '%s.%s.%s' % (self.unit_id, self.lesson_id, self.lesson_title) class Assessment(object): """A class to represent a Assessment.""" def __init__(self): self.scope = {} SchemaHelper().extract_all_terms_to_depth( 'assessment', SCHEMA['assessment'], self.scope) class Activity(object): """A class to represent a Activity.""" def __init__(self): self.scope = {} SchemaHelper().extract_all_terms_to_depth( 'activity', SCHEMA['activity'], self.scope) def silent_echo(unused_message): pass def echo(message): print message def is_integer_list(s): try: if not isinstance(s, list): return False for item in s: if not isinstance(item, int): return False return True except ValueError: return False def is_integer(s): try: return int(s) == float(s) except Exception: # pylint: disable=broad-except return False def is_boolean(s): try: return s == 'True' or s == 'False' except ValueError: return False def is_number(s): try: float(s) return True except ValueError: return False def is_one_of(value, values): for current in values: if value == current: return True return False def text_to_line_numbered_text(text): """Adds line numbers to the provided text.""" lines = text.split('\n') results = [] i = 1 for line in lines: results.append(str(i) + ': ' + line) i += 1 return '\n '.join(results) def set_object_attributes(target_object, names, values, converter=None): """Sets object attributes from provided values.""" if len(names) != len(values): raise SchemaException( 'The number of elements must match: %s and %s' % (names, values)) for i in range(len(names)): if converter: 
target_def = converter.get(names[i]) if target_def: target_name = target_def[0] target_type = target_def[1] setattr(target_object, target_name, target_type(values[i])) continue if is_integer(values[i]): # if we are setting an attribute of an object that support # metadata, try to infer the target type and convert 'int' into # 'str' here target_type = None if hasattr(target_object.__class__, names[i]): attribute = getattr(target_object.__class__, names[i]) if hasattr(attribute, 'data_type'): target_type = attribute.data_type.__name__ if target_type and (target_type == 'str' or target_type == 'basestring'): setattr(target_object, names[i], str(values[i])) else: setattr(target_object, names[i], int(values[i])) continue if is_boolean(values[i]): setattr(target_object, names[i], bool(values[i])) continue setattr(target_object, names[i], values[i]) def read_objects_from_csv_stream(stream, header, new_object, converter=None): return read_objects_from_csv( csv.reader(StringIO(stream.read())), header, new_object, converter=converter) def read_objects_from_csv_file(fname, header, new_object): return read_objects_from_csv_stream(open(fname), header, new_object) def read_objects_from_csv(value_rows, header, new_object, converter=None): """Reads objects from the rows of a CSV file.""" values = [] for row in value_rows: if not row: continue values.append(row) names = header.split(',') if names != values[0]: raise SchemaException( 'Error reading CSV header.\n ' 'Header row had %s element(s): %s\n ' 'Expected header row with %s element(s): %s' % ( len(values[0]), values[0], len(names), names)) items = [] for i in range(1, len(values)): if len(names) != len(values[i]): raise SchemaException( 'Error reading CSV data row.\n ' 'Row #%s had %s element(s): %s\n ' 'Expected %s element(s): %s' % ( i, len(values[i]), values[i], len(names), names)) # Decode string values in case they were encoded in UTF-8. The CSV # reader should do this automatically, but it does not. 
The issue is # discussed here: http://docs.python.org/2/library/csv.html decoded_values = [] for value in values[i]: if isinstance(value, basestring): value = unicode(value.decode('utf-8')) decoded_values.append(value) item = new_object() set_object_attributes(item, names, decoded_values, converter=converter) items.append(item) return items def escape_javascript_regex(text): return re.sub( r'correctAnswerRegex([:][ ]*)([/])(.*)([/][ismx]*)', r'correctAnswerRegex: regex("\2\3\4")', text) def remove_javascript_single_line_comment(text): text = re.sub(re.compile('^(.*?)[ ]+//(.*)$', re.MULTILINE), r'\1', text) text = re.sub(re.compile('^//(.*)$', re.MULTILINE), r'', text) return text def remove_javascript_multi_line_comment(text): # pylint: disable=anomalous-backslash-in-string return re.sub( re.compile('/\*(.*)\*/', re.MULTILINE + re.DOTALL), r'', text) # pylint: enable=anomalous-backslash-in-string def parse_content_marked_no_verify(content): """Parses and returns a tuple of real content and no-verify text.""" # If you have any free-form JavaScript in the activity file, you need # to place it between //<gcb-no-verify> ... //</gcb-no-verify> tags # so that the verifier can selectively ignore it. pattern = re.compile('%s(.*)%s' % ( NO_VERIFY_TAG_NAME_OPEN, NO_VERIFY_TAG_NAME_CLOSE), re.DOTALL) m = pattern.search(content) noverify_text = None if m: noverify_text = m.group(1) return (re.sub(pattern, '', content), noverify_text) def convert_javascript_to_python(content, root_name): """Removes JavaScript specific syntactic constructs and returns a tuple.""" # Reads the content and removes JavaScript comments, var's, and escapes # regular expressions. 
(content, noverify_text) = parse_content_marked_no_verify(content) content = remove_javascript_multi_line_comment(content) content = remove_javascript_single_line_comment(content) content = content.replace('var %s = ' % root_name, '%s = ' % root_name) content = escape_javascript_regex(content) return (content, noverify_text) def convert_javascript_file_to_python(fname, root_name): return convert_javascript_to_python( ''.join(open(fname, 'r').readlines()), root_name) def legacy_eval_python_expression_for_test(content, scope, unused_root_name): """Legacy content parsing function using compile/exec.""" print 'WARNING! This code is unsafe and uses compile/exec!' # First compiles and then evaluates a Python script text in a restricted # environment using provided bindings. Returns the resulting bindings if # evaluation completed. # create a new execution scope that has only the schema terms defined; # remove all other languages constructs including __builtins__ restricted_scope = {} restricted_scope.update(scope) restricted_scope.update({'__builtins__': {}}) code = compile(content, '<string>', 'exec') # pylint: disable=exec-statement exec code in restricted_scope # pylint: enable=exec-statement return restricted_scope def not_implemented_parse_content( unused_content, unused_scope, unused_root_name): raise Exception('Not implemented.') # by default no parser method is configured; set custom parser if you have it parse_content = not_implemented_parse_content def evaluate_python_expression_from_text(content, root_name, scope, noverify_text): """Compiles and evaluates a Python script in a restricted environment.""" restricted_scope = parse_content(content, scope, root_name) if noverify_text: restricted_scope['noverify'] = noverify_text if restricted_scope.get(root_name) is None: raise Exception('Unable to find \'%s\'' % root_name) return restricted_scope def evaluate_javascript_expression_from_file(fname, root_name, scope, error): (content, noverify_text) = 
convert_javascript_file_to_python(fname, root_name) try: return evaluate_python_expression_from_text(content, root_name, scope, noverify_text) except: error('Unable to parse %s in file %s\n %s' % ( root_name, fname, text_to_line_numbered_text(content))) for message in sys.exc_info(): error(str(message)) raise class Verifier(object): """Verifies Units, Lessons, Assessments, Activities and their relations.""" def __init__(self): self.echo_func = silent_echo self.schema_helper = SchemaHelper() self.errors = 0 self.warnings = 0 self.export = [] def verify_unit_fields(self, units): self.export.append('units = Array();') for unit in units: if not is_one_of(unit.now_available, [True, False]): self.error( 'Bad now_available \'%s\' for unit id %s; expected ' '\'True\' or \'False\'' % (unit.now_available, unit.id)) if not is_one_of(unit.type, UNIT_TYPES): self.error( 'Bad type \'%s\' for unit id %s; ' 'expected: %s.' % (unit.type, unit.id, UNIT_TYPES)) if unit.type == 'U': if not is_integer(unit.unit_id): self.error( 'Expected integer unit_id, found %s in unit id ' ' %s' % (unit.unit_id, unit.id)) self.export.append('') self.export.append('units[%s] = Array();' % unit.id) self.export.append('units[%s][\'lessons\'] = Array();' % unit.id) unit.list_properties('units[%s]' % unit.id, self.export) def verify_lesson_fields(self, lessons): for lesson in lessons: if not is_one_of(lesson.lesson_activity, ['yes', '']): self.error('Bad lesson_activity \'%s\' for lesson_id %s' % ( lesson.lesson_activity, lesson.lesson_id)) self.export.append('') self.export.append('units[%s][\'lessons\'][%s] = Array();' % ( lesson.unit_id, lesson.lesson_id)) lesson.list_properties('units[%s][\'lessons\'][%s]' % ( lesson.unit_id, lesson.lesson_id), self.export) def verify_unit_lesson_relationships(self, units, lessons): """Checks each lesson points to a unit and all lessons are in use.""" used_lessons = [] units.sort(key=lambda x: x.id) # for unit in units: for i in range(0, len(units)): unit = units[i] 
# check that unit ids are 1-based and sequential if unit.id != i + 1: self.error('Unit out of order: %s' % (unit.id)) # get the list of lessons for each unit self.fine('Unit %s: %s' % (unit.id, unit.title)) unit_lessons = [] for lesson in lessons: if lesson.unit_id == unit.unit_id: if lesson.unit_title != unit.title: raise Exception(''.join([ 'A unit_title of a lesson (id=%s) must match ', 'title of a unit (id=%s) the lesson belongs to.' ]) % (lesson.lesson_id, lesson.unit_id)) unit_lessons.append(lesson) used_lessons.append(lesson) # inspect all lessons for the current unit unit_lessons.sort(key=lambda x: x.lesson_id) for j in range(0, len(unit_lessons)): lesson = unit_lessons[j] # check that lesson_ids are 1-based and sequential if lesson.lesson_id != j + 1: self.warn( 'Lesson lesson_id is out of order: expected %s, found ' ' %s (%s)' % ( j + 1, lesson.lesson_id, lesson.to_id_string())) self.fine(' Lesson %s: %s' % ( lesson.lesson_id, lesson.lesson_title)) # find lessons not used by any of the units unused_lessons = list(lessons) for lesson in used_lessons: unused_lessons.remove(lesson) for lesson in unused_lessons: self.warn('Unused lesson_id %s (%s)' % ( lesson.lesson_id, lesson.to_id_string())) # check all lessons point to known units for lesson in lessons: has = False for unit in units: if lesson.unit_id == unit.unit_id: has = True break if not has: self.error('Lesson has unknown unit_id %s (%s)' % ( lesson.unit_id, lesson.to_id_string())) def get_activity_as_python(self, unit_id, lesson_id): fname = os.path.join( os.path.dirname(__file__), '../assets/js/activity-%s.%s.js' % (unit_id, lesson_id)) if not os.path.exists(fname): self.error(' Missing activity: %s' % fname) else: activity = evaluate_javascript_expression_from_file( fname, 'activity', Activity().scope, self.error) self.verify_activity_instance(activity, fname) return activity def verify_activities(self, lessons): """Loads and verifies all activities.""" self.info('Loading activities:') count = 0 
for lesson in lessons: if lesson.lesson_activity == 'yes': count += 1 activity = self.get_activity_as_python( lesson.unit_id, lesson.lesson_id) self.export.append('') self.encode_activity_json( activity, lesson.unit_id, lesson.lesson_id) self.info('Read %s activities' % count) def verify_assessment(self, units): """Loads and verifies all assessments.""" self.export.append('') self.export.append('assessments = Array();') self.info('Loading assessment:') count = 0 for unit in units: if unit.type == 'A': count += 1 assessment_name = str(unit.unit_id) fname = os.path.join( os.path.dirname(__file__), '../assets/js/assessment-%s.js' % assessment_name) if not os.path.exists(fname): self.error(' Missing assessment: %s' % fname) else: assessment = evaluate_javascript_expression_from_file( fname, 'assessment', Assessment().scope, self.error) self.verify_assessment_instance(assessment, fname) self.export.append('') self.encode_assessment_json(assessment, assessment_name) self.info('Read %s assessments' % count) # NB: The exported script needs to define a gcb_regex() wrapper function @staticmethod def encode_regex(regex_str): """Encodes a JavaScript-style regex into a Python gcb_regex call.""" # parse the regex into the base and modifiers. 
e.g., for /foo/i # base is 'foo' and modifiers is 'i' assert regex_str[0] == '/' # find the LAST '/' in regex_str (because there might be other # escaped '/' characters in the middle of regex_str) final_slash_index = regex_str.rfind('/') assert final_slash_index > 0 base = regex_str[1:final_slash_index] modifiers = regex_str[final_slash_index + 1:] func_str = 'gcb_regex(' + repr(base) + ', ' + repr(modifiers) + ')' return func_str def encode_activity_json(self, activity_dict, unit_id, lesson_id): """Encodes an activity dictionary into JSON.""" output = [] for elt in activity_dict['activity']: t = type(elt) encoded_elt = None if t is str: encoded_elt = {'type': 'string', 'value': elt} elif t is dict: qt = elt['questionType'] encoded_elt = {'type': qt} if qt == 'multiple choice': choices = elt['choices'] encoded_choices = [[x, y.value, z] for x, y, z in choices] encoded_elt['choices'] = encoded_choices elif qt == 'multiple choice group': # everything inside are primitive types that can be encoded elt_copy = dict(elt) del elt_copy['questionType'] # redundant encoded_elt['value'] = elt_copy elif qt == 'freetext': for k in elt.keys(): if k == 'questionType': continue elif k == 'correctAnswerRegex': encoded_elt[k] = Verifier.encode_regex(elt[k].value) else: # ordinary string encoded_elt[k] = elt[k] else: assert False else: assert False assert encoded_elt output.append(encoded_elt) # N.B.: make sure to get the string quoting right! 
code_str = "units[%s]['lessons'][%s]['activity'] = " % ( unit_id, lesson_id) + repr(json.dumps(output)) + ';' self.export.append(code_str) if 'noverify' in activity_dict: self.export.append('') noverify_code_str = "units[%s]['lessons'][%s]['code'] = " % ( unit_id, lesson_id) + repr(activity_dict['noverify']) + ';' self.export.append(noverify_code_str) def encode_assessment_json(self, assessment_dict, assessment_name): """Encodes an assessment dictionary into JSON.""" real_dict = assessment_dict['assessment'] output = {} output['assessmentName'] = real_dict['assessmentName'] if 'preamble' in real_dict: output['preamble'] = real_dict['preamble'] output['checkAnswers'] = real_dict['checkAnswers'].value encoded_questions_list = [] for elt in real_dict['questionsList']: encoded_elt = {} encoded_elt['questionHTML'] = elt['questionHTML'] if 'lesson' in elt: encoded_elt['lesson'] = elt['lesson'] if 'correctAnswerNumeric' in elt: encoded_elt['correctAnswerNumeric'] = elt[ 'correctAnswerNumeric'] if 'correctAnswerString' in elt: encoded_elt['correctAnswerString'] = elt['correctAnswerString'] if 'correctAnswerRegex' in elt: encoded_elt['correctAnswerRegex'] = Verifier.encode_regex( elt['correctAnswerRegex'].value) if 'choices' in elt: encoded_choices = [] correct_answer_index = None for (ind, e) in enumerate(elt['choices']): if type(e) is str: encoded_choices.append(e) elif e.term_type == CORRECT: encoded_choices.append(e.value) correct_answer_index = ind else: raise Exception("Invalid type in 'choices'") encoded_elt['choices'] = encoded_choices encoded_elt['correctAnswerIndex'] = correct_answer_index encoded_questions_list.append(encoded_elt) output['questionsList'] = encoded_questions_list # N.B.: make sure to get the string quoting right! 
code_str = 'assessments[\'' + assessment_name + '\'] = ' + repr( json.dumps(output)) + ';' self.export.append(code_str) if 'noverify' in assessment_dict: self.export.append('') noverify_code_str = ('assessments[\'' + assessment_name + '\'] = ' + repr(assessment_dict['noverify']) + ';') self.export.append(noverify_code_str) def format_parse_log(self): return 'Parse log:\n%s' % '\n'.join(self.schema_helper.parse_log) def verify_assessment_instance(self, scope, fname): """Verifies compliance of assessment with schema.""" if scope: try: self.schema_helper.check_instances_match_schema( scope['assessment'], SCHEMA['assessment'], 'assessment') self.info(' Verified assessment %s' % fname) if OUTPUT_DEBUG_LOG: self.info(self.format_parse_log()) except SchemaException as e: self.error(' Error in assessment %s\n%s' % ( fname, self.format_parse_log())) raise e else: self.error(' Unable to evaluate \'assessment =\' in %s' % fname) def verify_activity_instance(self, scope, fname): """Verifies compliance of activity with schema.""" if scope: try: self.schema_helper.check_instances_match_schema( scope['activity'], SCHEMA['activity'], 'activity') self.info(' Verified activity %s' % fname) if OUTPUT_DEBUG_LOG: self.info(self.format_parse_log()) except SchemaException as e: self.error(' Error in activity %s\n%s' % ( fname, self.format_parse_log())) raise e else: self.error(' Unable to evaluate \'activity =\' in %s' % fname) def fine(self, x): if OUTPUT_FINE_LOG: self.echo_func('FINE: ' + x) def info(self, x): self.echo_func('INFO: ' + x) def warn(self, x): self.warnings += 1 self.echo_func('WARNING: ' + x) def error(self, x): self.errors += 1 self.echo_func('ERROR: ' + x) def load_and_verify_model(self, echo_func): """Loads, parses and verifies all content for a course.""" self.echo_func = echo_func self.info('Started verification in: %s' % __file__) unit_file = os.path.join(os.path.dirname(__file__), '../data/unit.csv') lesson_file = os.path.join( os.path.dirname(__file__), 
'../data/lesson.csv') self.info('Loading units from: %s' % unit_file) units = read_objects_from_csv_file(unit_file, UNITS_HEADER, Unit) self.info('Read %s units' % len(units)) self.info('Loading lessons from: %s' % lesson_file) lessons = read_objects_from_csv_file( lesson_file, LESSONS_HEADER, Lesson) self.info('Read %s lessons' % len(lessons)) self.verify_unit_fields(units) self.verify_lesson_fields(lessons) self.verify_unit_lesson_relationships(units, lessons) try: self.verify_activities(lessons) self.verify_assessment(units) except SchemaException as e: self.error(str(e)) info = ( 'Schema usage statistics: %s' 'Completed verification: %s warnings, %s errors.' % ( self.schema_helper.type_stats, self.warnings, self.errors)) self.info(info) return self.warnings, self.errors, info def run_all_regex_unit_tests(): """Executes all tests related to regular expressions.""" # pylint: disable=anomalous-backslash-in-string assert escape_javascript_regex( 'correctAnswerRegex: /site:bls.gov?/i, blah') == ( 'correctAnswerRegex: regex(\"/site:bls.gov?/i\"), blah') assert escape_javascript_regex( 'correctAnswerRegex: /site:http:\/\/www.google.com?q=abc/i, blah') == ( 'correctAnswerRegex: ' 'regex(\"/site:http:\/\/www.google.com?q=abc/i\"), blah') assert remove_javascript_multi_line_comment( 'blah\n/*\ncomment\n*/\nblah') == 'blah\n\nblah' assert remove_javascript_multi_line_comment( 'blah\nblah /*\ncomment\nblah */\nblah') == ('blah\nblah \nblah') assert remove_javascript_single_line_comment( 'blah\n// comment\nblah') == 'blah\n\nblah' assert remove_javascript_single_line_comment( 'blah\nblah http://www.foo.com\nblah') == ( 'blah\nblah http://www.foo.com\nblah') assert remove_javascript_single_line_comment( 'blah\nblah // comment\nblah') == 'blah\nblah\nblah' assert remove_javascript_single_line_comment( 'blah\nblah // comment http://www.foo.com\nblah') == ( 'blah\nblah\nblah') assert parse_content_marked_no_verify( 'blah1\n// <gcb-no-verify>\n/blah2\n// 
</gcb-no-verify>\nblah3')[0] == ( 'blah1\n// \nblah3') # pylint: enable=anomalous-backslash-in-string assert Verifier.encode_regex('/white?/i') == """gcb_regex('white?', 'i')""" assert (Verifier.encode_regex('/jane austen (book|books) \\-price/i') == r"""gcb_regex('jane austen (book|books) \\-price', 'i')""") assert (Verifier.encode_regex('/Kozanji|Kozan-ji|Kosanji|Kosan-ji/i') == r"""gcb_regex('Kozanji|Kozan-ji|Kosanji|Kosan-ji', 'i')""") assert (Verifier.encode_regex('/Big Time College Sport?/i') == "gcb_regex('Big Time College Sport?', 'i')") assert (Verifier.encode_regex('/354\\s*[+]\\s*651/') == r"""gcb_regex('354\\s*[+]\\s*651', '')""") # pylint: disable=too-many-statements def run_all_schema_helper_unit_tests(): """Executes all tests related to schema validation.""" def assert_same(a, b): if a != b: raise Exception('Expected:\n %s\nFound:\n %s' % (a, b)) def assert_pass(instances, types, expected_result=None): try: schema_helper = SchemaHelper() result = schema_helper.check_instances_match_schema( instances, types, 'test') if OUTPUT_DEBUG_LOG: print '\n'.join(schema_helper.parse_log) if expected_result: assert_same(expected_result, result) except SchemaException as e: if OUTPUT_DEBUG_LOG: print str(e) print '\n'.join(schema_helper.parse_log) raise def assert_fails(func): try: func() raise Exception('Expected to fail') except SchemaException as e: if OUTPUT_DEBUG_LOG: print str(e) def assert_fail(instances, types): assert_fails(lambda: assert_pass(instances, types)) def create_python_dict_from_js_object(js_object): python_str, noverify = convert_javascript_to_python( 'var x = ' + js_object, 'x') ret = evaluate_python_expression_from_text( python_str, 'x', Assessment().scope, noverify) return ret['x'] # CSV tests units = read_objects_from_csv( [ ['id', 'type', 'now_available'], [1, 'U', 'True'], [1, 'U', 'False']], 'id,type,now_available', Unit, converter=UNIT_CSV_TO_DB_CONVERTER) assert units[0].now_available assert not units[1].now_available 
read_objects_from_csv( [['id', 'type'], [1, 'none']], 'id,type', Unit) def reader_one(): return read_objects_from_csv( [['id', 'type'], [1, 'none']], 'id,type,title', Unit) assert_fails(reader_one) def reader_two(): read_objects_from_csv( [['id', 'type', 'title'], [1, 'none']], 'id,type,title', Unit) assert_fails(reader_two) # context tests assert_same(Context().new([]).new(['a']).new(['b', 'c']).format_path(), ('//a/b/c')) # simple map tests assert_pass({'name': 'Bob'}, {'name': STRING}) assert_fail('foo', 'bar') assert_fail({'name': 'Bob'}, {'name': INTEGER}) assert_fail({'name': 12345}, {'name': STRING}) assert_fail({'amount': 12345}, {'name': INTEGER}) assert_fail({'regex': Term(CORRECT)}, {'regex': Term(REGEX)}) assert_pass({'name': 'Bob'}, {'name': STRING, 'phone': STRING}) assert_pass({'name': 'Bob'}, {'phone': STRING, 'name': STRING}) assert_pass({'name': 'Bob'}, {'phone': STRING, 'name': STRING, 'age': INTEGER}) # mixed attributes tests assert_pass({'colors': ['red', 'blue']}, {'colors': [STRING]}) assert_pass({'colors': []}, {'colors': [STRING]}) assert_fail({'colors': {'red': 'blue'}}, {'colors': [STRING]}) assert_fail({'colors': {'red': 'blue'}}, {'colors': [FLOAT]}) assert_fail({'colors': ['red', 'blue', 5.5]}, {'colors': [STRING]}) assert_fail({'colors': ['red', 'blue', {'foo': 'bar'}]}, {'colors': [STRING]}) assert_fail({'colors': ['red', 'blue'], 'foo': 'bar'}, {'colors': [STRING]}) assert_pass({'colors': ['red', 1]}, {'colors': [[STRING, INTEGER]]}) assert_fail({'colors': ['red', 'blue']}, {'colors': [[STRING, INTEGER]]}) assert_fail({'colors': [1, 2, 3]}, {'colors': [[STRING, INTEGER]]}) assert_fail({'colors': ['red', 1, 5.3]}, {'colors': [[STRING, INTEGER]]}) assert_pass({'colors': ['red', 'blue']}, {'colors': [STRING]}) assert_fail({'colors': ['red', 'blue']}, {'colors': [[STRING]]}) assert_fail({'colors': ['red', ['blue']]}, {'colors': [STRING]}) assert_fail({'colors': ['red', ['blue', 'green']]}, {'colors': [STRING]}) # required attribute 
tests assert_pass({'colors': ['red', 5]}, {'colors': [[STRING, INTEGER]]}) assert_fail({'colors': ['red', 5]}, {'colors': [[INTEGER, STRING]]}) assert_pass({'colors': ['red', 5]}, {'colors': [STRING, INTEGER]}) assert_pass({'colors': ['red', 5]}, {'colors': [INTEGER, STRING]}) assert_fail({'colors': ['red', 5, 'FF0000']}, {'colors': [[STRING, INTEGER]]}) # an array and a map of primitive type tests assert_pass({'color': {'name': 'red', 'rgb': 'FF0000'}}, {'color': {'name': STRING, 'rgb': STRING}}) assert_fail({'color': {'name': 'red', 'rgb': ['FF0000']}}, {'color': {'name': STRING, 'rgb': STRING}}) assert_fail({'color': {'name': 'red', 'rgb': 'FF0000'}}, {'color': {'name': STRING, 'rgb': INTEGER}}) assert_fail({'color': {'name': 'red', 'rgb': 'FF0000'}}, {'color': {'name': STRING, 'rgb': {'hex': STRING}}}) assert_pass({'color': {'name': 'red', 'rgb': 'FF0000'}}, {'color': {'name': STRING, 'rgb': STRING}}) assert_pass({'colors': [{'name': 'red', 'rgb': 'FF0000'}, {'name': 'blue', 'rgb': '0000FF'}]}, {'colors': [{'name': STRING, 'rgb': STRING}]}) assert_fail({'colors': [{'name': 'red', 'rgb': 'FF0000'}, {'phone': 'blue', 'rgb': '0000FF'}]}, {'colors': [{'name': STRING, 'rgb': STRING}]}) # boolean type tests assert_pass({'name': 'Bob', 'active': True}, {'name': STRING, 'active': BOOLEAN}) assert_pass({'name': 'Bob', 'active': [5, True, False]}, {'name': STRING, 'active': [INTEGER, BOOLEAN]}) assert_pass({'name': 'Bob', 'active': [5, True, 'false']}, {'name': STRING, 'active': [STRING, INTEGER, BOOLEAN]}) assert_fail({'name': 'Bob', 'active': [5, True, 'False']}, {'name': STRING, 'active': [[INTEGER, BOOLEAN]]}) # optional attribute tests assert_pass({'points': [{'x': 1, 'y': 2, 'z': 3}, {'x': 3, 'y': 2, 'z': 1}, {'x': 2, 'y': 3, 'z': 1}]}, {'points': [{'x': INTEGER, 'y': INTEGER, 'z': INTEGER}]}) assert_pass({'points': [{'x': 1, 'z': 3}, {'x': 3, 'y': 2}, {'y': 3, 'z': 1}]}, {'points': [{'x': INTEGER, 'y': INTEGER, 'z': INTEGER}]}) assert_pass({'account': [{'name': 
'Bob', 'age': 25, 'active': True}]}, {'account': [{'age': INTEGER, 'name': STRING, 'active': BOOLEAN}]}) assert_pass({'account': [{'name': 'Bob', 'active': True}]}, {'account': [{'age': INTEGER, 'name': STRING, 'active': BOOLEAN}]}) # nested array tests assert_fail({'name': 'Bob', 'active': [5, True, 'false']}, {'name': STRING, 'active': [[BOOLEAN]]}) assert_fail({'name': 'Bob', 'active': [True]}, {'name': STRING, 'active': [[STRING]]}) assert_pass({'name': 'Bob', 'active': ['true']}, {'name': STRING, 'active': [[STRING]]}) assert_pass({'name': 'flowers', 'price': ['USD', 9.99]}, {'name': STRING, 'price': [[STRING, FLOAT]]}) assert_pass({'name': 'flowers', 'price': [['USD', 9.99], ['CAD', 11.79], ['RUB', 250.23]]}, {'name': STRING, 'price': [[STRING, FLOAT]]}) # selector tests assert_pass({'likes': [{'state': 'CA', 'food': 'cheese'}, {'state': 'NY', 'drink': 'wine'}]}, {'likes': [{'state': 'CA', 'food': STRING}, {'state': 'NY', 'drink': STRING}]}) assert_pass({'likes': [{'state': 'CA', 'food': 'cheese'}, {'state': 'CA', 'food': 'nuts'}]}, {'likes': [{'state': 'CA', 'food': STRING}, {'state': 'NY', 'drink': STRING}]}) assert_fail({'likes': {'state': 'CA', 'drink': 'cheese'}}, {'likes': [{'state': 'CA', 'food': STRING}, {'state': 'NY', 'drink': STRING}]}) # creating from dict tests assert_same(create_python_dict_from_js_object('{"active": true}'), {'active': Term(BOOLEAN, True)}) assert_same(create_python_dict_from_js_object( '{"a": correct("hello world")}'), {'a': Term(CORRECT, 'hello world')}) assert_same(create_python_dict_from_js_object( '{correctAnswerRegex: /hello/i}'), {'correctAnswerRegex': Term(REGEX, '/hello/i')}) def run_example_activity_tests(): """Parses and validates example activity file.""" fname = os.path.join( os.path.dirname(__file__), '../assets/js/activity-examples.js') if not os.path.exists(fname): raise Exception('Missing file: %s', fname) verifier = Verifier() verifier.echo_func = echo activity = evaluate_javascript_expression_from_file( 
fname, 'activity', Activity().scope, verifier.echo_func) verifier.verify_activity_instance(activity, fname) def test_exec(): """This test shows that exec/compile are explitable, thus not safe.""" content = """ foo = [ c for c in ().__class__.__base__.__subclasses__() if c.__name__ == 'catch_warnings' ][0]()._module.__builtins__ """ restricted_scope = {} restricted_scope.update({'__builtins__': {}}) code = compile(content, '<string>', 'exec') # pylint: disable=exec-statement exec code in restricted_scope # pylint: enable=exec-statement assert 'isinstance' in restricted_scope.get('foo') def test_sample_assets(): """Test assets shipped with the sample course.""" _, _, output = Verifier().load_and_verify_model(echo) if ( 'Schema usage statistics: {' '\'REGEX\': 19, \'STRING\': 415, \'NUMBER\': 1, ' '\'BOOLEAN\': 81, \'dict\': 73, \'str\': 41, \'INTEGER\': 9, ' '\'CORRECT\': 9}' not in output or 'Completed verification: 0 warnings, 0 errors.' not in output): raise Exception('Sample course verification failed.\n%s' % output) def run_all_unit_tests(): """Runs all unit tests in this module.""" global parse_content original = parse_content try: parse_content = legacy_eval_python_expression_for_test run_all_regex_unit_tests() run_all_schema_helper_unit_tests() run_example_activity_tests() test_exec() test_sample_assets() finally: parse_content = original if __name__ == '__main__': run_all_unit_tests()
apache-2.0
appleseedhq/gaffer
python/GafferTest/TypedPlugTest.py
1
6995
########################################################################## # # Copyright (c) 2011-2012, John Haddon. All rights reserved. # Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# ########################################################################## import unittest import IECore import Gaffer import GafferTest class TypedPlugTest( GafferTest.TestCase ) : def testConstructor( self ) : s = Gaffer.StringPlug() self.assertEqual( s.defaultValue(), "" ) self.assertEqual( s.getName(), "StringPlug" ) s = Gaffer.StringPlug( direction=Gaffer.Plug.Direction.Out, defaultValue = "a" ) self.assertEqual( s.direction(), Gaffer.Plug.Direction.Out ) self.assertEqual( s.defaultValue(), "a" ) s = Gaffer.StringPlug( defaultValue="b", name="a" ) self.assertEqual( s.defaultValue(), "b" ) self.assertEqual( s.getName(), "a" ) def testDisconnection( self ) : p1 = Gaffer.StringPlug( direction=Gaffer.Plug.Direction.Out ) p2 = Gaffer.StringPlug( direction=Gaffer.Plug.Direction.In ) p2.setInput( p1 ) self.assert_( p2.getInput().isSame( p1 ) ) p2.setInput( None ) self.assert_( p2.getInput() is None ) def testAcceptsNoneInput( self ) : p = Gaffer.StringPlug( "hello" ) self.failUnless( p.acceptsInput( None ) ) def testRunTimeTyped( self ) : p = Gaffer.BoolPlug( "b" ) self.assertEqual( p.typeName(), "Gaffer::BoolPlug" ) self.assertEqual( IECore.RunTimeTyped.typeNameFromTypeId( p.typeId() ), "Gaffer::BoolPlug" ) self.assertEqual( IECore.RunTimeTyped.baseTypeId( p.typeId() ), Gaffer.ValuePlug.staticTypeId() ) def testSetToDefault( self ) : s = Gaffer.StringPlug( "s", defaultValue = "apple" ) self.assertEqual( s.getValue(), "apple" ) s.setValue( "pear" ) self.assertEqual( s.getValue(), "pear" ) s.setToDefault() self.assertEqual( s.getValue(), "apple" ) def testStringDefaultValueHash( self ) : p1 = Gaffer.StringPlug( "p", Gaffer.Plug.Direction.In, "a" ) p2 = Gaffer.StringPlug( "p", Gaffer.Plug.Direction.In, "b" ) p3 = Gaffer.StringPlug( "p", Gaffer.Plug.Direction.In, "b" ) self.assertNotEqual( p1.hash(), p2.hash() ) self.assertEqual( p2.hash(), p3.hash() ) def testBoolDefaultValueHash( self ) : p1 = Gaffer.BoolPlug( "p", Gaffer.Plug.Direction.In, True ) p2 = 
Gaffer.BoolPlug( "p", Gaffer.Plug.Direction.In, False ) p3 = Gaffer.BoolPlug( "p", Gaffer.Plug.Direction.In, False ) self.assertNotEqual( p1.hash(), p2.hash() ) self.assertEqual( p2.hash(), p3.hash() ) def testCreateCounterpart( self ) : p1 = Gaffer.BoolPlug( "p", Gaffer.Plug.Direction.In, True ) p2 = p1.createCounterpart( "a", Gaffer.Plug.Direction.Out ) self.assertEqual( p2.getName(), "a" ) self.assertEqual( p2.direction(), Gaffer.Plug.Direction.Out ) self.assertEqual( p2.defaultValue(), p1.defaultValue() ) self.assertEqual( p2.getFlags(), p1.getFlags() ) def testRepr( self ) : p1 = Gaffer.StringPlug( "p", Gaffer.Plug.Direction.In, "defaultValue", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) p2 = eval( repr( p1 ) ) self.assertEqual( p2.getName(), p1.getName() ) self.assertEqual( p2.direction(), p1.direction() ) self.assertEqual( p2.defaultValue(), p1.defaultValue() ) self.assertEqual( p2.getFlags(), p1.getFlags() ) def testBoolPlugNumericConnections( self ) : b = Gaffer.BoolPlug() for p in ( Gaffer.FloatPlug(), Gaffer.IntPlug() ) : b.setInput( p ) self.assertEqual( b.getValue(), False ) p.setValue( 1 ) self.assertEqual( b.getValue(), True ) p.setValue( 0 ) self.assertEqual( b.getValue(), False ) p.setValue( 1000 ) self.assertEqual( b.getValue(), True ) def testNoChildrenAccepted( self ) : p1 = Gaffer.BoolPlug() p2 = Gaffer.BoolPlug() self.assertFalse( p1.acceptsChild( p2 ) ) self.assertRaises( RuntimeError, p1.addChild, p2 ) def testPrecomputedHash( self ) : n = GafferTest.StringInOutNode() n["in"].setValue( "hi" ) self.assertEqual( n["out"].getValue(), "hi" ) self.assertEqual( n.numHashCalls, 1 ) self.assertEqual( n.numComputeCalls, 1 ) h = n["out"].hash() numHashCalls = n.numHashCalls # Accept either 1 or 2 - it would be reasonable for the ValuePlug # to have either cached the hash or not, but that's not what we're # testing here. 
self.assertTrue( numHashCalls == 1 or numHashCalls == 2 ) self.assertEqual( n.numComputeCalls, 1 ) # What we care about is that calling getValue() with a precomputed hash # definitely doesn't recompute the hash again. self.assertEqual( n["out"].getValue( _precomputedHash = h ), "hi" ) self.assertEqual( n.numHashCalls, numHashCalls ) self.assertEqual( n.numComputeCalls, 1 ) def testBoolPlugStringConnections( self ) : n = GafferTest.AddNode() n["op1"].setValue( 0 ) n["op2"].setValue( 2 ) self.assertEqual( n["sum"].getValue(), 2 ) s = Gaffer.StringPlug() n["enabled"].setInput( s ) self.assertEqual( n["sum"].getValue(), 0 ) s.setValue( "notEmpty" ) self.assertEqual( n["sum"].getValue(), 2 ) s.setValue( "${test}" ) self.assertEqual( n["sum"].getValue(), 0 ) with Gaffer.Context() as c : c["test"] = "notEmpty" self.assertEqual( n["sum"].getValue(), 2 ) c["test"] = "" self.assertEqual( n["sum"].getValue(), 0 ) if __name__ == "__main__": unittest.main()
bsd-3-clause
nomnombtc/bitcoin
qa/rpc-tests/walletbackup.py
85
7304
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Exercise the wallet backup code.  Ported from walletbackup.sh.

Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.

Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.

Miner then generates 101 more blocks, so any
transaction fees paid mature.

Sanity check:
  Sum(1,2,3,4 balances) == 114*50

1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.

Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""

# Explicit stdlib imports: the original relied on the wildcard import from
# test_framework.util to provide sys/os/shutil/Decimal as a side effect,
# which is fragile (PEP 8 discourages depending on `import *` for names).
import logging
import os
import shutil
import sys
from decimal import Decimal
from random import randint

from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *

logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO, stream=sys.stdout)


class WalletBackupTest(BitcoinTestFramework):
    """Back up, erase, and restore the wallets of three spender nodes."""

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 4
        # nodes 0, 1, 2 are spenders, let's give them a keypool=100
        # (node 3 is the miner and uses the defaults)
        self.extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]

    # This mirrors how the network was setup in the bash test
    def setup_network(self, split=False):
        """Start all nodes and connect 0/1/2 to the miner (3), plus 2 to 0."""
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
        connect_nodes(self.nodes[0], 3)
        connect_nodes(self.nodes[1], 3)
        connect_nodes(self.nodes[2], 3)
        connect_nodes(self.nodes[2], 0)
        self.is_network_split = False
        self.sync_all()

    def one_send(self, from_node, to_address):
        """With 50% probability, send a random 0.1-1.0 amount to to_address."""
        if randint(1, 2) == 1:
            amount = Decimal(randint(1, 10)) / Decimal(10)
            self.nodes[from_node].sendtoaddress(to_address, amount)

    def do_one_round(self):
        """Have each spender maybe pay each other spender, then mine a block."""
        a0 = self.nodes[0].getnewaddress()
        a1 = self.nodes[1].getnewaddress()
        a2 = self.nodes[2].getnewaddress()

        self.one_send(0, a1)
        self.one_send(0, a2)
        self.one_send(1, a0)
        self.one_send(1, a2)
        self.one_send(2, a0)
        self.one_send(2, a1)

        # Have the miner (node3) mine a block.
        # Must sync mempools before mining.
        sync_mempools(self.nodes)
        self.nodes[3].generate(1)
        sync_blocks(self.nodes)

    # As above, this mirrors the original bash test.
    def start_three(self):
        """Restart the three spender nodes and restore the topology."""
        for i in range(3):
            self.nodes[i] = start_node(i, self.options.tmpdir)
        connect_nodes(self.nodes[0], 3)
        connect_nodes(self.nodes[1], 3)
        connect_nodes(self.nodes[2], 3)
        connect_nodes(self.nodes[2], 0)

    def stop_three(self):
        """Shut down the three spender nodes."""
        for i in range(3):
            stop_node(self.nodes[i], i)

    def erase_three(self):
        """Delete the wallet.dat of each spender node."""
        for i in range(3):
            os.remove(self.options.tmpdir + "/node%d/regtest/wallet.dat" % i)

    def run_test(self):
        logging.info("Generating initial blockchain")
        # One block each for the spenders so each has exactly one coinbase,
        # then 100 miner blocks to mature those three coinbases.
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        self.nodes[1].generate(1)
        sync_blocks(self.nodes)
        self.nodes[2].generate(1)
        sync_blocks(self.nodes)
        self.nodes[3].generate(100)
        sync_blocks(self.nodes)

        assert_equal(self.nodes[0].getbalance(), 50)
        assert_equal(self.nodes[1].getbalance(), 50)
        assert_equal(self.nodes[2].getbalance(), 50)
        assert_equal(self.nodes[3].getbalance(), 0)

        logging.info("Creating transactions")
        # Five rounds of sending each other transactions.
        for i in range(5):
            self.do_one_round()

        logging.info("Backing up")
        tmpdir = self.options.tmpdir
        for i in range(3):
            self.nodes[i].backupwallet(tmpdir + "/node%d/wallet.bak" % i)
            self.nodes[i].dumpwallet(tmpdir + "/node%d/wallet.dump" % i)

        logging.info("More transactions")
        for i in range(5):
            self.do_one_round()

        # Generate 101 more blocks, so any fees paid mature
        self.nodes[3].generate(101)
        self.sync_all()

        balance0 = self.nodes[0].getbalance()
        balance1 = self.nodes[1].getbalance()
        balance2 = self.nodes[2].getbalance()
        balance3 = self.nodes[3].getbalance()
        total = balance0 + balance1 + balance2 + balance3

        # At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
        # 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
        assert_equal(total, 5700)

        ##
        # Test restoring spender wallets from backups
        ##
        logging.info("Restoring using wallet.dat")
        self.stop_three()
        self.erase_three()

        # Start node2 with no chain
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")

        # Restore wallets from backup
        for i in range(3):
            shutil.copyfile(tmpdir + "/node%d/wallet.bak" % i,
                            tmpdir + "/node%d/regtest/wallet.dat" % i)

        logging.info("Re-starting nodes")
        self.start_three()
        sync_blocks(self.nodes)

        assert_equal(self.nodes[0].getbalance(), balance0)
        assert_equal(self.nodes[1].getbalance(), balance1)
        assert_equal(self.nodes[2].getbalance(), balance2)

        logging.info("Restoring using dumped wallet")
        self.stop_three()
        self.erase_three()

        # start node2 with no chain
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")

        self.start_three()

        # Fresh (empty) wallets until the dumps are imported.
        assert_equal(self.nodes[0].getbalance(), 0)
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 0)

        for i in range(3):
            self.nodes[i].importwallet(tmpdir + "/node%d/wallet.dump" % i)

        sync_blocks(self.nodes)

        assert_equal(self.nodes[0].getbalance(), balance0)
        assert_equal(self.nodes[1].getbalance(), balance1)
        assert_equal(self.nodes[2].getbalance(), balance2)


if __name__ == '__main__':
    WalletBackupTest().main()
mit
tylertian/Openstack
openstack F/cinder/cinder/volume/netapp.py
2
54122
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2012 NetApp, Inc. # Copyright (c) 2012 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for NetApp storage systems. This driver requires NetApp OnCommand 5.0 and one or more Data ONTAP 7-mode storage systems with installed iSCSI licenses. """ import time import suds from suds import client from suds.sax import text from cinder import exception from cinder import flags from cinder.openstack.common import log as logging from cinder.openstack.common import cfg from cinder.volume import driver from cinder.volume import volume_types LOG = logging.getLogger("cinder.volume.driver") netapp_opts = [ cfg.StrOpt('netapp_wsdl_url', default=None, help='URL of the WSDL file for the DFM server'), cfg.StrOpt('netapp_login', default=None, help='User name for the DFM server'), cfg.StrOpt('netapp_password', default=None, help='Password for the DFM server'), cfg.StrOpt('netapp_server_hostname', default=None, help='Hostname for the DFM server'), cfg.IntOpt('netapp_server_port', default=8088, help='Port number for the DFM server'), cfg.StrOpt('netapp_storage_service', default=None, help=('Storage service to use for provisioning ' '(when volume_type=None)')), cfg.StrOpt('netapp_storage_service_prefix', default=None, help=('Prefix of storage service name to use for ' 'provisioning (volume_type name will be appended)')), cfg.StrOpt('netapp_vfiler', default=None, help='Vfiler to use for 
provisioning'), ] FLAGS = flags.FLAGS FLAGS.register_opts(netapp_opts) class DfmDataset(object): def __init__(self, id, name, project, type): self.id = id self.name = name self.project = project self.type = type class DfmLun(object): def __init__(self, dataset, lunpath, id): self.dataset = dataset self.lunpath = lunpath self.id = id class NetAppISCSIDriver(driver.ISCSIDriver): """NetApp iSCSI volume driver.""" IGROUP_PREFIX = 'openstack-' DATASET_PREFIX = 'OpenStack_' DATASET_METADATA_PROJECT_KEY = 'OpenStackProject' DATASET_METADATA_VOL_TYPE_KEY = 'OpenStackVolType' def __init__(self, *args, **kwargs): super(NetAppISCSIDriver, self).__init__(*args, **kwargs) self.discovered_luns = [] self.discovered_datasets = [] self.lun_table = {} def _check_fail(self, request, response): """Utility routine to handle checking ZAPI failures.""" if 'failed' == response.Status: name = request.Name reason = response.Reason msg = _('API %(name)s failed: %(reason)s') raise exception.VolumeBackendAPIException(data=msg % locals()) def _create_client(self, **kwargs): """Instantiate a web services client. This method creates a "suds" client to make web services calls to the DFM server. Note that the WSDL file is quite large and may take a few seconds to parse. 
""" wsdl_url = kwargs['wsdl_url'] LOG.debug(_('Using WSDL: %s') % wsdl_url) if kwargs['cache']: self.client = client.Client(wsdl_url, username=kwargs['login'], password=kwargs['password']) else: self.client = client.Client(wsdl_url, username=kwargs['login'], password=kwargs['password'], cache=None) soap_url = 'http://%s:%s/apis/soap/v1' % (kwargs['hostname'], kwargs['port']) LOG.debug(_('Using DFM server: %s') % soap_url) self.client.set_options(location=soap_url) def _set_storage_service(self, storage_service): """Set the storage service to use for provisioning.""" LOG.debug(_('Using storage service: %s') % storage_service) self.storage_service = storage_service def _set_storage_service_prefix(self, storage_service_prefix): """Set the storage service prefix to use for provisioning.""" LOG.debug(_('Using storage service prefix: %s') % storage_service_prefix) self.storage_service_prefix = storage_service_prefix def _set_vfiler(self, vfiler): """Set the vfiler to use for provisioning.""" LOG.debug(_('Using vfiler: %s') % vfiler) self.vfiler = vfiler def _check_flags(self): """Ensure that the flags we care about are set.""" required_flags = ['netapp_wsdl_url', 'netapp_login', 'netapp_password', 'netapp_server_hostname', 'netapp_server_port'] for flag in required_flags: if not getattr(FLAGS, flag, None): raise exception.InvalidInput(reason=_('%s is not set') % flag) if not (FLAGS.netapp_storage_service or FLAGS.netapp_storage_service_prefix): raise exception.InvalidInput(reason=_('Either ' 'netapp_storage_service or netapp_storage_service_prefix must ' 'be set')) def do_setup(self, context): """Setup the NetApp Volume driver. Called one time by the manager after the driver is loaded. Validate the flags we care about and setup the suds (web services) client. 
""" self._check_flags() self._create_client(wsdl_url=FLAGS.netapp_wsdl_url, login=FLAGS.netapp_login, password=FLAGS.netapp_password, hostname=FLAGS.netapp_server_hostname, port=FLAGS.netapp_server_port, cache=True) self._set_storage_service(FLAGS.netapp_storage_service) self._set_storage_service_prefix(FLAGS.netapp_storage_service_prefix) self._set_vfiler(FLAGS.netapp_vfiler) def check_for_setup_error(self): """Check that the driver is working and can communicate. Invoke a web services API to make sure we can talk to the server. Also perform the discovery of datasets and LUNs from DFM. """ self.client.service.DfmAbout() LOG.debug(_("Connected to DFM server")) self._discover_luns() def _get_datasets(self): """Get the list of datasets from DFM.""" server = self.client.service res = server.DatasetListInfoIterStart(IncludeMetadata=True) tag = res.Tag datasets = [] try: while True: res = server.DatasetListInfoIterNext(Tag=tag, Maximum=100) if not res.Datasets: break datasets.extend(res.Datasets.DatasetInfo) finally: server.DatasetListInfoIterEnd(Tag=tag) return datasets def _discover_dataset_luns(self, dataset, volume): """Discover all of the LUNs in a dataset.""" server = self.client.service res = server.DatasetMemberListInfoIterStart( DatasetNameOrId=dataset.id, IncludeExportsInfo=True, IncludeIndirect=True, MemberType='lun_path') tag = res.Tag suffix = None if volume: suffix = '/' + volume try: while True: res = server.DatasetMemberListInfoIterNext(Tag=tag, Maximum=100) if (not hasattr(res, 'DatasetMembers') or not res.DatasetMembers): break for member in res.DatasetMembers.DatasetMemberInfo: if suffix and not member.MemberName.endswith(suffix): continue # MemberName is the full LUN path in this format: # host:/volume/qtree/lun lun = DfmLun(dataset, member.MemberName, member.MemberId) self.discovered_luns.append(lun) finally: server.DatasetMemberListInfoIterEnd(Tag=tag) def _discover_luns(self): """Discover the LUNs from DFM. 
Discover all of the OpenStack-created datasets and LUNs in the DFM database. """ datasets = self._get_datasets() self.discovered_datasets = [] self.discovered_luns = [] for dataset in datasets: if not dataset.DatasetName.startswith(self.DATASET_PREFIX): continue if (not hasattr(dataset, 'DatasetMetadata') or not dataset.DatasetMetadata): continue project = None type = None for field in dataset.DatasetMetadata.DfmMetadataField: if field.FieldName == self.DATASET_METADATA_PROJECT_KEY: project = field.FieldValue elif field.FieldName == self.DATASET_METADATA_VOL_TYPE_KEY: type = field.FieldValue if not project: continue ds = DfmDataset(dataset.DatasetId, dataset.DatasetName, project, type) self.discovered_datasets.append(ds) self._discover_dataset_luns(ds, None) dataset_count = len(self.discovered_datasets) lun_count = len(self.discovered_luns) msg = _("Discovered %(dataset_count)s datasets and %(lun_count)s LUNs") LOG.debug(msg % locals()) self.lun_table = {} def _get_job_progress(self, job_id): """Get progress of one running DFM job. Obtain the latest progress report for the job and return the list of progress events. """ server = self.client.service res = server.DpJobProgressEventListIterStart(JobId=job_id) tag = res.Tag event_list = [] try: while True: res = server.DpJobProgressEventListIterNext(Tag=tag, Maximum=100) if not hasattr(res, 'ProgressEvents'): break event_list += res.ProgressEvents.DpJobProgressEventInfo finally: server.DpJobProgressEventListIterEnd(Tag=tag) return event_list def _wait_for_job(self, job_id): """Wait until a job terminates. Poll the job until it completes or an error is detected. Return the final list of progress events if it completes successfully. 
""" while True: events = self._get_job_progress(job_id) for event in events: if event.EventStatus == 'error': msg = _('Job failed: %s') % (event.ErrorMessage) raise exception.VolumeBackendAPIException(data=msg) if event.EventType == 'job-end': return events time.sleep(5) def _dataset_name(self, project, ss_type): """Return the dataset name for a given project and volume type.""" _project = project.replace(' ', '_').replace('-', '_') dataset_name = self.DATASET_PREFIX + _project if not ss_type: return dataset_name _type = ss_type.replace(' ', '_').replace('-', '_') return dataset_name + '_' + _type def _get_dataset(self, dataset_name): """Lookup a dataset by name in the list of discovered datasets.""" for dataset in self.discovered_datasets: if dataset.name == dataset_name: return dataset return None def _create_dataset(self, dataset_name, project, ss_type): """Create a new dataset using the storage service. The export settings are set to create iSCSI LUNs aligned for Linux. Returns the ID of the new dataset. 
""" if ss_type and not self.storage_service_prefix: msg = _('Attempt to use volume_type without specifying ' 'netapp_storage_service_prefix flag.') raise exception.VolumeBackendAPIException(data=msg) if not (ss_type or self.storage_service): msg = _('You must set the netapp_storage_service flag in order to ' 'create volumes with no volume_type.') raise exception.VolumeBackendAPIException(data=msg) storage_service = self.storage_service if ss_type: storage_service = self.storage_service_prefix + ss_type factory = self.client.factory lunmap = factory.create('DatasetLunMappingInfo') lunmap.IgroupOsType = 'linux' export = factory.create('DatasetExportInfo') export.DatasetExportProtocol = 'iscsi' export.DatasetLunMappingInfo = lunmap detail = factory.create('StorageSetInfo') detail.DpNodeName = 'Primary data' detail.DatasetExportInfo = export if hasattr(self, 'vfiler') and self.vfiler: detail.ServerNameOrId = self.vfiler details = factory.create('ArrayOfStorageSetInfo') details.StorageSetInfo = [detail] field1 = factory.create('DfmMetadataField') field1.FieldName = self.DATASET_METADATA_PROJECT_KEY field1.FieldValue = project field2 = factory.create('DfmMetadataField') field2.FieldName = self.DATASET_METADATA_VOL_TYPE_KEY field2.FieldValue = ss_type metadata = factory.create('ArrayOfDfmMetadataField') metadata.DfmMetadataField = [field1, field2] res = self.client.service.StorageServiceDatasetProvision( StorageServiceNameOrId=storage_service, DatasetName=dataset_name, AssumeConfirmation=True, StorageSetDetails=details, DatasetMetadata=metadata) ds = DfmDataset(res.DatasetId, dataset_name, project, ss_type) self.discovered_datasets.append(ds) return ds def _provision(self, name, description, project, ss_type, size): """Provision a LUN through provisioning manager. The LUN will be created inside a dataset associated with the project. If the dataset doesn't already exist, we create it using the storage service specified in the cinder conf. 
""" dataset_name = self._dataset_name(project, ss_type) dataset = self._get_dataset(dataset_name) if not dataset: dataset = self._create_dataset(dataset_name, project, ss_type) info = self.client.factory.create('ProvisionMemberRequestInfo') info.Name = name if description: info.Description = description info.Size = size info.MaximumSnapshotSpace = 2 * long(size) server = self.client.service lock_id = server.DatasetEditBegin(DatasetNameOrId=dataset.id) try: server.DatasetProvisionMember(EditLockId=lock_id, ProvisionMemberRequestInfo=info) res = server.DatasetEditCommit(EditLockId=lock_id, AssumeConfirmation=True) except (suds.WebFault, Exception): server.DatasetEditRollback(EditLockId=lock_id) msg = _('Failed to provision dataset member') raise exception.VolumeBackendAPIException(data=msg) lun_id = None lunpath = None for info in res.JobIds.JobInfo: events = self._wait_for_job(info.JobId) for event in events: if event.EventType != 'lun-create': continue lunpath = event.ProgressLunInfo.LunName lun_id = event.ProgressLunInfo.LunPathId if not lun_id: msg = _('No LUN was created by the provision job') raise exception.VolumeBackendAPIException(data=msg) lun = DfmLun(dataset, lunpath, lun_id) self.discovered_luns.append(lun) self.lun_table[name] = lun def _get_ss_type(self, volume): """Get the storage service type for a volume.""" id = volume['volume_type_id'] if not id: return None volume_type = volume_types.get_volume_type(None, id) if not volume_type: return None return volume_type['name'] def _remove_destroy(self, name, project): """Remove the LUN from the dataset, also destroying it. Remove the LUN from the dataset and destroy the actual LUN on the storage system. 
""" lun = self._lookup_lun_for_volume(name, project) member = self.client.factory.create('DatasetMemberParameter') member.ObjectNameOrId = lun.id members = self.client.factory.create('ArrayOfDatasetMemberParameter') members.DatasetMemberParameter = [member] server = self.client.service lock_id = server.DatasetEditBegin(DatasetNameOrId=lun.dataset.id) try: server.DatasetRemoveMember(EditLockId=lock_id, Destroy=True, DatasetMemberParameters=members) server.DatasetEditCommit(EditLockId=lock_id, AssumeConfirmation=True) except (suds.WebFault, Exception): server.DatasetEditRollback(EditLockId=lock_id) msg = _('Failed to remove and delete dataset member') raise exception.VolumeBackendAPIException(data=msg) def create_volume(self, volume): """Driver entry point for creating a new volume.""" default_size = '104857600' # 100 MB gigabytes = 1073741824L # 2^30 name = volume['name'] project = volume['project_id'] display_name = volume['display_name'] display_description = volume['display_description'] description = None if display_name: if display_description: description = display_name + "\n" + display_description else: description = display_name elif display_description: description = display_description if int(volume['size']) == 0: size = default_size else: size = str(int(volume['size']) * gigabytes) ss_type = self._get_ss_type(volume) self._provision(name, description, project, ss_type, size) def _lookup_lun_for_volume(self, name, project): """Lookup the LUN that corresponds to the give volume. Initial lookups involve a table scan of all of the discovered LUNs, but later lookups are done instantly from the hashtable. 
""" if name in self.lun_table: return self.lun_table[name] lunpath_suffix = '/' + name for lun in self.discovered_luns: if lun.dataset.project != project: continue if lun.lunpath.endswith(lunpath_suffix): self.lun_table[name] = lun return lun msg = _("No entry in LUN table for volume %s") % (name) raise exception.VolumeBackendAPIException(data=msg) def delete_volume(self, volume): """Driver entry point for destroying existing volumes.""" name = volume['name'] project = volume['project_id'] self._remove_destroy(name, project) def _get_lun_details(self, lun_id): """Given the ID of a LUN, get the details about that LUN.""" server = self.client.service res = server.LunListInfoIterStart(ObjectNameOrId=lun_id) tag = res.Tag try: res = server.LunListInfoIterNext(Tag=tag, Maximum=1) if hasattr(res, 'Luns') and res.Luns.LunInfo: return res.Luns.LunInfo[0] finally: server.LunListInfoIterEnd(Tag=tag) msg = _('Failed to get LUN details for LUN ID %s') raise exception.VolumeBackendAPIException(data=msg % lun_id) def _get_host_details(self, host_id): """Given the ID of a host, get the details about it. A "host" is a storage system here. """ server = self.client.service res = server.HostListInfoIterStart(ObjectNameOrId=host_id) tag = res.Tag try: res = server.HostListInfoIterNext(Tag=tag, Maximum=1) if hasattr(res, 'Hosts') and res.Hosts.HostInfo: return res.Hosts.HostInfo[0] finally: server.HostListInfoIterEnd(Tag=tag) msg = _('Failed to get host details for host ID %s') raise exception.VolumeBackendAPIException(data=msg % host_id) def _get_iqn_for_host(self, host_id): """Get the iSCSI Target Name for a storage system.""" request = self.client.factory.create('Request') request.Name = 'iscsi-node-get-name' response = self.client.service.ApiProxy(Target=host_id, Request=request) self._check_fail(request, response) return response.Results['node-name'][0] def _api_elem_is_empty(self, elem): """Return true if the API element should be considered empty. 
Helper routine to figure out if a list returned from a proxy API is empty. This is necessary because the API proxy produces nasty looking XML. """ if not type(elem) is list: return True if 0 == len(elem): return True child = elem[0] if isinstance(child, text.Text): return True if type(child) is str: return True return False def _get_target_portal_for_host(self, host_id, host_address): """Get iSCSI target portal for a storage system. Get the iSCSI Target Portal details for a particular IP address on a storage system. """ request = self.client.factory.create('Request') request.Name = 'iscsi-portal-list-info' response = self.client.service.ApiProxy(Target=host_id, Request=request) self._check_fail(request, response) portal = {} portals = response.Results['iscsi-portal-list-entries'] if self._api_elem_is_empty(portals): return portal portal_infos = portals[0]['iscsi-portal-list-entry-info'] for portal_info in portal_infos: portal['address'] = portal_info['ip-address'][0] portal['port'] = portal_info['ip-port'][0] portal['portal'] = portal_info['tpgroup-tag'][0] if host_address == portal['address']: break return portal def _get_export(self, volume): """Get the iSCSI export details for a volume. Looks up the LUN in DFM based on the volume and project name, then get the LUN's ID. We store that value in the database instead of the iSCSI details because we will not have the true iSCSI details until masking time (when initialize_connection() is called). """ name = volume['name'] project = volume['project_id'] lun = self._lookup_lun_for_volume(name, project) return {'provider_location': lun.id} def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume.""" return self._get_export(volume) def create_export(self, context, volume): """Driver entry point to get the export info for a new volume.""" return self._get_export(volume) def remove_export(self, context, volume): """Driver exntry point to remove an export for a volume. 
Since exporting is idempotent in this driver, we have nothing to do for unexporting. """ pass def _find_igroup_for_initiator(self, host_id, initiator_name): """Get the igroup for an initiator. Look for an existing igroup (initiator group) on the storage system containing a given iSCSI initiator and return the name of the igroup. """ request = self.client.factory.create('Request') request.Name = 'igroup-list-info' response = self.client.service.ApiProxy(Target=host_id, Request=request) self._check_fail(request, response) igroups = response.Results['initiator-groups'] if self._api_elem_is_empty(igroups): return None igroup_infos = igroups[0]['initiator-group-info'] for igroup_info in igroup_infos: if ('iscsi' != igroup_info['initiator-group-type'][0] or 'linux' != igroup_info['initiator-group-os-type'][0]): continue igroup_name = igroup_info['initiator-group-name'][0] if not igroup_name.startswith(self.IGROUP_PREFIX): continue initiators = igroup_info['initiators'][0]['initiator-info'] for initiator in initiators: if initiator_name == initiator['initiator-name'][0]: return igroup_name return None def _create_igroup(self, host_id, initiator_name): """Create a new igroup. Create a new igroup (initiator group) on the storage system to hold the given iSCSI initiator. The group will only have 1 member and will be named "openstack-${initiator_name}". 
""" igroup_name = self.IGROUP_PREFIX + initiator_name request = self.client.factory.create('Request') request.Name = 'igroup-create' igroup_create_xml = ( '<initiator-group-name>%s</initiator-group-name>' '<initiator-group-type>iscsi</initiator-group-type>' '<os-type>linux</os-type><ostype>linux</ostype>') request.Args = text.Raw(igroup_create_xml % igroup_name) response = self.client.service.ApiProxy(Target=host_id, Request=request) self._check_fail(request, response) request = self.client.factory.create('Request') request.Name = 'igroup-add' igroup_add_xml = ( '<initiator-group-name>%s</initiator-group-name>' '<initiator>%s</initiator>') request.Args = text.Raw(igroup_add_xml % (igroup_name, initiator_name)) response = self.client.service.ApiProxy(Target=host_id, Request=request) self._check_fail(request, response) return igroup_name def _get_lun_mappping(self, host_id, lunpath, igroup_name): """Get the mapping between a LUN and an igroup. Check if a given LUN is already mapped to the given igroup (initiator group). If the LUN is mapped, also return the LUN number for the mapping. """ request = self.client.factory.create('Request') request.Name = 'lun-map-list-info' request.Args = text.Raw('<path>%s</path>' % (lunpath)) response = self.client.service.ApiProxy(Target=host_id, Request=request) self._check_fail(request, response) igroups = response.Results['initiator-groups'] if self._api_elem_is_empty(igroups): return {'mapped': False} igroup_infos = igroups[0]['initiator-group-info'] for igroup_info in igroup_infos: if igroup_name == igroup_info['initiator-group-name'][0]: return {'mapped': True, 'lun_num': igroup_info['lun-id'][0]} return {'mapped': False} def _map_initiator(self, host_id, lunpath, igroup_name): """Map a LUN to an igroup. Map the given LUN to the given igroup (initiator group). Return the LUN number that the LUN was mapped to (the filer will choose the lowest available number). 
""" request = self.client.factory.create('Request') request.Name = 'lun-map' lun_map_xml = ('<initiator-group>%s</initiator-group>' '<path>%s</path>') request.Args = text.Raw(lun_map_xml % (igroup_name, lunpath)) response = self.client.service.ApiProxy(Target=host_id, Request=request) self._check_fail(request, response) return response.Results['lun-id-assigned'][0] def _unmap_initiator(self, host_id, lunpath, igroup_name): """Unmap the given LUN from the given igroup (initiator group).""" request = self.client.factory.create('Request') request.Name = 'lun-unmap' lun_unmap_xml = ('<initiator-group>%s</initiator-group>' '<path>%s</path>') request.Args = text.Raw(lun_unmap_xml % (igroup_name, lunpath)) response = self.client.service.ApiProxy(Target=host_id, Request=request) self._check_fail(request, response) def _ensure_initiator_mapped(self, host_id, lunpath, initiator_name): """Ensure that a LUN is mapped to a particular initiator. Check if a LUN is mapped to a given initiator already and create the mapping if it is not. A new igroup will be created if needed. Returns the LUN number for the mapping between the LUN and initiator in both cases. """ lunpath = '/vol/' + lunpath igroup_name = self._find_igroup_for_initiator(host_id, initiator_name) if not igroup_name: igroup_name = self._create_igroup(host_id, initiator_name) mapping = self._get_lun_mappping(host_id, lunpath, igroup_name) if mapping['mapped']: return mapping['lun_num'] return self._map_initiator(host_id, lunpath, igroup_name) def _ensure_initiator_unmapped(self, host_id, lunpath, initiator_name): """Ensure that a LUN is not mapped to a particular initiator. Check if a LUN is mapped to a given initiator and remove the mapping if it is. This does not destroy the igroup. 
""" lunpath = '/vol/' + lunpath igroup_name = self._find_igroup_for_initiator(host_id, initiator_name) if not igroup_name: return mapping = self._get_lun_mappping(host_id, lunpath, igroup_name) if mapping['mapped']: self._unmap_initiator(host_id, lunpath, igroup_name) def initialize_connection(self, volume, connector): """Driver entry point to attach a volume to an instance. Do the LUN masking on the storage system so the initiator can access the LUN on the target. Also return the iSCSI properties so the initiator can find the LUN. This implementation does not call _get_iscsi_properties() to get the properties because cannot store the LUN number in the database. We only find out what the LUN number will be during this method call so we construct the properties dictionary ourselves. """ initiator_name = connector['initiator'] lun_id = volume['provider_location'] if not lun_id: msg = _("No LUN ID for volume %s") % volume['name'] raise exception.VolumeBackendAPIException(data=msg) lun = self._get_lun_details(lun_id) lun_num = self._ensure_initiator_mapped(lun.HostId, lun.LunPath, initiator_name) host = self._get_host_details(lun.HostId) portal = self._get_target_portal_for_host(host.HostId, host.HostAddress) if not portal: msg = _('Failed to get target portal for filer: %s') raise exception.VolumeBackendAPIException(data=msg % host.HostName) iqn = self._get_iqn_for_host(host.HostId) if not iqn: msg = _('Failed to get target IQN for filer: %s') raise exception.VolumeBackendAPIException(data=msg % host.HostName) properties = {} properties['target_discovered'] = False (address, port) = (portal['address'], portal['port']) properties['target_portal'] = '%s:%s' % (address, port) properties['target_iqn'] = iqn properties['target_lun'] = lun_num properties['volume_id'] = volume['id'] auth = volume['provider_auth'] if auth: (auth_method, auth_username, auth_secret) = auth.split() properties['auth_method'] = auth_method properties['auth_username'] = auth_username 
properties['auth_password'] = auth_secret return { 'driver_volume_type': 'iscsi', 'data': properties, } def terminate_connection(self, volume, connector): """Driver entry point to unattach a volume from an instance. Unmask the LUN on the storage system so the given intiator can no longer access it. """ initiator_name = connector['initiator'] lun_id = volume['provider_location'] if not lun_id: msg = _('No LUN ID for volume %s') % volume['name'] raise exception.VolumeBackendAPIException(data=msg) lun = self._get_lun_details(lun_id) self._ensure_initiator_unmapped(lun.HostId, lun.LunPath, initiator_name) def _is_clone_done(self, host_id, clone_op_id, volume_uuid): """Check the status of a clone operation. Return True if done, False otherwise. """ request = self.client.factory.create('Request') request.Name = 'clone-list-status' clone_list_status_xml = ( '<clone-id><clone-id-info>' '<clone-op-id>%s</clone-op-id>' '<volume-uuid>%s</volume-uuid>' '</clone-id-info></clone-id>') request.Args = text.Raw(clone_list_status_xml % (clone_op_id, volume_uuid)) response = self.client.service.ApiProxy(Target=host_id, Request=request) self._check_fail(request, response) status = response.Results['status'] if self._api_elem_is_empty(status): return False ops_info = status[0]['ops-info'][0] state = ops_info['clone-state'][0] return 'completed' == state def _clone_lun(self, host_id, src_path, dest_path, snap): """Create a clone of a NetApp LUN. The clone initially consumes no space and is not space reserved. 
""" request = self.client.factory.create('Request') request.Name = 'clone-start' clone_start_xml = ( '<source-path>%s</source-path><no-snap>%s</no-snap>' '<destination-path>%s</destination-path>') if snap: no_snap = 'false' else: no_snap = 'true' request.Args = text.Raw(clone_start_xml % (src_path, no_snap, dest_path)) response = self.client.service.ApiProxy(Target=host_id, Request=request) self._check_fail(request, response) clone_id = response.Results['clone-id'][0] clone_id_info = clone_id['clone-id-info'][0] clone_op_id = clone_id_info['clone-op-id'][0] volume_uuid = clone_id_info['volume-uuid'][0] while not self._is_clone_done(host_id, clone_op_id, volume_uuid): time.sleep(5) def _refresh_dfm_luns(self, host_id): """Refresh the LUN list for one filer in DFM.""" server = self.client.service server.DfmObjectRefresh(ObjectNameOrId=host_id, ChildType='lun_path') while True: time.sleep(15) res = server.DfmMonitorTimestampList(HostNameOrId=host_id) for timestamp in res.DfmMonitoringTimestamp: if 'lun' != timestamp.MonitorName: continue if timestamp.LastMonitoringTimestamp: return def _destroy_lun(self, host_id, lun_path): """Destroy a LUN on the filer.""" request = self.client.factory.create('Request') request.Name = 'lun-offline' path_xml = '<path>%s</path>' request.Args = text.Raw(path_xml % lun_path) response = self.client.service.ApiProxy(Target=host_id, Request=request) self._check_fail(request, response) request = self.client.factory.create('Request') request.Name = 'lun-destroy' request.Args = text.Raw(path_xml % lun_path) response = self.client.service.ApiProxy(Target=host_id, Request=request) self._check_fail(request, response) def _resize_volume(self, host_id, vol_name, new_size): """Resize the volume by the amount requested.""" request = self.client.factory.create('Request') request.Name = 'volume-size' volume_size_xml = ( '<volume>%s</volume><new-size>%s</new-size>') request.Args = text.Raw(volume_size_xml % (vol_name, new_size)) response = 
self.client.service.ApiProxy(Target=host_id, Request=request) self._check_fail(request, response) def _create_qtree(self, host_id, vol_name, qtree_name): """Create a qtree the filer.""" request = self.client.factory.create('Request') request.Name = 'qtree-create' qtree_create_xml = ( '<mode>0755</mode><volume>%s</volume><qtree>%s</qtree>') request.Args = text.Raw(qtree_create_xml % (vol_name, qtree_name)) response = self.client.service.ApiProxy(Target=host_id, Request=request) self._check_fail(request, response) def create_snapshot(self, snapshot): """Driver entry point for creating a snapshot. This driver implements snapshots by using efficient single-file (LUN) cloning. """ vol_name = snapshot['volume_name'] snapshot_name = snapshot['name'] project = snapshot['project_id'] lun = self._lookup_lun_for_volume(vol_name, project) lun_id = lun.id lun = self._get_lun_details(lun_id) extra_gb = snapshot['volume_size'] new_size = '+%dg' % extra_gb self._resize_volume(lun.HostId, lun.VolumeName, new_size) # LunPath is the partial LUN path in this format: volume/qtree/lun lun_path = str(lun.LunPath) lun_name = lun_path[lun_path.rfind('/') + 1:] qtree_path = '/vol/%s/%s' % (lun.VolumeName, lun.QtreeName) src_path = '%s/%s' % (qtree_path, lun_name) dest_path = '%s/%s' % (qtree_path, snapshot_name) self._clone_lun(lun.HostId, src_path, dest_path, True) def delete_snapshot(self, snapshot): """Driver entry point for deleting a snapshot.""" vol_name = snapshot['volume_name'] snapshot_name = snapshot['name'] project = snapshot['project_id'] lun = self._lookup_lun_for_volume(vol_name, project) lun_id = lun.id lun = self._get_lun_details(lun_id) lun_path = '/vol/%s/%s/%s' % (lun.VolumeName, lun.QtreeName, snapshot_name) self._destroy_lun(lun.HostId, lun_path) extra_gb = snapshot['volume_size'] new_size = '-%dg' % extra_gb self._resize_volume(lun.HostId, lun.VolumeName, new_size) def create_volume_from_snapshot(self, volume, snapshot): """Driver entry point for creating a new volume 
from a snapshot. Many would call this "cloning" and in fact we use cloning to implement this feature. """ vol_size = volume['size'] snap_size = snapshot['volume_size'] if vol_size != snap_size: msg = _('Cannot create volume of size %(vol_size)s from ' 'snapshot of size %(snap_size)s') raise exception.VolumeBackendAPIException(data=msg % locals()) vol_name = snapshot['volume_name'] snapshot_name = snapshot['name'] project = snapshot['project_id'] lun = self._lookup_lun_for_volume(vol_name, project) lun_id = lun.id dataset = lun.dataset old_type = dataset.type new_type = self._get_ss_type(volume) if new_type != old_type: msg = _('Cannot create volume of type %(new_type)s from ' 'snapshot of type %(old_type)s') raise exception.VolumeBackendAPIException(data=msg % locals()) lun = self._get_lun_details(lun_id) extra_gb = vol_size new_size = '+%dg' % extra_gb self._resize_volume(lun.HostId, lun.VolumeName, new_size) clone_name = volume['name'] self._create_qtree(lun.HostId, lun.VolumeName, clone_name) src_path = '/vol/%s/%s/%s' % (lun.VolumeName, lun.QtreeName, snapshot_name) dest_path = '/vol/%s/%s/%s' % (lun.VolumeName, clone_name, clone_name) self._clone_lun(lun.HostId, src_path, dest_path, False) self._refresh_dfm_luns(lun.HostId) self._discover_dataset_luns(dataset, clone_name) def check_for_export(self, context, volume_id): raise NotImplementedError() class NetAppLun(object): """Represents a LUN on NetApp storage.""" def __init__(self, handle, name, size, metadata_dict): self.handle = handle self.name = name self.size = size self.metadata = metadata_dict def get_metadata_property(self, prop): """Get the metadata property of a LUN.""" if prop in self.metadata: return self.metadata[prop] name = self.name msg = _("No metadata property %(prop)s defined for the LUN %(name)s") LOG.debug(msg % locals()) class NetAppCmodeISCSIDriver(driver.ISCSIDriver): """NetApp C-mode iSCSI volume driver.""" def __init__(self, *args, **kwargs): super(NetAppCmodeISCSIDriver, 
self).__init__(*args, **kwargs) self.lun_table = {} def _create_client(self, **kwargs): """Instantiate a web services client. This method creates a "suds" client to make web services calls to the DFM server. Note that the WSDL file is quite large and may take a few seconds to parse. """ wsdl_url = kwargs['wsdl_url'] LOG.debug(_('Using WSDL: %s') % wsdl_url) if kwargs['cache']: self.client = client.Client(wsdl_url, username=kwargs['login'], password=kwargs['password']) else: self.client = client.Client(wsdl_url, username=kwargs['login'], password=kwargs['password'], cache=None) def _check_flags(self): """Ensure that the flags we care about are set.""" required_flags = ['netapp_wsdl_url', 'netapp_login', 'netapp_password', 'netapp_server_hostname', 'netapp_server_port'] for flag in required_flags: if not getattr(FLAGS, flag, None): msg = _('%s is not set') % flag raise exception.InvalidInput(data=msg) def do_setup(self, context): """Setup the NetApp Volume driver. Called one time by the manager after the driver is loaded. Validate the flags we care about and setup the suds (web services) client. """ self._check_flags() self._create_client(wsdl_url=FLAGS.netapp_wsdl_url, login=FLAGS.netapp_login, password=FLAGS.netapp_password, hostname=FLAGS.netapp_server_hostname, port=FLAGS.netapp_server_port, cache=True) def check_for_setup_error(self): """Check that the driver is working and can communicate. Discovers the LUNs on the NetApp server. 
""" self.lun_table = {} luns = self.client.service.ListLuns() for lun in luns: meta_dict = {} if hasattr(lun, 'Metadata'): meta_dict = self._create_dict_from_meta(lun.Metadata) discovered_lun = NetAppLun(lun.Handle, lun.Name, lun.Size, meta_dict) self._add_lun_to_table(discovered_lun) LOG.debug(_("Success getting LUN list from server")) def create_volume(self, volume): """Driver entry point for creating a new volume.""" default_size = '104857600' # 100 MB gigabytes = 1073741824L # 2^30 name = volume['name'] if int(volume['size']) == 0: size = default_size else: size = str(int(volume['size']) * gigabytes) extra_args = {} extra_args['OsType'] = 'linux' extra_args['QosType'] = self._get_qos_type(volume) extra_args['Container'] = volume['project_id'] extra_args['Display'] = volume['display_name'] extra_args['Description'] = volume['display_description'] extra_args['SpaceReserved'] = True server = self.client.service metadata = self._create_metadata_list(extra_args) lun = server.ProvisionLun(Name=name, Size=size, Metadata=metadata) LOG.debug(_("Created LUN with name %s") % name) self._add_lun_to_table(NetAppLun(lun.Handle, lun.Name, lun.Size, self._create_dict_from_meta(lun.Metadata))) def delete_volume(self, volume): """Driver entry point for destroying existing volumes.""" name = volume['name'] handle = self._get_lun_handle(name) self.client.service.DestroyLun(Handle=handle) LOG.debug(_("Destroyed LUN %s") % handle) self.lun_table.pop(name) def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume.""" handle = self._get_lun_handle(volume['name']) return {'provider_location': handle} def create_export(self, context, volume): """Driver entry point to get the export info for a new volume.""" handle = self._get_lun_handle(volume['name']) return {'provider_location': handle} def remove_export(self, context, volume): """Driver exntry point to remove an export for a volume. 
Since exporting is idempotent in this driver, we have nothing to do for unexporting. """ pass def initialize_connection(self, volume, connector): """Driver entry point to attach a volume to an instance. Do the LUN masking on the storage system so the initiator can access the LUN on the target. Also return the iSCSI properties so the initiator can find the LUN. This implementation does not call _get_iscsi_properties() to get the properties because cannot store the LUN number in the database. We only find out what the LUN number will be during this method call so we construct the properties dictionary ourselves. """ initiator_name = connector['initiator'] handle = volume['provider_location'] server = self.client.service server.MapLun(Handle=handle, InitiatorType="iscsi", InitiatorName=initiator_name) msg = _("Mapped LUN %(handle)s to the initiator %(initiator_name)s") LOG.debug(msg % locals()) target_details_list = server.GetLunTargetDetails(Handle=handle, InitiatorType="iscsi", InitiatorName=initiator_name) msg = _("Succesfully fetched target details for LUN %(handle)s and " "initiator %(initiator_name)s") LOG.debug(msg % locals()) if not target_details_list: msg = _('Failed to get LUN target details for the LUN %s') raise exception.VolumeBackendAPIException(data=msg % handle) target_details = target_details_list[0] if not target_details.Address and target_details.Port: msg = _('Failed to get target portal for the LUN %s') raise exception.VolumeBackendAPIException(data=msg % handle) iqn = target_details.Iqn if not iqn: msg = _('Failed to get target IQN for the LUN %s') raise exception.VolumeBackendAPIException(data=msg % handle) properties = {} properties['target_discovered'] = False (address, port) = (target_details.Address, target_details.Port) properties['target_portal'] = '%s:%s' % (address, port) properties['target_iqn'] = iqn properties['target_lun'] = target_details.LunNumber properties['volume_id'] = volume['id'] auth = volume['provider_auth'] if auth: 
(auth_method, auth_username, auth_secret) = auth.split() properties['auth_method'] = auth_method properties['auth_username'] = auth_username properties['auth_password'] = auth_secret return { 'driver_volume_type': 'iscsi', 'data': properties, } def terminate_connection(self, volume, connector): """Driver entry point to unattach a volume from an instance. Unmask the LUN on the storage system so the given intiator can no longer access it. """ initiator_name = connector['initiator'] handle = volume['provider_location'] self.client.service.UnmapLun(Handle=handle, InitiatorType="iscsi", InitiatorName=initiator_name) msg = _("Unmapped LUN %(handle)s from the initiator " "%(initiator_name)s") LOG.debug(msg % locals()) def create_snapshot(self, snapshot): """Driver entry point for creating a snapshot. This driver implements snapshots by using efficient single-file (LUN) cloning. """ vol_name = snapshot['volume_name'] snapshot_name = snapshot['name'] lun = self.lun_table[vol_name] extra_args = {'SpaceReserved': False} self._clone_lun(lun.handle, snapshot_name, extra_args) def delete_snapshot(self, snapshot): """Driver entry point for deleting a snapshot.""" handle = self._get_lun_handle(snapshot['name']) self.client.service.DestroyLun(Handle=handle) LOG.debug(_("Destroyed LUN %s") % handle) def create_volume_from_snapshot(self, volume, snapshot): """Driver entry point for creating a new volume from a snapshot. Many would call this "cloning" and in fact we use cloning to implement this feature. 
""" snapshot_name = snapshot['name'] lun = self.lun_table[snapshot_name] new_name = volume['name'] extra_args = {} extra_args['OsType'] = 'linux' extra_args['QosType'] = self._get_qos_type(volume) extra_args['Container'] = volume['project_id'] extra_args['Display'] = volume['display_name'] extra_args['Description'] = volume['display_description'] extra_args['SpaceReserved'] = True self._clone_lun(lun.handle, new_name, extra_args) def check_for_export(self, context, volume_id): raise NotImplementedError() def _get_qos_type(self, volume): """Get the storage service type for a volume.""" type_id = volume['volume_type_id'] if not type_id: return None volume_type = volume_types.get_volume_type(None, type_id) if not volume_type: return None return volume_type['name'] def _add_lun_to_table(self, lun): """Adds LUN to cache table.""" if not isinstance(lun, NetAppLun): msg = _("Object is not a NetApp LUN.") raise exception.VolumeBackendAPIException(data=msg) self.lun_table[lun.name] = lun def _clone_lun(self, handle, new_name, extra_args): """Clone LUN with the given handle to the new name.""" server = self.client.service metadata = self._create_metadata_list(extra_args) lun = server.CloneLun(Handle=handle, NewName=new_name, Metadata=metadata) LOG.debug(_("Cloned LUN with new name %s") % new_name) self._add_lun_to_table(NetAppLun(lun.Handle, lun.Name, lun.Size, self._create_dict_from_meta(lun.Metadata))) def _create_metadata_list(self, extra_args): """Creates metadata from kwargs.""" metadata = [] for key in extra_args.keys(): meta = self.client.factory.create("Metadata") meta.Key = key meta.Value = extra_args[key] metadata.append(meta) return metadata def _get_lun_handle(self, name): """Get the details for a LUN from our cache table.""" if not name in self.lun_table: LOG.warn(_("Could not find handle for LUN named %s") % name) return None return self.lun_table[name].handle def _create_dict_from_meta(self, metadata): """Creates dictionary from metadata array.""" meta_dict = 
{} if not metadata: return meta_dict for meta in metadata: meta_dict[meta.Key] = meta.Value return meta_dict
apache-2.0
GabrielFortin/ansible-module-f5bigip
library/f5bigip_ltm_snat.py
2
5863
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2018, Eric Jacob <erjac77@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}

DOCUMENTATION = '''
---
module: f5bigip_ltm_snat
short_description: BIG-IP ltm snat module
description:
    - You can use the snat component to configure a SNAT.
    - A SNAT defines the relationship between an externally visible IP address, SNAT IP address, or translated
      address, and a group of internal IP addresses, or originating addresses, of individual servers at your site.
version_added: "2.4"
author:
    - "Gabriel Fortin (@GabrielFortin)"
options:
    automap:
        description:
            - Specifies that the system translates the source IP address to an available self IP address when
              establishing connections through the virtual server.
        default: enabled
        choices: ['none', 'enabled']
    app_service:
        description:
            - Specifies the name of the application service to which this object belongs.
    description:
        description:
            - User-defined description.
    metadata:
        description:
            - Associates user defined data, each of which has name and value pair and persistence.
    mirror:
        description:
            - Enables or disables mirroring of SNAT connections.
    name:
        description:
            - Specifies unique name for the component.
        required: true
    origins:
        description:
            - Specifies a set of IP addresses and subnets from which connections originate.
        required: true
    partition:
        description:
            - Specifies the administrative partition in which the component object resides.
        default: Common
    state:
        description:
            - Specifies the state of the component on the BIG-IP system.
        default: present
        choices: ['absent', 'present']
    snatpool:
        description:
            - Specifies the name of a SNAT pool.
    source_port:
        description:
            - Specifies whether the system preserves the source port of the connection.
        default: preserve
        choices: ['change', 'preserve', 'preserve-strict']
    translation:
        description:
            - Specifies the name of a translated IP address.
    vlans:
        description:
            - Specifies the name of the VLAN to which you want to assign the SNAT.
    vlans_disabled:
        description:
            - Disables the SNAT on all VLANs.
    vlans_enabled:
        description:
            - Enables the SNAT on all VLANs.
requirements:
    - BIG-IP >= 12.0
    - ansible-common-f5
    - f5-sdk
'''

EXAMPLES = '''
- name: Create LTM Snat
  f5bigip_ltm_snat:
    f5_hostname: 172.16.227.35
    f5_username: admin
    f5_password: admin
    f5_port: 443
    name: my_snat
    partition: Common
    description: My snat
    vlans: external
    vlans_enabled: true
    state: present
  delegate_to: localhost
'''

RETURN = ''' # '''

from ansible.module_utils.basic import AnsibleModule
from ansible_common_f5.base import F5_NAMED_OBJ_ARGS
from ansible_common_f5.base import F5_PROVIDER_ARGS
from ansible_common_f5.bigip import F5BigIpNamedObject


class ModuleParams(object):
    # Builds the AnsibleModule configuration (argument spec, check-mode
    # support, mutual exclusions) consumed by main().

    @property
    def argument_spec(self):
        # Module-specific options; connection options (F5_PROVIDER_ARGS) and
        # object identity options — presumably name/partition/state — come
        # from F5_NAMED_OBJ_ARGS (TODO confirm against ansible-common-f5).
        #
        # NOTE(review): 'auto_lasthop' is accepted here but is missing from
        # DOCUMENTATION, and 'automap' is a bool here while DOCUMENTATION
        # lists string choices ['none', 'enabled'] — confirm which contract
        # is intended.
        argument_spec = dict(
            auto_lasthop=dict(type='str', default='default'),
            automap=dict(type='bool'),
            app_service=dict(type='str'),
            description=dict(type='str'),
            metadata=dict(type='list'),
            mirror=dict(type='str', choices=['none', 'enabled', 'disabled'], default='disabled'),
            origins=dict(type='list'),
            snatpool=dict(type='str'),
            source_port=dict(type='str', choices=['change', 'preserve', 'preserve-strict'], default='preserve'),
            translation=dict(type='str'),
            vlans=dict(type='str'),
            # NOTE(review): a True default on 'vlans_disabled' combined with
            # the mutual exclusion against 'vlans_enabled' below looks
            # suspicious — verify that supplying vlans_enabled (as in
            # EXAMPLES) does not trip the mutually_exclusive check.
            vlans_disabled=dict(type='bool', default=True),
            vlans_enabled=dict(type='bool')
        )
        argument_spec.update(F5_PROVIDER_ARGS)
        argument_spec.update(F5_NAMED_OBJ_ARGS)
        return argument_spec

    @property
    def supports_check_mode(self):
        return True

    @property
    def mutually_exclusive(self):
        # A SNAT is either automapped or backed by a snatpool, and is either
        # enabled or disabled on all VLANs — never both of either pair.
        return [
            ['vlans_disabled', 'vlans_enabled'],
            ['automap', 'snatpool']
        ]


class F5BigIpLtmSnat(F5BigIpNamedObject):
    # Thin CRUD adapter: maps the generic F5BigIpNamedObject lifecycle onto
    # the f5-sdk /mgmt/tm/ltm/snat REST endpoints.
    def _set_crud_methods(self):
        self._methods = {
            'create': self._api.tm.ltm.snats.snat.create,
            'read': self._api.tm.ltm.snats.snat.load,
            'update': self._api.tm.ltm.snats.snat.update,
            'delete': self._api.tm.ltm.snats.snat.delete,
            'exists': self._api.tm.ltm.snats.snat.exists
        }


def main():
    """Module entry point: build the AnsibleModule, flush the desired SNAT
    state to the BIG-IP, and exit with the resulting change report."""
    params = ModuleParams()
    module = AnsibleModule(argument_spec=params.argument_spec, supports_check_mode=params.supports_check_mode,
                           mutually_exclusive=params.mutually_exclusive)

    try:
        obj = F5BigIpLtmSnat(check_mode=module.check_mode, **module.params)
        result = obj.flush()
        module.exit_json(**result)
    except Exception as exc:
        # Convert any provider/SDK error into a module failure message.
        module.fail_json(msg=str(exc))


if __name__ == '__main__':
    main()
apache-2.0
Edraak/edx-platform
common/djangoapps/util/module_utils.py
106
1387
"""
Utility library containing operations used/shared by multiple courseware modules
"""


def yield_dynamic_descriptor_descendants(descriptor, user_id, module_creator):  # pylint: disable=invalid-name
    """
    Generate `descriptor` and every descendant beneath it, depth-first.

    Descriptors that declare dynamic children are instantiated as modules via
    `module_creator` so their effective (per-user) children can be discovered;
    see `get_dynamic_descriptor_children`.
    """
    pending = [descriptor]
    while pending:
        current = pending.pop()
        pending.extend(get_dynamic_descriptor_children(current, user_id, module_creator))
        yield current


def get_dynamic_descriptor_children(descriptor, user_id, module_creator=None, usage_key_filter=None):
    """
    Return the children of `descriptor`, supporting dynamic children.

    Without dynamic children this is simply
    `descriptor.get_children(usage_key_filter)`.  Otherwise the descriptor is
    bound to a module — reusing `descriptor` itself when it is already bound
    to `user_id`, else calling `module_creator(descriptor)` — and that
    module's `get_child_descriptors()` is returned; an empty list is returned
    when no module could be created.
    """
    if not descriptor.has_dynamic_children():
        return descriptor.get_children(usage_key_filter)

    # Reuse the descriptor when it is already bound to this user; rebinding
    # would discard the existing per-user binding.
    if descriptor.scope_ids.user_id and user_id == descriptor.scope_ids.user_id:
        module = descriptor
    else:
        module = module_creator(descriptor)

    if module is None:
        return []
    return module.get_child_descriptors()
agpl-3.0
NazarethCollege/heweb2017-devops-presentation
sites/tweetheat/src/backend/vendor/src/github.com/youtube/vitess/test/vtgate_gateway_flavor/discoverygateway.py
4
1256
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Contain VTGate discovery gateway flavor."""

import gateway


class DiscoveryGateway(gateway.VTGateGateway):
    """VTGate gateway flavor backed by the discovery (health-check) gateway."""

    def flags(self, cell=None, tablets=None):
        """Return a list of args that tell a VTGate process to start with."""
        cmdline_args = ['-cells_to_watch', cell]
        return cmdline_args

    def connection_count_vars(self):
        """Return the vars name containing the number of serving connections."""
        return 'HealthcheckConnections'

    def no_tablet_found_message(self):
        """Return the text message that appears in the gateway."""
        return 'no valid tablet'


# Make this flavor selectable by name from the test harness.
gateway.register_flavor('discoverygateway', DiscoveryGateway)
mit
daenamkim/ansible
lib/ansible/modules/cloud/rackspace/rax_mon_notification_plan.py
29
5674
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: rax_mon_notification_plan
short_description: Create or delete a Rackspace Cloud Monitoring notification
                   plan.
description:
- Create or delete a Rackspace Cloud Monitoring notification plan by
  associating existing rax_mon_notifications with severity levels. Rackspace
  monitoring module flow | rax_mon_entity -> rax_mon_check ->
  rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm
version_added: "2.0"
options:
  state:
    description:
    - Ensure that the notification plan with this C(label) exists or does not
      exist.
    choices: ['present', 'absent']
  label:
    description:
    - Defines a friendly name for this notification plan. String between 1 and
      255 characters long.
    required: true
  critical_state:
    description:
    - Notification list to use when the alarm state is CRITICAL. Must be an
      array of valid rax_mon_notification ids.
  warning_state:
    description:
    - Notification list to use when the alarm state is WARNING. Must be an
      array of valid rax_mon_notification ids.
  ok_state:
    description:
    - Notification list to use when the alarm state is OK. Must be an array of
      valid rax_mon_notification ids.
author: Ash Wilson
extends_documentation_fragment: rackspace.openstack
'''

EXAMPLES = '''
- name: Example notification plan
  gather_facts: False
  hosts: local
  connection: local
  tasks:
  - name: Establish who gets called when.
    rax_mon_notification_plan:
      credentials: ~/.rax_pub
      state: present
      label: defcon1
      critical_state:
      - "{{ everyone['notification']['id'] }}"
      warning_state:
      - "{{ opsfloor['notification']['id'] }}"
    register: defcon1
'''

# pyrax is optional at import time; absence is reported via fail_json in main().
try:
    import pyrax
    HAS_PYRAX = True
except ImportError:
    HAS_PYRAX = False

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module


def notification_plan(module, state, label, critical_state, warning_state, ok_state):
    """Ensure the notification plan labelled `label` is present or absent.

    present: when a plan with this label already exists but any requested
    severity list differs, the existing plan is deleted and recreated (no
    in-place update is attempted).  absent: every plan carrying the label is
    deleted.  Always exits the module via exit_json/fail_json.
    """
    if len(label) < 1 or len(label) > 255:
        module.fail_json(msg='label must be between 1 and 255 characters long')

    changed = False
    notification_plan = None

    cm = pyrax.cloud_monitoring
    if not cm:
        module.fail_json(msg='Failed to instantiate client. This typically '
                             'indicates an invalid region or an incorrectly '
                             'capitalized region name.')

    # Collect every existing plan with the requested label.
    existing = []
    for n in cm.list_notification_plans():
        if n.label == label:
            existing.append(n)

    if existing:
        notification_plan = existing[0]

    if state == 'present':
        should_create = False
        should_delete = False

        # Duplicate labels are ambiguous; refuse to pick one.
        if len(existing) > 1:
            module.fail_json(msg='%s notification plans are labelled %s.'
                                 % (len(existing), label))

        if notification_plan:
            # Only severity lists that were explicitly requested (truthy)
            # participate in the difference check.
            should_delete = (critical_state and critical_state != notification_plan.critical_state) or \
                (warning_state and warning_state != notification_plan.warning_state) or \
                (ok_state and ok_state != notification_plan.ok_state)

            if should_delete:
                # Replace-on-change: delete the stale plan, then recreate.
                notification_plan.delete()
                should_create = True
        else:
            should_create = True

        if should_create:
            notification_plan = cm.create_notification_plan(label=label,
                                                            critical_state=critical_state,
                                                            warning_state=warning_state,
                                                            ok_state=ok_state)
            changed = True
    else:
        # state == 'absent': remove every plan carrying this label.
        for np in existing:
            np.delete()
            changed = True

    # NOTE(review): in the 'absent' branch `notification_plan` still refers to
    # the first (now deleted) plan, so its details are echoed back below —
    # confirm this is intended.
    if notification_plan:
        notification_plan_dict = {
            "id": notification_plan.id,
            "critical_state": notification_plan.critical_state,
            "warning_state": notification_plan.warning_state,
            "ok_state": notification_plan.ok_state,
            "metadata": notification_plan.metadata
        }
        module.exit_json(changed=changed, notification_plan=notification_plan_dict)
    else:
        module.exit_json(changed=changed)


def main():
    """Module entry point: parse arguments, set up pyrax, apply the plan."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            state=dict(default='present', choices=['present', 'absent']),
            label=dict(required=True),
            critical_state=dict(type='list'),
            warning_state=dict(type='list'),
            ok_state=dict(type='list')
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together()
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    state = module.params.get('state')
    label = module.params.get('label')
    critical_state = module.params.get('critical_state')
    warning_state = module.params.get('warning_state')
    ok_state = module.params.get('ok_state')

    # Authenticates and selects the region before any cloud_monitoring call.
    setup_rax_module(module, pyrax)

    notification_plan(module, state, label, critical_state, warning_state, ok_state)


if __name__ == '__main__':
    main()
gpl-3.0
MattFaus/CrowdTube-Connector
lib/gdata-2.0.18/src/atom/http_interface.py
15
5182
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This module provides a common interface for all HTTP requests.

  HttpResponse: Represents the server's response to an HTTP request. Provides
      an interface identical to httplib.HTTPResponse which is the response
      expected from higher level classes which use HttpClient.request.

  GenericHttpClient: Provides an interface (superclass) for an object
      responsible for making HTTP requests. Subclasses of this object are
      used in AtomService and GDataService to make requests to the server. By
      changing the http_client member object, the AtomService is able to make
      HTTP requests using different logic (for example, when running on
      Google App Engine, the http_client makes requests using the App Engine
      urlfetch API).
"""

__author__ = 'api.jscudder (Jeff Scudder)'

import StringIO

# Template for the User-Agent header; '%s' is filled in by the caller with
# its own application identifier.
USER_AGENT = '%s GData-Python/2.0.18'


class Error(Exception):
    """Base exception for this module."""
    pass


class UnparsableUrlObject(Error):
    pass


class ContentLengthRequired(Error):
    pass


class HttpResponse(object):
    def __init__(self, body=None, status=None, reason=None, headers=None):
        """Constructor for an HttpResponse object.

        HttpResponse represents the server's response to an HTTP request from
        the client. The HttpClient.request method returns a
        httplib.HTTPResponse object and this HttpResponse class is designed to
        mirror the interface exposed by httplib.HTTPResponse.

        Args:
          body: A file like object, with a read() method. The body could also
              be a string, and the constructor will wrap it so that
              HttpResponse.read(self) will return the full string.
          status: The HTTP status code as an int. Example: 200, 201, 404.
          reason: The HTTP status message which follows the code. Example:
              OK, Created, Not Found
          headers: A dictionary containing the HTTP headers in the server's
              response. A common header in the response is Content-Length.
        """
        if body:
            if hasattr(body, 'read'):
                self._body = body
            else:
                # Wrap a plain string so read() behaves like a file.
                self._body = StringIO.StringIO(body)
        else:
            self._body = None
        if status is not None:
            self.status = int(status)
        else:
            self.status = None
        self.reason = reason
        self._headers = headers or {}

    def getheader(self, name, default=None):
        """Return the value of header `name`, or `default` if absent."""
        if name in self._headers:
            return self._headers[name]
        else:
            return default

    def read(self, amt=None):
        """Read the body, mirroring httplib.HTTPResponse.read.

        Args:
          amt: Maximum number of bytes to return; None reads everything
              remaining.

        Fix: the previous guard was `if not amt`, which treated read(0) as
        read-everything; read(0) now correctly returns ''.
        """
        if amt is None:
            return self._body.read()
        else:
            return self._body.read(amt)


class GenericHttpClient(object):
    debug = False

    def __init__(self, http_client, headers=None):
        """
        Args:
          http_client: An object which provides a request method to make an
              HTTP request. The request method in GenericHttpClient performs a
              call-through to the contained HTTP client object.
          headers: A dictionary containing HTTP headers which should be
              included in every HTTP request. Common persistent headers
              include 'User-Agent'.
        """
        self.http_client = http_client
        self.headers = headers or {}

    def request(self, operation, url, data=None, headers=None):
        # Per-request headers override the persistent defaults.
        all_headers = self.headers.copy()
        if headers:
            all_headers.update(headers)
        return self.http_client.request(operation, url, data=data,
                                        headers=all_headers)

    def get(self, url, headers=None):
        return self.request('GET', url, headers=headers)

    def post(self, url, data, headers=None):
        return self.request('POST', url, data=data, headers=headers)

    def put(self, url, data, headers=None):
        return self.request('PUT', url, data=data, headers=headers)

    def delete(self, url, headers=None):
        return self.request('DELETE', url, headers=headers)


class GenericToken(object):
    """Represents an Authorization token to be added to HTTP requests.

    Some Authorization headers included calculated fields (digital
    signatures for example) which are based on the parameters of the HTTP
    request. Therefore the token is responsible for signing the request
    and adding the Authorization header.
    """

    def perform_request(self, http_client, operation, url, data=None,
                        headers=None):
        """For the GenericToken, no Authorization token is set."""
        return http_client.request(operation, url, data=data, headers=headers)

    def valid_for_scope(self, url):
        """Tells the caller if the token authorizes access to the desired URL.

        Since the generic token doesn't add an auth header, it is not valid
        for any scope.
        """
        return False
mit
NikosAlexandris/landsat8_metadata
landsat8_metadata.py
1
8942
#!/usr/bin/python
# -*- coding: utf-8 -*-

"""
@author nik |
"""

import sys
from collections import namedtuple

# globals
MTLFILE = ''
# Placeholder identifiers substituted into r.mapcalc expressions; the caller
# replaces them with actual raster map names.
DUMMY_MAPCALC_STRING_RADIANCE = 'Radiance'
DUMMY_MAPCALC_STRING_DN = 'DigitalNumber'


# helper functions
def set_mtlfile():
    """
    Set user defined MTL file, if any.

    Returns the first command line argument when present, otherwise False
    (note: False, not '' — callers test truthiness).
    """
    if len(sys.argv) > 1:
        return sys.argv[1]
    else:
        return False


class Landsat8():
    """
    Retrieve metadata from a Landsat8 MTL file.
    See <http://landsat.usgs.gov/Landsat8_Using_Product.php>.

    ToDo:
    - Implement toar_reflectance
    - Implement mechanism to translate QA pixel values to QA bits,
      and vice versa?
    - Other Landsat8 related functions/algorithms?
    """

    def __init__(self, mtl_filename):
        """
        Initialise class object based on a Landsat8 MTL filename.
        """
        # read lines
        with open(mtl_filename, 'r') as mtl_file:
            mtl_lines = mtl_file.readlines()

        # close and remove 'mtl_file'
        # NOTE(review): the 'with' block above already closed the file, so
        # this close() is redundant (harmless) — kept as-is.
        mtl_file.close()
        del(mtl_file)

        # clean and convert MTL lines in to a named tuple
        self.mtl = self._to_namedtuple(mtl_lines, 'metadata')
        self._set_attributes()

        # shorten LANDSAT_SCENE_ID, SENSOR_ID
        self.scene_id = self.mtl.LANDSAT_SCENE_ID
        self.sensor = self.mtl.SENSOR_ID

        # bounding box related (lat/lon corners and their projected x/y)
        self.corner_ul = (self.mtl.CORNER_UL_LAT_PRODUCT,
                          self.mtl.CORNER_UL_LON_PRODUCT)
        self.corner_lr = (self.mtl.CORNER_LR_LAT_PRODUCT,
                          self.mtl.CORNER_LR_LON_PRODUCT)
        self.corner_ul_projection = (self.mtl.CORNER_UL_PROJECTION_X_PRODUCT,
                                     self.mtl.CORNER_UL_PROJECTION_Y_PRODUCT)
        self.corner_lr_projection = (self.mtl.CORNER_LR_PROJECTION_X_PRODUCT,
                                     self.mtl.CORNER_LR_PROJECTION_Y_PRODUCT)

        self.cloud_cover = self.mtl.CLOUD_COVER

    def _to_namedtuple(self, list_of_lines, name_for_tuple):
        """
        This function performs the following actions on the given
        'list_of_lines':

        - excludes lines containing the strings 'GROUP' and 'END'
        - removes whitespaces and doublequotes from strings
        - converts list of lines in to a named tuple
        """
        import string

        # exclude lines containing 'GROUP', 'END'
        lines = [line.strip() for line in list_of_lines if not
                 any(x in line for x in ('GROUP', 'END'))]

        # keep a copy, maybe useful?
        self._mtl_lines = lines
        del(list_of_lines)

        # empty variables to hold values
        field_names = []
        field_values = []

        # loop over lines, do some cleaning
        for idx in range(len(lines)):

            # split line in '='
            line = lines[idx]
            line_split = line.split('=')

            # get field name & field value, clean whitespaces and "
            field_name = line_split[0].strip()
            field_names.append(field_name)

            field_value = line_split[1].strip()
            # Python 2 str.translate(table, deletechars): delete double quotes.
            field_value = field_value.translate(string.maketrans("", "",), '"')
            field_values.append(field_value)

        # named tuple
        named_tuple = namedtuple(name_for_tuple, field_names)

        # return named tuple
        return named_tuple(*field_values)

    def _set_attributes(self):
        """
        Set all parsed field names and values, from the MTL file, fed to the
        named tuple 'self.mtl', as attributes to the object (lower-cased).
        """
        for field in self.mtl._fields:
            field_lowercase = field.lower()
            field_value = getattr(self.mtl, field)
            setattr(self, field_lowercase, field_value)

    def __str__(self):
        """
        Return a string representation of the scene's id.
        """
        msg = 'Landsat8 scene ID:'
        return msg + ' ' + self.scene_id

    def _get_mtl_lines(self):
        """
        Return the "hidden" copy of the MTL lines before cleaning (lines
        containing 'GROUP' or 'END' are though excluded).
        """
        return self._mtl_lines

    def toar_radiance(self, bandnumber):
        """
        Note, this function returns a valid expression for GRASS GIS'
        r.mapcalc raster processing module.

        Conversion of Digital Numbers to TOA Radiance. OLI and TIRS band data
        can be converted to TOA spectral radiance using the radiance rescaling
        factors provided in the metadata file:

        Lλ = ML * Qcal + AL

        where:

        - Lλ = TOA spectral radiance (Watts/( m2 * srad * μm))
        - ML = Band-specific multiplicative rescaling factor from the metadata
          (RADIANCE_MULT_BAND_x, where x is the band number)
        - AL = Band-specific additive rescaling factor from the metadata
          (RADIANCE_ADD_BAND_x, where x is the band number)
        - Qcal = Quantized and calibrated standard product pixel values (DN)

        Some code borrowed from
        <https://github.com/micha-silver/grass-landsat8/blob/master/r.in.landsat8.py>
        """
        multiplicative_factor = getattr(self.mtl, ('RADIANCE_MULT_BAND_' +
                                                   str(bandnumber)))
        additive_factor = getattr(self.mtl, 'RADIANCE_ADD_BAND_' +
                                  str(bandnumber))
        # Expression uses the DN placeholder; caller substitutes the map name.
        formula = '{ML}*{DUMMY_DN} + {AL}'
        mapcalc = formula.format(ML=multiplicative_factor,
                                 DUMMY_DN=DUMMY_MAPCALC_STRING_DN,
                                 AL=additive_factor)
        return mapcalc

    def toar_reflectance(self, bandnumber):
        """
        Note, this function returns a valid expression for GRASS GIS'
        r.mapcalc raster processing module.

        Conversion to TOA Reflectance. OLI band data can also be converted to
        TOA planetary reflectance using reflectance rescaling coefficients
        provided in the product metadata file (MTL file):

        ρλ' = Mρ * Qcal + Aρ

        where:

        - ρλ' = TOA planetary reflectance, without correction for solar angle.
        - Mρ = Band-specific multiplicative rescaling factor from the metadata
          (REFLECTANCE_MULT_BAND_x, where x is the band number)
        - Aρ = Band-specific additive rescaling factor from the metadata
          (REFLECTANCE_ADD_BAND_x, where x is the band number)
        - Qcal = Quantized and calibrated standard product pixel values (DN)

        TOA reflectance with a correction for the sun angle is then
        ρλ = ρλ' / cos(θSZ) = ρλ' / sin(θSE), where:

        - ρλ = TOA planetary reflectance
        - θSE = Local sun elevation angle (SUN_ELEVATION in the metadata)
        - θSZ = Local solar zenith angle; θSZ = 90° - θSE

        For more accurate reflectance calculations, per pixel solar angles
        could be used instead of the scene center solar angle, but per pixel
        solar zenith angles are not currently provided with the Landsat 8
        products.

        Not implemented yet (see class ToDo).
        """
        pass

    def radiance_to_temperature(self, bandnumber):
        """
        Note, this function returns a valid expression for GRASS GIS'
        r.mapcalc raster processing module.

        Conversion to At-Satellite Brightness Temperature. TIRS band data can
        be converted from spectral radiance to brightness temperature using
        the thermal constants provided in the metadata file:

        T = K2 / ln( (K1/Lλ) + 1 )

        where:

        - T = At-satellite brightness temperature (K)
        - Lλ = TOA spectral radiance (Watts/( m2 * srad * μm)), below
          'DUMMY_RADIANCE'
        - K1 = Band-specific thermal conversion constant from the metadata
          (K1_CONSTANT_BAND_x, where x is the band number, 10 or 11)
        - K2 = Band-specific thermal conversion constant from the metadata
          (K2_CONSTANT_BAND_x, where x is the band number, 10 or 11)
        """
        k2 = getattr(self.mtl, ('K2_CONSTANT_BAND_' + str(bandnumber)))
        k1 = getattr(self.mtl, ('K1_CONSTANT_BAND_' + str(bandnumber)))
        # 'log' here is r.mapcalc's natural logarithm, evaluated by GRASS.
        formula = '{K2} / ( log({K1} / {DUMMY_RADIANCE} + 1))'
        mapcalc = formula.format(K2=k2, K1=k1,
                                 DUMMY_RADIANCE=DUMMY_MAPCALC_STRING_RADIANCE)
        return mapcalc


def main():
    """
    Main program.
    """
    # NOTE(review): MTLFILE below is a local that shadows the module-level
    # MTLFILE (no 'global' statement), so the global is never updated —
    # confirm intent.
    if set_mtlfile():
        MTLFILE = set_mtlfile()
        print "| Reading metadata from:", MTLFILE
    else:
        MTLFILE = ''

if __name__ == "__main__":
    main()
gpl-3.0
xuxiao19910803/edx-platform
common/lib/xmodule/xmodule/abtest_module.py
86
5331
import random
import logging

from lxml import etree

from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from xmodule.xml_module import XmlDescriptor
from xmodule.exceptions import InvalidDefinitionError
from xblock.fields import String, Scope, Dict

DEFAULT = "_DEFAULT_GROUP"

log = logging.getLogger(__name__)


def group_from_value(groups, v):
    """
    Given group: (('a', 0.3), ('b', 0.4), ('c', 0.3)) and random value v
    in [0, 1], return the associated group (in the above case, return 'a'
    if v < 0.3, 'b' if 0.3 <= v < 0.7, and 'c' if v >= 0.7).
    """
    # Renamed the accumulator so it no longer shadows the builtin sum().
    cumulative = 0
    for (g, p) in groups:
        cumulative = cumulative + p
        if cumulative > v:
            return g

    # Round-off errors might cause us to run to the end of the list.
    # If they do, return the last element.
    return g


class ABTestFields(object):
    # What proportion of students should land in each group (name -> float).
    group_portions = Dict(
        help="What proportions of students should go in each group",
        default={DEFAULT: 1},
        scope=Scope.content
    )
    # Sticky per-user assignment, keyed by experiment name.
    group_assignments = Dict(
        help="What group this user belongs to",
        scope=Scope.preferences,
        default={}
    )
    # Content (usage ids) shown to each group.
    group_content = Dict(
        help="What content to display to each group",
        scope=Scope.content,
        default={DEFAULT: []}
    )
    experiment = String(help="Experiment that this A/B test belongs to",
                        scope=Scope.content)
    has_children = True


class ABTestModule(ABTestFields, XModule):
    """
    Implements an A/B test with an arbitrary number of competing groups
    """

    def __init__(self, *args, **kwargs):
        super(ABTestModule, self).__init__(*args, **kwargs)
        # Assign the user to a group once, on first sight; the assignment is
        # persisted in group_assignments so it sticks across sessions.
        if self.group is None:
            self.group = group_from_value(
                self.group_portions.items(),
                random.uniform(0, 1)
            )

    @property
    def group(self):
        return self.group_assignments.get(self.experiment)

    @group.setter
    def group(self, value):
        self.group_assignments[self.experiment] = value

    @group.deleter
    def group(self):
        del self.group_assignments[self.experiment]

    def get_child_descriptors(self):
        active_locations = set(self.group_content[self.group])
        return [desc for desc in self.descriptor.get_children()
                if desc.location.to_deprecated_string() in active_locations]

    def displayable_items(self):
        # Most modules return "self" as the displayable_item. We never
        # display ourself (which is why we don't implement get_html). We only
        # display our children.
        return self.get_children()


# TODO (cpennington): Use Groups should be a first class object, rather than
# being managed by ABTests
class ABTestDescriptor(ABTestFields, RawDescriptor, XmlDescriptor):
    module_class = ABTestModule

    @classmethod
    def definition_from_xml(cls, xml_object, system):
        """
        XML Format:
        <abtest experiment="experiment_name">
            <group name="a" portion=".1"><contenta/></group>
            <group name="b" portion=".2"><contentb/></group>
            <default><contentdefault/></default>
        </abtest>
        """
        experiment = xml_object.get('experiment')

        if experiment is None:
            raise InvalidDefinitionError(
                "ABTests must specify an experiment. Not found in:\n{xml}"
                .format(xml=etree.tostring(xml_object, pretty_print=True)))

        group_portions = {}
        group_content = {}
        children = []

        for group in xml_object:
            if group.tag == 'default':
                name = DEFAULT
            else:
                name = group.get('name')
                group_portions[name] = float(group.get('portion', 0))

            child_content_urls = []
            for child in group:
                try:
                    child_block = system.process_xml(etree.tostring(child))
                    child_content_urls.append(child_block.scope_ids.usage_id)
                except Exception:
                    # Narrowed from a bare except: which also swallowed
                    # KeyboardInterrupt/SystemExit.
                    log.exception("Unable to load child when parsing ABTest. Continuing...")
                    continue

            group_content[name] = child_content_urls
            children.extend(child_content_urls)

        # Whatever probability mass is left over goes to the default group.
        default_portion = 1 - sum(
            portion for (name, portion) in group_portions.items()
        )

        if default_portion < 0:
            raise InvalidDefinitionError("ABTest portions must add up to less than or equal to 1")

        group_portions[DEFAULT] = default_portion
        children.sort()

        return {
            'group_portions': group_portions,
            'group_content': group_content,
        }, children

    def definition_to_xml(self, resource_fs):
        xml_object = etree.Element('abtest')
        xml_object.set('experiment', self.experiment)
        for name, group in self.group_content.items():
            if name == DEFAULT:
                group_elem = etree.SubElement(xml_object, 'default')
            else:
                group_elem = etree.SubElement(xml_object, 'group', attrib={
                    'portion': str(self.group_portions[name]),
                    'name': name,
                })

            for child_loc in group:
                child = self.system.load_item(child_loc)
                self.runtime.add_block_as_child_node(child, group_elem)

        return xml_object

    def has_dynamic_children(self):
        return True
agpl-3.0
zoyahav/incubator-airflow
airflow/contrib/operators/spark_submit_operator.py
17
5471
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging

from airflow.contrib.hooks.spark_submit_hook import SparkSubmitHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.settings import WEB_COLORS

log = logging.getLogger(__name__)


class SparkSubmitOperator(BaseOperator):
    """
    This hook is a wrapper around the spark-submit binary to kick off a
    spark-submit job. It requires that the "spark-submit" binary is in the
    PATH or the spark-home is set in the extra on the connection.

    :param application: The application that submitted as a job, either jar
        or py file.
    :type application: str
    :param conf: Arbitrary Spark configuration properties
    :type conf: dict
    :param conn_id: The connection id as configured in Airflow
        administration. When an invalid connection_id is supplied, it will
        default to yarn.
    :type conn_id: str
    :param files: Upload additional files to the container running the job,
        separated by a comma. For example hive-site.xml.
    :type files: str
    :param py_files: Additional python files used by the job, can be .zip,
        .egg or .py.
    :type py_files: str
    :param jars: Submit additional jars to upload and place them in executor
        classpath.
    :type jars: str
    :param java_class: the main class of the Java application
    :type java_class: str
    :param total_executor_cores: (Standalone & Mesos only) Total cores for
        all executors (Default: all the available cores on the worker)
    :type total_executor_cores: int
    :param executor_cores: (Standalone & YARN only) Number of cores per
        executor (Default: 2)
    :type executor_cores: int
    :param executor_memory: Memory per executor (e.g. 1000M, 2G)
        (Default: 1G)
    :type executor_memory: str
    :param driver_memory: Memory allocated to the driver (e.g. 1000M, 2G)
        (Default: 1G)
    :type driver_memory: str
    :param keytab: Full path to the file that contains the keytab
    :type keytab: str
    :param principal: The name of the kerberos principal used for keytab
    :type principal: str
    :param name: Name of the job (default airflow-spark)
    :type name: str
    :param num_executors: Number of executors to launch
    :type num_executors: int
    :param application_args: Arguments for the application being submitted
    :type application_args: list
    :param verbose: Whether to pass the verbose flag to spark-submit process
        for debugging
    :type verbose: bool
    """
    template_fields = ('_name', '_application_args',)
    ui_color = WEB_COLORS['LIGHTORANGE']

    @apply_defaults
    def __init__(self,
                 application='',
                 conf=None,
                 conn_id='spark_default',
                 files=None,
                 py_files=None,
                 jars=None,
                 java_class=None,
                 total_executor_cores=None,
                 executor_cores=None,
                 executor_memory=None,
                 driver_memory=None,
                 keytab=None,
                 principal=None,
                 name='airflow-spark',
                 num_executors=None,
                 application_args=None,
                 verbose=False,
                 *args,
                 **kwargs):
        super(SparkSubmitOperator, self).__init__(*args, **kwargs)
        self._application = application
        self._conf = conf
        self._files = files
        self._py_files = py_files
        self._jars = jars
        self._java_class = java_class
        self._total_executor_cores = total_executor_cores
        self._executor_cores = executor_cores
        self._executor_memory = executor_memory
        self._driver_memory = driver_memory
        self._keytab = keytab
        self._principal = principal
        self._name = name
        self._num_executors = num_executors
        self._application_args = application_args
        self._verbose = verbose
        self._hook = None
        self._conn_id = conn_id

    def execute(self, context):
        """
        Call the SparkSubmitHook to run the provided spark job
        """
        self._hook = SparkSubmitHook(
            conf=self._conf,
            conn_id=self._conn_id,
            files=self._files,
            py_files=self._py_files,
            jars=self._jars,
            java_class=self._java_class,
            total_executor_cores=self._total_executor_cores,
            executor_cores=self._executor_cores,
            executor_memory=self._executor_memory,
            driver_memory=self._driver_memory,
            keytab=self._keytab,
            principal=self._principal,
            name=self._name,
            num_executors=self._num_executors,
            application_args=self._application_args,
            verbose=self._verbose
        )
        self._hook.submit(self._application)

    def on_kill(self):
        # BUG FIX: self._hook is None until execute() has run; a task kill
        # arriving before then used to raise AttributeError here.
        if self._hook is not None:
            self._hook.on_kill()
apache-2.0
waltharius/NewsBlur
utils/mongo_raw_log_middleware.py
9
4434
from django.core.exceptions import MiddlewareNotUsed
from django.conf import settings
from django.db import connection
from pymongo.mongo_client import MongoClient
from pymongo.mongo_replica_set_client import MongoReplicaSetClient
from time import time
import struct
import bson
from bson.errors import InvalidBSON


class MongoDumpMiddleware(object):
    """
    Instrument pymongo so every MongoDB query issued while handling a
    request is decoded, timed, and appended to Django's
    ``connection.queries`` list (mirroring what Django does for SQL).
    """

    def activated(self, request):
        # Profile when DEBUG_QUERIES is globally enabled, or when this
        # request explicitly turned on the 'db_profiler' segment.
        return (settings.DEBUG_QUERIES or
                (hasattr(request, 'activated_segments') and
                 'db_profiler' in request.activated_segments))

    def process_view(self, request, callback, callback_args, callback_kwargs):
        if not self.activated(request):
            return
        self._used_msg_ids = []
        if not getattr(MongoClient, '_logging', False):
            # Monkey-patch only once per process; the _logging flag marks the
            # client classes as already instrumented.
            setattr(MongoClient, '_logging', True)
            MongoClient._send_message_with_response = \
                self._instrument(MongoClient._send_message_with_response)
            MongoReplicaSetClient._send_message_with_response = \
                self._instrument(MongoReplicaSetClient._send_message_with_response)
        return None

    def process_response(self, request, response):
        # The instrumentation is deliberately left installed for the life of
        # the process (see the _logging flag above), so nothing to undo here.
        return response

    def _instrument(self, original_method):
        """Wrap a pymongo send method so each decoded query is timed."""
        def instrumented_method(*args, **kwargs):
            message = _mongodb_decode_wire_protocol(args[1][1])
            # Skip undecodable messages and ones we have already recorded.
            if not message or message['msg_id'] in self._used_msg_ids:
                return original_method(*args, **kwargs)
            self._used_msg_ids.append(message['msg_id'])
            start = time()
            result = original_method(*args, **kwargs)
            stop = time()
            duration = stop - start
            connection.queries.append({
                'mongo': message,
                'time': '%.3f' % duration,
            })
            return result
        return instrumented_method


def _mongodb_decode_wire_protocol(message):
    """ http://www.mongodb.org/display/DOCS/Mongo+Wire+Protocol """
    MONGO_OPS = {
        1000: 'msg',
        2001: 'update',
        2002: 'insert',
        2003: 'reserved',
        2004: 'query',
        2005: 'get_more',
        2006: 'delete',
        2007: 'kill_cursors',
    }
    # Standard message header: length, request id, response-to, opcode, flags.
    _, msg_id, _, opcode, _ = struct.unpack('<iiiii', message[:20])
    op = MONGO_OPS.get(opcode, 'unknown')
    zidx = 20
    # The fully-qualified collection name is a NUL-terminated cstring.
    collection_name_size = message[zidx:].find('\0')
    collection_name = message[zidx:zidx + collection_name_size]
    if '.system.' in collection_name:
        # Internal system collections are not interesting to profile.
        return
    zidx += collection_name_size + 1
    skip, limit = struct.unpack('<ii', message[zidx:zidx + 8])
    zidx += 8
    msg = ""
    try:
        if message[zidx:]:
            msg = bson.decode_all(message[zidx:])
    except Exception:
        # BUG FIX: was a bare ``except:`` which also caught
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        msg = 'invalid bson'
    return {'op': op, 'collection': collection_name,
            'msg_id': msg_id, 'skip': skip, 'limit': limit,
            'query': msg}
mit
dhruvsrivastava/flask
flask/json.py
140
8458
# -*- coding: utf-8 -*-
"""
    flask.jsonimpl
    ~~~~~~~~~~~~~~

    Implementation helpers for the JSON support in Flask.

    :copyright: (c) 2015 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import io
import uuid
from datetime import date
from .globals import current_app, request
from ._compat import text_type, PY2

from werkzeug.http import http_date
from jinja2 import Markup

# Use the same json implementation as itsdangerous on which we
# depend anyways.
try:
    from itsdangerous import simplejson as _json
except ImportError:
    from itsdangerous import json as _json


# Figure out if simplejson escapes slashes.  This behavior was changed
# from one version to another without reason.
_slash_escape = '\\/' not in _json.dumps('/')


__all__ = ['dump', 'dumps', 'load', 'loads', 'htmlsafe_dump',
           'htmlsafe_dumps', 'JSONDecoder', 'JSONEncoder', 'jsonify']


def _wrap_reader_for_text(fp, encoding):
    # A file object that yields bytes must be wrapped so that json sees text.
    if isinstance(fp.read(0), bytes):
        fp = io.TextIOWrapper(io.BufferedReader(fp), encoding)
    return fp


def _wrap_writer_for_text(fp, encoding):
    try:
        fp.write('')
    except TypeError:
        # Writing text failed, so this is a binary stream: wrap it.
        fp = io.TextIOWrapper(fp, encoding)
    return fp


class JSONEncoder(_json.JSONEncoder):
    """The default Flask JSON encoder.  This one extends the default
    simplejson encoder by also supporting ``datetime`` objects, ``UUID`` as
    well as ``Markup`` objects which are serialized as RFC 822 datetime
    strings (same as the HTTP date format).  In order to support more data
    types override the :meth:`default` method.
    """

    def default(self, o):
        """Implement this method in a subclass such that it returns a
        serializable object for ``o``, or calls the base implementation (to
        raise a :exc:`TypeError`).

        For example, to support arbitrary iterators, you could implement
        default like this::

            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)
        """
        if isinstance(o, date):
            return http_date(o.timetuple())
        if isinstance(o, uuid.UUID):
            return str(o)
        if hasattr(o, '__html__'):
            return text_type(o.__html__())
        return _json.JSONEncoder.default(self, o)


class JSONDecoder(_json.JSONDecoder):
    """The default JSON decoder.  This one does not change the behavior from
    the default simplejson decoder.  Consult the :mod:`json` documentation
    for more information.  This decoder is not only used for the load
    functions of this module but also :attr:`~flask.Request`.
    """


def _dump_arg_defaults(kwargs):
    """Inject default arguments for dump functions."""
    if current_app:
        kwargs.setdefault('cls', current_app.json_encoder)
        if not current_app.config['JSON_AS_ASCII']:
            kwargs.setdefault('ensure_ascii', False)
        kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS'])
    else:
        kwargs.setdefault('sort_keys', True)
        kwargs.setdefault('cls', JSONEncoder)


def _load_arg_defaults(kwargs):
    """Inject default arguments for load functions."""
    if current_app:
        kwargs.setdefault('cls', current_app.json_decoder)
    else:
        kwargs.setdefault('cls', JSONDecoder)


def dumps(obj, **kwargs):
    """Serialize ``obj`` to a JSON formatted ``str`` by using the
    application's configured encoder (:attr:`~flask.Flask.json_encoder`) if
    there is an application on the stack.

    This function can return ``unicode`` strings or ascii-only bytestrings
    by default which coerce into unicode strings automatically.  That
    behavior by default is controlled by the ``JSON_AS_ASCII`` configuration
    variable and can be overridden by the simplejson ``ensure_ascii``
    parameter.
    """
    _dump_arg_defaults(kwargs)
    encoding = kwargs.pop('encoding', None)
    rv = _json.dumps(obj, **kwargs)
    if encoding is not None and isinstance(rv, text_type):
        rv = rv.encode(encoding)
    return rv


def dump(obj, fp, **kwargs):
    """Like :func:`dumps` but writes into a file object."""
    _dump_arg_defaults(kwargs)
    encoding = kwargs.pop('encoding', None)
    if encoding is not None:
        fp = _wrap_writer_for_text(fp, encoding)
    _json.dump(obj, fp, **kwargs)


def loads(s, **kwargs):
    """Unserialize a JSON object from a string ``s`` by using the
    application's configured decoder (:attr:`~flask.Flask.json_decoder`) if
    there is an application on the stack.
    """
    _load_arg_defaults(kwargs)
    if isinstance(s, bytes):
        s = s.decode(kwargs.pop('encoding', None) or 'utf-8')
    return _json.loads(s, **kwargs)


def load(fp, **kwargs):
    """Like :func:`loads` but reads from a file object.
    """
    _load_arg_defaults(kwargs)
    if not PY2:
        fp = _wrap_reader_for_text(fp, kwargs.pop('encoding', None) or 'utf-8')
    return _json.load(fp, **kwargs)


def htmlsafe_dumps(obj, **kwargs):
    """Works exactly like :func:`dumps` but is safe for use in ``<script>``
    tags.  It accepts the same arguments and returns a JSON string.  Note that
    this is available in templates through the ``|tojson`` filter which will
    also mark the result as safe.  Due to how this function escapes certain
    characters this is safe even if used outside of ``<script>`` tags.

    The following characters are escaped in strings:

    -   ``<``
    -   ``>``
    -   ``&``
    -   ``'``

    This makes it safe to embed such strings in any place in HTML with the
    notable exception of double quoted attributes.  In that case single
    quote your attributes or HTML escape it in addition.

    .. versionchanged:: 0.10
       This function's return value is now always safe for HTML usage, even
       if outside of script tags or if used in XHTML.  This rule does not
       hold true when using this function in HTML attributes that are double
       quoted.  Always single quote attributes if you use the ``|tojson``
       filter.  Alternatively use ``|tojson|forceescape``.
    """
    rv = dumps(obj, **kwargs) \
        .replace(u'<', u'\\u003c') \
        .replace(u'>', u'\\u003e') \
        .replace(u'&', u'\\u0026') \
        .replace(u"'", u'\\u0027')
    if not _slash_escape:
        rv = rv.replace('\\/', '/')
    return rv


def htmlsafe_dump(obj, fp, **kwargs):
    """Like :func:`htmlsafe_dumps` but writes into a file object."""
    # BUG FIX: the builtin ``unicode`` does not exist on Python 3 and raised
    # a NameError there; ``text_type`` (imported from ._compat above) is the
    # 2/3-compatible equivalent.
    fp.write(text_type(htmlsafe_dumps(obj, **kwargs)))


def jsonify(*args, **kwargs):
    """Creates a :class:`~flask.Response` with the JSON representation of
    the given arguments with an :mimetype:`application/json` mimetype.  The
    arguments to this function are the same as to the :class:`dict`
    constructor.

    Example usage::

        from flask import jsonify

        @app.route('/_get_current_user')
        def get_current_user():
            return jsonify(username=g.user.username,
                           email=g.user.email,
                           id=g.user.id)

    This will send a JSON response like this to the browser::

        {
            "username": "admin",
            "email": "admin@localhost",
            "id": 42
        }

    For security reasons only objects are supported toplevel.  For more
    information about this, have a look at :ref:`json-security`.

    This function's response will be pretty printed if it was not requested
    with ``X-Requested-With: XMLHttpRequest`` to simplify debugging unless
    the ``JSONIFY_PRETTYPRINT_REGULAR`` config parameter is set to false.
    Compressed (not pretty) formatting currently means no indents and no
    spaces after separators.

    .. versionadded:: 0.2
    """
    indent = None
    separators = (',', ':')
    if current_app.config['JSONIFY_PRETTYPRINT_REGULAR'] \
            and not request.is_xhr:
        indent = 2
        separators = (', ', ': ')
    # Note that we add '\n' to end of response
    # (see https://github.com/mitsuhiko/flask/pull/1262)
    rv = current_app.response_class(
        (dumps(dict(*args, **kwargs), indent=indent, separators=separators),
         '\n'),
        mimetype='application/json')
    return rv


def tojson_filter(obj, **kwargs):
    return Markup(htmlsafe_dumps(obj, **kwargs))
bsd-3-clause
commonlisp/kubernetes
hack/lookup_pull.py
368
1319
#!/usr/bin/env python # Copyright 2015 The Kubernetes Authors All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Script to print out PR info in release note format. import json import sys import urllib2 PULLQUERY=("https://api.github.com/repos/" "GoogleCloudPlatform/kubernetes/pulls/{pull}") LOGIN="login" TITLE="title" USER="user" def print_pulls(pulls): for pull in pulls: d = json.loads(urllib2.urlopen(PULLQUERY.format(pull=pull)).read()) print "* {title} #{pull} ({author})".format( title=d[TITLE], pull=pull, author=d[USER][LOGIN]) if __name__ == "__main__": if len(sys.argv) < 2: print ("Usage: {cmd} <pulls>...: Prints out short " + "markdown description for PRs appropriate for release notes.") sys.exit(1) print_pulls(sys.argv[1:])
apache-2.0
denfromufa/PTVS
Python/Product/Django/Templates/Projects/StarterDjangoProject/app-tests.py
17
1082
""" This file demonstrates writing tests using the unittest module. These will pass when you run "manage.py test". """ import django from django.test import TestCase # TODO: Configure your database in settings.py and sync before running tests. class ViewTest(TestCase): """Tests for the application views.""" if django.VERSION[:2] >= (1, 7): # Django 1.7 requires an explicit setup() when running tests in PTVS @classmethod def setUpClass(cls): super(ViewTest, cls).setUpClass() django.setup() def test_home(self): """Tests the home page.""" response = self.client.get('/') self.assertContains(response, 'Home Page', 1, 200) def test_contact(self): """Tests the contact page.""" response = self.client.get('/contact') self.assertContains(response, 'Contact', 3, 200) def test_about(self): """Tests the about page.""" response = self.client.get('/about') self.assertContains(response, 'About', 3, 200)
apache-2.0
cianfrocco-lab/cryoem-cloud-tools
awslib/awsdatatransfer.py
2
3723
import math
import subprocess
import sys
import os
from fabric.api import env, run, hide, settings


#====================
def exec_remote_cmd(cmd):
    """Run ``cmd`` on the host configured via fabric's ``env`` and return
    its output."""
    from fabric.operations import run, put
    from fabric.api import hide, settings
    # BUG FIX: the original used ``with hide(...) and settings(...):``.
    # ``and`` evaluates the managers' truthiness and yields only ONE of
    # them, so ``hide`` was never actually entered.  A with-statement tuple
    # enters both.
    with hide('output', 'running', 'warnings'), settings(warn_only=True):
        return run(cmd)


#=====================
def transferDirToS3(directoryToTransfer, bucketname, awspath, numfiles,
                    keyid, secretid, region):
    '''Use rclone to move data onto S3'''
    # Pick the rclone binary matching the local platform (Linux vs OSX).
    if subprocess.Popen('uname', shell=True,
                        stdout=subprocess.PIPE).stdout.read().strip() == 'Linux':
        rclonepath = '%s/rclone' % (awspath)
    if subprocess.Popen('uname', shell=True,
                        stdout=subprocess.PIPE).stdout.read().strip() == 'Darwin':
        rclonepath = '%s/rclone_mac' % (awspath)

    # Write a fresh ~/.rclone.conf with the S3 credentials.
    homedir = subprocess.Popen('echo $HOME', shell=True,
                               stdout=subprocess.PIPE).stdout.read().split()[0]
    if os.path.exists('%s/.rclone.conf' % (homedir)):
        os.remove('%s/.rclone.conf' % (homedir))
    r1 = open('%s/.rclone.conf' % (homedir), 'w')
    r1.write('[rclonename]\n')
    r1.write('type = s3\n')
    r1.write('env_auth = false\n')
    r1.write('access_key_id = %s\n' % (keyid))
    r1.write('secret_access_key = %s\n' % (secretid))
    r1.write('region = %s\n' % (region))
    r1.write('endpoint = \n')
    r1.write('location_constraint = %s\n' % (region))
    r1.write('acl = authenticated-read\n')
    r1.write('server_side_encryption = \n')
    r1.write('storage_class = STANDARD\n')
    r1.close()

    # Copy using rclone sync (despite the original comment saying rsync).
    cmd = '%s sync %s rclonename:%s --quiet --transfers %i > rclone.log' \
          % (rclonepath, directoryToTransfer, bucketname, math.ceil(numfiles))
    subprocess.Popen(cmd, shell=True).wait()

    # Clean up any config/log files left behind.
    if os.path.exists('%s/rclone.conf' % (directoryToTransfer)):
        os.remove('%s/rclone.conf' % (directoryToTransfer))
    if os.path.exists('rclone.log'):
        os.remove('rclone.log')


#===================
def transferS3toVM(IP, keypair, bucketname, dironebs, rclonepath, keyid,
                   secretid, region, numfilesAtATime, maxFileSize):
    """Copy the contents of an S3 bucket down onto a remote VM with rclone."""
    # Copy the rclone binary onto the instance.
    cmd = 'scp -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i %s %s ubuntu@%s:~/' \
          % (keypair, rclonepath, IP)
    subprocess.Popen(cmd, shell=True).wait()

    # Write the rclone config file locally, then ship it to the instance.
    homedir = '/home/ubuntu/'
    rclonename = 'ebss3'
    # BUG FIX: the stale-file guard checked '.rclone.conf' but this function
    # writes (and later removes) 'rclone.conf'; the guard now matches.
    if os.path.exists('rclone.conf'):
        os.remove('rclone.conf')
    r1 = open('rclone.conf', 'w')
    r1.write('[rclonename]\n')
    r1.write('type = s3\n')
    r1.write('env_auth = false\n')
    r1.write('access_key_id = %s\n' % (keyid))
    r1.write('secret_access_key = %s\n' % (secretid))
    r1.write('region = %s\n' % (region))
    r1.write('endpoint = \n')
    r1.write('location_constraint = %s\n' % (region))
    r1.write('acl = authenticated-read\n')
    r1.write('server_side_encryption = \n')
    r1.write('storage_class = STANDARD\n')
    r1.close()
    cmd = 'scp -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i %s rclone.conf ubuntu@%s:~/.rclone.conf' \
          % (keypair, IP)
    subprocess.Popen(cmd, shell=True).wait()

    # Pull the data down on the remote side.
    env.host_string = 'ubuntu@%s' % (IP)
    env.key_filename = '%s' % (keypair)
    rcloneexe = 'rclone'
    exec_remote_cmd('%s/%s copy rclonename:%s %s --max-size %iG --quiet --transfers %i'
                    % (homedir, rcloneexe, bucketname.split('s3://')[-1],
                       dironebs, maxFileSize, numfilesAtATime))

    if os.path.exists('rclone.conf'):
        os.remove('rclone.conf')
    if os.path.exists('rclone.log'):
        os.remove('rclone.log')
mit
rahushen/ansible
lib/ansible/modules/network/aci/aci_interface_policy_leaf_policy_group.py
14
18369
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2017, Bruno Calogero <brunocalogero@hotmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: aci_interface_policy_leaf_policy_group short_description: Manage fabric interface policy leaf policy groups (infra:AccBndlGrp, infra:AccPortGrp) description: - Manage fabric interface policy leaf policy groups on Cisco ACI fabrics. notes: - When using the module please select the appropriate link_aggregation_type (lag_type). C(link) for Port Channel(PC), C(node) for Virtual Port Channel(VPC) and C(leaf) for Leaf Access Port Policy Group. - More information about the internal APIC classes B(infra:AccBndlGrp) and B(infra:AccPortGrp) from L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/). author: - Bruno Calogero (@brunocalogero) version_added: '2.5' options: policy_group: description: - Name of the leaf policy group to be added/deleted. aliases: [ name, policy_group_name ] description: description: - Description for the leaf policy group to be created. aliases: [ descr ] lag_type: description: - Selector for the type of leaf policy group we want to create. - C(leaf) for Leaf Access Port Policy Group - C(link) for Port Channel (PC) - C(node) for Virtual Port Channel (VPC) aliases: [ lag_type_name ] choices: [ leaf, link, node ] link_level_policy: description: - Choice of link_level_policy to be used as part of the leaf policy group to be created. aliases: [ link_level_policy_name ] cdp_policy: description: - Choice of cdp_policy to be used as part of the leaf policy group to be created. 
aliases: [ cdp_policy_name ] mcp_policy: description: - Choice of mcp_policy to be used as part of the leaf policy group to be created. aliases: [ mcp_policy_name ] lldp_policy: description: - Choice of lldp_policy to be used as part of the leaf policy group to be created. aliases: [ lldp_policy_name ] stp_interface_policy: description: - Choice of stp_interface_policy to be used as part of the leaf policy group to be created. aliases: [ stp_interface_policy_name ] egress_data_plane_policing_policy: description: - Choice of egress_data_plane_policing_policy to be used as part of the leaf policy group to be created. aliases: [ egress_data_plane_policing_policy_name ] ingress_data_plane_policing_policy: description: - Choice of ingress_data_plane_policing_policy to be used as part of the leaf policy group to be created. aliases: [ ingress_data_plane_policing_policy_name ] priority_flow_control_policy: description: - Choice of priority_flow_control_policy to be used as part of the leaf policy group to be created. aliases: [ priority_flow_control_policy_name ] fibre_channel_interface_policy: description: - Choice of fibre_channel_interface_policy to be used as part of the leaf policy group to be created. aliases: [ fibre_channel_interface_policy_name ] slow_drain_policy: description: - Choice of slow_drain_policy to be used as part of the leaf policy group to be created. aliases: [ slow_drain_policy_name ] port_channel_policy: description: - Choice of port_channel_policy to be used as part of the leaf policy group to be created. aliases: [ port_channel_policy_name ] monitoring_policy: description: - Choice of monitoring_policy to be used as part of the leaf policy group to be created. aliases: [ monitoring_policy_name ] storm_control_interface_policy: description: - Choice of storm_control_interface_policy to be used as part of the leaf policy group to be created. 
aliases: [ storm_control_interface_policy_name ] l2_interface_policy: description: - Choice of l2_interface_policy to be used as part of the leaf policy group to be created. aliases: [ l2_interface_policy_name ] port_security_policy: description: - Choice of port_security_policy to be used as part of the leaf policy group to be created. aliases: [ port_security_policy_name ] aep: description: - Choice of attached_entity_profile (AEP) to be used as part of the leaf policy group to be created. aliases: [ aep_name ] state: description: - Use C(present) or C(absent) for adding or removing. - Use C(query) for listing an object or multiple objects. choices: [ absent, present, query ] default: present extends_documentation_fragment: aci ''' # FIXME: Add query examples EXAMPLES = r''' - name: Create a Port Channel (PC) Interface Policy Group aci_interface_policy_leaf_policy_group: host: apic username: admin password: SomeSecretPassword policy_group: policygroupname description: policygroupname description lag_type: link link_level_policy: whateverlinklevelpolicy fibre_channel_interface_policy: whateverfcpolicy state: present - name: Create a Virtual Port Channel (VPC) Interface Policy Group (no description) aci_interface_policy_leaf_policy_group: host: apic username: admin password: SomeSecretPassword policy_group: policygroupname lag_type: node link_level_policy: whateverlinklevelpolicy fibre_channel_interface_policy: whateverfcpolicy state: present - name: Create a Leaf Access Port Policy Group (no description) aci_interface_policy_leaf_policy_group: host: apic username: admin password: SomeSecretPassword policy_group: policygroupname lag_type: leaf link_level_policy: whateverlinklevelpolicy fibre_channel_interface_policy: whateverfcpolicy state: present - name: Delete an Interface policy Leaf Policy Group aci_interface_policy_leaf_policy_group: host: apic username: admin password: SomeSecretPassword policy_group: policygroupname lag_type: type_name state: absent ''' 
# NOTE: this portion of the module begins at the RETURN documentation; the
# shebang, DOCUMENTATION and EXAMPLES blocks live earlier in the file.
RETURN = r'''
current:
  description: The existing configuration from the APIC after the module has finished
  returned: success
  type: list
  sample:
    [
        {
            "fvTenant": {
                "attributes": {
                    "descr": "Production environment",
                    "dn": "uni/tn-production",
                    "name": "production",
                    "nameAlias": "",
                    "ownerKey": "",
                    "ownerTag": ""
                }
            }
        }
    ]
error:
  description: The error information as returned from the APIC
  returned: failure
  type: dict
  sample:
    {
        "code": "122",
        "text": "unknown managed object class foo"
    }
raw:
  description: The raw output returned by the APIC REST API (xml or json)
  returned: parse error
  type: string
  sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
  description: The actual/minimal configuration pushed to the APIC
  returned: info
  type: list
  sample:
    {
        "fvTenant": {
            "attributes": {
                "descr": "Production environment"
            }
        }
    }
previous:
  description: The original configuration from the APIC before the module has started
  returned: info
  type: list
  sample:
    [
        {
            "fvTenant": {
                "attributes": {
                    "descr": "Production",
                    "dn": "uni/tn-production",
                    "name": "production",
                    "nameAlias": "",
                    "ownerKey": "",
                    "ownerTag": ""
                }
            }
        }
    ]
proposed:
  description: The assembled configuration from the user-provided parameters
  returned: info
  type: dict
  sample:
    {
        "fvTenant": {
            "attributes": {
                "descr": "Production environment",
                "name": "production"
            }
        }
    }
filter_string:
  description: The filter string used for the request
  returned: failure or debug
  type: string
  sample: ?rsp-prop-include=config-only
method:
  description: The HTTP method used for the request to the APIC
  returned: failure or debug
  type: string
  sample: POST
response:
  description: The HTTP response from the APIC
  returned: failure or debug
  type: string
  sample: OK (30 bytes)
status:
  description: The HTTP status from the APIC
  returned: failure or debug
  type: int
  sample: 200
url:
  description: The HTTP url used for the request to the APIC
  returned: failure or debug
  type: string
  sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''

from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule


def main():
    """Create, delete or query an ACI leaf interface policy group.

    Depending on ``lag_type`` the managed object is either infra:AccPortGrp
    (leaf access ports) or infra:AccBndlGrp (port-channels / VPCs), created
    under uni/infra/funcprof with one child relation per interface policy.
    """
    argument_spec = aci_argument_spec()
    argument_spec.update({
        'policy_group': dict(type='str', aliases=['name', 'policy_group_name']),  # Not required for querying all objects
        'description': dict(type='str', aliases=['descr']),
        # NOTE: Since this module needs to include both infra:AccBndlGrp (for PC and VPC) and infra:AccPortGrp (for leaf access port policy group):
        # NOTE: I'll allow the user to make the choice here (link(PC), node(VPC), leaf(leaf-access port policy group))
        'lag_type': dict(type='str', aliases=['lag_type_name'], choices=['leaf', 'link', 'node']),  # Not required for querying all objects
        # Each of the following maps to one child relation object on the policy group.
        'link_level_policy': dict(type='str', aliases=['link_level_policy_name']),
        'cdp_policy': dict(type='str', aliases=['cdp_policy_name']),
        'mcp_policy': dict(type='str', aliases=['mcp_policy_name']),
        'lldp_policy': dict(type='str', aliases=['lldp_policy_name']),
        'stp_interface_policy': dict(type='str', aliases=['stp_interface_policy_name']),
        'egress_data_plane_policing_policy': dict(type='str', aliases=['egress_data_plane_policing_policy_name']),
        'ingress_data_plane_policing_policy': dict(type='str', aliases=['ingress_data_plane_policing_policy_name']),
        'priority_flow_control_policy': dict(type='str', aliases=['priority_flow_control_policy_name']),
        'fibre_channel_interface_policy': dict(type='str', aliases=['fibre_channel_interface_policy_name']),
        'slow_drain_policy': dict(type='str', aliases=['slow_drain_policy_name']),
        'port_channel_policy': dict(type='str', aliases=['port_channel_policy_name']),
        'monitoring_policy': dict(type='str', aliases=['monitoring_policy_name']),
        'storm_control_interface_policy': dict(type='str', aliases=['storm_control_interface_policy_name']),
        'l2_interface_policy': dict(type='str', aliases=['l2_interface_policy_name']),
        'port_security_policy': dict(type='str', aliases=['port_security_policy_name']),
        'aep': dict(type='str', aliases=['aep_name']),
        'state': dict(type='str', default='present', choices=['absent', 'present', 'query']),
    })

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # lag_type and policy_group are only mandatory when we actually
        # touch a specific object; plain queries may omit them.
        required_if=[
            ['state', 'absent', ['lag_type', 'policy_group']],
            ['state', 'present', ['lag_type', 'policy_group']],
        ],
    )

    policy_group = module.params['policy_group']
    description = module.params['description']
    lag_type = module.params['lag_type']
    link_level_policy = module.params['link_level_policy']
    cdp_policy = module.params['cdp_policy']
    mcp_policy = module.params['mcp_policy']
    lldp_policy = module.params['lldp_policy']
    stp_interface_policy = module.params['stp_interface_policy']
    egress_data_plane_policing_policy = module.params['egress_data_plane_policing_policy']
    ingress_data_plane_policing_policy = module.params['ingress_data_plane_policing_policy']
    priority_flow_control_policy = module.params['priority_flow_control_policy']
    fibre_channel_interface_policy = module.params['fibre_channel_interface_policy']
    slow_drain_policy = module.params['slow_drain_policy']
    port_channel_policy = module.params['port_channel_policy']
    monitoring_policy = module.params['monitoring_policy']
    storm_control_interface_policy = module.params['storm_control_interface_policy']
    l2_interface_policy = module.params['l2_interface_policy']
    port_security_policy = module.params['port_security_policy']
    aep = module.params['aep']
    state = module.params['state']

    # Pick the APIC class: access-port groups have no lagT attribute,
    # bundle groups (PC/VPC) carry lagT='link'/'node'.
    # NOTE(review): if state == 'query' and lag_type is omitted (which
    # required_if permits), neither branch runs and aci_class_name /
    # class_config_dict are unbound, crashing below — confirm whether
    # query-all was ever intended to work here.
    if lag_type == 'leaf':
        aci_class_name = 'infraAccPortGrp'
        dn_name = 'accportgrp'
        class_config_dict = dict(
            name=policy_group,
            descr=description,
        )
    elif lag_type == 'link' or lag_type == 'node':
        aci_class_name = 'infraAccBndlGrp'
        dn_name = 'accbundle'
        class_config_dict = dict(
            name=policy_group,
            descr=description,
            lagT=lag_type,
        )

    aci = ACIModule(module)
    aci.construct_url(
        root_class=dict(
            aci_class=aci_class_name,
            aci_rn='infra/funcprof/{0}-{1}'.format(dn_name, policy_group),
            filter_target='eq({0}.name, "{1}")'.format(aci_class_name, policy_group),
            module_object=policy_group,
        ),
        # All relation classes are requested so `current` reflects the
        # complete policy-group configuration, not just the MO itself.
        child_classes=[
            'infraRsAttEntP',
            'infraRsCdpIfPol',
            'infraRsFcIfPol',
            'infraRsHIfPol',
            'infraRsL2IfPol',
            'infraRsL2PortSecurityPol',
            'infraRsLacpPol',
            'infraRsLldpIfPol',
            'infraRsMcpIfPol',
            'infraRsMonIfInfraPol',
            'infraRsQosEgressDppIfPol',
            'infraRsQosIngressDppIfPol',
            'infraRsQosPfcIfPol',
            'infraRsQosSdIfPol',
            'infraRsStormctrlIfPol',
            'infraRsStpIfPol',
        ],
    )

    aci.get_existing()

    if state == 'present':
        # One child dict per relation; tn*Name attributes reference the
        # target policy by name (empty name = APIC default policy).
        aci.payload(
            aci_class=aci_class_name,
            class_config=class_config_dict,
            child_configs=[
                dict(
                    infraRsAttEntP=dict(
                        attributes=dict(
                            tDn='uni/infra/attentp-{0}'.format(aep),
                        ),
                    ),
                ),
                dict(
                    infraRsCdpIfPol=dict(
                        attributes=dict(
                            tnCdpIfPolName=cdp_policy,
                        ),
                    ),
                ),
                dict(
                    infraRsFcIfPol=dict(
                        attributes=dict(
                            tnFcIfPolName=fibre_channel_interface_policy,
                        ),
                    ),
                ),
                dict(
                    infraRsHIfPol=dict(
                        attributes=dict(
                            tnFabricHIfPolName=link_level_policy,
                        ),
                    ),
                ),
                dict(
                    infraRsL2IfPol=dict(
                        attributes=dict(
                            tnL2IfPolName=l2_interface_policy,
                        ),
                    ),
                ),
                dict(
                    infraRsL2PortSecurityPol=dict(
                        attributes=dict(
                            tnL2PortSecurityPolName=port_security_policy,
                        ),
                    ),
                ),
                dict(
                    infraRsLacpPol=dict(
                        attributes=dict(
                            tnLacpLagPolName=port_channel_policy,
                        ),
                    ),
                ),
                dict(
                    infraRsLldpIfPol=dict(
                        attributes=dict(
                            tnLldpIfPolName=lldp_policy,
                        ),
                    ),
                ),
                dict(
                    infraRsMcpIfPol=dict(
                        attributes=dict(
                            tnMcpIfPolName=mcp_policy,
                        ),
                    ),
                ),
                dict(
                    infraRsMonIfInfraPol=dict(
                        attributes=dict(
                            tnMonInfraPolName=monitoring_policy,
                        ),
                    ),
                ),
                dict(
                    infraRsQosEgressDppIfPol=dict(
                        attributes=dict(
                            tnQosDppPolName=egress_data_plane_policing_policy,
                        ),
                    ),
                ),
                dict(
                    infraRsQosIngressDppIfPol=dict(
                        attributes=dict(
                            tnQosDppPolName=ingress_data_plane_policing_policy,
                        ),
                    ),
                ),
                dict(
                    infraRsQosPfcIfPol=dict(
                        attributes=dict(
                            tnQosPfcIfPolName=priority_flow_control_policy,
                        ),
                    ),
                ),
                dict(
                    infraRsQosSdIfPol=dict(
                        attributes=dict(
                            tnQosSdIfPolName=slow_drain_policy,
                        ),
                    ),
                ),
                dict(
                    infraRsStormctrlIfPol=dict(
                        attributes=dict(
                            tnStormctrlIfPolName=storm_control_interface_policy,
                        ),
                    ),
                ),
                dict(
                    infraRsStpIfPol=dict(
                        attributes=dict(
                            tnStpIfPolName=stp_interface_policy,
                        ),
                    ),
                ),
            ],
        )

        aci.get_diff(aci_class=aci_class_name)

        # post_config() is a no-op in check mode and when the diff is empty.
        aci.post_config()

    elif state == 'absent':
        aci.delete_config()

    aci.exit_json()


if __name__ == "__main__":
    main()
gpl-3.0
wkerzendorf/tardis
tardis/tests/test_lte_plasma.py
5
2734
"""Tests for LTE plasma calculations against precomputed silicon reference values.

The whole module is currently skipped via ``pytestmark`` until the
reference numbers are re-validated.
"""
# Fix: ``plasma`` was referenced throughout (plasma.LTEPlasma,
# plasma.PopulationInversionException) but never imported, which would
# raise NameError as soon as the skip is lifted.
from tardis import atomic, plasma
from astropy import constants
import numpy as np
import pytest

# Module-level fixture data: silicon-only atomic data shared by all tests.
atom_data = atomic.AtomData.from_hdf5(atomic.default_atom_h5_path)
atom_data.prepare_atom_data(selected_atomic_numbers=[14])

# Skip every test in this module until it is fully implemented/validated.
pytestmark = pytest.mark.skipif(True, reason='to be implemented')


# Fix: dropped the former ``@pytest.mark.xskip`` decorator — that marker does
# not exist in pytest (it was a silent no-op) and the module-level skip above
# already disables the class.
class TestNormalLTEPlasma:
    """Checks an LTEPlasma built from a pure-Si composition at T_rad = 10000 K."""

    # Reference values below were captured from a trusted earlier run.
    compare_part_func = np.array([11.406201367482032, 5.866632552894803, 1.0044215520812598,
                                  2.0002017142942163, 1.0, 4.961567712516646])

    compare_phis = np.array([9.679214946588846e+16, 2390247610210.469, 63428.65716485618,
                             0.021444877147797966, 1.0749561507478891e-62])

    compare_ion_populations = np.array([0.15945011675589407, 3717655.969887028, 2140505324.5794258,
                                        32704.389884367, 1.6894052370987268e-07, 0.0])

    compare_level_populations_14_1 = np.array([1267390.086686989, 2432159.357552647, 2673.1427419153792,
                                               5263.602861186024, 7698.769675188349])

    def setup(self):
        """Build a fresh LTE plasma for every test (pytest calls this per-test)."""
        self.plasma = plasma.LTEPlasma.from_abundance(10000, {'Si': 1.0}, 1e-13, atom_data, 10 * 86400)

    def test_beta_rad(self):
        assert self.plasma.beta_rad == 1 / (10000 * constants.k_B.cgs.value)

    def test_t_electron(self):
        # LTEPlasma assumes T_e = 0.9 * T_rad.
        assert self.plasma.t_electron == 0.9 * self.plasma.t_rad

    def test_saha_calculation_method(self):
        # In LTE mode the generic Saha hook must point at the LTE implementation.
        assert self.plasma.calculate_saha == self.plasma.calculate_saha_lte

    def test_partition_function_calculation(self):
        assert np.all(self.plasma.partition_functions.values == self.compare_part_func)

    def test_phis_calculation(self):
        self.calculated_phis = self.plasma.calculate_saha()
        assert np.all(self.calculated_phis.values == self.compare_phis)

    def test_ionization_balance_calculation(self):
        assert np.all(self.plasma.ion_populations.values == self.compare_ion_populations)

    def test_electron_density(self):
        assert self.plasma.electron_density == 4284826418.2983923

    def test_level_populations(self):
        assert np.all(self.plasma.level_populations.ix[14, 1].values[:5] == self.compare_level_populations_14_1)

    def test_tau_sobolev(self):
        # silicon line 14, 1 , wl = 6347.105178
        level_lower = 7
        level_upper = 12
        wavelength_id = 565376
        assert self.plasma.tau_sobolevs[self.plasma.atom_data.lines.index == wavelength_id][0] == 101.06456251838634

    def test_population_inversion(self):
        # Force an inverted population and verify the guard exception fires.
        self.plasma.level_populations.ix[14, 1, 12] = 1.1 * self.plasma.level_populations.ix[14, 1, 7]
        with pytest.raises(plasma.PopulationInversionException):
            self.plasma.calculate_tau_sobolev()
bsd-3-clause
agiliq/django
django/db/migrations/graph.py
5
6578
from __future__ import unicode_literals

from django.db.migrations.state import ProjectState
from django.utils.datastructures import OrderedSet


class MigrationGraph(object):
    """
    Represents the digraph of all migrations in a project.

    Each migration is a node, and each dependency is an edge. There are
    no implicit dependencies between numbered migrations - the numbering is
    merely a convention to aid file listing. Every new numbered migration
    has a declared dependency to the previous number, meaning that VCS
    branch merges can be detected and resolved.

    Migrations files can be marked as replacing another set of migrations -
    this is to support the "squash" feature. The graph handler isn't responsible
    for these; instead, the code to load them in here should examine the
    migration files and if the replaced migrations are all either unapplied
    or not present, it should ignore the replaced ones, load in just the
    replacing migration, and repoint any dependencies that pointed to the
    replaced migrations to point to the replacing one.

    A node should be a tuple: (app_path, migration_name). The tree special-cases
    things within an app - namely, root nodes and leaf nodes ignore dependencies
    to other apps.
    """

    def __init__(self):
        # node -> migration implementation object
        self.nodes = {}
        # node -> set of nodes it depends on (its parents)
        self.dependencies = {}
        # node -> set of nodes that depend on it (its children)
        self.dependents = {}

    def add_node(self, node, implementation):
        # `implementation` is the Migration instance for this (app, name) key.
        self.nodes[node] = implementation

    def add_dependency(self, migration, child, parent):
        """Record that `child` depends on `parent`; both must already be added."""
        if child not in self.nodes:
            raise NodeNotFoundError(
                "Migration %s dependencies reference nonexistent child node %r" % (migration, child),
                child
            )
        if parent not in self.nodes:
            raise NodeNotFoundError(
                "Migration %s dependencies reference nonexistent parent node %r" % (migration, parent),
                parent
            )
        # Maintain both directions so forwards and backwards plans are cheap.
        self.dependencies.setdefault(child, set()).add(parent)
        self.dependents.setdefault(parent, set()).add(child)

    def forwards_plan(self, node):
        """
        Given a node, returns a list of which previous nodes (dependencies)
        must be applied, ending with the node itself.
        This is the list you would follow if applying the migrations to
        a database.
        """
        if node not in self.nodes:
            raise NodeNotFoundError("Node %r not a valid node" % (node, ), node)
        # Walk the dependency edges (node -> its parents).
        return self.dfs(node, lambda x: self.dependencies.get(x, set()))

    def backwards_plan(self, node):
        """
        Given a node, returns a list of which dependent nodes (dependencies)
        must be unapplied, ending with the node itself.
        This is the list you would follow if removing the migrations from
        a database.
        """
        if node not in self.nodes:
            raise NodeNotFoundError("Node %r not a valid node" % (node, ), node)
        # Walk the dependent edges (node -> its children).
        return self.dfs(node, lambda x: self.dependents.get(x, set()))

    def root_nodes(self, app=None):
        """
        Returns all root nodes - that is, nodes with no dependencies inside
        their app. These are the starting point for an app.
        """
        roots = set()
        for node in self.nodes:
            # key[0] == node[0] restricts the check to same-app dependencies;
            # cross-app dependencies deliberately don't disqualify a root.
            if (not any(key[0] == node[0] for key in self.dependencies.get(node, set()))
                    and (not app or app == node[0])):
                roots.add(node)
        return sorted(roots)

    def leaf_nodes(self, app=None):
        """
        Returns all leaf nodes - that is, nodes with no dependents in their app.
        These are the "most current" version of an app's schema.
        Having more than one per app is technically an error, but one that
        gets handled further up, in the interactive command - it's usually the
        result of a VCS merge and needs some user input.
        """
        leaves = set()
        for node in self.nodes:
            # Same-app restriction as in root_nodes(), mirrored for dependents.
            if (not any(key[0] == node[0] for key in self.dependents.get(node, set()))
                    and (not app or app == node[0])):
                leaves.add(node)
        return sorted(leaves)

    def dfs(self, start, get_children):
        """
        Dynamic programming based depth first search, for finding dependencies.
        """
        # `visited` accumulates the answer; deeper nodes are inserted at the
        # front so the final order is dependencies-first, `start` last.
        visited = []
        visited.append(start)
        # `path` tracks the current descent for cycle detection.
        path = [start]
        # sorted() makes traversal (and thus plan order) deterministic.
        stack = sorted(get_children(start))
        while stack:
            node = stack.pop(0)
            if node in path:
                raise CircularDependencyError()
            path.append(node)
            visited.insert(0, node)
            children = sorted(get_children(node))
            # NOTE(review): resetting `path` only when a leaf is reached means
            # the cycle check is tied to this particular traversal order —
            # confirm before restructuring this loop.
            if not children:
                path = []
            # Prepend children so traversal stays depth-first.
            stack = children + stack
        # OrderedSet dedupes while keeping first-seen (deepest-first) order.
        return list(OrderedSet(visited))

    def __str__(self):
        return "Graph: %s nodes, %s edges" % (
            len(self.nodes),
            sum(len(x) for x in self.dependencies.values()),
        )

    def make_state(self, nodes=None, at_end=True, real_apps=None):
        """
        Given a migration node or nodes, returns a complete ProjectState for it.
        If at_end is False, returns the state before the migration has run.
        If nodes is not provided, returns the overall most current project state.
        """
        if nodes is None:
            nodes = list(self.leaf_nodes())
        if len(nodes) == 0:
            return ProjectState()
        if not isinstance(nodes[0], tuple):
            # A single node was passed in; normalise to a list of nodes.
            nodes = [nodes]
        plan = []
        for node in nodes:
            for migration in self.forwards_plan(node):
                if migration not in plan:
                    if not at_end and migration in nodes:
                        # Stop before applying the requested node(s) themselves.
                        continue
                    plan.append(migration)
        project_state = ProjectState(real_apps=real_apps)
        for node in plan:
            # Each migration mutates the accumulated state in plan order.
            project_state = self.nodes[node].mutate_state(project_state)
        return project_state

    def __contains__(self, node):
        return node in self.nodes


class CircularDependencyError(Exception):
    """
    Raised when there's an impossible-to-resolve circular dependency.
    """
    pass


class NodeNotFoundError(LookupError):
    """
    Raised when an attempt on a node is made that is not available in the
    graph.
    """

    def __init__(self, message, node):
        self.message = message
        self.node = node

    def __str__(self):
        return self.message

    # Python 2 compatibility: unicode() uses the same text as str().
    __unicode__ = __str__

    def __repr__(self):
        return "NodeNotFoundError(%r)" % self.node
bsd-3-clause
ScreamingUdder/mantid
qt/applications/workbench/workbench/plugins/jupyterconsole.py
1
2648
# This file is part of the mantid workbench.
#
# Copyright (C) 2017 mantidproject
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, unicode_literals)

# system imports
import sys

# third-party library imports
from mantidqt.widgets.jupyterconsole import InProcessJupyterConsole
try:
    from IPython.core.usage import quick_guide
except ImportError:  # quick_guide was removed in IPython 6.0
    quick_guide = ''
from IPython.core.usage import release as ipy_release
from matplotlib import __version__ as mpl_version
from numpy.version import version as np_version
from qtpy.QtWidgets import QVBoxLayout

# local package imports
from workbench.plugins.base import PluginWidget

# Pieces of the console banner: IPython version line, the (possibly empty)
# quick guide, interpreter/library versions, and the standard notice.
DEFAULT_BANNER_PARTS = [
    'IPython {version} -- An enhanced Interactive Python.\n'.format(
        version=ipy_release.version,
    ),
    quick_guide,
    '\nPython {}, numpy {}, matplotlib {}\n'.format(sys.version.split('\n')[0].strip(),
                                                    np_version, mpl_version),
    'Type "copyright", "credits" or "license" for more information.\n',
]

BANNER = ''.join(DEFAULT_BANNER_PARTS)

# Code executed in the console at startup so users get the usual
# mantid + plotting environment pre-imported.
# should we share this with plugins.editor?
STARTUP_CODE = """from __future__ import (absolute_import, division, print_function, unicode_literals)
from mantid.simpleapi import *
import matplotlib.pyplot as plt
import numpy as np
"""


class JupyterConsole(PluginWidget):
    """Provides an in-process Jupyter Qt-based console"""

    def __init__(self, parent):
        super(JupyterConsole, self).__init__(parent)

        # layout: a single console widget filling the dock
        self.console = InProcessJupyterConsole(self, banner=BANNER,
                                               startup_code=STARTUP_CODE)
        layout = QVBoxLayout()
        layout.addWidget(self.console)
        self.setLayout(layout)

    # ----------------- Plugin API --------------------

    def get_plugin_title(self):
        """Return the title shown on the plugin's dock widget."""
        return "IPython"

    def read_user_settings(self, _):
        """No persisted settings for this plugin; intentionally a no-op."""
        pass

    def register_plugin(self, menu=None):
        """Attach this console to the main window as a dock widget."""
        self.main.add_dockwidget(self)
gpl-3.0
robovm/robovm-studio
python/lib/Lib/site-packages/django/contrib/sessions/middleware.py
323
1888
import time

from django.conf import settings
from django.utils.cache import patch_vary_headers
from django.utils.http import cookie_date
from django.utils.importlib import import_module


class SessionMiddleware(object):
    """Attaches a session to each request and persists it on the response.

    The session backend is chosen at request time from
    ``settings.SESSION_ENGINE``; the cookie written back follows the
    ``SESSION_COOKIE_*`` settings.
    """

    def process_request(self, request):
        """Attach a SessionStore for the session key found in the cookies (if any)."""
        engine = import_module(settings.SESSION_ENGINE)
        cookie_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME)
        request.session = engine.SessionStore(cookie_key)

    def process_response(self, request, response):
        """
        If request.session was modified, or if the configuration is to save
        the session every time, save the changes and set a session cookie.
        """
        try:
            was_accessed = request.session.accessed
            was_modified = request.session.modified
        except AttributeError:
            # No usable session on this request (e.g. stripped by another
            # middleware) — nothing to persist.
            return response

        if was_accessed:
            # Responses vary by the session cookie, so caches must key on it.
            patch_vary_headers(response, ('Cookie',))

        if not (was_modified or settings.SESSION_SAVE_EVERY_REQUEST):
            return response

        if request.session.get_expire_at_browser_close():
            # Session cookie: no max-age/expires means it dies with the browser.
            max_age = expires = None
        else:
            max_age = request.session.get_expiry_age()
            expires = cookie_date(time.time() + max_age)

        # Save the session data and refresh the client cookie.
        request.session.save()
        response.set_cookie(
            settings.SESSION_COOKIE_NAME,
            request.session.session_key,
            max_age=max_age,
            expires=expires,
            domain=settings.SESSION_COOKIE_DOMAIN,
            path=settings.SESSION_COOKIE_PATH,
            secure=settings.SESSION_COOKIE_SECURE or None,
            httponly=settings.SESSION_COOKIE_HTTPONLY or None,
        )
        return response
apache-2.0
drawks/ansible
lib/ansible/modules/cloud/opennebula/one_image.py
52
11657
#!/usr/bin/python # -*- coding: utf-8 -*- # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type """ (c) 2018, Milan Ilic <milani@nordeus.com> This file is part of Ansible Ansible is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Ansible is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a clone of the GNU General Public License along with Ansible. If not, see <http://www.gnu.org/licenses/>. """ ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: one_image short_description: Manages OpenNebula images description: - Manages OpenNebula images version_added: "2.6" requirements: - python-oca options: api_url: description: - URL of the OpenNebula RPC server. - It is recommended to use HTTPS so that the username/password are not - transferred over the network unencrypted. - If not set then the value of the C(ONE_URL) environment variable is used. api_username: description: - Name of the user to login into the OpenNebula RPC server. If not set - then the value of the C(ONE_USERNAME) environment variable is used. api_password: description: - Password of the user to login into OpenNebula RPC server. If not set - then the value of the C(ONE_PASSWORD) environment variable is used. id: description: - A C(id) of the image you would like to manage. name: description: - A C(name) of the image you would like to manage. 
state: description: - C(present) - state that is used to manage the image - C(absent) - delete the image - C(cloned) - clone the image - C(renamed) - rename the image to the C(new_name) choices: ["present", "absent", "cloned", "renamed"] default: present enabled: description: - Whether the image should be enabled or disabled. type: bool new_name: description: - A name that will be assigned to the existing or new image. - In the case of cloning, by default C(new_name) will take the name of the origin image with the prefix 'Copy of'. author: - "Milan Ilic (@ilicmilan)" ''' EXAMPLES = ''' # Fetch the IMAGE by id - one_image: id: 45 register: result # Print the IMAGE properties - debug: msg: result # Rename existing IMAGE - one_image: id: 34 state: renamed new_name: bar-image # Disable the IMAGE by id - one_image: id: 37 enabled: no # Enable the IMAGE by name - one_image: name: bar-image enabled: yes # Clone the IMAGE by name - one_image: name: bar-image state: cloned new_name: bar-image-clone register: result # Delete the IMAGE by id - one_image: id: '{{ result.id }}' state: absent ''' RETURN = ''' id: description: image id type: int returned: success sample: 153 name: description: image name type: str returned: success sample: app1 group_id: description: image's group id type: int returned: success sample: 1 group_name: description: image's group name type: str returned: success sample: one-users owner_id: description: image's owner id type: int returned: success sample: 143 owner_name: description: image's owner name type: str returned: success sample: ansible-test state: description: state of image instance type: str returned: success sample: READY used: description: is image in use type: bool returned: success sample: true running_vms: description: count of running vms that use this image type: int returned: success sample: 7 ''' try: import oca HAS_OCA = True except ImportError: HAS_OCA = False from ansible.module_utils.basic import AnsibleModule import os def 
get_image(module, client, predicate): pool = oca.ImagePool(client) # Filter -2 means fetch all images user can Use pool.info(filter=-2) for image in pool: if predicate(image): return image return None def get_image_by_name(module, client, image_name): return get_image(module, client, lambda image: (image.name == image_name)) def get_image_by_id(module, client, image_id): return get_image(module, client, lambda image: (image.id == image_id)) def get_image_instance(module, client, requested_id, requested_name): if requested_id: return get_image_by_id(module, client, requested_id) else: return get_image_by_name(module, client, requested_name) IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] def get_image_info(image): image.info() info = { 'id': image.id, 'name': image.name, 'state': IMAGE_STATES[image.state], 'running_vms': image.running_vms, 'used': bool(image.running_vms), 'user_name': image.uname, 'user_id': image.uid, 'group_name': image.gname, 'group_id': image.gid, } return info def wait_for_state(module, image, wait_timeout, state_predicate): import time start_time = time.time() while (time.time() - start_time) < wait_timeout: image.info() state = image.state if state_predicate(state): return image time.sleep(1) module.fail_json(msg="Wait timeout has expired!") def wait_for_ready(module, image, wait_timeout=60): return wait_for_state(module, image, wait_timeout, lambda state: (state in [IMAGE_STATES.index('READY')])) def wait_for_delete(module, image, wait_timeout=60): return wait_for_state(module, image, wait_timeout, lambda state: (state in [IMAGE_STATES.index('DELETE')])) def enable_image(module, client, image, enable): image.info() changed = False state = image.state if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]: if enable: module.fail_json(msg="Cannot enable " + IMAGE_STATES[state] + " image!") else: 
module.fail_json(msg="Cannot disable " + IMAGE_STATES[state] + " image!") if ((enable and state != IMAGE_STATES.index('READY')) or (not enable and state != IMAGE_STATES.index('DISABLED'))): changed = True if changed and not module.check_mode: client.call('image.enable', image.id, enable) result = get_image_info(image) result['changed'] = changed return result def clone_image(module, client, image, new_name): if new_name is None: new_name = "Copy of " + image.name tmp_image = get_image_by_name(module, client, new_name) if tmp_image: result = get_image_info(tmp_image) result['changed'] = False return result if image.state == IMAGE_STATES.index('DISABLED'): module.fail_json(msg="Cannot clone DISABLED image") if not module.check_mode: new_id = client.call('image.clone', image.id, new_name) image = get_image_by_id(module, client, new_id) wait_for_ready(module, image) result = get_image_info(image) result['changed'] = True return result def rename_image(module, client, image, new_name): if new_name is None: module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'") if new_name == image.name: result = get_image_info(image) result['changed'] = False return result tmp_image = get_image_by_name(module, client, new_name) if tmp_image: module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.id)) if not module.check_mode: client.call('image.rename', image.id, new_name) result = get_image_info(image) result['changed'] = True return result def delete_image(module, client, image): if not image: return {'changed': False} if image.running_vms > 0: module.fail_json(msg="Cannot delete image. 
There are " + str(image.running_vms) + " VMs using it.") if not module.check_mode: client.call('image.delete', image.id) wait_for_delete(module, image) return {'changed': True} def get_connection_info(module): url = module.params.get('api_url') username = module.params.get('api_username') password = module.params.get('api_password') if not url: url = os.environ.get('ONE_URL') if not username: username = os.environ.get('ONE_USERNAME') if not password: password = os.environ.get('ONE_PASSWORD') if not(url and username and password): module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") from collections import namedtuple auth_params = namedtuple('auth', ('url', 'username', 'password')) return auth_params(url=url, username=username, password=password) def main(): fields = { "api_url": {"required": False, "type": "str"}, "api_username": {"required": False, "type": "str"}, "api_password": {"required": False, "type": "str", "no_log": True}, "id": {"required": False, "type": "int"}, "name": {"required": False, "type": "str"}, "state": { "default": "present", "choices": ['present', 'absent', 'cloned', 'renamed'], "type": "str" }, "enabled": {"required": False, "type": "bool"}, "new_name": {"required": False, "type": "str"}, } module = AnsibleModule(argument_spec=fields, mutually_exclusive=[['id', 'name']], supports_check_mode=True) if not HAS_OCA: module.fail_json(msg='This module requires python-oca to work!') auth = get_connection_info(module) params = module.params id = params.get('id') name = params.get('name') state = params.get('state') enabled = params.get('enabled') new_name = params.get('new_name') client = oca.Client(auth.username + ':' + auth.password, auth.url) result = {} if not id and state == 'renamed': module.fail_json(msg="Option 'id' is required when the state is 'renamed'") image = get_image_instance(module, client, id, name) if not image and state != 'absent': if id: module.fail_json(msg="There 
is no image with id=" + str(id)) else: module.fail_json(msg="There is no image with name=" + name) if state == 'absent': result = delete_image(module, client, image) else: result = get_image_info(image) changed = False result['changed'] = False if enabled is not None: result = enable_image(module, client, image, enabled) if state == "cloned": result = clone_image(module, client, image, new_name) elif state == "renamed": result = rename_image(module, client, image, new_name) changed = changed or result['changed'] result['changed'] = changed module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
acsone/odoo
addons/website_sale_delivery/controllers/main.py
124
1551
# -*- coding: utf-8 -*-
import openerp
from openerp import http
from openerp.http import request
import openerp.addons.website_sale.controllers.main


class website_sale(openerp.addons.website_sale.controllers.main.website_sale):
    """Extends the eCommerce checkout with delivery-carrier handling and
    strips/augments the Google Analytics tracking data accordingly."""

    @http.route(['/shop/payment'], type='http', auth="public", website=True)
    def payment(self, **post):
        """Apply the posted delivery carrier to the current order before
        rendering the payment page; redirect back once a carrier was chosen
        so the totals are recomputed with the delivery line."""
        cr, uid, context = request.cr, request.uid, request.context
        carrier_id = post.get('carrier_id')
        if carrier_id:
            carrier_id = int(carrier_id)
        order = request.website.sale_get_order(context=context)
        if order:
            request.registry['sale.order']._check_carrier_quotation(
                cr, uid, order, force_carrier_id=carrier_id, context=context)
            if carrier_id:
                return request.redirect("/shop/payment")
        return super(website_sale, self).payment(**post)

    def order_lines_2_google_api(self, order_lines):
        """ Transforms a list of order lines into a dict for google analytics """
        # Delivery lines are not products; keep them out of GA item tracking.
        non_delivery = list(filter(lambda line: not line.is_delivery, order_lines))
        return super(website_sale, self).order_lines_2_google_api(non_delivery)

    def order_2_return_dict(self, order):
        """ Returns the tracking_cart dict of the order for Google analytics """
        ret = super(website_sale, self).order_2_return_dict(order)
        # Report the delivery charge as the GA transaction's shipping cost.
        for line in order.order_line:
            if line.is_delivery:
                ret['transaction']['shipping'] = line.price_unit
        return ret
agpl-3.0
ostash/qt-creator-i18n-uk
share/qtcreator/dumper/dumper.py
2
64286
import sys import base64 import __builtin__ import os import tempfile # Fails on Windows. try: import curses.ascii def printableChar(ucs): if curses.ascii.isprint(ucs): return ucs return '?' except: def printableChar(ucs): if ucs >= 32 and ucs <= 126: return ucs return '?' # Fails on SimulatorQt. tempFileCounter = 0 try: # Test if 2.6 is used (Windows), trigger exception and default # to 2nd version. tempfile.NamedTemporaryFile(prefix="py_",delete=True) def createTempFile(): file = tempfile.NamedTemporaryFile(prefix="py_",delete=False) file.close() return file.name, file except: def createTempFile(): global tempFileCounter tempFileCounter += 1 fileName = "%s/py_tmp_%d_%d" \ % (tempfile.gettempdir(), os.getpid(), tempFileCounter) return fileName, None def removeTempFile(name, file): try: os.remove(name) except: pass try: import binascii except: pass verbosity = 0 verbosity = 1 # Some "Enums" # Encodings Unencoded8Bit, \ Base64Encoded8BitWithQuotes, \ Base64Encoded16BitWithQuotes, \ Base64Encoded32BitWithQuotes, \ Base64Encoded16Bit, \ Base64Encoded8Bit, \ Hex2EncodedLatin1, \ Hex4EncodedLittleEndian, \ Hex8EncodedLittleEndian, \ Hex2EncodedUtf8, \ Hex8EncodedBigEndian, \ Hex4EncodedBigEndian, \ Hex4EncodedLittleEndianWithoutQuotes, \ Hex2EncodedLocal8Bit, \ JulianDate, \ MillisecondsSinceMidnight, \ JulianDateAndMillisecondsSinceMidnight \ = range(17) # Display modes StopDisplay, \ DisplayImage1, \ DisplayString, \ DisplayImage, \ DisplayProcess \ = range(5) def hasInferiorThreadList(): #return False try: a = gdb.inferiors()[0].threads() return True except: return False def hasVTable(type): fields = type.fields() if len(fields) == 0: return False if fields[0].is_base_class: return hasVTable(fields[0].type) return str(fields[0].type) == "int (**)(void)" def dynamicTypeName(value): if hasVTable(value.type): #vtbl = str(parseAndEvaluate("{int(*)(int)}%s" % long(value.address))) try: # Fails on 7.1 due to the missing to_string. 
vtbl = gdb.execute("info symbol {int*}%s" % long(value.address), to_string = True) pos1 = vtbl.find("vtable ") if pos1 != -1: pos1 += 11 pos2 = vtbl.find(" +", pos1) if pos2 != -1: return vtbl[pos1 : pos2] except: pass return str(value.type) def upcast(value): try: return value.cast(value.dynamic_type) except: pass #try: # return value.cast(lookupType(dynamicTypeName(value))) #except: # pass return value def expensiveUpcast(value): try: return value.cast(value.dynamic_type) except: pass try: return value.cast(lookupType(dynamicTypeName(value))) except: pass return value typeCache = {} class TypeInfo: def __init__(self, type): self.size = type.sizeof self.reported = False typeInfoCache = {} def lookupType(typestring): type = typeCache.get(typestring) #warn("LOOKUP 1: %s -> %s" % (typestring, type)) if not type is None: return type if typestring == "void": type = gdb.lookup_type(typestring) typeCache[typestring] = type return type if typestring.find("(anon") != -1: # gdb doesn't like # '(anonymous namespace)::AddAnalysisMessageSuppressionComment' typeCache[typestring] = None return None try: type = gdb.parse_and_eval("{%s}&main" % typestring).type typeCache[typestring] = type return type except: pass #warn(" RESULT FOR 7.2: '%s': %s" % (typestring, type)) #typeCache[typestring] = type #return None # This part should only trigger for # gdb 7.1 for types with namespace separators. 
ts = typestring while True: #warn("TS: '%s'" % ts) if ts.startswith("class "): ts = ts[6:] elif ts.startswith("struct "): ts = ts[7:] elif ts.startswith("const "): ts = ts[6:] elif ts.startswith("volatile "): ts = ts[9:] elif ts.startswith("enum "): ts = ts[5:] elif ts.endswith(" const"): ts = ts[:-6] elif ts.endswith(" volatile"): ts = ts[:-9] elif ts.endswith("*const"): ts = ts[:-5] elif ts.endswith("*volatile"): ts = ts[:-8] else: break if ts.endswith('*'): type = lookupType(ts[0:-1]) if not type is None: type = type.pointer() typeCache[typestring] = type return type try: #warn("LOOKING UP '%s'" % ts) type = gdb.lookup_type(ts) except RuntimeError, error: #warn("LOOKING UP '%s': %s" % (ts, error)) if type is None: pos = typestring.find("<unnamed>") if pos != -1: # See http://sourceware.org/bugzilla/show_bug.cgi?id=13269 return lookupType(typestring.replace("<unnamed>", "(anonymous namespace)")) # See http://sourceware.org/bugzilla/show_bug.cgi?id=11912 exp = "(class '%s'*)0" % ts try: type = parseAndEvaluate(exp).type.target() except: # Can throw "RuntimeError: No type named class Foo." pass except: #warn("LOOKING UP '%s' FAILED" % ts) pass # This could still be None as gdb.lookup_type("char[3]") generates # "RuntimeError: No type named char[3]" return type def cleanAddress(addr): if addr is None: return "<no address>" # We cannot use str(addr) as it yields rubbish for char pointers # that might trigger Unicode encoding errors. #return addr.cast(lookupType("void").pointer()) # We do not use "hex(...)" as it (sometimes?) adds a "L" suffix. 
return "0x%x" % long(addr) def extractTemplateArgument(type, position): level = 0 skipSpace = False inner = "" type = str(type) for c in type[type.find('<') + 1 : -1]: if c == '<': inner += c level += 1 elif c == '>': level -= 1 inner += c elif c == ',': if level == 0: if position == 0: return inner position -= 1 inner = "" else: inner += c skipSpace = True else: if skipSpace and c == ' ': pass else: inner += c skipSpace = False return inner def templateArgument(type, position): try: # This fails on stock 7.2 with # "RuntimeError: No type named myns::QObject.\n" return type.template_argument(position) except: # That's something like "myns::QList<...>" return lookupType(extractTemplateArgument(type.strip_typedefs(), position)) # Workaround for gdb < 7.1 def numericTemplateArgument(type, position): try: return int(type.template_argument(position)) except RuntimeError, error: # ": No type named 30." msg = str(error) msg = msg[14:-1] # gdb at least until 7.4 produces for std::array<int, 4u> # for template_argument(1): RuntimeError: No type named 4u. 
if msg[-1] == 'u': msg = msg[0:-1] return int(msg) def showException(msg, exType, exValue, exTraceback): warn("**** CAUGHT EXCEPTION: %s ****" % msg) try: import traceback for line in traceback.format_exception(exType, exValue, exTraceback): warn("%s" % line) except: pass class OutputSafer: def __init__(self, d, pre = "", post = ""): self.d = d self.pre = pre self.post = post def __enter__(self): self.d.put(self.pre) self.savedOutput = self.d.output self.d.output = [] def __exit__(self, exType, exValue, exTraceBack): self.d.put(self.post) if self.d.passExceptions and not exType is None: showException("OUTPUTSAFER", exType, exValue, exTraceBack) self.d.output = self.savedOutput else: self.savedOutput.extend(self.d.output) self.d.output = self.savedOutput return False class NoAddress: def __init__(self, d): self.d = d def __enter__(self): self.savedPrintsAddress = self.d.currentPrintsAddress self.d.currentPrintsAddress = False def __exit__(self, exType, exValue, exTraceBack): self.d.currentPrintsAddress = self.savedPrintsAddress class SubItem: def __init__(self, d, component): self.d = d self.iname = "%s.%s" % (d.currentIName, component) self.name = component def __enter__(self): self.d.put('{') #if not self.name is None: if isinstance(self.name, str): self.d.put('name="%s",' % self.name) self.savedIName = self.d.currentIName self.savedValue = self.d.currentValue self.savedValuePriority = self.d.currentValuePriority self.savedValueEncoding = self.d.currentValueEncoding self.savedType = self.d.currentType self.savedTypePriority = self.d.currentTypePriority self.d.currentIName = self.iname self.d.currentValuePriority = -100 self.d.currentValueEncoding = None self.d.currentType = "" self.d.currentTypePriority = -100 def __exit__(self, exType, exValue, exTraceBack): #warn(" CURRENT VALUE: %s %s %s" % (self.d.currentValue, # self.d.currentValueEncoding, self.d.currentValuePriority)) if self.d.passExceptions and not exType is None: showException("SUBITEM", exType, exValue, 
exTraceBack) try: #warn("TYPE VALUE: %s" % self.d.currentValue) typeName = stripClassTag(self.d.currentType) #warn("TYPE: '%s' DEFAULT: '%s' % (typeName, self.d.currentChildType)) if len(typeName) > 0 and typeName != self.d.currentChildType: self.d.put('type="%s",' % typeName) # str(type.unqualified()) ? if not typeName in typeInfoCache \ and typeName != " ": # FIXME: Move to lookupType typeObj = lookupType(typeName) if not typeObj is None: typeInfoCache[typeName] = TypeInfo(typeObj) if self.d.currentValue is None: self.d.put('value="<not accessible>",numchild="0",') else: if not self.d.currentValueEncoding is None: self.d.put('valueencoded="%d",' % self.d.currentValueEncoding) self.d.put('value="%s",' % self.d.currentValue) except: pass self.d.put('},') self.d.currentIName = self.savedIName self.d.currentValue = self.savedValue self.d.currentValuePriority = self.savedValuePriority self.d.currentValueEncoding = self.savedValueEncoding self.d.currentType = self.savedType self.d.currentTypePriority = self.savedTypePriority return True class TopLevelItem(SubItem): def __init__(self, d, iname): self.d = d self.iname = iname self.name = None class UnnamedSubItem(SubItem): def __init__(self, d, component): self.d = d self.iname = "%s.%s" % (self.d.currentIName, component) self.name = None class Children: def __init__(self, d, numChild = 1, childType = None, childNumChild = None, maxNumChild = None, addrBase = None, addrStep = None): self.d = d self.numChild = numChild self.childNumChild = childNumChild self.maxNumChild = maxNumChild self.addrBase = addrBase self.addrStep = addrStep self.printsAddress = True if childType is None: self.childType = None else: self.childType = stripClassTag(str(childType)) self.d.put('childtype="%s",' % self.childType) if childNumChild is None: if isSimpleType(childType): self.d.put('childnumchild="0",') self.childNumChild = 0 elif childType.code == PointerCode: self.d.put('childnumchild="1",') self.childNumChild = 1 else: 
self.d.put('childnumchild="%s",' % childNumChild) self.childNumChild = childNumChild try: if not addrBase is None and not addrStep is None: self.d.put('addrbase="0x%x",' % long(addrBase)) self.d.put('addrstep="0x%x",' % long(addrStep)) self.printsAddress = False except: warn("ADDRBASE: %s" % addrBase) #warn("CHILDREN: %s %s %s" % (numChild, childType, childNumChild)) def __enter__(self): self.savedChildType = self.d.currentChildType self.savedChildNumChild = self.d.currentChildNumChild self.savedNumChild = self.d.currentNumChild self.savedMaxNumChild = self.d.currentMaxNumChild self.savedPrintsAddress = self.d.currentPrintsAddress self.d.currentChildType = self.childType self.d.currentChildNumChild = self.childNumChild self.d.currentNumChild = self.numChild self.d.currentMaxNumChild = self.maxNumChild self.d.currentPrintsAddress = self.printsAddress self.d.put("children=[") def __exit__(self, exType, exValue, exTraceBack): if self.d.passExceptions and not exType is None: showException("CHILDREN", exType, exValue, exTraceBack) if not self.d.currentMaxNumChild is None: if self.d.currentMaxNumChild < self.d.currentNumChild: self.d.put('{name="<incomplete>",value="",type="",numchild="0"},') self.d.currentChildType = self.savedChildType self.d.currentChildNumChild = self.savedChildNumChild self.d.currentNumChild = self.savedNumChild self.d.currentMaxNumChild = self.savedMaxNumChild self.d.currentPrintsAddress = self.savedPrintsAddress self.d.put('],') return True def value(expr): value = parseAndEvaluate(expr) try: return int(value) except: return str(value) def isSimpleType(typeobj): code = typeobj.code return code == BoolCode \ or code == CharCode \ or code == IntCode \ or code == FloatCode \ or code == EnumCode def warn(message): if True or verbosity > 0: print "XXX: %s\n" % message.encode("latin1") pass def check(exp): if not exp: raise RuntimeError("Check failed") def checkSimpleRef(ref): count = ref["_q_value"] check(count > 0) check(count < 1000000) def 
checkRef(ref): try: count = ref["atomic"]["_q_value"] # Qt 5. minimum = -1 except: count = ref["_q_value"] # Qt 4. minimum = 0 # Assume there aren't a million references to any object. check(count >= minimum) check(count < 1000000) #def couldBePointer(p, align): # type = lookupType("unsigned int") # ptr = gdb.Value(p).cast(type) # d = int(str(ptr)) # warn("CHECKING : %s %d " % (p, ((d & 3) == 0 and (d > 1000 or d == 0)))) # return (d & (align - 1)) and (d > 1000 or d == 0) def checkAccess(p, align = 1): return p.dereference() def checkContents(p, expected, align = 1): if int(p.dereference()) != expected: raise RuntimeError("Contents check failed") def checkPointer(p, align = 1): if not isNull(p): p.dereference() def isAccessible(p): try: long(p) return True except: return False def isNull(p): # The following can cause evaluation to abort with "UnicodeEncodeError" # for invalid char *, as their "contents" is being examined #s = str(p) #return s == "0x0" or s.startswith("0x0 ") #try: # # Can fail with: "RuntimeError: Cannot access memory at address 0x5" # return p.cast(lookupType("void").pointer()) == 0 #except: # return False try: # Can fail with: "RuntimeError: Cannot access memory at address 0x5" return long(p) == 0 except: return False movableTypes = set([ "QBrush", "QBitArray", "QByteArray", "QCustomTypeInfo", "QChar", "QDate", "QDateTime", "QFileInfo", "QFixed", "QFixedPoint", "QFixedSize", "QHashDummyValue", "QIcon", "QImage", "QLine", "QLineF", "QLatin1Char", "QLocale", "QMatrix", "QModelIndex", "QPoint", "QPointF", "QPen", "QPersistentModelIndex", "QResourceRoot", "QRect", "QRectF", "QRegExp", "QSize", "QSizeF", "QString", "QTime", "QTextBlock", "QUrl", "QVariant", "QXmlStreamAttribute", "QXmlStreamNamespaceDeclaration", "QXmlStreamNotationDeclaration", "QXmlStreamEntityDeclaration" ]) def stripClassTag(typeName): if typeName.startswith("class "): return typeName[6:] if typeName.startswith("struct "): return typeName[7:] if typeName.startswith("const "): 
return typeName[6:] if typeName.startswith("volatile "): return typeName[9:] return typeName def checkPointerRange(p, n): for i in xrange(n): checkPointer(p) ++p def call2(value, func, args): # args is a tuple. arg = "" for i in range(len(args)): if i: arg += ',' a = args[i] if (':' in a) and not ("'" in a): arg = "'%s'" % a else: arg += a #warn("CALL: %s -> %s(%s)" % (value, func, arg)) type = stripClassTag(str(value.type)) if type.find(":") >= 0: type = "'" + type + "'" # 'class' is needed, see http://sourceware.org/bugzilla/show_bug.cgi?id=11912 exp = "((class %s*)%s)->%s(%s)" % (type, value.address, func, arg) #warn("CALL: %s" % exp) result = None try: result = parseAndEvaluate(exp) except: pass #warn(" -> %s" % result) return result def call(value, func, *args): return call2(value, func, args) def makeValue(type, init): type = stripClassTag(type) if type.find(":") >= 0: type = "'" + type + "'" # Avoid malloc symbol clash with QVector. gdb.execute("set $d = (%s*)calloc(sizeof(%s), 1)" % (type, type)) gdb.execute("set *$d = {%s}" % init) value = parseAndEvaluate("$d").dereference() #warn(" TYPE: %s" % value.type) #warn(" ADDR: %s" % value.address) #warn(" VALUE: %s" % value) return value def makeStdString(init): # Works only for small allocators, but they are usually empty. gdb.execute("set $d=(std::string*)calloc(sizeof(std::string), 2)"); gdb.execute("call($d->basic_string(\"" + init + "\",*(std::allocator<char>*)(1+$d)))") value = parseAndEvaluate("$d").dereference() #warn(" TYPE: %s" % value.type) #warn(" ADDR: %s" % value.address) #warn(" VALUE: %s" % value) return value def makeExpression(value): type = stripClassTag(str(value.type)) if type.find(":") >= 0: type = "'" + type + "'" #warn(" TYPE: %s" % type) #exp = "(*(%s*)(&%s))" % (type, value.address) exp = "(*(%s*)(%s))" % (type, value.address) #warn(" EXP: %s" % exp) return exp qqNs = None def qtNamespace(): # FIXME: This only works when call from inside a Qt function frame. 
global qqNs if not qqNs is None: return qqNs try: str = catchCliOutput("ptype QString::Null")[0] # The result looks like: # "type = const struct myns::QString::Null {" # " <no data fields>" # "}" pos1 = str.find("struct") + 7 pos2 = str.find("QString::Null") if pos1 > -1 and pos2 > -1: qqNs = str[pos1:pos2] return qqNs return "" except: return "" def findFirstZero(p, maximum): for i in xrange(maximum): if p.dereference() == 0: return i p = p + 1 return maximum + 1 def extractCharArray(p, maxsize): p = p.cast(lookupType("unsigned char").pointer()) s = "" i = 0 while i < maxsize: c = int(p.dereference()) if c == 0: return s s += "%c" % c p += 1 i += 1 if p.dereference() != 0: s += "..." return s def extractByteArray(value): d_ptr = value['d'].dereference() data = d_ptr['data'] size = d_ptr['size'] alloc = d_ptr['alloc'] check(0 <= size and size <= alloc and alloc <= 100*1000*1000) checkRef(d_ptr["ref"]) if size > 0: checkAccess(data, 4) checkAccess(data + size) == 0 return extractCharArray(data, min(100, size)) def encodeCharArray(p, maxsize, limit = -1): t = lookupType("unsigned char").pointer() p = p.cast(t) if limit == -1: limit = findFirstZero(p, maxsize) s = "" try: # gdb.Inferior is new in gdb 7.2 inferior = gdb.inferiors()[0] s = binascii.hexlify(inferior.read_memory(p, limit)) except: for i in xrange(limit): s += "%02x" % int(p.dereference()) p += 1 if limit > maxsize: s += "2e2e2e" return s def encodeChar2Array(p, maxsize): t = lookupType("unsigned short").pointer() p = p.cast(t) limit = findFirstZero(p, maxsize) s = "" for i in xrange(limit): s += "%04x" % int(p.dereference()) p += 1 if i == maxsize: s += "2e002e002e00" return s def encodeChar4Array(p, maxsize): t = lookupType("unsigned int").pointer() p = p.cast(t) limit = findFirstZero(p, maxsize) s = "" for i in xrange(limit): s += "%08x" % int(p.dereference()) p += 1 if i > maxsize: s += "2e0000002e0000002e000000" return s def qByteArrayData(value): private = value['d'] checkRef(private['ref']) try: # 
Qt 5. Will fail on Qt 4 due to the missing 'offset' member. offset = private['offset'] charPointerType = lookupType('char *') data = private.cast(charPointerType) + private['offset'] return data, int(private['size']), int(private['alloc']) except: # Qt 4: return private['data'], int(private['size']), int(private['alloc']) def encodeByteArray(value): data, size, alloc = qByteArrayData(value) check(0 <= size and size <= alloc and alloc <= 100*1000*1000) if size > 0: checkAccess(data, 4) checkAccess(data + size) == 0 return encodeCharArray(data, 100, size) def qQStringData(value): private = value['d'] checkRef(private['ref']) try: # Qt 5. Will fail on Qt 4 due to the missing 'offset' member. offset = private['offset'] ushortPointerType = lookupType('ushort *') data = private.cast(ushortPointerType) + offset / 2 return data, int(private['size']), int(private['alloc']) except: # Qt 4. return private['data'], int(private['size']), int(private['alloc']) def encodeString(value): data, size, alloc = qQStringData(value) if alloc != 0: check(0 <= size and size <= alloc and alloc <= 100*1000*1000) if size > 0: checkAccess(data, 4) checkAccess(data + size) == 0 s = "" limit = min(size, 1000) try: # gdb.Inferior is new in gdb 7.2 inferior = gdb.inferiors()[0] s = binascii.hexlify(inferior.read_memory(data, 2 * limit)) except: p = data for i in xrange(limit): val = int(p.dereference()) s += "%02x" % (val % 256) s += "%02x" % (val / 256) p += 1 if limit < size: s += "2e002e002e00" return s def stripTypedefs(type): type = type.unqualified() while type.code == TypedefCode: type = type.strip_typedefs().unqualified() return type def extractFields(type): # Insufficient, see http://sourceware.org/bugzilla/show_bug.cgi?id=10953: #fields = type.fields() # Insufficient, see http://sourceware.org/bugzilla/show_bug.cgi?id=11777: #fields = defsype).fields() # This seems to work. 
#warn("TYPE 0: %s" % type) type = stripTypedefs(type) fields = type.fields() if len(fields): return fields #warn("TYPE 1: %s" % type) # This fails for arrays. See comment in lookupType. type0 = lookupType(str(type)) if not type0 is None: type = type0 if type.code == FunctionCode: return [] #warn("TYPE 2: %s" % type) fields = type.fields() #warn("FIELDS: %s" % fields) return fields ####################################################################### # # LocalItem # ####################################################################### # Contains iname, name, and value. class LocalItem: pass ####################################################################### # # SetupCommand # ####################################################################### # This is a cache mapping from 'type name' to 'display alternatives'. qqFormats = {} # This is a cache of all known dumpers. qqDumpers = {} # This is a cache of all dumpers that support writing. qqEditable = {} # This is a cache of the namespace of the currently used Qt version. # FIXME: This is not available on 'bbsetup' time, only at 'bb' time. # This is a cache of typenames->bool saying whether we are QObject # derived. qqQObjectCache = {} # This keeps canonical forms of the typenames, without array indices etc. 
# Cache of canonical type names with array indices and template arguments
# stripped, keyed by the raw type name.
qqStripForFormat = {}

def stripForFormat(typeName):
    # Return a canonical form of typeName suitable for format lookup:
    # class/struct tags, spaces, template arguments ('<...>') and numeric
    # array indices inside '[...]' are removed. Results are memoized in
    # qqStripForFormat.
    if typeName in qqStripForFormat:
        return qqStripForFormat[typeName]
    stripped = ""
    inArray = 0
    for c in stripClassTag(typeName):
        if c == '<':
            # Everything from the first template bracket on is dropped.
            break
        if c == ' ':
            continue
        if c == '[':
            inArray += 1
        elif c == ']':
            inArray -= 1
        if inArray and ord(c) >= 48 and ord(c) <= 57:
            # Skip digits inside array brackets so "int[3]" and "int[7]"
            # canonicalize identically.
            continue
        stripped += c
    qqStripForFormat[typeName] = stripped
    return stripped

def bbsetup(args):
    # Scan this module for qdump__*/qform__*/qedit__* entries, (re)build the
    # dumper/format/editable registries, and return a description string for
    # the IDE side.
    #
    # FIX: the original assigned 'typeInfoCache = {}' / 'typeCache = {}'
    # without a 'global' statement, creating dead local shadows instead of
    # resetting the module-level caches as intended.
    global typeInfoCache, typeCache
    typeInfoCache = {}
    typeCache = {}
    module = sys.modules[__name__]
    for key, value in module.__dict__.items():
        if key.startswith("qdump__"):
            name = key[7:]
            qqDumpers[name] = value
            qqFormats[name] = qqFormats.get(name, "")
        elif key.startswith("qform__"):
            name = key[7:]
            formats = ""
            try:
                # qform__* entries are callables returning the format list.
                formats = value()
            except:
                pass
            qqFormats[name] = formats
        elif key.startswith("qedit__"):
            name = key[7:]
            try:
                qqEditable[name] = value
            except:
                pass
    result = "dumpers=["
    #qqNs = qtNamespace() # This is too early
    for key, value in qqFormats.items():
        if qqEditable.has_key(key):
            result += '{type="%s",formats="%s",editable="true"},' % (key, value)
        else:
            result += '{type="%s",formats="%s"},' % (key, value)
    result += ']'
    #result += ',namespace="%s"' % qqNs
    result += ',hasInferiorThreadList="%s"' % int(hasInferiorThreadList())
    return result

registerCommand("bbsetup", bbsetup)


#######################################################################
#
# Edit Command
#
#######################################################################

def bbedit(args):
    # Write a new value into the inferior. args is
    # "b16(type),b16(expression),b16(value)". If a qedit__* helper is
    # registered for the (namespace-stripped, de-templated) type it is used,
    # otherwise a plain gdb 'set' is issued.
    (typeName, expr, value) = args.split(",")
    typeName = base64.b16decode(typeName, True)
    ns = qtNamespace()
    if typeName.startswith(ns):
        typeName = typeName[len(ns):]
    typeName = typeName.replace("::", "__")
    pos = typeName.find('<')
    if pos != -1:
        # Editable helpers are registered under the template base name only.
        typeName = typeName[0:pos]
    expr = base64.b16decode(expr, True)
    value = base64.b16decode(value, True)
    #warn("EDIT: %s %s %s %s: " % (pos, typeName, expr, value))
    if qqEditable.has_key(typeName):
        qqEditable[typeName](expr, value)
    else:
        gdb.execute("set (%s)=%s" % (expr, value))

registerCommand("bbedit", bbedit)
####################################################################### # # Frame Command # ####################################################################### def bb(args): output = 'data=[' + "".join(Dumper(args).output) + '],typeinfo=[' for typeName, typeInfo in typeInfoCache.iteritems(): if not typeInfo.reported: output += '{name="' + base64.b64encode(typeName) output += '",size="' + str(typeInfo.size) + '"},' typeInfo.reported = True output += ']'; return output registerCommand("bb", bb) def p1(args): import cProfile cProfile.run('bb("%s")' % args, "/tmp/bbprof") import pstats pstats.Stats('/tmp/bbprof').sort_stats('time').print_stats() return "" registerCommand("p1", p1) def p2(args): import timeit return timeit.repeat('bb("%s")' % args, 'from __main__ import bb', number=10) registerCommand("p2", p2) ####################################################################### # # The Dumper Class # ####################################################################### class Dumper: def __init__(self, args): self.output = [] self.currentIName = "" self.currentPrintsAddress = True self.currentChildType = "" self.currentChildNumChild = -1 self.currentMaxNumChild = -1 self.currentNumChild = -1 self.currentValue = None self.currentValuePriority = -100 self.currentValueEncoding = None self.currentType = None self.currentTypePriority = -100 self.typeformats = {} self.formats = {} self.expandedINames = "" options = [] varList = [] watchers = "" resultVarName = "" for arg in args.split(' '): pos = arg.find(":") + 1 if arg.startswith("options:"): options = arg[pos:].split(",") elif arg.startswith("vars:"): if len(arg[pos:]) > 0: varList = arg[pos:].split(",") elif arg.startswith("resultvarname:"): resultVarName = arg[pos:] elif arg.startswith("expanded:"): self.expandedINames = set(arg[pos:].split(",")) elif arg.startswith("typeformats:"): for f in arg[pos:].split(","): pos = f.find("=") if pos != -1: type = base64.b16decode(f[0:pos], True) self.typeformats[type] = 
int(f[pos+1:]) elif arg.startswith("formats:"): for f in arg[pos:].split(","): pos = f.find("=") if pos != -1: self.formats[f[0:pos]] = int(f[pos+1:]) elif arg.startswith("watchers:"): watchers = base64.b16decode(arg[pos:], True) self.useDynamicType = "dyntype" in options self.useFancy = "fancy" in options self.passExceptions = "pe" in options self.autoDerefPointers = "autoderef" in options self.partialUpdate = "partial" in options self.tooltipOnly = "tooltiponly" in options self.noLocals = "nolocals" in options self.ns = qtNamespace() #warn("NAMESPACE: '%s'" % self.ns) #warn("VARIABLES: %s" % varList) #warn("EXPANDED INAMES: %s" % self.expandedINames) #warn("WATCHERS: %s" % watchers) #warn("PARTIAL: %s" % self.partialUpdate) #warn("NO LOCALS: %s" % self.noLocals) module = sys.modules[__name__] # # Locals # locals = [] fullUpdateNeeded = True if self.partialUpdate and len(varList) == 1 and not self.tooltipOnly: #warn("PARTIAL: %s" % varList) parts = varList[0].split('.') #warn("PARTIAL PARTS: %s" % parts) name = parts[1] #warn("PARTIAL VAR: %s" % name) #fullUpdateNeeded = False try: frame = gdb.selected_frame() item = LocalItem() item.iname = "local." + name item.name = name item.value = frame.read_var(name) locals = [item] #warn("PARTIAL LOCALS: %s" % locals) fullUpdateNeeded = False except: pass varList = [] if fullUpdateNeeded and not self.tooltipOnly and not self.noLocals: locals = listOfLocals(varList) if "autotest" in options: for item in listOfLocals([]): self.expandedINames.add(item.iname) self.expandedINames.discard("") warn("EXPANDED: %s" % self.expandedINames) # Take care of the return value of the last function call. if len(resultVarName) > 0: try: item = LocalItem() item.name = resultVarName item.iname = "return." + resultVarName item.value = parseAndEvaluate(resultVarName) locals.append(item) except: # Don't bother. It's only supplementary information anyway. 
pass for item in locals: value = upcast(item.value) with OutputSafer(self, "", ""): self.anonNumber = -1 type = value.type.unqualified() typeName = str(type) # Special handling for char** argv. if type.code == PointerCode \ and item.iname == "local.argv" \ and typeName == "char **": n = 0 p = value # p is 0 for "optimized out" cases. Or contains rubbish. try: if not isNull(p): while not isNull(p.dereference()) and n <= 100: p += 1 n += 1 except: pass with TopLevelItem(self, item.iname): self.put('iname="local.argv",name="argv",') self.putItemCount(n, 100) self.putType(typeName) self.putNumChild(n) if self.currentIName in self.expandedINames: p = value with Children(self, n): for i in xrange(n): self.putSubItem(i, p.dereference()) p += 1 continue else: # A "normal" local variable or parameter. with TopLevelItem(self, item.iname): self.put('iname="%s",' % item.iname) self.put('name="%s",' % item.name) self.putItem(value) # # Watchers # with OutputSafer(self, ",", ""): if len(watchers) > 0: for watcher in watchers.split("##"): (exp, iname) = watcher.split("#") self.handleWatch(exp, iname) #print('data=[' + locals + sep + watchers + ']\n') def checkForQObjectBase(self, type): name = str(type) if name in qqQObjectCache: return qqQObjectCache[name] if name == self.ns + "QObject": qqQObjectCache[name] = True return True fields = type.strip_typedefs().fields() #fields = extractFields(type) if len(fields) == 0: qqQObjectCache[name] = False return False base = fields[0].type.strip_typedefs() if base.code != StructCode: return False # Prevent infinite recursion in Qt 3.3.8 if str(base) == name: return False result = self.checkForQObjectBase(base) qqQObjectCache[name] = result return result def handleWatch(self, exp, iname): exp = str(exp) escapedExp = base64.b64encode(exp); #warn("HANDLING WATCH %s, INAME: '%s'" % (exp, iname)) if exp.startswith("[") and exp.endswith("]"): #warn("EVAL: EXP: %s" % exp) with TopLevelItem(self, iname): self.put('iname="%s",' % iname) 
self.put('wname="%s",' % escapedExp) try: list = eval(exp) self.putValue("") self.putNoType() self.putNumChild(len(list)) # This is a list of expressions to evaluate with Children(self, len(list)): itemNumber = 0 for item in list: self.handleWatch(item, "%s.%d" % (iname, itemNumber)) itemNumber += 1 except RuntimeError, error: warn("EVAL: ERROR CAUGHT %s" % error) self.putValue("<syntax error>") self.putNoType() self.putNumChild(0) self.put("children=[],") return with TopLevelItem(self, iname): self.put('iname="%s",' % iname) self.put('wname="%s",' % escapedExp) if len(exp) == 0: # The <Edit> case self.putValue(" ") self.putNoType() self.putNumChild(0) else: try: value = parseAndEvaluate(exp) self.putItem(value) except RuntimeError: self.currentType = " " self.currentValue = "<no such value>" self.currentChildNumChild = -1 self.currentNumChild = 0 self.putNumChild(0) def put(self, value): self.output.append(value) def putField(self, name, value): self.put('%s="%s",' % (name, value)) def childRange(self): if self.currentMaxNumChild is None: return xrange(0, self.currentNumChild) return xrange(min(self.currentMaxNumChild, self.currentNumChild)) # Convenience function. def putItemCount(self, count, maximum = 1000000000): # This needs to override the default value, so don't use 'put' directly. if count > maximum: self.putValue('<>%s items>' % maximum) else: self.putValue('<%s items>' % count) def putType(self, type, priority = 0): # Higher priority values override lower ones. if priority >= self.currentTypePriority: self.currentType = str(type) self.currentTypePriority = priority def putNoType(self): # FIXME: replace with something that does not need special handling # in SubItem.__exit__(). 
self.putBetterType(" ") def putInaccessible(self): #self.putBetterType(" ") self.putNumChild(0) self.currentValue = None def putBetterType(self, type, priority = 0): self.currentType = str(type) self.currentTypePriority = self.currentTypePriority + 1 def putAddress(self, addr): if self.currentPrintsAddress: try: # addr can be "None", long(None) fails. self.put('addr="0x%x",' % long(addr)) except: pass def putNumChild(self, numchild): #warn("NUM CHILD: '%s' '%s'" % (numchild, self.currentChildNumChild)) if numchild != self.currentChildNumChild: self.put('numchild="%s",' % numchild) def putValue(self, value, encoding = None, priority = 0): # Higher priority values override lower ones. if priority >= self.currentValuePriority: self.currentValue = value self.currentValuePriority = priority self.currentValueEncoding = encoding def putPointerValue(self, value): # Use a lower priority if value is None: self.putValue(" ", None, -1) else: self.putValue("0x%x" % value.dereference().cast( lookupType("unsigned long")), None, -1) def putStringValue(self, value, priority = 0): if not value is None: str = encodeString(value) self.putValue(str, Hex4EncodedLittleEndian, priority) def putDisplay(self, format, value = None, cmd = None): self.put('editformat="%s",' % format) if cmd is None: if not value is None: self.put('editvalue="%s",' % value) else: self.put('editvalue="%s|%s",' % (cmd, value)) def putByteArrayValue(self, value): str = encodeByteArray(value) self.putValue(str, Hex2EncodedLatin1) def putName(self, name): self.put('name="%s",' % name) def putMapName(self, value): if str(value.type) == qqNs + "QString": self.put('key="%s",' % encodeString(value)) self.put('keyencoded="%s",' % Hex4EncodedLittleEndian) elif str(value.type) == qqNs + "QByteArray": self.put('key="%s",' % encodeByteArray(value)) self.put('keyencoded="%s",' % Hex2EncodedLatin1) else: self.put('name="%s",' % value) def isExpanded(self): #warn("IS EXPANDED: %s in %s: %s" % (self.currentIName, # 
self.expandedINames, self.currentIName in self.expandedINames)) return self.currentIName in self.expandedINames def isExpandedSubItem(self, component): iname = "%s.%s" % (self.currentIName, component) #warn("IS EXPANDED: %s in %s" % (iname, self.expandedINames)) return iname in self.expandedINames def stripNamespaceFromType(self, typeName): type = stripClassTag(typeName) if len(self.ns) > 0 and type.startswith(self.ns): type = type[len(self.ns):] pos = type.find("<") # FIXME: make it recognize foo<A>::bar<B>::iterator? while pos != -1: pos1 = type.rfind(">", pos) type = type[0:pos] + type[pos1+1:] pos = type.find("<") return type def isMovableType(self, type): if type.code == PointerCode: return True if isSimpleType(type): return True return self.stripNamespaceFromType(str(type)) in movableTypes def putIntItem(self, name, value): with SubItem(self, name): self.putValue(value) self.putAddress(value.address) self.putType("int") self.putNumChild(0) def putBoolItem(self, name, value): with SubItem(self, name): self.putValue(value) self.putType("bool") self.putNumChild(0) def currentItemFormat(self): format = self.formats.get(self.currentIName) if format is None: format = self.typeformats.get(stripForFormat(str(self.currentType))) return format def putSubItem(self, component, value, tryDynamic=True): with SubItem(self, component): self.putItem(value, tryDynamic) def putNamedSubItem(self, component, value, name): with SubItem(self, component): self.putName(name) self.putItem(value) def putCallItem(self, name, value, func, *args): result = call2(value, func, args) with SubItem(self, name): self.putItem(result) def putItem(self, value, tryDynamic=True): if value is None: # Happens for non-available watchers in gdb versions that # need to use gdb.execute instead of gdb.parse_and_eval self.putValue("<not available>") self.putType("<unknown>") self.putNumChild(0) return type = value.type.unqualified() typeName = str(type) # FIXME: Gui shows references stripped? 
#warn(" ") #warn("REAL INAME: %s " % self.currentIName) #warn("REAL TYPE: %s " % value.type) #warn("REAL CODE: %s " % value.type.code) #warn("REAL VALUE: %s " % value) if type.code == ReferenceCode: try: # FIXME: This throws "RuntimeError: Attempt to dereference a # generic pointer." with MinGW's gcc 4.5 when it "identifies" # a "QWidget &" as "void &" and with optimized out code. self.putItem(value.cast(type.target().unqualified())) return except RuntimeError: self.putValue("<optimized out reference>") self.putType(typeName) self.putNumChild(0) return if type.code == IntCode or type.code == CharCode: self.putAddress(value.address) self.putType(typeName) if value.is_optimized_out: self.putValue("<optimized out>") else: self.putValue(int(value)) self.putNumChild(0) return if type.code == FloatCode or type.code == BoolCode: self.putAddress(value.address) self.putType(typeName) if value.is_optimized_out: self.putValue("<optimized out>") else: self.putValue(value) self.putNumChild(0) return if type.code == EnumCode: self.putAddress(value.address) self.putType(typeName) if value.is_optimized_out: self.putValue("<optimized out>") else: self.putValue("%s (%d)" % (value, value)) self.putNumChild(0) return if type.code == TypedefCode: type = stripTypedefs(type) # The cast can destroy the address? self.putAddress(value.address) # Workaround for http://sourceware.org/bugzilla/show_bug.cgi?id=13380 if type.code == ArrayCode: value = parseAndEvaluate("{%s}%s" % (type, value.address)) else: try: value = value.cast(type) self.putItem(value) except: self.putValue("<optimized out typedef>") self.putType(typeName) self.putNumChild(0) return self.putItem(value) self.putBetterType(typeName) return if type.code == ArrayCode: targettype = type.target() self.putAddress(value.address) self.putType(typeName) self.putNumChild(1) format = self.currentItemFormat() if format == 0: # Explicitly requested Latin1 formatting. 
self.putValue(encodeCharArray(value, 100), Hex2EncodedLatin1) elif format == 1: # Explicitly requested UTF-8 formatting. self.putValue(encodeCharArray(value, 100), Hex2EncodedUtf8) elif format == 2: # Explicitly requested Local 8-bit formatting. self.putValue(encodeCharArray(value, 100), Hex2EncodedLocal8Bit) else: self.putValue("@0x%x" % long(value.cast(targettype.pointer()))) if self.currentIName in self.expandedINames: i = 0 with Children(self, childType=targettype, addrBase=value.cast(targettype.pointer()), addrStep=targettype.sizeof): self.putFields(value) i = i + 1 return if type.code == PointerCode: #warn("POINTER: %s" % value) # This could still be stored in a register and # potentially dereferencable. if value.is_optimized_out: self.putValue("<optimized out>") try: value.dereference() except: # Failure to dereference a pointer should at least # show the value of a pointer. self.putValue(cleanAddress(value)) self.putType(typeName) self.putNumChild(0) return if isNull(value): #warn("NULL POINTER") self.putAddress(value.address) self.putType(typeName) self.putValue("0x0") self.putNumChild(0) return innerType = type.target() innerTypeName = str(innerType.unqualified()) format = self.formats.get(self.currentIName) if format is None: format = self.typeformats.get(stripForFormat(str(type))) if innerType.code == VoidCode: #warn("VOID POINTER: %s" % format) self.putType(typeName) self.putValue(str(value)) self.putNumChild(0) self.putAddress(value.address) return if format == None and innerTypeName == "char": # Use Latin1 as default for char *. self.putAddress(value.address) self.putType(typeName) self.putValue(encodeCharArray(value, 100), Hex2EncodedLatin1) self.putNumChild(0) return if format == 0: # Explicitly requested bald pointer. 
self.putAddress(value.address) self.putType(typeName) self.putPointerValue(value.address) self.putNumChild(1) if self.currentIName in self.expandedINames: with Children(self): with SubItem(self, '*'): self.putItem(value.dereference()) #self.putAddress(value) return if format == 1: # Explicitly requested Latin1 formatting. self.putAddress(value.address) self.putType(typeName) self.putValue(encodeCharArray(value, 100), Hex2EncodedLatin1) self.putNumChild(0) return if format == 2: # Explicitly requested UTF-8 formatting. self.putAddress(value.address) self.putType(typeName) self.putValue(encodeCharArray(value, 100), Hex2EncodedUtf8) self.putNumChild(0) return if format == 3: # Explicitly requested local 8 bit formatting. self.putAddress(value.address) self.putType(typeName) self.putValue(encodeCharArray(value, 100), Hex2EncodedLocal8Bit) self.putNumChild(0) return if format == 4: # Explicitly requested UTF-16 formatting. self.putAddress(value.address) self.putType(typeName) self.putValue(encodeChar2Array(value, 100), Hex4EncodedBigEndian) self.putNumChild(0) return if format == 5: # Explicitly requested UCS-4 formatting. self.putAddress(value.address) self.putType(typeName) self.putValue(encodeChar4Array(value, 100), Hex8EncodedBigEndian) self.putNumChild(0) return if (typeName.replace("(anonymous namespace)", "").find("(") != -1): # A function pointer with format None. self.putValue(str(value)) self.putAddress(value.address) self.putType(typeName) self.putNumChild(0) return #warn("AUTODEREF: %s" % self.autoDerefPointers) #warn("INAME: %s" % self.currentIName) if self.autoDerefPointers or self.currentIName.endswith('.this'): ## Generic pointer type with format None #warn("GENERIC AUTODEREF POINTER: %s AT %s TO %s" # % (type, value.address, innerTypeName)) # Never dereference char types. 
if innerTypeName != "char" \ and innerTypeName != "signed char" \ and innerTypeName != "unsigned char" \ and innerTypeName != "wchar_t": self.putType(innerType) savedCurrentChildType = self.currentChildType self.currentChildType = stripClassTag(innerTypeName) self.putItem(value.dereference()) self.currentChildType = savedCurrentChildType self.putPointerValue(value.address) self.put('origaddr="%s",' % cleanAddress(value.address)) return # Fall back to plain pointer printing. #warn("GENERIC PLAIN POINTER: %s" % value.type) self.putType(typeName) self.putAddress(value.address) self.putNumChild(1) if self.currentIName in self.expandedINames: with Children(self): with SubItem(self, "*"): self.putItem(value.dereference()) self.putPointerValue(value.address) return if typeName.startswith("<anon"): # Anonymous union. We need a dummy name to distinguish # multiple anonymous unions in the struct. self.putType(type) self.putValue("{...}") self.anonNumber += 1 with Children(self, 1): self.listAnonymous(value, "#%d" % self.anonNumber, type) return if type.code != StructCode and type.code != UnionCode: warn("WRONG ASSUMPTION HERE: %s " % type.code) check(False) if self.useDynamicType and tryDynamic: self.putItem(expensiveUpcast(value), False) return format = self.formats.get(self.currentIName) if format is None: format = self.typeformats.get(stripForFormat(typeName)) if self.useFancy and (format is None or format >= 1): self.putAddress(value.address) self.putType(typeName) if typeName in qqDumpers: qqDumpers[typeName](self, value) return nsStrippedType = self.stripNamespaceFromType(typeName)\ .replace("::", "__") # The following block is only needed for D. if nsStrippedType.startswith("_A"): # DMD v2.058 encodes string[] as _Array_uns long long. # With spaces. 
if nsStrippedType.startswith("_Array_"): qdump_Array(self, value) return if nsStrippedType.startswith("_AArray_"): qdump_AArray(self, value) return #warn(" STRIPPED: %s" % nsStrippedType) #warn(" DUMPERS: %s" % (nsStrippedType in qqDumpers)) if nsStrippedType in qqDumpers: if tryDynamic: qqDumpers[nsStrippedType](self, expensiveUpcast(value)) else: qqDumpers[nsStrippedType](self, value) return # Is this derived from QObject? if self.checkForQObjectBase(type): qdump__QObject(self, value) return #warn("GENERIC STRUCT: %s" % type) #warn("INAME: %s " % self.currentIName) #warn("INAMES: %s " % self.expandedINames) #warn("EXPANDED: %s " % (self.currentIName in self.expandedINames)) fields = extractFields(type) #fields = type.fields() self.putType(typeName) self.putAddress(value.address) self.putValue("{...}") if False: numfields = 0 for field in fields: bitpos = getattr(field, "bitpos", None) if not bitpos is None: ++numfields else: numfields = len(fields) self.putNumChild(numfields) if self.currentIName in self.expandedINames: innerType = None if len(fields) == 1 and fields[0].name is None: innerType = type.target() with Children(self, 1, childType=innerType): self.putFields(value) def putPlainChildren(self, value): self.putValue(" ", None, -99) self.putNumChild(1) self.putAddress(value.address) if self.currentIName in self.expandedINames: with Children(self): self.putFields(value) def putFields(self, value, dumpBase = True): type = stripTypedefs(value.type) # Insufficient, see http://sourceware.org/bugzilla/show_bug.cgi?id=10953: #fields = type.fields() fields = extractFields(type) #warn("TYPE: %s" % type) #warn("FIELDS: %s" % fields) baseNumber = 0 for field in fields: #warn("FIELD: %s" % field) #warn(" BITSIZE: %s" % field.bitsize) #warn(" ARTIFICIAL: %s" % field.artificial) bitpos = getattr(field, "bitpos", None) if bitpos is None: # FIXME: Is check correct? continue # A static class member(?). 
if field.name is None: innerType = type.target() p = value.cast(innerType.pointer()) for i in xrange(type.sizeof / innerType.sizeof): with SubItem(self, i): self.putItem(p.dereference()) p = p + 1 continue # Ignore vtable pointers for virtual inheritance. if field.name.startswith("_vptr."): with SubItem(self, "[vptr]"): # int (**)(void) n = 100 self.putType(" ") self.putValue(value[field.name]) self.putNumChild(n) if self.isExpanded(): with Children(self): p = value[field.name] for i in xrange(n): if long(p.dereference()) != 0: with SubItem(self, i): self.putItem(p.dereference()) self.putType(" ") p = p + 1 continue #warn("FIELD NAME: %s" % field.name) #warn("FIELD TYPE: %s" % field.type) if field.is_base_class: # Field is base type. We cannot use field.name as part # of the iname as it might contain spaces and other # strange characters. if dumpBase: baseNumber += 1 with UnnamedSubItem(self, "@%d" % baseNumber): self.put('iname="%s",' % self.currentIName) self.put('name="[%s]",' % field.name) self.putItem(value.cast(field.type), False) elif len(field.name) == 0: # Anonymous union. We need a dummy name to distinguish # multiple anonymous unions in the struct. self.anonNumber += 1 self.listAnonymous(value, "#%d" % self.anonNumber, field.type) else: # Named field. with SubItem(self, field.name): #bitsize = getattr(field, "bitsize", None) #if not bitsize is None: # self.put("bitsize=\"%s\",bitpos=\"%s\"," # % (bitsize, bitpos)) self.putItem(upcast(value[field.name])) def listAnonymous(self, value, name, type): for field in type.fields(): #warn("FIELD NAME: %s" % field.name) if len(field.name) > 0: with SubItem(self, field.name): #self.putAddress(value.address) self.putItem(value[field.name]) else: # Further nested. 
self.anonNumber += 1 name = "#%d" % self.anonNumber #iname = "%s.%s" % (selitem.iname, name) #child = SameItem(item.value, iname) with SubItem(self, name): self.put('name="%s",' % name) self.putValue(" ") fieldTypeName = str(field.type) if fieldTypeName.endswith("<anonymous union>"): self.putType("<anonymous union>") elif fieldTypeName.endswith("<anonymous struct>"): self.putType("<anonymous struct>") else: self.putType(fieldTypeName) with Children(self, 1): self.listAnonymous(value, name, field.type) ####################################################################### # # ThreadNames Command # ####################################################################### def threadnames(arg): ns = qtNamespace() out = '[' oldthread = gdb.selected_thread() try: for thread in gdb.inferiors()[0].threads(): maximalStackDepth = int(arg) thread.switch() e = gdb.selected_frame () while True: maximalStackDepth -= 1 if maximalStackDepth < 0: break e = e.older() if e == None or e.name() == None: break if e.name() == ns + "QThreadPrivate::start": try: thrptr = e.read_var("thr").dereference() obtype = lookupType(ns + "QObjectPrivate").pointer() d_ptr = thrptr["d_ptr"]["d"].cast(obtype).dereference() objectName = d_ptr["objectName"] out += '{valueencoded="'; out += str(Hex4EncodedLittleEndianWithoutQuotes)+'",id="' out += str(thread.num) + '",value="' out += encodeString(objectName) out += '"},' except: pass except: pass oldthread.switch() return out + ']' registerCommand("threadnames", threadnames) ####################################################################### # # Mixed C++/Qml debugging # ####################################################################### def qmlb(args): # executeCommand(command, to_string=True).split("\n") warm("RUNNING: break -f QScript::FunctionWrapper::proxyCall") output = catchCliOutput("rbreak -f QScript::FunctionWrapper::proxyCall") warn("OUTPUT: %s " % output) bp = output[0] warn("BP: %s " % bp) # BP: ['Breakpoint 3 at 0xf166e7: file 
.../qscriptfunction.cpp, line 75.\\n'] \n" pos = bp.find(' ') + 1 warn("POS: %s " % pos) nr = bp[bp.find(' ') + 1 : bp.find(' at ')] warn("NR: %s " % nr) return bp registerCommand("qmlb", qmlb)
lgpl-2.1
arpith/zulip
analytics/management/commands/active_user_stats.py
22
2987
from __future__ import absolute_import from __future__ import print_function from django.core.management.base import BaseCommand from typing import Any from zerver.models import UserPresence, UserActivity from zerver.lib.utils import statsd, statsd_key from datetime import datetime, timedelta from collections import defaultdict class Command(BaseCommand): help = """Sends active user statistics to statsd. Run as a cron job that runs every 10 minutes.""" def handle(self, *args, **options): # type: (*Any, **Any) -> None # Get list of all active users in the last 1 week cutoff = datetime.now() - timedelta(minutes=30, hours=168) users = UserPresence.objects.select_related().filter(timestamp__gt=cutoff) # Calculate 10min, 2hrs, 12hrs, 1day, 2 business days (TODO business days), 1 week bucket of stats hour_buckets = [0.16, 2, 12, 24, 48, 168] user_info = defaultdict(dict) # type: Dict[str, Dict[float, List[str]]] for last_presence in users: if last_presence.status == UserPresence.IDLE: known_active = last_presence.timestamp - timedelta(minutes=30) else: known_active = last_presence.timestamp for bucket in hour_buckets: if bucket not in user_info[last_presence.user_profile.realm.domain]: user_info[last_presence.user_profile.realm.domain][bucket] = [] if datetime.now(known_active.tzinfo) - known_active < timedelta(hours=bucket): user_info[last_presence.user_profile.realm.domain][bucket].append(last_presence.user_profile.email) for realm, buckets in user_info.items(): print("Realm %s" % (realm,)) for hr, users in sorted(buckets.items()): print("\tUsers for %s: %s" % (hr, len(users))) statsd.gauge("users.active.%s.%shr" % (statsd_key(realm, True), statsd_key(hr, True)), len(users)) # Also do stats for how many users have been reading the app. 
users_reading = UserActivity.objects.select_related().filter(query="/json/messages/flags") user_info = defaultdict(dict) for activity in users_reading: for bucket in hour_buckets: if bucket not in user_info[activity.user_profile.realm.domain]: user_info[activity.user_profile.realm.domain][bucket] = [] if datetime.now(activity.last_visit.tzinfo) - activity.last_visit < timedelta(hours=bucket): user_info[activity.user_profile.realm.domain][bucket].append(activity.user_profile.email) for realm, buckets in user_info.items(): print("Realm %s" % (realm,)) for hr, users in sorted(buckets.items()): print("\tUsers reading for %s: %s" % (hr, len(users))) statsd.gauge("users.reading.%s.%shr" % (statsd_key(realm, True), statsd_key(hr, True)), len(users))
apache-2.0
dimagi/tablib
tablib/packages/yaml/__init__.py
112
9273
from error import * from tokens import * from events import * from nodes import * from loader import * from dumper import * __version__ = '3.09' try: from cyaml import * __with_libyaml__ = True except ImportError: __with_libyaml__ = False def scan(stream, Loader=Loader): """ Scan a YAML stream and produce scanning tokens. """ loader = Loader(stream) while loader.check_token(): yield loader.get_token() def parse(stream, Loader=Loader): """ Parse a YAML stream and produce parsing events. """ loader = Loader(stream) while loader.check_event(): yield loader.get_event() def compose(stream, Loader=Loader): """ Parse the first YAML document in a stream and produce the corresponding representation tree. """ loader = Loader(stream) return loader.get_single_node() def compose_all(stream, Loader=Loader): """ Parse all YAML documents in a stream and produce corresponding representation trees. """ loader = Loader(stream) while loader.check_node(): yield loader.get_node() def load(stream, Loader=Loader): """ Parse the first YAML document in a stream and produce the corresponding Python object. """ loader = Loader(stream) return loader.get_single_data() def load_all(stream, Loader=Loader): """ Parse all YAML documents in a stream and produce corresponding Python objects. """ loader = Loader(stream) while loader.check_data(): yield loader.get_data() def safe_load(stream): """ Parse the first YAML document in a stream and produce the corresponding Python object. Resolve only basic YAML tags. """ return load(stream, SafeLoader) def safe_load_all(stream): """ Parse all YAML documents in a stream and produce corresponding Python objects. Resolve only basic YAML tags. """ return load_all(stream, SafeLoader) def emit(events, stream=None, Dumper=Dumper, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None): """ Emit YAML parsing events into a stream. If stream is None, return the produced string instead. 
""" getvalue = None if stream is None: from StringIO import StringIO stream = StringIO() getvalue = stream.getvalue dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, allow_unicode=allow_unicode, line_break=line_break) for event in events: dumper.emit(event) if getvalue: return getvalue() def serialize_all(nodes, stream=None, Dumper=Dumper, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding='utf-8', explicit_start=None, explicit_end=None, version=None, tags=None): """ Serialize a sequence of representation trees into a YAML stream. If stream is None, return the produced string instead. """ getvalue = None if stream is None: if encoding is None: from StringIO import StringIO else: from cStringIO import StringIO stream = StringIO() getvalue = stream.getvalue dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, allow_unicode=allow_unicode, line_break=line_break, encoding=encoding, version=version, tags=tags, explicit_start=explicit_start, explicit_end=explicit_end) dumper.open() for node in nodes: dumper.serialize(node) dumper.close() if getvalue: return getvalue() def serialize(node, stream=None, Dumper=Dumper, **kwds): """ Serialize a representation tree into a YAML stream. If stream is None, return the produced string instead. """ return serialize_all([node], stream, Dumper=Dumper, **kwds) def dump_all(documents, stream=None, Dumper=Dumper, default_style=None, default_flow_style=None, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding='utf-8', explicit_start=None, explicit_end=None, version=None, tags=None): """ Serialize a sequence of Python objects into a YAML stream. If stream is None, return the produced string instead. 
""" getvalue = None if stream is None: if encoding is None: from StringIO import StringIO else: from cStringIO import StringIO stream = StringIO() getvalue = stream.getvalue dumper = Dumper(stream, default_style=default_style, default_flow_style=default_flow_style, canonical=canonical, indent=indent, width=width, allow_unicode=allow_unicode, line_break=line_break, encoding=encoding, version=version, tags=tags, explicit_start=explicit_start, explicit_end=explicit_end) dumper.open() for data in documents: dumper.represent(data) dumper.close() if getvalue: return getvalue() def dump(data, stream=None, Dumper=Dumper, **kwds): """ Serialize a Python object into a YAML stream. If stream is None, return the produced string instead. """ return dump_all([data], stream, Dumper=Dumper, **kwds) def safe_dump_all(documents, stream=None, **kwds): """ Serialize a sequence of Python objects into a YAML stream. Produce only basic YAML tags. If stream is None, return the produced string instead. """ return dump_all(documents, stream, Dumper=SafeDumper, **kwds) def safe_dump(data, stream=None, **kwds): """ Serialize a Python object into a YAML stream. Produce only basic YAML tags. If stream is None, return the produced string instead. """ return dump_all([data], stream, Dumper=SafeDumper, **kwds) def add_implicit_resolver(tag, regexp, first=None, Loader=Loader, Dumper=Dumper): """ Add an implicit scalar detector. If an implicit scalar value matches the given regexp, the corresponding tag is assigned to the scalar. first is a sequence of possible initial characters or None. """ Loader.add_implicit_resolver(tag, regexp, first) Dumper.add_implicit_resolver(tag, regexp, first) def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper): """ Add a path based resolver for the given tag. A path is a list of keys that forms a path to a node in the representation tree. Keys can be string values, integers, or None. 
""" Loader.add_path_resolver(tag, path, kind) Dumper.add_path_resolver(tag, path, kind) def add_constructor(tag, constructor, Loader=Loader): """ Add a constructor for the given tag. Constructor is a function that accepts a Loader instance and a node object and produces the corresponding Python object. """ Loader.add_constructor(tag, constructor) def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader): """ Add a multi-constructor for the given tag prefix. Multi-constructor is called for a node if its tag starts with tag_prefix. Multi-constructor accepts a Loader instance, a tag suffix, and a node object and produces the corresponding Python object. """ Loader.add_multi_constructor(tag_prefix, multi_constructor) def add_representer(data_type, representer, Dumper=Dumper): """ Add a representer for the given type. Representer is a function accepting a Dumper instance and an instance of the given data type and producing the corresponding representation node. """ Dumper.add_representer(data_type, representer) def add_multi_representer(data_type, multi_representer, Dumper=Dumper): """ Add a representer for the given type. Multi-representer is a function accepting a Dumper instance and an instance of the given data type or subtype and producing the corresponding representation node. """ Dumper.add_multi_representer(data_type, multi_representer) class YAMLObjectMetaclass(type): """ The metaclass for YAMLObject. """ def __init__(cls, name, bases, kwds): super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) cls.yaml_dumper.add_representer(cls, cls.to_yaml) class YAMLObject(object): """ An object that can dump itself to a YAML stream and load itself from a YAML stream. 
""" __metaclass__ = YAMLObjectMetaclass __slots__ = () # no direct instantiation, so allow immutable subclasses yaml_loader = Loader yaml_dumper = Dumper yaml_tag = None yaml_flow_style = None def from_yaml(cls, loader, node): """ Convert a representation node to a Python object. """ return loader.construct_yaml_object(node, cls) from_yaml = classmethod(from_yaml) def to_yaml(cls, dumper, data): """ Convert a Python object to a representation node. """ return dumper.represent_yaml_object(cls.yaml_tag, data, cls, flow_style=cls.yaml_flow_style) to_yaml = classmethod(to_yaml)
mit
baugp/ds4drv
ds4drv/backends/hidraw.py
3
5327
import fcntl import itertools import os from io import FileIO from time import sleep from evdev import InputDevice from pyudev import Context, Monitor from ..backend import Backend from ..exceptions import DeviceError from ..device import DS4Device from ..utils import zero_copy_slice IOC_RW = 3221243904 HIDIOCSFEATURE = lambda size: IOC_RW | (0x06 << 0) | (size << 16) HIDIOCGFEATURE = lambda size: IOC_RW | (0x07 << 0) | (size << 16) class HidrawDS4Device(DS4Device): def __init__(self, name, addr, type, hidraw_device, event_device): try: self.report_fd = os.open(hidraw_device, os.O_RDWR | os.O_NONBLOCK) self.fd = FileIO(self.report_fd, "rb+", closefd=False) self.input_device = InputDevice(event_device) self.input_device.grab() except (OSError, IOError) as err: raise DeviceError(err) self.buf = bytearray(self.report_size) super(HidrawDS4Device, self).__init__(name, addr, type) def read_report(self): try: ret = self.fd.readinto(self.buf) except IOError: return # Disconnection if ret == 0: return # Invalid report size or id, just ignore it if ret < self.report_size or self.buf[0] != self.valid_report_id: return False if self.type == "bluetooth": # Cut off bluetooth data buf = zero_copy_slice(self.buf, 2) else: buf = self.buf return self.parse_report(buf) def read_feature_report(self, report_id, size): op = HIDIOCGFEATURE(size + 1) buf = bytearray(size + 1) buf[0] = report_id return fcntl.ioctl(self.fd, op, bytes(buf)) def write_report(self, report_id, data): if self.type == "bluetooth": # TODO: Add a check for a kernel that supports writing # output reports when such a kernel has been released. 
return hid = bytearray((report_id,)) self.fd.write(hid + data) def close(self): try: self.fd.close() self.input_device.ungrab() except IOError: pass class HidrawBluetoothDS4Device(HidrawDS4Device): __type__ = "bluetooth" report_size = 78 valid_report_id = 0x11 def set_operational(self): self.read_feature_report(0x02, 37) class HidrawUSBDS4Device(HidrawDS4Device): __type__ = "usb" report_size = 64 valid_report_id = 0x01 def set_operational(self): # Get the bluetooth MAC addr = self.read_feature_report(0x81, 6)[1:] addr = ["{0:02x}".format(c) for c in bytearray(addr)] addr = ":".join(reversed(addr)).upper() self.device_name = "{0} {1}".format(addr, self.device_name) self.device_addr = addr HID_DEVICES = { "Sony Computer Entertainment Wireless Controller": HidrawUSBDS4Device, "Wireless Controller": HidrawBluetoothDS4Device, } class HidrawBackend(Backend): __name__ = "hidraw" def setup(self): pass def _get_future_devices(self, context): """Return a generator yielding new devices.""" monitor = Monitor.from_netlink(context) monitor.filter_by("hidraw") monitor.start() self._scanning_log_message() for device in iter(monitor.poll, None): if device.action == "add": # Sometimes udev rules has not been applied at this point, # causing permission denied error if we are running in user # mode. With this sleep this will hopefully not happen. 
sleep(1) yield device self._scanning_log_message() def _scanning_log_message(self): self.logger.info("Scanning for devices") @property def devices(self): """Wait for new DS4 devices to appear.""" context = Context() existing_devices = context.list_devices(subsystem="hidraw") future_devices = self._get_future_devices(context) for hidraw_device in itertools.chain(existing_devices, future_devices): hid_device = hidraw_device.parent if hid_device.subsystem != "hid": continue cls = HID_DEVICES.get(hid_device.get("HID_NAME")) if not cls: continue for child in hid_device.parent.children: event_device = child.get("DEVNAME", "") if event_device.startswith("/dev/input/event"): break else: continue try: device_addr = hid_device.get("HID_UNIQ", "").upper() if device_addr: device_name = "{0} {1}".format(device_addr, hidraw_device.sys_name) else: device_name = hidraw_device.sys_name yield cls(name=device_name, addr=device_addr, type=cls.__type__, hidraw_device=hidraw_device.device_node, event_device=event_device) except DeviceError as err: self.logger.error("Unable to open DS4 device: {0}", err)
mit
levelrf/level_basestation
gnuradio-core/src/python/gnuradio/gr/hier_block2.py
1
4444
# # Copyright 2006,2007 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio_core import hier_block2_swig # # This hack forces a 'has-a' relationship to look like an 'is-a' one. # # It allows Python classes to subclass this one, while passing through # method calls to the C++ class shared pointer from SWIG. # # It also allows us to intercept method calls if needed # class hier_block2(object): """ Subclass this to create a python hierarchical block. This is a python wrapper around the C++ hierarchical block implementation. Provides convenience functions and allows proper Python subclassing. """ def __init__(self, name, input_signature, output_signature): """ Create a hierarchical block with a given name and I/O signatures. """ self._hb = hier_block2_swig(name, input_signature, output_signature) def __getattr__(self, name): """ Pass-through member requests to the C++ object. """ if not hasattr(self, "_hb"): raise RuntimeError("hier_block2: invalid state--did you forget to call gr.hier_block2.__init__ in a derived class?") return getattr(self._hb, name) def connect(self, *points): """ Connect two or more block endpoints. An endpoint is either a (block, port) tuple or a block instance. In the latter case, the port number is assumed to be zero. 
To connect the hierarchical block external inputs or outputs to internal block inputs or outputs, use 'self' in the connect call. If multiple arguments are provided, connect will attempt to wire them in series, interpreting the endpoints as inputs or outputs as appropriate. """ if len (points) < 1: raise ValueError, ("connect requires at least one endpoint; %d provided." % (len (points),)) else: if len(points) == 1: self._hb.primitive_connect(points[0].to_basic_block()) else: for i in range (1, len (points)): self._connect(points[i-1], points[i]) def _connect(self, src, dst): (src_block, src_port) = self._coerce_endpoint(src) (dst_block, dst_port) = self._coerce_endpoint(dst) self._hb.primitive_connect(src_block.to_basic_block(), src_port, dst_block.to_basic_block(), dst_port) def _coerce_endpoint(self, endp): if hasattr(endp, 'to_basic_block'): return (endp, 0) else: if hasattr(endp, "__getitem__") and len(endp) == 2: return endp # Assume user put (block, port) else: raise ValueError("unable to coerce endpoint") def disconnect(self, *points): """ Disconnect two endpoints in the flowgraph. To disconnect the hierarchical block external inputs or outputs to internal block inputs or outputs, use 'self' in the connect call. If more than two arguments are provided, they are disconnected successively. """ if len (points) < 1: raise ValueError, ("disconnect requires at least one endpoint; %d provided." % (len (points),)) else: if len (points) == 1: self._hb.primitive_disconnect(points[0].to_basic_block()) else: for i in range (1, len (points)): self._disconnect(points[i-1], points[i]) def _disconnect(self, src, dst): (src_block, src_port) = self._coerce_endpoint(src) (dst_block, dst_port) = self._coerce_endpoint(dst) self._hb.primitive_disconnect(src_block.to_basic_block(), src_port, dst_block.to_basic_block(), dst_port)
gpl-3.0
caio2k/RIDE
src/robotide/controller/tablecontrollers.py
2
15236
# Copyright 2008-2015 Nokia Solutions and Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from robotide.publish import RideTestCaseRemoved, RideVariableAdded, \ RideVariableRemoved, RideVariableMovedUp, RideVariableMovedDown, \ RideUserKeywordRemoved, RideUserKeywordAdded, RideTestCaseAdded from robotide.publish.messages import RideItemMovedUp, RideItemMovedDown from robotide.robotapi import is_list_var, is_scalar_var, is_dict_var from robotide import utils from .basecontroller import ControllerWithParent from .macrocontrollers import TestCaseController, UserKeywordController from robotide.utils import overrides, variablematcher from .settingcontrollers import MetadataController, ImportController, \ VariableController class _WithListOperations(object): def move_up(self, index): if index > 0: self._swap(index - 1, index) def move_down(self, index): if index < len(self._items) - 1: self._swap(index, index + 1) def _swap(self, ind1, ind2): self._items[ind1], self._items[ind2] = self._items[ind2], self._items[ind1] self.mark_dirty() def delete(self, index): if isinstance(self._items, list): self._items.pop(index) else: self._items.data.pop(index) self.mark_dirty() @property def _items(self): raise NotImplementedError(self.__class__) def mark_dirty(self): raise NotImplementedError(self.__class__) class _TableController(ControllerWithParent): def __init__(self, parent_controller, table): self._parent = parent_controller self._table = table class 
VariableTableController(_TableController, _WithListOperations): def __init__(self, parent_controller, table): _TableController.__init__(self, parent_controller, table) self._variable_cache = {} def _get(self, variable): if variable not in self._variable_cache: self._variable_cache[variable] = VariableController(self, variable) return self._variable_cache[variable] def __iter__(self): return iter(self._get(v) for v in self._table) def __getitem__(self, index): return self._get(self._items[index]) def index(self, ctrl): return [v for v in self].index(ctrl) @property def _items(self): return self._table.variables def move_up(self, index): ctrl = self[index] _WithListOperations.move_up(self, index) other = self[index] self.mark_dirty() RideVariableMovedUp(item=ctrl, other=other).publish() def move_down(self, index): ctrl = self[index] _WithListOperations.move_down(self, index) other = self[index] self.mark_dirty() RideVariableMovedDown(item=ctrl, other=other).publish() def add_variable(self, name, value, comment=None): self._table.add(name, value, comment) self.mark_dirty() var_controller = self[-1] self.notify_variable_added(var_controller) return var_controller def validate_scalar_variable_name(self, name, item=None): return self._validate_name(_ScalarVarValidator(), name, item) def validate_list_variable_name(self, name, item=None): return self._validate_name(_ListVarValidator(), name, item) def validate_dict_variable_name(self, name, item=None): return self._validate_name(_DictVarValidator(), name, item) def _validate_name(self, validator, name, item=None): return VariableNameValidation(self, validator, name, item) def delete(self, index): self.remove_var(self[index]) def remove_var(self, var_controller): self._items.remove(var_controller.data) del self._variable_cache[var_controller.data] self.mark_dirty() self.notify_variable_removed(var_controller) def notify_variable_added(self, ctrl): self.datafile_controller.update_namespace() 
RideVariableAdded(datafile=self.datafile, name=ctrl.name, item=ctrl, index=ctrl.index).publish() def notify_variable_removed(self, ctrl): self.datafile_controller.update_namespace() RideVariableRemoved(datafile=self.datafile, name=ctrl.name, item=ctrl).publish() def contains_variable(self, name): vars_as_list = [] for var in self._items: vars_as_list += var.as_list() return any(variablematcher.value_contains_variable(string, name) for string in vars_as_list) class _ScalarVarValidator(object): __call__ = lambda self, name: is_scalar_var(name) name = 'Scalar' prefix = '$' class _ListVarValidator(object): __call__ = lambda self, name: is_list_var(name) name = 'List' prefix = '@' class _DictVarValidator(object): __call__ = lambda self, name: is_dict_var(name) name = 'Dictionary' prefix = '&' class _NameValidation(object): def __init__(self, table, name, named_ctrl=None): self._table = table self.error_message = '' self._named_ctrl = named_ctrl self._validate(name.strip()) def _name_taken(self, name): return any(utils.eq(name, item.name, ignore=['_']) for item in self._table if item != self._named_ctrl) class VariableNameValidation(_NameValidation): def __init__(self, table, validator, name, named_ctrl=None): self._validator = validator _NameValidation.__init__(self, table, name, named_ctrl) def _validate(self, name): if not self._validator(name): self.error_message = '%s variable name must be in format %s{name}' % \ (self._validator.name, self._validator.prefix) if self._name_taken(name): self.error_message = 'Variable with this name already exists.' class MacroNameValidation(_NameValidation): def _validate(self, name): if not name: self.error_message = '%s name cannot be empty.' % \ self._table.item_type if self._name_taken(name): self.error_message = '%s with this name already exists.' 
% \ self._table.item_type if "\n" in name: self.error_message = '%s name contains newlines' % \ self._table.item_type class _MacroTable(_TableController): @property def _items(self): raise NotImplementedError(self.__class__) def __iter__(self): return iter(self._create_controller(item) for item in self._table) def _create_controller(self, item): if item not in self._item_to_controller: self._item_to_controller[item] = self._controller_class(self, item) return self._item_to_controller[item] @property def _item_to_controller(self): if not hasattr(self, '_item_to_controller_attribute'): self._item_to_controller_attribute = {} return self._item_to_controller_attribute def __len__(self): return len(self._items) def __getitem__(self, index): return self._create_controller(self._items[index]) def move_up(self, item): items = self._items idx = items.index(item) if idx == 0: return False upper = idx - 1 items[upper], items[idx] = items[idx], items[upper] self.mark_dirty() RideItemMovedUp(item=self._create_controller(item)).publish() return True def move_down(self, item): items = self._items idx = items.index(item) if idx + 1 == len(items): return False lower = idx + 1 items[idx], items[lower] = items[lower], items[idx] self.mark_dirty() RideItemMovedDown(item=self._create_controller(item)).publish() return True def validate_name(self, name, named_ctrl=None): return MacroNameValidation(self, name, named_ctrl) def delete(self, ctrl): self._items.remove(ctrl.data) if ctrl.data in self._item_to_controller: del self._item_to_controller[ctrl.data] self.datafile_controller.update_namespace() self.mark_dirty() self._notify_removal(ctrl) def add(self, ctrl): item = ctrl.data item.parent = self._table self._items.append(item) new_controller = self._create_controller(item) self.datafile_controller.update_namespace() self.mark_dirty() self._notify_creation(new_controller.name, new_controller) def _create_new(self, name, config=None): name = name.strip() ctrl = 
self._create_controller(self._table.add(name)) self._configure_controller(ctrl, config) self.datafile_controller.update_namespace() self.mark_dirty() self._notify_creation(name, ctrl) return ctrl def _configure_controller(self, ctrl, config): pass class TestCaseTableController(_MacroTable): item_type = 'Test case' _controller_class = TestCaseController @property def _items(self): return self._table.tests def _notify_creation(self, name, ctrl): RideTestCaseAdded(datafile=self.datafile, name=name, item=ctrl).publish() def _notify_removal(self, item): RideTestCaseRemoved(datafile=self.datafile, name=item.name, item=item).publish() def new(self, name): return self._create_new(name) class KeywordTableController(_MacroTable): item_type = 'User keyword' _controller_class = UserKeywordController @property def _items(self): return self._table.keywords def _notify_creation(self, name, ctrl): RideUserKeywordAdded(datafile=self.datafile, name=name, item=ctrl).publish() def _notify_removal(self, item): RideUserKeywordRemoved(datafile=self.datafile, name=item.name, item=item).publish() def new(self, name, argstr=''): return self._create_new(name, argstr) def _configure_controller(self, ctrl, config): if config: ctrl.arguments.set_value(config) def sort(self): """Sorts the keywords of the controller by name""" keywords_sorted = sorted(self._table.keywords, key=lambda userkeyword: userkeyword.name) index_difference = self._index_difference(self._table.keywords, keywords_sorted) self._table.keywords = keywords_sorted return index_difference def _index_difference(self, original_list, sorted_list): """Determines the difference in sorting order for undo/redo""" index_difference = [] for kw in original_list: counter = 0 for kw2 in sorted_list: if kw.name == kw2.name: index_difference.append(counter) break counter += 1 return index_difference def restore_keyword_order(self, list): """Restores the old order of the keyword list""" keywords_temp = [] for i in list: 
keywords_temp.append(self._table.keywords[i]) self._table.keywords = keywords_temp class ImportSettingsController(_TableController, _WithListOperations): def __init__(self, parent_controller, table, resource_file_controller_factory=None): _TableController.__init__(self, parent_controller, table) self._resource_file_controller_factory = resource_file_controller_factory self.__import_controllers = None def __iter__(self): return iter(self._import_controllers) def __getitem__(self, index): return self._import_controllers[index] @property def _import_controllers(self): if self.__import_controllers is None: self.__import_controllers = [self._import_controller(imp) for imp in self._items] return self.__import_controllers def _import_controller(self, import_): return ImportController(self, import_) @property def _items(self): return self._table.imports @property def resource_file_controller_factory(self): return self._resource_file_controller_factory @overrides(_WithListOperations) def _swap(self, ind1, ind2): imps = self._import_controllers imps[ind1], imps[ind2] = imps[ind2], imps[ind1] _WithListOperations._swap(self, ind1, ind2) def remove_import_data(self, imp): self.delete(self._items.data.index(imp)) def delete(self, index): item = self[index] _WithListOperations.delete(self, index) self._import_controllers.pop(index) item.publish_removed() self.notify_imports_modified() def add_library(self, name, argstr, alias, comment=None): self._import_controllers # Call property since it has to exist before adding new import_ = self._table.add_library(name, utils.split_value(argstr), comment) import_.alias = alias self._parent.mark_dirty() self._add_controller(import_) self.notify_imports_modified() return self[-1] def _add_controller(self, import_): ctrl = self._import_controller(import_) ctrl.publish_added() self._import_controllers.append(ctrl) def add_resource(self, path, comment=None): self._import_controllers # Have to exist before adding new import_ = 
self._table.add_resource(path, comment) self._parent.mark_dirty() self.resource_import_modified(path) self._add_controller(import_) self.notify_imports_modified() return self[-1] def add_variables(self, path, argstr, comment=None): self._import_controllers # Have to exist before adding new import_ = self._table.add_variables(path, utils.split_value(argstr), comment) self._parent.mark_dirty() self._add_controller(import_) return self[-1] def notify_imports_modified(self): self.datafile_controller.update_namespace() def resource_import_modified(self, path): return self._parent.resource_import_modified(path) class MetadataListController(_TableController, _WithListOperations): def __iter__(self): return iter(MetadataController(self, m) for m in self._items) def __getitem__(self, index): return MetadataController(self, self._items[index]) @property def _items(self): return self._table.metadata def add_metadata(self, name, value, comment=None): self._table.add_metadata(name, value, comment) self._parent.mark_dirty() return self[-1]
apache-2.0
arne-cl/turian-parser
scripts/run-management/eda/analyze-errors.py
1
7780
#!/usr/bin/python # ####################################################################### # # # analyze-errors.py # # USAGE: ./analyze-errors.py devel|train < treefile.parsed # # Given a postprocessed treebank, create a breakdown of PARSEVAL scores # by constituent type, sorted by those type with the largest share of # the error. # # Types of constituents we are analyzing: # * All constituents # * Constituents broken down by label # * Constituents broken down by number of children # * Constituents broken down by label, number of children # # WRITEME: Describe more specifically. # # # $Id: analyze-errors.py 1657 2006-06-04 03:03:05Z turian $ # # ####################################################################### # Copyright (c) 2004-2006, New York University. All rights reserved ####################################################################### from variables import * import parsetree import sys if mysys == "Linux" and not profile: debug(1, "Linux detected... using psyco") import psyco psyco.full(memory=4096) # psyco.full() # psyco.log() # psyco.full(memory=128) # psyco.profile(0.05, memory=1024) # psyco.profile(0.2) elif not profile: debug(1, "Uname gives %s... 
NOT using psyco" % (mysys)) else: debug(1, "Profiling...") import hotshot, hotshot.stats def main(): assert len(sys.argv) == 2 assert sys.argv[1] == "devel" or sys.argv[1] == "train" check_parsefiles(sys.argv[1]) debug(1, "Opening files:\n\t%s\n\t%s\n" % (postprocess_gold[sys.argv[1]], postprocess_jmx[sys.argv[1]])) gold_file = open(postprocess_gold[sys.argv[1]]) # jmx_file = open(postprocess_jmx[sys.argv[1]]) sentence = 0 all_types = {} test_constits = {} gold_constits = {} test_constits_totals = {} gold_constits_totals = {} for l in sys.stdin: sentence += 1 if not l: assert(0) test_tree = parsetree.read_tree(l) assert test_tree != None test_tree = parsetree.normalize(test_tree) test_leaves = [(n.headword, n.headtag) for n in test_tree.leaves()] lt = gold_file.readline() gold_tree = parsetree.read_tree(lt) assert gold_tree != None gold_tree = parsetree.normalize(gold_tree) gold_leaves = [(n.headword, n.headtag) for n in gold_tree.leaves()] # Make sure we're comparing the same sentences assert test_leaves == gold_leaves for n in test_tree.internal_nodes(): if n.label == "TOP": continue s = n.span() c = "Sentence #%d: %s @ [%d, %d]" % (sentence, n.label, s[0], s[1]) # Types of constituents we are analyzing: # * All constituents # * Constituents broken down by label # * Constituents broken down by number of children # * Constituents broken down by label, number of children # types = ["all", "label %s" % n.label, "%d children" % len(n.children), "label %s with %d children" % (n.label, len(n.children))] types = ["all", "label %s" % n.label, "%d children" % len(n.children)] for t in types: all_types[t] = 1 if t not in test_constits: test_constits[t] = {} test_constits_totals[t] = 0 if c not in test_constits[t]: test_constits[t][c] = 0 test_constits_totals[t] += 1 test_constits[t][c] += 1 for n in gold_tree.internal_nodes(): if n.label == "TOP": continue s = n.span() c = "Sentence #%d: %s @ [%d, %d]" % (sentence, n.label, s[0], s[1]) # Types of constituents we are 
analyzing: # * All constituents # * Constituents broken down by label # * Constituents broken down by number of children # * Constituents broken down by label, number of children # types = ["all", "label %s" % n.label, "%d children" % len(n.children), "label %s with %d children" % (n.label, len(n.children))] types = ["all", "label %s" % n.label, "%d children" % len(n.children)] for t in types: all_types[t] = 1 if t not in gold_constits: gold_constits[t] = {} gold_constits_totals[t] = 0 if c not in gold_constits[t]: gold_constits[t][c] = 0 gold_constits_totals[t] += 1 gold_constits[t][c] += 1 if sentence % 100 == 0: debug(1, "Sentence #%d done" % sentence) else: debug(2, "Sentence #%d done" % sentence) gsum = 0 tsum = 0 msum = 0 # FIXME: Don't hardcode this for i in range(128): t = "%d children" % i if t in gold_constits_totals: gsum += gold_constits_totals[t] if t in test_constits_totals: tsum += test_constits_totals[t] assert gsum == gold_constits_totals["all"] assert tsum == test_constits_totals["all"] alltot = test_constits_totals["all"] + gold_constits_totals["all"] print "Total constituents in test + gold: %d" % alltot all_error_fms = 0. nonall_error_fms = 0. 
sorted_types = [] for t in all_types: if t not in test_constits: test_constits[t] = {} test_constits_totals[t] = 0 if t not in gold_constits: gold_constits[t] = {} gold_constits_totals[t] = 0 tot = test_constits_totals[t] + gold_constits_totals[t] str = "" str += "\n" str += "Breakdown type: %s\n" % t str += "comprising %.2f%% (%d/%d) of all constituents\n" % (100.*tot/alltot, tot, alltot) testmatch = 0 goldmatch = 0 testtot = 0 goldtot = 0 allkeys = {} for k in test_constits[t].keys() + gold_constits[t].keys(): allkeys[k] = True for c in allkeys: testmatch += min(test_constits[t].get(c,0), gold_constits["all"].get(c,0)) goldmatch += min(test_constits["all"].get(c,0), gold_constits[t].get(c,0)) goldtot += gold_constits[t].get(c,0) testtot += test_constits[t].get(c,0) assert goldtot == gold_constits_totals[t] assert testtot == test_constits_totals[t] # BUG: These error FMS are all skewed! # To see this, observe the "VP with 2 children" has more attributed error than just "VP" #error_fms = 1. * (testtot + goldtot - 2 * match) / alltot error_fms = 1. * (testtot + goldtot - goldmatch - testmatch) / alltot str += "overall error incurred by this constituent type = %.3f%% (%d/%d)\n" % (100.*error_fms, testtot + goldtot - goldmatch - testmatch, alltot) #errprc = 1. * (testtot - match) / test_constits_totals["all"] #errrcl = 1. * (goldtot - match) / gold_constits_totals["all"] ##errprc = 1. * (test_constits_totals["all"] - testtot + match) / test_constits_totals["all"] ##errrcl = 1. 
* (gold_constits_totals["all"] - goldtot + match) / gold_constits_totals["all"] #if errprc == 0 or errrcl == 0: error_fms = 0 #else: error_fms = 1-2*errrcl*errprc/(errrcl+errprc) #str += "overall error incurred by this constituent type = %.3f%%\n" % (100.*error_fms) #str += "overall PRC error incurred by this constituent type = %.3f%% (%d/%d)\n" % (100.*errprc,testtot - match, test_constits_totals["all"]) #str += "overall RCL error incurred by this constituent type = %.3f%% (%d/%d)\n" % (100.*errrcl,goldtot - match, gold_constits_totals["all"]) if t == "all": all_error_fms += error_fms else: nonall_error_fms += error_fms if testtot == 0: lprc = 0 else: lprc = 1. * testmatch / testtot if goldtot == 0: lrc = 0 else: lrcl = 1. * goldmatch / goldtot if lprc == 0 or lrcl == 0: lfms = 0 else: lfms = 2*lrcl*lprc/(lrcl+lprc) str += "LFMS = %.3f%%\n" % (100. * lfms) str += "LPRC = %.3f%% (%d/%d)\n" % (100. * lprc, testmatch, testtot) str += "LRCL = %.3f%% (%d/%d)\n" % (100. * lrcl, goldmatch, goldtot) sorted_types.append((error_fms, str)) sorted_types.sort() sorted_types.reverse() for (error_fms, str) in sorted_types: print str assert(not gold_file.readline()) # assert(not jmx_file.readline()) gold_file.close() # jmx_file.close() if profile: #prof = hotshot.Profile("oracle.prof", lineevents=1) prof = hotshot.Profile("oracle.prof") prof.runcall(main) prof.close() stats = hotshot.stats.load("oracle.prof") stats.strip_dirs() stats.sort_stats('time', 'calls') stats.print_stats(20) else: main()
gpl-2.0
sjolicoeur/Annelia
annelia/monkey_staticserve.py
1
3539
# original taken from http://www.cherrypy.org/browser/trunk/cherrypy/lib/static.py?format=txt try: from io import UnsupportedOperation except ImportError: UnsupportedOperation = object() import logging import mimetypes mimetypes.init() mimetypes.types_map['.dwg']='image/x-dwg' mimetypes.types_map['.ico']='image/x-icon' mimetypes.types_map['.bz2']='application/x-bzip2' mimetypes.types_map['.gz']='application/x-gzip' import os import re import stat import time import cherrypy from cherrypy._cpcompat import ntob, unquote from cherrypy.lib import cptools, httputil, file_generator_limited from cherrypy.lib.static import _serve_fileobj def serve_file(path, content_type=None, disposition=None, name=None, content_length=None,debug=False): """Set status, headers, and body in order to serve the given path. The Content-Type header will be set to the content_type arg, if provided. If not provided, the Content-Type will be guessed by the file extension of the 'path' argument. If disposition is not None, the Content-Disposition header will be set to "<disposition>; filename=<name>". If name is None, it will be set to the basename of path. If disposition is None, no Content-Disposition header will be written. """ response = cherrypy.serving.response # If path is relative, users should fix it by making path absolute. # That is, CherryPy should not guess where the application root is. # It certainly should *not* use cwd (since CP may be invoked from a # variety of paths). If using tools.staticdir, you can make your relative # paths become absolute by supplying a value for "tools.staticdir.root". if not os.path.isabs(path): msg = "'%s' is not an absolute path." % path if debug: cherrypy.log(msg, 'TOOLS.STATICFILE') raise ValueError(msg) try: st = os.stat(path) except OSError: if debug: cherrypy.log('os.stat(%r) failed' % path, 'TOOLS.STATIC') raise cherrypy.NotFound() # Check if path is a directory. if stat.S_ISDIR(st.st_mode): # Let the caller deal with it as they like. 
if debug: cherrypy.log('%r is a directory' % path, 'TOOLS.STATIC') raise cherrypy.NotFound() # Set the Last-Modified response header, so that # modified-since validation code can work. response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime) cptools.validate_since() if content_type is None: # Set content-type based on filename extension ext = "" i = path.rfind('.') if i != -1: ext = path[i:].lower() content_type = mimetypes.types_map.get(ext, None) if content_type is not None: response.headers['Content-Type'] = content_type if debug: cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC') cd = None if disposition is not None: if name is None: name = os.path.basename(path) cd = '%s; filename="%s"' % (disposition, name) response.headers["Content-Disposition"] = cd if debug: cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC') # Set Content-Length and use an iterable (file object) # this way CP won't load the whole file in memory if not content_length : content_length = st.st_size fileobj = open(path, 'rb') return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
apache-2.0
avoinsystems/odoo
openerp/addons/base/tests/test_res_config.py
398
3532
import unittest2 import openerp import openerp.tests.common as common class test_res_config(common.TransactionCase): def setUp(self): super(test_res_config, self).setUp() self.res_config = self.registry('res.config.settings') # Define the test values self.menu_xml_id = 'base.menu_action_res_users' self.full_field_name = 'res.partner.lang' self.error_msg = "WarningRedirect test string: %(field:res.partner.lang)s - %(menu:base.menu_action_res_users)s." self.error_msg_wo_menu = "WarningRedirect test string: %(field:res.partner.lang)s." # Note: see the get_config_warning() doc for a better example # Fetch the expected values module_name, menu_xml_id = self.menu_xml_id.split('.') dummy, menu_id = self.registry('ir.model.data').get_object_reference(self.cr, self.uid, module_name, menu_xml_id) ir_ui_menu = self.registry('ir.ui.menu').browse(self.cr, self.uid, menu_id, context=None) model_name, field_name = self.full_field_name.rsplit('.', 1) self.expected_path = ir_ui_menu.complete_name self.expected_action_id = ir_ui_menu.action.id self.expected_name = self.registry(model_name).fields_get(self.cr, self.uid, allfields=[field_name], context=None)[field_name]['string'] self.expected_final_error_msg = self.error_msg % { 'field:res.partner.lang': self.expected_name, 'menu:base.menu_action_res_users': self.expected_path } self.expected_final_error_msg_wo_menu = self.error_msg_wo_menu % { 'field:res.partner.lang': self.expected_name, } def test_00_get_option_path(self): """ The get_option_path() method should return a tuple containing a string and an integer """ res = self.res_config.get_option_path(self.cr, self.uid, self.menu_xml_id, context=None) # Check types self.assertIsInstance(res, tuple) self.assertEqual(len(res), 2, "The result should contain 2 elements") self.assertIsInstance(res[0], basestring) self.assertIsInstance(res[1], (int, long)) # Check returned values self.assertEqual(res[0], self.expected_path) self.assertEqual(res[1], self.expected_action_id) def 
test_10_get_option_name(self): """ The get_option_name() method should return a string """ res = self.res_config.get_option_name(self.cr, self.uid, self.full_field_name, context=None) # Check type self.assertIsInstance(res, basestring) # Check returned value self.assertEqual(res, self.expected_name) def test_20_get_config_warning(self): """ The get_config_warning() method should return a RedirectWarning """ res = self.res_config.get_config_warning(self.cr, self.error_msg, context=None) # Check type self.assertIsInstance(res, openerp.exceptions.RedirectWarning) # Check returned value self.assertEqual(res.args[0], self.expected_final_error_msg) self.assertEqual(res.args[1], self.expected_action_id) def test_30_get_config_warning_wo_menu(self): """ The get_config_warning() method should return a Warning exception """ res = self.res_config.get_config_warning(self.cr, self.error_msg_wo_menu, context=None) # Check type self.assertIsInstance(res, openerp.exceptions.Warning) # Check returned value self.assertEqual(res.args[0], self.expected_final_error_msg_wo_menu)
agpl-3.0
mKeRix/home-assistant
homeassistant/components/ads/sensor.py
7
2424
"""Support for ADS sensors.""" import logging import voluptuous as vol from homeassistant.components import ads from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import CONF_NAME, CONF_UNIT_OF_MEASUREMENT import homeassistant.helpers.config_validation as cv from . import CONF_ADS_FACTOR, CONF_ADS_TYPE, CONF_ADS_VAR, STATE_KEY_STATE, AdsEntity _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = "ADS sensor" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_ADS_VAR): cv.string, vol.Optional(CONF_ADS_FACTOR): cv.positive_int, vol.Optional(CONF_ADS_TYPE, default=ads.ADSTYPE_INT): vol.In( [ ads.ADSTYPE_INT, ads.ADSTYPE_UINT, ads.ADSTYPE_BYTE, ads.ADSTYPE_DINT, ads.ADSTYPE_UDINT, ] ), vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_UNIT_OF_MEASUREMENT, default=""): cv.string, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up an ADS sensor device.""" ads_hub = hass.data.get(ads.DATA_ADS) ads_var = config[CONF_ADS_VAR] ads_type = config[CONF_ADS_TYPE] name = config[CONF_NAME] unit_of_measurement = config.get(CONF_UNIT_OF_MEASUREMENT) factor = config.get(CONF_ADS_FACTOR) entity = AdsSensor(ads_hub, ads_var, ads_type, name, unit_of_measurement, factor) add_entities([entity]) class AdsSensor(AdsEntity): """Representation of an ADS sensor entity.""" def __init__(self, ads_hub, ads_var, ads_type, name, unit_of_measurement, factor): """Initialize AdsSensor entity.""" super().__init__(ads_hub, name, ads_var) self._unit_of_measurement = unit_of_measurement self._ads_type = ads_type self._factor = factor async def async_added_to_hass(self): """Register device notification.""" await self.async_initialize_device( self._ads_var, self._ads_hub.ADS_TYPEMAP[self._ads_type], STATE_KEY_STATE, self._factor, ) @property def state(self): """Return the state of the device.""" return self._state_dict[STATE_KEY_STATE] @property def unit_of_measurement(self): """Return the unit of 
measurement.""" return self._unit_of_measurement
mit
dfang/odoo
odoo/addons/test_new_api/tests/test_new_fields.py
4
29972
# # test cases for new-style fields # from datetime import date, datetime from odoo.exceptions import AccessError, except_orm from odoo.tests import common from odoo.tools import mute_logger class TestFields(common.TransactionCase): def test_00_basics(self): """ test accessing new fields """ # find a discussion discussion = self.env.ref('test_new_api.discussion_0') # read field as a record attribute or as a record item self.assertIsInstance(discussion.name, basestring) self.assertIsInstance(discussion['name'], basestring) self.assertEqual(discussion['name'], discussion.name) # read it with method read() values = discussion.read(['name'])[0] self.assertEqual(values['name'], discussion.name) def test_01_basic_get_assertion(self): """ test item getter """ # field access works on single record record = self.env.ref('test_new_api.message_0_0') self.assertEqual(len(record), 1) ok = record.body # field access fails on multiple records records = self.env['test_new_api.message'].search([]) assert len(records) > 1 with self.assertRaises(ValueError): faulty = records.body def test_01_basic_set_assertion(self): """ test item setter """ # field assignment works on single record record = self.env.ref('test_new_api.message_0_0') self.assertEqual(len(record), 1) record.body = 'OK' # field assignment fails on multiple records records = self.env['test_new_api.message'].search([]) assert len(records) > 1 with self.assertRaises(ValueError): records.body = 'Faulty' def test_10_computed(self): """ check definition of computed fields """ # by default function fields are not stored and readonly field = self.env['test_new_api.message']._fields['size'] self.assertFalse(field.store) self.assertTrue(field.readonly) field = self.env['test_new_api.message']._fields['name'] self.assertTrue(field.store) self.assertTrue(field.readonly) def test_10_non_stored(self): """ test non-stored fields """ # a field declared with store=False should not have a column field = 
self.env['test_new_api.category']._fields['dummy'] self.assertFalse(field.store) self.assertFalse(field.compute) self.assertFalse(field.inverse) # find messages for message in self.env['test_new_api.message'].search([]): # check definition of field self.assertEqual(message.size, len(message.body or '')) # check recomputation after record is modified size = message.size message.write({'body': (message.body or '') + "!!!"}) self.assertEqual(message.size, size + 3) # special case: computed field without dependency must be computed record = self.env['test_new_api.mixed'].create({}) self.assertTrue(record.now) def test_11_stored(self): """ test stored fields """ def check_stored(disc): """ Check the stored computed field on disc.messages """ for msg in disc.messages: self.assertEqual(msg.name, "[%s] %s" % (disc.name, msg.author.name)) # find the demo discussion, and check messages discussion1 = self.env.ref('test_new_api.discussion_0') self.assertTrue(discussion1.messages) check_stored(discussion1) # modify discussion name, and check again messages discussion1.name = 'Talking about stuff...' check_stored(discussion1) # switch message from discussion, and check again discussion2 = discussion1.copy({'name': 'Another discussion'}) message2 = discussion1.messages[0] message2.discussion = discussion2 check_stored(discussion2) # create a new discussion with messages, and check their name user_root = self.env.ref('base.user_root') user_demo = self.env.ref('base.user_demo') discussion3 = self.env['test_new_api.discussion'].create({ 'name': 'Stuff', 'participants': [(4, user_root.id), (4, user_demo.id)], 'messages': [ (0, 0, {'author': user_root.id, 'body': 'one'}), (0, 0, {'author': user_demo.id, 'body': 'two'}), (0, 0, {'author': user_root.id, 'body': 'three'}), ], }) check_stored(discussion3) # modify the discussion messages: edit the 2nd one, remove the last one # (keep modifications in that order, as they reproduce a former bug!) 
discussion3.write({ 'messages': [ (4, discussion3.messages[0].id), (1, discussion3.messages[1].id, {'author': user_root.id}), (2, discussion3.messages[2].id), ], }) check_stored(discussion3) def test_11_computed_access(self): """ test computed fields with access right errors """ User = self.env['res.users'] user1 = User.create({'name': 'Aaaah', 'login': 'a'}) user2 = User.create({'name': 'Boooh', 'login': 'b'}) user3 = User.create({'name': 'Crrrr', 'login': 'c'}) # add a rule to not give access to user2 self.env['ir.rule'].create({ 'model_id': self.env['ir.model'].search([('model', '=', 'res.users')]).id, 'domain_force': "[('id', '!=', %d)]" % user2.id, }) # group users as a recordset, and read them as user demo users = (user1 + user2 + user3).sudo(self.env.ref('base.user_demo')) user1, user2, user3 = users # regression test: a bug invalidated the field's value from cache user1.company_type with self.assertRaises(AccessError): user2.company_type user3.company_type def test_12_recursive(self): """ test recursively dependent fields """ Category = self.env['test_new_api.category'] abel = Category.create({'name': 'Abel'}) beth = Category.create({'name': 'Bethany'}) cath = Category.create({'name': 'Catherine'}) dean = Category.create({'name': 'Dean'}) ewan = Category.create({'name': 'Ewan'}) finn = Category.create({'name': 'Finnley'}) gabe = Category.create({'name': 'Gabriel'}) cath.parent = finn.parent = gabe abel.parent = beth.parent = cath dean.parent = ewan.parent = finn self.assertEqual(abel.display_name, "Gabriel / Catherine / Abel") self.assertEqual(beth.display_name, "Gabriel / Catherine / Bethany") self.assertEqual(cath.display_name, "Gabriel / Catherine") self.assertEqual(dean.display_name, "Gabriel / Finnley / Dean") self.assertEqual(ewan.display_name, "Gabriel / Finnley / Ewan") self.assertEqual(finn.display_name, "Gabriel / Finnley") self.assertEqual(gabe.display_name, "Gabriel") ewan.parent = cath self.assertEqual(ewan.display_name, "Gabriel / Catherine / 
Ewan") cath.parent = finn self.assertEqual(ewan.display_name, "Gabriel / Finnley / Catherine / Ewan") def test_12_cascade(self): """ test computed field depending on computed field """ message = self.env.ref('test_new_api.message_0_0') message.invalidate_cache() double_size = message.double_size self.assertEqual(double_size, message.size) def test_13_inverse(self): """ test inverse computation of fields """ Category = self.env['test_new_api.category'] abel = Category.create({'name': 'Abel'}) beth = Category.create({'name': 'Bethany'}) cath = Category.create({'name': 'Catherine'}) dean = Category.create({'name': 'Dean'}) ewan = Category.create({'name': 'Ewan'}) finn = Category.create({'name': 'Finnley'}) gabe = Category.create({'name': 'Gabriel'}) self.assertEqual(ewan.display_name, "Ewan") ewan.display_name = "Abel / Bethany / Catherine / Erwan" self.assertEqual(beth.parent, abel) self.assertEqual(cath.parent, beth) self.assertEqual(ewan.parent, cath) self.assertEqual(ewan.name, "Erwan") record = self.env['test_new_api.compute.inverse'] # create/write on 'foo' should only invoke the compute method record.counts.update(compute=0, inverse=0) record = record.create({'foo': 'Hi'}) self.assertEqual(record.foo, 'Hi') self.assertEqual(record.bar, 'Hi') self.assertEqual(record.counts, {'compute': 1, 'inverse': 0}) record.counts.update(compute=0, inverse=0) record.write({'foo': 'Ho'}) self.assertEqual(record.foo, 'Ho') self.assertEqual(record.bar, 'Ho') self.assertEqual(record.counts, {'compute': 1, 'inverse': 0}) # create/write on 'bar' should only invoke the inverse method record.counts.update(compute=0, inverse=0) record = record.create({'bar': 'Hi'}) self.assertEqual(record.foo, 'Hi') self.assertEqual(record.bar, 'Hi') self.assertEqual(record.counts, {'compute': 0, 'inverse': 1}) record.counts.update(compute=0, inverse=0) record.write({'bar': 'Ho'}) self.assertEqual(record.foo, 'Ho') self.assertEqual(record.bar, 'Ho') self.assertEqual(record.counts, {'compute': 0, 
'inverse': 1}) def test_14_search(self): """ test search on computed fields """ discussion = self.env.ref('test_new_api.discussion_0') # determine message sizes sizes = set(message.size for message in discussion.messages) # search for messages based on their size for size in sizes: messages0 = self.env['test_new_api.message'].search( [('discussion', '=', discussion.id), ('size', '<=', size)]) messages1 = self.env['test_new_api.message'].browse() for message in discussion.messages: if message.size <= size: messages1 += message self.assertEqual(messages0, messages1) def test_15_constraint(self): """ test new-style Python constraints """ discussion = self.env.ref('test_new_api.discussion_0') # remove oneself from discussion participants: we can no longer create # messages in discussion discussion.participants -= self.env.user with self.assertRaises(Exception): self.env['test_new_api.message'].create({'discussion': discussion.id, 'body': 'Whatever'}) # make sure that assertRaises() does not leave fields to recompute self.assertFalse(self.env.has_todo()) # put back oneself into discussion participants: now we can create # messages in discussion discussion.participants += self.env.user self.env['test_new_api.message'].create({'discussion': discussion.id, 'body': 'Whatever'}) def test_20_float(self): """ test float fields """ record = self.env['test_new_api.mixed'].create({}) # assign value, and expect rounding record.write({'number': 2.4999999999999996}) self.assertEqual(record.number, 2.50) # same with field setter record.number = 2.4999999999999996 self.assertEqual(record.number, 2.50) def test_20_monetary(self): """ test monetary fields """ record = self.env['test_new_api.mixed'].create({}) self.assertTrue(record.currency_id) self.assertEqual(record.currency_id.rounding, 0.01) # the conversion to cache should round the value to 14.700000000000001 record.amount = 14.7 self.assertNotEqual(record.amount, 14.7) self.assertEqual(record.amount, 14.700000000000001) # however 
when stored to database, it should be serialized as 14.70 self.cr.execute('SELECT amount FROM test_new_api_mixed WHERE id=%s', (record.id,)) (amount,) = self.cr.fetchone() self.assertEqual(amount, 14.7) def test_21_date(self): """ test date fields """ record = self.env['test_new_api.mixed'].create({}) # one may assign False or None record.date = None self.assertFalse(record.date) # one may assign date and datetime objects record.date = date(2012, 05, 01) self.assertEqual(record.date, '2012-05-01') record.date = datetime(2012, 05, 01, 10, 45, 00) self.assertEqual(record.date, '2012-05-01') # one may assign dates in the default format, and it must be checked record.date = '2012-05-01' self.assertEqual(record.date, '2012-05-01') with self.assertRaises(ValueError): record.date = '12-5-1' def test_22_selection(self): """ test selection fields """ record = self.env['test_new_api.mixed'].create({}) # one may assign False or None record.lang = None self.assertFalse(record.lang) # one may assign a value, and it must be checked for language in self.env['res.lang'].search([]): record.lang = language.code with self.assertRaises(ValueError): record.lang = 'zz_ZZ' def test_23_relation(self): """ test relation fields """ demo = self.env.ref('base.user_demo') message = self.env.ref('test_new_api.message_0_0') # check environment of record and related records self.assertEqual(message.env, self.env) self.assertEqual(message.discussion.env, self.env) demo_env = self.env(user=demo) self.assertNotEqual(demo_env, self.env) # check environment of record and related records self.assertEqual(message.env, self.env) self.assertEqual(message.discussion.env, self.env) # "migrate" message into demo_env, and check again demo_message = message.sudo(demo) self.assertEqual(demo_message.env, demo_env) self.assertEqual(demo_message.discussion.env, demo_env) # assign record's parent to a record in demo_env message.discussion = message.discussion.copy({'name': 'Copy'}) # both message and its parent 
field must be in self.env self.assertEqual(message.env, self.env) self.assertEqual(message.discussion.env, self.env) def test_24_reference(self): """ test reference fields. """ record = self.env['test_new_api.mixed'].create({}) # one may assign False or None record.reference = None self.assertFalse(record.reference) # one may assign a user or a partner... record.reference = self.env.user self.assertEqual(record.reference, self.env.user) record.reference = self.env.user.partner_id self.assertEqual(record.reference, self.env.user.partner_id) # ... but no record from a model that starts with 'ir.' with self.assertRaises(ValueError): record.reference = self.env['ir.model'].search([], limit=1) def test_25_related(self): """ test related fields. """ message = self.env.ref('test_new_api.message_0_0') discussion = message.discussion # by default related fields are not stored field = message._fields['discussion_name'] self.assertFalse(field.store) self.assertFalse(field.readonly) # check value of related field self.assertEqual(message.discussion_name, discussion.name) # change discussion name, and check result discussion.name = 'Foo' self.assertEqual(message.discussion_name, 'Foo') # change discussion name via related field, and check result message.discussion_name = 'Bar' self.assertEqual(discussion.name, 'Bar') self.assertEqual(message.discussion_name, 'Bar') # change discussion name via related field on several records discussion1 = discussion.create({'name': 'X1'}) discussion2 = discussion.create({'name': 'X2'}) discussion1.participants = discussion2.participants = self.env.user message1 = message.create({'discussion': discussion1.id}) message2 = message.create({'discussion': discussion2.id}) self.assertEqual(message1.discussion_name, 'X1') self.assertEqual(message2.discussion_name, 'X2') (message1 + message2).write({'discussion_name': 'X3'}) self.assertEqual(discussion1.name, 'X3') self.assertEqual(discussion2.name, 'X3') # search on related field, and check result 
search_on_related = self.env['test_new_api.message'].search([('discussion_name', '=', 'Bar')]) search_on_regular = self.env['test_new_api.message'].search([('discussion.name', '=', 'Bar')]) self.assertEqual(search_on_related, search_on_regular) # check that field attributes are copied message_field = message.fields_get(['discussion_name'])['discussion_name'] discussion_field = discussion.fields_get(['name'])['name'] self.assertEqual(message_field['help'], discussion_field['help']) def test_25_related_single(self): """ test related fields with a single field in the path. """ record = self.env['test_new_api.related'].create({'name': 'A'}) self.assertEqual(record.related_name, record.name) self.assertEqual(record.related_related_name, record.name) # check searching on related fields records0 = record.search([('name', '=', 'A')]) self.assertIn(record, records0) records1 = record.search([('related_name', '=', 'A')]) self.assertEqual(records1, records0) records2 = record.search([('related_related_name', '=', 'A')]) self.assertEqual(records2, records0) # check writing on related fields record.write({'related_name': 'B'}) self.assertEqual(record.name, 'B') record.write({'related_related_name': 'C'}) self.assertEqual(record.name, 'C') def test_25_related_multi(self): """ test write() on several related fields based on a common computed field. """ foo = self.env['test_new_api.foo'].create({'name': 'A', 'value1': 1, 'value2': 2}) bar = self.env['test_new_api.bar'].create({'name': 'A'}) self.assertEqual(bar.foo, foo) self.assertEqual(bar.value1, 1) self.assertEqual(bar.value2, 2) foo.invalidate_cache() bar.write({'value1': 3, 'value2': 4}) self.assertEqual(foo.value1, 3) self.assertEqual(foo.value2, 4) def test_26_inherited(self): """ test inherited fields. 
""" # a bunch of fields are inherited from res_partner for user in self.env['res.users'].search([]): partner = user.partner_id for field in ('is_company', 'name', 'email', 'country_id'): self.assertEqual(getattr(user, field), getattr(partner, field)) self.assertEqual(user[field], partner[field]) def test_27_company_dependent(self): """ test company-dependent fields. """ # consider three companies company0 = self.env.ref('base.main_company') company1 = self.env['res.company'].create({'name': 'A', 'parent_id': company0.id}) company2 = self.env['res.company'].create({'name': 'B', 'parent_id': company1.id}) # create one user per company user0 = self.env['res.users'].create({'name': 'Foo', 'login': 'foo', 'company_id': company0.id, 'company_ids': []}) user1 = self.env['res.users'].create({'name': 'Bar', 'login': 'bar', 'company_id': company1.id, 'company_ids': []}) user2 = self.env['res.users'].create({'name': 'Baz', 'login': 'baz', 'company_id': company2.id, 'company_ids': []}) # create a default value for the company-dependent field field = self.env['ir.model.fields'].search([('model', '=', 'test_new_api.company'), ('name', '=', 'foo')]) self.env['ir.property'].create({'name': 'foo', 'fields_id': field.id, 'value': 'default', 'type': 'char'}) # create/modify a record, and check the value for each user record = self.env['test_new_api.company'].create({'foo': 'main'}) record.invalidate_cache() self.assertEqual(record.sudo(user0).foo, 'main') self.assertEqual(record.sudo(user1).foo, 'default') self.assertEqual(record.sudo(user2).foo, 'default') record.sudo(user1).foo = 'alpha' record.invalidate_cache() self.assertEqual(record.sudo(user0).foo, 'main') self.assertEqual(record.sudo(user1).foo, 'alpha') self.assertEqual(record.sudo(user2).foo, 'default') def test_28_sparse(self): """ test sparse fields. 
""" record = self.env['test_new_api.sparse'].create({}) self.assertFalse(record.data) partner = self.env.ref('base.main_partner') values = [ ('boolean', True), ('integer', 42), ('float', 3.14), ('char', 'John'), ('selection', 'two'), ('partner', partner.id), ] for n, (key, val) in enumerate(values): record.write({key: val}) self.assertEqual(record.data, dict(values[:n+1])) for key, val in values[:-1]: self.assertEqual(record[key], val) self.assertEqual(record.partner, partner) for n, (key, val) in enumerate(values): record.write({key: False}) self.assertEqual(record.data, dict(values[n+1:])) # check reflection of sparse fields in 'ir.model.fields' names = [name for name, _ in values] domain = [('model', '=', 'test_new_api.sparse'), ('name', 'in', names)] fields = self.env['ir.model.fields'].search(domain) self.assertEqual(len(fields), len(names)) for field in fields: self.assertEqual(field.serialization_field_id.name, 'data') def test_30_read(self): """ test computed fields as returned by read(). 
""" discussion = self.env.ref('test_new_api.discussion_0') for message in discussion.messages: display_name = message.display_name size = message.size data = message.read(['display_name', 'size'])[0] self.assertEqual(data['display_name'], display_name) self.assertEqual(data['size'], size) def test_31_prefetch(self): """ test prefetch of records handle AccessError """ Category = self.env['test_new_api.category'] cat1 = Category.create({'name': 'NOACCESS'}) cat2 = Category.create({'name': 'ACCESS', 'parent': cat1.id}) cats = cat1 + cat2 self.env.clear() cat1, cat2 = cats self.assertEqual(cat2.name, 'ACCESS') # both categories should be ready for prefetching self.assertItemsEqual(cat2._prefetch[Category._name], cats.ids) # but due to our (lame) overwrite of `read`, it should not forbid us to read records we have access to self.assertFalse(cat2.discussions) self.assertEqual(cat2.parent, cat1) with self.assertRaises(AccessError): cat1.name def test_40_new(self): """ test new records. """ discussion = self.env.ref('test_new_api.discussion_0') # create a new message message = self.env['test_new_api.message'].new() self.assertFalse(message.id) # assign some fields; should have no side effect message.discussion = discussion message.body = BODY = "May the Force be with you." self.assertEqual(message.discussion, discussion) self.assertEqual(message.body, BODY) self.assertFalse(message.author) self.assertNotIn(message, discussion.messages) # check computed values of fields self.assertEqual(message.name, "[%s] %s" % (discussion.name, '')) self.assertEqual(message.size, len(BODY)) @mute_logger('odoo.addons.base.ir.ir_model') def test_41_new_related(self): """ test the behavior of related fields starting on new records. 
""" # make discussions unreadable for demo user access = self.env.ref('test_new_api.access_discussion') access.write({'perm_read': False}) # create an environment for demo user env = self.env(user=self.env.ref('base.user_demo')) self.assertEqual(env.user.login, "demo") # create a new message as demo user discussion = self.env.ref('test_new_api.discussion_0') message = env['test_new_api.message'].new({'discussion': discussion}) self.assertEqual(message.discussion, discussion) # read the related field discussion_name self.assertEqual(message.discussion.env, env) self.assertEqual(message.discussion_name, discussion.name) with self.assertRaises(AccessError): message.discussion.name @mute_logger('odoo.addons.base.ir.ir_model') def test_42_new_related(self): """ test the behavior of related fields traversing new records. """ # make discussions unreadable for demo user access = self.env.ref('test_new_api.access_discussion') access.write({'perm_read': False}) # create an environment for demo user env = self.env(user=self.env.ref('base.user_demo')) self.assertEqual(env.user.login, "demo") # create a new discussion and a new message as demo user discussion = env['test_new_api.discussion'].new({'name': 'Stuff'}) message = env['test_new_api.message'].new({'discussion': discussion}) self.assertEqual(message.discussion, discussion) # read the related field discussion_name self.assertNotEqual(message.sudo().env, message.env) self.assertEqual(message.discussion_name, discussion.name) def test_50_defaults(self): """ test default values. 
""" fields = ['discussion', 'body', 'author', 'size'] defaults = self.env['test_new_api.message'].default_get(fields) self.assertEqual(defaults, {'author': self.env.uid}) defaults = self.env['test_new_api.mixed'].default_get(['number']) self.assertEqual(defaults, {'number': 3.14}) def test_50_search_many2one(self): """ test search through a path of computed fields""" messages = self.env['test_new_api.message'].search( [('author_partner.name', '=', 'Demo User')]) self.assertEqual(messages, self.env.ref('test_new_api.message_0_1')) def test_60_x2many_domain(self): """ test the cache consistency of a x2many field with a domain """ discussion = self.env.ref('test_new_api.discussion_0') message = discussion.messages[0] self.assertNotIn(message, discussion.important_messages) message.important = True self.assertIn(message, discussion.important_messages) # writing on very_important_messages should call its domain method self.assertIn(message, discussion.very_important_messages) discussion.write({'very_important_messages': [(5,)]}) self.assertFalse(discussion.very_important_messages) self.assertFalse(message.exists()) class TestHtmlField(common.TransactionCase): def setUp(self): super(TestHtmlField, self).setUp() self.model = self.env['test_new_api.mixed'] def test_00_sanitize(self): self.assertEqual(self.model._fields['comment1'].sanitize, False) self.assertEqual(self.model._fields['comment2'].sanitize_attributes, True) self.assertEqual(self.model._fields['comment2'].strip_classes, False) self.assertEqual(self.model._fields['comment3'].sanitize_attributes, True) self.assertEqual(self.model._fields['comment3'].strip_classes, True) some_ugly_html = """<p>Oops this should maybe be sanitized % if object.some_field and not object.oriented: <table> % if object.other_field: <tr style="margin: 0px; border: 10px solid black;"> ${object.mako_thing} <td> </tr> <tr class="custom_class"> This is some html. 
</tr> % endif <tr> %if object.dummy_field: <p>Youpie</p> %endif""" record = self.model.create({ 'comment1': some_ugly_html, 'comment2': some_ugly_html, 'comment3': some_ugly_html, 'comment4': some_ugly_html, }) self.assertEqual(record.comment1, some_ugly_html, 'Error in HTML field: content was sanitized but field has sanitize=False') self.assertIn('<tr class="', record.comment2) # sanitize should have closed tags left open in the original html self.assertIn('</table>', record.comment3, 'Error in HTML field: content does not seem to have been sanitized despise sanitize=True') self.assertIn('</td>', record.comment3, 'Error in HTML field: content does not seem to have been sanitized despise sanitize=True') self.assertIn('<tr style="', record.comment3, 'Style attr should not have been stripped') # sanitize does not keep classes if asked to self.assertNotIn('<tr class="', record.comment3) self.assertNotIn('<tr style="', record.comment4, 'Style attr should have been stripped') class TestMagicFields(common.TransactionCase): def test_write_date(self): record = self.env['test_new_api.discussion'].create({'name': 'Booba'}) self.assertEqual(record.create_uid, self.env.user) self.assertEqual(record.write_uid, self.env.user)
agpl-3.0
nijel/translate
translate/lang/test_am.py
2
1119
from translate.lang import factory def test_punctranslate(): """Tests that we can translate punctuation.""" language = factory.getlanguage("am") assert language.punctranslate("") == "" assert language.punctranslate("abc efg") == "abc efg" assert language.punctranslate("abc efg.") == "abc efg።" assert language.punctranslate("abc efg. hij.") == "abc efg። hij።" assert language.punctranslate("abc efg, hij;") == "abc efg፣ hij፤" assert language.punctranslate("Delete file: %s?") == "Delete file: %s?" def test_sentences(): """Tests basic functionality of sentence segmentation.""" language = factory.getlanguage("am") sentences = language.sentences("") assert sentences == [] sentences = language.sentences( "ለምልክቱ መግቢያ የተለየ መለያ። ይህ የሚጠቅመው የታሪኩን ዝርዝር ለማስቀመጥ ነው።" ) print(sentences) assert sentences == ["ለምልክቱ መግቢያ የተለየ መለያ።", "ይህ የሚጠቅመው የታሪኩን ዝርዝር ለማስቀመጥ ነው።"]
gpl-2.0
krisys/django
django/contrib/gis/forms/fields.py
504
4316
from __future__ import unicode_literals from django import forms from django.contrib.gis.geos import GEOSException, GEOSGeometry from django.utils.translation import ugettext_lazy as _ from .widgets import OpenLayersWidget class GeometryField(forms.Field): """ This is the basic form field for a Geometry. Any textual input that is accepted by GEOSGeometry is accepted by this form. By default, this includes WKT, HEXEWKB, WKB (in a buffer), and GeoJSON. """ widget = OpenLayersWidget geom_type = 'GEOMETRY' default_error_messages = { 'required': _('No geometry value provided.'), 'invalid_geom': _('Invalid geometry value.'), 'invalid_geom_type': _('Invalid geometry type.'), 'transform_error': _('An error occurred when transforming the geometry ' 'to the SRID of the geometry form field.'), } def __init__(self, **kwargs): # Pop out attributes from the database field, or use sensible # defaults (e.g., allow None). self.srid = kwargs.pop('srid', None) self.geom_type = kwargs.pop('geom_type', self.geom_type) super(GeometryField, self).__init__(**kwargs) self.widget.attrs['geom_type'] = self.geom_type def to_python(self, value): """ Transforms the value to a Geometry object. """ if value in self.empty_values: return None if not isinstance(value, GEOSGeometry): try: value = GEOSGeometry(value) except (GEOSException, ValueError, TypeError): raise forms.ValidationError(self.error_messages['invalid_geom'], code='invalid_geom') # Try to set the srid if not value.srid: try: value.srid = self.widget.map_srid except AttributeError: if self.srid: value.srid = self.srid return value def clean(self, value): """ Validates that the input value can be converted to a Geometry object (which is returned). A ValidationError is raised if the value cannot be instantiated as a Geometry. """ geom = super(GeometryField, self).clean(value) if geom is None: return geom # Ensuring that the geometry is of the correct type (indicated # using the OGC string label). 
if str(geom.geom_type).upper() != self.geom_type and not self.geom_type == 'GEOMETRY': raise forms.ValidationError(self.error_messages['invalid_geom_type'], code='invalid_geom_type') # Transforming the geometry if the SRID was set. if self.srid and self.srid != -1 and self.srid != geom.srid: try: geom.transform(self.srid) except GEOSException: raise forms.ValidationError( self.error_messages['transform_error'], code='transform_error') return geom def has_changed(self, initial, data): """ Compare geographic value of data with its initial value. """ try: data = self.to_python(data) initial = self.to_python(initial) except forms.ValidationError: return True # Only do a geographic comparison if both values are available if initial and data: data.transform(initial.srid) # If the initial value was not added by the browser, the geometry # provided may be slightly different, the first time it is saved. # The comparison is done with a very low tolerance. return not initial.equals_exact(data, tolerance=0.000001) else: # Check for change of state of existence return bool(initial) != bool(data) class GeometryCollectionField(GeometryField): geom_type = 'GEOMETRYCOLLECTION' class PointField(GeometryField): geom_type = 'POINT' class MultiPointField(GeometryField): geom_type = 'MULTIPOINT' class LineStringField(GeometryField): geom_type = 'LINESTRING' class MultiLineStringField(GeometryField): geom_type = 'MULTILINESTRING' class PolygonField(GeometryField): geom_type = 'POLYGON' class MultiPolygonField(GeometryField): geom_type = 'MULTIPOLYGON'
bsd-3-clause
ray-project/ray
python/ray/util/sgd/torch/examples/benchmarks/horovod_benchmark_apex.py
3
3976
from __future__ import print_function import argparse import torch.backends.cudnn as cudnn import torch.nn.functional as F import torch.optim as optim import torch.utils.data.distributed from torchvision import models import horovod.torch as hvd import timeit import numpy as np # Apex from apex import amp # Benchmark settings parser = argparse.ArgumentParser( description="PyTorch Synthetic Benchmark", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( "--fp16-allreduce", action="store_true", default=False, help="use fp16 compression during allreduce") parser.add_argument( "--model", type=str, default="resnet50", help="model to benchmark") parser.add_argument( "--batch-size", type=int, default=32, help="input batch size") parser.add_argument( "--num-warmup-batches", type=int, default=10, help="number of warm-up batches that don\"t count towards benchmark") parser.add_argument( "--num-batches-per-iter", type=int, default=10, help="number of batches per benchmark iteration") parser.add_argument( "--num-iters", type=int, default=10, help="number of benchmark iterations") parser.add_argument( "--no-cuda", action="store_true", default=False, help="disables CUDA training") parser.add_argument( "--amp-fp16", action="store_true", default=False, help="Enables FP16 training with Apex.") args = parser.parse_args() args.cuda = not args.no_cuda and torch.cuda.is_available() hvd.init() if args.cuda: # Horovod: pin GPU to local rank. torch.cuda.set_device(hvd.local_rank()) cudnn.benchmark = True # Set up standard model. model = getattr(models, args.model)() if args.cuda: # Move model to GPU. model.cuda() optimizer = optim.SGD(model.parameters(), lr=0.01) # Horovod: (optional) compression algorithm. compression = (hvd.Compression.fp16 if args.fp16_allreduce else hvd.Compression.none) # Horovod: wrap optimizer with DistributedOptimizer. 
optimizer = hvd.DistributedOptimizer( optimizer, named_parameters=model.named_parameters(), compression=compression) # Horovod: broadcast parameters & optimizer state. hvd.broadcast_parameters(model.state_dict(), root_rank=0) hvd.broadcast_optimizer_state(optimizer, root_rank=0) # Apex if args.amp_fp16: model, optimizer = amp.initialize(model, optimizer, opt_level="O1") # Set up fixed fake data data = torch.randn(args.batch_size, 3, 224, 224) target = torch.LongTensor(args.batch_size).random_() % 1000 if args.cuda: data, target = data.cuda(), target.cuda() def benchmark_step(): optimizer.zero_grad() output = model(data) loss = F.cross_entropy(output, target) # Apex if args.amp_fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() optimizer.synchronize() with optimizer.skip_synchronize(): optimizer.step() else: loss.backward() optimizer.step() def log(s, nl=True): if hvd.rank() != 0: return print(s, end="\n" if nl else "") log(f"Model: {args.model}") log("Batch size: %d" % args.batch_size) device = "GPU" if args.cuda else "CPU" log("Number of %ss: %d" % (device, hvd.size())) # Warm-up log("Running warmup...") timeit.timeit(benchmark_step, number=args.num_warmup_batches) # Benchmark log("Running benchmark...") img_secs = [] for x in range(args.num_iters): time = timeit.timeit(benchmark_step, number=args.num_batches_per_iter) img_sec = args.batch_size * args.num_batches_per_iter / time log("Iter #%d: %.1f img/sec per %s" % (x, img_sec, device)) img_secs.append(img_sec) # Results img_sec_mean = np.mean(img_secs) img_sec_conf = 1.96 * np.std(img_secs) log(f"Img/sec per {device}: {img_sec_mean:.1f} +-{img_sec_conf:.1f}") log("Total img/sec on %d %s(s): %.1f +-%.1f" % (hvd.size(), device, hvd.size() * img_sec_mean, hvd.size() * img_sec_conf))
apache-2.0
LLNL/spack
var/spack/repos/builtin/packages/r-rtracklayer/package.py
5
2885
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class RRtracklayer(RPackage): """R interface to genome annotation files and the UCSC genome browser. Extensible framework for interacting with multiple genome browsers (currently UCSC built-in) and manipulating annotation tracks in various formats (currently GFF, BED, bedGraph, BED15, WIG, BigWig and 2bit built-in). The user may export/import tracks to/from the supported browsers, as well as query and modify the browser state, such as the current viewport.""" homepage = "https://bioconductor.org/packages/rtracklayer" git = "https://git.bioconductor.org/packages/rtracklayer.git" version('1.44.4', commit='aec96e85daf53b5c5eb2e89250d2755352be4de3') version('1.42.2', commit='76702f671faea736807d54aeecfbadcd152d94c5') version('1.40.6', commit='ba9a6e711504a702147383bc7abfcc36eb304df7') version('1.38.3', commit='f20db703c09dc7e808c09e9b78c15aec9e546248') version('1.36.6', commit='8c0ac7230f94e0c5a981acbb178c8de70e968131') depends_on('r@3.3:', type=('build', 'run')) depends_on('r-genomicranges@1.21.20:', type=('build', 'run')) depends_on('r-xml@1.98-0:', type=('build', 'run')) depends_on('r-biocgenerics@0.13.8:', type=('build', 'run')) depends_on('r-s4vectors@0.13.13:', type=('build', 'run')) depends_on('r-iranges@2.3.7:', type=('build', 'run')) depends_on('r-xvector@0.9.4:', type=('build', 'run')) depends_on('r-genomeinfodb@1.3.14:', type=('build', 'run')) depends_on('r-biostrings@2.43.7:', type=('build', 'run')) depends_on('r-zlibbioc', type=('build', 'run')) depends_on('r-rcurl@1.4-2:', type=('build', 'run')) depends_on('r-rsamtools@1.17.8:', type=('build', 'run')) depends_on('r-genomicalignments@1.5.4:', type=('build', 'run')) depends_on('r-iranges@2.11.12:', when='@1.38.3:', type=('build', 'run')) depends_on('r-genomicranges@1.31.8:', when='@1.40.6:', 
type=('build', 'run')) depends_on('r-biocgenerics@0.25.1:', when='@1.40.6:', type=('build', 'run')) depends_on('r-s4vectors@0.17.25:', when='@1.40.6:', type=('build', 'run')) depends_on('r-iranges@2.13.13:', when='@1.40.6:', type=('build', 'run')) depends_on('r-xvector@0.19.7:', when='@1.40.6:', type=('build', 'run')) depends_on('r-genomeinfodb@1.15.2:', when='@1.40.6:', type=('build', 'run')) depends_on('r-biostrings@2.47.6:', when='@1.40.6:', type=('build', 'run')) depends_on('r-rsamtools@1.31.2:', when='@1.40.6:', type=('build', 'run')) depends_on('r-genomicalignments@1.15.6:', when='@1.40.6:', type=('build', 'run')) depends_on('r-s4vectors@0.19.22:', when='@1.42.2:', type=('build', 'run'))
lgpl-2.1
brandonPurvis/osf.io
scripts/find_nested_projects.py
64
1517
#!/usr/bin/env python # -*- coding: utf-8 -*- """Helper to get a list of all projects that are nested within another project.""" from website.project.model import Node from modularodm import Q from tests.base import OsfTestCase from tests.factories import ProjectFactory def find_nested_projects(): return Node.find( Q('__backrefs.parent.node.nodes.0', 'exists', True) & Q('category', 'eq', 'project') & Q('is_deleted', 'eq', False) ) #return [node for node in Node.find() #if node.category == 'project' #and node.parent_node is not None] class TestFindNestedProjects(OsfTestCase): def test_find_nested(self): project =ProjectFactory.build() nested_project = ProjectFactory() project.nodes.append(nested_project) project.save() result = find_nested_projects() assert nested_project in result assert project not in result def test_unnested_project(self): project = ProjectFactory() assert project not in find_nested_projects() def test_deleted_projects_excluded(self): project = ProjectFactory.build() deleted = ProjectFactory(is_deleted=True) project.nodes.append(deleted) project.save() result = find_nested_projects() assert deleted not in result def main(): result = find_nested_projects() print('Number of nested projects: {0}'.format(len(results))) if __name__ == '__main__': main()
apache-2.0
alexmorozov/django
django/core/serializers/pyyaml.py
439
2843
""" YAML serializer. Requires PyYaml (http://pyyaml.org/), but that's checked for in __init__. """ import collections import decimal import sys from io import StringIO import yaml from django.core.serializers.base import DeserializationError from django.core.serializers.python import ( Deserializer as PythonDeserializer, Serializer as PythonSerializer, ) from django.db import models from django.utils import six # Use the C (faster) implementation if possible try: from yaml import CSafeLoader as SafeLoader from yaml import CSafeDumper as SafeDumper except ImportError: from yaml import SafeLoader, SafeDumper class DjangoSafeDumper(SafeDumper): def represent_decimal(self, data): return self.represent_scalar('tag:yaml.org,2002:str', str(data)) def represent_ordered_dict(self, data): return self.represent_mapping('tag:yaml.org,2002:map', data.items()) DjangoSafeDumper.add_representer(decimal.Decimal, DjangoSafeDumper.represent_decimal) DjangoSafeDumper.add_representer(collections.OrderedDict, DjangoSafeDumper.represent_ordered_dict) class Serializer(PythonSerializer): """ Convert a queryset to YAML. """ internal_use_only = False def handle_field(self, obj, field): # A nasty special case: base YAML doesn't support serialization of time # types (as opposed to dates or datetimes, which it does support). Since # we want to use the "safe" serializer for better interoperability, we # need to do something with those pesky times. Converting 'em to strings # isn't perfect, but it's better than a "!!python/time" type which would # halt deserialization under any other language. 
if isinstance(field, models.TimeField) and getattr(obj, field.name) is not None: self._current[field.name] = str(getattr(obj, field.name)) else: super(Serializer, self).handle_field(obj, field) def end_serialization(self): yaml.dump(self.objects, self.stream, Dumper=DjangoSafeDumper, **self.options) def getvalue(self): # Grand-parent super return super(PythonSerializer, self).getvalue() def Deserializer(stream_or_string, **options): """ Deserialize a stream or string of YAML data. """ if isinstance(stream_or_string, bytes): stream_or_string = stream_or_string.decode('utf-8') if isinstance(stream_or_string, six.string_types): stream = StringIO(stream_or_string) else: stream = stream_or_string try: for obj in PythonDeserializer(yaml.load(stream, Loader=SafeLoader), **options): yield obj except GeneratorExit: raise except Exception as e: # Map to deserializer error six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
bsd-3-clause
klusta-team/kwiklib
kwiklib/utils/logger.py
1
5194
"""Logger utility classes and functions.""" # ----------------------------------------------------------------------------- # Imports # ----------------------------------------------------------------------------- import os import sys import logging import traceback # ----------------------------------------------------------------------------- # Utility functions # ----------------------------------------------------------------------------- def get_log_format(debug=False): return '%(asctime)s %(message)s' def get_caller(): tb = traceback.extract_stack()[-5] module = os.path.splitext(os.path.basename(tb[0]))[0] line = str(tb[1]) caller = "{0:s}:{1:s}".format(module, line) return caller.ljust(24) def get_var_info(name, var): return name + ": Type = " + str(type(var)) + ", Value = " + str(var) def debugvar(name, var): debug(get_var_info(name, var)) # ----------------------------------------------------------------------------- # Stream classes # ----------------------------------------------------------------------------- class StringStream(object): """Logger stream used to store all logs in a string.""" def __init__(self): self.string = "" def write(self, line): self.string += line def flush(self): pass def __repr__(self): return self.string # ----------------------------------------------------------------------------- # Logging classes # ----------------------------------------------------------------------------- class Logger(object): """Save logging information to a stream.""" def __init__(self, fmt=None, stream=None, level=None, name=None, print_caller=True, handler=None): if stream is None: stream = sys.stdout self.name = name self.print_caller = print_caller if handler is None: self.stream = stream self.handler = logging.StreamHandler(self.stream) else: self.handler = handler self.level = level self.fmt = fmt # Set the level and corresponding formatter. self.set_level(level, fmt) def set_level(self, level=None, fmt=None): # Default level and format. 
if level is None: level = self.level or logging.INFO if fmt is None: fmt = self.fmt or get_log_format(level == logging.DEBUG) # Create the Logger object. self._logger = logging.getLogger(self.name) # Create the formatter. formatter = logging.Formatter(fmt, datefmt='%Y-%m-%d %H:%M:%S') self.handler.setFormatter(formatter) # Configure the logger. self._logger.setLevel(level) self._logger.propagate = False self._logger.addHandler(self.handler) def get_message(self, msg): msg = str(msg) if self.print_caller: return get_caller() + msg else: return msg def debug(self, msg): self._logger.debug(self.get_message(msg)) def info(self, msg): self._logger.info(self.get_message(msg)) def warn(self, msg): self._logger.warn(self.get_message(msg)) def exception(self, msg): self._logger.exception(self.get_message(msg)) class StringLogger(Logger): def __init__(self, **kwargs): kwargs['stream'] = StringStream() super(StringLogger, self).__init__(**kwargs) def __repr__(self): return self.stream.__repr__() class ConsoleLogger(Logger): def __init__(self, **kwargs): kwargs['stream'] = sys.stdout super(ConsoleLogger, self).__init__(**kwargs) class FileLogger(Logger): def __init__(self, filename=None, **kwargs): kwargs['handler'] = logging.FileHandler(filename) super(FileLogger, self).__init__(**kwargs) def close(self): self.handler.close() self._logger.removeHandler(self.handler) del self.handler del self._logger # ----------------------------------------------------------------------------- # Global variables # ----------------------------------------------------------------------------- LOGGERS = {} def register(logger): name = logger.name if name not in LOGGERS: LOGGERS[name] = logger def unregister(logger): name = logger.name if name in LOGGERS: LOGGERS[name].close() del LOGGERS[name] def debug(msg): for name, logger in LOGGERS.iteritems(): logger.debug(msg) def info(msg): for name, logger in LOGGERS.iteritems(): logger.info(msg) def warn(msg): for name, logger in LOGGERS.iteritems(): 
logger.warn(msg) def exception(msg): for name, logger in LOGGERS.iteritems(): logger.exception(msg) def set_level(msg): for name, logger in LOGGERS.iteritems(): logger.set_level(msg) # Capture all exceptions. def handle_exception(exc_type, exc_value, exc_traceback): msg = "".join(traceback.format_exception(exc_type, exc_value, exc_traceback)) if msg: exception(msg)
bsd-3-clause
kawamon/hue
desktop/core/ext-py/pytest-4.6.11/testing/test_pdb.py
3
38942
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys import six import _pytest._code import pytest from _pytest.debugging import _validate_usepdb_cls try: breakpoint except NameError: SUPPORTS_BREAKPOINT_BUILTIN = False else: SUPPORTS_BREAKPOINT_BUILTIN = True _ENVIRON_PYTHONBREAKPOINT = os.environ.get("PYTHONBREAKPOINT", "") def runpdb_and_get_report(testdir, source): p = testdir.makepyfile(source) result = testdir.runpytest_inprocess("--pdb", p) reports = result.reprec.getreports("pytest_runtest_logreport") assert len(reports) == 3, reports # setup/call/teardown return reports[1] @pytest.fixture def custom_pdb_calls(): called = [] # install dummy debugger class and track which methods were called on it class _CustomPdb(object): quitting = False def __init__(self, *args, **kwargs): called.append("init") def reset(self): called.append("reset") def interaction(self, *args): called.append("interaction") _pytest._CustomPdb = _CustomPdb return called @pytest.fixture def custom_debugger_hook(): called = [] # install dummy debugger class and track which methods were called on it class _CustomDebugger(object): def __init__(self, *args, **kwargs): called.append("init") def reset(self): called.append("reset") def interaction(self, *args): called.append("interaction") def set_trace(self, frame): print("**CustomDebugger**") called.append("set_trace") _pytest._CustomDebugger = _CustomDebugger yield called del _pytest._CustomDebugger class TestPDB(object): @pytest.fixture def pdblist(self, request): monkeypatch = request.getfixturevalue("monkeypatch") pdblist = [] def mypdb(*args): pdblist.append(args) plugin = request.config.pluginmanager.getplugin("debugging") monkeypatch.setattr(plugin, "post_mortem", mypdb) return pdblist def test_pdb_on_fail(self, testdir, pdblist): rep = runpdb_and_get_report( testdir, """ def test_func(): assert 0 """, ) assert rep.failed assert 
len(pdblist) == 1 tb = _pytest._code.Traceback(pdblist[0][0]) assert tb[-1].name == "test_func" def test_pdb_on_xfail(self, testdir, pdblist): rep = runpdb_and_get_report( testdir, """ import pytest @pytest.mark.xfail def test_func(): assert 0 """, ) assert "xfail" in rep.keywords assert not pdblist def test_pdb_on_skip(self, testdir, pdblist): rep = runpdb_and_get_report( testdir, """ import pytest def test_func(): pytest.skip("hello") """, ) assert rep.skipped assert len(pdblist) == 0 def test_pdb_on_BdbQuit(self, testdir, pdblist): rep = runpdb_and_get_report( testdir, """ import bdb def test_func(): raise bdb.BdbQuit """, ) assert rep.failed assert len(pdblist) == 0 def test_pdb_on_KeyboardInterrupt(self, testdir, pdblist): rep = runpdb_and_get_report( testdir, """ def test_func(): raise KeyboardInterrupt """, ) assert rep.failed assert len(pdblist) == 1 @staticmethod def flush(child): if child.isalive(): # Read if the test has not (e.g. test_pdb_unittest_skip). child.read() child.wait() assert not child.isalive() def test_pdb_unittest_postmortem(self, testdir): p1 = testdir.makepyfile( """ import unittest class Blub(unittest.TestCase): def tearDown(self): self.filename = None def test_false(self): self.filename = 'debug' + '.me' assert 0 """ ) child = testdir.spawn_pytest("--pdb %s" % p1) child.expect("Pdb") child.sendline("p self.filename") child.sendeof() rest = child.read().decode("utf8") assert "debug.me" in rest self.flush(child) def test_pdb_unittest_skip(self, testdir): """Test for issue #2137""" p1 = testdir.makepyfile( """ import unittest @unittest.skipIf(True, 'Skipping also with pdb active') class MyTestCase(unittest.TestCase): def test_one(self): assert 0 """ ) child = testdir.spawn_pytest("-rs --pdb %s" % p1) child.expect("Skipping also with pdb active") child.expect("1 skipped in") child.sendeof() self.flush(child) def test_pdb_print_captured_stdout_and_stderr(self, testdir): p1 = testdir.makepyfile( """ def test_1(): import sys 
sys.stderr.write("get\\x20rekt") print("get\\x20rekt") assert False def test_not_called_due_to_quit(): pass """ ) child = testdir.spawn_pytest("--pdb %s" % p1) child.expect("captured stdout") child.expect("get rekt") child.expect("captured stderr") child.expect("get rekt") child.expect("traceback") child.expect("def test_1") child.expect("Pdb") child.sendeof() rest = child.read().decode("utf8") assert "Exit: Quitting debugger" in rest assert "= 1 failed in" in rest assert "def test_1" not in rest assert "get rekt" not in rest self.flush(child) def test_pdb_dont_print_empty_captured_stdout_and_stderr(self, testdir): p1 = testdir.makepyfile( """ def test_1(): assert False """ ) child = testdir.spawn_pytest("--pdb %s" % p1) child.expect("Pdb") output = child.before.decode("utf8") child.sendeof() assert "captured stdout" not in output assert "captured stderr" not in output self.flush(child) @pytest.mark.parametrize("showcapture", ["all", "no", "log"]) def test_pdb_print_captured_logs(self, testdir, showcapture): p1 = testdir.makepyfile( """ def test_1(): import logging logging.warn("get " + "rekt") assert False """ ) child = testdir.spawn_pytest( "--show-capture={} --pdb {}".format(showcapture, p1) ) if showcapture in ("all", "log"): child.expect("captured log") child.expect("get rekt") child.expect("Pdb") child.sendeof() rest = child.read().decode("utf8") assert "1 failed" in rest self.flush(child) def test_pdb_print_captured_logs_nologging(self, testdir): p1 = testdir.makepyfile( """ def test_1(): import logging logging.warn("get " + "rekt") assert False """ ) child = testdir.spawn_pytest("--show-capture=all --pdb -p no:logging %s" % p1) child.expect("get rekt") output = child.before.decode("utf8") assert "captured log" not in output child.expect("Pdb") child.sendeof() rest = child.read().decode("utf8") assert "1 failed" in rest self.flush(child) def test_pdb_interaction_exception(self, testdir): p1 = testdir.makepyfile( """ import pytest def globalfunc(): pass def 
test_1(): pytest.raises(ValueError, globalfunc) """ ) child = testdir.spawn_pytest("--pdb %s" % p1) child.expect(".*def test_1") child.expect(".*pytest.raises.*globalfunc") child.expect("Pdb") child.sendline("globalfunc") child.expect(".*function") child.sendeof() child.expect("1 failed") self.flush(child) def test_pdb_interaction_on_collection_issue181(self, testdir): p1 = testdir.makepyfile( """ import pytest xxx """ ) child = testdir.spawn_pytest("--pdb %s" % p1) # child.expect(".*import pytest.*") child.expect("Pdb") child.sendline("c") child.expect("1 error") self.flush(child) def test_pdb_interaction_on_internal_error(self, testdir): testdir.makeconftest( """ def pytest_runtest_protocol(): 0/0 """ ) p1 = testdir.makepyfile("def test_func(): pass") child = testdir.spawn_pytest("--pdb %s" % p1) child.expect("Pdb") # INTERNALERROR is only displayed once via terminal reporter. assert ( len( [ x for x in child.before.decode().splitlines() if x.startswith("INTERNALERROR> Traceback") ] ) == 1 ) child.sendeof() self.flush(child) def test_pdb_interaction_capturing_simple(self, testdir): p1 = testdir.makepyfile( """ import pytest def test_1(): i = 0 print("hello17") pytest.set_trace() i == 1 assert 0 """ ) child = testdir.spawn_pytest(str(p1)) child.expect(r"test_1\(\)") child.expect("i == 1") child.expect("Pdb") child.sendline("c") rest = child.read().decode("utf-8") assert "AssertionError" in rest assert "1 failed" in rest assert "def test_1" in rest assert "hello17" in rest # out is captured self.flush(child) def test_pdb_set_trace_kwargs(self, testdir): p1 = testdir.makepyfile( """ import pytest def test_1(): i = 0 print("hello17") pytest.set_trace(header="== my_header ==") x = 3 assert 0 """ ) child = testdir.spawn_pytest(str(p1)) child.expect("== my_header ==") assert "PDB set_trace" not in child.before.decode() child.expect("Pdb") child.sendline("c") rest = child.read().decode("utf-8") assert "1 failed" in rest assert "def test_1" in rest assert "hello17" in 
rest # out is captured self.flush(child) def test_pdb_set_trace_interception(self, testdir): p1 = testdir.makepyfile( """ import pdb def test_1(): pdb.set_trace() """ ) child = testdir.spawn_pytest(str(p1)) child.expect("test_1") child.expect("Pdb") child.sendline("q") rest = child.read().decode("utf8") assert "no tests ran" in rest assert "reading from stdin while output" not in rest assert "BdbQuit" not in rest self.flush(child) def test_pdb_and_capsys(self, testdir): p1 = testdir.makepyfile( """ import pytest def test_1(capsys): print("hello1") pytest.set_trace() """ ) child = testdir.spawn_pytest(str(p1)) child.expect("test_1") child.send("capsys.readouterr()\n") child.expect("hello1") child.sendeof() child.read() self.flush(child) def test_pdb_with_caplog_on_pdb_invocation(self, testdir): p1 = testdir.makepyfile( """ def test_1(capsys, caplog): import logging logging.getLogger(__name__).warning("some_warning") assert 0 """ ) child = testdir.spawn_pytest("--pdb %s" % str(p1)) child.send("caplog.record_tuples\n") child.expect_exact( "[('test_pdb_with_caplog_on_pdb_invocation', 30, 'some_warning')]" ) child.sendeof() child.read() self.flush(child) def test_set_trace_capturing_afterwards(self, testdir): p1 = testdir.makepyfile( """ import pdb def test_1(): pdb.set_trace() def test_2(): print("hello") assert 0 """ ) child = testdir.spawn_pytest(str(p1)) child.expect("test_1") child.send("c\n") child.expect("test_2") child.expect("Captured") child.expect("hello") child.sendeof() child.read() self.flush(child) def test_pdb_interaction_doctest(self, testdir, monkeypatch): p1 = testdir.makepyfile( """ import pytest def function_1(): ''' >>> i = 0 >>> assert i == 1 ''' """ ) child = testdir.spawn_pytest("--doctest-modules --pdb %s" % p1) child.expect("Pdb") assert "UNEXPECTED EXCEPTION: AssertionError()" in child.before.decode("utf8") child.sendline("'i=%i.' 
% i") child.expect("Pdb") assert "\r\n'i=0.'\r\n" in child.before.decode("utf8") child.sendeof() rest = child.read().decode("utf8") assert "1 failed" in rest self.flush(child) def test_pdb_interaction_capturing_twice(self, testdir): p1 = testdir.makepyfile( """ import pytest def test_1(): i = 0 print("hello17") pytest.set_trace() x = 3 print("hello18") pytest.set_trace() x = 4 assert 0 """ ) child = testdir.spawn_pytest(str(p1)) child.expect(r"PDB set_trace \(IO-capturing turned off\)") child.expect("test_1") child.expect("x = 3") child.expect("Pdb") child.sendline("c") child.expect(r"PDB continue \(IO-capturing resumed\)") child.expect(r"PDB set_trace \(IO-capturing turned off\)") child.expect("x = 4") child.expect("Pdb") child.sendline("c") child.expect("_ test_1 _") child.expect("def test_1") rest = child.read().decode("utf8") assert "Captured stdout call" in rest assert "hello17" in rest # out is captured assert "hello18" in rest # out is captured assert "1 failed" in rest self.flush(child) def test_pdb_with_injected_do_debug(self, testdir): """Simulates pdbpp, which injects Pdb into do_debug, and uses self.__class__ in do_continue. 
""" p1 = testdir.makepyfile( mytest=""" import pdb import pytest count_continue = 0 class CustomPdb(pdb.Pdb, object): def do_debug(self, arg): import sys import types if sys.version_info < (3, ): do_debug_func = pdb.Pdb.do_debug.im_func else: do_debug_func = pdb.Pdb.do_debug newglobals = do_debug_func.__globals__.copy() newglobals['Pdb'] = self.__class__ orig_do_debug = types.FunctionType( do_debug_func.__code__, newglobals, do_debug_func.__name__, do_debug_func.__defaults__, ) return orig_do_debug(self, arg) do_debug.__doc__ = pdb.Pdb.do_debug.__doc__ def do_continue(self, *args, **kwargs): global count_continue count_continue += 1 return super(CustomPdb, self).do_continue(*args, **kwargs) def foo(): print("print_from_foo") def test_1(): i = 0 print("hello17") pytest.set_trace() x = 3 print("hello18") assert count_continue == 2, "unexpected_failure: %d != 2" % count_continue pytest.fail("expected_failure") """ ) child = testdir.spawn_pytest("--pdbcls=mytest:CustomPdb %s" % str(p1)) child.expect(r"PDB set_trace \(IO-capturing turned off\)") child.expect(r"\n\(Pdb") child.sendline("debug foo()") child.expect("ENTERING RECURSIVE DEBUGGER") child.expect(r"\n\(\(Pdb") child.sendline("c") child.expect("LEAVING RECURSIVE DEBUGGER") assert b"PDB continue" not in child.before # No extra newline. assert child.before.endswith(b"c\r\nprint_from_foo\r\n") # set_debug should not raise outcomes.Exit, if used recrursively. 
child.sendline("debug 42") child.sendline("q") child.expect("LEAVING RECURSIVE DEBUGGER") assert b"ENTERING RECURSIVE DEBUGGER" in child.before assert b"Quitting debugger" not in child.before child.sendline("c") child.expect(r"PDB continue \(IO-capturing resumed\)") rest = child.read().decode("utf8") assert "hello17" in rest # out is captured assert "hello18" in rest # out is captured assert "1 failed" in rest assert "Failed: expected_failure" in rest assert "AssertionError: unexpected_failure" not in rest self.flush(child) def test_pdb_without_capture(self, testdir): p1 = testdir.makepyfile( """ import pytest def test_1(): pytest.set_trace() """ ) child = testdir.spawn_pytest("-s %s" % p1) child.expect(r">>> PDB set_trace >>>") child.expect("Pdb") child.sendline("c") child.expect(r">>> PDB continue >>>") child.expect("1 passed") self.flush(child) @pytest.mark.parametrize("capture_arg", ("", "-s", "-p no:capture")) def test_pdb_continue_with_recursive_debug(self, capture_arg, testdir): """Full coverage for do_debug without capturing. This is very similar to test_pdb_interaction_continue_recursive in general, but mocks out ``pdb.set_trace`` for providing more coverage. """ p1 = testdir.makepyfile( """ try: input = raw_input except NameError: pass def set_trace(): __import__('pdb').set_trace() def test_1(monkeypatch): import _pytest.debugging class pytestPDBTest(_pytest.debugging.pytestPDB): @classmethod def set_trace(cls, *args, **kwargs): # Init PytestPdbWrapper to handle capturing. _pdb = cls._init_pdb("set_trace", *args, **kwargs) # Mock out pdb.Pdb.do_continue. 
import pdb pdb.Pdb.do_continue = lambda self, arg: None print("===" + " SET_TRACE ===") assert input() == "debug set_trace()" # Simulate PytestPdbWrapper.do_debug cls._recursive_debug += 1 print("ENTERING RECURSIVE DEBUGGER") print("===" + " SET_TRACE_2 ===") assert input() == "c" _pdb.do_continue("") print("===" + " SET_TRACE_3 ===") # Simulate PytestPdbWrapper.do_debug print("LEAVING RECURSIVE DEBUGGER") cls._recursive_debug -= 1 print("===" + " SET_TRACE_4 ===") assert input() == "c" _pdb.do_continue("") def do_continue(self, arg): print("=== do_continue") monkeypatch.setattr(_pytest.debugging, "pytestPDB", pytestPDBTest) import pdb monkeypatch.setattr(pdb, "set_trace", pytestPDBTest.set_trace) set_trace() """ ) child = testdir.spawn_pytest("--tb=short %s %s" % (p1, capture_arg)) child.expect("=== SET_TRACE ===") before = child.before.decode("utf8") if not capture_arg: assert ">>> PDB set_trace (IO-capturing turned off) >>>" in before else: assert ">>> PDB set_trace >>>" in before child.sendline("debug set_trace()") child.expect("=== SET_TRACE_2 ===") before = child.before.decode("utf8") assert "\r\nENTERING RECURSIVE DEBUGGER\r\n" in before child.sendline("c") child.expect("=== SET_TRACE_3 ===") # No continue message with recursive debugging. 
before = child.before.decode("utf8") assert ">>> PDB continue " not in before child.sendline("c") child.expect("=== SET_TRACE_4 ===") before = child.before.decode("utf8") assert "\r\nLEAVING RECURSIVE DEBUGGER\r\n" in before child.sendline("c") rest = child.read().decode("utf8") if not capture_arg: assert "> PDB continue (IO-capturing resumed) >" in rest else: assert "> PDB continue >" in rest assert "1 passed in" in rest def test_pdb_used_outside_test(self, testdir): p1 = testdir.makepyfile( """ import pytest pytest.set_trace() x = 5 """ ) child = testdir.spawn("{} {}".format(sys.executable, p1)) child.expect("x = 5") child.expect("Pdb") child.sendeof() self.flush(child) def test_pdb_used_in_generate_tests(self, testdir): p1 = testdir.makepyfile( """ import pytest def pytest_generate_tests(metafunc): pytest.set_trace() x = 5 def test_foo(a): pass """ ) child = testdir.spawn_pytest(str(p1)) child.expect("x = 5") child.expect("Pdb") child.sendeof() self.flush(child) def test_pdb_collection_failure_is_shown(self, testdir): p1 = testdir.makepyfile("xxx") result = testdir.runpytest_subprocess("--pdb", p1) result.stdout.fnmatch_lines( ["E NameError: *xxx*", "*! 
*Exit: Quitting debugger !*"] # due to EOF ) @pytest.mark.parametrize("post_mortem", (False, True)) def test_enter_leave_pdb_hooks_are_called(self, post_mortem, testdir): testdir.makeconftest( """ mypdb = None def pytest_configure(config): config.testing_verification = 'configured' def pytest_enter_pdb(config, pdb): assert config.testing_verification == 'configured' print('enter_pdb_hook') global mypdb mypdb = pdb mypdb.set_attribute = "bar" def pytest_leave_pdb(config, pdb): assert config.testing_verification == 'configured' print('leave_pdb_hook') global mypdb assert mypdb is pdb assert mypdb.set_attribute == "bar" """ ) p1 = testdir.makepyfile( """ import pytest def test_set_trace(): pytest.set_trace() assert 0 def test_post_mortem(): assert 0 """ ) if post_mortem: child = testdir.spawn_pytest(str(p1) + " --pdb -s -k test_post_mortem") else: child = testdir.spawn_pytest(str(p1) + " -k test_set_trace") child.expect("enter_pdb_hook") child.sendline("c") if post_mortem: child.expect(r"PDB continue") else: child.expect(r"PDB continue \(IO-capturing resumed\)") child.expect("Captured stdout call") rest = child.read().decode("utf8") assert "leave_pdb_hook" in rest assert "1 failed" in rest self.flush(child) def test_pdb_custom_cls(self, testdir, custom_pdb_calls): p1 = testdir.makepyfile("""xxx """) result = testdir.runpytest_inprocess("--pdb", "--pdbcls=_pytest:_CustomPdb", p1) result.stdout.fnmatch_lines(["*NameError*xxx*", "*1 error*"]) assert custom_pdb_calls == ["init", "reset", "interaction"] def test_pdb_custom_cls_invalid(self, testdir): result = testdir.runpytest_inprocess("--pdbcls=invalid") result.stderr.fnmatch_lines( [ "*: error: argument --pdbcls: 'invalid' is not in the format 'modname:classname'" ] ) def test_pdb_validate_usepdb_cls(self, testdir): assert _validate_usepdb_cls("os.path:dirname.__name__") == ( "os.path", "dirname.__name__", ) assert _validate_usepdb_cls("pdb:DoesNotExist") == ("pdb", "DoesNotExist") def 
test_pdb_custom_cls_without_pdb(self, testdir, custom_pdb_calls): p1 = testdir.makepyfile("""xxx """) result = testdir.runpytest_inprocess("--pdbcls=_pytest:_CustomPdb", p1) result.stdout.fnmatch_lines(["*NameError*xxx*", "*1 error*"]) assert custom_pdb_calls == [] def test_pdb_custom_cls_with_set_trace(self, testdir, monkeypatch): testdir.makepyfile( custom_pdb=""" class CustomPdb(object): def __init__(self, *args, **kwargs): skip = kwargs.pop("skip") assert skip == ["foo.*"] print("__init__") super(CustomPdb, self).__init__(*args, **kwargs) def set_trace(*args, **kwargs): print('custom set_trace>') """ ) p1 = testdir.makepyfile( """ import pytest def test_foo(): pytest.set_trace(skip=['foo.*']) """ ) monkeypatch.setenv("PYTHONPATH", str(testdir.tmpdir)) child = testdir.spawn_pytest("--pdbcls=custom_pdb:CustomPdb %s" % str(p1)) child.expect("__init__") child.expect("custom set_trace>") self.flush(child) class TestDebuggingBreakpoints(object): def test_supports_breakpoint_module_global(self): """ Test that supports breakpoint global marks on Python 3.7+ and not on CPython 3.5, 2.7 """ if sys.version_info.major == 3 and sys.version_info.minor >= 7: assert SUPPORTS_BREAKPOINT_BUILTIN is True if sys.version_info.major == 3 and sys.version_info.minor == 5: assert SUPPORTS_BREAKPOINT_BUILTIN is False if sys.version_info.major == 2 and sys.version_info.minor == 7: assert SUPPORTS_BREAKPOINT_BUILTIN is False @pytest.mark.skipif( not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin" ) @pytest.mark.parametrize("arg", ["--pdb", ""]) def test_sys_breakpointhook_configure_and_unconfigure(self, testdir, arg): """ Test that sys.breakpointhook is set to the custom Pdb class once configured, test that hook is reset to system value once pytest has been unconfigured """ testdir.makeconftest( """ import sys from pytest import hookimpl from _pytest.debugging import pytestPDB def pytest_configure(config): config._cleanup.append(check_restored) def check_restored(): 
assert sys.breakpointhook == sys.__breakpointhook__ def test_check(): assert sys.breakpointhook == pytestPDB.set_trace """ ) testdir.makepyfile( """ def test_nothing(): pass """ ) args = (arg,) if arg else () result = testdir.runpytest_subprocess(*args) result.stdout.fnmatch_lines(["*1 passed in *"]) @pytest.mark.skipif( not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin" ) def test_pdb_custom_cls(self, testdir, custom_debugger_hook): p1 = testdir.makepyfile( """ def test_nothing(): breakpoint() """ ) result = testdir.runpytest_inprocess( "--pdb", "--pdbcls=_pytest:_CustomDebugger", p1 ) result.stdout.fnmatch_lines(["*CustomDebugger*", "*1 passed*"]) assert custom_debugger_hook == ["init", "set_trace"] @pytest.mark.parametrize("arg", ["--pdb", ""]) @pytest.mark.skipif( not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin" ) def test_environ_custom_class(self, testdir, custom_debugger_hook, arg): testdir.makeconftest( """ import os import sys os.environ['PYTHONBREAKPOINT'] = '_pytest._CustomDebugger.set_trace' def pytest_configure(config): config._cleanup.append(check_restored) def check_restored(): assert sys.breakpointhook == sys.__breakpointhook__ def test_check(): import _pytest assert sys.breakpointhook is _pytest._CustomDebugger.set_trace """ ) testdir.makepyfile( """ def test_nothing(): pass """ ) args = (arg,) if arg else () result = testdir.runpytest_subprocess(*args) result.stdout.fnmatch_lines(["*1 passed in *"]) @pytest.mark.skipif( not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin" ) @pytest.mark.skipif( not _ENVIRON_PYTHONBREAKPOINT == "", reason="Requires breakpoint() default value", ) def test_sys_breakpoint_interception(self, testdir): p1 = testdir.makepyfile( """ def test_1(): breakpoint() """ ) child = testdir.spawn_pytest(str(p1)) child.expect("test_1") child.expect("Pdb") child.sendline("quit") rest = child.read().decode("utf8") assert "Quitting debugger" in rest assert "reading from 
stdin while output" not in rest TestPDB.flush(child) @pytest.mark.skipif( not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin" ) def test_pdb_not_altered(self, testdir): p1 = testdir.makepyfile( """ import pdb def test_1(): pdb.set_trace() assert 0 """ ) child = testdir.spawn_pytest(str(p1)) child.expect("test_1") child.expect("Pdb") child.sendline("c") rest = child.read().decode("utf8") assert "1 failed" in rest assert "reading from stdin while output" not in rest TestPDB.flush(child) class TestTraceOption: def test_trace_sets_breakpoint(self, testdir): p1 = testdir.makepyfile( """ def test_1(): assert True def test_2(): pass def test_3(): pass """ ) child = testdir.spawn_pytest("--trace " + str(p1)) child.expect("test_1") child.expect("Pdb") child.sendline("c") child.expect("test_2") child.expect("Pdb") child.sendline("c") child.expect("test_3") child.expect("Pdb") child.sendline("q") child.expect_exact("Exit: Quitting debugger") rest = child.read().decode("utf8") assert "2 passed in" in rest assert "reading from stdin while output" not in rest # Only printed once - not on stderr. 
assert "Exit: Quitting debugger" not in child.before.decode("utf8") TestPDB.flush(child) def test_trace_after_runpytest(testdir): """Test that debugging's pytest_configure is re-entrant.""" p1 = testdir.makepyfile( """ from _pytest.debugging import pytestPDB def test_outer(testdir): assert len(pytestPDB._saved) == 1 testdir.makepyfile( \""" from _pytest.debugging import pytestPDB def test_inner(): assert len(pytestPDB._saved) == 2 print() print("test_inner_" + "end") \""" ) result = testdir.runpytest("-s", "-k", "test_inner") assert result.ret == 0 assert len(pytestPDB._saved) == 1 """ ) result = testdir.runpytest_subprocess("-s", "-p", "pytester", str(p1)) result.stdout.fnmatch_lines(["test_inner_end"]) assert result.ret == 0 def test_quit_with_swallowed_SystemExit(testdir): """Test that debugging's pytest_configure is re-entrant.""" p1 = testdir.makepyfile( """ def call_pdb_set_trace(): __import__('pdb').set_trace() def test_1(): try: call_pdb_set_trace() except SystemExit: pass def test_2(): pass """ ) child = testdir.spawn_pytest(str(p1)) child.expect("Pdb") child.sendline("q") child.expect_exact("Exit: Quitting debugger") rest = child.read().decode("utf8") assert "no tests ran" in rest TestPDB.flush(child) @pytest.mark.parametrize("fixture", ("capfd", "capsys")) def test_pdb_suspends_fixture_capturing(testdir, fixture): """Using "-s" with pytest should suspend/resume fixture capturing.""" p1 = testdir.makepyfile( """ def test_inner({fixture}): import sys print("out_inner_before") sys.stderr.write("err_inner_before\\n") __import__("pdb").set_trace() print("out_inner_after") sys.stderr.write("err_inner_after\\n") out, err = {fixture}.readouterr() assert out =="out_inner_before\\nout_inner_after\\n" assert err =="err_inner_before\\nerr_inner_after\\n" """.format( fixture=fixture ) ) child = testdir.spawn_pytest(str(p1) + " -s") child.expect("Pdb") before = child.before.decode("utf8") assert ( "> PDB set_trace (IO-capturing turned off for fixture %s) >" % 
(fixture) in before ) # Test that capturing is really suspended. child.sendline("p 40 + 2") child.expect("Pdb") assert "\r\n42\r\n" in child.before.decode("utf8") child.sendline("c") rest = child.read().decode("utf8") assert "out_inner" not in rest assert "err_inner" not in rest TestPDB.flush(child) assert child.exitstatus == 0 assert "= 1 passed in " in rest assert "> PDB continue (IO-capturing resumed for fixture %s) >" % (fixture) in rest def test_pdbcls_via_local_module(testdir): """It should be imported in pytest_configure or later only.""" p1 = testdir.makepyfile( """ def test(): print("before_set_trace") __import__("pdb").set_trace() """, mypdb=""" class Wrapped: class MyPdb: def set_trace(self, *args): print("set_trace_called", args) def runcall(self, *args, **kwds): print("runcall_called", args, kwds) assert "func" in kwds """, ) result = testdir.runpytest( str(p1), "--pdbcls=really.invalid:Value", syspathinsert=True ) result.stdout.fnmatch_lines( [ "*= FAILURES =*", "E * --pdbcls: could not import 'really.invalid:Value': No module named *really*", ] ) assert result.ret == 1 result = testdir.runpytest( str(p1), "--pdbcls=mypdb:Wrapped.MyPdb", syspathinsert=True ) assert result.ret == 0 result.stdout.fnmatch_lines(["*set_trace_called*", "* 1 passed in *"]) # Ensure that it also works with --trace. 
result = testdir.runpytest( str(p1), "--pdbcls=mypdb:Wrapped.MyPdb", "--trace", syspathinsert=True ) assert result.ret == 0 result.stdout.fnmatch_lines(["*runcall_called*", "* 1 passed in *"]) def test_raises_bdbquit_with_eoferror(testdir): """It is not guaranteed that DontReadFromInput's read is called.""" if six.PY2: builtin_module = "__builtin__" input_func = "raw_input" else: builtin_module = "builtins" input_func = "input" p1 = testdir.makepyfile( """ def input_without_read(*args, **kwargs): raise EOFError() def test(monkeypatch): import {builtin_module} monkeypatch.setattr({builtin_module}, {input_func!r}, input_without_read) __import__('pdb').set_trace() """.format( builtin_module=builtin_module, input_func=input_func ) ) result = testdir.runpytest(str(p1)) result.stdout.fnmatch_lines(["E *BdbQuit", "*= 1 failed in*"]) assert result.ret == 1 def test_pdb_wrapper_class_is_reused(testdir): p1 = testdir.makepyfile( """ def test(): __import__("pdb").set_trace() __import__("pdb").set_trace() import mypdb instances = mypdb.instances assert len(instances) == 2 assert instances[0].__class__ is instances[1].__class__ """, mypdb=""" instances = [] class MyPdb: def __init__(self, *args, **kwargs): instances.append(self) def set_trace(self, *args): print("set_trace_called", args) """, ) result = testdir.runpytest(str(p1), "--pdbcls=mypdb:MyPdb", syspathinsert=True) assert result.ret == 0 result.stdout.fnmatch_lines( ["*set_trace_called*", "*set_trace_called*", "* 1 passed in *"] )
apache-2.0
HiroIshikawa/21playground
payblog/blog/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/sjisprober.py
1777
3764
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

import sys

from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants


class SJISProber(MultiByteCharSetProber):
    """Charset prober for the Shift_JIS Japanese encoding.

    Combines three signals from the parent MultiByteCharSetProber
    framework: a coding state machine (byte-sequence validity), a
    character-distribution analyzer, and a two-character context
    analyzer.  The final confidence is the max of the latter two.
    """

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        # State machine validates raw byte sequences against the
        # Shift_JIS model; the analyzers score character statistics.
        self._mCodingSM = CodingStateMachine(SJISSMModel)
        self._mDistributionAnalyzer = SJISDistributionAnalysis()
        self._mContextAnalyzer = SJISContextAnalysis()
        self.reset()

    def reset(self):
        """Reset prober state so a new document can be analyzed."""
        # Parent reset clears the state machine, the distribution
        # analyzer and _mLastChar; the context analyzer is ours alone.
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        # Delegated: the context analyzer distinguishes the concrete
        # charset name variant (e.g. plain SHIFT_JIS vs CP932-like).
        return self._mContextAnalyzer.get_charset_name()

    def feed(self, aBuf):
        """Feed a chunk of bytes and return the detection state.

        Drives each byte through the coding state machine; on every
        completed character, forwards a small window of bytes to the
        context and distribution analyzers.  Returns one of the
        constants.eDetecting / eFoundIt / eNotMe states.
        """
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                # Byte sequence is illegal in Shift_JIS: rule it out.
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                # Sequence is unambiguously Shift_JIS.
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A full character just completed; charLen is its size
                # in bytes (1 or 2 for Shift_JIS).
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # Character straddles the previous chunk: splice it
                    # from _mLastChar (carry-over bytes) instead of aBuf.
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
                                                charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    # NOTE: the context analyzer window is offset by one
                    # byte relative to the distribution analyzer's —
                    # this matches upstream chardet; preserve as-is.
                    self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
                                                     - charLen], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)

        # Remember the final byte so a character split across chunk
        # boundaries can be reassembled on the next feed() call.
        self._mLastChar[0] = aBuf[aLen - 1]

        if self.get_state() == constants.eDetecting:
            # Shortcut: stop early once confidence is high enough.
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        """Return the higher of the context and distribution scores."""
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
mit
luzheqi1987/nova-annotation
nova/tests/unit/objects/test_objects.py
1
49027
# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import copy import datetime import hashlib import inspect import os import pprint import mock from oslo.serialization import jsonutils from oslo.utils import timeutils import six from testtools import matchers from nova.conductor import rpcapi as conductor_rpcapi from nova import context from nova import exception from nova import objects from nova.objects import base from nova.objects import fields from nova.openstack.common import log from nova import rpc from nova import test from nova.tests.unit import fake_notifier from nova import utils LOG = log.getLogger(__name__) class MyOwnedObject(base.NovaPersistentObject, base.NovaObject): VERSION = '1.0' fields = {'baz': fields.Field(fields.Integer())} class MyObj(base.NovaPersistentObject, base.NovaObject): VERSION = '1.6' fields = {'foo': fields.Field(fields.Integer()), 'bar': fields.Field(fields.String()), 'missing': fields.Field(fields.String()), 'readonly': fields.Field(fields.Integer(), read_only=True), 'rel_object': fields.ObjectField('MyOwnedObject', nullable=True) } @staticmethod def _from_db_object(context, obj, db_obj): self = MyObj() self.foo = db_obj['foo'] self.bar = db_obj['bar'] self.missing = db_obj['missing'] self.readonly = 1 return self def obj_load_attr(self, attrname): setattr(self, attrname, 'loaded!') @base.remotable_classmethod def query(cls, context): obj = cls(foo=1, bar='bar') obj.obj_reset_changes() return obj 
@base.remotable def marco(self, context): return 'polo' @base.remotable def _update_test(self, context): if context.project_id == 'alternate': self.bar = 'alternate-context' else: self.bar = 'updated' @base.remotable def save(self, context): self.obj_reset_changes() @base.remotable def refresh(self, context): self.foo = 321 self.bar = 'refreshed' self.obj_reset_changes() @base.remotable def modify_save_modify(self, context): self.bar = 'meow' self.save() self.foo = 42 self.rel_object = MyOwnedObject(baz=42) def obj_make_compatible(self, primitive, target_version): super(MyObj, self).obj_make_compatible(primitive, target_version) # NOTE(danms): Simulate an older version that had a different # format for the 'bar' attribute if target_version == '1.1' and 'bar' in primitive: primitive['bar'] = 'old%s' % primitive['bar'] class MyObjDiffVers(MyObj): VERSION = '1.5' @classmethod def obj_name(cls): return 'MyObj' class MyObj2(object): @classmethod def obj_name(cls): return 'MyObj' @base.remotable_classmethod def query(cls, *args, **kwargs): pass class RandomMixInWithNoFields(object): """Used to test object inheritance using a mixin that has no fields.""" pass class TestSubclassedObject(RandomMixInWithNoFields, MyObj): fields = {'new_field': fields.Field(fields.String())} class TestMetaclass(test.TestCase): def test_obj_tracking(self): @six.add_metaclass(base.NovaObjectMetaclass) class NewBaseClass(object): VERSION = '1.0' fields = {} @classmethod def obj_name(cls): return cls.__name__ class Fake1TestObj1(NewBaseClass): @classmethod def obj_name(cls): return 'fake1' class Fake1TestObj2(Fake1TestObj1): pass class Fake1TestObj3(Fake1TestObj1): VERSION = '1.1' class Fake2TestObj1(NewBaseClass): @classmethod def obj_name(cls): return 'fake2' class Fake1TestObj4(Fake1TestObj3): VERSION = '1.2' class Fake2TestObj2(Fake2TestObj1): VERSION = '1.1' class Fake1TestObj5(Fake1TestObj1): VERSION = '1.1' # Newest versions first in the list. Duplicate versions take the # newest object. 
expected = {'fake1': [Fake1TestObj4, Fake1TestObj5, Fake1TestObj2], 'fake2': [Fake2TestObj2, Fake2TestObj1]} self.assertEqual(expected, NewBaseClass._obj_classes) # The following should work, also. self.assertEqual(expected, Fake1TestObj1._obj_classes) self.assertEqual(expected, Fake1TestObj2._obj_classes) self.assertEqual(expected, Fake1TestObj3._obj_classes) self.assertEqual(expected, Fake1TestObj4._obj_classes) self.assertEqual(expected, Fake1TestObj5._obj_classes) self.assertEqual(expected, Fake2TestObj1._obj_classes) self.assertEqual(expected, Fake2TestObj2._obj_classes) def test_field_checking(self): def create_class(field): class TestField(base.NovaObject): VERSION = '1.5' fields = {'foo': field()} return TestField create_class(fields.IPV4AndV6AddressField) self.assertRaises(exception.ObjectFieldInvalid, create_class, fields.IPV4AndV6Address) self.assertRaises(exception.ObjectFieldInvalid, create_class, int) class TestObjToPrimitive(test.TestCase): def test_obj_to_primitive_list(self): class MyObjElement(base.NovaObject): fields = {'foo': fields.IntegerField()} def __init__(self, foo): super(MyObjElement, self).__init__() self.foo = foo class MyList(base.ObjectListBase, base.NovaObject): fields = {'objects': fields.ListOfObjectsField('MyObjElement')} mylist = MyList() mylist.objects = [MyObjElement(1), MyObjElement(2), MyObjElement(3)] self.assertEqual([1, 2, 3], [x['foo'] for x in base.obj_to_primitive(mylist)]) def test_obj_to_primitive_dict(self): myobj = MyObj(foo=1, bar='foo') self.assertEqual({'foo': 1, 'bar': 'foo'}, base.obj_to_primitive(myobj)) def test_obj_to_primitive_recursive(self): class MyList(base.ObjectListBase, base.NovaObject): fields = {'objects': fields.ListOfObjectsField('MyObj')} mylist = MyList(objects=[MyObj(), MyObj()]) for i, value in enumerate(mylist): value.foo = i self.assertEqual([{'foo': 0}, {'foo': 1}], base.obj_to_primitive(mylist)) def test_obj_to_primitive_with_ip_addr(self): class TestObject(base.NovaObject): fields = 
{'addr': fields.IPAddressField(), 'cidr': fields.IPNetworkField()} obj = TestObject(addr='1.2.3.4', cidr='1.1.1.1/16') self.assertEqual({'addr': '1.2.3.4', 'cidr': '1.1.1.1/16'}, base.obj_to_primitive(obj)) class TestObjMakeList(test.TestCase): def test_obj_make_list(self): class MyList(base.ObjectListBase, base.NovaObject): pass db_objs = [{'foo': 1, 'bar': 'baz', 'missing': 'banana'}, {'foo': 2, 'bar': 'bat', 'missing': 'apple'}, ] mylist = base.obj_make_list('ctxt', MyList(), MyObj, db_objs) self.assertEqual(2, len(mylist)) self.assertEqual('ctxt', mylist._context) for index, item in enumerate(mylist): self.assertEqual(db_objs[index]['foo'], item.foo) self.assertEqual(db_objs[index]['bar'], item.bar) self.assertEqual(db_objs[index]['missing'], item.missing) def compare_obj(test, obj, db_obj, subs=None, allow_missing=None, comparators=None): """Compare a NovaObject and a dict-like database object. This automatically converts TZ-aware datetimes and iterates over the fields of the object. 
:param:test: The TestCase doing the comparison :param:obj: The NovaObject to examine :param:db_obj: The dict-like database object to use as reference :param:subs: A dict of objkey=dbkey field substitutions :param:allow_missing: A list of fields that may not be in db_obj :param:comparators: Map of comparator functions to use for certain fields """ if subs is None: subs = {} if allow_missing is None: allow_missing = [] if comparators is None: comparators = {} for key in obj.fields: if key in allow_missing and not obj.obj_attr_is_set(key): continue obj_val = obj[key] db_key = subs.get(key, key) db_val = db_obj[db_key] if isinstance(obj_val, datetime.datetime): obj_val = obj_val.replace(tzinfo=None) if key in comparators: comparator = comparators[key] comparator(db_val, obj_val) else: test.assertEqual(db_val, obj_val) class _BaseTestCase(test.TestCase): def setUp(self): super(_BaseTestCase, self).setUp() self.remote_object_calls = list() self.user_id = 'fake-user' self.project_id = 'fake-project' self.context = context.RequestContext(self.user_id, self.project_id) fake_notifier.stub_notifier(self.stubs) self.addCleanup(fake_notifier.reset) def compare_obj(self, obj, db_obj, subs=None, allow_missing=None, comparators=None): compare_obj(self, obj, db_obj, subs=subs, allow_missing=allow_missing, comparators=comparators) def json_comparator(self, expected, obj_val): # json-ify an object field for comparison with its db str # equivalent self.assertEqual(expected, jsonutils.dumps(obj_val)) def str_comparator(self, expected, obj_val): """Compare an object field to a string in the db by performing a simple coercion on the object field value. """ self.assertEqual(expected, str(obj_val)) def assertNotIsInstance(self, obj, cls, msg=None): """Python < v2.7 compatibility. 
Assert 'not isinstance(obj, cls).""" try: f = super(_BaseTestCase, self).assertNotIsInstance except AttributeError: self.assertThat(obj, matchers.Not(matchers.IsInstance(cls)), message=msg or '') else: f(obj, cls, msg=msg) class _LocalTest(_BaseTestCase): def setUp(self): super(_LocalTest, self).setUp() # Just in case base.NovaObject.indirection_api = None def assertRemotes(self): self.assertEqual(self.remote_object_calls, []) @contextlib.contextmanager def things_temporarily_local(): # Temporarily go non-remote so the conductor handles # this request directly _api = base.NovaObject.indirection_api base.NovaObject.indirection_api = None yield base.NovaObject.indirection_api = _api class _RemoteTest(_BaseTestCase): def _testable_conductor(self): self.conductor_service = self.start_service( 'conductor', manager='nova.conductor.manager.ConductorManager') self.remote_object_calls = list() orig_object_class_action = \ self.conductor_service.manager.object_class_action orig_object_action = \ self.conductor_service.manager.object_action def fake_object_class_action(*args, **kwargs): self.remote_object_calls.append((kwargs.get('objname'), kwargs.get('objmethod'))) with things_temporarily_local(): result = orig_object_class_action(*args, **kwargs) return (base.NovaObject.obj_from_primitive(result, context=args[0]) if isinstance(result, base.NovaObject) else result) self.stubs.Set(self.conductor_service.manager, 'object_class_action', fake_object_class_action) def fake_object_action(*args, **kwargs): self.remote_object_calls.append((kwargs.get('objinst'), kwargs.get('objmethod'))) with things_temporarily_local(): result = orig_object_action(*args, **kwargs) return result self.stubs.Set(self.conductor_service.manager, 'object_action', fake_object_action) # Things are remoted by default in this session base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI() # To make sure local and remote contexts match self.stubs.Set(rpc.RequestContextSerializer, 
'serialize_context', lambda s, c: c) self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context', lambda s, c: c) def setUp(self): super(_RemoteTest, self).setUp() self._testable_conductor() def assertRemotes(self): self.assertNotEqual(self.remote_object_calls, []) class _TestObject(object): def test_object_attrs_in_init(self): # Spot check a few objects.Instance objects.InstanceInfoCache objects.SecurityGroup # Now check the test one in this file. Should be newest version self.assertEqual('1.6', objects.MyObj.VERSION) def test_hydration_type_error(self): primitive = {'nova_object.name': 'MyObj', 'nova_object.namespace': 'nova', 'nova_object.version': '1.5', 'nova_object.data': {'foo': 'a'}} self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive) def test_hydration(self): primitive = {'nova_object.name': 'MyObj', 'nova_object.namespace': 'nova', 'nova_object.version': '1.5', 'nova_object.data': {'foo': 1}} real_method = MyObj._obj_from_primitive def _obj_from_primitive(*args): return real_method(*args) with mock.patch.object(MyObj, '_obj_from_primitive') as ofp: ofp.side_effect = _obj_from_primitive obj = MyObj.obj_from_primitive(primitive) ofp.assert_called_once_with(None, '1.5', primitive) self.assertEqual(obj.foo, 1) def test_hydration_version_different(self): primitive = {'nova_object.name': 'MyObj', 'nova_object.namespace': 'nova', 'nova_object.version': '1.2', 'nova_object.data': {'foo': 1}} obj = MyObj.obj_from_primitive(primitive) self.assertEqual(obj.foo, 1) self.assertEqual('1.2', obj.VERSION) def test_hydration_bad_ns(self): primitive = {'nova_object.name': 'MyObj', 'nova_object.namespace': 'foo', 'nova_object.version': '1.5', 'nova_object.data': {'foo': 1}} self.assertRaises(exception.UnsupportedObjectError, MyObj.obj_from_primitive, primitive) def test_dehydration(self): expected = {'nova_object.name': 'MyObj', 'nova_object.namespace': 'nova', 'nova_object.version': '1.6', 'nova_object.data': {'foo': 1}} obj = MyObj(foo=1) 
obj.obj_reset_changes() self.assertEqual(obj.obj_to_primitive(), expected) def test_object_property(self): obj = MyObj(foo=1) self.assertEqual(obj.foo, 1) def test_object_property_type_error(self): obj = MyObj() def fail(): obj.foo = 'a' self.assertRaises(ValueError, fail) def test_object_dict_syntax(self): obj = MyObj(foo=123, bar='bar') self.assertEqual(obj['foo'], 123) self.assertEqual(sorted(obj.items(), key=lambda x: x[0]), [('bar', 'bar'), ('foo', 123)]) self.assertEqual(sorted(list(obj.iteritems()), key=lambda x: x[0]), [('bar', 'bar'), ('foo', 123)]) def test_load(self): obj = MyObj() self.assertEqual(obj.bar, 'loaded!') def test_load_in_base(self): class Foo(base.NovaObject): fields = {'foobar': fields.Field(fields.Integer())} obj = Foo() with self.assertRaisesRegexp(NotImplementedError, ".*foobar.*"): obj.foobar def test_loaded_in_primitive(self): obj = MyObj(foo=1) obj.obj_reset_changes() self.assertEqual(obj.bar, 'loaded!') expected = {'nova_object.name': 'MyObj', 'nova_object.namespace': 'nova', 'nova_object.version': '1.6', 'nova_object.changes': ['bar'], 'nova_object.data': {'foo': 1, 'bar': 'loaded!'}} self.assertEqual(obj.obj_to_primitive(), expected) def test_changes_in_primitive(self): obj = MyObj(foo=123) self.assertEqual(obj.obj_what_changed(), set(['foo'])) primitive = obj.obj_to_primitive() self.assertIn('nova_object.changes', primitive) obj2 = MyObj.obj_from_primitive(primitive) self.assertEqual(obj2.obj_what_changed(), set(['foo'])) obj2.obj_reset_changes() self.assertEqual(obj2.obj_what_changed(), set()) def test_obj_class_from_name(self): obj = base.NovaObject.obj_class_from_name('MyObj', '1.5') self.assertEqual('1.5', obj.VERSION) def test_obj_class_from_name_latest_compatible(self): obj = base.NovaObject.obj_class_from_name('MyObj', '1.1') self.assertEqual('1.6', obj.VERSION) def test_unknown_objtype(self): self.assertRaises(exception.UnsupportedObjectError, base.NovaObject.obj_class_from_name, 'foo', '1.0') def 
test_obj_class_from_name_supported_version(self): error = None try: base.NovaObject.obj_class_from_name('MyObj', '1.25') except exception.IncompatibleObjectVersion as error: pass self.assertIsNotNone(error) self.assertEqual('1.6', error.kwargs['supported']) def test_with_alternate_context(self): ctxt1 = context.RequestContext('foo', 'foo') ctxt2 = context.RequestContext('bar', 'alternate') obj = MyObj.query(ctxt1) obj._update_test(ctxt2) self.assertEqual(obj.bar, 'alternate-context') self.assertRemotes() def test_orphaned_object(self): obj = MyObj.query(self.context) obj._context = None self.assertRaises(exception.OrphanedObjectError, obj._update_test) self.assertRemotes() def test_changed_1(self): obj = MyObj.query(self.context) obj.foo = 123 self.assertEqual(obj.obj_what_changed(), set(['foo'])) obj._update_test(self.context) self.assertEqual(obj.obj_what_changed(), set(['foo', 'bar'])) self.assertEqual(obj.foo, 123) self.assertRemotes() def test_changed_2(self): obj = MyObj.query(self.context) obj.foo = 123 self.assertEqual(obj.obj_what_changed(), set(['foo'])) obj.save(self.context) self.assertEqual(obj.obj_what_changed(), set([])) self.assertEqual(obj.foo, 123) self.assertRemotes() def test_changed_3(self): obj = MyObj.query(self.context) obj.foo = 123 self.assertEqual(obj.obj_what_changed(), set(['foo'])) obj.refresh(self.context) self.assertEqual(obj.obj_what_changed(), set([])) self.assertEqual(obj.foo, 321) self.assertEqual(obj.bar, 'refreshed') self.assertRemotes() def test_changed_4(self): obj = MyObj.query(self.context) obj.bar = 'something' self.assertEqual(obj.obj_what_changed(), set(['bar'])) obj.modify_save_modify(self.context) self.assertEqual(obj.obj_what_changed(), set(['foo', 'rel_object'])) self.assertEqual(obj.foo, 42) self.assertEqual(obj.bar, 'meow') self.assertIsInstance(obj.rel_object, MyOwnedObject) self.assertRemotes() def test_changed_with_sub_object(self): class ParentObject(base.NovaObject): fields = {'foo': fields.IntegerField(), 
'bar': fields.ObjectField('MyObj'), } obj = ParentObject() self.assertEqual(set(), obj.obj_what_changed()) obj.foo = 1 self.assertEqual(set(['foo']), obj.obj_what_changed()) bar = MyObj() obj.bar = bar self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed()) obj.obj_reset_changes() self.assertEqual(set(), obj.obj_what_changed()) bar.foo = 1 self.assertEqual(set(['bar']), obj.obj_what_changed()) def test_static_result(self): obj = MyObj.query(self.context) self.assertEqual(obj.bar, 'bar') result = obj.marco() self.assertEqual(result, 'polo') self.assertRemotes() def test_updates(self): obj = MyObj.query(self.context) self.assertEqual(obj.foo, 1) obj._update_test() self.assertEqual(obj.bar, 'updated') self.assertRemotes() def test_base_attributes(self): dt = datetime.datetime(1955, 11, 5) obj = MyObj(created_at=dt, updated_at=dt, deleted_at=None, deleted=False) expected = {'nova_object.name': 'MyObj', 'nova_object.namespace': 'nova', 'nova_object.version': '1.6', 'nova_object.changes': ['deleted', 'created_at', 'deleted_at', 'updated_at'], 'nova_object.data': {'created_at': timeutils.isotime(dt), 'updated_at': timeutils.isotime(dt), 'deleted_at': None, 'deleted': False, } } self.assertEqual(obj.obj_to_primitive(), expected) def test_contains(self): obj = MyObj() self.assertNotIn('foo', obj) obj.foo = 1 self.assertIn('foo', obj) self.assertNotIn('does_not_exist', obj) def test_obj_attr_is_set(self): obj = MyObj(foo=1) self.assertTrue(obj.obj_attr_is_set('foo')) self.assertFalse(obj.obj_attr_is_set('bar')) self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang') def test_get(self): obj = MyObj(foo=1) # Foo has value, should not get the default self.assertEqual(obj.get('foo', 2), 1) # Foo has value, should return the value without error self.assertEqual(obj.get('foo'), 1) # Bar is not loaded, so we should get the default self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded') # Bar without a default should lazy-load self.assertEqual(obj.get('bar'), 
'loaded!') # Bar now has a default, but loaded value should be returned self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!') # Invalid attribute should raise AttributeError self.assertRaises(AttributeError, obj.get, 'nothing') # ...even with a default self.assertRaises(AttributeError, obj.get, 'nothing', 3) def test_object_inheritance(self): base_fields = base.NovaPersistentObject.fields.keys() myobj_fields = ['foo', 'bar', 'missing', 'readonly', 'rel_object'] + base_fields myobj3_fields = ['new_field'] self.assertTrue(issubclass(TestSubclassedObject, MyObj)) self.assertEqual(len(myobj_fields), len(MyObj.fields)) self.assertEqual(set(myobj_fields), set(MyObj.fields.keys())) self.assertEqual(len(myobj_fields) + len(myobj3_fields), len(TestSubclassedObject.fields)) self.assertEqual(set(myobj_fields) | set(myobj3_fields), set(TestSubclassedObject.fields.keys())) def test_get_changes(self): obj = MyObj() self.assertEqual({}, obj.obj_get_changes()) obj.foo = 123 self.assertEqual({'foo': 123}, obj.obj_get_changes()) obj.bar = 'test' self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes()) obj.obj_reset_changes() self.assertEqual({}, obj.obj_get_changes()) def test_obj_fields(self): class TestObj(base.NovaObject): fields = {'foo': fields.Field(fields.Integer())} obj_extra_fields = ['bar'] @property def bar(self): return 'this is bar' obj = TestObj() self.assertEqual(['foo', 'bar'], obj.obj_fields) def test_obj_constructor(self): obj = MyObj(context=self.context, foo=123, bar='abc') self.assertEqual(123, obj.foo) self.assertEqual('abc', obj.bar) self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed()) def test_obj_read_only(self): obj = MyObj(context=self.context, foo=123, bar='abc') obj.readonly = 1 self.assertRaises(exception.ReadOnlyFieldError, setattr, obj, 'readonly', 2) def test_obj_repr(self): obj = MyObj(foo=123) self.assertEqual('MyObj(bar=<?>,created_at=<?>,deleted=<?>,' 'deleted_at=<?>,foo=123,missing=<?>,readonly=<?>,' 
'rel_object=<?>,updated_at=<?>)', repr(obj)) def test_obj_make_obj_compatible(self): subobj = MyOwnedObject(baz=1) obj = MyObj(rel_object=subobj) obj.obj_relationships = { 'rel_object': [('1.5', '1.1'), ('1.7', '1.2')], } primitive = obj.obj_to_primitive()['nova_object.data'] with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat: obj._obj_make_obj_compatible(copy.copy(primitive), '1.8', 'rel_object') self.assertFalse(mock_compat.called) with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat: obj._obj_make_obj_compatible(copy.copy(primitive), '1.7', 'rel_object') mock_compat.assert_called_once_with( primitive['rel_object']['nova_object.data'], '1.2') self.assertEqual('1.2', primitive['rel_object']['nova_object.version']) with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat: obj._obj_make_obj_compatible(copy.copy(primitive), '1.6', 'rel_object') mock_compat.assert_called_once_with( primitive['rel_object']['nova_object.data'], '1.1') self.assertEqual('1.1', primitive['rel_object']['nova_object.version']) with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat: obj._obj_make_obj_compatible(copy.copy(primitive), '1.5', 'rel_object') mock_compat.assert_called_once_with( primitive['rel_object']['nova_object.data'], '1.1') self.assertEqual('1.1', primitive['rel_object']['nova_object.version']) with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat: _prim = copy.copy(primitive) obj._obj_make_obj_compatible(_prim, '1.4', 'rel_object') self.assertFalse(mock_compat.called) self.assertNotIn('rel_object', _prim) def test_obj_make_compatible_hits_sub_objects(self): subobj = MyOwnedObject(baz=1) obj = MyObj(foo=123, rel_object=subobj) obj.obj_relationships = {'rel_object': [('1.0', '1.0')]} with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat: obj.obj_make_compatible({'rel_object': 'foo'}, '1.10') mock_compat.assert_called_once_with({'rel_object': 'foo'}, '1.10', 'rel_object') def 
test_obj_make_compatible_skips_unset_sub_objects(self): obj = MyObj(foo=123) obj.obj_relationships = {'rel_object': [('1.0', '1.0')]} with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat: obj.obj_make_compatible({'rel_object': 'foo'}, '1.10') self.assertFalse(mock_compat.called) def test_obj_make_compatible_complains_about_missing_rules(self): subobj = MyOwnedObject(baz=1) obj = MyObj(foo=123, rel_object=subobj) obj.obj_relationships = {} self.assertRaises(exception.ObjectActionError, obj.obj_make_compatible, {}, '1.0') class TestObject(_LocalTest, _TestObject): pass class TestRemoteObject(_RemoteTest, _TestObject): def test_major_version_mismatch(self): MyObj2.VERSION = '2.0' self.assertRaises(exception.IncompatibleObjectVersion, MyObj2.query, self.context) def test_minor_version_greater(self): MyObj2.VERSION = '1.7' self.assertRaises(exception.IncompatibleObjectVersion, MyObj2.query, self.context) def test_minor_version_less(self): MyObj2.VERSION = '1.2' obj = MyObj2.query(self.context) self.assertEqual(obj.bar, 'bar') self.assertRemotes() def test_compat(self): MyObj2.VERSION = '1.1' obj = MyObj2.query(self.context) self.assertEqual('oldbar', obj.bar) class TestObjectListBase(test.TestCase): def test_list_like_operations(self): class MyElement(base.NovaObject): fields = {'foo': fields.IntegerField()} def __init__(self, foo): super(MyElement, self).__init__() self.foo = foo class Foo(base.ObjectListBase, base.NovaObject): fields = {'objects': fields.ListOfObjectsField('MyElement')} objlist = Foo(context='foo', objects=[MyElement(1), MyElement(2), MyElement(3)]) self.assertEqual(list(objlist), objlist.objects) self.assertEqual(len(objlist), 3) self.assertIn(objlist.objects[0], objlist) self.assertEqual(list(objlist[:1]), [objlist.objects[0]]) self.assertEqual(objlist[:1]._context, 'foo') self.assertEqual(objlist[2], objlist.objects[2]) self.assertEqual(objlist.count(objlist.objects[0]), 1) self.assertEqual(objlist.index(objlist.objects[1]), 1) 
objlist.sort(key=lambda x: x.foo, reverse=True) self.assertEqual([3, 2, 1], [x.foo for x in objlist]) def test_serialization(self): class Foo(base.ObjectListBase, base.NovaObject): fields = {'objects': fields.ListOfObjectsField('Bar')} class Bar(base.NovaObject): fields = {'foo': fields.Field(fields.String())} obj = Foo(objects=[]) for i in 'abc': bar = Bar(foo=i) obj.objects.append(bar) obj2 = base.NovaObject.obj_from_primitive(obj.obj_to_primitive()) self.assertFalse(obj is obj2) self.assertEqual([x.foo for x in obj], [y.foo for y in obj2]) def _test_object_list_version_mappings(self, list_obj_class): # Figure out what sort of object this list is for list_field = list_obj_class.fields['objects'] item_obj_field = list_field._type._element_type item_obj_name = item_obj_field._type._obj_name # Look through all object classes of this type and make sure that # the versions we find are covered by the parent list class for item_class in base.NovaObject._obj_classes[item_obj_name]: self.assertIn( item_class.VERSION, list_obj_class.child_versions.values(), 'Version mapping is incomplete for %s' % ( list_obj_class.__name__)) def test_object_version_mappings(self): # Find all object list classes and make sure that they at least handle # all the current object versions for obj_classes in base.NovaObject._obj_classes.values(): for obj_class in obj_classes: if issubclass(obj_class, base.ObjectListBase): self._test_object_list_version_mappings(obj_class) def test_list_changes(self): class Foo(base.ObjectListBase, base.NovaObject): fields = {'objects': fields.ListOfObjectsField('Bar')} class Bar(base.NovaObject): fields = {'foo': fields.StringField()} obj = Foo(objects=[]) self.assertEqual(set(['objects']), obj.obj_what_changed()) obj.objects.append(Bar(foo='test')) self.assertEqual(set(['objects']), obj.obj_what_changed()) obj.obj_reset_changes() # This should still look dirty because the child is dirty self.assertEqual(set(['objects']), obj.obj_what_changed()) 
obj.objects[0].obj_reset_changes() # This should now look clean because the child is clean self.assertEqual(set(), obj.obj_what_changed()) def test_initialize_objects(self): class Foo(base.ObjectListBase, base.NovaObject): fields = {'objects': fields.ListOfObjectsField('Bar')} class Bar(base.NovaObject): fields = {'foo': fields.StringField()} obj = Foo() self.assertEqual([], obj.objects) self.assertEqual(set(), obj.obj_what_changed()) def test_obj_repr(self): class Foo(base.ObjectListBase, base.NovaObject): fields = {'objects': fields.ListOfObjectsField('Bar')} class Bar(base.NovaObject): fields = {'uuid': fields.StringField()} obj = Foo(objects=[Bar(uuid='fake-uuid')]) self.assertEqual('Foo(objects=[Bar(fake-uuid)])', repr(obj)) class TestObjectSerializer(_BaseTestCase): def test_serialize_entity_primitive(self): ser = base.NovaObjectSerializer() for thing in (1, 'foo', [1, 2], {'foo': 'bar'}): self.assertEqual(thing, ser.serialize_entity(None, thing)) def test_deserialize_entity_primitive(self): ser = base.NovaObjectSerializer() for thing in (1, 'foo', [1, 2], {'foo': 'bar'}): self.assertEqual(thing, ser.deserialize_entity(None, thing)) def test_deserialize_entity_newer_version(self): ser = base.NovaObjectSerializer() ser._conductor = mock.Mock() ser._conductor.object_backport.return_value = 'backported' obj = MyObj() obj.VERSION = '1.25' primitive = obj.obj_to_primitive() result = ser.deserialize_entity(self.context, primitive) self.assertEqual('backported', result) ser._conductor.object_backport.assert_called_with(self.context, primitive, '1.6') def test_object_serialization(self): ser = base.NovaObjectSerializer() obj = MyObj() primitive = ser.serialize_entity(self.context, obj) self.assertIn('nova_object.name', primitive) obj2 = ser.deserialize_entity(self.context, primitive) self.assertIsInstance(obj2, MyObj) self.assertEqual(self.context, obj2._context) def test_object_serialization_iterables(self): ser = base.NovaObjectSerializer() obj = MyObj() for 
iterable in (list, tuple, set): thing = iterable([obj]) primitive = ser.serialize_entity(self.context, thing) self.assertEqual(1, len(primitive)) for item in primitive: self.assertNotIsInstance(item, base.NovaObject) thing2 = ser.deserialize_entity(self.context, primitive) self.assertEqual(1, len(thing2)) for item in thing2: self.assertIsInstance(item, MyObj) # dict case thing = {'key': obj} primitive = ser.serialize_entity(self.context, thing) self.assertEqual(1, len(primitive)) for item in primitive.itervalues(): self.assertNotIsInstance(item, base.NovaObject) thing2 = ser.deserialize_entity(self.context, primitive) self.assertEqual(1, len(thing2)) for item in thing2.itervalues(): self.assertIsInstance(item, MyObj) # object-action updates dict case thing = {'foo': obj.obj_to_primitive()} primitive = ser.serialize_entity(self.context, thing) self.assertEqual(thing, primitive) thing2 = ser.deserialize_entity(self.context, thing) self.assertIsInstance(thing2['foo'], base.NovaObject) # NOTE(danms): The hashes in this list should only be changed if # they come with a corresponding version bump in the affected # objects object_data = { 'Agent': '1.0-c4ff8a833aee8ae44ab8aed1a171273d', 'AgentList': '1.0-31f07426a729311a42ff7f6246e76e25', 'Aggregate': '1.1-f5d477be06150529a9b2d27cc49030b5', 'AggregateList': '1.2-4b02a285b8612bfb86a96ff80052fb0a', 'BandwidthUsage': '1.2-a9d7c2ba54995e48ce38688c51c9416d', 'BandwidthUsageList': '1.2-5b564cbfd5ae6e106443c086938e7602', 'BlockDeviceMapping': '1.4-9968ffe513e7672484b0f528b034cd0f', 'BlockDeviceMappingList': '1.5-83767968de6e91e9705bddaae02bc649', 'ComputeNode': '1.6-d2ea9b8f4a6e95ff6a683266eebddbff', 'ComputeNodeList': '1.6-205aa2ea08d49f6ce87df1fcd2407b4e', 'DNSDomain': '1.0-5bdc288d7c3b723ce86ede998fd5c9ba', 'DNSDomainList': '1.0-cfb3e7e82be661501c31099523154db4', 'EC2InstanceMapping': '1.0-627baaf4b12c9067200979bdc4558a99', 'EC2SnapshotMapping': '1.0-26cf315be1f8abab4289d4147671c836', 'EC2VolumeMapping': 
'1.0-2f8c3bf077c65a425294ec2b361c9143', 'FixedIP': '1.6-2472964d39e50da67202109eb85cd173', 'FixedIPList': '1.6-f2f740de66bc2d90627004bd311690ad', 'Flavor': '1.1-096cfd023c35d07542cf732fb29b45e4', 'FlavorList': '1.1-a3d5551267cb8f62ff38ded125900721', 'FloatingIP': '1.6-27eb68b7c9c620dd5f0561b5a3be0e82', 'FloatingIPList': '1.7-f376f63ed99243f9d90841b7f6732bbf', 'HVSpec': '1.0-c4d8377cc4fe519930e60c1d8265a142', 'Instance': '1.16-b00c09fb92ae80b393943f56e84abd9c', 'InstanceAction': '1.1-6b1d0a6dbd522b5a83c20757ec659663', 'InstanceActionEvent': '1.1-42dbdba74bd06e0619ca75cd3397cd1b', 'InstanceActionEventList': '1.0-1d5cc958171d6ce07383c2ad6208318e', 'InstanceActionList': '1.0-368410fdb8d69ae20c495308535d6266', 'InstanceExternalEvent': '1.0-f1134523654407a875fd59b80f759ee7', 'InstanceFault': '1.2-313438e37e9d358f3566c85f6ddb2d3e', 'InstanceFaultList': '1.1-aeb598ffd0cd6aa61fca7adf0f5e900d', 'InstanceGroup': '1.9-95ece99f092e8f4f88327cdbb44162c9', 'InstanceGroupList': '1.6-c6b78f3c9d9080d33c08667e80589817', 'InstanceInfoCache': '1.5-ef64b604498bfa505a8c93747a9d8b2f', 'InstanceList': '1.11-bb0b3a74a1c82791330247a963a7d6a9', 'InstanceNUMACell': '1.1-8d2a13c8360cc9ea1b68c9c6c4476857', 'InstanceNUMATopology': '1.1-86b95d263c4c68411d44c6741b8d2bb0', 'InstancePCIRequest': '1.1-e082d174f4643e5756ba098c47c1510f', 'InstancePCIRequests': '1.1-bc7c6684d8579ee49d6a3b8aef756918', 'KeyPair': '1.1-3410f51950d052d861c11946a6ae621a', 'KeyPairList': '1.0-71132a568cc5d078ba1748a9c02c87b8', 'Migration': '1.1-67c47726c2c71422058cd9d149d6d3ed', 'MigrationList': '1.1-8c5f678edc72a592d591a13b35e54353', 'MyObj': '1.6-55bfc22259fd3df239e4a49fa3552c93', 'MyOwnedObject': '1.0-0f3d6c028543d7f3715d121db5b8e298', 'Network': '1.2-2ea21ede5e45bb80e7b7ac7106915c4e', 'NetworkList': '1.2-aa4ad23f035b97a41732ea8b3445fc5e', 'NetworkRequest': '1.1-f31192f5a725017707f989585e12d7dc', 'NetworkRequestList': '1.1-beeab521ac9450f1f5ef4eaa945a783c', 'PciDevice': '1.2-29e35c3199f3b98ce66e5d1212612818', 
'PciDeviceList': '1.1-2896df4f5b06579e5f35adba5fcae9db', 'Quotas': '1.1-7897deef00e6cd3095c8916f68d24418', 'QuotasNoOp': '1.1-4b06fd721c586b907ddd6543a00d6c2f', 'S3ImageMapping': '1.0-9225943a44a91ad0349b9fd8bd3f3ce2', 'SecurityGroup': '1.1-bba0e72865e0953793e796571692453b', 'SecurityGroupList': '1.0-528e6448adfeeb78921ebeda499ab72f', 'SecurityGroupRule': '1.1-a9175baf7664439af1a16c2010b55576', 'SecurityGroupRuleList': '1.1-667fca3a9928f23d2d10e61962c55f3c', 'Service': '1.5-82bbfd46a744a9c89bc44b47a1b81683', 'ServiceList': '1.3-4a1a5822dea268d0d7f892f5106bb2e1', 'TestSubclassedObject': '1.6-c63feb2f2533b7d075490c04a2cc10dd', 'VirtualInterface': '1.0-10fdac4c704102b6d57d6936d6d790d2', 'VirtualInterfaceList': '1.0-accbf02628a8063c1d885077a2bf49b6', } object_relationships = { 'BlockDeviceMapping': {'Instance': '1.16'}, 'FixedIP': {'Instance': '1.16', 'Network': '1.2', 'VirtualInterface': '1.0', 'FloatingIPList': '1.7'}, 'FloatingIP': {'FixedIP': '1.6'}, 'Instance': {'InstanceFault': '1.2', 'InstanceInfoCache': '1.5', 'InstanceNUMATopology': '1.1', 'PciDeviceList': '1.1', 'SecurityGroupList': '1.0', 'InstancePCIRequests': '1.1'}, 'MyObj': {'MyOwnedObject': '1.0'}, 'SecurityGroupRule': {'SecurityGroup': '1.1'}, 'Service': {'ComputeNode': '1.6'}, 'TestSubclassedObject': {'MyOwnedObject': '1.0'} } class TestObjectVersions(test.TestCase): def _find_remotable_method(self, cls, thing, parent_was_remotable=False): """Follow a chain of remotable things down to the original function.""" if isinstance(thing, classmethod): return self._find_remotable_method(cls, thing.__get__(None, cls)) elif inspect.ismethod(thing) and hasattr(thing, 'remotable'): return self._find_remotable_method(cls, thing.original_fn, parent_was_remotable=True) elif parent_was_remotable: # We must be the first non-remotable thing underneath a stack of # remotable things (i.e. 
the actual implementation method) return thing else: # This means the top-level thing never hit a remotable layer return None def _get_fingerprint(self, obj_name): obj_class = base.NovaObject._obj_classes[obj_name][0] fields = obj_class.fields.items() fields.sort() methods = [] for name in dir(obj_class): thing = getattr(obj_class, name) if inspect.ismethod(thing) or isinstance(thing, classmethod): method = self._find_remotable_method(obj_class, thing) if method: methods.append((name, inspect.getargspec(method))) methods.sort() # NOTE(danms): Things that need a version bump are any fields # and their types, or the signatures of any remotable methods. # Of course, these are just the mechanical changes we can detect, # but many other things may require a version bump (method behavior # and return value changes, for example). if hasattr(obj_class, 'child_versions'): relevant_data = (fields, methods, obj_class.child_versions) else: relevant_data = (fields, methods) fingerprint = '%s-%s' % (obj_class.VERSION, hashlib.md5(str(relevant_data)).hexdigest()) return fingerprint def test_versions(self): fingerprints = {} for obj_name in base.NovaObject._obj_classes: fingerprints[obj_name] = self._get_fingerprint(obj_name) if os.getenv('GENERATE_HASHES'): file('object_hashes.txt', 'w').write( pprint.pformat(fingerprints)) raise test.TestingException( 'Generated hashes in object_hashes.txt') stored = set(object_data.items()) computed = set(fingerprints.items()) changed = stored.symmetric_difference(computed) expected = {} actual = {} for name, hash in changed: expected[name] = object_data.get(name) actual[name] = fingerprints.get(name) self.assertEqual(expected, actual, 'Some objects have changed; please make sure the ' 'versions have been bumped, and then update their ' 'hashes here.') def _build_tree(self, tree, obj_class): obj_name = obj_class.obj_name() if obj_name in tree: return for name, field in obj_class.fields.items(): if isinstance(field._type, fields.Object): 
sub_obj_name = field._type._obj_name sub_obj_class = base.NovaObject._obj_classes[sub_obj_name][0] self._build_tree(tree, sub_obj_class) tree.setdefault(obj_name, {}) tree[obj_name][sub_obj_name] = sub_obj_class.VERSION def test_relationships(self): tree = {} for obj_name in base.NovaObject._obj_classes.keys(): self._build_tree(tree, base.NovaObject._obj_classes[obj_name][0]) stored = set([(x, str(y)) for x, y in object_relationships.items()]) computed = set([(x, str(y)) for x, y in tree.items()]) changed = stored.symmetric_difference(computed) expected = {} actual = {} for name, deps in changed: expected[name] = object_relationships.get(name) actual[name] = tree.get(name) self.assertEqual(expected, actual, 'Some objects have changed dependencies. ' 'Please make sure to bump the versions of ' 'parent objects and provide a rule in their ' 'obj_make_compatible() routines to backlevel ' 'the child object.') def test_obj_make_compatible(self): # Iterate all object classes and verify that we can run # obj_make_compatible with every older version than current. # This doesn't actually test the data conversions, but it at least # makes sure the method doesn't blow up on something basic like # expecting the wrong version format. for obj_name in base.NovaObject._obj_classes: obj_class = base.NovaObject._obj_classes[obj_name][0] version = utils.convert_version_to_tuple(obj_class.VERSION) for n in range(version[1]): test_version = '%d.%d' % (version[0], n) LOG.info('testing obj: %s version: %s' % (obj_name, test_version)) obj_class().obj_to_primitive(target_version=test_version) def test_obj_relationships_in_order(self): # Iterate all object classes and verify that we can run # obj_make_compatible with every older version than current. # This doesn't actually test the data conversions, but it at least # makes sure the method doesn't blow up on something basic like # expecting the wrong version format. 
for obj_name in base.NovaObject._obj_classes: obj_class = base.NovaObject._obj_classes[obj_name][0] for field, versions in obj_class.obj_relationships.items(): last_my_version = (0, 0) last_child_version = (0, 0) for my_version, child_version in versions: _my_version = utils.convert_version_to_tuple(my_version) _ch_version = utils.convert_version_to_tuple(child_version) self.assertTrue((last_my_version < _my_version and last_child_version <= _ch_version), 'Object %s relationship ' '%s->%s for field %s is out of order' % ( obj_name, my_version, child_version, field)) last_my_version = _my_version last_child_version = _ch_version
apache-2.0
l5h5t7/ZeroNet
src/Content/ContentDbDict.py
2
4495
import time import os import ContentDb class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception, err: self.log.warning("Error loading %s: %s" % 
(key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception, err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem
gpl-2.0
giorgiop/scipy
scipy/linalg/_solvers.py
26
10144
"""Matrix equation solver routines""" # Author: Jeffrey Armstrong <jeff@approximatrix.com> # February 24, 2012 # Modified: Chad Fulton <ChadFulton@gmail.com> # June 19, 2014 from __future__ import division, print_function, absolute_import import numpy as np from numpy.linalg import inv, LinAlgError from .basic import solve from .lapack import get_lapack_funcs from .decomp_schur import schur from .special_matrices import kron __all__ = ['solve_sylvester', 'solve_lyapunov', 'solve_discrete_lyapunov', 'solve_continuous_are', 'solve_discrete_are'] def solve_sylvester(a, b, q): """ Computes a solution (X) to the Sylvester equation :math:`AX + XB = Q`. Parameters ---------- a : (M, M) array_like Leading matrix of the Sylvester equation b : (N, N) array_like Trailing matrix of the Sylvester equation q : (M, N) array_like Right-hand side Returns ------- x : (M, N) ndarray The solution to the Sylvester equation. Raises ------ LinAlgError If solution was not found Notes ----- Computes a solution to the Sylvester matrix equation via the Bartels- Stewart algorithm. The A and B matrices first undergo Schur decompositions. The resulting matrices are used to construct an alternative Sylvester equation (``RY + YS^T = F``) where the R and S matrices are in quasi-triangular form (or, when R, S or F are complex, triangular form). The simplified equation is then solved using ``*TRSYL`` from LAPACK directly. .. 
versionadded:: 0.11.0 """ # Compute the Schur decomp form of a r, u = schur(a, output='real') # Compute the Schur decomp of b s, v = schur(b.conj().transpose(), output='real') # Construct f = u'*q*v f = np.dot(np.dot(u.conj().transpose(), q), v) # Call the Sylvester equation solver trsyl, = get_lapack_funcs(('trsyl',), (r, s, f)) if trsyl is None: raise RuntimeError('LAPACK implementation does not contain a proper ' 'Sylvester equation solver (TRSYL)') y, scale, info = trsyl(r, s, f, tranb='C') y = scale*y if info < 0: raise LinAlgError("Illegal value encountered in " "the %d term" % (-info,)) return np.dot(np.dot(u, y), v.conj().transpose()) def solve_lyapunov(a, q): """ Solves the continuous Lyapunov equation :math:`AX + XA^H = Q`. Uses the Bartels-Stewart algorithm to find :math:`X`. Parameters ---------- a : array_like A square matrix q : array_like Right-hand side square matrix Returns ------- x : array_like Solution to the continuous Lyapunov equation See Also -------- solve_sylvester : computes the solution to the Sylvester equation Notes ----- Because the continuous Lyapunov equation is just a special form of the Sylvester equation, this solver relies entirely on solve_sylvester for a solution. .. versionadded:: 0.11.0 """ return solve_sylvester(a, a.conj().transpose(), q) def _solve_discrete_lyapunov_direct(a, q): """ Solves the discrete Lyapunov equation directly. This function is called by the `solve_discrete_lyapunov` function with `method=direct`. It is not supposed to be called directly. """ lhs = kron(a, a.conj()) lhs = np.eye(lhs.shape[0]) - lhs x = solve(lhs, q.flatten()) return np.reshape(x, q.shape) def _solve_discrete_lyapunov_bilinear(a, q): """ Solves the discrete Lyapunov equation using a bilinear transformation. This function is called by the `solve_discrete_lyapunov` function with `method=bilinear`. It is not supposed to be called directly. 
""" eye = np.eye(a.shape[0]) aH = a.conj().transpose() aHI_inv = inv(aH + eye) b = np.dot(aH - eye, aHI_inv) c = 2*np.dot(np.dot(inv(a + eye), q), aHI_inv) return solve_lyapunov(b.conj().transpose(), -c) def solve_discrete_lyapunov(a, q, method=None): """ Solves the discrete Lyapunov equation :math:`A'XA-X=-Q`. Parameters ---------- a : (M, M) array_like A square matrix q : (M, M) array_like Right-hand side square matrix method : {'direct', 'bilinear'}, optional Type of solver. If not given, chosen to be ``direct`` if ``M`` is less than 10 and ``bilinear`` otherwise. Returns ------- x : ndarray Solution to the discrete Lyapunov equation See Also -------- solve_lyapunov : computes the solution to the continuous Lyapunov equation Notes ----- This section describes the available solvers that can be selected by the 'method' parameter. The default method is *direct* if ``M`` is less than 10 and ``bilinear`` otherwise. Method *direct* uses a direct analytical solution to the discrete Lyapunov equation. The algorithm is given in, for example, [1]_. However it requires the linear solution of a system with dimension :math:`M^2` so that performance degrades rapidly for even moderately sized matrices. Method *bilinear* uses a bilinear transformation to convert the discrete Lyapunov equation to a continuous Lyapunov equation :math:`(B'X+XB=-C)` where :math:`B=(A-I)(A+I)^{-1}` and :math:`C=2(A' + I)^{-1} Q (A + I)^{-1}`. The continuous equation can be efficiently solved since it is a special case of a Sylvester equation. The transformation algorithm is from Popov (1964) as described in [2]_. .. versionadded:: 0.11.0 References ---------- .. [1] Hamilton, James D. Time Series Analysis, Princeton: Princeton University Press, 1994. 265. Print. http://www.scribd.com/doc/20577138/Hamilton-1994-Time-Series-Analysis .. [2] Gajic, Z., and M.T.J. Qureshi. 2008. Lyapunov Matrix Equation in System Stability and Control. Dover Books on Engineering Series. Dover Publications. 
""" a = np.asarray(a) q = np.asarray(q) if method is None: # Select automatically based on size of matrices if a.shape[0] >= 10: method = 'bilinear' else: method = 'direct' meth = method.lower() if meth == 'direct': x = _solve_discrete_lyapunov_direct(a, q) elif meth == 'bilinear': x = _solve_discrete_lyapunov_bilinear(a, q) else: raise ValueError('Unknown solver %s' % method) return x def solve_continuous_are(a, b, q, r): """ Solves the continuous algebraic Riccati equation (CARE). The CARE is defined as .. math:: (A'X + XA - XBR^-1B'X+Q=0) It is solved directly using a Schur decomposition method. Parameters ---------- a : (M, M) array_like Input b : (M, N) array_like Input q : (M, M) array_like Input r : (N, N) array_like Non-singular, square matrix Returns ------- x : (M, M) ndarray Solution to the continuous algebraic Riccati equation See Also -------- solve_discrete_are : Solves the discrete algebraic Riccati equation Notes ----- Method taken from: Laub, "A Schur Method for Solving Algebraic Riccati Equations." U.S. Energy Research and Development Agency under contract ERDA-E(49-18)-2087. http://dspace.mit.edu/bitstream/handle/1721.1/1301/R-0859-05666488.pdf .. versionadded:: 0.11.0 """ try: g = inv(r) except LinAlgError: raise ValueError('Matrix R in the algebraic Riccati equation solver ' 'is ill-conditioned') g = np.dot(np.dot(b, g), b.conj().transpose()) z11 = a z12 = -1.0*g z21 = -1.0*q z22 = -1.0*a.conj().transpose() z = np.vstack((np.hstack((z11, z12)), np.hstack((z21, z22)))) # Note: we need to sort the upper left of s to have negative real parts, # while the lower right is positive real components (Laub, p. 7) s, u, _ = schur(z, sort='lhp') (m, n) = u.shape u11 = u[0:m//2, 0:n//2] u21 = u[m//2:m, 0:n//2] u11i = inv(u11) return np.dot(u21, u11i) def solve_discrete_are(a, b, q, r): """ Solves the discrete algebraic Riccati equation (DARE). The DARE is defined as .. 
math:: X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q It is solved directly using a Schur decomposition method. Parameters ---------- a : (M, M) array_like Non-singular, square matrix b : (M, N) array_like Input q : (M, M) array_like Input r : (N, N) array_like Non-singular, square matrix Returns ------- x : ndarray Solution to the continuous Lyapunov equation See Also -------- solve_continuous_are : Solves the continuous algebraic Riccati equation Notes ----- Method taken from: Laub, "A Schur Method for Solving Algebraic Riccati Equations." U.S. Energy Research and Development Agency under contract ERDA-E(49-18)-2087. http://dspace.mit.edu/bitstream/handle/1721.1/1301/R-0859-05666488.pdf .. versionadded:: 0.11.0 """ try: g = inv(r) except LinAlgError: raise ValueError('Matrix R in the algebraic Riccati equation solver ' 'is ill-conditioned') g = np.dot(np.dot(b, g), b.conj().transpose()) try: ait = inv(a).conj().transpose() # ait is "A inverse transpose" except LinAlgError: raise ValueError('Matrix A in the algebraic Riccati equation solver ' 'is ill-conditioned') z11 = a+np.dot(np.dot(g, ait), q) z12 = -1.0*np.dot(g, ait) z21 = -1.0*np.dot(ait, q) z22 = ait z = np.vstack((np.hstack((z11, z12)), np.hstack((z21, z22)))) # Note: we need to sort the upper left of s to lie within the unit circle, # while the lower right is outside (Laub, p. 7) s, u, _ = schur(z, sort='iuc') (m, n) = u.shape u11 = u[0:m//2, 0:n//2] u21 = u[m//2:m, 0:n//2] u11i = inv(u11) return np.dot(u21, u11i)
bsd-3-clause
Microvellum/Fluid-Designer
win64-vc/2.78/python/lib/test/test_import/__init__.py
4
42690
# We import importlib *ASAP* in order to test #15386 import importlib import importlib.util from importlib._bootstrap_external import _get_sourcefile import builtins import marshal import os import platform import py_compile import random import stat import sys import unittest import unittest.mock as mock import textwrap import errno import shutil import contextlib import test.support from test.support import ( EnvironmentVarGuard, TESTFN, check_warnings, forget, is_jython, make_legacy_pyc, rmtree, run_unittest, swap_attr, swap_item, temp_umask, unlink, unload, create_empty_file, cpython_only, TESTFN_UNENCODABLE, temp_dir) from test.support import script_helper skip_if_dont_write_bytecode = unittest.skipIf( sys.dont_write_bytecode, "test meaningful only when writing bytecode") def remove_files(name): for f in (name + ".py", name + ".pyc", name + ".pyw", name + "$py.class"): unlink(f) rmtree('__pycache__') @contextlib.contextmanager def _ready_to_import(name=None, source=""): # sets up a temporary directory and removes it # creates the module file # temporarily clears the module from sys.modules (if any) # reverts or removes the module when cleaning up name = name or "spam" with temp_dir() as tempdir: path = script_helper.make_script(tempdir, name, source) old_module = sys.modules.pop(name, None) try: sys.path.insert(0, tempdir) yield name, path sys.path.remove(tempdir) finally: if old_module is not None: sys.modules[name] = old_module elif name in sys.modules: del sys.modules[name] class ImportTests(unittest.TestCase): def setUp(self): remove_files(TESTFN) importlib.invalidate_caches() def tearDown(self): unload(TESTFN) def test_case_sensitivity(self): # Brief digression to test that import is case-sensitive: if we got # this far, we know for sure that "random" exists. with self.assertRaises(ImportError): import RAnDoM def test_double_const(self): # Another brief digression to test the accuracy of manifest float # constants. 
from test import double_const # don't blink -- that *was* the test def test_import(self): def test_with_extension(ext): # The extension is normally ".py", perhaps ".pyw". source = TESTFN + ext if is_jython: pyc = TESTFN + "$py.class" else: pyc = TESTFN + ".pyc" with open(source, "w") as f: print("# This tests Python's ability to import a", ext, "file.", file=f) a = random.randrange(1000) b = random.randrange(1000) print("a =", a, file=f) print("b =", b, file=f) if TESTFN in sys.modules: del sys.modules[TESTFN] importlib.invalidate_caches() try: try: mod = __import__(TESTFN) except ImportError as err: self.fail("import from %s failed: %s" % (ext, err)) self.assertEqual(mod.a, a, "module loaded (%s) but contents invalid" % mod) self.assertEqual(mod.b, b, "module loaded (%s) but contents invalid" % mod) finally: forget(TESTFN) unlink(source) unlink(pyc) sys.path.insert(0, os.curdir) try: test_with_extension(".py") if sys.platform.startswith("win"): for ext in [".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw"]: test_with_extension(ext) finally: del sys.path[0] def test_module_with_large_stack(self, module='longlist'): # Regression test for http://bugs.python.org/issue561858. filename = module + '.py' # Create a file with a list of 65000 elements. with open(filename, 'w') as f: f.write('d = [\n') for i in range(65000): f.write('"",\n') f.write(']') try: # Compile & remove .py file; we only need .pyc. # Bytecode must be relocated from the PEP 3147 bytecode-only location. py_compile.compile(filename) finally: unlink(filename) # Need to be able to load from current dir. sys.path.append('') importlib.invalidate_caches() namespace = {} try: make_legacy_pyc(filename) # This used to crash. exec('import ' + module, None, namespace) finally: # Cleanup. 
del sys.path[-1] unlink(filename + 'c') unlink(filename + 'o') # Remove references to the module (unload the module) namespace.clear() try: del sys.modules[module] except KeyError: pass def test_failing_import_sticks(self): source = TESTFN + ".py" with open(source, "w") as f: print("a = 1/0", file=f) # New in 2.4, we shouldn't be able to import that no matter how often # we try. sys.path.insert(0, os.curdir) importlib.invalidate_caches() if TESTFN in sys.modules: del sys.modules[TESTFN] try: for i in [1, 2, 3]: self.assertRaises(ZeroDivisionError, __import__, TESTFN) self.assertNotIn(TESTFN, sys.modules, "damaged module in sys.modules on %i try" % i) finally: del sys.path[0] remove_files(TESTFN) def test_import_name_binding(self): # import x.y.z binds x in the current namespace import test as x import test.support self.assertIs(x, test, x.__name__) self.assertTrue(hasattr(test.support, "__file__")) # import x.y.z as w binds z as w import test.support as y self.assertIs(y, test.support, y.__name__) def test_failing_reload(self): # A failing reload should leave the module object in sys.modules. source = TESTFN + os.extsep + "py" with open(source, "w") as f: f.write("a = 1\nb=2\n") sys.path.insert(0, os.curdir) try: mod = __import__(TESTFN) self.assertIn(TESTFN, sys.modules) self.assertEqual(mod.a, 1, "module has wrong attribute values") self.assertEqual(mod.b, 2, "module has wrong attribute values") # On WinXP, just replacing the .py file wasn't enough to # convince reload() to reparse it. Maybe the timestamp didn't # move enough. We force it to get reparsed by removing the # compiled file too. remove_files(TESTFN) # Now damage the module. with open(source, "w") as f: f.write("a = 10\nb=20//0\n") self.assertRaises(ZeroDivisionError, importlib.reload, mod) # But we still expect the module to be in sys.modules. 
mod = sys.modules.get(TESTFN) self.assertIsNotNone(mod, "expected module to be in sys.modules") # We should have replaced a w/ 10, but the old b value should # stick. self.assertEqual(mod.a, 10, "module has wrong attribute values") self.assertEqual(mod.b, 2, "module has wrong attribute values") finally: del sys.path[0] remove_files(TESTFN) unload(TESTFN) @skip_if_dont_write_bytecode def test_file_to_source(self): # check if __file__ points to the source file where available source = TESTFN + ".py" with open(source, "w") as f: f.write("test = None\n") sys.path.insert(0, os.curdir) try: mod = __import__(TESTFN) self.assertTrue(mod.__file__.endswith('.py')) os.remove(source) del sys.modules[TESTFN] make_legacy_pyc(source) importlib.invalidate_caches() mod = __import__(TESTFN) base, ext = os.path.splitext(mod.__file__) self.assertEqual(ext, '.pyc') finally: del sys.path[0] remove_files(TESTFN) if TESTFN in sys.modules: del sys.modules[TESTFN] def test_import_by_filename(self): path = os.path.abspath(TESTFN) encoding = sys.getfilesystemencoding() try: path.encode(encoding) except UnicodeEncodeError: self.skipTest('path is not encodable to {}'.format(encoding)) with self.assertRaises(ImportError) as c: __import__(path) def test_import_in_del_does_not_crash(self): # Issue 4236 testfn = script_helper.make_script('', TESTFN, textwrap.dedent("""\ import sys class C: def __del__(self): import importlib sys.argv.insert(0, C()) """)) script_helper.assert_python_ok(testfn) def test_timestamp_overflow(self): # A modification timestamp larger than 2**32 should not be a problem # when importing a module (issue #11235). 
sys.path.insert(0, os.curdir) try: source = TESTFN + ".py" compiled = importlib.util.cache_from_source(source) with open(source, 'w') as f: pass try: os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5)) except OverflowError: self.skipTest("cannot set modification time to large integer") except OSError as e: if e.errno not in (getattr(errno, 'EOVERFLOW', None), getattr(errno, 'EINVAL', None)): raise self.skipTest("cannot set modification time to large integer ({})".format(e)) __import__(TESTFN) # The pyc file was created. os.stat(compiled) finally: del sys.path[0] remove_files(TESTFN) def test_bogus_fromlist(self): try: __import__('http', fromlist=['blah']) except ImportError: self.fail("fromlist must allow bogus names") @cpython_only def test_delete_builtins_import(self): args = ["-c", "del __builtins__.__import__; import os"] popen = script_helper.spawn_python(*args) stdout, stderr = popen.communicate() self.assertIn(b"ImportError", stdout) def test_from_import_message_for_nonexistent_module(self): with self.assertRaisesRegex(ImportError, "^No module named 'bogus'"): from bogus import foo def test_from_import_message_for_existing_module(self): with self.assertRaisesRegex(ImportError, "^cannot import name 'bogus'"): from re import bogus def test_from_import_AttributeError(self): # Issue #24492: trying to import an attribute that raises an # AttributeError should lead to an ImportError. 
class AlwaysAttributeError: def __getattr__(self, _): raise AttributeError module_name = 'test_from_import_AttributeError' self.addCleanup(unload, module_name) sys.modules[module_name] = AlwaysAttributeError() with self.assertRaises(ImportError): from test_from_import_AttributeError import does_not_exist @skip_if_dont_write_bytecode class FilePermissionTests(unittest.TestCase): # tests for file mode on cached .pyc files @unittest.skipUnless(os.name == 'posix', "test meaningful only on posix systems") def test_creation_mode(self): mask = 0o022 with temp_umask(mask), _ready_to_import() as (name, path): cached_path = importlib.util.cache_from_source(path) module = __import__(name) if not os.path.exists(cached_path): self.fail("__import__ did not result in creation of " "a .pyc file") stat_info = os.stat(cached_path) # Check that the umask is respected, and the executable bits # aren't set. self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(0o666 & ~mask)) @unittest.skipUnless(os.name == 'posix', "test meaningful only on posix systems") def test_cached_mode_issue_2051(self): # permissions of .pyc should match those of .py, regardless of mask mode = 0o600 with temp_umask(0o022), _ready_to_import() as (name, path): cached_path = importlib.util.cache_from_source(path) os.chmod(path, mode) __import__(name) if not os.path.exists(cached_path): self.fail("__import__ did not result in creation of " "a .pyc file") stat_info = os.stat(cached_path) self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(mode)) @unittest.skipUnless(os.name == 'posix', "test meaningful only on posix systems") def test_cached_readonly(self): mode = 0o400 with temp_umask(0o022), _ready_to_import() as (name, path): cached_path = importlib.util.cache_from_source(path) os.chmod(path, mode) __import__(name) if not os.path.exists(cached_path): self.fail("__import__ did not result in creation of " "a .pyc file") stat_info = os.stat(cached_path) expected = mode | 0o200 # Account for fix for issue 
#6074 self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(expected)) def test_pyc_always_writable(self): # Initially read-only .pyc files on Windows used to cause problems # with later updates, see issue #6074 for details with _ready_to_import() as (name, path): # Write a Python file, make it read-only and import it with open(path, 'w') as f: f.write("x = 'original'\n") # Tweak the mtime of the source to ensure pyc gets updated later s = os.stat(path) os.utime(path, (s.st_atime, s.st_mtime-100000000)) os.chmod(path, 0o400) m = __import__(name) self.assertEqual(m.x, 'original') # Change the file and then reimport it os.chmod(path, 0o600) with open(path, 'w') as f: f.write("x = 'rewritten'\n") unload(name) importlib.invalidate_caches() m = __import__(name) self.assertEqual(m.x, 'rewritten') # Now delete the source file and check the pyc was rewritten unlink(path) unload(name) importlib.invalidate_caches() bytecode_only = path + "c" os.rename(importlib.util.cache_from_source(path), bytecode_only) m = __import__(name) self.assertEqual(m.x, 'rewritten') class PycRewritingTests(unittest.TestCase): # Test that the `co_filename` attribute on code objects always points # to the right file, even when various things happen (e.g. both the .py # and the .pyc file are renamed). 
module_name = "unlikely_module_name" module_source = """ import sys code_filename = sys._getframe().f_code.co_filename module_filename = __file__ constant = 1 def func(): pass func_filename = func.__code__.co_filename """ dir_name = os.path.abspath(TESTFN) file_name = os.path.join(dir_name, module_name) + os.extsep + "py" compiled_name = importlib.util.cache_from_source(file_name) def setUp(self): self.sys_path = sys.path[:] self.orig_module = sys.modules.pop(self.module_name, None) os.mkdir(self.dir_name) with open(self.file_name, "w") as f: f.write(self.module_source) sys.path.insert(0, self.dir_name) importlib.invalidate_caches() def tearDown(self): sys.path[:] = self.sys_path if self.orig_module is not None: sys.modules[self.module_name] = self.orig_module else: unload(self.module_name) unlink(self.file_name) unlink(self.compiled_name) rmtree(self.dir_name) def import_module(self): ns = globals() __import__(self.module_name, ns, ns) return sys.modules[self.module_name] def test_basics(self): mod = self.import_module() self.assertEqual(mod.module_filename, self.file_name) self.assertEqual(mod.code_filename, self.file_name) self.assertEqual(mod.func_filename, self.file_name) del sys.modules[self.module_name] mod = self.import_module() self.assertEqual(mod.module_filename, self.file_name) self.assertEqual(mod.code_filename, self.file_name) self.assertEqual(mod.func_filename, self.file_name) def test_incorrect_code_name(self): py_compile.compile(self.file_name, dfile="another_module.py") mod = self.import_module() self.assertEqual(mod.module_filename, self.file_name) self.assertEqual(mod.code_filename, self.file_name) self.assertEqual(mod.func_filename, self.file_name) def test_module_without_source(self): target = "another_module.py" py_compile.compile(self.file_name, dfile=target) os.remove(self.file_name) pyc_file = make_legacy_pyc(self.file_name) importlib.invalidate_caches() mod = self.import_module() self.assertEqual(mod.module_filename, pyc_file) 
self.assertEqual(mod.code_filename, target) self.assertEqual(mod.func_filename, target) def test_foreign_code(self): py_compile.compile(self.file_name) with open(self.compiled_name, "rb") as f: header = f.read(12) code = marshal.load(f) constants = list(code.co_consts) foreign_code = importlib.import_module.__code__ pos = constants.index(1) constants[pos] = foreign_code code = type(code)(code.co_argcount, code.co_kwonlyargcount, code.co_nlocals, code.co_stacksize, code.co_flags, code.co_code, tuple(constants), code.co_names, code.co_varnames, code.co_filename, code.co_name, code.co_firstlineno, code.co_lnotab, code.co_freevars, code.co_cellvars) with open(self.compiled_name, "wb") as f: f.write(header) marshal.dump(code, f) mod = self.import_module() self.assertEqual(mod.constant.co_filename, foreign_code.co_filename) class PathsTests(unittest.TestCase): SAMPLES = ('test', 'test\u00e4\u00f6\u00fc\u00df', 'test\u00e9\u00e8', 'test\u00b0\u00b3\u00b2') path = TESTFN def setUp(self): os.mkdir(self.path) self.syspath = sys.path[:] def tearDown(self): rmtree(self.path) sys.path[:] = self.syspath # Regression test for http://bugs.python.org/issue1293. def test_trailing_slash(self): with open(os.path.join(self.path, 'test_trailing_slash.py'), 'w') as f: f.write("testdata = 'test_trailing_slash'") sys.path.append(self.path+'/') mod = __import__("test_trailing_slash") self.assertEqual(mod.testdata, 'test_trailing_slash') unload("test_trailing_slash") # Regression test for http://bugs.python.org/issue3677. @unittest.skipUnless(sys.platform == 'win32', 'Windows-specific') def test_UNC_path(self): with open(os.path.join(self.path, 'test_unc_path.py'), 'w') as f: f.write("testdata = 'test_unc_path'") importlib.invalidate_caches() # Create the UNC path, like \\myhost\c$\foo\bar. 
path = os.path.abspath(self.path) import socket hn = socket.gethostname() drive = path[0] unc = "\\\\%s\\%s$"%(hn, drive) unc += path[2:] try: os.listdir(unc) except OSError as e: if e.errno in (errno.EPERM, errno.EACCES): # See issue #15338 self.skipTest("cannot access administrative share %r" % (unc,)) raise sys.path.insert(0, unc) try: mod = __import__("test_unc_path") except ImportError as e: self.fail("could not import 'test_unc_path' from %r: %r" % (unc, e)) self.assertEqual(mod.testdata, 'test_unc_path') self.assertTrue(mod.__file__.startswith(unc), mod.__file__) unload("test_unc_path") class RelativeImportTests(unittest.TestCase): def tearDown(self): unload("test.relimport") setUp = tearDown def test_relimport_star(self): # This will import * from .test_import. from .. import relimport self.assertTrue(hasattr(relimport, "RelativeImportTests")) def test_issue3221(self): # Note for mergers: the 'absolute' tests from the 2.x branch # are missing in Py3k because implicit relative imports are # a thing of the past # # Regression test for http://bugs.python.org/issue3221. def check_relative(): exec("from . 
import relimport", ns) # Check relative import OK with __package__ and __name__ correct ns = dict(__package__='test', __name__='test.notarealmodule') check_relative() # Check relative import OK with only __name__ wrong ns = dict(__package__='test', __name__='notarealpkg.notarealmodule') check_relative() # Check relative import fails with only __package__ wrong ns = dict(__package__='foo', __name__='test.notarealmodule') self.assertRaises(SystemError, check_relative) # Check relative import fails with __package__ and __name__ wrong ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule') self.assertRaises(SystemError, check_relative) # Check relative import fails with package set to a non-string ns = dict(__package__=object()) self.assertRaises(TypeError, check_relative) def test_absolute_import_without_future(self): # If explicit relative import syntax is used, then do not try # to perform an absolute import in the face of failure. # Issue #7902. with self.assertRaises(ImportError): from .os import sep self.fail("explicit relative import triggered an " "implicit absolute import") class OverridingImportBuiltinTests(unittest.TestCase): def test_override_builtin(self): # Test that overriding builtins.__import__ can bypass sys.modules. import os def foo(): import os return os self.assertEqual(foo(), os) # Quick sanity check. with swap_attr(builtins, "__import__", lambda *x: 5): self.assertEqual(foo(), 5) # Test what happens when we shadow __import__ in globals(); this # currently does not impact the import process, but if this changes, # other code will need to change, so keep this test as a tripwire. with swap_item(globals(), "__import__", lambda *x: 5): self.assertEqual(foo(), os) class PycacheTests(unittest.TestCase): # Test the various PEP 3147/488-related behaviors. 
def _clean(self): forget(TESTFN) rmtree('__pycache__') unlink(self.source) def setUp(self): self.source = TESTFN + '.py' self._clean() with open(self.source, 'w') as fp: print('# This is a test file written by test_import.py', file=fp) sys.path.insert(0, os.curdir) importlib.invalidate_caches() def tearDown(self): assert sys.path[0] == os.curdir, 'Unexpected sys.path[0]' del sys.path[0] self._clean() @skip_if_dont_write_bytecode def test_import_pyc_path(self): self.assertFalse(os.path.exists('__pycache__')) __import__(TESTFN) self.assertTrue(os.path.exists('__pycache__')) pyc_path = importlib.util.cache_from_source(self.source) self.assertTrue(os.path.exists(pyc_path), 'bytecode file {!r} for {!r} does not ' 'exist'.format(pyc_path, TESTFN)) @unittest.skipUnless(os.name == 'posix', "test meaningful only on posix systems") @unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0, "due to varying filesystem permission semantics (issue #11956)") @skip_if_dont_write_bytecode def test_unwritable_directory(self): # When the umask causes the new __pycache__ directory to be # unwritable, the import still succeeds but no .pyc file is written. with temp_umask(0o222): __import__(TESTFN) self.assertTrue(os.path.exists('__pycache__')) pyc_path = importlib.util.cache_from_source(self.source) self.assertFalse(os.path.exists(pyc_path), 'bytecode file {!r} for {!r} ' 'exists'.format(pyc_path, TESTFN)) @skip_if_dont_write_bytecode def test_missing_source(self): # With PEP 3147 cache layout, removing the source but leaving the pyc # file does not satisfy the import. 
__import__(TESTFN) pyc_file = importlib.util.cache_from_source(self.source) self.assertTrue(os.path.exists(pyc_file)) os.remove(self.source) forget(TESTFN) importlib.invalidate_caches() self.assertRaises(ImportError, __import__, TESTFN) @skip_if_dont_write_bytecode def test_missing_source_legacy(self): # Like test_missing_source() except that for backward compatibility, # when the pyc file lives where the py file would have been (and named # without the tag), it is importable. The __file__ of the imported # module is the pyc location. __import__(TESTFN) # pyc_file gets removed in _clean() via tearDown(). pyc_file = make_legacy_pyc(self.source) os.remove(self.source) unload(TESTFN) importlib.invalidate_caches() m = __import__(TESTFN) self.assertEqual(m.__file__, os.path.join(os.curdir, os.path.relpath(pyc_file))) def test___cached__(self): # Modules now also have an __cached__ that points to the pyc file. m = __import__(TESTFN) pyc_file = importlib.util.cache_from_source(TESTFN + '.py') self.assertEqual(m.__cached__, os.path.join(os.curdir, pyc_file)) @skip_if_dont_write_bytecode def test___cached___legacy_pyc(self): # Like test___cached__() except that for backward compatibility, # when the pyc file lives where the py file would have been (and named # without the tag), it is importable. The __cached__ of the imported # module is the pyc location. __import__(TESTFN) # pyc_file gets removed in _clean() via tearDown(). pyc_file = make_legacy_pyc(self.source) os.remove(self.source) unload(TESTFN) importlib.invalidate_caches() m = __import__(TESTFN) self.assertEqual(m.__cached__, os.path.join(os.curdir, os.path.relpath(pyc_file))) @skip_if_dont_write_bytecode def test_package___cached__(self): # Like test___cached__ but for packages. 
def cleanup(): rmtree('pep3147') unload('pep3147.foo') unload('pep3147') os.mkdir('pep3147') self.addCleanup(cleanup) # Touch the __init__.py with open(os.path.join('pep3147', '__init__.py'), 'w'): pass with open(os.path.join('pep3147', 'foo.py'), 'w'): pass importlib.invalidate_caches() m = __import__('pep3147.foo') init_pyc = importlib.util.cache_from_source( os.path.join('pep3147', '__init__.py')) self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc)) foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py')) self.assertEqual(sys.modules['pep3147.foo'].__cached__, os.path.join(os.curdir, foo_pyc)) def test_package___cached___from_pyc(self): # Like test___cached__ but ensuring __cached__ when imported from a # PEP 3147 pyc file. def cleanup(): rmtree('pep3147') unload('pep3147.foo') unload('pep3147') os.mkdir('pep3147') self.addCleanup(cleanup) # Touch the __init__.py with open(os.path.join('pep3147', '__init__.py'), 'w'): pass with open(os.path.join('pep3147', 'foo.py'), 'w'): pass importlib.invalidate_caches() m = __import__('pep3147.foo') unload('pep3147.foo') unload('pep3147') importlib.invalidate_caches() m = __import__('pep3147.foo') init_pyc = importlib.util.cache_from_source( os.path.join('pep3147', '__init__.py')) self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc)) foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py')) self.assertEqual(sys.modules['pep3147.foo'].__cached__, os.path.join(os.curdir, foo_pyc)) def test_recompute_pyc_same_second(self): # Even when the source file doesn't change timestamp, a change in # source size is enough to trigger recomputation of the pyc file. 
__import__(TESTFN) unload(TESTFN) with open(self.source, 'a') as fp: print("x = 5", file=fp) m = __import__(TESTFN) self.assertEqual(m.x, 5) class TestSymbolicallyLinkedPackage(unittest.TestCase): package_name = 'sample' tagged = package_name + '-tagged' def setUp(self): test.support.rmtree(self.tagged) test.support.rmtree(self.package_name) self.orig_sys_path = sys.path[:] # create a sample package; imagine you have a package with a tag and # you want to symbolically link it from its untagged name. os.mkdir(self.tagged) self.addCleanup(test.support.rmtree, self.tagged) init_file = os.path.join(self.tagged, '__init__.py') test.support.create_empty_file(init_file) assert os.path.exists(init_file) # now create a symlink to the tagged package # sample -> sample-tagged os.symlink(self.tagged, self.package_name, target_is_directory=True) self.addCleanup(test.support.unlink, self.package_name) importlib.invalidate_caches() self.assertEqual(os.path.isdir(self.package_name), True) assert os.path.isfile(os.path.join(self.package_name, '__init__.py')) def tearDown(self): sys.path[:] = self.orig_sys_path # regression test for issue6727 @unittest.skipUnless( not hasattr(sys, 'getwindowsversion') or sys.getwindowsversion() >= (6, 0), "Windows Vista or later required") @test.support.skip_unless_symlink def test_symlinked_dir_importable(self): # make sure sample can only be imported from the current directory. sys.path[:] = ['.'] assert os.path.exists(self.package_name) assert os.path.exists(os.path.join(self.package_name, '__init__.py')) # Try to import the package importlib.import_module(self.package_name) @cpython_only class ImportlibBootstrapTests(unittest.TestCase): # These tests check that importlib is bootstrapped. 
def test_frozen_importlib(self): mod = sys.modules['_frozen_importlib'] self.assertTrue(mod) def test_frozen_importlib_is_bootstrap(self): from importlib import _bootstrap mod = sys.modules['_frozen_importlib'] self.assertIs(mod, _bootstrap) self.assertEqual(mod.__name__, 'importlib._bootstrap') self.assertEqual(mod.__package__, 'importlib') self.assertTrue(mod.__file__.endswith('_bootstrap.py'), mod.__file__) def test_frozen_importlib_external_is_bootstrap_external(self): from importlib import _bootstrap_external mod = sys.modules['_frozen_importlib_external'] self.assertIs(mod, _bootstrap_external) self.assertEqual(mod.__name__, 'importlib._bootstrap_external') self.assertEqual(mod.__package__, 'importlib') self.assertTrue(mod.__file__.endswith('_bootstrap_external.py'), mod.__file__) def test_there_can_be_only_one(self): # Issue #15386 revealed a tricky loophole in the bootstrapping # This test is technically redundant, since the bug caused importing # this test module to crash completely, but it helps prove the point from importlib import machinery mod = sys.modules['_frozen_importlib'] self.assertIs(machinery.ModuleSpec, mod.ModuleSpec) @cpython_only class GetSourcefileTests(unittest.TestCase): """Test importlib._bootstrap_external._get_sourcefile() as used by the C API. Because of the peculiarities of the need of this function, the tests are knowingly whitebox tests. """ def test_get_sourcefile(self): # Given a valid bytecode path, return the path to the corresponding # source file if it exists. with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile: _path_isfile.return_value = True; path = TESTFN + '.pyc' expect = TESTFN + '.py' self.assertEqual(_get_sourcefile(path), expect) def test_get_sourcefile_no_source(self): # Given a valid bytecode path without a corresponding source path, # return the original bytecode path. 
with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile: _path_isfile.return_value = False; path = TESTFN + '.pyc' self.assertEqual(_get_sourcefile(path), path) def test_get_sourcefile_bad_ext(self): # Given a path with an invalid bytecode extension, return the # bytecode path passed as the argument. path = TESTFN + '.bad_ext' self.assertEqual(_get_sourcefile(path), path) class ImportTracebackTests(unittest.TestCase): def setUp(self): os.mkdir(TESTFN) self.old_path = sys.path[:] sys.path.insert(0, TESTFN) def tearDown(self): sys.path[:] = self.old_path rmtree(TESTFN) def create_module(self, mod, contents, ext=".py"): fname = os.path.join(TESTFN, mod + ext) with open(fname, "w") as f: f.write(contents) self.addCleanup(unload, mod) importlib.invalidate_caches() return fname def assert_traceback(self, tb, files): deduped_files = [] while tb: code = tb.tb_frame.f_code fn = code.co_filename if not deduped_files or fn != deduped_files[-1]: deduped_files.append(fn) tb = tb.tb_next self.assertEqual(len(deduped_files), len(files), deduped_files) for fn, pat in zip(deduped_files, files): self.assertIn(pat, fn) def test_nonexistent_module(self): try: # assertRaises() clears __traceback__ import nonexistent_xyzzy except ImportError as e: tb = e.__traceback__ else: self.fail("ImportError should have been raised") self.assert_traceback(tb, [__file__]) def test_nonexistent_module_nested(self): self.create_module("foo", "import nonexistent_xyzzy") try: import foo except ImportError as e: tb = e.__traceback__ else: self.fail("ImportError should have been raised") self.assert_traceback(tb, [__file__, 'foo.py']) def test_exec_failure(self): self.create_module("foo", "1/0") try: import foo except ZeroDivisionError as e: tb = e.__traceback__ else: self.fail("ZeroDivisionError should have been raised") self.assert_traceback(tb, [__file__, 'foo.py']) def test_exec_failure_nested(self): self.create_module("foo", "import bar") self.create_module("bar", "1/0") try: 
import foo except ZeroDivisionError as e: tb = e.__traceback__ else: self.fail("ZeroDivisionError should have been raised") self.assert_traceback(tb, [__file__, 'foo.py', 'bar.py']) # A few more examples from issue #15425 def test_syntax_error(self): self.create_module("foo", "invalid syntax is invalid") try: import foo except SyntaxError as e: tb = e.__traceback__ else: self.fail("SyntaxError should have been raised") self.assert_traceback(tb, [__file__]) def _setup_broken_package(self, parent, child): pkg_name = "_parent_foo" self.addCleanup(unload, pkg_name) pkg_path = os.path.join(TESTFN, pkg_name) os.mkdir(pkg_path) # Touch the __init__.py init_path = os.path.join(pkg_path, '__init__.py') with open(init_path, 'w') as f: f.write(parent) bar_path = os.path.join(pkg_path, 'bar.py') with open(bar_path, 'w') as f: f.write(child) importlib.invalidate_caches() return init_path, bar_path def test_broken_submodule(self): init_path, bar_path = self._setup_broken_package("", "1/0") try: import _parent_foo.bar except ZeroDivisionError as e: tb = e.__traceback__ else: self.fail("ZeroDivisionError should have been raised") self.assert_traceback(tb, [__file__, bar_path]) def test_broken_from(self): init_path, bar_path = self._setup_broken_package("", "1/0") try: from _parent_foo import bar except ZeroDivisionError as e: tb = e.__traceback__ else: self.fail("ImportError should have been raised") self.assert_traceback(tb, [__file__, bar_path]) def test_broken_parent(self): init_path, bar_path = self._setup_broken_package("1/0", "") try: import _parent_foo.bar except ZeroDivisionError as e: tb = e.__traceback__ else: self.fail("ZeroDivisionError should have been raised") self.assert_traceback(tb, [__file__, init_path]) def test_broken_parent_from(self): init_path, bar_path = self._setup_broken_package("1/0", "") try: from _parent_foo import bar except ZeroDivisionError as e: tb = e.__traceback__ else: self.fail("ZeroDivisionError should have been raised") 
self.assert_traceback(tb, [__file__, init_path]) @cpython_only def test_import_bug(self): # We simulate a bug in importlib and check that it's not stripped # away from the traceback. self.create_module("foo", "") importlib = sys.modules['_frozen_importlib_external'] if 'load_module' in vars(importlib.SourceLoader): old_exec_module = importlib.SourceLoader.exec_module else: old_exec_module = None try: def exec_module(*args): 1/0 importlib.SourceLoader.exec_module = exec_module try: import foo except ZeroDivisionError as e: tb = e.__traceback__ else: self.fail("ZeroDivisionError should have been raised") self.assert_traceback(tb, [__file__, '<frozen importlib', __file__]) finally: if old_exec_module is None: del importlib.SourceLoader.exec_module else: importlib.SourceLoader.exec_module = old_exec_module @unittest.skipUnless(TESTFN_UNENCODABLE, 'need TESTFN_UNENCODABLE') def test_unencodable_filename(self): # Issue #11619: The Python parser and the import machinery must not # encode filenames, especially on Windows pyname = script_helper.make_script('', TESTFN_UNENCODABLE, 'pass') self.addCleanup(unlink, pyname) name = pyname[:-3] script_helper.assert_python_ok("-c", "mod = __import__(%a)" % name, __isolated=False) class CircularImportTests(unittest.TestCase): """See the docstrings of the modules being imported for the purpose of the test.""" def tearDown(self): """Make sure no modules pre-exist in sys.modules which are being used to test.""" for key in list(sys.modules.keys()): if key.startswith('test.test_import.data.circular_imports'): del sys.modules[key] def test_direct(self): try: import test.test_import.data.circular_imports.basic except ImportError: self.fail('circular import through relative imports failed') def test_indirect(self): try: import test.test_import.data.circular_imports.indirect except ImportError: self.fail('relative import in module contributing to circular ' 'import failed') def test_subpackage(self): try: import 
test.test_import.data.circular_imports.subpackage except ImportError: self.fail('circular import involving a subpackage failed') def test_rebinding(self): try: import test.test_import.data.circular_imports.rebinding as rebinding except ImportError: self.fail('circular import with rebinding of module attribute failed') from test.test_import.data.circular_imports.subpkg import util self.assertIs(util.util, rebinding.util) if __name__ == '__main__': # Test needs to be a package, so we can do relative imports. unittest.main()
gpl-3.0
anthonysandrin/kafka-utils
kafka_utils/kafka_check/commands/min_isr.py
1
3568
# -*- coding: utf-8 -*- # Copyright 2016 Yelp Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import print_function from kazoo.exceptions import NoNodeError from kafka_utils.kafka_check import status_code from kafka_utils.kafka_check.commands.command import KafkaCheckCmd from kafka_utils.util.metadata import get_topic_partition_metadata class MinIsrCmd(KafkaCheckCmd): def build_subparser(self, subparsers): subparser = subparsers.add_parser( 'min_isr', description='Check min isr number for each topic in the cluster.', help='This command will check actual number of insync replicas for each ' 'topic-partition in the cluster with configuration for that topic ' 'in Zookeeper or default min.isr param if it is specified and there ' 'is no settings in Zookeeper for partition.', ) subparser.add_argument( '--default-min-isr', type=int, default=1, help='Default min.isr value for cases without settings in Zookeeper ' 'for some topics. Default: %(default)s', ) return subparser def run_command(self): """Min_isr command, checks number of actual min-isr for each topic-partition with configuration for that topic.""" topics = get_topic_partition_metadata(self.cluster_config.broker_list) not_in_sync = process_metadata_response( topics, self.zk, self.args.default_min_isr, self.args.verbose, ) if not_in_sync == 0: return status_code.OK, "All replicas in sync." 
else: msg = ("{0} partition(s) have the number of replicas in " "sync that is lower than the specified min ISR.").format(not_in_sync) return status_code.CRITICAL, msg def get_min_isr(zk, topic): """Return the min-isr for topic, or None if not specified""" ISR_CONF_NAME = 'min.insync.replicas' try: config = zk.get_topic_config(topic) except NoNodeError: return None if ISR_CONF_NAME in config['config']: return int(config['config'][ISR_CONF_NAME]) else: return None def process_metadata_response(topics, zk, default_min_isr, verbose): not_in_sync = 0 for topic_name, partitions in topics.items(): min_isr = get_min_isr(zk, topic_name) or default_min_isr if min_isr is None: continue for metadata in partitions.values(): cur_isr = len(metadata.isr) if cur_isr < min_isr: if verbose: print("isr={isr} is lower than min_isr={min_isr} for {topic}:{partition}".format( isr=cur_isr, min_isr=min_isr, topic=metadata.topic, partition=metadata.partition, )) not_in_sync += 1 return not_in_sync
apache-2.0
MaryanMorel/faker
faker/providers/address/pt_BR/__init__.py
15
17254
# coding=utf-8 from __future__ import unicode_literals from .. import Provider as AddressProvider class Provider(AddressProvider): city_suffixes = ( 'do Sul', 'do Norte', 'de Minas', 'do Campo', 'Grande', 'da Serra', 'do Oeste', 'de Goiás', 'Paulista', 'da Mata', 'Alegre', 'da Praia', 'das Flores', 'das Pedras', 'dos Dourados', 'do Amparo', 'do Galho', 'da Prata', 'Verde' ) street_prefixes = ('Aeroporto', 'Alameda', 'Área', 'Avenida', 'Campo', 'Chácara', 'Colônia', 'Condomínio', 'Conjunto', 'Distrito', 'Esplanada', 'Estação', 'Estrada', 'Favela', 'Fazenda', 'Feira', 'Jardim', 'Ladeira', 'Lago', 'Lagoa', 'Largo', 'Loteamento', 'Morro', 'Núcleo', 'Parque', 'Passarela', 'Pátio', 'Praça', 'Quadra', 'Recanto', 'Residencial', 'Rodovia', 'Rua', 'Setor', 'Sítio', 'Travessa', 'Trecho', 'Trevo', 'Vale', 'Vereda', 'Via', 'Viaduto', 'Viela', 'Vila') city_formats = ( '{{last_name}}', '{{last_name}}', '{{last_name}}', '{{last_name}}', '{{last_name}} {{city_suffix}}', '{{last_name}} {{city_suffix}}', '{{last_name}} {{city_suffix}}', '{{last_name}} de {{last_name}}', ) street_name_formats = ( '{{street_prefix}} {{last_name}}', '{{street_prefix}} {{first_name}} {{last_name}}', '{{street_prefix}} de {{last_name}}', ) street_address_formats = ( '{{street_name}}', '{{street_name}}, {{building_number}}', '{{street_name}}, {{building_number}}', '{{street_name}}, {{building_number}}', '{{street_name}}, {{building_number}}', '{{street_name}}, {{building_number}}', '{{street_name}}, {{building_number}}', ) address_formats = ( "{{street_address}}\n{{bairro}}\n{{postcode}} {{city}} / {{estado_sigla}}", ) building_number_formats = ('%', '%#', '%#', '%#', '%##') postcode_formats = ('########', '#####-###') bairros = ( 'Aarão Reis', 'Acaba Mundo', 'Acaiaca', 'Ademar Maldonado', 'Aeroporto', 'Aguas Claras', 'Alípio De Melo', 'Alpes', 'Alta Tensão 1ª Seção', 'Alta Tensão 2ª Seção', 'Alto Caiçaras', 'Alto Das Antenas', 'Alto Dos Pinheiros', 'Alto Vera Cruz', 'Álvaro Camargos', 'Ambrosina', 
'Andiroba', 'Antonio Ribeiro De Abreu 1ª Seção', 'Aparecida 7ª Seção', 'Ápia', 'Apolonia', 'Araguaia', 'Atila De Paiva', 'Bacurau', 'Bairro Das Indústrias Ii', 'Baleia', 'Barão Homem De Melo 1ª Seção', 'Barão Homem De Melo 2ª Seção', 'Barão Homem De Melo 3ª Seção', 'Barreiro', 'Beija Flor', 'Beira Linha', 'Bela Vitoria', 'Belmonte', 'Bernadete', 'Betânia', 'Biquinhas', 'Boa Esperança', 'Boa União 1ª Seção', 'Boa União 2ª Seção', 'Boa Viagem', 'Boa Vista', 'Bom Jesus', 'Bonfim', 'Bonsucesso', 'Brasil Industrial', 'Braúnas', 'Buraco Quente', 'Cabana Do Pai Tomás', 'Cachoeirinha', 'Caetano Furquim', 'Caiçara - Adelaide', 'Calafate', 'Califórnia', 'Camargos', 'Campo Alegre', 'Camponesa 1ª Seção', 'Camponesa 2ª Seção', 'Canaa', 'Canadá', 'Candelaria', 'Capitão Eduardo', 'Cardoso', 'Casa Branca', 'Castanheira', 'Cdi Jatoba', 'Cenaculo', 'Céu Azul', 'Chácara Leonina', 'Cidade Jardim Taquaril', 'Cinquentenário', 'Colégio Batista', 'Comiteco', 'Concórdia', 'Cônego Pinheiro 1ª Seção', 'Cônego Pinheiro 2ª Seção', 'Confisco', 'Conjunto Bonsucesso', 'Conjunto Califórnia I', 'Conjunto Califórnia Ii', 'Conjunto Capitão Eduardo', 'Conjunto Celso Machado', 'Conjunto Floramar', 'Conjunto Jardim Filadélfia', 'Conjunto Jatoba', 'Conjunto Lagoa', 'Conjunto Minas Caixa', 'Conjunto Novo Dom Bosco', 'Conjunto Paulo Vi', 'Conjunto Providencia', 'Conjunto Santa Maria', 'Conjunto São Francisco De Assis', 'Conjunto Serra Verde', 'Conjunto Taquaril', 'Copacabana', 'Coqueiros', 'Corumbiara', 'Custodinha', 'Das Industrias I', 'Delta', 'Diamante', 'Distrito Industrial Do Jatoba', 'Dom Bosco', 'Dom Cabral', 'Dom Joaquim', 'Dom Silverio', 'Dona Clara', 'Embaúbas', 'Engenho Nogueira', 'Ermelinda', 'Ernesto Nascimento', 'Esperança', 'Estrela', 'Estrela Do Oriente', 'Etelvina Carneiro', 'Europa', 'Eymard', 'Fazendinha', 'Flamengo', 'Flavio De Oliveira', 'Flavio Marques Lisboa', 'Floramar', 'Frei Leopoldo', 'Gameleira', 'Garças', 'Glória', 'Goiania', 'Graça', 'Granja De Freitas', 'Granja Werneck', 
'Grota', 'Grotinha', 'Guarani', 'Guaratã', 'Havaí', 'Heliopolis', 'Horto Florestal', 'Inconfidência', 'Indaiá', 'Independência', 'Ipe', 'Itapoa', 'Itatiaia', 'Jaqueline', 'Jaraguá', 'Jardim Alvorada', 'Jardim Atlântico', 'Jardim Do Vale', 'Jardim Dos Comerciarios', 'Jardim Felicidade', 'Jardim Guanabara', 'Jardim Leblon', 'Jardim Montanhês', 'Jardim São José', 'Jardim Vitoria', 'Jardinópolis', 'Jatobá', 'João Alfredo', 'João Paulo Ii', 'Jonas Veiga', 'Juliana', 'Lagoa', 'Lagoinha', 'Lagoinha Leblon', 'Lajedo', 'Laranjeiras', 'Leonina', 'Leticia', 'Liberdade', 'Lindéia', 'Lorena', 'Madre Gertrudes', 'Madri', 'Mala E Cuia', 'Manacas', 'Mangueiras', 'Mantiqueira', 'Marajó', 'Maravilha', 'Marçola', 'Maria Goretti', 'Maria Helena', 'Maria Tereza', 'Maria Virgínia', 'Mariano De Abreu', 'Marieta 1ª Seção', 'Marieta 2ª Seção', 'Marieta 3ª Seção', 'Marilandia', 'Mariquinhas', 'Marmiteiros', 'Milionario', 'Minas Brasil', 'Minas Caixa', 'Minaslandia', 'Mineirão', 'Miramar', 'Mirante', 'Mirtes', 'Monsenhor Messias', 'Monte Azul', 'Monte São José', 'Morro Dos Macacos', 'Nazare', 'Nossa Senhora Aparecida', 'Nossa Senhora Da Aparecida', 'Nossa Senhora Da Conceição', 'Nossa Senhora De Fátima', 'Nossa Senhora Do Rosário', 'Nova America', 'Nova Cachoeirinha', 'Nova Cintra', 'Nova Esperança', 'Nova Floresta', 'Nova Gameleira', 'Nova Pampulha', 'Novo Aarão Reis', 'Novo Das Industrias', 'Novo Glória', 'Novo Santa Cecilia', 'Novo Tupi', 'Oeste', 'Olaria', "Olhos D'água", 'Ouro Minas', 'Pantanal', 'Paquetá', 'Paraíso', 'Parque São José', 'Parque São Pedro', 'Paulo Vi', 'Pedreira Padro Lopes', 'Penha', 'Petropolis', 'Pilar', 'Pindorama', 'Pindura Saia', 'Piraja', 'Piratininga', 'Pirineus', 'Pompéia', 'Pongelupe', 'Pousada Santo Antonio', 'Primeiro De Maio', 'Providencia', 'Ribeiro De Abreu', 'Rio Branco', 'Salgado Filho', 'Santa Amelia', 'Santa Branca', 'Santa Cecilia', 'Santa Cruz', 'Santa Helena', 'Santa Inês', 'Santa Isabel', 'Santa Margarida', 'Santa Maria', 'Santa Rita', 'Santa Rita 
De Cássia', 'Santa Sofia', 'Santa Terezinha', 'Santana Do Cafezal', 'Santo André', 'São Benedito', 'São Bernardo', 'São Cristóvão', 'São Damião', 'São Francisco', 'São Francisco Das Chagas', 'São Gabriel', 'São Geraldo', 'São Gonçalo', 'São João', 'São João Batista', 'São Jorge 1ª Seção', 'São Jorge 2ª Seção', 'São Jorge 3ª Seção', 'São José', 'São Marcos', 'São Paulo', 'São Salvador', 'São Sebastião', 'São Tomaz', 'São Vicente', 'Satelite', 'Saudade', 'Senhor Dos Passos', 'Serra Do Curral', 'Serra Verde', 'Serrano', 'Solar Do Barreiro', 'Solimoes', 'Sport Club', 'Suzana', 'Taquaril', 'Teixeira Dias', 'Tiradentes', 'Tirol', 'Tres Marias', 'Trevo', 'Túnel De Ibirité', 'Tupi A', 'Tupi B', 'União', 'Unidas', 'Universitário', 'Universo', 'Urca', 'Vale Do Jatoba', 'Varzea Da Palma', 'Venda Nova', 'Ventosa', 'Vera Cruz', 'Vila Aeroporto', 'Vila Aeroporto Jaraguá', 'Vila Antena', 'Vila Antena Montanhês', 'Vila Atila De Paiva', 'Vila Bandeirantes', 'Vila Barragem Santa Lúcia', 'Vila Batik', 'Vila Betânia', 'Vila Boa Vista', 'Vila Calafate', 'Vila Califórnia', 'Vila Canto Do Sabiá', 'Vila Cemig', 'Vila Cloris', 'Vila Copacabana', 'Vila Copasa', 'Vila Coqueiral', 'Vila Da Amizade', 'Vila Da Ária', 'Vila Da Luz', 'Vila Da Paz', 'Vila Das Oliveiras', 'Vila Do Pombal', 'Vila Dos Anjos', 'Vila Ecológica', 'Vila Engenho Nogueira', 'Vila Esplanada', 'Vila Formosa', 'Vila Fumec', 'Vila Havaí', 'Vila Independencia 1ª Seção', 'Vila Independencia 2ª Seção', 'Vila Independencia 3ª Seção', 'Vila Inestan', 'Vila Ipiranga', 'Vila Jardim Alvorada', 'Vila Jardim Leblon', 'Vila Jardim São José', 'Vila Madre Gertrudes 1ª Seção', 'Vila Madre Gertrudes 2ª Seção', 'Vila Madre Gertrudes 3ª Seção', 'Vila Madre Gertrudes 4ª Seção', 'Vila Maloca', 'Vila Mangueiras', 'Vila Mantiqueira', 'Vila Maria', 'Vila Minaslandia', 'Vila Nossa Senhora Do Rosário', 'Vila Nova', 'Vila Nova Cachoeirinha 1ª Seção', 'Vila Nova Cachoeirinha 2ª Seção', 'Vila Nova Cachoeirinha 3ª Seção', 'Vila Nova Dos Milionarios', 
'Vila Nova Gameleira 1ª Seção', 'Vila Nova Gameleira 2ª Seção', 'Vila Nova Gameleira 3ª Seção', 'Vila Nova Paraíso', 'Vila Novo São Lucas', 'Vila Oeste', "Vila Olhos D'água", 'Vila Ouro Minas', 'Vila Paquetá', 'Vila Paraíso', 'Vila Petropolis', 'Vila Pilar', 'Vila Pinho', 'Vila Piratininga', 'Vila Piratininga Venda Nova', 'Vila Primeiro De Maio', 'Vila Puc', 'Vila Real 1ª Seção', 'Vila Real 2ª Seção', 'Vila Rica', 'Vila Santa Monica 1ª Seção', 'Vila Santa Monica 2ª Seção', 'Vila Santa Rosa', 'Vila Santo Antônio', 'Vila Santo Antônio Barroquinha', 'Vila São Dimas', 'Vila São Francisco', 'Vila São Gabriel', 'Vila São Gabriel Jacui', 'Vila São Geraldo', 'Vila São João Batista', 'Vila São Paulo', 'Vila São Rafael', 'Vila Satélite', 'Vila Sesc', 'Vila Sumaré', 'Vila Suzana Primeira Seção', 'Vila Suzana Segunda Seção', 'Vila Tirol', 'Vila Trinta E Um De Março', 'Vila União', 'Vila Vista Alegre', 'Virgínia', 'Vista Alegre', 'Vista Do Sol', 'Vitoria', 'Vitoria Da Conquista', 'Xangri-Lá', 'Xodo-Marize', 'Zilah Sposito', 'Outro', 'Novo São Lucas', 'Esplanada', 'Estoril', 'Novo Ouro Preto', 'Ouro Preto', 'Padre Eustáquio', 'Palmares', 'Palmeiras', 'Vila De Sá', 'Floresta', 'Anchieta', 'Aparecida', 'Grajaú', 'Planalto', 'Bandeirantes', 'Gutierrez', 'Jardim América', 'Renascença', 'Barro Preto', 'Barroca', 'Sagrada Família', 'Ipiranga', 'Belvedere', 'Santa Efigênia', 'Santa Lúcia', 'Santa Monica', 'Vila Jardim Montanhes', 'Santa Rosa', 'Santa Tereza', 'Buritis', 'Vila Paris', 'Santo Agostinho', 'Santo Antônio', 'Caiçaras', 'São Bento', 'Prado', 'Lourdes', 'Fernão Dias', 'Carlos Prates', 'Carmo', 'Luxemburgo', 'São Lucas', 'São Luiz', 'Mangabeiras', 'São Pedro', 'Horto', 'Cidade Jardim', 'Castelo', 'Cidade Nova', 'Savassi', 'Serra', 'Silveira', 'Sion', 'Centro', 'Alto Barroca', 'Nova Vista', 'Coração De Jesus', 'Coração Eucarístico', 'Funcionários', 'Cruzeiro', 'João Pinheiro', 'Nova Granada', 'Nova Suíça', 'Itaipu' ) countries = ('Afeganistão', 'África do Sul', 'Akrotiri', 
'Albânia', 'Alemanha', 'Andorra', 'Angola', 'Anguila', 'Antártica', 'Antígua e Barbuda', 'Antilhas Holandesas', 'Arábia Saudita', 'Argélia', 'Argentina', 'Armênia', 'Aruba', 'Ashmore and Cartier Islands', 'Austrália', 'Áustria', 'Azerbaijão', 'Bahamas', 'Bangladesh', 'Barbados', 'Barein', 'Bélgica', 'Belize', 'Benim', 'Bermudas', 'Bielorrússia', 'Birmânia', 'Bolívia', 'Bósnia e Herzegovina', 'Botsuana', 'Brasil', 'Brunei', 'Bulgária', 'Burquina Faso', 'Burundi', 'Butão', 'Cabo Verde', 'Camarões', 'Camboja', 'Canadá', 'Catar', 'Cazaquistão', 'Chade', 'Chile', 'China', 'Chipre', 'Clipperton Island', 'Colômbia', 'Comores', 'Congo-Brazzaville', 'Congo-Kinshasa', 'Coral Sea Islands', 'Coreia do Norte', 'Coreia do Sul', 'Costa do Marfim', 'Costa Rica', 'Croácia', 'Cuba', 'Dhekelia', 'Dinamarca', 'Domínica', 'Egito', 'Costa do Marfim', 'Costa Rica', 'Croácia', 'Cuba', 'Dhekelia', 'Dinamarca', 'Domínica', 'Egito', 'Emirados Árabes Unidos', 'Equador', 'Eritreia', 'Eslováquia', 'Eslovênia', 'Espanha', 'Estados Unidos', 'Estônia', 'Etiópia', 'Faroé', 'Fiji', 'Filipinas', 'Finlândia', 'França', 'Gabão', 'Gâmbia', 'Gana', 'Geórgia', 'Geórgia do Sul e Sandwich do Sul', 'Gibraltar', 'Granada', 'Grécia', 'Gronelândia', 'Guam', 'Guatemala', 'Guernsey', 'Guiana', 'Guiné', 'Guiné Equatorial', 'Guiné-Bissau', 'Haiti', 'Honduras', 'Hong Kong', 'Hungria', 'Iêmen', 'Ilha Bouvet', 'Ilha do Natal', 'Ilha Norfolk', 'Ilhas Caiman', 'Ilhas Cook', 'Ilhas dos Cocos', 'Ilhas Falkland', 'Ilhas Heard e McDonald', 'Ilhas Marshall', 'Ilhas Salomão', 'Ilhas Turcas e Caicos', 'Ilhas Virgens Americanas', 'Ilhas Virgens Britânicas', 'Índia', 'Indonésia', 'Iran', 'Iraque', 'Irlanda', 'Islândia', 'Israel', 'Itália', 'Jamaica', 'Jan Mayen', 'Japão', 'Jersey', 'Jibuti', 'Jordânia', 'Kuwait', 'Laos', 'Lesoto', 'Letônia', 'Líbano', 'Libéria', 'Líbia', 'Liechtenstein', 'Lituânia', 'Luxemburgo', 'Macau', 'Macedônia', 'Madagáscar', 'Malásia', 'Malávi', 'Maldivas', 'Mali', 'Malta', 'Man, Isle of', 'Marianas do 
Norte', 'Marrocos', 'Maurícia', 'Mauritânia', 'Mayotte', 'México', 'Micronésia', 'Moçambique', 'Moldávia', 'Mônaco', 'Mongólia', 'Monserrate', 'Montenegro', 'Namíbia', 'Nauru', 'Navassa Island', 'Nepal', 'Nicarágua', 'Níger', 'Nigéria', 'Niue', 'Noruega', 'Nova Caledónia', 'Nova Zelândia', 'Omã', 'Países Baixos', 'Palau', 'Panamá', 'Papua-Nova Guiné', 'Paquistão', 'Paracel Islands', 'Paraguai', 'Peru', 'Pitcairn', 'Polinésia Francesa', 'Polônia', 'Porto Rico', 'Portugal', 'Quênia', 'Quirguizistão', 'Quiribáti', 'Reino Unido', 'República Centro-Africana', 'República Checa', 'República Dominicana', 'Roménia', 'Ruanda', 'Rússia', 'Salvador', 'Samoa', 'Samoa Americana', 'Santa Helena', 'Santa Lúcia', 'São Cristóvão e Neves', 'São Marinho', 'São Pedro e Miquelon', 'São Tomé e Príncipe', 'São Vicente e Granadinas', 'Sara Ocidental', 'Seicheles', 'Senegal', 'Serra Leoa', 'Sérvia', 'Singapura', 'Síria', 'Somália', 'Sri Lanka', 'Suazilândia', 'Sudão', 'Suécia', 'Suíça', 'Suriname', 'Svalbard e Jan Mayen', 'Tailândia', 'Taiwan', 'Tajiquistão', 'Tanzânia', 'Território Britânico do Oceano Índico', 'Territórios Austrais Franceses', 'Timor Leste', 'Togo', 'Tokelau', 'Tonga', 'Trindade e Tobago', 'Tunísia', 'Turquemenistão', 'Turquia', 'Tuvalu', 'Ucrânia', 'Uganda', 'União Europeia', 'Uruguai', 'Usbequistão', 'Vanuatu', 'Vaticano', 'Venezuela', 'Vietnam', 'Wake Island', 'Wallis e Futuna', 'Zâmbia', 'Zimbabué' ) estados = ( ('AC', 'Acre'), ('AL', 'Alagoas'), ('AP', 'Amapá'), ('AM', 'Amazonas'), ('BA', 'Bahia'), ('CE', 'Ceará'), ('DF', 'Distrito Federal'), ('ES', 'Espírito Santo'), ('GO', 'Goiás'), ('MA', 'Maranhão'), ('MT', 'Mato Grosso'), ('MS', 'Mato Grosso do Sul'), ('MG', 'Minas Gerais'), ('PA', 'Pará'), ('PB', 'Paraíba'), ('PR', 'Paraná'), ('PE', 'Pernambuco'), ('PI', 'Piauí'), ('RJ', 'Rio de Janeiro'), ('RN', 'Rio Grande do Norte'), ('RS', 'Rio Grande do Sul'), ('RO', 'Rondônia'), ('RR', 'Roraima'), ('SC', 'Santa Catarina'), ('SP', 'São Paulo'), ('SE', 'Sergipe'), ('TO', 
'Tocantins')
    )

    @classmethod
    def street_prefix(cls):
        """
        Returns a random street prefix.

        :example 'rua'
        """
        # ``random_element`` is presumably inherited from the base Provider
        # class (defined off-view) -- TODO confirm.
        return cls.random_element(cls.street_prefixes)

    @classmethod
    def estado(cls):
        """
        Randomly returns a Brazilian State as a ('sigla', 'nome') pair.

        :example ('MG', 'Minas Gerais')
        """
        return cls.random_element(cls.estados)

    @classmethod
    def estado_nome(cls):
        """
        Randomly returns a Brazilian State name.

        :example 'Minas Gerais'
        """
        # Index 1 of the ('sigla', 'nome') pair is the full name.
        return cls.estado()[1]

    @classmethod
    def estado_sigla(cls):
        """
        Randomly returns the two-letter abbreviation of a Brazilian State.

        :example 'MG'
        """
        # Index 0 of the ('sigla', 'nome') pair is the abbreviation.
        return cls.estado()[0]

    @classmethod
    def bairro(cls):
        """
        Randomly returns a bairro (neighborhood) name.

        The names were taken from the city of Belo Horizonte - Minas Gerais.

        :example 'Serra'
        """
        return cls.random_element(cls.bairros)
mit
wuvcen/shadowsocks
shadowsocks/crypto/sodium.py
1032
3778
#!/usr/bin/env python # # Copyright 2015 clowwindy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import, division, print_function, \ with_statement from ctypes import c_char_p, c_int, c_ulonglong, byref, \ create_string_buffer, c_void_p from shadowsocks.crypto import util __all__ = ['ciphers'] libsodium = None loaded = False buf_size = 2048 # for salsa20 and chacha20 BLOCK_SIZE = 64 def load_libsodium(): global loaded, libsodium, buf libsodium = util.find_library('sodium', 'crypto_stream_salsa20_xor_ic', 'libsodium') if libsodium is None: raise Exception('libsodium not found') libsodium.crypto_stream_salsa20_xor_ic.restype = c_int libsodium.crypto_stream_salsa20_xor_ic.argtypes = (c_void_p, c_char_p, c_ulonglong, c_char_p, c_ulonglong, c_char_p) libsodium.crypto_stream_chacha20_xor_ic.restype = c_int libsodium.crypto_stream_chacha20_xor_ic.argtypes = (c_void_p, c_char_p, c_ulonglong, c_char_p, c_ulonglong, c_char_p) buf = create_string_buffer(buf_size) loaded = True class SodiumCrypto(object): def __init__(self, cipher_name, key, iv, op): if not loaded: load_libsodium() self.key = key self.iv = iv self.key_ptr = c_char_p(key) self.iv_ptr = c_char_p(iv) if cipher_name == 'salsa20': self.cipher = libsodium.crypto_stream_salsa20_xor_ic elif cipher_name == 'chacha20': self.cipher = libsodium.crypto_stream_chacha20_xor_ic else: raise Exception('Unknown cipher') # byte counter, not block counter self.counter = 0 def update(self, data): global buf_size, 
buf l = len(data) # we can only prepend some padding to make the encryption align to # blocks padding = self.counter % BLOCK_SIZE if buf_size < padding + l: buf_size = (padding + l) * 2 buf = create_string_buffer(buf_size) if padding: data = (b'\0' * padding) + data self.cipher(byref(buf), c_char_p(data), padding + l, self.iv_ptr, int(self.counter / BLOCK_SIZE), self.key_ptr) self.counter += l # buf is copied to a str object when we access buf.raw # strip off the padding return buf.raw[padding:padding + l] ciphers = { 'salsa20': (32, 8, SodiumCrypto), 'chacha20': (32, 8, SodiumCrypto), } def test_salsa20(): cipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 1) decipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 0) util.run_cipher(cipher, decipher) def test_chacha20(): cipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 1) decipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 0) util.run_cipher(cipher, decipher) if __name__ == '__main__': test_chacha20() test_salsa20()
apache-2.0
pyannote/pyannote-generators
pyannote/generators/_version.py
2
15789
# This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.15 (https://github.com/warner/python-versioneer) import errno import os import re import subprocess import sys def get_keywords(): # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = "$Format:%d$" git_full = "$Format:%H$" keywords = {"refnames": git_refnames, "full": git_full} return keywords class VersioneerConfig: pass def get_config(): # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "pep440" cfg.tag_prefix = "" cfg.parentdir_prefix = "pyannote-generators-" cfg.versionfile_source = "pyannote/generators/_version.py" cfg.verbose = False return cfg class NotThisMethod(Exception): pass LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator def decorate(f): if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None else: if 
verbose: print("unable to find command, tried %s" % (commands,)) return None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) return None return stdout def versions_from_parentdir(parentdir_prefix, root, verbose): # Source tarballs conventionally unpack into a directory that includes # both the project name and a version string. dirname = os.path.basename(root) if not dirname.startswith(parentdir_prefix): if verbose: print("guessing rootdir is '%s', but '%s' doesn't start with " "prefix '%s'" % (root, dirname, parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None} @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): if not keywords: raise NotThisMethod("no keywords at all, weird") refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs-tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags"} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # this runs 'git' from the root of the source tree. This only gets called # if the git-archive 'subst' keywords were *not* expanded, and # _version.py hasn't already been rewritten with a short version string, # meaning we're inside a checked out source tree. if not os.path.exists(os.path.join(root, ".git")): if verbose: print("no .git in %s" % root) raise NotThisMethod("no .git directory") GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] # if there is a tag, this yields TAG-NUM-gHEX[-dirty] # if there are no tags, this yields HEX[-dirty] (no NUM) describe_out = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long"], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. 
git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits return pieces def plus_or_dot(pieces): if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): # now build up version string, with post-release "local version # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty # exceptions: # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): # TAG[.post.devDISTANCE] . 
No -dirty # exceptions: # 1: no tags. 0.post.devDISTANCE if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that # .dev0 sorts backwards (a dirty tree will appear "older" than the # corresponding clean one), but you shouldn't be releasing software with # -dirty anyways. # exceptions: # 1: no tags. 0.postDISTANCE[.dev0] if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_old(pieces): # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. # exceptions: # 1: no tags. 0.postDISTANCE[.dev0] if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty # --always' # exceptions: # 1: no tags. HEX[-dirty] (note: no 'g' prefix) if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty # --always -long'. 
The distance/hash is unconditional. # exceptions: # 1: no tags. HEX[-dirty] (note: no 'g' prefix) if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"]} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None} def get_versions(): # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. 
for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree"} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version"}
mit
yangsensen/Crawler
X500/bs4/tests/test_lxml.py
273
2965
"""Tests to ensure that the lxml tree builder generates good trees.""" import re import warnings try: import lxml.etree LXML_PRESENT = True LXML_VERSION = lxml.etree.LXML_VERSION except ImportError, e: LXML_PRESENT = False LXML_VERSION = (0,) if LXML_PRESENT: from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML from bs4 import ( BeautifulSoup, BeautifulStoneSoup, ) from bs4.element import Comment, Doctype, SoupStrainer from bs4.testing import skipIf from bs4.tests import test_htmlparser from bs4.testing import ( HTMLTreeBuilderSmokeTest, XMLTreeBuilderSmokeTest, SoupTest, skipIf, ) @skipIf( not LXML_PRESENT, "lxml seems not to be present, not testing its tree builder.") class LXMLTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest): """See ``HTMLTreeBuilderSmokeTest``.""" @property def default_builder(self): return LXMLTreeBuilder() def test_out_of_range_entity(self): self.assertSoupEquals( "<p>foo&#10000000000000;bar</p>", "<p>foobar</p>") self.assertSoupEquals( "<p>foo&#x10000000000000;bar</p>", "<p>foobar</p>") self.assertSoupEquals( "<p>foo&#1000000000;bar</p>", "<p>foobar</p>") # In lxml < 2.3.5, an empty doctype causes a segfault. Skip this # test if an old version of lxml is installed. @skipIf( not LXML_PRESENT or LXML_VERSION < (2,3,5,0), "Skipping doctype test for old version of lxml to avoid segfault.") def test_empty_doctype(self): soup = self.soup("<!DOCTYPE>") doctype = soup.contents[0] self.assertEqual("", doctype.strip()) def test_beautifulstonesoup_is_xml_parser(self): # Make sure that the deprecated BSS class uses an xml builder # if one is installed. 
with warnings.catch_warnings(record=True) as w: soup = BeautifulStoneSoup("<b />") self.assertEqual(u"<b/>", unicode(soup.b)) self.assertTrue("BeautifulStoneSoup class is deprecated" in str(w[0].message)) def test_real_xhtml_document(self): """lxml strips the XML definition from an XHTML doc, which is fine.""" markup = b"""<?xml version="1.0" encoding="utf-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"> <html xmlns="http://www.w3.org/1999/xhtml"> <head><title>Hello.</title></head> <body>Goodbye.</body> </html>""" soup = self.soup(markup) self.assertEqual( soup.encode("utf-8").replace(b"\n", b''), markup.replace(b'\n', b'').replace( b'<?xml version="1.0" encoding="utf-8"?>', b'')) @skipIf( not LXML_PRESENT, "lxml seems not to be present, not testing its XML tree builder.") class LXMLXMLTreeBuilderSmokeTest(SoupTest, XMLTreeBuilderSmokeTest): """See ``HTMLTreeBuilderSmokeTest``.""" @property def default_builder(self): return LXMLTreeBuilderForXML()
gpl-2.0
mmatyas/servo
tests/wpt/web-platform-tests/old-tests/webdriver/command_contexts/open_and_close_window_test.py
141
2529
import os
import sys
import unittest

# Make the harness helpers and the bundled webdriver client importable;
# the insert order matters, so these statements must stay as-is.
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test

repo_root = os.path.abspath(os.path.join(__file__, "../../.."))
sys.path.insert(1, os.path.join(repo_root, "tools", "webdriver"))
from webdriver import exceptions


class OpenAndCloseWindowTest(base_test.WebDriverBaseTest):
    """End-to-end checks for opening, enumerating and closing windows."""

    def setUp(self):
        # Every test starts on a page containing an "open_new_window" link.
        self.driver.get(self.webserver.where_is("command_contexts/res/first-page.html"))

    def tearDown(self):
        # Close every window the test opened, but keep one window alive so
        # the driver session stays usable for the next test.
        handles = self.driver.get_window_handles()
        for handle in handles[:-1]:
            self.driver.switch_to_window(handle)
            self.driver.close()
        self.driver.switch_to_window(self.driver.get_window_handles()[0])

    def test_open_new_window(self):
        # Clicking the link must grow the handle count by exactly one.
        handles = self.driver.get_window_handles()
        self.driver.find_element_by_id("open_new_window").click()
        self.assertEqual(len(handles) + 1, len(self.driver.get_window_handles()))

    def test_get_window_handles_returns_the_windows_that_have_been_opened(self):
        self.driver.find_element_by_id("open_new_window").click()
        handles = self.driver.get_window_handles()

        self.driver.switch_to_window(handles[0])
        url1 = self.driver.get_current_url()
        self.driver.switch_to_window(handles[1])
        url2 = self.driver.get_current_url()

        # The ordering of window handles is unspecified, so accept the two
        # expected URLs in either order.
        other = self.webserver.where_is("controlling_windows/res/other-page.html")
        first = self.webserver.where_is("controlling_windows/res/first-page.html")
        if url1 == other:
            self.assertEqual(url2, first)
        elif url1 == first:
            self.assertEqual(url2, other)
        else:
            self.fail("The wrong set of URLs were returned")

    def test_close_window(self):
        # Opening adds a window; closing the current one removes it again.
        open_windows = len(self.driver.get_window_handles())
        self.driver.find_element_by_id("open_new_window").click()
        self.assertEqual(1 + open_windows, len(self.driver.get_window_handles()))

        self.driver.close()

        self.assertEqual(open_windows, len(self.driver.get_window_handles()))

    def test_command_sent_to_closed_window_returns_no_such_window_exception(self):
        self.driver.find_element_by_id("open_new_window").click()
        self.driver.close()

        # Any further command against the closed window must raise.
        with self.assertRaises(exceptions.NoSuchWindowException):
            self.driver.get_window_handle()


if __name__ == "__main__":
    unittest.main()
mpl-2.0
Anonymous-X6/django
tests/custom_lookups/tests.py
177
22547
from __future__ import unicode_literals import contextlib import time import unittest from datetime import date, datetime from django.core.exceptions import FieldError from django.db import connection, models from django.test import TestCase, override_settings from django.utils import timezone from .models import Author, MySQLUnixTimestamp @contextlib.contextmanager def register_lookup(field, *lookups): try: for lookup in lookups: field.register_lookup(lookup) yield finally: for lookup in lookups: field._unregister_lookup(lookup) class Div3Lookup(models.Lookup): lookup_name = 'div3' def as_sql(self, compiler, connection): lhs, params = self.process_lhs(compiler, connection) rhs, rhs_params = self.process_rhs(compiler, connection) params.extend(rhs_params) return '(%s) %%%% 3 = %s' % (lhs, rhs), params def as_oracle(self, compiler, connection): lhs, params = self.process_lhs(compiler, connection) rhs, rhs_params = self.process_rhs(compiler, connection) params.extend(rhs_params) return 'mod(%s, 3) = %s' % (lhs, rhs), params class Div3Transform(models.Transform): lookup_name = 'div3' def as_sql(self, compiler, connection): lhs, lhs_params = compiler.compile(self.lhs) return '(%s) %%%% 3' % lhs, lhs_params def as_oracle(self, compiler, connection): lhs, lhs_params = compiler.compile(self.lhs) return 'mod(%s, 3)' % lhs, lhs_params class Div3BilateralTransform(Div3Transform): bilateral = True class Mult3BilateralTransform(models.Transform): bilateral = True lookup_name = 'mult3' def as_sql(self, compiler, connection): lhs, lhs_params = compiler.compile(self.lhs) return '3 * (%s)' % lhs, lhs_params class UpperBilateralTransform(models.Transform): bilateral = True lookup_name = 'upper' def as_sql(self, compiler, connection): lhs, lhs_params = compiler.compile(self.lhs) return 'UPPER(%s)' % lhs, lhs_params class YearTransform(models.Transform): # Use a name that avoids collision with the built-in year lookup. 
lookup_name = 'testyear' def as_sql(self, compiler, connection): lhs_sql, params = compiler.compile(self.lhs) return connection.ops.date_extract_sql('year', lhs_sql), params @property def output_field(self): return models.IntegerField() @YearTransform.register_lookup class YearExact(models.lookups.Lookup): lookup_name = 'exact' def as_sql(self, compiler, connection): # We will need to skip the extract part, and instead go # directly with the originating field, that is self.lhs.lhs lhs_sql, lhs_params = self.process_lhs(compiler, connection, self.lhs.lhs) rhs_sql, rhs_params = self.process_rhs(compiler, connection) # Note that we must be careful so that we have params in the # same order as we have the parts in the SQL. params = lhs_params + rhs_params + lhs_params + rhs_params # We use PostgreSQL specific SQL here. Note that we must do the # conversions in SQL instead of in Python to support F() references. return ("%(lhs)s >= (%(rhs)s || '-01-01')::date " "AND %(lhs)s <= (%(rhs)s || '-12-31')::date" % {'lhs': lhs_sql, 'rhs': rhs_sql}, params) @YearTransform.register_lookup class YearLte(models.lookups.LessThanOrEqual): """ The purpose of this lookup is to efficiently compare the year of the field. """ def as_sql(self, compiler, connection): # Skip the YearTransform above us (no possibility for efficient # lookup otherwise). real_lhs = self.lhs.lhs lhs_sql, params = self.process_lhs(compiler, connection, real_lhs) rhs_sql, rhs_params = self.process_rhs(compiler, connection) params.extend(rhs_params) # Build SQL where the integer year is concatenated with last month # and day, then convert that to date. (We try to have SQL like: # WHERE somecol <= '2013-12-31') # but also make it work if the rhs_sql is field reference. 
return "%s <= (%s || '-12-31')::date" % (lhs_sql, rhs_sql), params class SQLFunc(models.Lookup): def __init__(self, name, *args, **kwargs): super(SQLFunc, self).__init__(*args, **kwargs) self.name = name def as_sql(self, compiler, connection): return '%s()', [self.name] @property def output_field(self): return CustomField() class SQLFuncFactory(object): def __init__(self, name): self.name = name def __call__(self, *args, **kwargs): return SQLFunc(self.name, *args, **kwargs) class CustomField(models.TextField): def get_lookup(self, lookup_name): if lookup_name.startswith('lookupfunc_'): key, name = lookup_name.split('_', 1) return SQLFuncFactory(name) return super(CustomField, self).get_lookup(lookup_name) def get_transform(self, lookup_name): if lookup_name.startswith('transformfunc_'): key, name = lookup_name.split('_', 1) return SQLFuncFactory(name) return super(CustomField, self).get_transform(lookup_name) class CustomModel(models.Model): field = CustomField() # We will register this class temporarily in the test method. class InMonth(models.lookups.Lookup): """ InMonth matches if the column's month is the same as value's month. """ lookup_name = 'inmonth' def as_sql(self, compiler, connection): lhs, lhs_params = self.process_lhs(compiler, connection) rhs, rhs_params = self.process_rhs(compiler, connection) # We need to be careful so that we get the params in right # places. 
params = lhs_params + rhs_params + lhs_params + rhs_params return ("%s >= date_trunc('month', %s) and " "%s < date_trunc('month', %s) + interval '1 months'" % (lhs, rhs, lhs, rhs), params) class DateTimeTransform(models.Transform): lookup_name = 'as_datetime' @property def output_field(self): return models.DateTimeField() def as_sql(self, compiler, connection): lhs, params = compiler.compile(self.lhs) return 'from_unixtime({})'.format(lhs), params class LookupTests(TestCase): def test_basic_lookup(self): a1 = Author.objects.create(name='a1', age=1) a2 = Author.objects.create(name='a2', age=2) a3 = Author.objects.create(name='a3', age=3) a4 = Author.objects.create(name='a4', age=4) with register_lookup(models.IntegerField, Div3Lookup): self.assertQuerysetEqual( Author.objects.filter(age__div3=0), [a3], lambda x: x ) self.assertQuerysetEqual( Author.objects.filter(age__div3=1).order_by('age'), [a1, a4], lambda x: x ) self.assertQuerysetEqual( Author.objects.filter(age__div3=2), [a2], lambda x: x ) self.assertQuerysetEqual( Author.objects.filter(age__div3=3), [], lambda x: x ) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific SQL used") def test_birthdate_month(self): a1 = Author.objects.create(name='a1', birthdate=date(1981, 2, 16)) a2 = Author.objects.create(name='a2', birthdate=date(2012, 2, 29)) a3 = Author.objects.create(name='a3', birthdate=date(2012, 1, 31)) a4 = Author.objects.create(name='a4', birthdate=date(2012, 3, 1)) with register_lookup(models.DateField, InMonth): self.assertQuerysetEqual( Author.objects.filter(birthdate__inmonth=date(2012, 1, 15)), [a3], lambda x: x ) self.assertQuerysetEqual( Author.objects.filter(birthdate__inmonth=date(2012, 2, 1)), [a2], lambda x: x ) self.assertQuerysetEqual( Author.objects.filter(birthdate__inmonth=date(1981, 2, 28)), [a1], lambda x: x ) self.assertQuerysetEqual( Author.objects.filter(birthdate__inmonth=date(2012, 3, 12)), [a4], lambda x: x ) self.assertQuerysetEqual( 
Author.objects.filter(birthdate__inmonth=date(2012, 4, 1)), [], lambda x: x ) def test_div3_extract(self): with register_lookup(models.IntegerField, Div3Transform): a1 = Author.objects.create(name='a1', age=1) a2 = Author.objects.create(name='a2', age=2) a3 = Author.objects.create(name='a3', age=3) a4 = Author.objects.create(name='a4', age=4) baseqs = Author.objects.order_by('name') self.assertQuerysetEqual( baseqs.filter(age__div3=2), [a2], lambda x: x) self.assertQuerysetEqual( baseqs.filter(age__div3__lte=3), [a1, a2, a3, a4], lambda x: x) self.assertQuerysetEqual( baseqs.filter(age__div3__in=[0, 2]), [a2, a3], lambda x: x) self.assertQuerysetEqual( baseqs.filter(age__div3__in=[2, 4]), [a2], lambda x: x) self.assertQuerysetEqual( baseqs.filter(age__div3__gte=3), [], lambda x: x) self.assertQuerysetEqual( baseqs.filter(age__div3__range=(1, 2)), [a1, a2, a4], lambda x: x) class BilateralTransformTests(TestCase): def test_bilateral_upper(self): with register_lookup(models.CharField, UpperBilateralTransform): Author.objects.bulk_create([ Author(name='Doe'), Author(name='doe'), Author(name='Foo'), ]) self.assertQuerysetEqual( Author.objects.filter(name__upper='doe'), ["<Author: Doe>", "<Author: doe>"], ordered=False) self.assertQuerysetEqual( Author.objects.filter(name__upper__contains='f'), ["<Author: Foo>"], ordered=False) def test_bilateral_inner_qs(self): with register_lookup(models.CharField, UpperBilateralTransform): with self.assertRaises(NotImplementedError): Author.objects.filter(name__upper__in=Author.objects.values_list('name')) def test_div3_bilateral_extract(self): with register_lookup(models.IntegerField, Div3BilateralTransform): a1 = Author.objects.create(name='a1', age=1) a2 = Author.objects.create(name='a2', age=2) a3 = Author.objects.create(name='a3', age=3) a4 = Author.objects.create(name='a4', age=4) baseqs = Author.objects.order_by('name') self.assertQuerysetEqual( baseqs.filter(age__div3=2), [a2], lambda x: x) self.assertQuerysetEqual( 
baseqs.filter(age__div3__lte=3), [a3], lambda x: x) self.assertQuerysetEqual( baseqs.filter(age__div3__in=[0, 2]), [a2, a3], lambda x: x) self.assertQuerysetEqual( baseqs.filter(age__div3__in=[2, 4]), [a1, a2, a4], lambda x: x) self.assertQuerysetEqual( baseqs.filter(age__div3__gte=3), [a1, a2, a3, a4], lambda x: x) self.assertQuerysetEqual( baseqs.filter(age__div3__range=(1, 2)), [a1, a2, a4], lambda x: x) def test_bilateral_order(self): with register_lookup(models.IntegerField, Mult3BilateralTransform, Div3BilateralTransform): a1 = Author.objects.create(name='a1', age=1) a2 = Author.objects.create(name='a2', age=2) a3 = Author.objects.create(name='a3', age=3) a4 = Author.objects.create(name='a4', age=4) baseqs = Author.objects.order_by('name') self.assertQuerysetEqual( baseqs.filter(age__mult3__div3=42), # mult3__div3 always leads to 0 [a1, a2, a3, a4], lambda x: x) self.assertQuerysetEqual( baseqs.filter(age__div3__mult3=42), [a3], lambda x: x) def test_bilateral_fexpr(self): with register_lookup(models.IntegerField, Mult3BilateralTransform): a1 = Author.objects.create(name='a1', age=1, average_rating=3.2) a2 = Author.objects.create(name='a2', age=2, average_rating=0.5) a3 = Author.objects.create(name='a3', age=3, average_rating=1.5) a4 = Author.objects.create(name='a4', age=4) baseqs = Author.objects.order_by('name') self.assertQuerysetEqual( baseqs.filter(age__mult3=models.F('age')), [a1, a2, a3, a4], lambda x: x) self.assertQuerysetEqual( # Same as age >= average_rating baseqs.filter(age__mult3__gte=models.F('average_rating')), [a2, a3], lambda x: x) @override_settings(USE_TZ=True) class DateTimeLookupTests(TestCase): @unittest.skipUnless(connection.vendor == 'mysql', "MySQL specific SQL used") def test_datetime_output_field(self): with register_lookup(models.PositiveIntegerField, DateTimeTransform): ut = MySQLUnixTimestamp.objects.create(timestamp=time.time()) y2k = timezone.make_aware(datetime(2000, 1, 1)) self.assertQuerysetEqual( 
MySQLUnixTimestamp.objects.filter(timestamp__as_datetime__gt=y2k), [ut], lambda x: x) class YearLteTests(TestCase): def setUp(self): models.DateField.register_lookup(YearTransform) self.a1 = Author.objects.create(name='a1', birthdate=date(1981, 2, 16)) self.a2 = Author.objects.create(name='a2', birthdate=date(2012, 2, 29)) self.a3 = Author.objects.create(name='a3', birthdate=date(2012, 1, 31)) self.a4 = Author.objects.create(name='a4', birthdate=date(2012, 3, 1)) def tearDown(self): models.DateField._unregister_lookup(YearTransform) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific SQL used") def test_year_lte(self): baseqs = Author.objects.order_by('name') self.assertQuerysetEqual( baseqs.filter(birthdate__testyear__lte=2012), [self.a1, self.a2, self.a3, self.a4], lambda x: x) self.assertQuerysetEqual( baseqs.filter(birthdate__testyear=2012), [self.a2, self.a3, self.a4], lambda x: x) self.assertNotIn('BETWEEN', str(baseqs.filter(birthdate__testyear=2012).query)) self.assertQuerysetEqual( baseqs.filter(birthdate__testyear__lte=2011), [self.a1], lambda x: x) # The non-optimized version works, too. self.assertQuerysetEqual( baseqs.filter(birthdate__testyear__lt=2012), [self.a1], lambda x: x) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific SQL used") def test_year_lte_fexpr(self): self.a2.age = 2011 self.a2.save() self.a3.age = 2012 self.a3.save() self.a4.age = 2013 self.a4.save() baseqs = Author.objects.order_by('name') self.assertQuerysetEqual( baseqs.filter(birthdate__testyear__lte=models.F('age')), [self.a3, self.a4], lambda x: x) self.assertQuerysetEqual( baseqs.filter(birthdate__testyear__lt=models.F('age')), [self.a4], lambda x: x) def test_year_lte_sql(self): # This test will just check the generated SQL for __lte. This # doesn't require running on PostgreSQL and spots the most likely # error - not running YearLte SQL at all. 
baseqs = Author.objects.order_by('name') self.assertIn( '<= (2011 || ', str(baseqs.filter(birthdate__testyear__lte=2011).query)) self.assertIn( '-12-31', str(baseqs.filter(birthdate__testyear__lte=2011).query)) def test_postgres_year_exact(self): baseqs = Author.objects.order_by('name') self.assertIn( '= (2011 || ', str(baseqs.filter(birthdate__testyear=2011).query)) self.assertIn( '-12-31', str(baseqs.filter(birthdate__testyear=2011).query)) def test_custom_implementation_year_exact(self): try: # Two ways to add a customized implementation for different backends: # First is MonkeyPatch of the class. def as_custom_sql(self, compiler, connection): lhs_sql, lhs_params = self.process_lhs(compiler, connection, self.lhs.lhs) rhs_sql, rhs_params = self.process_rhs(compiler, connection) params = lhs_params + rhs_params + lhs_params + rhs_params return ("%(lhs)s >= str_to_date(concat(%(rhs)s, '-01-01'), '%%%%Y-%%%%m-%%%%d') " "AND %(lhs)s <= str_to_date(concat(%(rhs)s, '-12-31'), '%%%%Y-%%%%m-%%%%d')" % {'lhs': lhs_sql, 'rhs': rhs_sql}, params) setattr(YearExact, 'as_' + connection.vendor, as_custom_sql) self.assertIn( 'concat(', str(Author.objects.filter(birthdate__testyear=2012).query)) finally: delattr(YearExact, 'as_' + connection.vendor) try: # The other way is to subclass the original lookup and register the subclassed # lookup instead of the original. class CustomYearExact(YearExact): # This method should be named "as_mysql" for MySQL, "as_postgresql" for postgres # and so on, but as we don't know which DB we are running on, we need to use # setattr. 
def as_custom_sql(self, compiler, connection): lhs_sql, lhs_params = self.process_lhs(compiler, connection, self.lhs.lhs) rhs_sql, rhs_params = self.process_rhs(compiler, connection) params = lhs_params + rhs_params + lhs_params + rhs_params return ("%(lhs)s >= str_to_date(CONCAT(%(rhs)s, '-01-01'), '%%%%Y-%%%%m-%%%%d') " "AND %(lhs)s <= str_to_date(CONCAT(%(rhs)s, '-12-31'), '%%%%Y-%%%%m-%%%%d')" % {'lhs': lhs_sql, 'rhs': rhs_sql}, params) setattr(CustomYearExact, 'as_' + connection.vendor, CustomYearExact.as_custom_sql) YearTransform.register_lookup(CustomYearExact) self.assertIn( 'CONCAT(', str(Author.objects.filter(birthdate__testyear=2012).query)) finally: YearTransform._unregister_lookup(CustomYearExact) YearTransform.register_lookup(YearExact) class TrackCallsYearTransform(YearTransform): # Use a name that avoids collision with the built-in year lookup. lookup_name = 'testyear' call_order = [] def as_sql(self, compiler, connection): lhs_sql, params = compiler.compile(self.lhs) return connection.ops.date_extract_sql('year', lhs_sql), params @property def output_field(self): return models.IntegerField() def get_lookup(self, lookup_name): self.call_order.append('lookup') return super(TrackCallsYearTransform, self).get_lookup(lookup_name) def get_transform(self, lookup_name): self.call_order.append('transform') return super(TrackCallsYearTransform, self).get_transform(lookup_name) class LookupTransformCallOrderTests(TestCase): def test_call_order(self): with register_lookup(models.DateField, TrackCallsYearTransform): # junk lookup - tries lookup, then transform, then fails with self.assertRaises(FieldError): Author.objects.filter(birthdate__testyear__junk=2012) self.assertEqual(TrackCallsYearTransform.call_order, ['lookup', 'transform']) TrackCallsYearTransform.call_order = [] # junk transform - tries transform only, then fails with self.assertRaises(FieldError): Author.objects.filter(birthdate__testyear__junk__more_junk=2012) 
self.assertEqual(TrackCallsYearTransform.call_order, ['transform']) TrackCallsYearTransform.call_order = [] # Just getting the year (implied __exact) - lookup only Author.objects.filter(birthdate__testyear=2012) self.assertEqual(TrackCallsYearTransform.call_order, ['lookup']) TrackCallsYearTransform.call_order = [] # Just getting the year (explicit __exact) - lookup only Author.objects.filter(birthdate__testyear__exact=2012) self.assertEqual(TrackCallsYearTransform.call_order, ['lookup']) class CustomisedMethodsTests(TestCase): def test_overridden_get_lookup(self): q = CustomModel.objects.filter(field__lookupfunc_monkeys=3) self.assertIn('monkeys()', str(q.query)) def test_overridden_get_transform(self): q = CustomModel.objects.filter(field__transformfunc_banana=3) self.assertIn('banana()', str(q.query)) def test_overridden_get_lookup_chain(self): q = CustomModel.objects.filter(field__transformfunc_banana__lookupfunc_elephants=3) self.assertIn('elephants()', str(q.query)) def test_overridden_get_transform_chain(self): q = CustomModel.objects.filter(field__transformfunc_banana__transformfunc_pear=3) self.assertIn('pear()', str(q.query)) class SubqueryTransformTests(TestCase): def test_subquery_usage(self): with register_lookup(models.IntegerField, Div3Transform): Author.objects.create(name='a1', age=1) a2 = Author.objects.create(name='a2', age=2) Author.objects.create(name='a3', age=3) Author.objects.create(name='a4', age=4) self.assertQuerysetEqual( Author.objects.order_by('name').filter(id__in=Author.objects.filter(age__div3=2)), [a2], lambda x: x)
bsd-3-clause
valkyriesavage/gasustainability
django/contrib/auth/tests/permissions.py
231
1654
try: from cStringIO import StringIO except ImportError: from StringIO import StringIO from django.contrib.auth.management import create_permissions from django.contrib.auth import models as auth_models from django.contrib.contenttypes import models as contenttypes_models from django.core.management import call_command from django.test import TestCase class TestAuthPermissions(TestCase): def tearDown(self): # These tests mess with content types, but content type lookups # are cached, so we need to make sure the effects of this test # are cleaned up. contenttypes_models.ContentType.objects.clear_cache() def test_permission_register_order(self): """Test that the order of registered permissions doesn't break""" # Changeset 14413 introduced a regression in the ordering of # newly created permissions for objects. When loading a fixture # after the initial creation (such as during unit tests), the # expected IDs for the permissions may not match up, leading to # SQL errors. This is ticket 14731 # Start with a clean slate and build the permissions as we # expect to see them in the fixtures. auth_models.Permission.objects.all().delete() contenttypes_models.ContentType.objects.all().delete() create_permissions(auth_models, [], verbosity=0) create_permissions(contenttypes_models, [], verbosity=0) stderr = StringIO() call_command('loaddata', 'test_permissions.json', verbosity=0, commit=False, stderr=stderr) self.assertEqual(stderr.getvalue(), '')
bsd-3-clause
MalloyPower/parsing-python
front-end/testsuite-python-lib/Python-2.5/Lib/plat-sunos5/STROPTS.py
9
36364
# Generated by h2py from /usr/include/sys/stropts.h # Included from sys/feature_tests.h # Included from sys/isa_defs.h _CHAR_ALIGNMENT = 1 _SHORT_ALIGNMENT = 2 _INT_ALIGNMENT = 4 _LONG_ALIGNMENT = 8 _LONG_LONG_ALIGNMENT = 8 _DOUBLE_ALIGNMENT = 8 _LONG_DOUBLE_ALIGNMENT = 16 _POINTER_ALIGNMENT = 8 _MAX_ALIGNMENT = 16 _ALIGNMENT_REQUIRED = 1 _CHAR_ALIGNMENT = 1 _SHORT_ALIGNMENT = 2 _INT_ALIGNMENT = 4 _LONG_ALIGNMENT = 4 _LONG_LONG_ALIGNMENT = 4 _DOUBLE_ALIGNMENT = 4 _LONG_DOUBLE_ALIGNMENT = 4 _POINTER_ALIGNMENT = 4 _MAX_ALIGNMENT = 4 _ALIGNMENT_REQUIRED = 0 _CHAR_ALIGNMENT = 1 _SHORT_ALIGNMENT = 2 _INT_ALIGNMENT = 4 _LONG_LONG_ALIGNMENT = 8 _DOUBLE_ALIGNMENT = 8 _ALIGNMENT_REQUIRED = 1 _LONG_ALIGNMENT = 4 _LONG_DOUBLE_ALIGNMENT = 8 _POINTER_ALIGNMENT = 4 _MAX_ALIGNMENT = 8 _LONG_ALIGNMENT = 8 _LONG_DOUBLE_ALIGNMENT = 16 _POINTER_ALIGNMENT = 8 _MAX_ALIGNMENT = 16 _POSIX_C_SOURCE = 1 _LARGEFILE64_SOURCE = 1 _LARGEFILE_SOURCE = 1 _FILE_OFFSET_BITS = 64 _FILE_OFFSET_BITS = 32 _POSIX_C_SOURCE = 199506L _POSIX_PTHREAD_SEMANTICS = 1 _XOPEN_VERSION = 500 _XOPEN_VERSION = 4 _XOPEN_VERSION = 3 from TYPES import * # Included from sys/conf.h # Included from sys/t_lock.h # Included from sys/machlock.h from TYPES import * LOCK_HELD_VALUE = 0xff def SPIN_LOCK(pl): return ((pl) > ipltospl(LOCK_LEVEL)) def LOCK_SAMPLE_INTERVAL(i): return (((i) & 0xff) == 0) CLOCK_LEVEL = 10 LOCK_LEVEL = 10 DISP_LEVEL = (LOCK_LEVEL + 1) PTR24_LSB = 5 PTR24_MSB = (PTR24_LSB + 24) PTR24_ALIGN = 32 PTR24_BASE = 0xe0000000 # Included from sys/param.h from TYPES import * _POSIX_VDISABLE = 0 MAX_INPUT = 512 MAX_CANON = 256 UID_NOBODY = 60001 GID_NOBODY = UID_NOBODY UID_NOACCESS = 60002 MAX_TASKID = 999999 MAX_MAXPID = 999999 DEFAULT_MAXPID = 999999 DEFAULT_JUMPPID = 100000 DEFAULT_MAXPID = 30000 DEFAULT_JUMPPID = 0 MAXUID = 2147483647 MAXPROJID = MAXUID MAXLINK = 32767 NMOUNT = 40 CANBSIZ = 256 NOFILE = 20 NGROUPS_UMIN = 0 NGROUPS_UMAX = 32 NGROUPS_MAX_DEFAULT = 16 NZERO = 20 NULL = 0L NULL = 0 CMASK = 022 
CDLIMIT = (1L<<11) NBPS = 0x20000 NBPSCTR = 512 UBSIZE = 512 SCTRSHFT = 9 SYSNAME = 9 PREMOTE = 39 MAXPATHLEN = 1024 MAXSYMLINKS = 20 MAXNAMELEN = 256 NADDR = 13 PIPE_BUF = 5120 PIPE_MAX = 5120 NBBY = 8 MAXBSIZE = 8192 DEV_BSIZE = 512 DEV_BSHIFT = 9 MAXFRAG = 8 MAXOFF32_T = 0x7fffffff MAXOFF_T = 0x7fffffffffffffffl MAXOFFSET_T = 0x7fffffffffffffffl MAXOFF_T = 0x7fffffffl MAXOFFSET_T = 0x7fffffff def btodb(bytes): return \ def dbtob(db): return \ def lbtodb(bytes): return \ def ldbtob(db): return \ NCARGS32 = 0x100000 NCARGS64 = 0x200000 NCARGS = NCARGS64 NCARGS = NCARGS32 FSHIFT = 8 FSCALE = (1<<FSHIFT) def DELAY(n): return drv_usecwait(n) def mmu_ptob(x): return ((x) << MMU_PAGESHIFT) def mmu_btop(x): return (((x)) >> MMU_PAGESHIFT) def mmu_btopr(x): return ((((x) + MMU_PAGEOFFSET) >> MMU_PAGESHIFT)) def mmu_ptod(x): return ((x) << (MMU_PAGESHIFT - DEV_BSHIFT)) def ptod(x): return ((x) << (PAGESHIFT - DEV_BSHIFT)) def ptob(x): return ((x) << PAGESHIFT) def btop(x): return (((x) >> PAGESHIFT)) def btopr(x): return ((((x) + PAGEOFFSET) >> PAGESHIFT)) def dtop(DD): return (((DD) + NDPP - 1) >> (PAGESHIFT - DEV_BSHIFT)) def dtopt(DD): return ((DD) >> (PAGESHIFT - DEV_BSHIFT)) _AIO_LISTIO_MAX = (4096) _AIO_MAX = (-1) _MQ_OPEN_MAX = (32) _MQ_PRIO_MAX = (32) _SEM_NSEMS_MAX = INT_MAX _SEM_VALUE_MAX = INT_MAX # Included from sys/unistd.h _CS_PATH = 65 _CS_LFS_CFLAGS = 68 _CS_LFS_LDFLAGS = 69 _CS_LFS_LIBS = 70 _CS_LFS_LINTFLAGS = 71 _CS_LFS64_CFLAGS = 72 _CS_LFS64_LDFLAGS = 73 _CS_LFS64_LIBS = 74 _CS_LFS64_LINTFLAGS = 75 _CS_XBS5_ILP32_OFF32_CFLAGS = 700 _CS_XBS5_ILP32_OFF32_LDFLAGS = 701 _CS_XBS5_ILP32_OFF32_LIBS = 702 _CS_XBS5_ILP32_OFF32_LINTFLAGS = 703 _CS_XBS5_ILP32_OFFBIG_CFLAGS = 705 _CS_XBS5_ILP32_OFFBIG_LDFLAGS = 706 _CS_XBS5_ILP32_OFFBIG_LIBS = 707 _CS_XBS5_ILP32_OFFBIG_LINTFLAGS = 708 _CS_XBS5_LP64_OFF64_CFLAGS = 709 _CS_XBS5_LP64_OFF64_LDFLAGS = 710 _CS_XBS5_LP64_OFF64_LIBS = 711 _CS_XBS5_LP64_OFF64_LINTFLAGS = 712 _CS_XBS5_LPBIG_OFFBIG_CFLAGS = 713 
_CS_XBS5_LPBIG_OFFBIG_LDFLAGS = 714 _CS_XBS5_LPBIG_OFFBIG_LIBS = 715 _CS_XBS5_LPBIG_OFFBIG_LINTFLAGS = 716 _SC_ARG_MAX = 1 _SC_CHILD_MAX = 2 _SC_CLK_TCK = 3 _SC_NGROUPS_MAX = 4 _SC_OPEN_MAX = 5 _SC_JOB_CONTROL = 6 _SC_SAVED_IDS = 7 _SC_VERSION = 8 _SC_PASS_MAX = 9 _SC_LOGNAME_MAX = 10 _SC_PAGESIZE = 11 _SC_XOPEN_VERSION = 12 _SC_NPROCESSORS_CONF = 14 _SC_NPROCESSORS_ONLN = 15 _SC_STREAM_MAX = 16 _SC_TZNAME_MAX = 17 _SC_AIO_LISTIO_MAX = 18 _SC_AIO_MAX = 19 _SC_AIO_PRIO_DELTA_MAX = 20 _SC_ASYNCHRONOUS_IO = 21 _SC_DELAYTIMER_MAX = 22 _SC_FSYNC = 23 _SC_MAPPED_FILES = 24 _SC_MEMLOCK = 25 _SC_MEMLOCK_RANGE = 26 _SC_MEMORY_PROTECTION = 27 _SC_MESSAGE_PASSING = 28 _SC_MQ_OPEN_MAX = 29 _SC_MQ_PRIO_MAX = 30 _SC_PRIORITIZED_IO = 31 _SC_PRIORITY_SCHEDULING = 32 _SC_REALTIME_SIGNALS = 33 _SC_RTSIG_MAX = 34 _SC_SEMAPHORES = 35 _SC_SEM_NSEMS_MAX = 36 _SC_SEM_VALUE_MAX = 37 _SC_SHARED_MEMORY_OBJECTS = 38 _SC_SIGQUEUE_MAX = 39 _SC_SIGRT_MIN = 40 _SC_SIGRT_MAX = 41 _SC_SYNCHRONIZED_IO = 42 _SC_TIMERS = 43 _SC_TIMER_MAX = 44 _SC_2_C_BIND = 45 _SC_2_C_DEV = 46 _SC_2_C_VERSION = 47 _SC_2_FORT_DEV = 48 _SC_2_FORT_RUN = 49 _SC_2_LOCALEDEF = 50 _SC_2_SW_DEV = 51 _SC_2_UPE = 52 _SC_2_VERSION = 53 _SC_BC_BASE_MAX = 54 _SC_BC_DIM_MAX = 55 _SC_BC_SCALE_MAX = 56 _SC_BC_STRING_MAX = 57 _SC_COLL_WEIGHTS_MAX = 58 _SC_EXPR_NEST_MAX = 59 _SC_LINE_MAX = 60 _SC_RE_DUP_MAX = 61 _SC_XOPEN_CRYPT = 62 _SC_XOPEN_ENH_I18N = 63 _SC_XOPEN_SHM = 64 _SC_2_CHAR_TERM = 66 _SC_XOPEN_XCU_VERSION = 67 _SC_ATEXIT_MAX = 76 _SC_IOV_MAX = 77 _SC_XOPEN_UNIX = 78 _SC_PAGE_SIZE = _SC_PAGESIZE _SC_T_IOV_MAX = 79 _SC_PHYS_PAGES = 500 _SC_AVPHYS_PAGES = 501 _SC_COHER_BLKSZ = 503 _SC_SPLIT_CACHE = 504 _SC_ICACHE_SZ = 505 _SC_DCACHE_SZ = 506 _SC_ICACHE_LINESZ = 507 _SC_DCACHE_LINESZ = 508 _SC_ICACHE_BLKSZ = 509 _SC_DCACHE_BLKSZ = 510 _SC_DCACHE_TBLKSZ = 511 _SC_ICACHE_ASSOC = 512 _SC_DCACHE_ASSOC = 513 _SC_MAXPID = 514 _SC_STACK_PROT = 515 _SC_THREAD_DESTRUCTOR_ITERATIONS = 568 _SC_GETGR_R_SIZE_MAX = 569 _SC_GETPW_R_SIZE_MAX 
= 570 _SC_LOGIN_NAME_MAX = 571 _SC_THREAD_KEYS_MAX = 572 _SC_THREAD_STACK_MIN = 573 _SC_THREAD_THREADS_MAX = 574 _SC_TTY_NAME_MAX = 575 _SC_THREADS = 576 _SC_THREAD_ATTR_STACKADDR = 577 _SC_THREAD_ATTR_STACKSIZE = 578 _SC_THREAD_PRIORITY_SCHEDULING = 579 _SC_THREAD_PRIO_INHERIT = 580 _SC_THREAD_PRIO_PROTECT = 581 _SC_THREAD_PROCESS_SHARED = 582 _SC_THREAD_SAFE_FUNCTIONS = 583 _SC_XOPEN_LEGACY = 717 _SC_XOPEN_REALTIME = 718 _SC_XOPEN_REALTIME_THREADS = 719 _SC_XBS5_ILP32_OFF32 = 720 _SC_XBS5_ILP32_OFFBIG = 721 _SC_XBS5_LP64_OFF64 = 722 _SC_XBS5_LPBIG_OFFBIG = 723 _PC_LINK_MAX = 1 _PC_MAX_CANON = 2 _PC_MAX_INPUT = 3 _PC_NAME_MAX = 4 _PC_PATH_MAX = 5 _PC_PIPE_BUF = 6 _PC_NO_TRUNC = 7 _PC_VDISABLE = 8 _PC_CHOWN_RESTRICTED = 9 _PC_ASYNC_IO = 10 _PC_PRIO_IO = 11 _PC_SYNC_IO = 12 _PC_FILESIZEBITS = 67 _PC_LAST = 67 _POSIX_VERSION = 199506L _POSIX2_VERSION = 199209L _POSIX2_C_VERSION = 199209L _XOPEN_XCU_VERSION = 4 _XOPEN_REALTIME = 1 _XOPEN_ENH_I18N = 1 _XOPEN_SHM = 1 _POSIX2_C_BIND = 1 _POSIX2_CHAR_TERM = 1 _POSIX2_LOCALEDEF = 1 _POSIX2_C_DEV = 1 _POSIX2_SW_DEV = 1 _POSIX2_UPE = 1 # Included from sys/mutex.h from TYPES import * def MUTEX_HELD(x): return (mutex_owned(x)) # Included from sys/rwlock.h from TYPES import * def RW_READ_HELD(x): return (rw_read_held((x))) def RW_WRITE_HELD(x): return (rw_write_held((x))) def RW_LOCK_HELD(x): return (rw_lock_held((x))) def RW_ISWRITER(x): return (rw_iswriter(x)) # Included from sys/semaphore.h # Included from sys/thread.h from TYPES import * # Included from sys/klwp.h from TYPES import * # Included from sys/condvar.h from TYPES import * # Included from sys/time.h # Included from sys/types32.h # Included from sys/int_types.h TIME32_MAX = INT32_MAX TIME32_MIN = INT32_MIN def TIMEVAL_OVERFLOW(tv): return \ from TYPES import * DST_NONE = 0 DST_USA = 1 DST_AUST = 2 DST_WET = 3 DST_MET = 4 DST_EET = 5 DST_CAN = 6 DST_GB = 7 DST_RUM = 8 DST_TUR = 9 DST_AUSTALT = 10 ITIMER_REAL = 0 ITIMER_VIRTUAL = 1 ITIMER_PROF = 2 ITIMER_REALPROF = 3 
def ITIMERVAL_OVERFLOW(itv): return \ SEC = 1 MILLISEC = 1000 MICROSEC = 1000000 NANOSEC = 1000000000 # Included from sys/time_impl.h def TIMESPEC_OVERFLOW(ts): return \ def ITIMERSPEC_OVERFLOW(it): return \ __CLOCK_REALTIME0 = 0 CLOCK_VIRTUAL = 1 CLOCK_PROF = 2 __CLOCK_REALTIME3 = 3 CLOCK_HIGHRES = 4 CLOCK_MAX = 5 CLOCK_REALTIME = __CLOCK_REALTIME3 CLOCK_REALTIME = __CLOCK_REALTIME0 TIMER_RELTIME = 0x0 TIMER_ABSTIME = 0x1 def TICK_TO_SEC(tick): return ((tick) / hz) def SEC_TO_TICK(sec): return ((sec) * hz) def TICK_TO_MSEC(tick): return \ def MSEC_TO_TICK(msec): return \ def MSEC_TO_TICK_ROUNDUP(msec): return \ def TICK_TO_USEC(tick): return ((tick) * usec_per_tick) def USEC_TO_TICK(usec): return ((usec) / usec_per_tick) def USEC_TO_TICK_ROUNDUP(usec): return \ def TICK_TO_NSEC(tick): return ((tick) * nsec_per_tick) def NSEC_TO_TICK(nsec): return ((nsec) / nsec_per_tick) def NSEC_TO_TICK_ROUNDUP(nsec): return \ def TIMEVAL_TO_TICK(tvp): return \ def TIMESTRUC_TO_TICK(tsp): return \ # Included from time.h from TYPES import * # Included from iso/time_iso.h NULL = 0L NULL = 0 CLOCKS_PER_SEC = 1000000 # Included from sys/select.h FD_SETSIZE = 65536 FD_SETSIZE = 1024 _NBBY = 8 NBBY = _NBBY def FD_ZERO(p): return bzero((p), sizeof (*(p))) # Included from sys/signal.h # Included from sys/iso/signal_iso.h SIGHUP = 1 SIGINT = 2 SIGQUIT = 3 SIGILL = 4 SIGTRAP = 5 SIGIOT = 6 SIGABRT = 6 SIGEMT = 7 SIGFPE = 8 SIGKILL = 9 SIGBUS = 10 SIGSEGV = 11 SIGSYS = 12 SIGPIPE = 13 SIGALRM = 14 SIGTERM = 15 SIGUSR1 = 16 SIGUSR2 = 17 SIGCLD = 18 SIGCHLD = 18 SIGPWR = 19 SIGWINCH = 20 SIGURG = 21 SIGPOLL = 22 SIGIO = SIGPOLL SIGSTOP = 23 SIGTSTP = 24 SIGCONT = 25 SIGTTIN = 26 SIGTTOU = 27 SIGVTALRM = 28 SIGPROF = 29 SIGXCPU = 30 SIGXFSZ = 31 SIGWAITING = 32 SIGLWP = 33 SIGFREEZE = 34 SIGTHAW = 35 SIGCANCEL = 36 SIGLOST = 37 _SIGRTMIN = 38 _SIGRTMAX = 45 SIG_BLOCK = 1 SIG_UNBLOCK = 2 SIG_SETMASK = 3 SIGNO_MASK = 0xFF SIGDEFER = 0x100 SIGHOLD = 0x200 SIGRELSE = 0x400 SIGIGNORE = 0x800 
SIGPAUSE = 0x1000 # Included from sys/siginfo.h from TYPES import * SIGEV_NONE = 1 SIGEV_SIGNAL = 2 SIGEV_THREAD = 3 SI_NOINFO = 32767 SI_USER = 0 SI_LWP = (-1) SI_QUEUE = (-2) SI_TIMER = (-3) SI_ASYNCIO = (-4) SI_MESGQ = (-5) # Included from sys/machsig.h ILL_ILLOPC = 1 ILL_ILLOPN = 2 ILL_ILLADR = 3 ILL_ILLTRP = 4 ILL_PRVOPC = 5 ILL_PRVREG = 6 ILL_COPROC = 7 ILL_BADSTK = 8 NSIGILL = 8 EMT_TAGOVF = 1 EMT_CPCOVF = 2 NSIGEMT = 2 FPE_INTDIV = 1 FPE_INTOVF = 2 FPE_FLTDIV = 3 FPE_FLTOVF = 4 FPE_FLTUND = 5 FPE_FLTRES = 6 FPE_FLTINV = 7 FPE_FLTSUB = 8 NSIGFPE = 8 SEGV_MAPERR = 1 SEGV_ACCERR = 2 NSIGSEGV = 2 BUS_ADRALN = 1 BUS_ADRERR = 2 BUS_OBJERR = 3 NSIGBUS = 3 TRAP_BRKPT = 1 TRAP_TRACE = 2 TRAP_RWATCH = 3 TRAP_WWATCH = 4 TRAP_XWATCH = 5 NSIGTRAP = 5 CLD_EXITED = 1 CLD_KILLED = 2 CLD_DUMPED = 3 CLD_TRAPPED = 4 CLD_STOPPED = 5 CLD_CONTINUED = 6 NSIGCLD = 6 POLL_IN = 1 POLL_OUT = 2 POLL_MSG = 3 POLL_ERR = 4 POLL_PRI = 5 POLL_HUP = 6 NSIGPOLL = 6 PROF_SIG = 1 NSIGPROF = 1 SI_MAXSZ = 256 SI_MAXSZ = 128 # Included from sys/time_std_impl.h from TYPES import * SI32_MAXSZ = 128 def SI_CANQUEUE(c): return ((c) <= SI_QUEUE) SA_NOCLDSTOP = 0x00020000 SA_ONSTACK = 0x00000001 SA_RESETHAND = 0x00000002 SA_RESTART = 0x00000004 SA_SIGINFO = 0x00000008 SA_NODEFER = 0x00000010 SA_NOCLDWAIT = 0x00010000 SA_WAITSIG = 0x00010000 NSIG = 46 MAXSIG = 45 S_SIGNAL = 1 S_SIGSET = 2 S_SIGACTION = 3 S_NONE = 4 MINSIGSTKSZ = 2048 SIGSTKSZ = 8192 SS_ONSTACK = 0x00000001 SS_DISABLE = 0x00000002 SN_PROC = 1 SN_CANCEL = 2 SN_SEND = 3 # Included from sys/ucontext.h from TYPES import * # Included from sys/regset.h REG_CCR = (0) REG_PSR = (0) REG_PSR = (0) REG_PC = (1) REG_nPC = (2) REG_Y = (3) REG_G1 = (4) REG_G2 = (5) REG_G3 = (6) REG_G4 = (7) REG_G5 = (8) REG_G6 = (9) REG_G7 = (10) REG_O0 = (11) REG_O1 = (12) REG_O2 = (13) REG_O3 = (14) REG_O4 = (15) REG_O5 = (16) REG_O6 = (17) REG_O7 = (18) REG_ASI = (19) REG_FPRS = (20) REG_PS = REG_PSR REG_SP = REG_O6 REG_R0 = REG_O0 REG_R1 = REG_O1 _NGREG = 21 
_NGREG = 19 NGREG = _NGREG _NGREG32 = 19 _NGREG64 = 21 SPARC_MAXREGWINDOW = 31 MAXFPQ = 16 XRS_ID = 0x78727300 # Included from v7/sys/privregs.h # Included from v7/sys/psr.h PSR_CWP = 0x0000001F PSR_ET = 0x00000020 PSR_PS = 0x00000040 PSR_S = 0x00000080 PSR_PIL = 0x00000F00 PSR_EF = 0x00001000 PSR_EC = 0x00002000 PSR_RSV = 0x000FC000 PSR_ICC = 0x00F00000 PSR_C = 0x00100000 PSR_V = 0x00200000 PSR_Z = 0x00400000 PSR_N = 0x00800000 PSR_VER = 0x0F000000 PSR_IMPL = 0xF0000000 PSL_ALLCC = PSR_ICC PSL_USER = (PSR_S) PSL_USERMASK = (PSR_ICC) PSL_UBITS = (PSR_ICC|PSR_EF) def USERMODE(ps): return (((ps) & PSR_PS) == 0) # Included from sys/fsr.h FSR_CEXC = 0x0000001f FSR_AEXC = 0x000003e0 FSR_FCC = 0x00000c00 FSR_PR = 0x00001000 FSR_QNE = 0x00002000 FSR_FTT = 0x0001c000 FSR_VER = 0x000e0000 FSR_TEM = 0x0f800000 FSR_RP = 0x30000000 FSR_RD = 0xc0000000 FSR_VER_SHIFT = 17 FSR_FCC1 = 0x00000003 FSR_FCC2 = 0x0000000C FSR_FCC3 = 0x00000030 FSR_CEXC_NX = 0x00000001 FSR_CEXC_DZ = 0x00000002 FSR_CEXC_UF = 0x00000004 FSR_CEXC_OF = 0x00000008 FSR_CEXC_NV = 0x00000010 FSR_AEXC_NX = (0x1 << 5) FSR_AEXC_DZ = (0x2 << 5) FSR_AEXC_UF = (0x4 << 5) FSR_AEXC_OF = (0x8 << 5) FSR_AEXC_NV = (0x10 << 5) FTT_NONE = 0 FTT_IEEE = 1 FTT_UNFIN = 2 FTT_UNIMP = 3 FTT_SEQ = 4 FTT_ALIGN = 5 FTT_DFAULT = 6 FSR_FTT_SHIFT = 14 FSR_FTT_IEEE = (FTT_IEEE << FSR_FTT_SHIFT) FSR_FTT_UNFIN = (FTT_UNFIN << FSR_FTT_SHIFT) FSR_FTT_UNIMP = (FTT_UNIMP << FSR_FTT_SHIFT) FSR_FTT_SEQ = (FTT_SEQ << FSR_FTT_SHIFT) FSR_FTT_ALIGN = (FTT_ALIGN << FSR_FTT_SHIFT) FSR_FTT_DFAULT = (FTT_DFAULT << FSR_FTT_SHIFT) FSR_TEM_NX = (0x1 << 23) FSR_TEM_DZ = (0x2 << 23) FSR_TEM_UF = (0x4 << 23) FSR_TEM_OF = (0x8 << 23) FSR_TEM_NV = (0x10 << 23) RP_DBLEXT = 0 RP_SINGLE = 1 RP_DOUBLE = 2 RP_RESERVED = 3 RD_NEAR = 0 RD_ZER0 = 1 RD_POSINF = 2 RD_NEGINF = 3 FPRS_DL = 0x1 FPRS_DU = 0x2 FPRS_FEF = 0x4 PIL_MAX = 0xf def SAVE_GLOBALS(RP): return \ def RESTORE_GLOBALS(RP): return \ def SAVE_OUTS(RP): return \ def RESTORE_OUTS(RP): return \ def 
SAVE_WINDOW(SBP): return \ def RESTORE_WINDOW(SBP): return \ def STORE_FPREGS(FP): return \ def LOAD_FPREGS(FP): return \ _SPARC_MAXREGWINDOW = 31 _XRS_ID = 0x78727300 GETCONTEXT = 0 SETCONTEXT = 1 UC_SIGMASK = 001 UC_STACK = 002 UC_CPU = 004 UC_MAU = 010 UC_FPU = UC_MAU UC_INTR = 020 UC_ASR = 040 UC_MCONTEXT = (UC_CPU|UC_FPU|UC_ASR) UC_ALL = (UC_SIGMASK|UC_STACK|UC_MCONTEXT) _SIGQUEUE_MAX = 32 _SIGNOTIFY_MAX = 32 # Included from sys/pcb.h INSTR_VALID = 0x02 NORMAL_STEP = 0x04 WATCH_STEP = 0x08 CPC_OVERFLOW = 0x10 ASYNC_HWERR = 0x20 STEP_NONE = 0 STEP_REQUESTED = 1 STEP_ACTIVE = 2 STEP_WASACTIVE = 3 # Included from sys/msacct.h LMS_USER = 0 LMS_SYSTEM = 1 LMS_TRAP = 2 LMS_TFAULT = 3 LMS_DFAULT = 4 LMS_KFAULT = 5 LMS_USER_LOCK = 6 LMS_SLEEP = 7 LMS_WAIT_CPU = 8 LMS_STOPPED = 9 NMSTATES = 10 # Included from sys/lwp.h # Included from sys/synch.h from TYPES import * USYNC_THREAD = 0x00 USYNC_PROCESS = 0x01 LOCK_NORMAL = 0x00 LOCK_ERRORCHECK = 0x02 LOCK_RECURSIVE = 0x04 USYNC_PROCESS_ROBUST = 0x08 LOCK_PRIO_NONE = 0x00 LOCK_PRIO_INHERIT = 0x10 LOCK_PRIO_PROTECT = 0x20 LOCK_STALL_NP = 0x00 LOCK_ROBUST_NP = 0x40 LOCK_OWNERDEAD = 0x1 LOCK_NOTRECOVERABLE = 0x2 LOCK_INITED = 0x4 LOCK_UNMAPPED = 0x8 LWP_DETACHED = 0x00000040 LWP_SUSPENDED = 0x00000080 __LWP_ASLWP = 0x00000100 MAXSYSARGS = 8 NORMALRETURN = 0 JUSTRETURN = 1 LWP_USER = 0x01 LWP_SYS = 0x02 TS_FREE = 0x00 TS_SLEEP = 0x01 TS_RUN = 0x02 TS_ONPROC = 0x04 TS_ZOMB = 0x08 TS_STOPPED = 0x10 T_INTR_THREAD = 0x0001 T_WAKEABLE = 0x0002 T_TOMASK = 0x0004 T_TALLOCSTK = 0x0008 T_WOULDBLOCK = 0x0020 T_DONTBLOCK = 0x0040 T_DONTPEND = 0x0080 T_SYS_PROF = 0x0100 T_WAITCVSEM = 0x0200 T_WATCHPT = 0x0400 T_PANIC = 0x0800 TP_HOLDLWP = 0x0002 TP_TWAIT = 0x0004 TP_LWPEXIT = 0x0008 TP_PRSTOP = 0x0010 TP_CHKPT = 0x0020 TP_EXITLWP = 0x0040 TP_PRVSTOP = 0x0080 TP_MSACCT = 0x0100 TP_STOPPING = 0x0200 TP_WATCHPT = 0x0400 TP_PAUSE = 0x0800 TP_CHANGEBIND = 0x1000 TS_LOAD = 0x0001 TS_DONT_SWAP = 0x0002 TS_SWAPENQ = 0x0004 TS_ON_SWAPQ = 0x0008 
TS_CSTART = 0x0100 TS_UNPAUSE = 0x0200 TS_XSTART = 0x0400 TS_PSTART = 0x0800 TS_RESUME = 0x1000 TS_CREATE = 0x2000 TS_ALLSTART = \ (TS_CSTART|TS_UNPAUSE|TS_XSTART|TS_PSTART|TS_RESUME|TS_CREATE) def CPR_VSTOPPED(t): return \ def THREAD_TRANSITION(tp): return thread_transition(tp); def THREAD_STOP(tp): return \ def THREAD_ZOMB(tp): return THREAD_SET_STATE(tp, TS_ZOMB, NULL) def SEMA_HELD(x): return (sema_held((x))) NO_LOCKS_HELD = 1 NO_COMPETING_THREADS = 1 FMNAMESZ = 8 # Included from sys/systm.h from TYPES import * # Included from sys/proc.h # Included from sys/cred.h # Included from sys/user.h from TYPES import * # Included from sys/resource.h from TYPES import * PRIO_PROCESS = 0 PRIO_PGRP = 1 PRIO_USER = 2 RLIMIT_CPU = 0 RLIMIT_FSIZE = 1 RLIMIT_DATA = 2 RLIMIT_STACK = 3 RLIMIT_CORE = 4 RLIMIT_NOFILE = 5 RLIMIT_VMEM = 6 RLIMIT_AS = RLIMIT_VMEM RLIM_NLIMITS = 7 RLIM_INFINITY = (-3l) RLIM_SAVED_MAX = (-2l) RLIM_SAVED_CUR = (-1l) RLIM_INFINITY = 0x7fffffff RLIM_SAVED_MAX = 0x7ffffffe RLIM_SAVED_CUR = 0x7ffffffd RLIM32_INFINITY = 0x7fffffff RLIM32_SAVED_MAX = 0x7ffffffe RLIM32_SAVED_CUR = 0x7ffffffd # Included from sys/model.h # Included from sys/debug.h def ASSERT64(x): return ASSERT(x) def ASSERT32(x): return ASSERT(x) DATAMODEL_MASK = 0x0FF00000 DATAMODEL_ILP32 = 0x00100000 DATAMODEL_LP64 = 0x00200000 DATAMODEL_NONE = 0 DATAMODEL_NATIVE = DATAMODEL_LP64 DATAMODEL_NATIVE = DATAMODEL_ILP32 def STRUCT_SIZE(handle): return \ def STRUCT_BUF(handle): return ((handle).ptr.m64) def SIZEOF_PTR(umodel): return \ def STRUCT_SIZE(handle): return (sizeof (*(handle).ptr)) def STRUCT_BUF(handle): return ((handle).ptr) def SIZEOF_PTR(umodel): return sizeof (caddr_t) def lwp_getdatamodel(t): return DATAMODEL_ILP32 RUSAGE_SELF = 0 RUSAGE_CHILDREN = -1 # Included from sys/auxv.h AT_NULL = 0 AT_IGNORE = 1 AT_EXECFD = 2 AT_PHDR = 3 AT_PHENT = 4 AT_PHNUM = 5 AT_PAGESZ = 6 AT_BASE = 7 AT_FLAGS = 8 AT_ENTRY = 9 AT_DCACHEBSIZE = 10 AT_ICACHEBSIZE = 11 AT_UCACHEBSIZE = 12 AT_SUN_UID = 2000 
AT_SUN_RUID = 2001 AT_SUN_GID = 2002 AT_SUN_RGID = 2003 AT_SUN_LDELF = 2004 AT_SUN_LDSHDR = 2005 AT_SUN_LDNAME = 2006 AT_SUN_LPAGESZ = 2007 AT_SUN_PLATFORM = 2008 AT_SUN_HWCAP = 2009 AT_SUN_IFLUSH = 2010 AT_SUN_CPU = 2011 AT_SUN_EMUL_ENTRY = 2012 AT_SUN_EMUL_EXECFD = 2013 AT_SUN_EXECNAME = 2014 AT_SUN_MMU = 2015 # Included from sys/errno.h EPERM = 1 ENOENT = 2 ESRCH = 3 EINTR = 4 EIO = 5 ENXIO = 6 E2BIG = 7 ENOEXEC = 8 EBADF = 9 ECHILD = 10 EAGAIN = 11 ENOMEM = 12 EACCES = 13 EFAULT = 14 ENOTBLK = 15 EBUSY = 16 EEXIST = 17 EXDEV = 18 ENODEV = 19 ENOTDIR = 20 EISDIR = 21 EINVAL = 22 ENFILE = 23 EMFILE = 24 ENOTTY = 25 ETXTBSY = 26 EFBIG = 27 ENOSPC = 28 ESPIPE = 29 EROFS = 30 EMLINK = 31 EPIPE = 32 EDOM = 33 ERANGE = 34 ENOMSG = 35 EIDRM = 36 ECHRNG = 37 EL2NSYNC = 38 EL3HLT = 39 EL3RST = 40 ELNRNG = 41 EUNATCH = 42 ENOCSI = 43 EL2HLT = 44 EDEADLK = 45 ENOLCK = 46 ECANCELED = 47 ENOTSUP = 48 EDQUOT = 49 EBADE = 50 EBADR = 51 EXFULL = 52 ENOANO = 53 EBADRQC = 54 EBADSLT = 55 EDEADLOCK = 56 EBFONT = 57 EOWNERDEAD = 58 ENOTRECOVERABLE = 59 ENOSTR = 60 ENODATA = 61 ETIME = 62 ENOSR = 63 ENONET = 64 ENOPKG = 65 EREMOTE = 66 ENOLINK = 67 EADV = 68 ESRMNT = 69 ECOMM = 70 EPROTO = 71 ELOCKUNMAPPED = 72 ENOTACTIVE = 73 EMULTIHOP = 74 EBADMSG = 77 ENAMETOOLONG = 78 EOVERFLOW = 79 ENOTUNIQ = 80 EBADFD = 81 EREMCHG = 82 ELIBACC = 83 ELIBBAD = 84 ELIBSCN = 85 ELIBMAX = 86 ELIBEXEC = 87 EILSEQ = 88 ENOSYS = 89 ELOOP = 90 ERESTART = 91 ESTRPIPE = 92 ENOTEMPTY = 93 EUSERS = 94 ENOTSOCK = 95 EDESTADDRREQ = 96 EMSGSIZE = 97 EPROTOTYPE = 98 ENOPROTOOPT = 99 EPROTONOSUPPORT = 120 ESOCKTNOSUPPORT = 121 EOPNOTSUPP = 122 EPFNOSUPPORT = 123 EAFNOSUPPORT = 124 EADDRINUSE = 125 EADDRNOTAVAIL = 126 ENETDOWN = 127 ENETUNREACH = 128 ENETRESET = 129 ECONNABORTED = 130 ECONNRESET = 131 ENOBUFS = 132 EISCONN = 133 ENOTCONN = 134 ESHUTDOWN = 143 ETOOMANYREFS = 144 ETIMEDOUT = 145 ECONNREFUSED = 146 EHOSTDOWN = 147 EHOSTUNREACH = 148 EWOULDBLOCK = EAGAIN EALREADY = 149 EINPROGRESS = 150 ESTALE = 151 
PSARGSZ = 80 PSCOMSIZ = 14 MAXCOMLEN = 16 __KERN_NAUXV_IMPL = 19 __KERN_NAUXV_IMPL = 21 __KERN_NAUXV_IMPL = 21 PSARGSZ = 80 # Included from sys/watchpoint.h from TYPES import * # Included from vm/seg_enum.h # Included from sys/copyops.h from TYPES import * # Included from sys/buf.h # Included from sys/kstat.h from TYPES import * KSTAT_STRLEN = 31 def KSTAT_ENTER(k): return \ def KSTAT_EXIT(k): return \ KSTAT_TYPE_RAW = 0 KSTAT_TYPE_NAMED = 1 KSTAT_TYPE_INTR = 2 KSTAT_TYPE_IO = 3 KSTAT_TYPE_TIMER = 4 KSTAT_NUM_TYPES = 5 KSTAT_FLAG_VIRTUAL = 0x01 KSTAT_FLAG_VAR_SIZE = 0x02 KSTAT_FLAG_WRITABLE = 0x04 KSTAT_FLAG_PERSISTENT = 0x08 KSTAT_FLAG_DORMANT = 0x10 KSTAT_FLAG_INVALID = 0x20 KSTAT_READ = 0 KSTAT_WRITE = 1 KSTAT_DATA_CHAR = 0 KSTAT_DATA_INT32 = 1 KSTAT_DATA_UINT32 = 2 KSTAT_DATA_INT64 = 3 KSTAT_DATA_UINT64 = 4 KSTAT_DATA_LONG = KSTAT_DATA_INT32 KSTAT_DATA_ULONG = KSTAT_DATA_UINT32 KSTAT_DATA_LONG = KSTAT_DATA_INT64 KSTAT_DATA_ULONG = KSTAT_DATA_UINT64 KSTAT_DATA_LONG = 7 KSTAT_DATA_ULONG = 8 KSTAT_DATA_LONGLONG = KSTAT_DATA_INT64 KSTAT_DATA_ULONGLONG = KSTAT_DATA_UINT64 KSTAT_DATA_FLOAT = 5 KSTAT_DATA_DOUBLE = 6 KSTAT_INTR_HARD = 0 KSTAT_INTR_SOFT = 1 KSTAT_INTR_WATCHDOG = 2 KSTAT_INTR_SPURIOUS = 3 KSTAT_INTR_MULTSVC = 4 KSTAT_NUM_INTRS = 5 B_BUSY = 0x0001 B_DONE = 0x0002 B_ERROR = 0x0004 B_PAGEIO = 0x0010 B_PHYS = 0x0020 B_READ = 0x0040 B_WRITE = 0x0100 B_KERNBUF = 0x0008 B_WANTED = 0x0080 B_AGE = 0x000200 B_ASYNC = 0x000400 B_DELWRI = 0x000800 B_STALE = 0x001000 B_DONTNEED = 0x002000 B_REMAPPED = 0x004000 B_FREE = 0x008000 B_INVAL = 0x010000 B_FORCE = 0x020000 B_HEAD = 0x040000 B_NOCACHE = 0x080000 B_TRUNC = 0x100000 B_SHADOW = 0x200000 B_RETRYWRI = 0x400000 def notavail(bp): return \ def BWRITE(bp): return \ def BWRITE2(bp): return \ # Included from sys/aio_req.h # Included from sys/uio.h from TYPES import * WP_NOWATCH = 0x01 WP_SETPROT = 0x02 # Included from sys/timer.h from TYPES import * _TIMER_MAX = 32 ITLK_LOCKED = 0x01 ITLK_WANTED = 0x02 ITLK_REMOVE = 
0x04 IT_PERLWP = 0x01 IT_SIGNAL = 0x02 # Included from sys/utrap.h UT_INSTRUCTION_DISABLED = 1 UT_INSTRUCTION_ERROR = 2 UT_INSTRUCTION_PROTECTION = 3 UT_ILLTRAP_INSTRUCTION = 4 UT_ILLEGAL_INSTRUCTION = 5 UT_PRIVILEGED_OPCODE = 6 UT_FP_DISABLED = 7 UT_FP_EXCEPTION_IEEE_754 = 8 UT_FP_EXCEPTION_OTHER = 9 UT_TAG_OVERFLOW = 10 UT_DIVISION_BY_ZERO = 11 UT_DATA_EXCEPTION = 12 UT_DATA_ERROR = 13 UT_DATA_PROTECTION = 14 UT_MEM_ADDRESS_NOT_ALIGNED = 15 UT_PRIVILEGED_ACTION = 16 UT_ASYNC_DATA_ERROR = 17 UT_TRAP_INSTRUCTION_16 = 18 UT_TRAP_INSTRUCTION_17 = 19 UT_TRAP_INSTRUCTION_18 = 20 UT_TRAP_INSTRUCTION_19 = 21 UT_TRAP_INSTRUCTION_20 = 22 UT_TRAP_INSTRUCTION_21 = 23 UT_TRAP_INSTRUCTION_22 = 24 UT_TRAP_INSTRUCTION_23 = 25 UT_TRAP_INSTRUCTION_24 = 26 UT_TRAP_INSTRUCTION_25 = 27 UT_TRAP_INSTRUCTION_26 = 28 UT_TRAP_INSTRUCTION_27 = 29 UT_TRAP_INSTRUCTION_28 = 30 UT_TRAP_INSTRUCTION_29 = 31 UT_TRAP_INSTRUCTION_30 = 32 UT_TRAP_INSTRUCTION_31 = 33 UTRAP_V8P_FP_DISABLED = UT_FP_DISABLED UTRAP_V8P_MEM_ADDRESS_NOT_ALIGNED = UT_MEM_ADDRESS_NOT_ALIGNED UT_PRECISE_MAXTRAPS = 33 # Included from sys/refstr.h # Included from sys/task.h from TYPES import * TASK_NORMAL = 0x0 TASK_FINAL = 0x1 TASK_FINALITY = 0x1 # Included from sys/id_space.h from TYPES import * # Included from sys/vmem.h from TYPES import * VM_SLEEP = 0x00000000 VM_NOSLEEP = 0x00000001 VM_PANIC = 0x00000002 VM_KMFLAGS = 0x000000ff VM_BESTFIT = 0x00000100 VMEM_ALLOC = 0x01 VMEM_FREE = 0x02 VMEM_SPAN = 0x10 ISP_NORMAL = 0x0 ISP_RESERVE = 0x1 # Included from sys/exacct_impl.h from TYPES import * # Included from sys/kmem.h from TYPES import * KM_SLEEP = 0x0000 KM_NOSLEEP = 0x0001 KM_PANIC = 0x0002 KM_VMFLAGS = 0x00ff KM_FLAGS = 0xffff KMC_NOTOUCH = 0x00010000 KMC_NODEBUG = 0x00020000 KMC_NOMAGAZINE = 0x00040000 KMC_NOHASH = 0x00080000 KMC_QCACHE = 0x00100000 _ISA_IA32 = 0 _ISA_IA64 = 1 SSLEEP = 1 SRUN = 2 SZOMB = 3 SSTOP = 4 SIDL = 5 SONPROC = 6 CLDPEND = 0x0001 CLDCONT = 0x0002 SSYS = 0x00000001 STRC = 0x00000002 SLOAD = 
0x00000008 SLOCK = 0x00000010 SPREXEC = 0x00000020 SPROCTR = 0x00000040 SPRFORK = 0x00000080 SKILLED = 0x00000100 SULOAD = 0x00000200 SRUNLCL = 0x00000400 SBPTADJ = 0x00000800 SKILLCL = 0x00001000 SOWEUPC = 0x00002000 SEXECED = 0x00004000 SPASYNC = 0x00008000 SJCTL = 0x00010000 SNOWAIT = 0x00020000 SVFORK = 0x00040000 SVFWAIT = 0x00080000 EXITLWPS = 0x00100000 HOLDFORK = 0x00200000 SWAITSIG = 0x00400000 HOLDFORK1 = 0x00800000 COREDUMP = 0x01000000 SMSACCT = 0x02000000 ASLWP = 0x04000000 SPRLOCK = 0x08000000 NOCD = 0x10000000 HOLDWATCH = 0x20000000 SMSFORK = 0x40000000 SDOCORE = 0x80000000 FORREAL = 0 JUSTLOOKING = 1 SUSPEND_NORMAL = 0 SUSPEND_PAUSE = 1 NOCLASS = (-1) # Included from sys/dditypes.h DDI_DEVICE_ATTR_V0 = 0x0001 DDI_NEVERSWAP_ACC = 0x00 DDI_STRUCTURE_LE_ACC = 0x01 DDI_STRUCTURE_BE_ACC = 0x02 DDI_STRICTORDER_ACC = 0x00 DDI_UNORDERED_OK_ACC = 0x01 DDI_MERGING_OK_ACC = 0x02 DDI_LOADCACHING_OK_ACC = 0x03 DDI_STORECACHING_OK_ACC = 0x04 DDI_DATA_SZ01_ACC = 1 DDI_DATA_SZ02_ACC = 2 DDI_DATA_SZ04_ACC = 4 DDI_DATA_SZ08_ACC = 8 VERS_ACCHDL = 0x0001 DEVID_NONE = 0 DEVID_SCSI3_WWN = 1 DEVID_SCSI_SERIAL = 2 DEVID_FAB = 3 DEVID_ENCAP = 4 DEVID_MAXTYPE = 4 # Included from sys/varargs.h # Included from sys/va_list.h VA_ALIGN = 8 def _ARGSIZEOF(t): return ((sizeof (t) + VA_ALIGN - 1) & ~(VA_ALIGN - 1)) VA_ALIGN = 8 def _ARGSIZEOF(t): return ((sizeof (t) + VA_ALIGN - 1) & ~(VA_ALIGN - 1)) NSYSCALL = 256 SE_32RVAL1 = 0x0 SE_32RVAL2 = 0x1 SE_64RVAL = 0x2 SE_RVAL_MASK = 0x3 SE_LOADABLE = 0x08 SE_LOADED = 0x10 SE_NOUNLOAD = 0x20 SE_ARGC = 0x40 # Included from sys/devops.h from TYPES import * # Included from sys/poll.h POLLIN = 0x0001 POLLPRI = 0x0002 POLLOUT = 0x0004 POLLRDNORM = 0x0040 POLLWRNORM = POLLOUT POLLRDBAND = 0x0080 POLLWRBAND = 0x0100 POLLNORM = POLLRDNORM POLLERR = 0x0008 POLLHUP = 0x0010 POLLNVAL = 0x0020 POLLREMOVE = 0x0800 POLLRDDATA = 0x0200 POLLNOERR = 0x0400 POLLCLOSED = 0x8000 # Included from vm/as.h # Included from vm/seg.h # Included from sys/vnode.h 
from TYPES import * VROOT = 0x01 VNOCACHE = 0x02 VNOMAP = 0x04 VDUP = 0x08 VNOSWAP = 0x10 VNOMOUNT = 0x20 VISSWAP = 0x40 VSWAPLIKE = 0x80 VVFSLOCK = 0x100 VVFSWAIT = 0x200 VVMLOCK = 0x400 VDIROPEN = 0x800 VVMEXEC = 0x1000 VPXFS = 0x2000 AT_TYPE = 0x0001 AT_MODE = 0x0002 AT_UID = 0x0004 AT_GID = 0x0008 AT_FSID = 0x0010 AT_NODEID = 0x0020 AT_NLINK = 0x0040 AT_SIZE = 0x0080 AT_ATIME = 0x0100 AT_MTIME = 0x0200 AT_CTIME = 0x0400 AT_RDEV = 0x0800 AT_BLKSIZE = 0x1000 AT_NBLOCKS = 0x2000 AT_VCODE = 0x4000 AT_ALL = (AT_TYPE|AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|\ AT_NLINK|AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|\ AT_RDEV|AT_BLKSIZE|AT_NBLOCKS|AT_VCODE) AT_STAT = (AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|AT_NLINK|\ AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|AT_RDEV) AT_TIMES = (AT_ATIME|AT_MTIME|AT_CTIME) AT_NOSET = (AT_NLINK|AT_RDEV|AT_FSID|AT_NODEID|AT_TYPE|\ AT_BLKSIZE|AT_NBLOCKS|AT_VCODE) VSUID = 04000 VSGID = 02000 VSVTX = 01000 VREAD = 00400 VWRITE = 00200 VEXEC = 00100 MODEMASK = 07777 PERMMASK = 00777 def MANDMODE(mode): return (((mode) & (VSGID|(VEXEC>>3))) == VSGID) VSA_ACL = 0x0001 VSA_ACLCNT = 0x0002 VSA_DFACL = 0x0004 VSA_DFACLCNT = 0x0008 LOOKUP_DIR = 0x01 DUMP_ALLOC = 0 DUMP_FREE = 1 DUMP_SCAN = 2 ATTR_UTIME = 0x01 ATTR_EXEC = 0x02 ATTR_COMM = 0x04 ATTR_HINT = 0x08 ATTR_REAL = 0x10 # Included from vm/faultcode.h FC_HWERR = 0x1 FC_ALIGN = 0x2 FC_OBJERR = 0x3 FC_PROT = 0x4 FC_NOMAP = 0x5 FC_NOSUPPORT = 0x6 def FC_MAKE_ERR(e): return (((e) << 8) | FC_OBJERR) def FC_CODE(fc): return ((fc) & 0xff) def FC_ERRNO(fc): return ((unsigned)(fc) >> 8) # Included from vm/hat.h from TYPES import * # Included from vm/page.h PAGE_HASHAVELEN = 4 PAGE_HASHVPSHIFT = 6 PG_EXCL = 0x0001 PG_WAIT = 0x0002 PG_PHYSCONTIG = 0x0004 PG_MATCH_COLOR = 0x0008 PG_NORELOC = 0x0010 PG_FREE_LIST = 1 PG_CACHE_LIST = 2 PG_LIST_TAIL = 0 PG_LIST_HEAD = 1 def page_next_raw(PP): return page_nextn_raw((PP), 1) PAGE_IO_INUSE = 0x1 PAGE_IO_WANTED = 0x2 PGREL_NOTREL = 0x1 PGREL_CLEAN = 0x2 PGREL_MOD = 0x3 P_FREE = 
0x80 P_NORELOC = 0x40 def PP_SETAGED(pp): return ASSERT(PP_ISAGED(pp)) HAT_FLAGS_RESV = 0xFF000000 HAT_LOAD = 0x00 HAT_LOAD_LOCK = 0x01 HAT_LOAD_ADV = 0x04 HAT_LOAD_CONTIG = 0x10 HAT_LOAD_NOCONSIST = 0x20 HAT_LOAD_SHARE = 0x40 HAT_LOAD_REMAP = 0x80 HAT_RELOAD_SHARE = 0x100 HAT_PLAT_ATTR_MASK = 0xF00000 HAT_PROT_MASK = 0x0F HAT_NOFAULT = 0x10 HAT_NOSYNC = 0x20 HAT_STRICTORDER = 0x0000 HAT_UNORDERED_OK = 0x0100 HAT_MERGING_OK = 0x0200 HAT_LOADCACHING_OK = 0x0300 HAT_STORECACHING_OK = 0x0400 HAT_ORDER_MASK = 0x0700 HAT_NEVERSWAP = 0x0000 HAT_STRUCTURE_BE = 0x1000 HAT_STRUCTURE_LE = 0x2000 HAT_ENDIAN_MASK = 0x3000 HAT_COW = 0x0001 HAT_UNLOAD = 0x00 HAT_UNLOAD_NOSYNC = 0x02 HAT_UNLOAD_UNLOCK = 0x04 HAT_UNLOAD_OTHER = 0x08 HAT_UNLOAD_UNMAP = 0x10 HAT_SYNC_DONTZERO = 0x00 HAT_SYNC_ZERORM = 0x01 HAT_SYNC_STOPON_REF = 0x02 HAT_SYNC_STOPON_MOD = 0x04 HAT_SYNC_STOPON_RM = (HAT_SYNC_STOPON_REF | HAT_SYNC_STOPON_MOD) HAT_DUP_ALL = 1 HAT_DUP_COW = 2 HAT_MAP = 0x00 HAT_ADV_PGUNLOAD = 0x00 HAT_FORCE_PGUNLOAD = 0x01 P_MOD = 0x1 P_REF = 0x2 P_RO = 0x4 def hat_ismod(pp): return (hat_page_getattr(pp, P_MOD)) def hat_isref(pp): return (hat_page_getattr(pp, P_REF)) def hat_isro(pp): return (hat_page_getattr(pp, P_RO)) def hat_setmod(pp): return (hat_page_setattr(pp, P_MOD)) def hat_setref(pp): return (hat_page_setattr(pp, P_REF)) def hat_setrefmod(pp): return (hat_page_setattr(pp, P_REF|P_MOD)) def hat_clrmod(pp): return (hat_page_clrattr(pp, P_MOD)) def hat_clrref(pp): return (hat_page_clrattr(pp, P_REF)) def hat_clrrefmod(pp): return (hat_page_clrattr(pp, P_REF|P_MOD)) def hat_page_is_mapped(pp): return (hat_page_getshare(pp)) HAT_DONTALLOC = 0 HAT_ALLOC = 1 HRM_SHIFT = 4 HRM_BYTES = (1 << HRM_SHIFT) HRM_PAGES = ((HRM_BYTES * NBBY) / 2) HRM_PGPERBYTE = (NBBY/2) HRM_PGBYTEMASK = (HRM_PGPERBYTE-1) HRM_HASHSIZE = 0x200 HRM_HASHMASK = (HRM_HASHSIZE - 1) HRM_BLIST_INCR = 0x200 HRM_SWSMONID = 1 SSL_NLEVELS = 4 SSL_BFACTOR = 4 SSL_LOG2BF = 2 SEGP_ASYNC_FLUSH = 0x1 SEGP_FORCE_WIRED = 0x2 
SEGP_SUCCESS = 0 SEGP_FAIL = 1 def seg_pages(seg): return \ IE_NOMEM = -1 AS_PAGLCK = 0x80 AS_CLAIMGAP = 0x40 AS_UNMAPWAIT = 0x20 def AS_TYPE_64BIT(as): return \ AS_LREP_LINKEDLIST = 0 AS_LREP_SKIPLIST = 1 AS_MUTATION_THRESH = 225 AH_DIR = 0x1 AH_LO = 0x0 AH_HI = 0x1 AH_CONTAIN = 0x2 # Included from sys/ddidmareq.h DMA_UNIT_8 = 1 DMA_UNIT_16 = 2 DMA_UNIT_32 = 4 DMALIM_VER0 = ((0x86000000) + 0) DDI_DMA_FORCE_PHYSICAL = 0x0100 DMA_ATTR_V0 = 0 DMA_ATTR_VERSION = DMA_ATTR_V0 DDI_DMA_CALLBACK_RUNOUT = 0 DDI_DMA_CALLBACK_DONE = 1 DDI_DMA_WRITE = 0x0001 DDI_DMA_READ = 0x0002 DDI_DMA_RDWR = (DDI_DMA_READ | DDI_DMA_WRITE) DDI_DMA_REDZONE = 0x0004 DDI_DMA_PARTIAL = 0x0008 DDI_DMA_CONSISTENT = 0x0010 DDI_DMA_EXCLUSIVE = 0x0020 DDI_DMA_STREAMING = 0x0040 DDI_DMA_SBUS_64BIT = 0x2000 DDI_DMA_MAPPED = 0 DDI_DMA_MAPOK = 0 DDI_DMA_PARTIAL_MAP = 1 DDI_DMA_DONE = 2 DDI_DMA_NORESOURCES = -1 DDI_DMA_NOMAPPING = -2 DDI_DMA_TOOBIG = -3 DDI_DMA_TOOSMALL = -4 DDI_DMA_LOCKED = -5 DDI_DMA_BADLIMITS = -6 DDI_DMA_STALE = -7 DDI_DMA_BADATTR = -8 DDI_DMA_INUSE = -9 DDI_DMA_SYNC_FORDEV = 0x0 DDI_DMA_SYNC_FORCPU = 0x1 DDI_DMA_SYNC_FORKERNEL = 0x2 # Included from sys/ddimapreq.h # Included from sys/mman.h PROT_READ = 0x1 PROT_WRITE = 0x2 PROT_EXEC = 0x4 PROT_USER = 0x8 PROT_ZFOD = (PROT_READ | PROT_WRITE | PROT_EXEC | PROT_USER) PROT_ALL = (PROT_READ | PROT_WRITE | PROT_EXEC | PROT_USER) PROT_NONE = 0x0 MAP_SHARED = 1 MAP_PRIVATE = 2 MAP_TYPE = 0xf MAP_FIXED = 0x10 MAP_NORESERVE = 0x40 MAP_ANON = 0x100 MAP_ANONYMOUS = MAP_ANON MAP_RENAME = 0x20 PROC_TEXT = (PROT_EXEC | PROT_READ) PROC_DATA = (PROT_READ | PROT_WRITE | PROT_EXEC) SHARED = 0x10 PRIVATE = 0x20 VALID_ATTR = (PROT_READ|PROT_WRITE|PROT_EXEC|SHARED|PRIVATE) PROT_EXCL = 0x20 _MAP_LOW32 = 0x80 _MAP_NEW = 0x80000000 from TYPES import * MADV_NORMAL = 0 MADV_RANDOM = 1 MADV_SEQUENTIAL = 2 MADV_WILLNEED = 3 MADV_DONTNEED = 4 MADV_FREE = 5 MS_OLDSYNC = 0x0 MS_SYNC = 0x4 MS_ASYNC = 0x1 MS_INVALIDATE = 0x2 MC_SYNC = 1 MC_LOCK = 2 MC_UNLOCK = 3 
MC_ADVISE = 4 MC_LOCKAS = 5 MC_UNLOCKAS = 6 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 DDI_MAP_VERSION = 0x0001 DDI_MF_USER_MAPPING = 0x1 DDI_MF_KERNEL_MAPPING = 0x2 DDI_MF_DEVICE_MAPPING = 0x4 DDI_ME_GENERIC = (-1) DDI_ME_UNIMPLEMENTED = (-2) DDI_ME_NORESOURCES = (-3) DDI_ME_UNSUPPORTED = (-4) DDI_ME_REGSPEC_RANGE = (-5) DDI_ME_RNUMBER_RANGE = (-6) DDI_ME_INVAL = (-7) # Included from sys/ddipropdefs.h def CELLS_1275_TO_BYTES(n): return ((n) * PROP_1275_CELL_SIZE) def BYTES_TO_1275_CELLS(n): return ((n) / PROP_1275_CELL_SIZE) PH_FROM_PROM = 0x01 DDI_PROP_SUCCESS = 0 DDI_PROP_NOT_FOUND = 1 DDI_PROP_UNDEFINED = 2 DDI_PROP_NO_MEMORY = 3 DDI_PROP_INVAL_ARG = 4 DDI_PROP_BUF_TOO_SMALL = 5 DDI_PROP_CANNOT_DECODE = 6 DDI_PROP_CANNOT_ENCODE = 7 DDI_PROP_END_OF_DATA = 8 DDI_PROP_FOUND_1275 = 255 PROP_1275_INT_SIZE = 4 DDI_PROP_DONTPASS = 0x0001 DDI_PROP_CANSLEEP = 0x0002 DDI_PROP_SYSTEM_DEF = 0x0004 DDI_PROP_NOTPROM = 0x0008 DDI_PROP_DONTSLEEP = 0x0010 DDI_PROP_STACK_CREATE = 0x0020 DDI_PROP_UNDEF_IT = 0x0040 DDI_PROP_HW_DEF = 0x0080 DDI_PROP_TYPE_INT = 0x0100 DDI_PROP_TYPE_STRING = 0x0200 DDI_PROP_TYPE_BYTE = 0x0400 DDI_PROP_TYPE_COMPOSITE = 0x0800 DDI_PROP_TYPE_ANY = (DDI_PROP_TYPE_INT | \ DDI_PROP_TYPE_STRING | \ DDI_PROP_TYPE_BYTE | \ DDI_PROP_TYPE_COMPOSITE) DDI_PROP_TYPE_MASK = (DDI_PROP_TYPE_INT | \ DDI_PROP_TYPE_STRING | \ DDI_PROP_TYPE_BYTE | \ DDI_PROP_TYPE_COMPOSITE) DDI_RELATIVE_ADDRESSING = "relative-addressing" DDI_GENERIC_ADDRESSING = "generic-addressing" # Included from sys/ddidevmap.h KMEM_PAGEABLE = 0x100 KMEM_NON_PAGEABLE = 0x200 UMEM_LOCKED = 0x400 UMEM_TRASH = 0x800 DEVMAP_OPS_REV = 1 DEVMAP_DEFAULTS = 0x00 DEVMAP_MAPPING_INVALID = 0x01 DEVMAP_ALLOW_REMAP = 0x02 DEVMAP_USE_PAGESIZE = 0x04 DEVMAP_SETUP_FLAGS = \ (DEVMAP_MAPPING_INVALID | DEVMAP_ALLOW_REMAP | DEVMAP_USE_PAGESIZE) DEVMAP_SETUP_DONE = 0x100 DEVMAP_LOCK_INITED = 0x200 DEVMAP_FAULTING = 0x400 DEVMAP_LOCKED = 0x800 DEVMAP_FLAG_LARGE = 0x1000 DDI_UMEM_SLEEP = 0x0 DDI_UMEM_NOSLEEP = 0x01 
DDI_UMEM_PAGEABLE = 0x02 DDI_UMEM_TRASH = 0x04 DDI_UMEMLOCK_READ = 0x01 DDI_UMEMLOCK_WRITE = 0x02 # Included from sys/nexusdefs.h # Included from sys/nexusintr.h BUSO_REV = 4 BUSO_REV_3 = 3 BUSO_REV_4 = 4 DEVO_REV = 3 CB_REV = 1 DDI_IDENTIFIED = (0) DDI_NOT_IDENTIFIED = (-1) DDI_PROBE_FAILURE = ENXIO DDI_PROBE_DONTCARE = 0 DDI_PROBE_PARTIAL = 1 DDI_PROBE_SUCCESS = 2 MAPDEV_REV = 1 from TYPES import * D_NEW = 0x00 _D_OLD = 0x01 D_TAPE = 0x08 D_MTSAFE = 0x0020 _D_QNEXTLESS = 0x0040 _D_MTOCSHARED = 0x0080 D_MTOCEXCL = 0x0800 D_MTPUTSHARED = 0x1000 D_MTPERQ = 0x2000 D_MTQPAIR = 0x4000 D_MTPERMOD = 0x6000 D_MTOUTPERIM = 0x8000 _D_MTCBSHARED = 0x10000 D_MTINNER_MOD = (D_MTPUTSHARED|_D_MTOCSHARED|_D_MTCBSHARED) D_MTOUTER_MOD = (D_MTOCEXCL) D_MP = D_MTSAFE D_64BIT = 0x200 D_SYNCSTR = 0x400 D_DEVMAP = 0x100 D_HOTPLUG = 0x4 SNDZERO = 0x001 SNDPIPE = 0x002 RNORM = 0x000 RMSGD = 0x001 RMSGN = 0x002 RMODEMASK = 0x003 RPROTDAT = 0x004 RPROTDIS = 0x008 RPROTNORM = 0x010 RPROTMASK = 0x01c RFLUSHMASK = 0x020 RFLUSHPCPROT = 0x020 RERRNORM = 0x001 RERRNONPERSIST = 0x002 RERRMASK = (RERRNORM|RERRNONPERSIST) WERRNORM = 0x004 WERRNONPERSIST = 0x008 WERRMASK = (WERRNORM|WERRNONPERSIST) FLUSHR = 0x01 FLUSHW = 0x02 FLUSHRW = 0x03 FLUSHBAND = 0x04 MAPINOK = 0x01 NOMAPIN = 0x02 REMAPOK = 0x04 NOREMAP = 0x08 S_INPUT = 0x0001 S_HIPRI = 0x0002 S_OUTPUT = 0x0004 S_MSG = 0x0008 S_ERROR = 0x0010 S_HANGUP = 0x0020 S_RDNORM = 0x0040 S_WRNORM = S_OUTPUT S_RDBAND = 0x0080 S_WRBAND = 0x0100 S_BANDURG = 0x0200 RS_HIPRI = 0x01 STRUIO_POSTPONE = 0x08 STRUIO_MAPIN = 0x10 MSG_HIPRI = 0x01 MSG_ANY = 0x02 MSG_BAND = 0x04 MSG_XPG4 = 0x08 MSG_IPEEK = 0x10 MSG_DISCARDTAIL = 0x20 MSG_HOLDSIG = 0x40 MSG_IGNERROR = 0x80 MSG_DELAYERROR = 0x100 MSG_IGNFLOW = 0x200 MSG_NOMARK = 0x400 MORECTL = 1 MOREDATA = 2 MUXID_ALL = (-1) ANYMARK = 0x01 LASTMARK = 0x02 _INFTIM = -1 INFTIM = _INFTIM
mit
MagicDevTeam/android_external_skia
tools/svndiff.py
18
13076
#!/usr/bin/python ''' Copyright 2012 Google Inc. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. ''' ''' Generates a visual diff of all pending changes in the local SVN (or git!) checkout. Launch with --help to see more information. TODO(epoger): Now that this tool supports either git or svn, rename it. TODO(epoger): Fix indentation in this file (2-space indents, not 4-space). ''' # common Python modules import optparse import os import re import shutil import subprocess import sys import tempfile import urllib2 # Imports from within Skia # # We need to add the 'gm' directory, so that we can import gm_json.py within # that directory. That script allows us to parse the actual-results.json file # written out by the GM tool. # Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end* # so any dirs that are already in the PYTHONPATH will be preferred. # # This assumes that the 'gm' directory has been checked out as a sibling of # the 'tools' directory containing this script, which will be the case if # 'trunk' was checked out as a single unit. GM_DIRECTORY = os.path.realpath( os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm')) if GM_DIRECTORY not in sys.path: sys.path.append(GM_DIRECTORY) import gm_json import jsondiff import svn USAGE_STRING = 'Usage: %s [options]' HELP_STRING = ''' Generates a visual diff of all pending changes in the local SVN/git checkout. This includes a list of all files that have been added, deleted, or modified (as far as SVN/git knows about). For any image modifications, pixel diffs will be generated. ''' IMAGE_FILENAME_RE = re.compile(gm_json.IMAGE_FILENAME_PATTERN) TRUNK_PATH = os.path.join(os.path.dirname(__file__), os.pardir) OPTION_DEST_DIR = '--dest-dir' OPTION_PATH_TO_SKDIFF = '--path-to-skdiff' OPTION_SOURCE_DIR = '--source-dir' def RunCommand(command): """Run a command, raising an exception if it fails. 
@param command the command as a single string """ print 'running command [%s]...' % command retval = os.system(command) if retval is not 0: raise Exception('command [%s] failed' % command) def FindPathToSkDiff(user_set_path=None): """Return path to an existing skdiff binary, or raise an exception if we cannot find one. @param user_set_path if None, the user did not specify a path, so look in some likely places; otherwise, only check at this path """ if user_set_path is not None: if os.path.isfile(user_set_path): return user_set_path raise Exception('unable to find skdiff at user-set path %s' % user_set_path) trunk_path = os.path.join(os.path.dirname(__file__), os.pardir) possible_paths = [os.path.join(trunk_path, 'out', 'Release', 'skdiff'), os.path.join(trunk_path, 'out', 'Debug', 'skdiff')] for try_path in possible_paths: if os.path.isfile(try_path): return try_path raise Exception('cannot find skdiff in paths %s; maybe you need to ' 'specify the %s option or build skdiff?' % ( possible_paths, OPTION_PATH_TO_SKDIFF)) def _DownloadUrlToFile(source_url, dest_path): """Download source_url, and save its contents to dest_path. Raises an exception if there were any problems.""" try: reader = urllib2.urlopen(source_url) writer = open(dest_path, 'wb') writer.write(reader.read()) writer.close() except BaseException as e: raise Exception( '%s: unable to download source_url %s to dest_path %s' % ( e, source_url, dest_path)) def _CreateGSUrl(imagename, hash_type, hash_digest): """Return the HTTP URL we can use to download this particular version of the actually-generated GM image with this imagename. imagename: name of the test image, e.g. 'perlinnoise_msaa4.png' hash_type: string indicating the hash type used to generate hash_digest, e.g. 
gm_json.JSONKEY_HASHTYPE_BITMAP_64BITMD5 hash_digest: the hash digest of the image to retrieve """ return gm_json.CreateGmActualUrl( test_name=IMAGE_FILENAME_RE.match(imagename).group(1), hash_type=hash_type, hash_digest=hash_digest) def _CallJsonDiff(old_json_path, new_json_path, old_flattened_dir, new_flattened_dir, filename_prefix): """Using jsondiff.py, write the images that differ between two GM expectations summary files (old and new) into old_flattened_dir and new_flattened_dir. filename_prefix: prefix to prepend to filenames of all images we write into the flattened directories """ json_differ = jsondiff.GMDiffer() diff_dict = json_differ.GenerateDiffDict(oldfile=old_json_path, newfile=new_json_path) print 'Downloading %d before-and-after image pairs...' % len(diff_dict) for (imagename, results) in diff_dict.iteritems(): # TODO(epoger): Currently, this assumes that all images have been # checksummed using gm_json.JSONKEY_HASHTYPE_BITMAP_64BITMD5 old_checksum = results['old'] if old_checksum: old_image_url = _CreateGSUrl( imagename=imagename, hash_type=gm_json.JSONKEY_HASHTYPE_BITMAP_64BITMD5, hash_digest=old_checksum) _DownloadUrlToFile( source_url=old_image_url, dest_path=os.path.join(old_flattened_dir, filename_prefix + imagename)) new_checksum = results['new'] if new_checksum: new_image_url = _CreateGSUrl( imagename=imagename, hash_type=gm_json.JSONKEY_HASHTYPE_BITMAP_64BITMD5, hash_digest=new_checksum) _DownloadUrlToFile( source_url=new_image_url, dest_path=os.path.join(new_flattened_dir, filename_prefix + imagename)) def _RunCommand(args): """Run a command (from self._directory) and return stdout as a single string. 
@param args a list of arguments """ proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout, stderr) = proc.communicate() if proc.returncode is not 0: raise Exception('command "%s" failed: %s' % (args, stderr)) return stdout def _GitGetModifiedFiles(): """Returns a list of locally modified files within the current working dir. TODO(epoger): Move this into a git utility package? """ return _RunCommand(['git', 'ls-files', '-m']).splitlines() def _GitExportBaseVersionOfFile(file_within_repo, dest_path): """Retrieves a copy of the base version of a file within the repository. @param file_within_repo path to the file within the repo whose base version you wish to obtain @param dest_path destination to which to write the base content TODO(epoger): Move this into a git utility package? """ # TODO(epoger): Replace use of "git show" command with lower-level git # commands? senorblanco points out that "git show" is a "porcelain" # command, intended for human use, as opposed to the "plumbing" commands # generally more suitable for scripting. (See # http://git-scm.com/book/en/Git-Internals-Plumbing-and-Porcelain ) # # For now, though, "git show" is the most straightforward implementation # I could come up with. I tried using "git cat-file", but I had trouble # getting it to work as desired. args = ['git', 'show', os.path.join('HEAD:.', file_within_repo)] with open(dest_path, 'wb') as outfile: proc = subprocess.Popen(args, stdout=outfile) proc.communicate() if proc.returncode is not 0: raise Exception('command "%s" failed' % args) def SvnDiff(path_to_skdiff, dest_dir, source_dir): """Generates a visual diff of all pending changes in source_dir. @param path_to_skdiff @param dest_dir existing directory within which to write results @param source_dir """ # Validate parameters, filling in default values if necessary and possible. 
path_to_skdiff = os.path.abspath(FindPathToSkDiff(path_to_skdiff)) if not dest_dir: dest_dir = tempfile.mkdtemp() dest_dir = os.path.abspath(dest_dir) os.chdir(source_dir) using_svn = os.path.isdir('.svn') # Prepare temporary directories. modified_flattened_dir = os.path.join(dest_dir, 'modified_flattened') original_flattened_dir = os.path.join(dest_dir, 'original_flattened') diff_dir = os.path.join(dest_dir, 'diffs') for dir in [modified_flattened_dir, original_flattened_dir, diff_dir] : shutil.rmtree(dir, ignore_errors=True) os.mkdir(dir) # Get a list of all locally modified (including added/deleted) files, # descending subdirectories. if using_svn: svn_repo = svn.Svn('.') modified_file_paths = svn_repo.GetFilesWithStatus( svn.STATUS_ADDED | svn.STATUS_DELETED | svn.STATUS_MODIFIED) else: modified_file_paths = _GitGetModifiedFiles() # For each modified file: # 1. copy its current contents into modified_flattened_dir # 2. copy its original contents into original_flattened_dir for modified_file_path in modified_file_paths: if modified_file_path.endswith('.json'): # Special handling for JSON files, in the hopes that they # contain GM result summaries. (_unused, original_file_path) = tempfile.mkstemp() if using_svn: svn_repo.ExportBaseVersionOfFile( modified_file_path, original_file_path) else: _GitExportBaseVersionOfFile( modified_file_path, original_file_path) platform_prefix = re.sub(os.sep, '__', os.path.dirname(modified_file_path)) + '__' _CallJsonDiff(old_json_path=original_file_path, new_json_path=modified_file_path, old_flattened_dir=original_flattened_dir, new_flattened_dir=modified_flattened_dir, filename_prefix=platform_prefix) os.remove(original_file_path) else: dest_filename = re.sub(os.sep, '__', modified_file_path) # If the file had STATUS_DELETED, it won't exist anymore... 
if os.path.isfile(modified_file_path): shutil.copyfile(modified_file_path, os.path.join(modified_flattened_dir, dest_filename)) if using_svn: svn_repo.ExportBaseVersionOfFile( modified_file_path, os.path.join(original_flattened_dir, dest_filename)) else: _GitExportBaseVersionOfFile( modified_file_path, os.path.join(original_flattened_dir, dest_filename)) # Run skdiff: compare original_flattened_dir against modified_flattened_dir RunCommand('%s %s %s %s' % (path_to_skdiff, original_flattened_dir, modified_flattened_dir, diff_dir)) print '\nskdiff results are ready in file://%s/index.html' % diff_dir def RaiseUsageException(): raise Exception('%s\nRun with --help for more detail.' % ( USAGE_STRING % __file__)) def Main(options, args): """Allow other scripts to call this script with fake command-line args. """ num_args = len(args) if num_args != 0: RaiseUsageException() SvnDiff(path_to_skdiff=options.path_to_skdiff, dest_dir=options.dest_dir, source_dir=options.source_dir) if __name__ == '__main__': parser = optparse.OptionParser(USAGE_STRING % '%prog' + HELP_STRING) parser.add_option(OPTION_DEST_DIR, action='store', type='string', default=None, help='existing directory within which to write results; ' 'if not set, will create a temporary directory which ' 'will remain in place after this script completes') parser.add_option(OPTION_PATH_TO_SKDIFF, action='store', type='string', default=None, help='path to already-built skdiff tool; if not set, ' 'will search for it in typical directories near this ' 'script') parser.add_option(OPTION_SOURCE_DIR, action='store', type='string', default=os.path.join('expectations', 'gm'), help='root directory within which to compare all ' + 'files; defaults to "%default"') (options, args) = parser.parse_args() Main(options, args)
bsd-3-clause
jrichte43/ProjectEuler
Problem-0089/solutions.py
1
1879
__problem_title__ = "Roman numerals" __problem_url___ = "https://projecteuler.net/problem=89" __problem_description__ = "For a number written in Roman numerals to be considered valid there " \ "are basic rules which must be followed. Even though the rules allow " \ "some numbers to be expressed in more than one way there is always a " \ ""best" way of writing a particular number. For example, it would " \ "appear that there are at least six ways of writing the number " \ "sixteen: IIIIIIIIIIIIIIII VIIIIIIIIIII VVIIIIII XIIIIII VVVI XVI " \ "However, according to the rules only and are valid, and the last " \ "example is considered to be the most efficient, as it uses the least " \ "number of numerals. The 11K text file, (right click and 'Save " \ "Link/Target As...'), contains one thousand numbers written in valid, " \ "but not necessarily minimal, Roman numerals; see for the definitive " \ "rules for this problem. Find the number of characters saved by " \ "writing each of these in their minimal form. Note: You can assume " \ "that all the Roman numerals in the file contain no more than four " \ "consecutive identical units." import timeit class Solution(): @staticmethod def solution1(): pass @staticmethod def time_solutions(): setup = 'from __main__ import Solution' print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1)) if __name__ == '__main__': s = Solution() print(s.solution1()) s.time_solutions()
gpl-3.0
BruceDai/crosswalk-test-suite
webapi/tct-csp-w3c-tests/csp-py/csp_script-src_self_unsafe-inline.py
30
3347
def main(request, response): import simplejson as json f = file('config.json') source = f.read() s = json.JSONDecoder().decode(source) url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1]) url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0]) _CSP = "script-src 'self' 'unsafe-inline'" response.headers.set("Content-Security-Policy", _CSP) response.headers.set("X-Content-Security-Policy", _CSP) response.headers.set("X-WebKit-CSP", _CSP) return """<!DOCTYPE html> <!-- Copyright (c) 2013 Intel Corporation. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of works must retain the original copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the original copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this work without specific prior written permission. THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
Authors: Hao, Yunfei <yunfeix.hao@intel.com> --> <html> <head> <title>CSP Test: csp_script-src_self_unsafe-inline</title> <link rel="author" title="Intel" href="http://www.intel.com/"/> <link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#script-src"/> <meta name="flags" content=""/> <meta name="assert" content="script-src 'self' 'unsafe-inline'"/> <meta charset="utf-8"/> <script src="../resources/testharness.js"></script> <script src="../resources/testharnessreport.js"></script> </head> <body> <div id="log"></div> <script src="support/csp.js"></script> <script src='""" + url1 + """/tests/csp/support/test.js'></script> <script> test(function() { var d = document.getElementById("log"); assert_true(typeof d == "object", "HTML div element is of type object"); assert_true(d.toString() == "[object HTMLDivElement]", "HTML div element is of [object HTMLAudioElement]"); }, document.title); test(function() { assert_equals(X, 10, "X is 10"); assert_equals(Y, 27, "Y is X+17"); }, document.title + "_internal"); test(function() { assert_true(typeof getVideoURI == "undefined", "Function getVideoURI is undefined"); }, document.title + "_external"); </script> </body> </html> """
bsd-3-clause
XuanyuZhao1984/MeanJS_train1
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/common.py
1292
20063
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from __future__ import with_statement import collections import errno import filecmp import os.path import re import tempfile import sys # A minimal memoizing decorator. It'll blow up if the args aren't immutable, # among other "problems". class memoize(object): def __init__(self, func): self.func = func self.cache = {} def __call__(self, *args): try: return self.cache[args] except KeyError: result = self.func(*args) self.cache[args] = result return result class GypError(Exception): """Error class representing an error, which is to be presented to the user. The main entry point will catch and display this. """ pass def ExceptionAppend(e, msg): """Append a message to the given exception's message.""" if not e.args: e.args = (msg,) elif len(e.args) == 1: e.args = (str(e.args[0]) + ' ' + msg,) else: e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:] def FindQualifiedTargets(target, qualified_list): """ Given a list of qualified targets, return the qualified targets for the specified |target|. """ return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target] def ParseQualifiedTarget(target): # Splits a qualified target into a build file, target name and toolset. # NOTE: rsplit is used to disambiguate the Windows drive letter separator. target_split = target.rsplit(':', 1) if len(target_split) == 2: [build_file, target] = target_split else: build_file = None target_split = target.rsplit('#', 1) if len(target_split) == 2: [target, toolset] = target_split else: toolset = None return [build_file, target, toolset] def ResolveTarget(build_file, target, toolset): # This function resolves a target into a canonical form: # - a fully defined build file, either absolute or relative to the current # directory # - a target name # - a toolset # # build_file is the file relative to which 'target' is defined. 
# target is the qualified target. # toolset is the default toolset for that target. [parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target) if parsed_build_file: if build_file: # If a relative path, parsed_build_file is relative to the directory # containing build_file. If build_file is not in the current directory, # parsed_build_file is not a usable path as-is. Resolve it by # interpreting it as relative to build_file. If parsed_build_file is # absolute, it is usable as a path regardless of the current directory, # and os.path.join will return it as-is. build_file = os.path.normpath(os.path.join(os.path.dirname(build_file), parsed_build_file)) # Further (to handle cases like ../cwd), make it relative to cwd) if not os.path.isabs(build_file): build_file = RelativePath(build_file, '.') else: build_file = parsed_build_file if parsed_toolset: toolset = parsed_toolset return [build_file, target, toolset] def BuildFile(fully_qualified_target): # Extracts the build file from the fully qualified target. return ParseQualifiedTarget(fully_qualified_target)[0] def GetEnvironFallback(var_list, default): """Look up a key in the environment, with fallback to secondary keys and finally falling back to a default value.""" for var in var_list: if var in os.environ: return os.environ[var] return default def QualifiedTarget(build_file, target, toolset): # "Qualified" means the file that a target was defined in and the target # name, separated by a colon, suffixed by a # and the toolset name: # /path/to/file.gyp:target_name#toolset fully_qualified = build_file + ':' + target if toolset: fully_qualified = fully_qualified + '#' + toolset return fully_qualified @memoize def RelativePath(path, relative_to, follow_path_symlink=True): # Assuming both |path| and |relative_to| are relative to the current # directory, returns a relative path that identifies path relative to # relative_to. 
# If |follow_symlink_path| is true (default) and |path| is a symlink, then # this method returns a path to the real file represented by |path|. If it is # false, this method returns a path to the symlink. If |path| is not a # symlink, this option has no effect. # Convert to normalized (and therefore absolute paths). if follow_path_symlink: path = os.path.realpath(path) else: path = os.path.abspath(path) relative_to = os.path.realpath(relative_to) # On Windows, we can't create a relative path to a different drive, so just # use the absolute path. if sys.platform == 'win32': if (os.path.splitdrive(path)[0].lower() != os.path.splitdrive(relative_to)[0].lower()): return path # Split the paths into components. path_split = path.split(os.path.sep) relative_to_split = relative_to.split(os.path.sep) # Determine how much of the prefix the two paths share. prefix_len = len(os.path.commonprefix([path_split, relative_to_split])) # Put enough ".." components to back up out of relative_to to the common # prefix, and then append the part of path_split after the common prefix. relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \ path_split[prefix_len:] if len(relative_split) == 0: # The paths were the same. return '' # Turn it back into a string and we're done. return os.path.join(*relative_split) @memoize def InvertRelativePath(path, toplevel_dir=None): """Given a path like foo/bar that is relative to toplevel_dir, return the inverse relative path back to the toplevel_dir. E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path))) should always produce the empty string, unless the path contains symlinks. """ if not path: return path toplevel_dir = '.' if toplevel_dir is None else toplevel_dir return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path)) def FixIfRelativePath(path, relative_to): # Like RelativePath but returns |path| unchanged if it is absolute. 
if os.path.isabs(path): return path return RelativePath(path, relative_to) def UnrelativePath(path, relative_to): # Assuming that |relative_to| is relative to the current directory, and |path| # is a path relative to the dirname of |relative_to|, returns a path that # identifies |path| relative to the current directory. rel_dir = os.path.dirname(relative_to) return os.path.normpath(os.path.join(rel_dir, path)) # re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at # http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02 # and the documentation for various shells. # _quote is a pattern that should match any argument that needs to be quoted # with double-quotes by EncodePOSIXShellArgument. It matches the following # characters appearing anywhere in an argument: # \t, \n, space parameter separators # # comments # $ expansions (quoted to always expand within one argument) # % called out by IEEE 1003.1 XCU.2.2 # & job control # ' quoting # (, ) subshell execution # *, ?, [ pathname expansion # ; command delimiter # <, >, | redirection # = assignment # {, } brace expansion (bash) # ~ tilde expansion # It also matches the empty string, because "" (or '') is the only way to # represent an empty string literal argument to a POSIX shell. # # This does not match the characters in _escape, because those need to be # backslash-escaped regardless of whether they appear in a double-quoted # string. _quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$') # _escape is a pattern that should match any character that needs to be # escaped with a backslash, whether or not the argument matched the _quote # pattern. _escape is used with re.sub to backslash anything in _escape's # first match group, hence the (parentheses) in the regular expression. 
# # _escape matches the following characters appearing anywhere in an argument: # " to prevent POSIX shells from interpreting this character for quoting # \ to prevent POSIX shells from interpreting this character for escaping # ` to prevent POSIX shells from interpreting this character for command # substitution # Missing from this list is $, because the desired behavior of # EncodePOSIXShellArgument is to permit parameter (variable) expansion. # # Also missing from this list is !, which bash will interpret as the history # expansion character when history is enabled. bash does not enable history # by default in non-interactive shells, so this is not thought to be a problem. # ! was omitted from this list because bash interprets "\!" as a literal string # including the backslash character (avoiding history expansion but retaining # the backslash), which would not be correct for argument encoding. Handling # this case properly would also be problematic because bash allows the history # character to be changed with the histchars shell variable. Fortunately, # as history is not enabled in non-interactive shells and # EncodePOSIXShellArgument is only expected to encode for non-interactive # shells, there is no room for error here by ignoring !. _escape = re.compile(r'(["\\`])') def EncodePOSIXShellArgument(argument): """Encodes |argument| suitably for consumption by POSIX shells. argument may be quoted and escaped as necessary to ensure that POSIX shells treat the returned value as a literal representing the argument passed to this function. Parameter (variable) expansions beginning with $ are allowed to remain intact without escaping the $, to allow the argument to contain references to variables to be expanded by the shell. 
""" if not isinstance(argument, str): argument = str(argument) if _quote.search(argument): quote = '"' else: quote = '' encoded = quote + re.sub(_escape, r'\\\1', argument) + quote return encoded def EncodePOSIXShellList(list): """Encodes |list| suitably for consumption by POSIX shells. Returns EncodePOSIXShellArgument for each item in list, and joins them together using the space character as an argument separator. """ encoded_arguments = [] for argument in list: encoded_arguments.append(EncodePOSIXShellArgument(argument)) return ' '.join(encoded_arguments) def DeepDependencyTargets(target_dicts, roots): """Returns the recursive list of target dependencies.""" dependencies = set() pending = set(roots) while pending: # Pluck out one. r = pending.pop() # Skip if visited already. if r in dependencies: continue # Add it. dependencies.add(r) # Add its children. spec = target_dicts[r] pending.update(set(spec.get('dependencies', []))) pending.update(set(spec.get('dependencies_original', []))) return list(dependencies - set(roots)) def BuildFileTargets(target_list, build_file): """From a target_list, returns the subset from the specified build_file. """ return [p for p in target_list if BuildFile(p) == build_file] def AllTargets(target_list, target_dicts, build_file): """Returns all targets (direct and dependencies) for the specified build_file. """ bftargets = BuildFileTargets(target_list, build_file) deptargets = DeepDependencyTargets(target_dicts, bftargets) return bftargets + deptargets def WriteOnDiff(filename): """Write to a file only if the new contents differ. Arguments: filename: name of the file to potentially write to. Returns: A file like object which will write to temporary file and only overwrite the target if it differs (on close). """ class Writer(object): """Wrapper around file which only covers the target if it differs.""" def __init__(self): # Pick temporary file. 
tmp_fd, self.tmp_path = tempfile.mkstemp( suffix='.tmp', prefix=os.path.split(filename)[1] + '.gyp.', dir=os.path.split(filename)[0]) try: self.tmp_file = os.fdopen(tmp_fd, 'wb') except Exception: # Don't leave turds behind. os.unlink(self.tmp_path) raise def __getattr__(self, attrname): # Delegate everything else to self.tmp_file return getattr(self.tmp_file, attrname) def close(self): try: # Close tmp file. self.tmp_file.close() # Determine if different. same = False try: same = filecmp.cmp(self.tmp_path, filename, False) except OSError, e: if e.errno != errno.ENOENT: raise if same: # The new file is identical to the old one, just get rid of the new # one. os.unlink(self.tmp_path) else: # The new file is different from the old one, or there is no old one. # Rename the new file to the permanent name. # # tempfile.mkstemp uses an overly restrictive mode, resulting in a # file that can only be read by the owner, regardless of the umask. # There's no reason to not respect the umask here, which means that # an extra hoop is required to fetch it and reset the new file's mode. # # No way to get the umask without setting a new one? Set a safe one # and then set it back to the old value. umask = os.umask(077) os.umask(umask) os.chmod(self.tmp_path, 0666 & ~umask) if sys.platform == 'win32' and os.path.exists(filename): # NOTE: on windows (but not cygwin) rename will not replace an # existing file, so it must be preceded with a remove. Sadly there # is no way to make the switch atomic. os.remove(filename) os.rename(self.tmp_path, filename) except Exception: # Don't leave turds behind. 
os.unlink(self.tmp_path) raise return Writer() def EnsureDirExists(path): """Make sure the directory for |path| exists.""" try: os.makedirs(os.path.dirname(path)) except OSError: pass def GetFlavor(params): """Returns |params.flavor| if it's set, the system's default flavor else.""" flavors = { 'cygwin': 'win', 'win32': 'win', 'darwin': 'mac', } if 'flavor' in params: return params['flavor'] if sys.platform in flavors: return flavors[sys.platform] if sys.platform.startswith('sunos'): return 'solaris' if sys.platform.startswith('freebsd'): return 'freebsd' if sys.platform.startswith('openbsd'): return 'openbsd' if sys.platform.startswith('netbsd'): return 'netbsd' if sys.platform.startswith('aix'): return 'aix' return 'linux' def CopyTool(flavor, out_path): """Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it to |out_path|.""" # aix and solaris just need flock emulation. mac and win use more complicated # support scripts. prefix = { 'aix': 'flock', 'solaris': 'flock', 'mac': 'mac', 'win': 'win' }.get(flavor, None) if not prefix: return # Slurp input file. source_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix) with open(source_path) as source_file: source = source_file.readlines() # Add header and write it out. tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix) with open(tool_path, 'w') as tool_file: tool_file.write( ''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:])) # Make file executable. os.chmod(tool_path, 0755) # From Alex Martelli, # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560 # ASPN: Python Cookbook: Remove duplicates from a sequence # First comment, dated 2001/10/13. # (Also in the printed Python Cookbook.) 
def uniquer(seq, idfun=None): if idfun is None: idfun = lambda x: x seen = {} result = [] for item in seq: marker = idfun(item) if marker in seen: continue seen[marker] = 1 result.append(item) return result # Based on http://code.activestate.com/recipes/576694/. class OrderedSet(collections.MutableSet): def __init__(self, iterable=None): self.end = end = [] end += [None, end, end] # sentinel node for doubly linked list self.map = {} # key --> [key, prev, next] if iterable is not None: self |= iterable def __len__(self): return len(self.map) def __contains__(self, key): return key in self.map def add(self, key): if key not in self.map: end = self.end curr = end[1] curr[2] = end[1] = self.map[key] = [key, curr, end] def discard(self, key): if key in self.map: key, prev_item, next_item = self.map.pop(key) prev_item[2] = next_item next_item[1] = prev_item def __iter__(self): end = self.end curr = end[2] while curr is not end: yield curr[0] curr = curr[2] def __reversed__(self): end = self.end curr = end[1] while curr is not end: yield curr[0] curr = curr[1] # The second argument is an addition that causes a pylint warning. def pop(self, last=True): # pylint: disable=W0221 if not self: raise KeyError('set is empty') key = self.end[1][0] if last else self.end[2][0] self.discard(key) return key def __repr__(self): if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, list(self)) def __eq__(self, other): if isinstance(other, OrderedSet): return len(self) == len(other) and list(self) == list(other) return set(self) == set(other) # Extensions to the recipe. 
def update(self, iterable): for i in iterable: if i not in self: self.add(i) class CycleError(Exception): """An exception raised when an unexpected cycle is detected.""" def __init__(self, nodes): self.nodes = nodes def __str__(self): return 'CycleError: cycle involving: ' + str(self.nodes) def TopologicallySorted(graph, get_edges): r"""Topologically sort based on a user provided edge definition. Args: graph: A list of node names. get_edges: A function mapping from node name to a hashable collection of node names which this node has outgoing edges to. Returns: A list containing all of the node in graph in topological order. It is assumed that calling get_edges once for each node and caching is cheaper than repeatedly calling get_edges. Raises: CycleError in the event of a cycle. Example: graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'} def GetEdges(node): return re.findall(r'\$\(([^))]\)', graph[node]) print TopologicallySorted(graph.keys(), GetEdges) ==> ['a', 'c', b'] """ get_edges = memoize(get_edges) visited = set() visiting = set() ordered_nodes = [] def Visit(node): if node in visiting: raise CycleError(visiting) if node in visited: return visited.add(node) visiting.add(node) for neighbor in get_edges(node): Visit(neighbor) visiting.remove(node) ordered_nodes.insert(0, node) for node in sorted(graph): Visit(node) return ordered_nodes def CrossCompileRequested(): # TODO: figure out how to not build extra host objects in the # non-cross-compile case when this is enabled, and enable unconditionally. return (os.environ.get('GYP_CROSSCOMPILE') or os.environ.get('AR_host') or os.environ.get('CC_host') or os.environ.get('CXX_host') or os.environ.get('AR_target') or os.environ.get('CC_target') or os.environ.get('CXX_target'))
mit
joannadiong/biosig
biosig/readin.py
1
3033
import os import numpy as np import warnings def read_data(file, channels={}): """ Read in data from a data text file. Data from each channel are stored in columns. Values are tab-separated. The function requires a dictionary of channel keys and values to be specified. Channel columns are zero-indexed. Example: channels = {'force':0, 'emg':1, 'distance':2} data = read_data('V_L.txt', channel=channels) :param file: file name :type file: str :param channels: dictionary of channel keys and values :type channels: dict :return: dictionary of channel keys and values :rtype: dict """ infile = open(file, 'r') lines = infile.readlines() infile.close() data_list = [row.strip().split('\t') for row in lines] data_dict = {} if channels: for k,v in channels.items(): data_dict[k] = np.array([float(row[v]) for row in data_list]) # update dictionary to return free variables in function block using keys locals().update(data_dict) return data_dict else: warnings.warn('Dictionary of channel keys and values not specified') pass def make_time(freq, var): """ Create time (sec) based on sampling rate. :param freq: sampling rate (Hz) :type freq: int :param var: channel values :type var: ndarray :return: time (sec) :rtype: ndarray """ step = 1 / freq time = np.arange(0, len(var)/freq, step) return time def read_log(file): """ Template to read in data from a log text file. This function will require editing to customise output for each experiment. 
:param file: file name :type file: str :return: values from log file :rtype: int or float """ infile = open(file, 'r') lines = infile.readlines() infile.close() for row in lines: var = row.strip().split(' ') if var[0]=='subject' and var[1]=='number': id = var[2] elif var[0]=='transducer' and var[1]=='1' and var[2]=='calibration:': scale1 = float(var[3]) elif var[0]=='transducer' and var[1]=='2' and var[2]=='calibration:': scale2 = float(var[3]) elif var[0]=='sampling' and var[1]=='rate:': freq = int(var[2]) elif var[0]=='age:': age = int(var[1]) elif var[0]=='sex:': sex = var[1] elif var[0]=='height:': height = float(var[1]) # meters elif var[0]=='weight:': weight = float(var[1]) # kg return id, scale1, scale2, freq, age, sex, height, weight def calibrate(data, scale, offset): """ Remove offset and calibrate raw voltage to meaningful values. :param data: uncalibrated data :type data: ndarray :param scale: calibration scale :type scale: float :param offset: calibration offset :type offset: float :return: calibrated data :rtype: ndarray """ data = (data - offset) * scale return data
gpl-3.0
xiandiancloud/edxplaltfom-xusong
cms/djangoapps/contentstore/tests/test_import_draft_order.py
13
2584
from xmodule.modulestore.xml_importer import import_from_xml
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.django import modulestore


# This test is in the CMS module because the test configuration to use a draft
# modulestore is dependent on django.
class DraftReorderTestCase(ModuleStoreTestCase):
    """Verify that draft verticals are re-imported in their original order."""

    def test_order(self):
        store = modulestore()
        imported = import_from_xml(
            store, self.user.id, 'common/test/data/', ['import_draft_order']
        )
        course_key = imported[0].id

        # The order that files are read in from the file system is not
        # guaranteed (cannot rely on alphabetical ordering, for example).
        # Therefore, there is a lot of variation in filename and desired
        # ordering so that the test reliably failed with the bug, at least
        # on Linux.
        #
        # 'a', 'b', 'c', 'd', and 'z' are all drafts, with
        # 'index_in_children_list' of 2, 4, 6, 5, and 0 respectively.
        # '5a05be9d59fc4bb79282c94c9e6b88c7' and 'second' are public
        # verticals.
        self._assert_sequential_children(
            store, course_key, '0f4f7649b10141b0bdc9922dcf94515a',
            ['z', '5a05be9d59fc4bb79282c94c9e6b88c7', 'a', 'second',
             'b', 'd', 'c'],
        )

        # Now also test that the verticals in a second sequential are correct.
        # 'asecond' and 'zsecond' are drafts with 'index_in_children_list'
        # 0 and 2, respectively.  'secondsubsection' is a public vertical.
        self._assert_sequential_children(
            store, course_key, 'secondseq',
            ['asecond', 'secondsubsection', 'zsecond'],
        )

    def _assert_sequential_children(self, store, course_key, seq_name, names):
        """Assert that the named sequential's children match ``names``, in order."""
        sequential = store.get_item(
            course_key.make_usage_key('sequential', seq_name)
        )
        children = sequential.children

        self.assertEqual(len(names), len(children))
        for index, name in enumerate(names):
            self.assertEqual(
                course_key.make_usage_key('vertical', name), children[index]
            )
agpl-3.0
JuanbingTeam/djangobbs
djangobbs/accounts/views.py
1
3219
#!/usr/bin/env python
#coding=utf-8

# Account views for djangobbs: login/logout, registration, and a CAPTCHA
# image generator.  Python 2 / old-style Django code (request.REQUEST,
# has_key, cStringIO, PIL's "Image" top-level module).

from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib import auth
from django.core.mail import send_mail
from cStringIO import StringIO
import Image, ImageDraw

from djangobbs.accounts.models import *
from djangobbs.accounts.forms import *
from djangobbs.accounts.config import *
from djangobbs.accounts import tools

def index(request, id):
    # Render the profile page for the current user.  The 'id' parameter is
    # accepted from the URLconf but not used here -- presumably intended for
    # viewing other users' profiles; verify against urls.py.
    data = {'user' : request.user }
    try:
        # Missing profiles are tolerated: the template just gets no 'profile'.
        data['profile'] = request.user.get_profile()
    except UserProfile.DoesNotExist:
        pass
    return render_to_response('accounts/user.html', data)

def login(request, template = 'accounts/login.html'):
    # Only process a POST when a CAPTCHA code was issued earlier (see
    # validate() below, which stores it in the session).
    if request.session.has_key('validation') and request.method == 'POST':
        form = LoginForm(request.POST)
        # Hand the expected CAPTCHA code to the form, then consume it so the
        # same code cannot be replayed.
        form.code = request.session['validation']
        request.session['validation'] = ""
        form.hasCookie = request.session.test_cookie_worked()
        if form.is_valid():
            form.cleaned_data['user'].user = tools.authenticate(username=form.cleaned_data['user'].user.username, password=form.cleaned_data['password'])
            tools.login(request, form.cleaned_data['user'])
            # Without "remember me", expire the session at browser close.
            if not form.cleaned_data['saveLogin']:
                request.session.set_expiry(0)
            if request.REQUEST.has_key('next'):
                next = request.REQUEST['next']
            else:
                next = '/'
            return HttpResponseRedirect(next)
    else:
        form = LoginForm()
    # Issue the test cookie so the next POST can verify cookie support.
    request.session.set_test_cookie()
    return render_to_response(template, {'form': form})

def logout(request, template = 'accounts/logout.html'):
    tools.logout(request)
    return render_to_response(template, request)

def register(request, autoActive=True):
    # First visit (or missing checkbox): show the EULA page instead.
    if not request.POST.has_key('accept_eula'):
        return render_to_response('accounts/eula.html', {'request':request})
    form = RegisterForm(request.POST)
    form.code = request.session['validation']
    request.session['validation'] = ""
    form.hasCookie = request.session.test_cookie_worked()
    if form.is_valid():
        # NOTE(review): 'next' is never defined in this function, so a valid
        # form raises NameError here.  The user is also never created --
        # this view looks unfinished; compare with login() above.
        return HttpResponseRedirect(next)
    else:
        # NOTE(review): rebinding a fresh RegisterForm discards the
        # validation errors the user should see; confirm intent.
        form = RegisterForm()
    request.session.set_test_cookie()
    # NOTE(review): empty template name '' will raise TemplateDoesNotExist.
    return render_to_response('', {'form': form} )

@login_required
def password(request):
    # Placeholder: change-password view not implemented yet.
    pass

def resetPassword(request):
    # Placeholder: reset-password view not implemented yet.
    pass

def validate(request, length = 5):
    # Generate a CAPTCHA: a random code stored in the session, rendered onto
    # a background image and returned as a JPEG response.
    # The alphabet omits easily-confused glyphs (I, O, Z, ...).
    password = User.objects.make_random_password(length, allowed_chars='ABCDEFGHJKLMNPQRUSTUVWXY')
    request.session['validation'] = password
    # Copy so the shared background template is never mutated.
    img = VALIDATE_IMAGE_BACK_GROUND.copy()
    draw = ImageDraw.Draw(img)
    size = draw.textsize(password, font = VALIDATE_IMAGE_FONT)
    draw.text((0, 0), password, fill=VALIDATE_IMAGE_FORE_GROUND, font = VALIDATE_IMAGE_FONT)
    # Crop to exactly the rendered text.
    img = img.crop((0, 0, size[0], size[1]))
    buf = StringIO()
    img.save(buf, "jpeg")
    result = HttpResponse(mimetype='Image/jpeg')
    result.write(buf.getvalue())
    buf.close()
    return result
apache-2.0
davidsminor/cortex
test/IECore/AttributeStateTest.py
12
2719
########################################################################## # # Copyright (c) 2008-2011, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#
##########################################################################

import os
import unittest

import IECore


class AttributeStateTest( unittest.TestCase ) :
	"""Tests for IECore.AttributeState copying, dict construction and hashing."""

	def testCopy( self ) :

		original = IECore.AttributeState()
		original.attributes["test"] = IECore.FloatData( 10 )

		duplicate = original.copy()
		self.assertEqual( duplicate, original )

	def testConstructFromDict( self ) :

		state = IECore.AttributeState( {
			"a" : IECore.StringData( "a" ),
			"b" : IECore.IntData( 10 ),
		} )

		self.assertEqual( len( state.attributes ), 2 )
		self.assertEqual( state.attributes["a"], IECore.StringData( "a" ) )
		self.assertEqual( state.attributes["b"], IECore.IntData( 10 ) )

	def testHash( self ) :

		first = IECore.AttributeState()
		second = IECore.AttributeState()
		self.assertEqual( first.hash(), second.hash() )

		# Diverge the two states, then bring them back in sync.
		first.attributes["a"] = IECore.StringData( "a" )
		self.assertNotEqual( first.hash(), second.hash() )

		second.attributes["a"] = IECore.StringData( "a" )
		self.assertEqual( first.hash(), second.hash() )


if __name__ == "__main__":
	unittest.main()
bsd-3-clause
phobson/statsmodels
statsmodels/emplike/elanova.py
26
3734
""" This script contains empirical likelihood ANOVA. Currently the script only contains one feature that allows the user to compare means of multiple groups General References ------------------ Owen, A. B. (2001). Empirical Likelihood. Chapman and Hall. """ from __future__ import division from statsmodels.compat.python import range import numpy as np from .descriptive import _OptFuncts from scipy import optimize from scipy.stats import chi2 class _ANOVAOpt(_OptFuncts): """ Class containing functions that are optimized over when conducting ANOVA """ def _opt_common_mu(self, mu): """ Optimizes the likelihood under the null hypothesis that all groups have mean mu Parameters ---------- mu : float The common mean Returns ------- llr : float -2 times the llr ratio, which is the test statistic """ nobs = self.nobs endog = self.endog num_groups = self.num_groups endog_asarray = np.zeros((nobs, num_groups)) obs_num = 0 for arr_num in range(len(endog)): new_obs_num = obs_num + len(endog[arr_num]) endog_asarray[obs_num: new_obs_num, arr_num] = endog[arr_num] - \ mu obs_num = new_obs_num est_vect = endog_asarray wts = np.ones(est_vect.shape[0]) * (1. / (est_vect.shape[0])) eta_star = self._modif_newton(np.zeros(num_groups), est_vect, wts) denom = 1. + np.dot(eta_star, est_vect.T) self.new_weights = 1. / nobs * 1. / denom llr = np.sum(np.log(nobs * self.new_weights)) return -2 * llr class ANOVA(_ANOVAOpt): """ A class for ANOVA and comparing means. Parameters ---------- endog : list of arrays endog should be a list containing 1 dimensional arrays. Each array is the data collected from a certain group. """ def __init__(self, endog): self.endog = endog self.num_groups = len(self.endog) self.nobs = 0 for i in self.endog: self.nobs = self.nobs + len(i) def compute_ANOVA(self, mu=None, mu_start=0, return_weights=0): """ Returns -2 log likelihood, the pvalue and the maximum likelihood estimate for a common mean. 
Parameters ---------- mu : float If a mu is specified, ANOVA is conducted with mu as the common mean. Otherwise, the common mean is the maximum empirical likelihood estimate of the common mean. Default is None. mu_start : float Starting value for commean mean if specific mu is not specified. Default = 0 return_weights : bool if TRUE, returns the weights on observations that maximize the likelihood. Default is FALSE Returns ------- res: tuple The log-likelihood, p-value and estimate for the common mean. """ if mu is not None: llr = self._opt_common_mu(mu) pval = 1 - chi2.cdf(llr, self.num_groups - 1) if return_weights: return llr, pval, mu, self.new_weights else: return llr, pval, mu else: res = optimize.fmin_powell(self._opt_common_mu, mu_start, full_output=1, disp=False) llr = res[1] mu_common = float(res[0]) pval = 1 - chi2.cdf(llr, self.num_groups - 1) if return_weights: return llr, pval, mu_common, self.new_weights else: return llr, pval, mu_common
bsd-3-clause
lllucius/climacast
aniso8601/interval.py
5
5596
# -*- coding: utf-8 -*-

# Copyright (c) 2016, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license.  See the LICENSE file for details.

from datetime import datetime

from aniso8601.duration import parse_duration
from aniso8601.time import parse_datetime
from aniso8601.date import parse_date

def parse_interval(isointervalstr, intervaldelimiter='/', datetimedelimiter='T', relative=False):
    #Given a string representing an ISO 8601 interval, return a
    #tuple of datetime.date or date.datetime objects representing the beginning
    #and end of the specified interval. Valid formats are:
    #
    #<start>/<end>
    #<start>/<duration>
    #<duration>/<end>
    #
    #The <start> and <end> values can represent dates, or datetimes,
    #not times.
    #
    #The format:
    #
    #<duration>
    #
    #Is expressly not supported as there is no way to provide the additional
    #required context.
    firstpart, secondpart = isointervalstr.split(intervaldelimiter)

    if firstpart[0] == 'P':
        #<duration>/<end>
        #Notice that these are not returned 'in order' (earlier to later), this
        #is to maintain consistency with parsing <start>/<end> durations, as
        #well as making repeating interval code cleaner. Users who desire
        #durations to be in order can use the 'sorted' operator.
        duration = parse_duration(firstpart, relative=relative)

        #We need to figure out if <end> is a date, or a datetime
        if datetimedelimiter in secondpart:
            #<end> is a datetime
            enddatetime = parse_datetime(secondpart, delimiter=datetimedelimiter)
            return (enddatetime, enddatetime - duration)

        #<end> must just be a date
        enddate = parse_date(secondpart)

        #See if we need to upconvert to datetime to preserve resolution:
        #a 'T' in the duration means it carries time components.
        if datetimedelimiter in firstpart:
            return (enddate,
                    datetime.combine(enddate, datetime.min.time()) - duration)

        return (enddate, enddate - duration)

    if secondpart[0] == 'P':
        #<start>/<duration>
        duration = parse_duration(secondpart, relative=relative)

        #We need to figure out if <start> is a date, or a datetime
        if datetimedelimiter in firstpart:
            startdatetime = parse_datetime(firstpart, delimiter=datetimedelimiter)
            return (startdatetime, startdatetime + duration)

        #<start> must just be a date
        startdate = parse_date(firstpart)

        #See if we need to upconvert to datetime to preserve resolution
        if datetimedelimiter in secondpart:
            return (startdate,
                    datetime.combine(startdate, datetime.min.time()) + duration)

        return (startdate, startdate + duration)

    #<start>/<end> -- each part is independently a datetime or a plain date.
    return (_parse_interval_part(firstpart, datetimedelimiter),
            _parse_interval_part(secondpart, datetimedelimiter))

def _parse_interval_part(partstr, datetimedelimiter):
    #Parse a single interval endpoint as a datetime when it contains the
    #datetime delimiter, otherwise as a date.
    if datetimedelimiter in partstr:
        return parse_datetime(partstr, delimiter=datetimedelimiter)

    return parse_date(partstr)

def parse_repeating_interval(isointervalstr, intervaldelimiter='/', datetimedelimiter='T', relative=False):
    #Given a string representing an ISO 8601 interval repeating, return a
    #generator of datetime.date or date.datetime objects representing the
    #dates specified by the repeating interval. Valid formats are:
    #
    #Rnn/<interval>
    #R/<interval>
    if isointervalstr[0] != 'R':
        raise ValueError('ISO 8601 repeating interval must start with an R.')

    #Parse the number of iterations: 'R5/...' -> 5, bare 'R/...' -> unbounded
    iterationpart, intervalpart = isointervalstr.split(intervaldelimiter, 1)

    if len(iterationpart) > 1:
        iterations = int(iterationpart[1:])
    else:
        iterations = None

    interval = parse_interval(intervalpart, intervaldelimiter,
                              datetimedelimiter, relative=relative)

    intervaltimedelta = interval[1] - interval[0]

    #Now, build and return the generator
    if iterations is not None:
        return _date_generator(interval[0], intervaltimedelta, iterations)

    return _date_generator_unbounded(interval[0], intervaltimedelta)

def _date_generator(startdate, timedelta, iterations):
    #Yield exactly 'iterations' dates, starting at startdate and stepping
    #by timedelta.
    currentdate = startdate
    currentiteration = 0

    while currentiteration < iterations:
        yield currentdate

        #Update the values
        currentdate += timedelta
        currentiteration += 1

def _date_generator_unbounded(startdate, timedelta):
    #Yield dates endlessly, starting at startdate and stepping by timedelta.
    currentdate = startdate

    while True:
        yield currentdate

        #Update the value
        currentdate += timedelta
agpl-3.0
qedi-r/home-assistant
homeassistant/components/unifi/__init__.py
2
4420
"""Support for devices connected to UniFi POE.""" import voluptuous as vol from homeassistant.const import CONF_HOST from homeassistant.core import callback from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC import homeassistant.helpers.config_validation as cv from .config_flow import get_controller_id_from_config_entry from .const import ( ATTR_MANUFACTURER, CONF_BLOCK_CLIENT, CONF_DETECTION_TIME, CONF_DONT_TRACK_CLIENTS, CONF_DONT_TRACK_DEVICES, CONF_DONT_TRACK_WIRED_CLIENTS, CONF_SITE_ID, CONF_SSID_FILTER, DOMAIN, UNIFI_CONFIG, UNIFI_WIRELESS_CLIENTS, ) from .controller import UniFiController SAVE_DELAY = 10 STORAGE_KEY = "unifi_data" STORAGE_VERSION = 1 CONF_CONTROLLERS = "controllers" CONTROLLER_SCHEMA = vol.Schema( { vol.Required(CONF_HOST): cv.string, vol.Required(CONF_SITE_ID): cv.string, vol.Optional(CONF_BLOCK_CLIENT, default=[]): vol.All( cv.ensure_list, [cv.string] ), vol.Optional(CONF_DONT_TRACK_CLIENTS): cv.boolean, vol.Optional(CONF_DONT_TRACK_DEVICES): cv.boolean, vol.Optional(CONF_DONT_TRACK_WIRED_CLIENTS): cv.boolean, vol.Optional(CONF_DETECTION_TIME): cv.positive_int, vol.Optional(CONF_SSID_FILTER): vol.All(cv.ensure_list, [cv.string]), } ) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Required(CONF_CONTROLLERS): vol.All( cv.ensure_list, [CONTROLLER_SCHEMA] ) } ) }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass, config): """Component doesn't support configuration through configuration.yaml.""" hass.data[UNIFI_CONFIG] = [] if DOMAIN in config: hass.data[UNIFI_CONFIG] = config[DOMAIN][CONF_CONTROLLERS] hass.data[UNIFI_WIRELESS_CLIENTS] = wireless_clients = UnifiWirelessClients(hass) await wireless_clients.async_load() return True async def async_setup_entry(hass, config_entry): """Set up the UniFi component.""" if DOMAIN not in hass.data: hass.data[DOMAIN] = {} controller = UniFiController(hass, config_entry) if not await controller.async_setup(): return False controller_id = 
get_controller_id_from_config_entry(config_entry) hass.data[DOMAIN][controller_id] = controller if controller.mac is None: return True device_registry = await hass.helpers.device_registry.async_get_registry() device_registry.async_get_or_create( config_entry_id=config_entry.entry_id, connections={(CONNECTION_NETWORK_MAC, controller.mac)}, manufacturer=ATTR_MANUFACTURER, model="UniFi Controller", name="UniFi Controller", # sw_version=config.raw['swversion'], ) return True async def async_unload_entry(hass, config_entry): """Unload a config entry.""" controller_id = get_controller_id_from_config_entry(config_entry) controller = hass.data[DOMAIN].pop(controller_id) return await controller.async_reset() class UnifiWirelessClients: """Class to store clients known to be wireless. This is needed since wireless devices going offline might get marked as wired by UniFi. """ def __init__(self, hass): """Set up client storage.""" self.hass = hass self.data = {} self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY) async def async_load(self): """Load data from file.""" data = await self._store.async_load() if data is not None: self.data = data @callback def get_data(self, config_entry): """Get data related to a specific controller.""" controller_id = get_controller_id_from_config_entry(config_entry) data = self.data.get(controller_id, {"wireless_devices": []}) return set(data["wireless_devices"]) @callback def update_data(self, data, config_entry): """Update data and schedule to save to file.""" controller_id = get_controller_id_from_config_entry(config_entry) self.data[controller_id] = {"wireless_devices": list(data)} self._store.async_delay_save(self._data_to_save, SAVE_DELAY) @callback def _data_to_save(self): """Return data of UniFi wireless clients to store in a file.""" return self.data
apache-2.0
Stefanos19/prpy
src/prpy/planning/openrave.py
1
6066
#!/usr/bin/env python # Copyright (c) 2013, Carnegie Mellon University # All rights reserved. # Authors: Siddhartha Srinivasa <siddh@cs.cmu.edu> # Michael Koval <mkoval@cs.cmu.edu> # Pras Velagapudi <mkoval@cs.cmu.edu> # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # - Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # - Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # - Neither the name of Carnegie Mellon University nor the names of its # contributors may be used to endorse or promote products derived from this # software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
import numpy
import openravepy

from base import (BasePlanner, PlanningError, UnsupportedPlanningError,
                  ClonedPlanningMethod)


class OpenRAVEPlanner(BasePlanner):
    """Wrapper around a native OpenRAVE planner module (e.g. 'birrt')."""

    def __init__(self, algorithm='birrt'):
        super(OpenRAVEPlanner, self).__init__()

        self.setup = False
        self.algorithm = algorithm

        try:
            self.planner = openravepy.RaveCreatePlanner(self.env, algorithm)
        except openravepy.openrave_exception:
            raise UnsupportedPlanningError('Unable to create {:s} module.'
                                           .format(str(self)))

    def __str__(self):
        return 'OpenRAVE {0:s}'.format(self.algorithm)

    @ClonedPlanningMethod
    def PlanToConfiguration(self, robot, goal, **kw_args):
        """
        Plan to a desired configuration with OpenRAVE. This will invoke the
        OpenRAVE planner specified in the OpenRAVEPlanner constructor.
        @param robot the robot whose active DOFs will be used
        @param goal the desired robot joint configuration
        @return traj a trajectory from current configuration to specified goal
        """
        return self._Plan(robot, goal, **kw_args)

    def _Plan(self, robot, goals, maxiter=500, continue_planner=False,
              or_args=None, **kw_args):
        """
        Shared planning backend for the public PlanTo* methods.
        @param robot the robot whose active DOFs will be used
        @param goals goal configuration(s), as a single flat vector
        @param maxiter maximum number of planner iterations
        @param continue_planner reuse the previous InitPlan state if set
        @param or_args dict of extra parameters forwarded to the planner XML
        @return traj a trajectory from current configuration to a goal
        """
        # Get rid of default postprocessing.
        extraParams = ('<_postprocessing planner="">'
                       '<_nmaxiterations>0</_nmaxiterations>'
                       '</_postprocessing>')

        # Maximum planner iterations.
        extraParams += ('<_nmaxiterations>{:d}</_nmaxiterations>'
                        .format(maxiter))

        if or_args is not None:
            for key, value in or_args.iteritems():
                extraParams += '<{k:s}>{v:s}</{k:s}>'.format(k=str(key),
                                                             v=str(value))

        params = openravepy.Planner.PlannerParameters()
        params.SetRobotActiveJoints(robot)
        params.SetGoalConfig(goals)
        params.SetExtraParameters(extraParams)

        traj = openravepy.RaveCreateTrajectory(self.env, 'GenericTrajectory')

        try:
            self.env.Lock()

            # Plan.
            if (not continue_planner) or not self.setup:
                self.planner.InitPlan(robot, params)
                self.setup = True

            status = self.planner.PlanPath(traj, releasegil=True)

            from openravepy import PlannerStatus
            if status not in [PlannerStatus.HasSolution,
                              PlannerStatus.InterruptedWithSolution]:
                raise PlanningError('Planner returned with status {:s}.'
                                    .format(str(status)))
        except PlanningError:
            # Bug fix: previously the generic handler below caught the
            # PlanningError raised just above and wrapped it a second time,
            # mangling the status message. Propagate it unchanged.
            raise
        except Exception as e:
            raise PlanningError('Planning failed with error: {:s}'.format(e))
        finally:
            self.env.Unlock()

        return traj


class BiRRTPlanner(OpenRAVEPlanner):
    """Convenience subclass preconfigured for OpenRAVE's bidirectional RRT."""

    def __init__(self):
        OpenRAVEPlanner.__init__(self, algorithm='birrt')

    @ClonedPlanningMethod
    def PlanToConfiguration(self, robot, goal, **kw_args):
        """
        Plan to a desired configuration with OpenRAVE. This will invoke the
        OpenRAVE planner specified in the OpenRAVEPlanner constructor.
        @param robot the robot whose active DOFs will be used
        @param goal the desired robot joint configuration
        @return traj a trajectory from current configuration to specified goal
        """
        return self._Plan(robot, goal, **kw_args)

    @ClonedPlanningMethod
    def PlanToConfigurations(self, robot, goals, **kw_args):
        """
        Plan to one of many configuration with OpenRAVE's BiRRT planner.
        @param robot the robot whose active DOFs will be used
        @param goals a list of desired robot joint configurations
        @return traj trajectory from current configuration to one of the goals
        """
        if len(goals[0]) != len(robot.GetActiveDOFIndices()):
            raise ValueError('Goals must be same length as robot active DOFs.')

        # Serialize list of goals into a single 1D vector.
        # (This will raise ValueError if the goals are not equal length.)
        goals = numpy.ravel(numpy.vstack(goals))

        return self._Plan(robot, goals, **kw_args)
bsd-3-clause
Ritsyy/fjord
vendor/src/html5lib-python/html5lib/tests/tokenizertotree.py
4
1965
from __future__ import absolute_import, division, unicode_literals import sys import os import json import re import html5lib from . import support from . import test_tokenizer p = html5lib.HTMLParser() unnamespaceExpected = re.compile(r'^(\|\s*)<html ([^>]+)>', re.M).sub def main(out_path): if not os.path.exists(out_path): sys.stderr.write('Path %s does not exist' % out_path) sys.exit(1) for filename in support.get_data_files('tokenizer', '*.test'): run_file(filename, out_path) def run_file(filename, out_path): try: tests_data = json.load(open(filename, 'r')) except ValueError: sys.stderr.write('Failed to load %s\n' % filename) return name = os.path.splitext(os.path.split(filename)[1])[0] output_file = open(os.path.join(out_path, 'tokenizer_%s.dat' % name), 'w') if 'tests' in tests_data: for test_data in tests_data['tests']: if 'initialStates' not in test_data: test_data['initialStates'] = ['Data state'] for initial_state in test_data['initialStates']: if initial_state != 'Data state': # don't support this yet continue test = make_test(test_data) output_file.write(test) output_file.close() def make_test(test_data): if 'doubleEscaped' in test_data: test_data = test_tokenizer.unescape_test(test_data) rv = [] rv.append('#data') rv.append(test_data['input'].encode('utf8')) rv.append('#errors') tree = p.parse(test_data['input']) output = p.tree.testSerializer(tree) output = '\n'.join(('| ' + line[3:]) if line.startswith('| ') else line for line in output.split('\n')) output = unnamespaceExpected(r'\1<\2>', output) rv.append(output.encode('utf8')) rv.append('') return '\n'.join(rv) if __name__ == '__main__': main(sys.argv[1])
bsd-3-clause
fernandog/Sick-Beard
cherrypy/lib/caching.py
35
15405
"""CherryPy response-caching tools: an in-memory variant cache with
anti-stampede protection, plus the `get`, `tee_output` and `expires` hooks.

NOTE(review): Python 2 only (`except cherrypy.HTTPRedirect, x:` syntax and
`threading._Event`); do not run under Python 3 without porting.
"""
import datetime
import threading
import time

import cherrypy
from cherrypy.lib import cptools, httputil


class Cache(object):
    # Abstract cache interface; concrete backends override all four methods.
    # NOTE(review): `raise NotImplemented` raises a TypeError in practice
    # (NotImplemented is not an exception); kept as-is.

    def get(self):
        raise NotImplemented

    def put(self, obj, size):
        raise NotImplemented

    def delete(self):
        raise NotImplemented

    def clear(self):
        raise NotImplemented


# ------------------------------- Memory Cache ------------------------------- #


class AntiStampedeCache(dict):
    # A dict whose readers can block while another thread computes a value,
    # preventing many threads from recalculating the same entry at once.

    def wait(self, key, timeout=5, debug=False):
        """Return the cached value for the given key, or None.

        If timeout is not None (the default), and the value is already
        being calculated by another thread, wait until the given timeout has
        elapsed. If the value is available before the timeout expires, it is
        returned. If not, None is returned, and a sentinel placed in the cache
        to signal other threads to wait.

        If timeout is None, no waiting is performed nor sentinels used.
        """
        value = self.get(key)
        if isinstance(value, threading._Event):
            # Another thread is (or was) computing this entry.
            if timeout is None:
                # Ignore the other thread and recalc it ourselves.
                if debug:
                    cherrypy.log('No timeout', 'TOOLS.CACHING')
                return None

            # Wait until it's done or times out.
            if debug:
                cherrypy.log('Waiting up to %s seconds' % timeout,
                             'TOOLS.CACHING')
            value.wait(timeout)
            if value.result is not None:
                # The other thread finished its calculation. Use it.
                if debug:
                    cherrypy.log('Result!', 'TOOLS.CACHING')
                return value.result

            # Timed out. Stick an Event in the slot so other threads wait
            # on this one to finish calculating the value.
            if debug:
                cherrypy.log('Timed out', 'TOOLS.CACHING')
            e = threading.Event()
            e.result = None
            dict.__setitem__(self, key, e)

            return None
        elif value is None:
            # Stick an Event in the slot so other threads wait
            # on this one to finish calculating the value.
            if debug:
                cherrypy.log('Timed out', 'TOOLS.CACHING')
            e = threading.Event()
            e.result = None
            dict.__setitem__(self, key, e)
        return value

    def __setitem__(self, key, value):
        """Set the cached value for the given key."""
        existing = self.get(key)
        dict.__setitem__(self, key, value)
        if isinstance(existing, threading._Event):
            # Set Event.result so other threads waiting on it have
            # immediate access without needing to poll the cache again.
            existing.result = value
            existing.set()


class MemoryCache(Cache):
    """An in-memory cache for varying response content.

    Each key in self.store is a URI, and each value is an AntiStampedeCache.
    The response for any given URI may vary based on the values of
    "selecting request headers"; that is, those named in the Vary
    response header. We assume the list of header names to be constant
    for each URI throughout the lifetime of the application, and store
    that list in self.store[uri].selecting_headers.

    The items contained in self.store[uri] have keys which are tuples of
    request header values (in the same order as the names in its
    selecting_headers), and values which are the actual responses.
    """

    # Tunables (all overridable via tool kwargs):
    maxobjects = 1000            # max number of cached URIs
    maxobj_size = 100000         # max size (bytes) for any single response
    maxsize = 10000000           # max total cache size (bytes)
    delay = 600                  # seconds a cached response stays fresh
    antistampede_timeout = 5     # seconds readers wait on a busy entry
    expire_freq = 0.1            # seconds between expiration sweeps
    debug = False

    def __init__(self):
        self.clear()

        # Run self.expire_cache in a separate daemon thread.
        t = threading.Thread(target=self.expire_cache, name='expire_cache')
        self.expiration_thread = t
        if hasattr(threading.Thread, "daemon"):
            # Python 2.6+
            t.daemon = True
        else:
            t.setDaemon(True)
        t.start()

    def clear(self):
        """Reset the cache to its initial, empty state."""
        self.store = {}
        self.expirations = {}
        self.tot_puts = 0
        self.tot_gets = 0
        self.tot_hist = 0
        self.tot_expires = 0
        self.tot_non_modified = 0
        self.cursize = 0

    def expire_cache(self):
        # Background sweep: evict entries whose expiration time has passed.
        # expire_cache runs in a separate thread which the servers are
        # not aware of. It's possible that "time" will be set to None
        # arbitrarily, so we check "while time" to avoid exceptions.
        # See tickets #99 and #180 for more information.
        while time:
            now = time.time()
            # Must make a copy of expirations so it doesn't change size
            # during iteration
            for expiration_time, objects in self.expirations.items():
                if expiration_time <= now:
                    for obj_size, uri, sel_header_values in objects:
                        try:
                            del self.store[uri][sel_header_values]
                            self.tot_expires += 1
                            self.cursize -= obj_size
                        except KeyError:
                            # the key may have been deleted elsewhere
                            pass
                    del self.expirations[expiration_time]
            time.sleep(self.expire_freq)

    def get(self):
        """Return the current variant if in the cache, else None."""
        request = cherrypy.serving.request
        self.tot_gets += 1

        uri = cherrypy.url(qs=request.query_string)
        uricache = self.store.get(uri)
        if uricache is None:
            return None

        # Build the variant key from the request's values for the
        # headers this URI varies on (sorted for a canonical order).
        header_values = [request.headers.get(h, '')
                         for h in uricache.selecting_headers]
        header_values.sort()
        variant = uricache.wait(key=tuple(header_values),
                                timeout=self.antistampede_timeout,
                                debug=self.debug)
        if variant is not None:
            self.tot_hist += 1
        return variant

    def put(self, variant, size):
        """Store the current variant in the cache."""
        request = cherrypy.serving.request
        response = cherrypy.serving.response

        uri = cherrypy.url(qs=request.query_string)
        uricache = self.store.get(uri)
        if uricache is None:
            uricache = AntiStampedeCache()
            # The Vary header fixes which request headers select a variant.
            uricache.selecting_headers = [
                e.value for e in response.headers.elements('Vary')]
            self.store[uri] = uricache

        if len(self.store) < self.maxobjects:
            total_size = self.cursize + size

            # checks if there's space for the object
            if (size < self.maxobj_size and total_size < self.maxsize):
                # add to the expirations list
                expiration_time = response.time + self.delay
                bucket = self.expirations.setdefault(expiration_time, [])
                bucket.append((size, uri, uricache.selecting_headers))

                # add to the cache
                header_values = [request.headers.get(h, '')
                                 for h in uricache.selecting_headers]
                header_values.sort()
                uricache[tuple(header_values)] = variant
                self.tot_puts += 1
                self.cursize = total_size

    def delete(self):
        """Remove ALL cached variants of the current resource."""
        uri = cherrypy.url(qs=cherrypy.serving.request.query_string)
        self.store.pop(uri, None)


def get(invalid_methods=("POST", "PUT", "DELETE"), debug=False, **kwargs):
    """Try to obtain cached output. If fresh enough, raise HTTPError(304).

    If POST, PUT, or DELETE:
        * invalidates (deletes) any cached response for this resource
        * sets request.cached = False
        * sets request.cacheable = False

    else if a cached copy exists:
        * sets request.cached = True
        * sets request.cacheable = False
        * sets response.headers to the cached values
        * checks the cached Last-Modified response header against the
          current If-(Un)Modified-Since request headers; raises 304
          if necessary.
        * sets response.status and response.body to the cached values
        * returns True

    otherwise:
        * sets request.cached = False
        * sets request.cacheable = True
        * returns False
    """
    request = cherrypy.serving.request
    response = cherrypy.serving.response

    if not hasattr(cherrypy, "_cache"):
        # Make a process-wide Cache object.
        cherrypy._cache = kwargs.pop("cache_class", MemoryCache)()

        # Take all remaining kwargs and set them on the Cache object.
        for k, v in kwargs.items():
            setattr(cherrypy._cache, k, v)
        cherrypy._cache.debug = debug

    # POST, PUT, DELETE should invalidate (delete) the cached copy.
    # See http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.10.
    if request.method in invalid_methods:
        if debug:
            cherrypy.log('request.method %r in invalid_methods %r' %
                         (request.method, invalid_methods), 'TOOLS.CACHING')
        cherrypy._cache.delete()
        request.cached = False
        request.cacheable = False
        return False

    if 'no-cache' in [e.value for e in request.headers.elements('Pragma')]:
        request.cached = False
        request.cacheable = True
        return False

    cache_data = cherrypy._cache.get()
    request.cached = bool(cache_data)
    request.cacheable = not request.cached
    if request.cached:
        # Serve the cached copy.
        max_age = cherrypy._cache.delay
        # Honour the client's Cache-Control directives where relevant.
        for v in [e.value for e in request.headers.elements('Cache-Control')]:
            atoms = v.split('=', 1)
            directive = atoms.pop(0)
            if directive == 'max-age':
                if len(atoms) != 1 or not atoms[0].isdigit():
                    raise cherrypy.HTTPError(
                        400, "Invalid Cache-Control header")
                max_age = int(atoms[0])
                break
            elif directive == 'no-cache':
                if debug:
                    cherrypy.log(
                        'Ignoring cache due to Cache-Control: no-cache',
                        'TOOLS.CACHING')
                request.cached = False
                request.cacheable = True
                return False

        if debug:
            cherrypy.log('Reading response from cache', 'TOOLS.CACHING')
        s, h, b, create_time = cache_data
        age = int(response.time - create_time)
        if (age > max_age):
            # Cached copy exists but is too old for this client.
            if debug:
                cherrypy.log('Ignoring cache due to age > %d' % max_age,
                             'TOOLS.CACHING')
            request.cached = False
            request.cacheable = True
            return False

        # Copy the response headers. See http://www.cherrypy.org/ticket/721.
        response.headers = rh = httputil.HeaderMap()
        for k in h:
            dict.__setitem__(rh, k, dict.__getitem__(h, k))

        # Add the required Age header
        response.headers["Age"] = str(age)

        try:
            # Note that validate_since depends on a Last-Modified header;
            # this was put into the cached copy, and should have been
            # resurrected just above (response.headers = cache_data[1]).
            cptools.validate_since()
        except cherrypy.HTTPRedirect, x:
            if x.status == 304:
                cherrypy._cache.tot_non_modified += 1
            raise

        # serve it & get out from the request
        response.status = s
        response.body = b
    else:
        if debug:
            cherrypy.log('request is not cached', 'TOOLS.CACHING')
    return request.cached


def tee_output():
    # Wrap response.body in a generator that both yields the body and
    # records it so the complete response can be stored in the cache.
    request = cherrypy.serving.request
    if 'no-store' in request.headers.values('Cache-Control'):
        # Client forbade storing this exchange; leave the body untouched.
        return

    def tee(body):
        """Tee response.body into a list."""
        if ('no-cache' in response.headers.values('Pragma') or
                'no-store' in response.headers.values('Cache-Control')):
            # Response says it must not be cached: pass through only.
            for chunk in body:
                yield chunk
            return

        output = []
        for chunk in body:
            output.append(chunk)
            yield chunk

        # save the cache data
        body = ''.join(output)
        cherrypy._cache.put((response.status, response.headers or {},
                             body, response.time), len(body))

    # `response` is bound here, after `tee` is defined, and is resolved by
    # the closure only when the body generator is actually consumed.
    response = cherrypy.serving.response
    response.body = tee(response.body)


def expires(secs=0, force=False, debug=False):
    """Tool for influencing cache mechanisms using the 'Expires' header.

    'secs' must be either an int or a datetime.timedelta, and indicates the
    number of seconds between response.time and when the response should
    expire. The 'Expires' header will be set to (response.time + secs).

    If 'secs' is zero, the 'Expires' header is set one year in the past, and
    the following "cache prevention" headers are also set:

        'Pragma': 'no-cache'
        'Cache-Control': 'no-cache, must-revalidate'

    If 'force' is False (the default), the following headers are checked:
    'Etag', 'Last-Modified', 'Age', 'Expires'. If any are already present,
    none of the above response headers are set.
    """
    response = cherrypy.serving.response
    headers = response.headers

    cacheable = False
    if not force:
        # some header names that indicate that the response can be cached
        for indicator in ('Etag', 'Last-Modified', 'Age', 'Expires'):
            if indicator in headers:
                cacheable = True
                break

    if not cacheable and not force:
        if debug:
            cherrypy.log('request is not cacheable', 'TOOLS.EXPIRES')
    else:
        if debug:
            cherrypy.log('request is cacheable', 'TOOLS.EXPIRES')
        if isinstance(secs, datetime.timedelta):
            secs = (86400 * secs.days) + secs.seconds

        if secs == 0:
            if force or ("Pragma" not in headers):
                headers["Pragma"] = "no-cache"
            if cherrypy.serving.request.protocol >= (1, 1):
                if force or "Cache-Control" not in headers:
                    headers["Cache-Control"] = "no-cache, must-revalidate"
            # Set an explicit Expires date in the past.
            expiry = httputil.HTTPDate(1169942400.0)
        else:
            expiry = httputil.HTTPDate(response.time + secs)
        if force or "Expires" not in headers:
            headers["Expires"] = expiry
gpl-3.0
hanks-zyh/PythonBlog
www/models.py
1
1467
#!/usr/bin/env python3 # -*- coding: utf-8 -*- __author__ = 'Hanks' 'model 对应的数据库中的表' import time, uuid from orm import Model, StringField, BooleanField, FloatField, TextField def next_id(): return '%015d%s000' % (int(time.time() * 1000), uuid.uuid4().hex) class User(Model): __table__ = 'users' id = StringField(primary_key=True, default=next_id, ddl='varchar(50)') email = StringField(ddl='varchar(50)') password = StringField(ddl='varchar(50)') admin = BooleanField() name = StringField(ddl='varchar(50)') image = StringField(ddl='varchar(500)') created_at = FloatField(default=time.time) class Blog(Model): __table__ = 'blogs' id = StringField(primary_key=True, default=next_id, ddl='varchar(50)') user_id = StringField(ddl='varchar(50)') user_name = StringField(ddl='varchar(50)') user_image = StringField(ddl='varchar(500)') name = StringField(ddl='varchar(50)') summary = StringField(ddl='varchar(200)') content = TextField() created_at = FloatField(default=time.time) class Comment(Model): __table__ = 'comments' id = StringField(primary_key=True, default=next_id, ddl='varchar(50)') blog_id = StringField(ddl='varchar(50)') user_id = StringField(ddl='varchar(50)') user_name = StringField(ddl='varchar(50)') user_image = StringField(ddl='varchar(500)') content = TextField() created_at = FloatField(default=time.time)
apache-2.0
timopulkkinen/BubbleFish
build/mac/find_sdk.py
67
2958
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# NOTE(review): Python 2 only (`print >>sys.stderr` statement syntax).

import os
import re
import subprocess
import sys

"""Prints the lowest locally available SDK version greater than or equal to a
given minimum sdk version to standard output.

Usage:
  python find_sdk.py 10.6  # Ignores SDKs < 10.6
"""

from optparse import OptionParser


def parse_version(version_str):
  """'10.6' => [10, 6]"""
  # Lists of ints compare element-wise, so versions order correctly.
  return map(int, re.findall(r'(\d+)', version_str))


def main():
  parser = OptionParser()
  parser.add_option("--verify",
                    action="store_true", dest="verify", default=False,
                    help="return the sdk argument and warn if it doesn't exist")
  parser.add_option("--sdk_path",
                    action="store", type="string", dest="sdk_path", default="",
                    help="user-specified SDK path; bypasses verification")
  (options, args) = parser.parse_args()
  min_sdk_version = args[0]

  # Ask Xcode where its developer directory lives.
  job = subprocess.Popen(['xcode-select', '-print-path'],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
  out, err = job.communicate()
  if job.returncode != 0:
    print >>sys.stderr, out
    print >>sys.stderr, err
    raise Exception(('Error %d running xcode-select, you might have to run '
      '|sudo xcode-select --switch /Applications/Xcode.app/Contents/Developer| '
      'if you are using Xcode 4.') % job.returncode)
  # The Developer folder moved in Xcode 4.3.
  xcode43_sdk_path = os.path.join(
      out.rstrip(), 'Platforms/MacOSX.platform/Developer/SDKs')
  if os.path.isdir(xcode43_sdk_path):
    sdk_dir = xcode43_sdk_path
  else:
    sdk_dir = os.path.join(out.rstrip(), 'SDKs')

  # Collect installed MacOSX SDK version strings, keep only those >= minimum.
  sdks = [re.findall('^MacOSX(10\.\d+)\.sdk$', s)
          for s in os.listdir(sdk_dir)]
  sdks = [s[0] for s in sdks if s]  # [['10.5'], ['10.6']] => ['10.5', '10.6']
  sdks = [s for s in sdks  # ['10.5', '10.6'] => ['10.6']
          if parse_version(s) >= parse_version(min_sdk_version)]
  if not sdks:
    raise Exception('No %s+ SDK found' % min_sdk_version)
  best_sdk = sorted(sdks, key=parse_version)[0]

  if options.verify and best_sdk != min_sdk_version and not options.sdk_path:
    # Warn loudly: the exact requested SDK is missing.
    print >>sys.stderr, ''
    print >>sys.stderr, '                                           vvvvvvv'
    print >>sys.stderr, ''
    print >>sys.stderr, \
        'This build requires the %s SDK, but it was not found on your system.' \
        % min_sdk_version
    print >>sys.stderr, \
        'Either install it, or explicitly set mac_sdk in your GYP_DEFINES.'
    print >>sys.stderr, ''
    print >>sys.stderr, '                                           ^^^^^^^'
    print >>sys.stderr, ''
    return min_sdk_version

  return best_sdk


if __name__ == '__main__':
  if sys.platform != 'darwin':
    raise Exception("This script only runs on Mac")
  print main()
bsd-3-clause
trishnaguha/ansible
lib/ansible/plugins/action/net_linkagg.py
648
1057
# (c) 2017, Ansible Inc, # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.plugins.action.net_base import ActionModule as _ActionModule class ActionModule(_ActionModule): def run(self, tmp=None, task_vars=None): result = super(ActionModule, self).run(tmp, task_vars) del tmp # tmp no longer has any effect return result
gpl-3.0
jmesteve/openerp
openerp/addons/mrp/res_config.py
44
4545
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Business Applications
#    Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import fields, osv
from openerp import pooler
from openerp.tools.translate import _


class mrp_config_settings(osv.osv_memory):
    """Transient settings model for the MRP application.

    Each boolean below either installs/uninstalls a module (``module_*``
    fields) or toggles a security/feature group (``group_*`` fields with
    ``implied_group``); the help text is shown in the Settings UI.
    """
    _name = 'mrp.config.settings'
    _inherit = 'res.config.settings'

    _columns = {
        'module_mrp_repair': fields.boolean("Manage repairs of products ",
            help="""Allows to manage all product repairs.
                * Add/remove products in the reparation
                * Impact for stocks
                * Invoicing (products and/or services)
                * Warranty concept
                * Repair quotation report
                * Notes for the technician and for the final customer.
                This installs the module mrp_repair."""),
        'module_mrp_operations': fields.boolean("Allow detailed planning of work orders",
            help="""This allows to add state, date_start,date_stop in production order operation lines (in the "Work Centers" tab).
                This installs the module mrp_operations."""),
        'module_mrp_byproduct': fields.boolean("Produce several products from one manufacturing order",
            help="""You can configure by-products in the bill of material.
                Without this module: A + B + C -> D.
                With this module: A + B + C -> D + E.
                This installs the module mrp_byproduct."""),
        'module_mrp_jit': fields.boolean("Generate procurement in real time",
            help="""This allows Just In Time computation of procurement orders.
                All procurement orders will be processed immediately, which could in some
                cases entail a small performance impact.
                This installs the module mrp_jit."""),
        'module_stock_no_autopicking': fields.boolean("Manage manual picking to fulfill manufacturing orders ",
            help="""This module allows an intermediate picking process to provide raw materials to production orders.
                For example to manage production made by your suppliers (sub-contracting).
                To achieve this, set the assembled product which is sub-contracted to "No Auto-Picking"
                and put the location of the supplier in the routing of the assembly operation.
                This installs the module stock_no_autopicking."""),
        'group_mrp_routings': fields.boolean("Manage routings and work orders ",
            implied_group='mrp.group_mrp_routings',
            help="""Routings allow you to create and manage the manufacturing operations that should be followed
                within your work centers in order to produce a product. They are attached to bills of materials
                that will define the required raw materials."""),
        'group_mrp_properties': fields.boolean("Allow several bill of materials per product using properties",
            implied_group='product.group_mrp_properties',
            help="""The selection of the right Bill of Material to use will depend on the properties specified on the sales order and the Bill of Material."""),
        'module_product_manufacturer': fields.boolean("Define manufacturers on products ",
            help="""This allows you to define the following for a product:
                * Manufacturer
                * Manufacturer Product Name
                * Manufacturer Product Code
                * Product Attributes.
                This installs the module product_manufacturer."""),
    }

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
hirokiky/matcha
tests/test_make_wsgi_app.py
1
1141
# Tests for matcha.make_wsgi_app: the returned WSGI app should delegate to
# the matched case on success and to not_found_app when matching fails.
import pytest


@pytest.fixture
def target():
    # The function under test, imported lazily so collection never fails.
    from matcha import make_wsgi_app
    return make_wsgi_app


def dummy_case(environ, start_response):
    # Stand-in WSGI app: echoes its arguments so tests can inspect them.
    return environ, start_response


def dummy_matching(environ):
    # Matching that always succeeds with a fixed (case, matched_dict) pair.
    return dummy_case, 'dummy_matched_dict'


def dummy_not_matched_app(environ, start_response):
    # Fallback app used to detect the not-found path.
    return 'not matched'


def dummy_matching_not_matched(environ):
    # Matching that always fails.
    from matcha import NotMatched
    raise NotMatched


def test_matched(target):
    # On a match: environ is annotated and the matched case is invoked.
    inner_target = target(dummy_matching)
    dummy_environ = {}
    actual = inner_target(dummy_environ, 'dummy_start_response')
    assert dummy_environ['matcha.matched_dict'] == 'dummy_matched_dict'
    assert dummy_environ['matcha.matching'] == dummy_matching
    assert actual == (dummy_environ, 'dummy_start_response')


def test_not_matched(target):
    # On NotMatched: the not_found_app handles the request instead.
    inner_target = target(dummy_matching_not_matched,
                          not_found_app=dummy_not_matched_app)
    dummy_environ = {}
    actual = inner_target(dummy_environ, 'dummy_start_response')
    assert actual == 'not matched'
    assert dummy_environ['matcha.matching'] == dummy_matching_not_matched
mit
jnovinger/django
django/conf/locale/ru/formats.py
1059
1267
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals

# Russian (ru) locale formats.
#
# Display formats below use Django's template date-format syntax; the
# *_INPUT_FORMATS lists use Python's strftime syntax and are tried in order
# when parsing user input.

# -- Display formats (Django date-format syntax) --
DATE_FORMAT = 'j E Y г.'                 # e.g. "25 октября 2006 г."
TIME_FORMAT = 'G:i'                      # 24-hour clock, e.g. "14:30"
DATETIME_FORMAT = 'j E Y г. G:i'
YEAR_MONTH_FORMAT = 'F Y г.'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd.m.Y'              # e.g. "25.10.2006"
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'

# Russian weeks start on Monday.
FIRST_DAY_OF_WEEK = 1

# -- Input formats (Python strftime syntax), most specific first --
DATE_INPUT_FORMATS = [
    '%d.%m.%Y',                 # '25.10.2006'
    '%d.%m.%y',                 # '25.10.06'
]
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y %H:%M:%S',        # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',     # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',           # '25.10.2006 14:30'
    '%d.%m.%Y',                 # '25.10.2006'
    '%d.%m.%y %H:%M:%S',        # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f',     # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M',           # '25.10.06 14:30'
    '%d.%m.%y',                 # '25.10.06'
]

# -- Number formatting --
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'     # non-breaking space
NUMBER_GROUPING = 3
bsd-3-clause
m-chichikalov/iot-manager-demo
python/sensor/la.py
2
2579
# iotManager
# load avg sensor
import json
import threading
# import os

# Widget appearance presets (values understood by the steel widget library):
frame = "STEEL"  # BLACK_METAL METAL SHINY_METAL BRASS STEEL CHROME GOLD ANTHRACITE TILTED_GRAY TILTED_BLACK GLOSSY_METAL
color = "RAITH"  # RED GREEN BLUE ORANGE YELLOW CYAN MAGENTA WHITE GRAY BLACK RAITH GREEN_LCD JUG_GREEN
bgColor = "CARBON"  # DARK_GRAY SATIN_GRAY LIGHT_GRAY WHITE BLACK BEIGE BROWN RED GREEN BLUE ANTHRACITE MUD PUNCHED_SHEET CARBON STAINLESS BRUSHED_METAL BRUSHED_STAINLESS TURNED
lcd = "BLUE_BLUE"  # BEIGE BLUE ORANGE RED YELLOW WHITE GRAY BLACK GREEN BLUE2 BLUE_BLACK BLUE_DARKBLUE BLUE_GRAY STANDARD STANDARD_GREEN BLUE_BLUE RED_DARKRED DARKBLUE LILA BLACKRED DARKGREEN AMBER LIGHTBLUE SECTIONS
led = "RED_LED"  # RED_LED GREEN_LED BLUE_LED ORANGE_LED YELLOW_LED CYAN_LED


class LoadAvg:
    """Load-average sensor: publishes widget config and readings over MQTT.

    A self-re-arming threading.Timer fires send() every 10 seconds; readings
    are published only while `counter` is positive (armed by send_config()).
    """

    # Class-level defaults; both are rebound per-instance in __init__/send.
    counter = 0
    t = 0

    # Widget description published to the dashboard; per-instance keys
    # (id/topic/pageId/page) are filled in by __init__.
    config = {
        'descr': "Load average",
        'widget': "steel",
        'style1': "float:left;",
        'widgetConfig': {
            'titleString': "Load average 1 min",
            'unitString': "%",
            'width': "auto2",
            'height': 100,
            'type': "Linear",
            'lcdVisible': True,
            'ledVisible': True,
            'lcdDecimals': 0,
            'FrameDesign': frame,
            'ColorDef': color,
            'BackgroundColor': bgColor,
            'LcdColor': lcd,
            'LedColor': led,
            'minMeasuredValueVisible': True,
            'maxMeasuredValueVisible': True,
            'threshold': 50,
            'minValue': 0,
            'maxValue': 100,
        }
    }

    def __init__(self, client, prefix, deviceID, widgetID, pageId, page):
        # client: an MQTT-style client exposing publish(topic, payload).
        self.client = client
        self.prefix = prefix
        self.deviceID = deviceID
        # NOTE(review): `config` is a class attribute, so these writes are
        # shared across all LoadAvg instances — confirm single-instance use.
        self.config['id'] = widgetID
        self.config['topic'] = prefix + "/" + deviceID + "/la"
        self.config['pageId'] = pageId
        self.config['page'] = page
        # Arm the first 10-second publish timer.
        self.t = threading.Timer(10.0, self.send)
        self.t.start()

    def send_config(self):
        """Publish the widget configuration and enable periodic readings."""
        print('Publish config:' + json.dumps(self.config))
        self.client.publish(
            self.prefix + "/" + self.deviceID + '/config',
            json.dumps(self.config))
        self.counter = 5 * 60 / 10  # 5 min send
        self.send()

    def send(self):
        """Publish one reading, then re-arm the 10-second timer."""
        self.t.cancel()
        self.t = threading.Timer(10.0, self.send)
        self.t.start()
        # NOTE(review): counter is never decremented here, so once armed the
        # sensor publishes indefinitely — confirm whether that is intended.
        if(self.counter > 0):
            topic = self.prefix + "/" + self.deviceID + "/la/status"
            # 1-minute load average, scaled to a 0-100 style percentage.
            val = float(self.get()[0]) * 100
            print('Publish ' + topic + ':' + str(val))
            self.client.publish(topic, json.dumps({ 'status': val}) )

    def get(self):
        """Return the fields of /proc/loadavg as a list of strings."""
        with open('/proc/loadavg') as f:
            loadavg = f.readlines()
        return str(loadavg[0]).replace('\n', '').split(' ')
mit
zubair-arbi/edx-platform
lms/djangoapps/instructor/views/api_urls.py
4
7259
"""
Instructor API endpoint urls.

Each url maps to a view in instructor.views.api (string-based view paths,
pre-Django-1.10 `patterns(...)` style).
"""

from django.conf.urls import patterns, url

urlpatterns = patterns(
    '',
    # Enrollment management
    url(r'^students_update_enrollment$',
        'instructor.views.api.students_update_enrollment', name="students_update_enrollment"),
    url(r'^register_and_enroll_students$',
        'instructor.views.api.register_and_enroll_students', name="register_and_enroll_students"),
    url(r'^list_course_role_members$',
        'instructor.views.api.list_course_role_members', name="list_course_role_members"),
    url(r'^modify_access$',
        'instructor.views.api.modify_access', name="modify_access"),
    url(r'^bulk_beta_modify_access$',
        'instructor.views.api.bulk_beta_modify_access', name="bulk_beta_modify_access"),
    url(r'^get_problem_responses$',
        'instructor.views.api.get_problem_responses', name="get_problem_responses"),
    url(r'^get_grading_config$',
        'instructor.views.api.get_grading_config', name="get_grading_config"),
    # Optional "/csv" suffix selects CSV output for the endpoints below.
    url(r'^get_students_features(?P<csv>/csv)?$',
        'instructor.views.api.get_students_features', name="get_students_features"),
    url(r'^get_issued_certificates/$',
        'instructor.views.api.get_issued_certificates', name="get_issued_certificates"),
    url(r'^get_students_who_may_enroll$',
        'instructor.views.api.get_students_who_may_enroll', name="get_students_who_may_enroll"),
    url(r'^get_user_invoice_preference$',
        'instructor.views.api.get_user_invoice_preference', name="get_user_invoice_preference"),
    url(r'^get_sale_records(?P<csv>/csv)?$',
        'instructor.views.api.get_sale_records', name="get_sale_records"),
    url(r'^get_sale_order_records$',
        'instructor.views.api.get_sale_order_records', name="get_sale_order_records"),
    url(r'^sale_validation_url$',
        'instructor.views.api.sale_validation', name="sale_validation"),
    url(r'^get_anon_ids$',
        'instructor.views.api.get_anon_ids', name="get_anon_ids"),
    url(r'^get_student_progress_url$',
        'instructor.views.api.get_student_progress_url', name="get_student_progress_url"),
    url(r'^reset_student_attempts$',
        'instructor.views.api.reset_student_attempts', name="reset_student_attempts"),
    url(  # pylint: disable=bad-continuation
        r'^rescore_problem$',
        'instructor.views.api.rescore_problem',
        name="rescore_problem"
    ),
    # Entrance exam management
    url(
        r'^reset_student_attempts_for_entrance_exam$',
        'instructor.views.api.reset_student_attempts_for_entrance_exam',
        name="reset_student_attempts_for_entrance_exam"
    ),
    url(
        r'^rescore_entrance_exam$',
        'instructor.views.api.rescore_entrance_exam',
        name="rescore_entrance_exam"
    ),
    url(
        r'^list_entrance_exam_instructor_tasks',
        'instructor.views.api.list_entrance_exam_instructor_tasks',
        name="list_entrance_exam_instructor_tasks"
    ),
    url(
        r'^mark_student_can_skip_entrance_exam',
        'instructor.views.api.mark_student_can_skip_entrance_exam',
        name="mark_student_can_skip_entrance_exam"
    ),
    # Task listings
    url(r'^list_instructor_tasks$',
        'instructor.views.api.list_instructor_tasks', name="list_instructor_tasks"),
    url(r'^list_background_email_tasks$',
        'instructor.views.api.list_background_email_tasks', name="list_background_email_tasks"),
    url(r'^list_email_content$',
        'instructor.views.api.list_email_content', name="list_email_content"),
    # Forum moderation
    url(r'^list_forum_members$',
        'instructor.views.api.list_forum_members', name="list_forum_members"),
    url(r'^update_forum_role_membership$',
        'instructor.views.api.update_forum_role_membership', name="update_forum_role_membership"),
    url(r'^send_email$',
        'instructor.views.api.send_email', name="send_email"),
    # Due-date extensions
    url(r'^change_due_date$', 'instructor.views.api.change_due_date',
        name='change_due_date'),
    url(r'^reset_due_date$', 'instructor.views.api.reset_due_date',
        name='reset_due_date'),
    url(r'^show_unit_extensions$', 'instructor.views.api.show_unit_extensions',
        name='show_unit_extensions'),
    url(r'^show_student_extensions$', 'instructor.views.api.show_student_extensions',
        name='show_student_extensions'),

    # proctored exam downloads...
    url(r'^get_proctored_exam_results$',
        'instructor.views.api.get_proctored_exam_results', name="get_proctored_exam_results"),

    # Grade downloads...
    url(r'^list_report_downloads$',
        'instructor.views.api.list_report_downloads', name="list_report_downloads"),
    url(r'calculate_grades_csv$',
        'instructor.views.api.calculate_grades_csv', name="calculate_grades_csv"),
    url(r'problem_grade_report$',
        'instructor.views.api.problem_grade_report', name="problem_grade_report"),

    # Financial Report downloads..
    url(r'^list_financial_report_downloads$',
        'instructor.views.api.list_financial_report_downloads', name="list_financial_report_downloads"),

    # Registration Codes..
    url(r'get_registration_codes$',
        'instructor.views.api.get_registration_codes', name="get_registration_codes"),
    url(r'generate_registration_codes$',
        'instructor.views.api.generate_registration_codes', name="generate_registration_codes"),
    url(r'active_registration_codes$',
        'instructor.views.api.active_registration_codes', name="active_registration_codes"),
    url(r'spent_registration_codes$',
        'instructor.views.api.spent_registration_codes', name="spent_registration_codes"),

    # Reports..
    url(r'get_enrollment_report$',
        'instructor.views.api.get_enrollment_report', name="get_enrollment_report"),
    url(r'get_exec_summary_report$',
        'instructor.views.api.get_exec_summary_report', name="get_exec_summary_report"),
    url(r'get_course_survey_results$',
        'instructor.views.api.get_course_survey_results', name="get_course_survey_results"),

    # Coupon Codes..
    url(r'get_coupon_codes',
        'instructor.views.api.get_coupon_codes', name="get_coupon_codes"),

    # spoc gradebook
    url(r'^gradebook$',
        'instructor.views.gradebook_api.spoc_gradebook', name='spoc_gradebook'),
    url(r'^gradebook/(?P<offset>[0-9]+)$',
        'instructor.views.gradebook_api.spoc_gradebook', name='spoc_gradebook'),

    # Cohort management
    url(r'add_users_to_cohorts$',
        'instructor.views.api.add_users_to_cohorts', name="add_users_to_cohorts"),

    # Certificates
    url(r'^generate_example_certificates$',
        'instructor.views.api.generate_example_certificates', name='generate_example_certificates'),
    url(r'^enable_certificate_generation$',
        'instructor.views.api.enable_certificate_generation', name='enable_certificate_generation'),
    url(r'^start_certificate_generation',
        'instructor.views.api.start_certificate_generation', name='start_certificate_generation'),
    url(r'^start_certificate_regeneration',
        'instructor.views.api.start_certificate_regeneration', name='start_certificate_regeneration'),
    url(r'^create_certificate_exception/(?P<white_list_student>[^/]*)',
        'instructor.views.api.create_certificate_exception', name='create_certificate_exception'),
)
agpl-3.0
ColdMatter/EDMSuite
EDMScripts/OldScripts/JoesLazyMapThreadLoop.py
1
1075
# MapLoop - asks ScanMaster to make a series of scans with one of the pg # parameters incremented scan to scan from DAQ.Environment import * def mapLoop(numScans): # setup fileSystem = Environs.FileSystem file = \ fileSystem.GetDataDirectory(\ fileSystem.Paths["scanMasterDataPath"])\ + fileSystem.GenerateNextDataFileName() print("Saving as " + file + "_*.zip") print("") list1 = [line.strip() for line in open("rfCentreTimes.txt")] list2 = [line.strip() for line in open("topAmpPwrs.txt")] # start looping for i in range(len(list1)): print "pg:rf2CentreTime -> " + str(list1[i]) print "pg:rf2BlankingCentreTime -> " + str(list1[i]) print "out:scanOnAmplitude -> " + str(list2[i]) sm.AdjustProfileParameter("pg", "rf2CentreTime", list1[i], False) sm.AdjustProfileParameter("pg", "rf2BlankingCentreTime", list1[i], False) sm.AdjustProfileParameter("out", "scanOnAmplitude", list2[i], False) sm.AcquireAndWait(numScans) scanPath = file + "_" + str(i) + ".zip" sm.SaveData(scanPath) def run_script(): print "Use mapLoop(numScans)"
mit
abartlet/samba
third_party/dnspython/dns/rdtypes/nsbase.py
100
2994
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """NS-like base classes.""" import cStringIO import dns.exception import dns.rdata import dns.name class NSBase(dns.rdata.Rdata): """Base class for rdata that is like an NS record. @ivar target: the target name of the rdata @type target: dns.name.Name object""" __slots__ = ['target'] def __init__(self, rdclass, rdtype, target): super(NSBase, self).__init__(rdclass, rdtype) self.target = target def to_text(self, origin=None, relativize=True, **kw): target = self.target.choose_relativity(origin, relativize) return str(target) def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True): target = tok.get_name() target = target.choose_relativity(origin, relativize) tok.get_eol() return cls(rdclass, rdtype, target) from_text = classmethod(from_text) def to_wire(self, file, compress = None, origin = None): self.target.to_wire(file, compress, origin) def to_digestable(self, origin = None): return self.target.to_digestable(origin) def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None): (target, cused) = dns.name.from_wire(wire[: current + rdlen], current) if cused != rdlen: raise dns.exception.FormError if not origin is None: target = target.relativize(origin) return 
cls(rdclass, rdtype, target) from_wire = classmethod(from_wire) def choose_relativity(self, origin = None, relativize = True): self.target = self.target.choose_relativity(origin, relativize) def _cmp(self, other): return cmp(self.target, other.target) class UncompressedNS(NSBase): """Base class for rdata that is like an NS record, but whose name is not compressed when convert to DNS wire format, and whose digestable form is not downcased.""" def to_wire(self, file, compress = None, origin = None): super(UncompressedNS, self).to_wire(file, None, origin) def to_digestable(self, origin = None): f = cStringIO.StringIO() self.to_wire(f, None, origin) return f.getvalue()
gpl-3.0
wreckJ/intellij-community
python/lib/Lib/site-packages/django/core/management/commands/shell.py
230
3263
import os from django.core.management.base import NoArgsCommand from optparse import make_option class Command(NoArgsCommand): option_list = NoArgsCommand.option_list + ( make_option('--plain', action='store_true', dest='plain', help='Tells Django to use plain Python, not IPython.'), ) help = "Runs a Python interactive interpreter. Tries to use IPython, if it's available." shells = ['ipython', 'bpython'] requires_model_validation = False def ipython(self): try: from IPython.frontend.terminal.embed import TerminalInteractiveShell shell = TerminalInteractiveShell() shell.mainloop() except ImportError: # IPython < 0.11 # Explicitly pass an empty list as arguments, because otherwise # IPython would use sys.argv from this script. try: from IPython.Shell import IPShell shell = IPShell(argv=[]) shell.mainloop() except ImportError: # IPython not found at all, raise ImportError raise def bpython(self): import bpython bpython.embed() def run_shell(self): for shell in self.shells: try: return getattr(self, shell)() except ImportError: pass raise ImportError def handle_noargs(self, **options): # XXX: (Temporary) workaround for ticket #1796: force early loading of all # models from installed apps. from django.db.models.loading import get_models loaded_models = get_models() use_plain = options.get('plain', False) try: if use_plain: # Don't bother loading IPython, because the user wants plain Python. raise ImportError self.run_shell() except ImportError: import code # Set up a dictionary to serve as the environment for the shell, so # that tab completion works on objects that are imported at runtime. # See ticket 5082. imported_objects = {} try: # Try activating rlcompleter, because it's handy. import readline except ImportError: pass else: # We don't have to wrap the following import in a 'try', because # we already know 'readline' was imported successfully. 
import rlcompleter readline.set_completer(rlcompleter.Completer(imported_objects).complete) readline.parse_and_bind("tab:complete") # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system # conventions and get $PYTHONSTARTUP first then import user. if not use_plain: pythonrc = os.environ.get("PYTHONSTARTUP") if pythonrc and os.path.isfile(pythonrc): try: execfile(pythonrc) except NameError: pass # This will import .pythonrc.py as a side-effect import user code.interact(local=imported_objects)
apache-2.0
vovojh/gem5
src/arch/x86/isa/insts/system/segmentation.py
25
7202
# Copyright (c) 2007 The Hewlett-Packard Development Company # Copyright (c) 2012-2013 AMD # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Gabe Black microcode = ''' def macroop LGDT_M { .serializing .adjust_env maxOsz # Get the limit ld t1, seg, sib, disp, dataSize=2 # Get the base ld t2, seg, sib, 'adjustedDisp + 2' wrbase tsg, t2 wrlimit tsg, t1 }; def macroop LGDT_P { .serializing .adjust_env maxOsz rdip t7 # Get the limit ld t1, seg, riprel, disp, dataSize=2 # Get the base ld t2, seg, riprel, 'adjustedDisp + 2' wrbase tsg, t2 wrlimit tsg, t1 }; # # These versions are for when the original data size was 16 bits. The base is # still 32 bits, but the top byte is zeroed before being used. 
# def macroop LGDT_16_M { .serializing .adjust_env maxOsz # Get the limit ld t1, seg, sib, disp, dataSize=2 # Get the base ld t2, seg, sib, 'adjustedDisp + 2', dataSize=4 zexti t2, t2, 23, dataSize=8 wrbase tsg, t2, dataSize=8 wrlimit tsg, t1 }; def macroop LGDT_16_P { .serializing .adjust_env maxOsz rdip t7 # Get the limit ld t1, seg, riprel, disp, dataSize=2 # Get the base ld t2, seg, riprel, 'adjustedDisp + 2', dataSize=4 zexti t2, t2, 23, dataSize=8 wrbase tsg, t2 wrlimit tsg, t1 }; def macroop LIDT_M { .serializing .adjust_env maxOsz # Get the limit ld t1, seg, sib, disp, dataSize=2 # Get the base ld t2, seg, sib, 'adjustedDisp + 2' wrbase idtr, t2 wrlimit idtr, t1 }; def macroop LIDT_P { .serializing .adjust_env maxOsz rdip t7 # Get the limit ld t1, seg, riprel, disp, dataSize=2 # Get the base ld t2, seg, riprel, 'adjustedDisp + 2' wrbase idtr, t2 wrlimit idtr, t1 }; # # These versions are for when the original data size was 16 bits. The base is # still 32 bits, but the top byte is zeroed before being used. 
# def macroop LIDT_16_M { .serializing .adjust_env maxOsz # Get the limit ld t1, seg, sib, disp, dataSize=2 # Get the base ld t2, seg, sib, 'adjustedDisp + 2', dataSize=4 zexti t2, t2, 23, dataSize=8 wrbase idtr, t2, dataSize=8 wrlimit idtr, t1 }; def macroop LIDT_16_P { .serializing .adjust_env maxOsz rdip t7 # Get the limit ld t1, seg, riprel, disp, dataSize=2 # Get the base ld t2, seg, riprel, 'adjustedDisp + 2', dataSize=4 zexti t2, t2, 23, dataSize=8 wrbase idtr, t2 wrlimit idtr, t1 }; def macroop LTR_R { .serializing chks reg, t0, TRCheck limm t4, 0, dataSize=8 srli t4, reg, 3, dataSize=2 ldst t1, tsg, [8, t4, t0], dataSize=8 ld t2, tsg, [8, t4, t0], 8, dataSize=8 chks reg, t1, TSSCheck wrdh t3, t1, t2 wrdl tr, t1, reg wrbase tr, t3, dataSize=8 limm t5, (1 << 9) or t1, t1, t5 st t1, tsg, [8, t4, t0], dataSize=8 }; def macroop LTR_M { .serializing ld t5, seg, sib, disp, dataSize=2 chks t5, t0, TRCheck limm t4, 0, dataSize=8 srli t4, t5, 3, dataSize=2 ldst t1, tsg, [8, t4, t0], dataSize=8 ld t2, tsg, [8, t4, t0], 8, dataSize=8 chks t5, t1, TSSCheck wrdh t3, t1, t2 wrdl tr, t1, t5 wrbase tr, t3, dataSize=8 limm t5, (1 << 9) or t1, t1, t5 st t1, tsg, [8, t4, t0], dataSize=8 }; def macroop LTR_P { .serializing rdip t7 ld t5, seg, riprel, disp, dataSize=2 chks t5, t0, TRCheck limm t4, 0, dataSize=8 srli t4, t5, 3, dataSize=2 ldst t1, tsg, [8, t4, t0], dataSize=8 ld t2, tsg, [8, t4, t0], 8, dataSize=8 chks t5, t1, TSSCheck wrdh t3, t1, t2 wrdl tr, t1, t5 wrbase tr, t3, dataSize=8 limm t5, (1 << 9) or t1, t1, t5 st t1, tsg, [8, t4, t0], dataSize=8 }; def macroop LLDT_R { .serializing chks reg, t0, InGDTCheck, flags=(EZF,) br label("end"), flags=(CEZF,) limm t4, 0, dataSize=8 srli t4, reg, 3, dataSize=2 ldst t1, tsg, [8, t4, t0], dataSize=8 ld t2, tsg, [8, t4, t0], 8, dataSize=8 chks reg, t1, LDTCheck wrdh t3, t1, t2 wrdl tsl, t1, reg wrbase tsl, t3, dataSize=8 end: fault "NoFault" }; def macroop LLDT_M { .serializing ld t5, seg, sib, disp, dataSize=2 chks t5, t0, 
InGDTCheck, flags=(EZF,) br label("end"), flags=(CEZF,) limm t4, 0, dataSize=8 srli t4, t5, 3, dataSize=2 ldst t1, tsg, [8, t4, t0], dataSize=8 ld t2, tsg, [8, t4, t0], 8, dataSize=8 chks t5, t1, LDTCheck wrdh t3, t1, t2 wrdl tsl, t1, t5 wrbase tsl, t3, dataSize=8 end: fault "NoFault" }; def macroop LLDT_P { .serializing rdip t7 ld t5, seg, riprel, disp, dataSize=2 chks t5, t0, InGDTCheck, flags=(EZF,) br label("end"), flags=(CEZF,) limm t4, 0, dataSize=8 srli t4, t5, 3, dataSize=2 ldst t1, tsg, [8, t4, t0], dataSize=8 ld t2, tsg, [8, t4, t0], 8, dataSize=8 chks t5, t1, LDTCheck wrdh t3, t1, t2 wrdl tsl, t1, t5 wrbase tsl, t3, dataSize=8 end: fault "NoFault" }; def macroop SWAPGS { rdval t1, kernel_gs_base, dataSize=8 rdbase t2, gs, dataSize=8 wrbase gs, t1, dataSize=8 wrval kernel_gs_base, t2, dataSize=8 }; '''
bsd-3-clause
RajatGoyal/frontera
docs/source/conf.py
6
8732
# -*- coding: utf-8 -*- # # frontera documentation build configuration file, created by # sphinx-quickstart on Tue Nov 18 17:54:50 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys from os import path # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(path.join(path.dirname(__file__), "_ext")) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'fronteradocs', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['ytemplates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Frontera' copyright = u'2014, ScrapingHub' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.3.0' # The full version, including alpha/beta/rc tags. release = '0.3.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
#html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['ystatic'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). 
#html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'fronteradoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'frontera.tex', u'Frontera Documentation', u'ScrapingHub', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'frontera', u'Frontera Documentation', [u'ScrapingHub'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'frontera', u'Frontera Documentation', u'ScrapingHub', 'frontera', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. 
#texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # -- Options for sphinx_rtd_theme ----------------------------------------- #https://github.com/snide/sphinx_rtd_theme import os on_rtd = os.environ.get('READTHEDOCS', None) == 'True' if on_rtd: html_theme = 'default' else: import sphinx_rtd_theme html_theme = "sphinx_rtd_theme" html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # -- Options for autoclass ------------------------------------------------ # Use class and init docstrings for autoclass directive autoclass_content = 'both'
bsd-3-clause
fxfitz/ansible
lib/ansible/plugins/connection/__init__.py
17
12283
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com> # (c) 2017, Peter Sprygada <psprygad@redhat.com> # (c) 2017 Ansible Project from __future__ import (absolute_import, division, print_function) __metaclass__ = type import fcntl import gettext import os import shlex from abc import abstractmethod, abstractproperty from functools import wraps from ansible import constants as C from ansible.errors import AnsibleError from ansible.module_utils.six import string_types from ansible.module_utils._text import to_bytes, to_text from ansible.plugins import AnsiblePlugin from ansible.plugins.loader import shell_loader try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() __all__ = ['ConnectionBase', 'ensure_connect'] BUFSIZE = 65536 def ensure_connect(func): @wraps(func) def wrapped(self, *args, **kwargs): if not self._connected: self._connect() return func(self, *args, **kwargs) return wrapped class ConnectionBase(AnsiblePlugin): ''' A base class for connections to contain common code. ''' has_pipelining = False has_native_async = False # eg, winrm always_pipeline_modules = False # eg, winrm become_methods = C.BECOME_METHODS # When running over this connection type, prefer modules written in a certain language # as discovered by the specified file extension. An empty string as the # language means any language. 
module_implementation_preferences = ('',) allow_executable = True # the following control whether or not the connection supports the # persistent connection framework or not supports_persistence = False force_persistence = False default_user = None def __init__(self, play_context, new_stdin, shell=None, *args, **kwargs): super(ConnectionBase, self).__init__() # All these hasattrs allow subclasses to override these parameters if not hasattr(self, '_play_context'): self._play_context = play_context if not hasattr(self, '_new_stdin'): self._new_stdin = new_stdin # Backwards compat: self._display isn't really needed, just import the global display and use that. if not hasattr(self, '_display'): self._display = display if not hasattr(self, '_connected'): self._connected = False self.success_key = None self.prompt = None self._connected = False self._socket_path = None if shell is not None: self._shell = shell # load the shell plugin for this action/connection if play_context.shell: shell_type = play_context.shell elif hasattr(self, '_shell_type'): shell_type = getattr(self, '_shell_type') else: shell_type = 'sh' shell_filename = os.path.basename(self._play_context.executable) try: shell = shell_loader.get(shell_filename) except Exception: shell = None if shell is None: for shell in shell_loader.all(): if shell_filename in shell.COMPATIBLE_SHELLS: break shell_type = shell.SHELL_FAMILY self._shell = shell_loader.get(shell_type) if not self._shell: raise AnsibleError("Invalid shell type specified (%s), or the plugin for that shell type is missing." 
% shell_type) @property def connected(self): '''Read-only property holding whether the connection to the remote host is active or closed.''' return self._connected @property def socket_path(self): '''Read-only property holding the connection socket path for this remote host''' return self._socket_path def _become_method_supported(self): ''' Checks if the current class supports this privilege escalation method ''' if self._play_context.become_method in self.become_methods: return True raise AnsibleError("Internal Error: this connection module does not support running commands via %s" % self._play_context.become_method) @staticmethod def _split_ssh_args(argstring): """ Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to the argument list. The list will not contain any empty elements. """ try: # Python 2.6.x shlex doesn't handle unicode type so we have to # convert args to byte string for that case. More efficient to # try without conversion first but python2.6 doesn't throw an # exception, it merely mangles the output: # >>> shlex.split(u't e') # ['t\x00\x00\x00', '\x00\x00\x00e\x00\x00\x00'] return [to_text(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] except AttributeError: # In Python3, shlex.split doesn't work on a byte string. return [to_text(x.strip()) for x in shlex.split(argstring) if x.strip()] @abstractproperty def transport(self): """String used to identify this Connection class from other classes""" pass @abstractmethod def _connect(self): """Connect to the host we've been initialized with""" # Check if PE is supported if self._play_context.become: self._become_method_supported() @ensure_connect @abstractmethod def exec_command(self, cmd, in_data=None, sudoable=True): """Run a command on the remote host. :arg cmd: byte string containing the command :kwarg in_data: If set, this data is passed to the command's stdin. This is used to implement pipelining. 
Currently not all connection plugins implement pipelining. :kwarg sudoable: Tell the connection plugin if we're executing a command via a privilege escalation mechanism. This may affect how the connection plugin returns data. Note that not all connections can handle privilege escalation. :returns: a tuple of (return code, stdout, stderr) The return code is an int while stdout and stderr are both byte strings. When a command is executed, it goes through multiple commands to get there. It looks approximately like this:: [LocalShell] ConnectionCommand [UsersLoginShell (*)] ANSIBLE_SHELL_EXECUTABLE [(BecomeCommand ANSIBLE_SHELL_EXECUTABLE)] Command :LocalShell: Is optional. It is run locally to invoke the ``Connection Command``. In most instances, the ``ConnectionCommand`` can be invoked directly instead. The ssh connection plugin which can have values that need expanding locally specified via ssh_args is the sole known exception to this. Shell metacharacters in the command itself should be processed on the remote machine, not on the local machine so no shell is needed on the local machine. (Example, ``/bin/sh``) :ConnectionCommand: This is the command that connects us to the remote machine to run the rest of the command. ``ansible_ssh_user``, ``ansible_ssh_host`` and so forth are fed to this piece of the command to connect to the correct host (Examples ``ssh``, ``chroot``) :UsersLoginShell: This shell may or may not be created depending on the ConnectionCommand used by the connection plugin. This is the shell that the ``ansible_ssh_user`` has configured as their login shell. In traditional UNIX parlance, this is the last field of a user's ``/etc/passwd`` entry We do not specifically try to run the ``UsersLoginShell`` when we connect. Instead it is implicit in the actions that the ``ConnectionCommand`` takes when it connects to a remote machine. 
``ansible_shell_type`` may be set to inform ansible of differences in how the ``UsersLoginShell`` handles things like quoting if a shell has different semantics than the Bourne shell. :ANSIBLE_SHELL_EXECUTABLE: This is the shell set via the inventory var ``ansible_shell_executable`` or via ``constants.DEFAULT_EXECUTABLE`` if the inventory var is not set. We explicitly invoke this shell so that we have predictable quoting rules at this point. ``ANSIBLE_SHELL_EXECUTABLE`` is only settable by the user because some sudo setups may only allow invoking a specific shell. (For instance, ``/bin/bash`` may be allowed but ``/bin/sh``, our default, may not). We invoke this twice, once after the ``ConnectionCommand`` and once after the ``BecomeCommand``. After the ConnectionCommand, this is run by the ``UsersLoginShell``. After the ``BecomeCommand`` we specify that the ``ANSIBLE_SHELL_EXECUTABLE`` is being invoked directly. :BecomeComand ANSIBLE_SHELL_EXECUTABLE: Is the command that performs privilege escalation. Setting this up is performed by the action plugin prior to running ``exec_command``. So we just get passed :param:`cmd` which has the BecomeCommand already added. (Examples: sudo, su) If we have a BecomeCommand then we will invoke a ANSIBLE_SHELL_EXECUTABLE shell inside of it so that we have a consistent view of quoting. :Command: Is the command we're actually trying to run remotely. 
(Examples: mkdir -p $HOME/.ansible, python $HOME/.ansible/tmp-script-file) """ pass @ensure_connect @abstractmethod def put_file(self, in_path, out_path): """Transfer a file from local to remote""" pass @ensure_connect @abstractmethod def fetch_file(self, in_path, out_path): """Fetch a file from remote to local""" pass @abstractmethod def close(self): """Terminate the connection""" pass def check_become_success(self, b_output): b_success_key = to_bytes(self._play_context.success_key) for b_line in b_output.splitlines(True): if b_success_key == b_line.rstrip(): return True return False def check_password_prompt(self, b_output): if self._play_context.prompt is None: return False elif isinstance(self._play_context.prompt, string_types): b_prompt = to_bytes(self._play_context.prompt).strip() b_lines = b_output.splitlines() return any(l.strip().startswith(b_prompt) for l in b_lines) else: return self._play_context.prompt(b_output) def check_incorrect_password(self, b_output): b_incorrect_password = to_bytes(gettext.dgettext(self._play_context.become_method, C.BECOME_ERROR_STRINGS[self._play_context.become_method])) return b_incorrect_password and b_incorrect_password in b_output def check_missing_password(self, b_output): b_missing_password = to_bytes(gettext.dgettext(self._play_context.become_method, C.BECOME_MISSING_STRINGS[self._play_context.become_method])) return b_missing_password and b_missing_password in b_output def connection_lock(self): f = self._play_context.connection_lockfd display.vvvv('CONNECTION: pid %d waiting for lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr) fcntl.lockf(f, fcntl.LOCK_EX) display.vvvv('CONNECTION: pid %d acquired lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr) def connection_unlock(self): f = self._play_context.connection_lockfd fcntl.lockf(f, fcntl.LOCK_UN) display.vvvv('CONNECTION: pid %d released lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr) def reset(self): 
display.warning("Reset is not implemented for this connection")
gpl-3.0
cbentes/texta
utils/gensim_wrapper/masked_word2vec.py
2
5449
# TODO: gensim license
"""Similarity queries over a gensim word2vec model with maskable vocabulary rows."""
import numpy as np
from six import string_types

import gensim


class MaskedWord2Vec(object):
    """Wrap a trained gensim word2vec model and provide ``most_similar`` /
    ``most_similar_cosmul`` variants that can force a set of vocabulary
    indexes (``ignored_idxes``) out of the ranking by overwriting their
    similarity score with ``ignored_dist`` before the top-N sort.
    """

    def __init__(self, word2vec_model):
        """Store the wrapped model and a shortcut to its vocabulary.

        Args:
            word2vec_model: a trained ``gensim.models.Word2Vec`` instance
                (must expose ``.wv.vocab``, ``.wv.syn0norm``, ``.wv.index2word``).
        """
        self.model = word2vec_model
        self.vocab = word2vec_model.wv.vocab

    def most_similar(self, positive=None, negative=None, topn=10,
                     ignored_idxes=None, ignored_dist=-999999):
        """
        Find the top-N most similar words. Positive words contribute positively
        towards the similarity, negative words negatively.

        This method computes cosine similarity between a simple mean of the
        projection weight vectors of the given words, and corresponds to the
        `word-analogy` and `distance` scripts in the original word2vec
        implementation.

        Rows listed in `ignored_idxes` have their similarity overwritten with
        `ignored_dist` so they can never appear in the returned top-N.

        Example::

          >>> trained_model.most_similar(positive=['woman', 'king'], negative=['man'])
          [('queen', 0.50882536), ...]

        """
        # None sentinels instead of mutable default arguments; [] behavior
        # is preserved (dists[[]] = x is a no-op for an empty index list).
        positive = [] if positive is None else positive
        negative = [] if negative is None else negative
        ignored_idxes = [] if ignored_idxes is None else ignored_idxes

        self.model.init_sims()

        if isinstance(positive, string_types) and not negative:
            # allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
            positive = [positive]

        # add weights for each word, if not already present;
        # default to 1.0 for positive and -1.0 for negative words
        positive = [
            (word, 1.0) if isinstance(word, string_types + (np.ndarray,)) else word
            for word in positive]
        negative = [
            (word, -1.0) if isinstance(word, string_types + (np.ndarray,)) else word
            for word in negative]

        # compute the weighted average of all words
        all_words, mean = set(), []
        for word, weight in positive + negative:
            if isinstance(word, np.ndarray):
                mean.append(weight * word)
            elif word in self.model.wv.vocab:
                mean.append(weight * self.model.wv.syn0norm[self.model.wv.vocab[word].index])
                all_words.add(self.model.wv.vocab[word].index)
            else:
                raise KeyError("word '%s' not in vocabulary" % word)
        if not mean:
            raise ValueError("cannot compute similarity with no input")
        mean = gensim.matutils.unitvec(np.array(mean).mean(axis=0)).astype(np.float32)

        dists = np.dot(self.model.wv.syn0norm, mean)
        if not topn:
            return dists
        # mask out ignored rows so they cannot rank in the top-N
        dists[ignored_idxes] = ignored_dist
        best = np.argsort(dists)[::-1][:topn + len(all_words)]
        # ignore (don't return) words from the input
        result = [(self.model.wv.index2word[sim], float(dists[sim]))
                  for sim in best if sim not in all_words]
        return result[:topn]

    def most_similar_cosmul(self, positive=None, negative=None, topn=10,
                            ignored_idxes=None, ignored_dist=-999999):
        """
        Find the top-N most similar words, using the multiplicative combination
        objective proposed by Omer Levy and Yoav Goldberg in [4]_. Positive words
        still contribute positively towards the similarity, negative words
        negatively, but with less susceptibility to one large distance dominating
        the calculation.

        In the common analogy-solving case, of two positive and one negative
        examples, this method is equivalent to the "3CosMul" objective (equation
        (4)) of Levy and Goldberg.

        Additional positive or negative examples contribute to the numerator or
        denominator, respectively - a potentially sensible but untested extension
        of the method. (With a single positive example, rankings will be the same
        as in the default most_similar.)

        Example::

          >>> trained_model.most_similar_cosmul(positive=['baghdad','england'],negative=['london'])
          [(u'iraq', 0.8488819003105164), ...]

        .. [4] Omer Levy and Yoav Goldberg. Linguistic Regularities in Sparse and
               Explicit Word Representations, 2014.

        """
        positive = [] if positive is None else positive
        negative = [] if negative is None else negative
        ignored_idxes = [] if ignored_idxes is None else ignored_idxes

        self.model.init_sims()

        if isinstance(positive, string_types) and not negative:
            # allow calls like most_similar_cosmul('dog'), as a shorthand for most_similar_cosmul(['dog'])
            positive = [positive]

        all_words = set()

        def word_vec(word):
            # resolve either a raw vector or a vocabulary word to its unit vector
            if isinstance(word, np.ndarray):
                return word
            elif word in self.model.wv.vocab:
                all_words.add(self.model.wv.vocab[word].index)
                return self.model.wv.syn0norm[self.model.wv.vocab[word].index]
            else:
                raise KeyError("word '%s' not in vocabulary" % word)

        positive = [word_vec(word) for word in positive]
        negative = [word_vec(word) for word in negative]
        if not positive:
            raise ValueError("cannot compute similarity with no input")

        # equation (4) of Levy & Goldberg "Linguistic Regularities...",
        # with distances shifted to [0,1] per footnote (7)
        pos_dists = [((1 + np.dot(self.model.wv.syn0norm, term)) / 2) for term in positive]
        neg_dists = [((1 + np.dot(self.model.wv.syn0norm, term)) / 2) for term in negative]
        dists = np.prod(pos_dists, axis=0) / (np.prod(neg_dists, axis=0) + 0.000001)

        if not topn:
            return dists
        # mask out ignored rows so they cannot rank in the top-N
        dists[ignored_idxes] = ignored_dist
        best = np.argsort(dists)[::-1][:topn + len(all_words)]
        # ignore (don't return) words from the input
        result = [(self.model.wv.index2word[sim], float(dists[sim]))
                  for sim in best if sim not in all_words]
        return result[:topn]
gpl-3.0
Weihonghao/ECM
Vpy34/lib/python3.5/site-packages/tensorflow/python/ops/rnn_cell.py
16
34198
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Module for constructing RNN Cells.

## Base interface for all RNN Cells

@@RNNCell

## RNN Cells for use with TensorFlow's core RNN methods

@@BasicRNNCell
@@BasicLSTMCell
@@GRUCell
@@LSTMCell

## Classes storing split `RNNCell` state

@@LSTMStateTuple

## RNN Cell wrappers (RNNCells that wrap other RNNCells)

@@MultiRNNCell
@@DropoutWrapper
@@EmbeddingWrapper
@@InputProjectionWrapper
@@OutputProjectionWrapper
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import math

from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import tanh
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest


def _state_size_with_prefix(state_size, prefix=None):
  """Helper function that enables int or TensorShape shape specification.

  This function takes a size specification, which can be an integer or a
  TensorShape, and converts it into a list of integers. One may specify any
  additional dimensions that precede the final state size specification.

  Args:
    state_size: TensorShape or int that specifies the size of a tensor.
    prefix: optional additional list of dimensions to prepend.

  Returns:
    result_state_size: list of dimensions the resulting tensor size.
  """
  result_state_size = tensor_shape.as_shape(state_size).as_list()
  if prefix is not None:
    if not isinstance(prefix, list):
      raise TypeError("prefix of _state_size_with_prefix should be a list.")
    result_state_size = prefix + result_state_size
  return result_state_size


class RNNCell(object):
  """Abstract object representing an RNN cell.

  The definition of cell in this package differs from the definition used in
  the literature. In the literature, cell refers to an object with a single
  scalar output. The definition in this package refers to a horizontal array
  of such units.

  An RNN cell, in the most abstract setting, is anything that has a state and
  performs some operation that takes a matrix of inputs. This operation
  results in an output matrix with `self.output_size` columns. If
  `self.state_size` is an integer, this operation also results in a new state
  matrix with `self.state_size` columns. If `self.state_size` is a tuple of
  integers, then it results in a tuple of `len(state_size)` state matrices,
  each with a column size corresponding to values in `state_size`.

  This module provides a number of basic commonly used RNN cells, such as
  LSTM (Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number
  of operators that allow adding dropouts, projections, or embeddings for
  inputs. Constructing multi-layer cells is supported by the class
  `MultiRNNCell`, or by calling the `rnn` ops several times. Every `RNNCell`
  must have the properties below and implement `__call__` with the following
  signature.
  """

  def __call__(self, inputs, state, scope=None):
    """Run this RNN cell on inputs, starting from the given state.

    Args:
      inputs: `2-D` tensor with shape `[batch_size x input_size]`.
      state: if `self.state_size` is an integer, this should be a `2-D Tensor`
        with shape `[batch_size x self.state_size]`.  Otherwise, if
        `self.state_size` is a tuple of integers, this should be a tuple
        with shapes `[batch_size x s] for s in self.state_size`.
      scope: VariableScope for the created subgraph; defaults to class name.

    Returns:
      A pair containing:
      - Output: A `2-D` tensor with shape `[batch_size x self.output_size]`.
      - New state: Either a single `2-D` tensor, or a tuple of tensors matching
        the arity and shapes of `state`.
    """
    raise NotImplementedError("Abstract method")

  @property
  def state_size(self):
    """size(s) of state(s) used by this cell.

    It can be represented by an Integer, a TensorShape or a tuple of Integers
    or TensorShapes.
    """
    raise NotImplementedError("Abstract method")

  @property
  def output_size(self):
    """Integer or TensorShape: size of outputs produced by this cell."""
    raise NotImplementedError("Abstract method")

  def zero_state(self, batch_size, dtype):
    """Return zero-filled state tensor(s).

    Args:
      batch_size: int, float, or unit Tensor representing the batch size.
      dtype: the data type to use for the state.

    Returns:
      If `state_size` is an int or TensorShape, then the return value is a
      `N-D` tensor of shape `[batch_size x state_size]` filled with zeros.

      If `state_size` is a nested list or tuple, then the return value is
      a nested list or tuple (of the same structure) of `2-D` tensors with
    the shapes `[batch_size x s]` for each s in `state_size`.
    """
    state_size = self.state_size
    if nest.is_sequence(state_size):
      # Build one zero tensor per flattened state component, then repack
      # into the same nested structure as state_size.
      state_size_flat = nest.flatten(state_size)
      zeros_flat = [
          array_ops.zeros(
              array_ops.pack(_state_size_with_prefix(s, prefix=[batch_size])),
              dtype=dtype)
          for s in state_size_flat]
      for s, z in zip(state_size_flat, zeros_flat):
        # Static shape uses None for the (dynamic) batch dimension.
        z.set_shape(_state_size_with_prefix(s, prefix=[None]))
      zeros = nest.pack_sequence_as(structure=state_size,
                                    flat_sequence=zeros_flat)
    else:
      zeros_size = _state_size_with_prefix(state_size, prefix=[batch_size])
      zeros = array_ops.zeros(array_ops.pack(zeros_size), dtype=dtype)
      zeros.set_shape(_state_size_with_prefix(state_size, prefix=[None]))

    return zeros


class BasicRNNCell(RNNCell):
  """The most basic RNN cell."""

  def __init__(self, num_units, input_size=None, activation=tanh):
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._activation = activation

  @property
  def state_size(self):
    return self._num_units

  @property
  def output_size(self):
    return self._num_units

  def __call__(self, inputs, state, scope=None):
    """Most basic RNN: output = new_state = activation(W * input + U * state + B)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicRNNCell"
      output = self._activation(_linear([inputs, state], self._num_units, True))
    return output, output


class GRUCell(RNNCell):
  """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078)."""

  def __init__(self, num_units, input_size=None, activation=tanh):
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._activation = activation

  @property
  def state_size(self):
    return self._num_units

  @property
  def output_size(self):
    return self._num_units

  def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"
      with vs.variable_scope("Gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        r, u = array_ops.split(1, 2, _linear([inputs, state],
                                             2 * self._num_units, True, 1.0))
        r, u = sigmoid(r), sigmoid(u)
      with vs.variable_scope("Candidate"):
        c = self._activation(_linear([inputs, r * state],
                                     self._num_units, True))
      new_h = u * state + (1 - u) * c
    return new_h, new_h


_LSTMStateTuple = collections.namedtuple("LSTMStateTuple", ("c", "h"))


class LSTMStateTuple(_LSTMStateTuple):
  """Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state.

  Stores two elements: `(c, h)`, in that order.

  Only used when `state_is_tuple=True`.
  """
  __slots__ = ()

  @property
  def dtype(self):
    # Both state halves must share a dtype for downstream ops to be valid.
    (c, h) = self
    if not c.dtype == h.dtype:
      raise TypeError("Inconsistent internal state: %s vs %s" %
                      (str(c.dtype), str(h.dtype)))
    return c.dtype


class BasicLSTMCell(RNNCell):
  """Basic LSTM recurrent network cell.

  The implementation is based on: http://arxiv.org/abs/1409.2329.

  We add forget_bias (default: 1) to the biases of the forget gate in order to
  reduce the scale of forgetting in the beginning of the training.

  It does not allow cell clipping, a projection layer, and does not
  use peep-hole connections: it is the basic baseline.

  For advanced models, please use the full LSTMCell that follows.
  """

  def __init__(self, num_units, forget_bias=1.0, input_size=None,
               state_is_tuple=True, activation=tanh):
    """Initialize the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      input_size: Deprecated and unused.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`.  If False, they are concatenated
        along the column axis.  The latter behavior will soon be deprecated.
      activation: Activation function of the inner states.
    """
    if not state_is_tuple:
      logging.warn("%s: Using a concatenated state is slower and will soon be "
                   "deprecated.  Use state_is_tuple=True.", self)
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation

  @property
  def state_size(self):
    return (LSTMStateTuple(self._num_units, self._num_units)
            if self._state_is_tuple else 2 * self._num_units)

  @property
  def output_size(self):
    return self._num_units

  def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      if self._state_is_tuple:
        c, h = state
      else:
        c, h = array_ops.split(1, 2, state)
      concat = _linear([inputs, h], 4 * self._num_units, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(1, 4, concat)

      new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
               self._activation(j))
      new_h = self._activation(new_c) * sigmoid(o)

      if self._state_is_tuple:
        new_state = LSTMStateTuple(new_c, new_h)
      else:
        new_state = array_ops.concat(1, [new_c, new_h])
      return new_h, new_state


def _get_concat_variable(name, shape, dtype, num_shards):
  """Get a sharded variable concatenated into one tensor."""
  sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)
  if len(sharded_variable) == 1:
    return sharded_variable[0]

  concat_name = name + "/concat"
  concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0"
  # Reuse a previously-built concat op for this scope if one exists.
  for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):
    if value.name == concat_full_name:
      return value

  concat_variable = array_ops.concat(0, sharded_variable, name=concat_name)
  ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES,
                        concat_variable)
  return concat_variable


def _get_sharded_variable(name, shape, dtype, num_shards):
  """Get a list of sharded variables with the given dtype."""
  if num_shards > shape[0]:
    raise ValueError("Too many shards: shape=%s, num_shards=%d" %
                     (shape, num_shards))
  unit_shard_size = int(math.floor(shape[0] / num_shards))
  remaining_rows = shape[0] - unit_shard_size * num_shards

  shards = []
  for i in range(num_shards):
    current_size = unit_shard_size
    # Distribute the leftover rows one-per-shard across the first shards.
    if i < remaining_rows:
      current_size += 1
    shards.append(vs.get_variable(name + "_%d" % i,
                                  [current_size] + shape[1:],
                                  dtype=dtype))
  return shards


class LSTMCell(RNNCell):
  """Long short-term memory unit (LSTM) recurrent network cell.

  The default non-peephole implementation is based on:

    http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf

  S. Hochreiter and J. Schmidhuber.
  "Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.

  The peephole implementation is based on:

    https://research.google.com/pubs/archive/43905.pdf

  Hasim Sak, Andrew Senior, and Francoise Beaufays.
  "Long short-term memory recurrent neural network architectures for
   large scale acoustic modeling." INTERSPEECH, 2014.

  The class uses optional peep-hole connections, optional cell clipping, and
  an optional projection layer.
  """

  def __init__(self, num_units, input_size=None,
               use_peepholes=False, cell_clip=None,
               initializer=None, num_proj=None, proj_clip=None,
               num_unit_shards=1, num_proj_shards=1,
               forget_bias=1.0, state_is_tuple=True,
               activation=tanh):
    """Initialize the parameters for an LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell
      input_size: Deprecated and unused.
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      cell_clip: (optional) A float value, if provided the cell state is
        clipped by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_proj: (optional) int, The output dimensionality for the projection
        matrices.  If None, no projection is performed.
      proj_clip: (optional) A float value.  If `num_proj > 0` and `proj_clip` is
        provided, then the projected values are clipped elementwise to within
        `[-proj_clip, proj_clip]`.
      num_unit_shards: How to split the weight matrix.  If >1, the weight
        matrix is stored across num_unit_shards.
      num_proj_shards: How to split the projection matrix.  If >1, the
        projection matrix is stored across num_proj_shards.
      forget_bias: Biases of the forget gate are initialized by default to 1
        in order to reduce the scale of forgetting at the beginning of
        the training.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`.  If False, they are concatenated
        along the column axis.  This latter behavior will soon be deprecated.
      activation: Activation function of the inner states.
    """
    if not state_is_tuple:
      logging.warn("%s: Using a concatenated state is slower and will soon be "
                   "deprecated.  Use state_is_tuple=True.", self)
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._cell_clip = cell_clip
    self._initializer = initializer
    self._num_proj = num_proj
    self._proj_clip = proj_clip
    self._num_unit_shards = num_unit_shards
    self._num_proj_shards = num_proj_shards
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation

    if num_proj:
      self._state_size = (
          LSTMStateTuple(num_units, num_proj)
          if state_is_tuple else num_units + num_proj)
      self._output_size = num_proj
    else:
      self._state_size = (
          LSTMStateTuple(num_units, num_units)
          if state_is_tuple else 2 * num_units)
      self._output_size = num_units

  @property
  def state_size(self):
    return self._state_size

  @property
  def output_size(self):
    return self._output_size

  def __call__(self, inputs, state, scope=None):
    """Run one step of LSTM.

    Args:
      inputs: input Tensor, 2D, batch x num_units.
      state: if `state_is_tuple` is False, this must be a state Tensor,
        `2-D, batch x state_size`.  If `state_is_tuple` is True, this must be a
        tuple of state Tensors, both `2-D`, with column sizes `c_state` and
        `m_state`.
      scope: VariableScope for the created subgraph; defaults to "LSTMCell".

    Returns:
      A tuple containing:
      - A `2-D, [batch x output_dim]`, Tensor representing the output of the
        LSTM after reading `inputs` when previous state was `state`.
        Here output_dim is:
           num_proj if num_proj was set,
           num_units otherwise.
      - Tensor(s) representing the new state of LSTM after reading `inputs` when
        the previous state was `state`.  Same type and shape(s) as `state`.

    Raises:
      ValueError: If input size cannot be inferred from inputs via
        static shape inference.
    """
    num_proj = self._num_units if self._num_proj is None else self._num_proj

    if self._state_is_tuple:
      (c_prev, m_prev) = state
    else:
      c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
      m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])

    dtype = inputs.dtype
    input_size = inputs.get_shape().with_rank(2)[1]
    if input_size.value is None:
      raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
    with vs.variable_scope(scope or type(self).__name__,
                           initializer=self._initializer):  # "LSTMCell"
      concat_w = _get_concat_variable(
          "W", [input_size.value + num_proj, 4 * self._num_units],
          dtype, self._num_unit_shards)

      b = vs.get_variable(
          "B", shape=[4 * self._num_units],
          initializer=init_ops.zeros_initializer, dtype=dtype)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      cell_inputs = array_ops.concat(1, [inputs, m_prev])
      lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
      i, j, f, o = array_ops.split(1, 4, lstm_matrix)

      # Diagonal connections
      if self._use_peepholes:
        w_f_diag = vs.get_variable(
            "W_F_diag", shape=[self._num_units], dtype=dtype)
        w_i_diag = vs.get_variable(
            "W_I_diag", shape=[self._num_units], dtype=dtype)
        w_o_diag = vs.get_variable(
            "W_O_diag", shape=[self._num_units], dtype=dtype)

      if self._use_peepholes:
        c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
             sigmoid(i + w_i_diag * c_prev) * self._activation(j))
      else:
        c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) *
             self._activation(j))

      if self._cell_clip is not None:
        # pylint: disable=invalid-unary-operand-type
        c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
        # pylint: enable=invalid-unary-operand-type

      if self._use_peepholes:
        m = sigmoid(o + w_o_diag * c) * self._activation(c)
      else:
        m = sigmoid(o) * self._activation(c)

      if self._num_proj is not None:
        concat_w_proj = _get_concat_variable(
            "W_P", [self._num_units, self._num_proj],
            dtype, self._num_proj_shards)

        m = math_ops.matmul(m, concat_w_proj)
        if self._proj_clip is not None:
          # pylint: disable=invalid-unary-operand-type
          m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
          # pylint: enable=invalid-unary-operand-type

    new_state = (LSTMStateTuple(c, m) if self._state_is_tuple
                 else array_ops.concat(1, [c, m]))
    return m, new_state


class OutputProjectionWrapper(RNNCell):
  """Operator adding an output projection to the given cell.

  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your outputs in time,
  do the projection on this batch-concatenated sequence, then split it
  if needed or directly feed into a softmax.
  """

  def __init__(self, cell, output_size):
    """Create a cell with output projection.

    Args:
      cell: an RNNCell, a projection to output_size is added to it.
      output_size: integer, the size of the output after projection.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if output_size is not positive.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    if output_size < 1:
      raise ValueError("Parameter output_size must be > 0: %d." % output_size)
    self._cell = cell
    self._output_size = output_size

  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self._output_size

  def __call__(self, inputs, state, scope=None):
    """Run the cell and output projection on inputs, starting from state."""
    output, res_state = self._cell(inputs, state)
    # Default scope: "OutputProjectionWrapper"
    with vs.variable_scope(scope or type(self).__name__):
      projected = _linear(output, self._output_size, True)
    return projected, res_state


class InputProjectionWrapper(RNNCell):
  """Operator adding an input projection to the given cell.

  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your inputs in time,
  do the projection on this batch-concatenated sequence, then split it.
  """

  def __init__(self, cell, num_proj, input_size=None):
    """Create a cell with input projection.

    Args:
      cell: an RNNCell, a projection of inputs is added before it.
      num_proj: Python integer.  The dimension to project to.
      input_size: Deprecated and unused.

    Raises:
      TypeError: if cell is not an RNNCell.
    """
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    self._cell = cell
    self._num_proj = num_proj

  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self._cell.output_size

  def __call__(self, inputs, state, scope=None):
    """Run the input projection and then the cell."""
    # Default scope: "InputProjectionWrapper"
    with vs.variable_scope(scope or type(self).__name__):
      projected = _linear(inputs, self._num_proj, True)
    return self._cell(projected, state)


class DropoutWrapper(RNNCell):
  """Operator adding dropout to inputs and outputs of the given cell."""

  def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
               seed=None):
    """Create a cell with added input and/or output dropout.

    Dropout is never used on the state.

    Args:
      cell: an RNNCell, a projection to output_size is added to it.
      input_keep_prob: unit Tensor or float between 0 and 1, input keep
        probability; if it is float and 1, no input dropout will be added.
      output_keep_prob: unit Tensor or float between 0 and 1, output keep
        probability; if it is float and 1, no output dropout will be added.
      seed: (optional) integer, the randomness seed.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if keep_prob is not between 0 and 1.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not a RNNCell.")
    if (isinstance(input_keep_prob, float) and
        not (input_keep_prob >= 0.0 and input_keep_prob <= 1.0)):
      raise ValueError("Parameter input_keep_prob must be between 0 and 1: %d"
                       % input_keep_prob)
    if (isinstance(output_keep_prob, float) and
        not (output_keep_prob >= 0.0 and output_keep_prob <= 1.0)):
      raise ValueError("Parameter output_keep_prob must be between 0 and 1: %d"
                       % output_keep_prob)
    self._cell = cell
    self._input_keep_prob = input_keep_prob
    self._output_keep_prob = output_keep_prob
    self._seed = seed

  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self._cell.output_size

  def __call__(self, inputs, state, scope=None):
    """Run the cell with the declared dropouts."""
    # A non-float keep_prob is a Tensor, so dropout must always be applied;
    # a float of exactly 1 means dropout is a no-op and is skipped entirely.
    if (not isinstance(self._input_keep_prob, float) or
        self._input_keep_prob < 1):
      inputs = nn_ops.dropout(inputs, self._input_keep_prob, seed=self._seed)
    output, new_state = self._cell(inputs, state, scope)
    if (not isinstance(self._output_keep_prob, float) or
        self._output_keep_prob < 1):
      output = nn_ops.dropout(output, self._output_keep_prob, seed=self._seed)
    return output, new_state


class EmbeddingWrapper(RNNCell):
  """Operator adding input embedding to the given cell.

  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your inputs in time,
  do the embedding on this batch-concatenated sequence, then split it and
  feed into your RNN.
  """

  def __init__(self, cell, embedding_classes, embedding_size, initializer=None):
    """Create a cell with an added input embedding.

    Args:
      cell: an RNNCell, an embedding will be put before its inputs.
      embedding_classes: integer, how many symbols will be embedded.
      embedding_size: integer, the size of the vectors we embed into.
      initializer: an initializer to use when creating the embedding;
        if None, the initializer from variable scope or a default one is used.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if embedding_classes is not positive.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    if embedding_classes <= 0 or embedding_size <= 0:
      raise ValueError("Both embedding_classes and embedding_size must be > 0: "
                       "%d, %d." % (embedding_classes, embedding_size))
    self._cell = cell
    self._embedding_classes = embedding_classes
    self._embedding_size = embedding_size
    self._initializer = initializer

  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self._cell.output_size

  def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with vs.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper"
      with ops.device("/cpu:0"):
        if self._initializer:
          initializer = self._initializer
        elif vs.get_variable_scope().initializer:
          initializer = vs.get_variable_scope().initializer
        else:
          # Default initializer for embeddings should have variance=1.
          sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
          initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

        if type(state) is tuple:
          data_type = state[0].dtype
        else:
          data_type = state.dtype

        embedding = vs.get_variable(
            "embedding", [self._embedding_classes, self._embedding_size],
            initializer=initializer,
            dtype=data_type)
        embedded = embedding_ops.embedding_lookup(
            embedding, array_ops.reshape(inputs, [-1]))
    return self._cell(embedded, state)


class MultiRNNCell(RNNCell):
  """RNN cell composed sequentially of multiple simple cells."""

  def __init__(self, cells, state_is_tuple=True):
    """Create a RNN cell composed sequentially of a number of RNNCells.

    Args:
      cells: list of RNNCells that will be composed in this order.
      state_is_tuple: If True, accepted and returned states are n-tuples, where
        `n = len(cells)`.  If False, the states are all
        concatenated along the column axis.  This latter behavior will soon be
        deprecated.

    Raises:
      ValueError: if cells is empty (not allowed), or at least one of the cells
        returns a state tuple but the flag `state_is_tuple` is `False`.
    """
    if not cells:
      raise ValueError("Must specify at least one cell for MultiRNNCell.")
    self._cells = cells
    self._state_is_tuple = state_is_tuple
    if not state_is_tuple:
      if any(nest.is_sequence(c.state_size) for c in self._cells):
        raise ValueError("Some cells return tuples of states, but the flag "
                         "state_is_tuple is not set.  State sizes are: %s"
                         % str([c.state_size for c in self._cells]))

  @property
  def state_size(self):
    if self._state_is_tuple:
      return tuple(cell.state_size for cell in self._cells)
    else:
      return sum([cell.state_size for cell in self._cells])

  @property
  def output_size(self):
    return self._cells[-1].output_size

  def __call__(self, inputs, state, scope=None):
    """Run this multi-layer cell on inputs, starting from state."""
    with vs.variable_scope(scope or type(self).__name__):  # "MultiRNNCell"
      cur_state_pos = 0
      cur_inp = inputs
      new_states = []
      for i, cell in enumerate(self._cells):
        with vs.variable_scope("Cell%d" % i):
          if self._state_is_tuple:
            if not nest.is_sequence(state):
              raise ValueError(
                  "Expected state to be a tuple of length %d, but received: %s"
                  % (len(self.state_size), state))
            cur_state = state[i]
          else:
            # Non-tuple state: slice this cell's columns out of the
            # concatenated state tensor.
            cur_state = array_ops.slice(
                state, [0, cur_state_pos], [-1, cell.state_size])
            cur_state_pos += cell.state_size
          cur_inp, new_state = cell(cur_inp, cur_state)
          new_states.append(new_state)
    new_states = (tuple(new_states) if self._state_is_tuple
                  else array_ops.concat(1, new_states))
    return cur_inp, new_states


class _SlimRNNCell(RNNCell):
  """A simple wrapper for slim.rnn_cells."""

  def __init__(self, cell_fn):
    """Create a SlimRNNCell from a cell_fn.

    Args:
      cell_fn: a function which takes (inputs, state, scope) and produces the
        outputs and the new_state. Additionally when called with inputs=None and
        state=None it should return (initial_outputs, initial_state).

    Raises:
      TypeError: if cell_fn is not callable
      ValueError: if cell_fn cannot produce a valid initial state.
    """
    if not callable(cell_fn):
      raise TypeError("cell_fn %s needs to be callable", cell_fn)
    self._cell_fn = cell_fn
    # NOTE(review): `.func` is the functools.partial attribute — this assumes
    # cell_fn is a partial; a plain function would need `__name__` directly.
    self._cell_name = cell_fn.func.__name__
    # Probe the cell once to discover its static output/state sizes.
    init_output, init_state = self._cell_fn(None, None)
    output_shape = init_output.get_shape()
    state_shape = init_state.get_shape()
    self._output_size = output_shape.with_rank(2)[1].value
    self._state_size = state_shape.with_rank(2)[1].value
    if self._output_size is None:
      raise ValueError("Initial output created by %s has invalid shape %s" %
                       (self._cell_name, output_shape))
    if self._state_size is None:
      raise ValueError("Initial state created by %s has invalid shape %s" %
                       (self._cell_name, state_shape))

  @property
  def state_size(self):
    return self._state_size

  @property
  def output_size(self):
    return self._output_size

  def __call__(self, inputs, state, scope=None):
    scope = scope or self._cell_name
    output, state = self._cell_fn(inputs, state, scope=scope)
    return output, state


def _linear(args, output_size, bias, bias_start=0.0, scope=None):
  """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.

  Args:
    args: a 2D Tensor or a list of 2D, batch x n, Tensors.
    output_size: int, second dimension of W[i].
    bias: boolean, whether to add a bias term or not.
    bias_start: starting value to initialize the bias; 0 by default.
    scope: VariableScope for the created subgraph; defaults to "Linear".

  Returns:
    A 2D Tensor with shape [batch x output_size] equal to
    sum_i(args[i] * W[i]), where W[i]s are newly created matrices.

  Raises:
    ValueError: if some of the arguments has unspecified or wrong shape.
  """
  if args is None or (nest.is_sequence(args) and not args):
    raise ValueError("`args` must be specified")
  if not nest.is_sequence(args):
    args = [args]

  # Calculate the total size of arguments on dimension 1.
  total_arg_size = 0
  shapes = [a.get_shape().as_list() for a in args]
  for shape in shapes:
    if len(shape) != 2:
      raise ValueError("Linear is expecting 2D arguments: %s" % str(shapes))
    if not shape[1]:
      raise ValueError("Linear expects shape[1] of arguments: %s" % str(shapes))
    else:
      total_arg_size += shape[1]

  dtype = [a.dtype for a in args][0]

  # Now the computation.
  with vs.variable_scope(scope or "Linear"):
    matrix = vs.get_variable(
        "Matrix", [total_arg_size, output_size], dtype=dtype)
    if len(args) == 1:
      res = math_ops.matmul(args[0], matrix)
    else:
      # Concatenating the inputs lets a single matmul replace the sum of
      # per-argument matmuls.
      res = math_ops.matmul(array_ops.concat(1, args), matrix)
    if not bias:
      return res
    bias_term = vs.get_variable(
        "Bias", [output_size],
        dtype=dtype,
        initializer=init_ops.constant_initializer(
            bias_start, dtype=dtype))
  return res + bias_term
agpl-3.0