gt
stringclasses
1 value
context
stringlengths
2.49k
119k
import py from rpython.jit.metainterp.warmspot import ll_meta_interp, get_stats from rpython.jit.metainterp.test.support import LLJitMixin from rpython.jit.codewriter.policy import StopAtXPolicy from rpython.jit.metainterp.resoperation import rop from rpython.jit.metainterp import history from rpython.rlib.jit import JitDriver, hint, set_param from rpython.rlib.objectmodel import compute_hash from rpython.rlib import rfloat from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rlib.rawstorage import (alloc_raw_storage, raw_storage_setitem, free_raw_storage, raw_storage_getitem) from rpython.rlib.objectmodel import specialize, is_annotation_constant from rpython.jit.backend.detect_cpu import getcpuclass CPU = getcpuclass() if not CPU.vector_extension: py.test.skip("this cpu %s has no implemented vector backend" % CPU) @specialize.argtype(0,1) def malloc(T,n): return lltype.malloc(T, n, flavor='raw', zero=True) def free(mem): lltype.free(mem, flavor='raw') class VectorizeTests: enable_opts = 'intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll' def setup_method(self, method): print "RUNNING", method.__name__ def meta_interp(self, f, args, policy=None, vec=True, vec_all=False): return ll_meta_interp(f, args, enable_opts=self.enable_opts, policy=policy, CPUClass=self.CPUClass, type_system=self.type_system, vec=vec, vec_all=vec_all) @py.test.mark.parametrize('i',[3,4,5,6,7,8,9,50]) def test_vectorize_simple_load_arith_store_int_add_index(self,i): myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=True) def f(d): bc = d*rffi.sizeof(rffi.SIGNED) va = alloc_raw_storage(bc, zero=True) vb = alloc_raw_storage(bc, zero=True) vc = alloc_raw_storage(bc, zero=True) x = 1 for i in range(d): j = i*rffi.sizeof(rffi.SIGNED) raw_storage_setitem(va, j, rffi.cast(rffi.SIGNED,i)) raw_storage_setitem(vb, j, rffi.cast(rffi.SIGNED,i)) i = 0 while i < bc: myjitdriver.jit_merge_point() a = 
raw_storage_getitem(rffi.SIGNED,va,i) b = raw_storage_getitem(rffi.SIGNED,vb,i) c = a+b raw_storage_setitem(vc, i, rffi.cast(rffi.SIGNED,c)) i += 1*rffi.sizeof(rffi.SIGNED) res = 0 for i in range(d): res += raw_storage_getitem(rffi.SIGNED,vc,i*rffi.sizeof(rffi.SIGNED)) free_raw_storage(va) free_raw_storage(vb) free_raw_storage(vc) return res res = self.meta_interp(f, [i]) assert res == f(i) @py.test.mark.parametrize('i',[1,2,3,8,17,128,130,131,142,143]) def test_vectorize_array_get_set(self,i): myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=True) T = lltype.Array(rffi.INT, hints={'nolength': True}) def f(d): i = 0 va = lltype.malloc(T, d, flavor='raw', zero=True) vb = lltype.malloc(T, d, flavor='raw', zero=True) vc = lltype.malloc(T, d, flavor='raw', zero=True) for j in range(d): va[j] = rffi.r_int(j) vb[j] = rffi.r_int(j) while i < d: myjitdriver.jit_merge_point() a = va[i] b = vb[i] ec = intmask(a)+intmask(b) vc[i] = rffi.r_int(ec) i += 1 res = 0 for j in range(d): res += intmask(vc[j]) lltype.free(va, flavor='raw') lltype.free(vb, flavor='raw') lltype.free(vc, flavor='raw') return res res = self.meta_interp(f, [i]) assert res == f(i) @py.test.mark.parametrize('i',[1,2,3,4,9]) def test_vector_register_too_small_vector(self, i): myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=True) T = lltype.Array(rffi.SHORT, hints={'nolength': True}) def g(d, va, vb): i = 0 while i < d: myjitdriver.jit_merge_point() a = va[i] b = vb[i] ec = intmask(a) + intmask(b) va[i] = rffi.r_short(ec) i += 1 def f(d): i = 0 va = lltype.malloc(T, d+100, flavor='raw', zero=True) vb = lltype.malloc(T, d+100, flavor='raw', zero=True) for j in range(d+100): va[j] = rffi.r_short(1) vb[j] = rffi.r_short(2) g(d+100, va, vb) g(d, va, vb) # this iteration might not fit into the vector register res = intmask(va[d]) lltype.free(va, flavor='raw') lltype.free(vb, flavor='raw') return res res = self.meta_interp(f, [i]) assert res == f(i) == 3 def test_vectorize_max(self): 
myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=True) def fmax(v1, v2): return v1 if v1 >= v2 or rfloat.isnan(v2) else v2 T = lltype.Array(rffi.DOUBLE, hints={'nolength': True}) def f(d): i = 0 va = lltype.malloc(T, d, flavor='raw', zero=True) for j in range(d): va[j] = float(j) va[13] = 128.0 m = -128.0 while i < d: myjitdriver.jit_merge_point() a = va[i] m = fmax(a, m) i += 1 lltype.free(va, flavor='raw') return m res = self.meta_interp(f, [30]) assert res == f(30) == 128 @py.test.mark.parametrize('type,func,init,insert,at,count,breaks', # all [(rffi.DOUBLE, lambda x: not bool(x), 1.0, None, -1,32, False), (rffi.DOUBLE, lambda x: x == 0.0, 1.0, None, -1,33, False), (rffi.DOUBLE, lambda x: x == 0.0, 1.0, 0.0, 33,34, True), (rffi.DOUBLE, lambda x: x == 0.0, 1.0, 0.1, 4,34, False), (lltype.Signed, lambda x: not bool(x), 1, None, -1,32, False), (lltype.Signed, lambda x: not bool(x), 1, 0, 14,32, True), (lltype.Signed, lambda x: not bool(x), 1, 0, 15,31, True), (lltype.Signed, lambda x: not bool(x), 1, 0, 16,30, True), (lltype.Signed, lambda x: x == 0, 1, None, -1,33, False), (lltype.Signed, lambda x: x == 0, 1, 0, 33,34, True), # any (rffi.DOUBLE, lambda x: x != 0.0, 0.0, 1.0, 33,35, True), (rffi.DOUBLE, lambda x: x != 0.0, 0.0, 1.0, -1,36, False), (rffi.DOUBLE, lambda x: bool(x), 0.0, 1.0, 33,37, True), (rffi.DOUBLE, lambda x: bool(x), 0.0, 1.0, -1,38, False), (lltype.Signed, lambda x: x != 0, 0, 1, 33,35, True), (lltype.Signed, lambda x: x != 0, 0, 1, -1,36, False), (lltype.Signed, lambda x: bool(x), 0, 1, 33,37, True), (lltype.Signed, lambda x: bool(x), 0, 1, -1,38, False), (rffi.INT, lambda x: intmask(x) != 0, rffi.r_int(0), rffi.r_int(1), 33,35, True), (rffi.INT, lambda x: intmask(x) != 0, rffi.r_int(0), rffi.r_int(1), -1,36, False), (rffi.INT, lambda x: bool(intmask(x)), rffi.r_int(0), rffi.r_int(1), 33,37, True), (rffi.INT, lambda x: bool(intmask(x)), rffi.r_int(0), rffi.r_int(1), -1,38, False), ]) def test_bool_reduction(self, type, func, init, 
insert, at, count, breaks): myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=True) T = lltype.Array(type, hints={'nolength': True}) def f(d): va = lltype.malloc(T, d, flavor='raw', zero=True) for i in range(d): va[i] = init if at != -1: va[at] = insert i = 0 ; nobreak = False while i < d: myjitdriver.jit_merge_point() b = func(va[i]) if b: assert b break i += 1 else: nobreak = True lltype.free(va, flavor='raw') return not nobreak res = self.meta_interp(f, [count]) assert res == f(count) == breaks def test_sum(self): myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=True) T = lltype.Array(rffi.DOUBLE, hints={'nolength': True}) def f(d): va = lltype.malloc(T, d, flavor='raw', zero=True) for j in range(d): va[j] = float(j) i = 0 accum = 0 while i < d: myjitdriver.jit_merge_point() accum += va[i] i += 1 lltype.free(va, flavor='raw') return accum res = self.meta_interp(f, [60]) assert res == f(60) == sum(range(60)) def test_constant_expand(self): myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=True) T = lltype.Array(rffi.DOUBLE, hints={'nolength': True}) def f(d): va = lltype.malloc(T, d, flavor='raw', zero=True) i = 0 while i < d: myjitdriver.jit_merge_point() va[i] = va[i] + 34.5 i += 1 val = va[0] lltype.free(va, flavor='raw') return val res = self.meta_interp(f, [60]) assert res == f(60) == 34.5 def test_constant_expand_vec_all(self): myjitdriver = JitDriver(greens = [], reds = 'auto') T = lltype.Array(rffi.DOUBLE, hints={'nolength': True}) def f(d): va = lltype.malloc(T, d, flavor='raw', zero=True) i = 0 while i < d: myjitdriver.jit_merge_point() if not (i < d): raise IndexError va[i] = va[i] + 34.5 i += 1 val = va[0] lltype.free(va, flavor='raw') return val res = self.meta_interp(f, [60], vec_all=True) assert res == f(60) == 34.5 def test_variable_expand(self): myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=True) T = lltype.Array(rffi.DOUBLE, hints={'nolength': True}) def f(d,variable): va = lltype.malloc(T, d, 
flavor='raw', zero=True) i = 0 while i < d: myjitdriver.jit_merge_point() va[i] = va[i] + variable i += 1 val = va[0] lltype.free(va, flavor='raw') return val res = self.meta_interp(f, [60,58.4547]) assert res == f(60,58.4547) == 58.4547 @py.test.mark.parametrize('vec,vec_all',[(False,True),(True,False),(True,True),(False,False)]) def test_accum(self, vec, vec_all): myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=vec) T = lltype.Array(rffi.DOUBLE) def f(d, value): va = lltype.malloc(T, d, flavor='raw', zero=True) for i in range(d): va[i] = value r = 0 i = 0 k = d + 2 # in this case a guard k <= d is inserted which fails right away! while i < d: myjitdriver.jit_merge_point() if not(i < k): k -= 1 r += va[i] i += 1 lltype.free(va, flavor='raw') return r res = self.meta_interp(f, [60,0.5], vec=vec, vec_all=vec_all) assert res == f(60,0.5) == 60*0.5 @py.test.mark.parametrize('i',[15]) def test_array_bounds_check_elimination(self,i): myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=True) T = lltype.Array(rffi.INT, hints={'nolength': True}) def f(d): va = lltype.malloc(T, d, flavor='raw', zero=True) vb = lltype.malloc(T, d, flavor='raw', zero=True) for j in range(d): va[j] = rffi.r_int(j) vb[j] = rffi.r_int(j) i = 0 while i < d: myjitdriver.jit_merge_point() if i < 0: raise IndexError if i >= d: raise IndexError a = va[i] if i < 0: raise IndexError if i >= d: raise IndexError b = vb[i] ec = intmask(a)+intmask(b) if i < 0: raise IndexError if i >= d: raise IndexError va[i] = rffi.r_int(ec) i += 1 lltype.free(va, flavor='raw') lltype.free(vb, flavor='raw') return 0 res = self.meta_interp(f, [i]) assert res == f(i) @py.test.mark.parametrize('i,v1,v2',[(25,2.5,0.3),(25,2.5,0.3)]) def test_list_vectorize(self,i,v1,v2): myjitdriver = JitDriver(greens = [], reds = 'auto') class ListF(object): def __init__(self, size, init): self.list = [init] * size def __getitem__(self, key): return self.list[key] def __setitem__(self, key, value): self.list[key] = 
value def f(d, v1, v2): a = ListF(d, v1) b = ListF(d, v2) i = 0 while i < d: myjitdriver.jit_merge_point() a[i] = a[i] + b[i] i += 1 s = 0 for i in range(d): s += a[i] return s res = self.meta_interp(f, [i,v1,v2], vec_all=True) # sum helps to generate the rounding error of floating points # return 69.999 ... instead of 70, (v1+v2)*i == 70.0 assert res == f(i,v1,v2) == sum([v1+v2]*i) @py.test.mark.parametrize('size',[12]) def test_body_multiple_accesses(self, size): myjitdriver = JitDriver(greens = [], reds = 'auto') T = lltype.Array(rffi.CHAR, hints={'nolength': True}) def f(size): vector_a = malloc(T, size) vector_b = malloc(T, size) i = 0 while i < size: myjitdriver.jit_merge_point() # should unroll and group them correctly c1 = vector_a[i] c2 = vector_a[i+1] c3 = vector_a[i+2] # vector_b[i] = c1 vector_b[i+1] = c2 vector_b[i+2] = c3 i += 3 free(vector_a) free(vector_b) return 0 res = self.meta_interp(f, [size], vec_all=True) assert res == f(size) def test_max_byte(self): myjitdriver = JitDriver(greens = [], reds = 'auto') T = lltype.Array(rffi.SIGNEDCHAR, hints={'nolength': True}) def f(size): vector_a = malloc(T, size) for i in range(size): vector_a[i] = rffi.r_signedchar(1) for i in range(size/2,size): vector_a[i] = rffi.r_signedchar(i) i = 0 max = -127 while i < size: myjitdriver.jit_merge_point() a = intmask(vector_a[i]) a = a & 255 if a > max: max = a i += 1 free(vector_a) return max res = self.meta_interp(f, [128], vec_all=True) assert res == f(128) def combinations(types, operators): import itertools size = 22 class Typ(object): def __init__(self, type, storecast, loadcast): self.type = type self.storecast = storecast self.loadcast = loadcast def __repr__(self): return self.type.replace(".","_") sizes = [22] for t1, t2, t3, op, size in itertools.product(types, types, types, operators, sizes): yield (size, Typ(*t1), Typ(*t2), Typ(*t3), op[0], op[1]) types = [('rffi.DOUBLE', 'float', 'float'), ('rffi.SIGNED', 'int', 'int'), ('rffi.FLOAT', 
'rffi.r_singlefloat', 'float'), ] operators = [('add', '+'), ] for size, typ1, typ2, typ3, opname, op in combinations(types, operators): _source = """ def test_binary_operations_{name}(self): myjitdriver = JitDriver(greens = [], reds = 'auto') T1 = lltype.Array({type_a}, hints={{'nolength': True}}) T2 = lltype.Array({type_b}, hints={{'nolength': True}}) T3 = lltype.Array({type_c}, hints={{'nolength': True}}) def f(size): vector_a = lltype.malloc(T1, size, flavor='raw') vector_b = lltype.malloc(T2, size, flavor='raw') vector_c = lltype.malloc(T3, size, flavor='raw') for i in range(size): vector_a[i] = {type_a_storecast}(i+1) for i in range(size): vector_b[i] = {type_b_storecast}(i+1) for i in range(size): vector_c[i] = {type_c_storecast}(i+1) i = 0 while i < size: myjitdriver.jit_merge_point() a = {type_a_loadcast}(vector_a[i]) b = {type_b_loadcast}(vector_b[i]) c = (a {op} b) vector_c[i] = {type_c_storecast}(c) i += 1 lltype.free(vector_a, flavor='raw') lltype.free(vector_b, flavor='raw') c = {type_c_loadcast}(0.0) for i in range(size): c += {type_c_loadcast}(vector_c[i]) lltype.free(vector_c, flavor='raw') return c res = self.meta_interp(f, [{size}], vec_all=True) assert res == f({size}) """ env = { 'type_a': typ1.type, 'type_b': typ2.type, 'type_c': typ3.type, 'type_a_loadcast': typ1.loadcast, 'type_b_loadcast': typ2.loadcast, 'type_c_loadcast': typ3.loadcast, 'type_a_storecast': typ1.storecast, 'type_b_storecast': typ2.storecast, 'type_c_storecast': typ3.storecast, 'size': size, 'name': str(typ1) + '__' + str(typ2) + '__' + str(typ3) + \ '__' + str(size) + '__' + opname, 'op': op, } formatted = _source.format(**env) exec py.code.Source(formatted).compile() def test_binary_operations_aa(self): myjitdriver = JitDriver(greens = [], reds = 'auto') T1 = lltype.Array(rffi.DOUBLE, hints={'nolength': True}) T3 = lltype.Array(rffi.SIGNED, hints={'nolength': True}) def f(size): vector_a = lltype.malloc(T1, size, flavor='raw', zero=True) vector_b = lltype.malloc(T1, size, 
flavor='raw', zero=True) vector_c = lltype.malloc(T3, size, flavor='raw', zero=True) i = 0 while i < size: myjitdriver.jit_merge_point() a = (vector_a[i]) b = (vector_b[i]) c = (a + b) vector_c[i] = int(c) i += 1 free(vector_a) free(vector_b) #c = 0.0 #for i in range(size): # c += vector_c[i] lltype.free(vector_c, flavor='raw') return 0 res = self.meta_interp(f, [22], vec_all=True) assert res == f(22) def test_guard_test_location_assert(self): myjitdriver = JitDriver(greens = [], reds = 'auto') T1 = lltype.Array(rffi.SIGNED, hints={'nolength': True}) def f(size): vector_a = lltype.malloc(T1, size, flavor='raw', zero=True) for i in range(size): vector_a[i] = 0 i = 0 breaks = 0 while i < size: myjitdriver.jit_merge_point() a = vector_a[i] if a: breaks = 1 break del a i += 1 lltype.free(vector_a, flavor='raw') return breaks res = self.meta_interp(f, [22], vec_all=True, vec_guard_ratio=5) assert res == f(22) class TestLLtype(LLJitMixin, VectorizeTests): pass
# Copyright 2014 Objectif Libre # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Volume driver common utilities for HP MSA Storage array """ import base64 import uuid from oslo.config import cfg from cinder import exception from cinder.openstack.common import log as logging from cinder.volume.drivers.san.hp import hp_msa_client as msa LOG = logging.getLogger(__name__) hpmsa_opt = [ cfg.StrOpt('msa_vdisk', default='OpenStack', help="The VDisk to use for volume creation."), ] CONF = cfg.CONF CONF.register_opts(hpmsa_opt) class HPMSACommon(object): VERSION = "0.1" stats = {} def __init__(self, config): self.config = config self.client = msa.HPMSAClient(self.config.san_ip, self.config.san_login, self.config.san_password) self.vdisk = self.config.msa_vdisk def get_version(self): return self.VERSION def do_setup(self, context): self.client_login() self._validate_vdisks() self.client_logout() def client_login(self): LOG.debug("Connecting to MSA") try: self.client.login() except msa.HPMSAConnectionError as ex: msg = (_("Failed to connect to MSA Array (%(host)s): %(err)s") % {'host': self.config.san_ip, 'err': ex}) LOG.error(msg) raise exception.HPMSAConnectionError(reason=msg) except msa.HPMSAAuthenticationError: msg = _("Failed to log on MSA Array (invalid login?)") LOG.error(msg) raise exception.HPMSAConnectionError(reason=msg) def _validate_vdisks(self): if not self.client.vdisk_exists(self.vdisk): self.client_logout() raise exception.HPMSAInvalidVDisk(vdisk=self.vdisk) def 
client_logout(self): self.client.logout() LOG.debug("Disconnected from MSA Array") def _get_vol_name(self, volume_id): volume_name = self._encode_name(volume_id) return "v%s" % volume_name def _get_snap_name(self, snapshot_id): snapshot_name = self._encode_name(snapshot_id) return "s%s" % snapshot_name def _encode_name(self, name): """Get converted MSA volume name. Converts the openstack volume id from ecffc30f-98cb-4cf5-85ee-d7309cc17cd2 to 7P_DD5jLTPWF7tcwnMF80g We convert the 128 bits of the uuid into a 24character long base64 encoded string. This still exceeds the limit of 20 characters so we truncate the name later. """ uuid_str = name.replace("-", "") vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str) vol_encoded = base64.b64encode(vol_uuid.bytes) vol_encoded = vol_encoded.replace('=', '') # + is not a valid character for MSA vol_encoded = vol_encoded.replace('+', '.') # since we use http URLs to send paramters, '/' is not an acceptable # parameter vol_encoded = vol_encoded.replace('/', '_') # NOTE(gpocentek): we limit the size to 20 characters since the array # doesn't support more than that for now. Duplicates should happen very # rarely. 
# We return 19 chars here because the _get_{vol,snap}_name functions # prepend a character return vol_encoded[:19] def check_flags(self, options, required_flags): for flag in required_flags: if not getattr(options, flag, None): msg = _('%s configuration option is not set') % flag LOG.error(msg) raise exception.InvalidInput(reason=msg) def create_volume(self, volume): volume_id = self._get_vol_name(volume['id']) LOG.debug("Create Volume (%(display_name)s: %(name)s %(id)s)" % {'display_name': volume['display_name'], 'name': volume['name'], 'id': volume_id}) # use base64 to encode the volume name (UUID is too long for MSA) volume_name = self._get_vol_name(volume['id']) volume_size = "%dGB" % volume['size'] try: metadata = self.client.create_volume(self.config.msa_vdisk, volume_name, volume_size) except msa.HPMSARequestError as ex: LOG.error(ex) raise exception.Invalid(ex) return metadata def _assert_enough_space_for_copy(self, volume_size): """The MSA creates a snap pool before trying to copy the volume. The pool is 5.27GB or 20% of the volume size, whichever is larger. Verify that we have enough space for the pool and then copy """ pool_size = max(volume_size * 0.2, 5.27) required_size = pool_size + volume_size if required_size > self.stats['free_capacity_gb']: raise exception.HPMSANotEnoughSpace(vdisk=self.vdisk) def _assert_source_detached(self, volume): """The MSA requires a volume to be dettached to clone it. Make sure that the volume is not in use when trying to copy it. 
""" if volume['status'] != "available" or \ volume['attach_status'] == "attached": msg = _("Volume must be detached to perform a clone operation.") LOG.error(msg) raise exception.VolumeAttached(volume_id=volume['id']) def create_cloned_volume(self, volume, src_vref): self.get_volume_stats(True) self._assert_enough_space_for_copy(volume['size']) self._assert_source_detached(src_vref) LOG.debug("Cloning Volume %(source_id)s (%(dest_id)s)" % {'source_id': volume['source_volid'], 'dest_id': volume['id']}) orig_name = self._get_vol_name(volume['source_volid']) dest_name = self._get_vol_name(volume['id']) try: self.client.copy_volume(orig_name, dest_name, self.config.msa_vdisk) except msa.HPMSARequestError as ex: LOG.error(ex) raise exception.Invalid(ex) return None def create_volume_from_snapshot(self, volume, snapshot): self.get_volume_stats(True) self._assert_enough_space_for_copy(volume['size']) LOG.debug("Creating Volume from snapshot %(source_id)s " "(%(dest_id)s)" % {'source_id': snapshot['id'], 'dest_id': volume['id']}) orig_name = self._get_snap_name(snapshot['id']) dest_name = self._get_vol_name(volume['id']) try: self.client.copy_volume(orig_name, dest_name, self.config.msa_vdisk) except msa.HPMSARequestError as ex: LOG.error(ex) raise exception.Invalid(ex) return None def delete_volume(self, volume): LOG.debug("Deleting Volume (%s)" % volume['id']) volume_name = self._get_vol_name(volume['id']) try: self.client.delete_volume(volume_name) except msa.HPMSARequestError as ex: LOG.error(ex) # if the volume wasn't found, ignore the error if 'The volume was not found on this system.' 
in ex: return raise exception.Invalid(ex) def get_volume_stats(self, refresh): if refresh: self._update_volume_stats() return self.stats def _update_volume_stats(self): # storage_protocol and volume_backend_name are # set in the child classes stats = {'driver_version': self.VERSION, 'free_capacity_gb': 'unknown', 'reserved_percentage': 0, 'storage_protocol': None, 'total_capacity_gb': 'unknown', 'QoS_support': False, 'vendor_name': 'Hewlett-Packard', 'volume_backend_name': None} try: vdisk_stats = self.client.vdisk_stats(self.config.msa_vdisk) stats.update(vdisk_stats) except msa.HPMSARequestError: err = (_("Unable to get stats for VDisk (%s)") % self.config.msa_vdisk) LOG.error(err) raise exception.Invalid(reason=err) self.stats = stats def _assert_connector_ok(self, connector): if not connector['wwpns']: msg = _("Connector doesn't provide wwpns") LOG.error(msg) raise exception.InvalidInput(reason=msg) def map_volume(self, volume, connector): self._assert_connector_ok(connector) volume_name = self._get_vol_name(volume['id']) try: data = self.client.map_volume(volume_name, connector['wwpns']) return data except msa.HPMSARequestError as ex: LOG.error(ex) raise exception.Invalid(ex) def unmap_volume(self, volume, connector): self._assert_connector_ok(connector) volume_name = self._get_vol_name(volume['id']) try: self.client.unmap_volume(volume_name, connector['wwpns']) except msa.HPMSARequestError as ex: LOG.error(ex) raise exception.Invalid(ex) def get_active_fc_target_ports(self): return self.client.get_active_fc_target_ports() def create_snapshot(self, snapshot): LOG.debug("Creating Snapshot from %(volume_id)s (%(snap_id)s)" % {'volume_id': snapshot['volume_id'], 'snap_id': snapshot['id']}) snap_name = self._get_snap_name(snapshot['id']) vol_name = self._get_vol_name(snapshot['volume_id']) try: self.client.create_snapshot(vol_name, snap_name) except msa.HPMSARequestError as ex: LOG.error(ex) raise exception.Invalid(ex) def delete_snapshot(self, snapshot): 
snap_name = self._get_snap_name(snapshot['id']) LOG.debug("Deleting Snapshot (%s)" % snapshot['id']) try: self.client.delete_snapshot(snap_name) except msa.HPMSARequestError as ex: LOG.error(ex) # if the volume wasn't found, ignore the error if 'The volume was not found on this system.' in ex: return raise exception.Invalid(ex) def extend_volume(self, volume, new_size): volume_name = self._get_vol_name(volume['id']) old_size = volume['size'] growth_size = int(new_size) - old_size LOG.debug("Extending Volume %(volume_name)s from %(old_size)s to " "%(new_size)s, by %(growth_size)s GB." % {'volume_name': volume_name, 'old_size': old_size, 'new_size': new_size, 'growth_size': growth_size}) try: self.client.extend_volume(volume_name, "%dGB" % growth_size) except msa.HPMSARequestError as ex: LOG.error(ex) raise exception.Invalid(ex)
# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from time import sleep from datetime import datetime from proboscis import after_class from proboscis import before_class from proboscis import SkipTest from proboscis import test from proboscis.asserts import assert_equal from proboscis.asserts import assert_raises from proboscis.asserts import assert_true from proboscis.asserts import assert_not_equal from proboscis.decorators import time_out from trove.common.utils import poll_until from trove.tests.api.backups import RestoreUsingBackup from trove.tests.api.instances import assert_unprocessable from trove.tests.api.instances import InstanceTestInfo from trove.tests.api.instances import instance_info from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE from trove.tests.api.instances import TIMEOUT_INSTANCE_DELETE from trove.tests.api.instances import WaitForGuestInstallationToFinish from trove.tests.config import CONFIG from trove.tests.util import create_dbaas_client from trove.tests.util.check import AttrCheck from trove.tests.util.check import CollectionCheck from trove.tests.util.check import TypeCheck from trove.tests.util.mysql import create_mysql_connection from trove.tests.util.users import Requirements from troveclient.compat import exceptions GROUP = "dbaas.api.configurations" GROUP_CONFIG_DEFINE = "dbaas.api.configurations.define" CONFIG_NAME = "test_configuration" CONFIG_DESC = "configuration 
description" configuration_default = None configuration_info = None configuration_href = None configuration_instance = InstanceTestInfo() configuration_instance_id = None sql_variables = [ 'key_buffer_size', 'connect_timeout', 'join_buffer_size', ] def _is_valid_timestamp(time_string): try: datetime.strptime(time_string, "%Y-%m-%dT%H:%M:%S") except ValueError: return False return True # helper methods to validate configuration is applied to instance def _execute_query(host, user_name, password, query): print(host, user_name, password, query) with create_mysql_connection(host, user_name, password) as db: result = db.execute(query) return result assert_true(False, "something went wrong in the sql connection") def _get_address(instance_id): result = instance_info.dbaas_admin.mgmt.instances.show(instance_id) return result.ip[0] def _test_configuration_is_applied_to_instance(instance, configuration_id): if CONFIG.fake_mode: raise SkipTest("configuration from sql does not work in fake mode") instance_test = instance_info.dbaas.instances.get(instance.id) assert_equal(configuration_id, instance_test.configuration['id']) if configuration_id: testconfig_info = instance_info.dbaas.configurations.get( configuration_id) else: testconfig_info = instance_info.dbaas.instance.configuration( instance.id) testconfig_info['configuration'] conf_instances = instance_info.dbaas.configurations.instances( configuration_id) config_instance_ids = [inst.id for inst in conf_instances] assert_true(instance_test.id in config_instance_ids) cfg_names = testconfig_info.values.keys() host = _get_address(instance.id) for user in instance.users: username = user['name'] password = user['password'] concat_variables = "','".join(cfg_names) query = ("show variables where Variable_name " "in ('%s');" % concat_variables) actual_values = _execute_query(host, username, password, query) print("actual_values %s" % actual_values) print("testconfig_info.values %s" % testconfig_info.values) 
assert_true(len(actual_values) == len(cfg_names)) # check the configs exist attrcheck = AttrCheck() allowed_attrs = [actual_key for actual_key, actual_value in actual_values] attrcheck.contains_allowed_attrs( testconfig_info.values, allowed_attrs, msg="Configurations parameters") def _get_parameter_type(name): instance_info.dbaas.configuration_parameters.get_parameter( instance_info.dbaas_datastore, instance_info.dbaas_datastore_version, name) resp, body = instance_info.dbaas.client.last_response print(resp) print(body) return json.loads(body)['type'] # check the config values are correct for key, value in actual_values: key_type = _get_parameter_type(key) # mysql returns 'ON' and 'OFF' for True and False respectively if value == 'ON': converted_key_value = (str(key), 1) elif value == 'OFF': converted_key_value = (str(key), 0) else: if key_type == 'integer': value = int(value) converted_key_value = (str(key), value) print("converted_key_value: %s" % str(converted_key_value)) assert_true(converted_key_value in testconfig_info.values.items()) class ConfigurationsTestBase(object): @staticmethod def expected_instance_datastore_configs(instance_id): """Given an instance retrieve the expected test configurations for instance's datastore. """ instance = instance_info.dbaas.instances.get(instance_id) datastore_type = instance.datastore['type'] datastore_test_configs = CONFIG.get(datastore_type, {}) return datastore_test_configs.get("configurations", {}) @staticmethod def expected_default_datastore_configs(): """Returns the expected test configurations for the default datastore defined in the Test Config as dbaas_datastore. 
""" default_datatstore = CONFIG.get('dbaas_datastore', None) datastore_test_configs = CONFIG.get(default_datatstore, {}) return datastore_test_configs.get("configurations", {}) @test(depends_on_classes=[WaitForGuestInstallationToFinish], runs_after=[RestoreUsingBackup], groups=[GROUP, GROUP_CONFIG_DEFINE]) class CreateConfigurations(ConfigurationsTestBase): @test def test_expected_configurations_parameters(self): """Test get expected configurations parameters.""" allowed_attrs = ["configuration-parameters"] instance_info.dbaas.configuration_parameters.parameters( instance_info.dbaas_datastore, instance_info.dbaas_datastore_version) resp, body = instance_info.dbaas.client.last_response attrcheck = AttrCheck() config_parameters_dict = json.loads(body) attrcheck.contains_allowed_attrs( config_parameters_dict, allowed_attrs, msg="Configurations parameters") # sanity check that a few options are in the list config_params_list = config_parameters_dict['configuration-parameters'] config_param_keys = [] for param in config_params_list: config_param_keys.append(param['name']) expected_configs = self.expected_default_datastore_configs() expected_config_params = expected_configs.get('parameters_list') # check for duplicate configuration parameters msg = "check for duplicate configuration parameters" assert_equal(len(config_param_keys), len(set(config_param_keys)), msg) for expected_config_item in expected_config_params: assert_true(expected_config_item in config_param_keys) @test def test_expected_get_configuration_parameter(self): # tests get on a single parameter to verify it has expected attributes param_name = 'key_buffer_size' allowed_config_params = ['name', 'restart_required', 'max', 'min', 'type', 'deleted', 'deleted_at', 'datastore_version_id'] param = instance_info.dbaas.configuration_parameters.get_parameter( instance_info.dbaas_datastore, instance_info.dbaas_datastore_version, param_name) resp, body = instance_info.dbaas.client.last_response print("params: %s" % 
param) print("resp: %s" % resp) print("body: %s" % body) attrcheck = AttrCheck() config_parameter_dict = json.loads(body) print("config_parameter_dict: %s" % config_parameter_dict) attrcheck.contains_allowed_attrs( config_parameter_dict, allowed_config_params, msg="Get Configuration parameter") assert_equal(param_name, config_parameter_dict['name']) with TypeCheck('ConfigurationParameter', param) as parameter: parameter.has_field('name', basestring) parameter.has_field('restart_required', bool) parameter.has_field('max', int) parameter.has_field('min', int) parameter.has_field('type', basestring) parameter.has_field('datastore_version_id', unicode) @test def test_configurations_create_invalid_values(self): """Test create configurations with invalid values.""" values = '{"this_is_invalid": 123}' try: instance_info.dbaas.configurations.create( CONFIG_NAME, values, CONFIG_DESC) except exceptions.UnprocessableEntity: resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 422) @test def test_configurations_create_invalid_value_type(self): """Test create configuration with invalild value type.""" values = '{"key_buffer_size": "this is a string not int"}' assert_unprocessable(instance_info.dbaas.configurations.create, CONFIG_NAME, values, CONFIG_DESC) @test def test_configurations_create_value_out_of_bounds(self): """Test create configuration with value out of bounds.""" expected_configs = self.expected_default_datastore_configs() values = json.dumps(expected_configs.get('out_of_bounds_over')) assert_unprocessable(instance_info.dbaas.configurations.create, CONFIG_NAME, values, CONFIG_DESC) values = json.dumps(expected_configs.get('out_of_bounds_under')) assert_unprocessable(instance_info.dbaas.configurations.create, CONFIG_NAME, values, CONFIG_DESC) @test def test_valid_configurations_create(self): # create a configuration with valid parameters expected_configs = self.expected_default_datastore_configs() values = 
json.dumps(expected_configs.get('valid_values')) expected_values = json.loads(values) result = instance_info.dbaas.configurations.create( CONFIG_NAME, values, CONFIG_DESC, datastore=instance_info.dbaas_datastore, datastore_version=instance_info.dbaas_datastore_version) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 200) with TypeCheck('Configuration', result) as configuration: configuration.has_field('name', basestring) configuration.has_field('description', basestring) configuration.has_field('values', dict) configuration.has_field('datastore_name', basestring) configuration.has_field('datastore_version_id', unicode) configuration.has_field('datastore_version_name', basestring) global configuration_info configuration_info = result assert_equal(configuration_info.name, CONFIG_NAME) assert_equal(configuration_info.description, CONFIG_DESC) assert_equal(configuration_info.values, expected_values) @test(runs_after=[test_valid_configurations_create]) def test_appending_to_existing_configuration(self): # test being able to update and insert new parameter name and values # to an existing configuration expected_configs = self.expected_default_datastore_configs() values = json.dumps(expected_configs.get('appending_values')) # ensure updated timestamp is different than created if not CONFIG.fake_mode: sleep(1) instance_info.dbaas.configurations.edit(configuration_info.id, values) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 200) @test(runs_after=[CreateConfigurations], groups=[GROUP, GROUP_CONFIG_DEFINE]) class AfterConfigurationsCreation(ConfigurationsTestBase): @test def test_assign_configuration_to_invalid_instance(self): # test assigning to an instance that does not exist invalid_id = "invalid-inst-id" try: instance_info.dbaas.instances.modify(invalid_id, configuration_info.id) except exceptions.NotFound: resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 404) @test def 
test_assign_configuration_to_valid_instance(self): # test assigning a configuration to an instance print("instance_info.id: %s" % instance_info.id) print("configuration_info: %s" % configuration_info) print("configuration_info.id: %s" % configuration_info.id) config_id = configuration_info.id instance_info.dbaas.instances.modify(instance_info.id, configuration=config_id) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 202) @test def test_assign_name_to_instance_using_patch(self): # test assigning a name to an instance new_name = 'new_name_1' report = CONFIG.get_report() report.log("instance_info.id: %s" % instance_info.id) report.log("instance name:%s" % instance_info.name) report.log("instance new name:%s" % new_name) instance_info.dbaas.instances.edit(instance_info.id, name=new_name) assert_equal(202, instance_info.dbaas.last_http_code) check = instance_info.dbaas.instances.get(instance_info.id) assert_equal(200, instance_info.dbaas.last_http_code) assert_equal(check.name, new_name) # Restore instance name instance_info.dbaas.instances.edit(instance_info.id, name=instance_info.name) assert_equal(202, instance_info.dbaas.last_http_code) @test def test_assign_configuration_to_invalid_instance_using_patch(self): # test assign config group to an invalid instance invalid_id = "invalid-inst-id" assert_raises(exceptions.NotFound, instance_info.dbaas.instances.edit, invalid_id, configuration=configuration_info.id) @test(depends_on=[test_assign_configuration_to_valid_instance]) def test_assign_configuration_to_instance_with_config(self): # test assigning a configuration to an instance that # already has an assigned configuration config_id = configuration_info.id assert_raises(exceptions.BadRequest, instance_info.dbaas.instances.modify, instance_info.id, configuration=config_id) @test(depends_on=[test_assign_configuration_to_valid_instance]) @time_out(30) def test_get_configuration_details_from_instance_validation(self): # validate that the 
configuration was applied correctly to the instance print("instance_info.id: %s" % instance_info.id) inst = instance_info.dbaas.instances.get(instance_info.id) configuration_id = inst.configuration['id'] print("configuration_info: %s" % configuration_id) assert_not_equal(None, inst.configuration['id']) _test_configuration_is_applied_to_instance(instance_info, configuration_id) def test_configurations_get(self): # test that the instance shows up on the assigned configuration result = instance_info.dbaas.configurations.get(configuration_info.id) assert_equal(configuration_info.id, result.id) assert_equal(configuration_info.name, result.name) assert_equal(configuration_info.description, result.description) # check the result field types with TypeCheck("configuration", result) as check: check.has_field("id", basestring) check.has_field("name", basestring) check.has_field("description", basestring) check.has_field("values", dict) check.has_field("created", basestring) check.has_field("updated", basestring) check.has_field("instance_count", int) print(result.values) # check for valid timestamps assert_true(_is_valid_timestamp(result.created)) assert_true(_is_valid_timestamp(result.updated)) # check that created and updated timestamps differ, since # test_appending_to_existing_configuration should have changed the # updated timestamp if not CONFIG.fake_mode: assert_not_equal(result.created, result.updated) assert_equal(result.instance_count, 1) with CollectionCheck("configuration_values", result.values) as check: # check each item has the correct type according to the rules for (item_key, item_val) in result.values.iteritems(): print("item_key: %s" % item_key) print("item_val: %s" % item_val) dbaas = instance_info.dbaas param = dbaas.configuration_parameters.get_parameter( instance_info.dbaas_datastore, instance_info.dbaas_datastore_version, item_key) if param.type == 'integer': check.has_element(item_key, int) if param.type == 'string': check.has_element(item_key, 
basestring) if param.type == 'boolean': check.has_element(item_key, bool) # Test to make sure that another user is not able to GET this config reqs = Requirements(is_admin=False) test_auth_user = instance_info.user.auth_user other_user = CONFIG.users.find_user(reqs, black_list=[test_auth_user]) other_user_tenant_id = other_user.tenant_id client_tenant_id = instance_info.user.tenant_id if other_user_tenant_id == client_tenant_id: other_user = CONFIG.users.find_user(reqs, black_list=[ instance_info.user.auth_user, other_user]) print(other_user) print(other_user.__dict__) other_client = create_dbaas_client(other_user) assert_raises(exceptions.NotFound, other_client.configurations.get, configuration_info.id) @test(runs_after=[AfterConfigurationsCreation], groups=[GROUP, GROUP_CONFIG_DEFINE]) class ListConfigurations(ConfigurationsTestBase): @test def test_configurations_list(self): # test listing configurations show up result = instance_info.dbaas.configurations.list() for conf in result: with TypeCheck("Configuration", conf) as check: check.has_field('id', basestring) check.has_field('name', basestring) check.has_field('description', basestring) check.has_field('datastore_version_id', basestring) check.has_field('datastore_version_name', basestring) check.has_field('datastore_name', basestring) exists = [config for config in result if config.id == configuration_info.id] assert_equal(1, len(exists)) configuration = exists[0] assert_equal(configuration.id, configuration_info.id) assert_equal(configuration.name, configuration_info.name) assert_equal(configuration.description, configuration_info.description) @test def test_configurations_list_for_instance(self): # test getting an instance shows the configuration assigned shows up instance = instance_info.dbaas.instances.get(instance_info.id) assert_equal(instance.configuration['id'], configuration_info.id) assert_equal(instance.configuration['name'], configuration_info.name) # expecting two things in links, href and 
bookmark assert_equal(2, len(instance.configuration['links'])) link = instance.configuration['links'][0] global configuration_href configuration_href = link['href'] @test def test_get_default_configuration_on_instance(self): # test the api call to get the default template of an instance exists result = instance_info.dbaas.instances.configuration(instance_info.id) global configuration_default configuration_default = result assert_not_equal(None, result.configuration) @test def test_changing_configuration_with_nondynamic_parameter(self): # test that changing a non-dynamic parameter is applied to instance # and show that the instance requires a restart expected_configs = self.expected_default_datastore_configs() values = json.dumps(expected_configs.get('nondynamic_parameter')) instance_info.dbaas.configurations.update(configuration_info.id, values) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 202) instance_info.dbaas.configurations.get(configuration_info.id) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 200) @test(depends_on=[test_changing_configuration_with_nondynamic_parameter]) @time_out(20) def test_waiting_for_instance_in_restart_required(self): def result_is_not_active(): instance = instance_info.dbaas.instances.get( instance_info.id) if instance.status == "ACTIVE": return False else: return True poll_until(result_is_not_active) instance = instance_info.dbaas.instances.get(instance_info.id) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 200) print(instance.status) assert_equal('RESTART_REQUIRED', instance.status) @test(depends_on=[test_waiting_for_instance_in_restart_required]) def test_restart_service_should_return_active(self): # test that after restarting the instance it becomes active instance_info.dbaas.instances.restart(instance_info.id) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 202) def result_is_active(): instance = 
instance_info.dbaas.instances.get( instance_info.id) if instance.status == "ACTIVE": return True else: assert_equal("REBOOT", instance.status) return False poll_until(result_is_active) @test(depends_on=[test_restart_service_should_return_active]) @time_out(30) def test_get_configuration_details_from_instance_validation(self): # validate that the configuraiton was applied correctly to the instance inst = instance_info.dbaas.instances.get(instance_info.id) configuration_id = inst.configuration['id'] assert_not_equal(None, inst.configuration['id']) _test_configuration_is_applied_to_instance(instance_info, configuration_id) @test(depends_on=[test_configurations_list]) def test_compare_list_and_details_timestamps(self): # compare config timestamps between list and details calls result = instance_info.dbaas.configurations.list() list_config = [config for config in result if config.id == configuration_info.id] assert_equal(1, len(list_config)) details_config = instance_info.dbaas.configurations.get( configuration_info.id) assert_equal(list_config[0].created, details_config.created) assert_equal(list_config[0].updated, details_config.updated) @test(runs_after=[ListConfigurations], groups=[GROUP, GROUP_CONFIG_DEFINE]) class StartInstanceWithConfiguration(ConfigurationsTestBase): @test def test_start_instance_with_configuration(self): # test that a new instance will apply the configuration on create global configuration_instance databases = [] databases.append({"name": "firstdbconfig", "character_set": "latin2", "collate": "latin2_general_ci"}) databases.append({"name": "db2"}) configuration_instance.databases = databases users = [] users.append({"name": "liteconf", "password": "liteconfpass", "databases": [{"name": "firstdbconfig"}]}) configuration_instance.users = users configuration_instance.name = "TEST_" + str(datetime.now()) + "_config" flavor_href = instance_info.dbaas_flavor_href configuration_instance.dbaas_flavor_href = flavor_href configuration_instance.volume = 
instance_info.volume result = instance_info.dbaas.instances.create( configuration_instance.name, configuration_instance.dbaas_flavor_href, configuration_instance.volume, configuration_instance.databases, configuration_instance.users, availability_zone="nova", configuration=configuration_href) assert_equal(200, instance_info.dbaas.last_http_code) assert_equal("BUILD", result.status) configuration_instance.id = result.id @test(depends_on_classes=[StartInstanceWithConfiguration], runs_after_groups=['dbaas.api.backups'], groups=[GROUP]) class WaitForConfigurationInstanceToFinish(ConfigurationsTestBase): @test @time_out(TIMEOUT_INSTANCE_CREATE) def test_instance_with_configuration_active(self): # wait for the instance to become active def result_is_active(): instance = instance_info.dbaas.instances.get( configuration_instance.id) if instance.status == "ACTIVE": return True else: assert_equal("BUILD", instance.status) return False poll_until(result_is_active) @test(depends_on=[test_instance_with_configuration_active]) @time_out(30) def test_get_configuration_details_from_instance_validation(self): # validate that the configuraiton was applied correctly to the instance inst = instance_info.dbaas.instances.get(configuration_instance.id) configuration_id = inst.configuration['id'] assert_not_equal(None, inst.configuration['id']) _test_configuration_is_applied_to_instance(configuration_instance, configuration_id) @test(runs_after=[WaitForConfigurationInstanceToFinish], groups=[GROUP]) class DeleteConfigurations(ConfigurationsTestBase): @before_class def setUp(self): # need to store the parameter details that will be deleted config_param_name = sql_variables[1] instance_info.dbaas.configuration_parameters.get_parameter( instance_info.dbaas_datastore, instance_info.dbaas_datastore_version, config_param_name) resp, body = instance_info.dbaas.client.last_response print(resp) print(body) self.config_parameter_dict = json.loads(body) @after_class(always_run=True) def 
tearDown(self): # need to "undelete" the parameter that was deleted from the mgmt call ds = instance_info.dbaas_datastore ds_v = instance_info.dbaas_datastore_version version = instance_info.dbaas.datastore_versions.get( ds, ds_v) client = instance_info.dbaas_admin.mgmt_configs print(self.config_parameter_dict) client.create(version.id, self.config_parameter_dict['name'], self.config_parameter_dict['restart_required'], self.config_parameter_dict['type'], self.config_parameter_dict['max'], self.config_parameter_dict['min']) @test def test_delete_invalid_configuration_not_found(self): # test deleting a configuration that does not exist throws exception invalid_configuration_id = "invalid-config-id" assert_raises(exceptions.NotFound, instance_info.dbaas.configurations.delete, invalid_configuration_id) @test(depends_on=[test_delete_invalid_configuration_not_found]) def test_delete_configuration_parameter_with_mgmt_api(self): # testing a param that is assigned to an instance can be deleted # and doesn't affect an unassign later. So we delete a parameter # that is used by a test (connect_timeout) ds = instance_info.dbaas_datastore ds_v = instance_info.dbaas_datastore_version version = instance_info.dbaas.datastore_versions.get( ds, ds_v) client = instance_info.dbaas_admin.mgmt_configs config_param_name = self.config_parameter_dict['name'] client.delete(version.id, config_param_name) assert_raises( exceptions.NotFound, instance_info.dbaas.configuration_parameters.get_parameter, ds, ds_v, config_param_name) @test(depends_on=[test_delete_configuration_parameter_with_mgmt_api]) def test_unable_delete_instance_configurations(self): # test deleting a configuration that is assigned to # an instance is not allowed. 
assert_raises(exceptions.BadRequest, instance_info.dbaas.configurations.delete, configuration_info.id) @test(depends_on=[test_unable_delete_instance_configurations]) @time_out(30) def test_unassign_configuration_from_instances(self): # test to unassign configuration from instance instance_info.dbaas.instances.modify(configuration_instance.id, configuration="") resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 202) instance_info.dbaas.instances.get(configuration_instance.id) #test that config group is not removed instance_info.dbaas.instances.modify(instance_info.id, configuration=None) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 202) instance_info.dbaas.instances.get(instance_info.id) def result_has_no_configuration(): instance = instance_info.dbaas.instances.get(inst_info.id) if hasattr(instance, 'configuration'): return False else: return True inst_info = instance_info poll_until(result_has_no_configuration) inst_info = configuration_instance poll_until(result_has_no_configuration) instance = instance_info.dbaas.instances.get(instance_info.id) assert_equal('RESTART_REQUIRED', instance.status) @test(depends_on=[test_unassign_configuration_from_instances]) def test_assign_in_wrong_state(self): # test assigning a config to an instance in RESTART state assert_raises(exceptions.BadRequest, instance_info.dbaas.instances.modify, configuration_instance.id, configuration=configuration_info.id) @test(depends_on=[test_assign_in_wrong_state]) def test_no_instances_on_configuration(self): # test there is no configuration on the instance after unassigning result = instance_info.dbaas.configurations.get(configuration_info.id) assert_equal(configuration_info.id, result.id) assert_equal(configuration_info.name, result.name) assert_equal(configuration_info.description, result.description) assert_equal(result.instance_count, 0) print(configuration_instance.id) print(instance_info.id) 
@test(depends_on=[test_unassign_configuration_from_instances]) @time_out(120) def test_restart_service_after_unassign_return_active(self): def result_is_not_active(): instance = instance_info.dbaas.instances.get( instance_info.id) if instance.status == "ACTIVE": return False else: return True poll_until(result_is_not_active) config = instance_info.dbaas.configurations.list() print(config) instance = instance_info.dbaas.instances.get(instance_info.id) print(instance.__dict__) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 200) print(instance.status) assert_equal('RESTART_REQUIRED', instance.status) @test(depends_on=[test_restart_service_after_unassign_return_active]) @time_out(120) def test_restart_service_should_return_active(self): # test that after restarting the instance it becomes active instance_info.dbaas.instances.restart(instance_info.id) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 202) def result_is_active(): instance = instance_info.dbaas.instances.get( instance_info.id) if instance.status == "ACTIVE": return True else: assert_equal("REBOOT", instance.status) return False poll_until(result_is_active) @test(depends_on=[test_restart_service_should_return_active]) def test_assign_config_and_name_to_instance_using_patch(self): # test assigning a configuration and name to an instance new_name = 'new_name' report = CONFIG.get_report() report.log("instance_info.id: %s" % instance_info.id) report.log("configuration_info: %s" % configuration_info) report.log("configuration_info.id: %s" % configuration_info.id) report.log("instance name:%s" % instance_info.name) report.log("instance new name:%s" % new_name) saved_name = instance_info.name config_id = configuration_info.id instance_info.dbaas.instances.edit(instance_info.id, configuration=config_id, name=new_name) assert_equal(202, instance_info.dbaas.last_http_code) check = instance_info.dbaas.instances.get(instance_info.id) assert_equal(200, 
instance_info.dbaas.last_http_code) assert_equal(check.name, new_name) # restore instance name instance_info.dbaas.instances.edit(instance_info.id, name=saved_name) assert_equal(202, instance_info.dbaas.last_http_code) instance = instance_info.dbaas.instances.get(instance_info.id) assert_equal('RESTART_REQUIRED', instance.status) # restart to be sure configuration is applied instance_info.dbaas.instances.restart(instance_info.id) assert_equal(202, instance_info.dbaas.last_http_code) sleep(2) def result_is_active(): instance = instance_info.dbaas.instances.get( instance_info.id) if instance.status == "ACTIVE": return True else: assert_equal("REBOOT", instance.status) return False poll_until(result_is_active) # test assigning a configuration to an instance that # already has an assigned configuration with patch config_id = configuration_info.id assert_raises(exceptions.BadRequest, instance_info.dbaas.instances.edit, instance_info.id, configuration=config_id) @test(runs_after=[test_assign_config_and_name_to_instance_using_patch]) def test_unassign_configuration_after_patch(self): # remove the configuration from the instance instance_info.dbaas.instances.edit(instance_info.id, remove_configuration=True) assert_equal(202, instance_info.dbaas.last_http_code) instance = instance_info.dbaas.instances.get(instance_info.id) assert_equal('RESTART_REQUIRED', instance.status) # restart to be sure configuration has been unassigned instance_info.dbaas.instances.restart(instance_info.id) assert_equal(202, instance_info.dbaas.last_http_code) sleep(2) def result_is_active(): instance = instance_info.dbaas.instances.get( instance_info.id) if instance.status == "ACTIVE": return True else: assert_equal("REBOOT", instance.status) return False poll_until(result_is_active) result = instance_info.dbaas.configurations.get(configuration_info.id) assert_equal(result.instance_count, 0) @test def test_unassign_configuration_from_invalid_instance_using_patch(self): # test unassign config group 
from an invalid instance invalid_id = "invalid-inst-id" try: instance_info.dbaas.instances.edit(invalid_id, remove_configuration=True) except exceptions.NotFound: resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 404) @test(runs_after=[test_unassign_configuration_after_patch]) def test_delete_unassigned_configuration(self): # test that we can delete the configuration after no instances are # assigned to it any longer instance_info.dbaas.configurations.delete(configuration_info.id) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 202) @test(depends_on=[test_delete_unassigned_configuration]) @time_out(TIMEOUT_INSTANCE_DELETE) def test_delete_configuration_instance(self): # test that we can delete the instance even though there is a # configuration applied to the instance instance_info.dbaas.instances.delete(configuration_instance.id) assert_equal(202, instance_info.dbaas.last_http_code) def instance_is_gone(): try: instance_info.dbaas.instances.get(configuration_instance.id) return False except exceptions.NotFound: return True poll_until(instance_is_gone) assert_raises(exceptions.NotFound, instance_info.dbaas.instances.get, configuration_instance.id)
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

import unittest
import time
from datetime import datetime

from tzlocal import get_localzone

# prefer the stdlib mock (py3); fall back to the standalone package, and to
# None when neither is available so the module still imports
try:
    from unittest import mock
except ImportError:
    try:
        import mock
    except ImportError:
        mock = None

from airflow import configuration
from airflow.contrib.hooks.sagemaker_hook import (SageMakerHook,
                                                  secondary_training_status_changed,
                                                  secondary_training_status_message,
                                                  LogState)
from airflow.hooks.S3_hook import S3Hook
from airflow.exceptions import AirflowException

# shared fixture values used to build the request dicts below
role = 'arn:aws:iam:role/test-role'
path = 'local/data'
bucket = 'test-bucket'
key = 'test/data'
data_url = 's3://{}/{}'.format(bucket, key)
job_name = 'test-job'
model_name = 'test-model'
config_name = 'test-endpoint-config'
endpoint_name = 'test-endpoint'
image = 'test-image'
test_arn_return = {'Arn': 'testarn'}
output_url = 's3://{}/test/output'.format(bucket)

create_training_params = {
    'AlgorithmSpecification': {
        'TrainingImage': image,
        'TrainingInputMode': 'File'
    },
    'RoleArn': role,
    'OutputDataConfig': {
        'S3OutputPath': output_url
    },
    'ResourceConfig': {
        'InstanceCount': 2,
        'InstanceType': 'ml.c4.8xlarge',
        'VolumeSizeInGB': 50
    },
    'TrainingJobName': job_name,
    'HyperParameters': {
        'k': '10',
        'feature_dim': '784',
        'mini_batch_size': '500',
        'force_dense': 'True'
    },
    'StoppingCondition': {
        'MaxRuntimeInSeconds': 60 * 60
    },
    'InputDataConfig': [
        {
            'ChannelName': 'train',
            'DataSource': {
                'S3DataSource': {
                    'S3DataType': 'S3Prefix',
                    'S3Uri': data_url,
                    'S3DataDistributionType': 'FullyReplicated'
                }
            },
            'CompressionType': 'None',
            'RecordWrapperType': 'None'
        }
    ]
}

create_tuning_params = {
    'HyperParameterTuningJobName': job_name,
    'HyperParameterTuningJobConfig': {
        'Strategy': 'Bayesian',
        'HyperParameterTuningJobObjective': {
            'Type': 'Maximize',
            'MetricName': 'test_metric'
        },
        'ResourceLimits': {
            'MaxNumberOfTrainingJobs': 123,
            'MaxParallelTrainingJobs': 123
        },
        'ParameterRanges': {
            'IntegerParameterRanges': [
                {
                    'Name': 'k',
                    'MinValue': '2',
                    'MaxValue': '10'
                },
            ]
        }
    },
    'TrainingJobDefinition': {
        'StaticHyperParameters': create_training_params['HyperParameters'],
        'AlgorithmSpecification': create_training_params['AlgorithmSpecification'],
        'RoleArn': 'string',
        'InputDataConfig': create_training_params['InputDataConfig'],
        'OutputDataConfig': create_training_params['OutputDataConfig'],
        'ResourceConfig': create_training_params['ResourceConfig'],
        'StoppingCondition': dict(MaxRuntimeInSeconds=60 * 60)
    }
}

create_transform_params = {
    'TransformJobName': job_name,
    'ModelName': model_name,
    'BatchStrategy': 'MultiRecord',
    'TransformInput': {
        'DataSource': {
            'S3DataSource': {
                'S3DataType': 'S3Prefix',
                'S3Uri': data_url
            }
        }
    },
    'TransformOutput': {
        'S3OutputPath': output_url,
    },
    'TransformResources': {
        'InstanceType': 'ml.m4.xlarge',
        'InstanceCount': 123
    }
}

create_model_params = {
    'ModelName': model_name,
    'PrimaryContainer': {
        'Image': image,
        'ModelDataUrl': output_url,
    },
    'ExecutionRoleArn': role
}

create_endpoint_config_params = {
    'EndpointConfigName': config_name,
    'ProductionVariants': [
        {
            'VariantName': 'AllTraffic',
            'ModelName': model_name,
            'InitialInstanceCount': 1,
            'InstanceType': 'ml.c4.xlarge'
        }
    ]
}

create_endpoint_params = {
    'EndpointName': endpoint_name,
    'EndpointConfigName': config_name
}

update_endpoint_params = create_endpoint_params

# NOTE(review): "COMPELETED" is misspelled ("COMPLETED") but renaming would
# touch every use below — left as-is for a follow-up rename.
DESCRIBE_TRAINING_COMPELETED_RETURN = {
    'TrainingJobStatus': 'Completed',
    'ResourceConfig': {
        'InstanceCount': 1,
        'InstanceType': 'ml.c4.xlarge',
        'VolumeSizeInGB': 10
    },
    'TrainingStartTime': datetime(2018, 2, 17, 7, 15, 0, 103000),
    'TrainingEndTime': datetime(2018, 2, 17, 7, 19, 34, 953000),
    'ResponseMetadata': {
        'HTTPStatusCode': 200,
    }
}

DESCRIBE_TRAINING_INPROGRESS_RETURN = dict(DESCRIBE_TRAINING_COMPELETED_RETURN)
DESCRIBE_TRAINING_INPROGRESS_RETURN.update({'TrainingJobStatus': 'InProgress'})

DESCRIBE_TRAINING_FAILED_RETURN = dict(DESCRIBE_TRAINING_COMPELETED_RETURN)
DESCRIBE_TRAINING_FAILED_RETURN.update({'TrainingJobStatus': 'Failed',
                                        'FailureReason': 'Unknown'})

DESCRIBE_TRAINING_STOPPING_RETURN = dict(DESCRIBE_TRAINING_COMPELETED_RETURN)
DESCRIBE_TRAINING_STOPPING_RETURN.update({'TrainingJobStatus': 'Stopping'})

message = 'message'
status = 'status'
SECONDARY_STATUS_DESCRIPTION_1 = {
    'SecondaryStatusTransitions': [{'StatusMessage': message,
                                    'Status': status}]
}
SECONDARY_STATUS_DESCRIPTION_2 = {
    'SecondaryStatusTransitions': [{'StatusMessage': 'different message',
                                    'Status': status}]
}

# canned log-stream / log-event payloads — presumably mimicking CloudWatch
# Logs describe_log_streams / get_log_events responses; verify against the
# hook's log-reading code
DEFAULT_LOG_STREAMS = {'logStreams': [{'logStreamName': job_name + '/xxxxxxxxx'}]}
LIFECYCLE_LOG_STREAMS = [DEFAULT_LOG_STREAMS,
                         DEFAULT_LOG_STREAMS,
                         DEFAULT_LOG_STREAMS,
                         DEFAULT_LOG_STREAMS,
                         DEFAULT_LOG_STREAMS,
                         DEFAULT_LOG_STREAMS]

DEFAULT_LOG_EVENTS = [{'nextForwardToken': None,
                       'events': [{'timestamp': 1,
                                   'message': 'hi there #1'}]},
                      {'nextForwardToken': None, 'events': []}]
STREAM_LOG_EVENTS = [{'nextForwardToken': None,
                      'events': [{'timestamp': 1,
                                  'message': 'hi there #1'}]},
                     {'nextForwardToken': None, 'events': []},
                     {'nextForwardToken': None,
                      'events': [{'timestamp': 1,
                                  'message': 'hi there #1'},
                                 {'timestamp': 2,
                                  'message': 'hi there #2'}]},
                     {'nextForwardToken': None, 'events': []},
                     {'nextForwardToken': None,
                      'events': [{'timestamp': 2,
                                  'message': 'hi there #2'},
                                 {'timestamp': 2,
                                  'message': 'hi there #2a'},
                                 {'timestamp': 3,
                                  'message': 'hi there #3'}]},
                     {'nextForwardToken': None, 'events': []}]

test_evaluation_config = {
    'Image': image,
    'Role': role,
    'S3Operations': {
        'S3CreateBucket': [
            {
                'Bucket': bucket
            }
        ],
        'S3Upload': [
            {
                'Path': path,
                'Bucket': bucket,
                'Key': key,
                'Tar': False
            }
        ]
    }
}


class TestSageMakerHook(unittest.TestCase):

    def setUp(self):
        configuration.load_test_config()

    @mock.patch.object(S3Hook, 'create_bucket')
    @mock.patch.object(S3Hook, 'load_file')
    def test_configure_s3_resources(self, mock_load_file, mock_create_bucket):
        # configure_s3_resources should perform the S3Operations and strip
        # that key from the config dict (mutated in place)
        hook = SageMakerHook()
        evaluation_result = {
            'Image': image,
            'Role': role
        }
        hook.configure_s3_resources(test_evaluation_config)
        self.assertEqual(test_evaluation_config, evaluation_result)
        mock_create_bucket.assert_called_once_with(bucket_name=bucket)
        mock_load_file.assert_called_once_with(path, key, bucket)

    @mock.patch.object(SageMakerHook, 'get_conn')
    @mock.patch.object(S3Hook, 'check_for_key')
    @mock.patch.object(S3Hook, 'check_for_bucket')
    @mock.patch.object(S3Hook, 'check_for_prefix')
    def test_check_s3_url(self,
                          mock_check_prefix,
                          mock_check_bucket,
                          mock_check_key,
                          mock_client):
        mock_client.return_value = None
        hook = SageMakerHook()
        # side_effect lists drive four successive check_s3_url calls:
        # missing bucket, then missing key+prefix, then key hit, then prefix hit
        mock_check_bucket.side_effect = [False, True, True, True]
        mock_check_key.side_effect = [False, True, False]
        mock_check_prefix.side_effect = [False, True, True]
        self.assertRaises(AirflowException,
                          hook.check_s3_url, data_url)
        self.assertRaises(AirflowException,
                          hook.check_s3_url, data_url)
        self.assertEqual(hook.check_s3_url(data_url), True)
        self.assertEqual(hook.check_s3_url(data_url), True)

    @mock.patch.object(SageMakerHook, 'get_conn')
    @mock.patch.object(SageMakerHook, 'check_s3_url')
    def test_check_valid_training(self, mock_check_url, mock_client):
        mock_client.return_value = None
        hook = SageMakerHook()
        hook.check_training_config(create_training_params)
        mock_check_url.assert_called_once_with(data_url)

    @mock.patch.object(SageMakerHook, 'get_conn')
    @mock.patch.object(SageMakerHook, 'check_s3_url')
    def test_check_valid_tuning(self, mock_check_url, mock_client):
        mock_client.return_value = None
        hook = SageMakerHook()
        hook.check_tuning_config(create_tuning_params)
        mock_check_url.assert_called_once_with(data_url)

    @mock.patch.object(SageMakerHook, 'get_client_type')
    def test_conn(self, mock_get_client):
        hook = SageMakerHook(aws_conn_id='sagemaker_test_conn_id')
        self.assertEqual(hook.aws_conn_id, 'sagemaker_test_conn_id')

    @mock.patch.object(SageMakerHook, 'check_training_config')
    @mock.patch.object(SageMakerHook, 'get_conn')
    def test_create_training_job(self, mock_client, mock_check_training):
        mock_check_training.return_value = True
        mock_session = mock.Mock()
        attrs = {'create_training_job.return_value':
                 test_arn_return}
        mock_session.configure_mock(**attrs)
        mock_client.return_value = mock_session
        hook = SageMakerHook(aws_conn_id='sagemaker_test_conn_id')
        response = hook.create_training_job(create_training_params,
                                            wait_for_completion=False,
                                            print_log=False)
        mock_session.create_training_job.assert_called_once_with(**create_training_params)
        self.assertEqual(response, test_arn_return)

    @mock.patch.object(SageMakerHook, 'check_training_config')
    @mock.patch.object(SageMakerHook, 'get_conn')
    def test_training_ends_with_wait(self, mock_client, mock_check_training):
        mock_check_training.return_value = True
        mock_session = mock.Mock()
        # describe returns InProgress -> Stopping -> Completed (x2); the hook
        # should poll until the terminal Completed state (4 describe calls)
        attrs = {'create_training_job.return_value':
                 test_arn_return,
                 'describe_training_job.side_effect':
                 [DESCRIBE_TRAINING_INPROGRESS_RETURN,
                  DESCRIBE_TRAINING_STOPPING_RETURN,
                  DESCRIBE_TRAINING_COMPELETED_RETURN,
                  DESCRIBE_TRAINING_COMPELETED_RETURN]
                 }
        mock_session.configure_mock(**attrs)
        mock_client.return_value = mock_session
        hook = SageMakerHook(aws_conn_id='sagemaker_test_conn_id_1')
        hook.create_training_job(create_training_params,
                                 wait_for_completion=True,
                                 print_log=False,
                                 check_interval=1)
        self.assertEqual(mock_session.describe_training_job.call_count, 4)

    @mock.patch.object(SageMakerHook, 'check_training_config')
    @mock.patch.object(SageMakerHook, 'get_conn')
    def test_training_throws_error_when_failed_with_wait(
            self, mock_client, mock_check_training):
        mock_check_training.return_value = True
        mock_session = mock.Mock()
        # polling stops at the Failed response (3 describe calls) and the
        # hook raises instead of returning
        attrs = {'create_training_job.return_value':
                 test_arn_return,
                 'describe_training_job.side_effect':
                 [DESCRIBE_TRAINING_INPROGRESS_RETURN,
                  DESCRIBE_TRAINING_STOPPING_RETURN,
                  DESCRIBE_TRAINING_FAILED_RETURN,
                  DESCRIBE_TRAINING_COMPELETED_RETURN]
                 }
        mock_session.configure_mock(**attrs)
        mock_client.return_value = mock_session
        hook = SageMakerHook(aws_conn_id='sagemaker_test_conn_id_1')
        self.assertRaises(AirflowException, hook.create_training_job,
                          create_training_params, wait_for_completion=True,
                          print_log=False,
                          check_interval=1)
        self.assertEqual(mock_session.describe_training_job.call_count, 3)

    @mock.patch.object(SageMakerHook, 'check_tuning_config')
    @mock.patch.object(SageMakerHook, 'get_conn')
    def test_create_tuning_job(self, mock_client, mock_check_tuning):
        mock_session = mock.Mock()
        attrs = {'create_hyper_parameter_tuning_job.return_value':
                 test_arn_return}
        mock_session.configure_mock(**attrs)
        mock_client.return_value = mock_session
        hook = SageMakerHook(aws_conn_id='sagemaker_test_conn_id')
        response = hook.create_tuning_job(create_tuning_params,
                                          wait_for_completion=False)
        mock_session.create_hyper_parameter_tuning_job.\
            assert_called_once_with(**create_tuning_params)
        self.assertEqual(response, test_arn_return)

    @mock.patch.object(SageMakerHook, 'check_s3_url')
    @mock.patch.object(SageMakerHook, 'get_conn')
    def test_create_transform_job(self, mock_client, mock_check_url):
        mock_check_url.return_value = True
        mock_session = mock.Mock()
        attrs = {'create_transform_job.return_value':
                 test_arn_return}
        mock_session.configure_mock(**attrs)
        mock_client.return_value = mock_session
        hook = SageMakerHook(aws_conn_id='sagemaker_test_conn_id')
        response = hook.create_transform_job(create_transform_params,
                                             wait_for_completion=False)
        mock_session.create_transform_job.assert_called_once_with(
            **create_transform_params)
self.assertEqual(response, test_arn_return) @mock.patch.object(SageMakerHook, 'get_conn') def test_create_model(self, mock_client): mock_session = mock.Mock() attrs = {'create_model.return_value': test_arn_return} mock_session.configure_mock(**attrs) mock_client.return_value = mock_session hook = SageMakerHook(aws_conn_id='sagemaker_test_conn_id') response = hook.create_model(create_model_params) mock_session.create_model.assert_called_once_with(**create_model_params) self.assertEqual(response, test_arn_return) @mock.patch.object(SageMakerHook, 'get_conn') def test_create_endpoint_config(self, mock_client): mock_session = mock.Mock() attrs = {'create_endpoint_config.return_value': test_arn_return} mock_session.configure_mock(**attrs) mock_client.return_value = mock_session hook = SageMakerHook(aws_conn_id='sagemaker_test_conn_id') response = hook.create_endpoint_config(create_endpoint_config_params) mock_session.create_endpoint_config\ .assert_called_once_with(**create_endpoint_config_params) self.assertEqual(response, test_arn_return) @mock.patch.object(SageMakerHook, 'get_conn') def test_create_endpoint(self, mock_client): mock_session = mock.Mock() attrs = {'create_endpoint.return_value': test_arn_return} mock_session.configure_mock(**attrs) mock_client.return_value = mock_session hook = SageMakerHook(aws_conn_id='sagemaker_test_conn_id') response = hook.create_endpoint(create_endpoint_params, wait_for_completion=False) mock_session.create_endpoint\ .assert_called_once_with(**create_endpoint_params) self.assertEqual(response, test_arn_return) @mock.patch.object(SageMakerHook, 'get_conn') def test_update_endpoint(self, mock_client): mock_session = mock.Mock() attrs = {'update_endpoint.return_value': test_arn_return} mock_session.configure_mock(**attrs) mock_client.return_value = mock_session hook = SageMakerHook(aws_conn_id='sagemaker_test_conn_id') response = hook.update_endpoint(update_endpoint_params, wait_for_completion=False) mock_session.update_endpoint\ 
.assert_called_once_with(**update_endpoint_params) self.assertEqual(response, test_arn_return) @mock.patch.object(SageMakerHook, 'get_conn') def test_describe_training_job(self, mock_client): mock_session = mock.Mock() attrs = {'describe_training_job.return_value': 'InProgress'} mock_session.configure_mock(**attrs) mock_client.return_value = mock_session hook = SageMakerHook(aws_conn_id='sagemaker_test_conn_id') response = hook.describe_training_job(job_name) mock_session.describe_training_job.\ assert_called_once_with(TrainingJobName=job_name) self.assertEqual(response, 'InProgress') @mock.patch.object(SageMakerHook, 'get_conn') def test_describe_tuning_job(self, mock_client): mock_session = mock.Mock() attrs = {'describe_hyper_parameter_tuning_job.return_value': 'InProgress'} mock_session.configure_mock(**attrs) mock_client.return_value = mock_session hook = SageMakerHook(aws_conn_id='sagemaker_test_conn_id') response = hook.describe_tuning_job(job_name) mock_session.describe_hyper_parameter_tuning_job.\ assert_called_once_with(HyperParameterTuningJobName=job_name) self.assertEqual(response, 'InProgress') @mock.patch.object(SageMakerHook, 'get_conn') def test_describe_transform_job(self, mock_client): mock_session = mock.Mock() attrs = {'describe_transform_job.return_value': 'InProgress'} mock_session.configure_mock(**attrs) mock_client.return_value = mock_session hook = SageMakerHook(aws_conn_id='sagemaker_test_conn_id') response = hook.describe_transform_job(job_name) mock_session.describe_transform_job.\ assert_called_once_with(TransformJobName=job_name) self.assertEqual(response, 'InProgress') @mock.patch.object(SageMakerHook, 'get_conn') def test_describe_model(self, mock_client): mock_session = mock.Mock() attrs = {'describe_model.return_value': model_name} mock_session.configure_mock(**attrs) mock_client.return_value = mock_session hook = SageMakerHook(aws_conn_id='sagemaker_test_conn_id') response = hook.describe_model(model_name) 
mock_session.describe_model.\ assert_called_once_with(ModelName=model_name) self.assertEqual(response, model_name) @mock.patch.object(SageMakerHook, 'get_conn') def test_describe_endpoint_config(self, mock_client): mock_session = mock.Mock() attrs = {'describe_endpoint_config.return_value': config_name} mock_session.configure_mock(**attrs) mock_client.return_value = mock_session hook = SageMakerHook(aws_conn_id='sagemaker_test_conn_id') response = hook.describe_endpoint_config(config_name) mock_session.describe_endpoint_config.\ assert_called_once_with(EndpointConfigName=config_name) self.assertEqual(response, config_name) @mock.patch.object(SageMakerHook, 'get_conn') def test_describe_endpoint(self, mock_client): mock_session = mock.Mock() attrs = {'describe_endpoint.return_value': 'InProgress'} mock_session.configure_mock(**attrs) mock_client.return_value = mock_session hook = SageMakerHook(aws_conn_id='sagemaker_test_conn_id') response = hook.describe_endpoint(endpoint_name) mock_session.describe_endpoint.\ assert_called_once_with(EndpointName=endpoint_name) self.assertEqual(response, 'InProgress') def test_secondary_training_status_changed_true(self): changed = secondary_training_status_changed(SECONDARY_STATUS_DESCRIPTION_1, SECONDARY_STATUS_DESCRIPTION_2) self.assertTrue(changed) def test_secondary_training_status_changed_false(self): changed = secondary_training_status_changed(SECONDARY_STATUS_DESCRIPTION_1, SECONDARY_STATUS_DESCRIPTION_1) self.assertFalse(changed) def test_secondary_training_status_message_status_changed(self): now = datetime.now(get_localzone()) SECONDARY_STATUS_DESCRIPTION_1['LastModifiedTime'] = now expected = '{} {} - {}'.format( datetime.utcfromtimestamp(time.mktime(now.timetuple())).strftime('%Y-%m-%d %H:%M:%S'), status, message ) self.assertEqual( secondary_training_status_message(SECONDARY_STATUS_DESCRIPTION_1, SECONDARY_STATUS_DESCRIPTION_2), expected) @mock.patch.object(SageMakerHook, 'get_log_conn') 
@mock.patch.object(SageMakerHook, 'get_conn') @mock.patch.object(time, 'time') def test_describe_training_job_with_logs_in_progress(self, mock_time, mock_client, mock_log_client): mock_session = mock.Mock() mock_log_session = mock.Mock() attrs = {'describe_training_job.return_value': DESCRIBE_TRAINING_COMPELETED_RETURN } log_attrs = {'describe_log_streams.side_effect': LIFECYCLE_LOG_STREAMS, 'get_log_events.side_effect': STREAM_LOG_EVENTS } mock_time.return_value = 50 mock_session.configure_mock(**attrs) mock_client.return_value = mock_session mock_log_session.configure_mock(**log_attrs) mock_log_client.return_value = mock_log_session hook = SageMakerHook(aws_conn_id='sagemaker_test_conn_id') response = hook.describe_training_job_with_log(job_name=job_name, positions={}, stream_names=[], instance_count=1, state=LogState.WAIT_IN_PROGRESS, last_description={}, last_describe_job_call=0) self.assertEqual(response, (LogState.JOB_COMPLETE, {}, 50)) @mock.patch.object(SageMakerHook, 'get_log_conn') @mock.patch.object(SageMakerHook, 'get_conn') def test_describe_training_job_with_logs_job_complete(self, mock_client, mock_log_client): mock_session = mock.Mock() mock_log_session = mock.Mock() attrs = {'describe_training_job.return_value': DESCRIBE_TRAINING_COMPELETED_RETURN } log_attrs = {'describe_log_streams.side_effect': LIFECYCLE_LOG_STREAMS, 'get_log_events.side_effect': STREAM_LOG_EVENTS } mock_session.configure_mock(**attrs) mock_client.return_value = mock_session mock_log_session.configure_mock(**log_attrs) mock_log_client.return_value = mock_log_session hook = SageMakerHook(aws_conn_id='sagemaker_test_conn_id') response = hook.describe_training_job_with_log(job_name=job_name, positions={}, stream_names=[], instance_count=1, state=LogState.JOB_COMPLETE, last_description={}, last_describe_job_call=0) self.assertEqual(response, (LogState.COMPLETE, {}, 0)) @mock.patch.object(SageMakerHook, 'get_log_conn') @mock.patch.object(SageMakerHook, 'get_conn') def 
test_describe_training_job_with_logs_complete(self, mock_client, mock_log_client): mock_session = mock.Mock() mock_log_session = mock.Mock() attrs = {'describe_training_job.return_value': DESCRIBE_TRAINING_COMPELETED_RETURN } log_attrs = {'describe_log_streams.side_effect': LIFECYCLE_LOG_STREAMS, 'get_log_events.side_effect': STREAM_LOG_EVENTS } mock_session.configure_mock(**attrs) mock_client.return_value = mock_session mock_log_session.configure_mock(**log_attrs) mock_log_client.return_value = mock_log_session hook = SageMakerHook(aws_conn_id='sagemaker_test_conn_id') response = hook.describe_training_job_with_log(job_name=job_name, positions={}, stream_names=[], instance_count=1, state=LogState.COMPLETE, last_description={}, last_describe_job_call=0) self.assertEqual(response, (LogState.COMPLETE, {}, 0)) @mock.patch.object(SageMakerHook, 'check_training_config') @mock.patch.object(SageMakerHook, 'get_log_conn') @mock.patch.object(SageMakerHook, 'get_conn') @mock.patch.object(SageMakerHook, 'describe_training_job_with_log') def test_training_with_logs(self, mock_describe, mock_client, mock_log_client, mock_check_training): mock_check_training.return_value = True mock_describe.side_effect = \ [(LogState.WAIT_IN_PROGRESS, DESCRIBE_TRAINING_INPROGRESS_RETURN, 0), (LogState.JOB_COMPLETE, DESCRIBE_TRAINING_STOPPING_RETURN, 0), (LogState.COMPLETE, DESCRIBE_TRAINING_COMPELETED_RETURN, 0)] mock_session = mock.Mock() mock_log_session = mock.Mock() attrs = {'create_training_job.return_value': test_arn_return, 'describe_training_job.return_value': DESCRIBE_TRAINING_COMPELETED_RETURN } log_attrs = {'describe_log_streams.side_effect': LIFECYCLE_LOG_STREAMS, 'get_log_events.side_effect': STREAM_LOG_EVENTS } mock_session.configure_mock(**attrs) mock_log_session.configure_mock(**log_attrs) mock_client.return_value = mock_session mock_log_client.return_value = mock_log_session hook = SageMakerHook(aws_conn_id='sagemaker_test_conn_id_1') 
hook.create_training_job(create_training_params, wait_for_completion=True, print_log=True, check_interval=1) self.assertEqual(mock_describe.call_count, 3) self.assertEqual(mock_session.describe_training_job.call_count, 1) if __name__ == '__main__': unittest.main()
"""Character-level sequence-to-sequence example: reversing words.

Builds an attention-based encoder/decoder from Blocks bricks (on top of
Theano) and trains it to map a character sequence onto the same sequence
with the letters of every word reversed.  ``main`` supports three modes:
"train", "sample" and "beam_search" (see its docstring).
"""
from __future__ import print_function
import logging
import pprint
import math
import numpy
import traceback
import operator

import theano
from six.moves import input
from picklable_itertools.extras import equizip
from theano import tensor
from blocks.bricks import Tanh, Initializable
from blocks.bricks.base import application
from blocks.bricks.lookup import LookupTable
from blocks.bricks.recurrent import SimpleRecurrent, Bidirectional
from blocks.bricks.attention import SequenceContentAttention
from blocks.bricks.parallel import Fork
from blocks.bricks.sequence_generators import (
    SequenceGenerator, Readout, SoftmaxEmitter, LookupFeedback)
from blocks.config import config
from blocks.graph import ComputationGraph
from fuel.transformers import Mapping, Batch, Padding, Filter
from fuel.datasets import OneBillionWord, TextFile
from fuel.schemes import ConstantScheme
from blocks.serialization import load_parameter_values
from blocks.algorithms import (GradientDescent, Scale, StepClipping,
                               CompositeRule)
from blocks.initialization import Orthogonal, IsotropicGaussian, Constant
from blocks.model import Model
from blocks.monitoring import aggregation
from blocks.extensions import FinishAfter, Printing, Timing
from blocks.extensions.saveload import Checkpoint
from blocks.extensions.monitoring import TrainingDataMonitoring
from blocks.main_loop import MainLoop
from blocks.filter import VariableFilter
from blocks.utils import dict_union
from blocks.search import BeamSearch

config.recursion_limit = 100000
floatX = theano.config.floatX
logger = logging.getLogger(__name__)

# Dictionaries
# NOTE: the ordering is deliberate -- the separator symbols
# (' ', '<S>', '</S>') are placed last so they receive the highest
# codes; reverse_words() below relies on `code >= char2code[' ']`
# to tell separators apart from in-word characters.
all_chars = ([chr(ord('a') + i) for i in range(26)] +
             [chr(ord('0') + i) for i in range(10)] +
             [',', '.', '!', '?', '<UNK>'] +
             [' ', '<S>', '</S>'])
code2char = dict(enumerate(all_chars))
char2code = {v: k for k, v in code2char.items()}


def reverse_words(sample):
    """Reverse the letters of every word in a coded sentence.

    ``sample`` is a 1-tuple whose single element is a sequence of
    character codes (see ``char2code``).  Separator codes (space and the
    sentence boundary markers, i.e. codes >= char2code[' ']) stay in
    place; the characters between them are emitted in reverse order.
    Returns a 1-tuple containing the transformed list, matching the
    one-source convention of the fuel ``Mapping`` transformer used in
    ``main``.

    NOTE(review): a word still open when the sequence ends is silently
    dropped, and a word starting at index 0 would yield an empty slice
    (``word_start - 1`` becomes -1, i.e. the end of the list).  Neither
    case is reachable for inputs wrapped in <S> ... </S> as done in
    ``main``; confirm before reusing this on raw sequences.
    """
    sentence = sample[0]
    result = []
    word_start = -1
    for i, code in enumerate(sentence):
        if code >= char2code[' ']:
            # Separator: flush the pending word (reversed), keep the
            # separator itself in its original position.
            if word_start >= 0:
                result.extend(sentence[i - 1:word_start - 1:-1])
                word_start = -1
            result.append(code)
        else:
            # In-word character: remember where the word began.
            if word_start == -1:
                word_start = i
    return (result,)


def _lower(s):
    """Dataset preprocess hook: lower-case a sentence before encoding."""
    return s.lower()


def _transpose(data):
    """Transpose every array of a batch tuple.

    Presumably turns (batch, time) arrays into the time-major layout the
    recurrent bricks consume -- TODO confirm against the data stream.
    """
    return tuple(array.T for array in data)


def _filter_long(data):
    """Keep only examples whose first source is at most 100 codes long."""
    return len(data[0]) <= 100


def _is_nan(log):
    """Return True when the last recorded total gradient norm is NaN."""
    return math.isnan(log.current_row['total_gradient_norm'])


class WordReverser(Initializable):
    """The top brick.

    It is often convenient to gather all bricks of the model
    under the roof of a single top brick.

    """
    def __init__(self, dimension, alphabet_size, **kwargs):
        super(WordReverser, self).__init__(**kwargs)
        # Bidirectional RNN encoder: produces a 2 * dimension annotation
        # per input position (hence attended_dim=2 * dimension below).
        encoder = Bidirectional(
            SimpleRecurrent(dim=dimension, activation=Tanh()))
        # Fork the embedded characters into every (non-mask) sequence
        # input the encoder's transition expects.
        fork = Fork([name for name in encoder.prototype.apply.sequences
                     if name != 'mask'])
        fork.input_dim = dimension
        fork.output_dims = [encoder.prototype.get_dim(name)
                            for name in fork.input_names]
        # Character embedding table shared with the fork above.
        lookup = LookupTable(alphabet_size, dimension)
        transition = SimpleRecurrent(
            activation=Tanh(),
            dim=dimension, name="transition")
        attention = SequenceContentAttention(
            state_names=transition.apply.states,
            attended_dim=2 * dimension, match_dim=dimension, name="attention")
        # The readout combines the decoder state and the attention
        # glimpse, feeds back the previously emitted character, and
        # emits a softmax over the alphabet.
        readout = Readout(
            readout_dim=alphabet_size,
            source_names=[transition.apply.states[0],
                          attention.take_glimpses.outputs[0]],
            emitter=SoftmaxEmitter(name="emitter"),
            feedback_brick=LookupFeedback(alphabet_size, dimension),
            name="readout")
        generator = SequenceGenerator(
            readout=readout, transition=transition, attention=attention,
            name="generator")

        self.lookup = lookup
        self.fork = fork
        self.encoder = encoder
        self.generator = generator
        self.children = [lookup, fork, encoder, generator]

    @application
    def cost(self, chars, chars_mask, targets, targets_mask):
        """Per-position negative log-likelihood matrix of the targets."""
        return self.generator.cost_matrix(
            targets, targets_mask,
            attended=self.encoder.apply(
                **dict_union(
                    self.fork.apply(self.lookup.apply(chars), as_dict=True),
                    mask=chars_mask)),
            attended_mask=chars_mask)

    @application
    def generate(self, chars):
        """Sample output sequences for `chars`.

        Generation is capped at 3x the input length; the attended mask
        is all ones since sampling inputs are not padded.
        """
        return self.generator.generate(
            n_steps=3 * chars.shape[0], batch_size=chars.shape[1],
            attended=self.encoder.apply(
                **dict_union(self.fork.apply(self.lookup.apply(chars),
                             as_dict=True))),
            attended_mask=tensor.ones(chars.shape))


def main(mode, save_path, num_batches, data_path=None):
    """Entry point for training or decoding.

    mode -- "train" fits the model and checkpoints it to `save_path`;
        "sample" / "beam_search" load parameters from `save_path` and
        decode sentences read interactively from stdin.
    save_path -- checkpoint location (written in "train", read otherwise).
    num_batches -- stop training after this many batches.
    data_path -- optional text file to train on; when omitted the
        OneBillionWord dataset is used.
    """
    reverser = WordReverser(100, len(char2code), name="reverser")

    if mode == "train":
        # Data processing pipeline
        dataset_options = dict(dictionary=char2code, level="character",
                               preprocess=_lower)
        if data_path:
            dataset = TextFile(data_path, **dataset_options)
        else:
            dataset = OneBillionWord("training", [99], **dataset_options)
        data_stream = dataset.get_example_stream()
        data_stream = Filter(data_stream, _filter_long)
        # Targets are derived on the fly by word-reversing the input.
        data_stream = Mapping(data_stream, reverse_words,
                              add_sources=("targets",))
        data_stream = Batch(data_stream, iteration_scheme=ConstantScheme(10))
        data_stream = Padding(data_stream)
        data_stream = Mapping(data_stream, _transpose)

        # Initialization settings
        reverser.weights_init = IsotropicGaussian(0.1)
        reverser.biases_init = Constant(0.0)
        reverser.push_initialization_config()
        # Recurrent weights get an orthogonal init on top of the
        # defaults pushed above.
        reverser.encoder.weights_init = Orthogonal()
        reverser.generator.transition.weights_init = Orthogonal()

        # Build the cost computation graph
        chars = tensor.lmatrix("features")
        chars_mask = tensor.matrix("features_mask")
        targets = tensor.lmatrix("targets")
        targets_mask = tensor.matrix("targets_mask")
        batch_cost = reverser.cost(
            chars, chars_mask, targets, targets_mask).sum()
        batch_size = chars.shape[1].copy(name="batch_size")
        cost = aggregation.mean(batch_cost, batch_size)
        cost.name = "sequence_log_likelihood"
        logger.info("Cost graph is built")

        # Give an idea of what's going on
        model = Model(cost)
        parameters = model.get_parameter_dict()
        logger.info("Parameters:\n" +
                    pprint.pformat(
                        [(key, value.get_value().shape) for key, value
                         in parameters.items()],
                        width=120))

        # Initialize parameters
        for brick in model.get_top_bricks():
            brick.initialize()

        # Define the training algorithm.
        cg = ComputationGraph(cost)
        algorithm = GradientDescent(
            cost=cost, parameters=cg.parameters,
            step_rule=CompositeRule([StepClipping(10.0), Scale(0.01)]))

        # Fetch variables useful for debugging
        generator = reverser.generator
        (energies,) = VariableFilter(
            applications=[generator.readout.readout],
            name_regex="output")(cg.variables)
        (activations,) = VariableFilter(
            applications=[generator.transition.apply],
            name=generator.transition.apply.states[0])(cg.variables)
        max_length = chars.shape[0].copy(name="max_length")
        cost_per_character = aggregation.mean(
            batch_cost, batch_size * max_length).copy(
                name="character_log_likelihood")
        min_energy = energies.min().copy(name="min_energy")
        max_energy = energies.max().copy(name="max_energy")
        mean_activation = abs(activations).mean().copy(
            name="mean_activation")
        observables = [
            cost, min_energy, max_energy, mean_activation,
            batch_size, max_length, cost_per_character,
            algorithm.total_step_norm, algorithm.total_gradient_norm]
        # Also monitor the norm of every parameter and of its gradient.
        for name, parameter in parameters.items():
            observables.append(parameter.norm(2).copy(name + "_norm"))
            observables.append(algorithm.gradients[parameter].norm(2).copy(
                name + "_grad_norm"))

        # Construct the main loop and start training!
        average_monitoring = TrainingDataMonitoring(
            observables, prefix="average", every_n_batches=10)
        main_loop = MainLoop(
            model=model,
            data_stream=data_stream,
            algorithm=algorithm,
            extensions=[
                Timing(),
                TrainingDataMonitoring(observables, after_batch=True),
                average_monitoring,
                FinishAfter(after_n_batches=num_batches)
                # This shows a way to handle NaN emerging during
                # training: simply finish it.
                .add_condition(["after_batch"], _is_nan),
                # Saving the model and the log separately is convenient,
                # because loading the whole pickle takes quite some time.
                Checkpoint(save_path, every_n_batches=500,
                           save_separately=["model", "log"]),
                Printing(every_n_batches=1)])
        main_loop.run()
    elif mode == "sample" or mode == "beam_search":
        chars = tensor.lmatrix("input")
        generated = reverser.generate(chars)
        model = Model(generated)
        logger.info("Loading the model..")
        model.set_parameter_values(load_parameter_values(save_path))

        def generate(input_):
            """Generate output sequences for an input sequence.

            Encapsulates most of the difference between sampling and beam
            search.

            Returns
            -------
            outputs : list of lists
                Trimmed output sequences.
            costs : list
                The negative log-likelihood of generating the respective
                sequences.

            """
            if mode == "beam_search":
                samples, = VariableFilter(
                    applications=[reverser.generator.generate], name="outputs")(
                        ComputationGraph(generated[1]))
                # NOTE: this will recompile beam search functions
                # every time user presses Enter. Do not create
                # a new `BeamSearch` object every time if
                # speed is important for you.
                beam_search = BeamSearch(samples)
                outputs, costs = beam_search.search(
                    {chars: input_}, char2code['</S>'],
                    3 * input_.shape[0])
            else:
                # Plain sampling: run the compiled generation function
                # and trim each sequence at its first </S> (keep the
                # whole sequence if none was emitted).
                _1, outputs, _2, _3, costs = (
                    model.get_theano_function()(input_))
                outputs = list(outputs.T)
                costs = list(costs.T)
                for i in range(len(outputs)):
                    outputs[i] = list(outputs[i])
                    try:
                        true_length = outputs[i].index(char2code['</S>']) + 1
                    except ValueError:
                        true_length = len(outputs[i])
                    outputs[i] = outputs[i][:true_length]
                    costs[i] = costs[i][:true_length].sum()
            return outputs, costs

        # Interactive decoding loop: read a sentence and a batch size /
        # beam size from stdin until EOF.
        while True:
            try:
                line = input("Enter a sentence\n")
                message = ("Enter the number of samples\n" if mode == "sample"
                           else "Enter the beam size\n")
                batch_size = int(input(message))
            except EOFError:
                break
            except Exception:
                traceback.print_exc()
                continue
            # Encode, mapping unknown characters to <UNK> and wrapping
            # the sentence in the <S> ... </S> boundary markers.
            encoded_input = [char2code.get(char, char2code["<UNK>"])
                             for char in line.lower().strip()]
            encoded_input = ([char2code['<S>']] + encoded_input +
                             [char2code['</S>']])
            print("Encoder input:", encoded_input)
            target = reverse_words((encoded_input,))[0]
            print("Target: ", target)
            # Replicate the input batch_size times so every column of
            # the batch decodes the same sentence.
            samples, costs = generate(
                numpy.repeat(numpy.array(encoded_input)[:, None],
                             batch_size, axis=1))
            messages = []
            for sample, cost in equizip(samples, costs):
                message = "({})".format(cost)
                message += "".join(code2char[code] for code in sample)
                if sample == target:
                    message += " CORRECT!"
                messages.append((cost, message))
            messages.sort(key=operator.itemgetter(0), reverse=True)
            for _, message in messages:
                print(message)
from __future__ import unicode_literals import ctypes import json import random import unittest from unittest import skipUnless from binascii import a2b_hex, b2a_hex from io import BytesIO from django.contrib.gis.gdal import HAS_GDAL from django.contrib.gis.geometry.test_data import TestDataMixin from django.utils.encoding import force_bytes from django.utils import six from django.utils.six.moves import xrange from .. import HAS_GEOS if HAS_GEOS: from .. import (GEOSException, GEOSIndexError, GEOSGeometry, GeometryCollection, Point, MultiPoint, Polygon, MultiPolygon, LinearRing, LineString, MultiLineString, fromfile, fromstr, geos_version_info) from ..base import gdal, numpy, GEOSBase @skipUnless(HAS_GEOS, "Geos is required.") class GEOSTest(unittest.TestCase, TestDataMixin): def test_base(self): "Tests out the GEOSBase class." # Testing out GEOSBase class, which provides a `ptr` property # that abstracts out access to underlying C pointers. class FakeGeom1(GEOSBase): pass # This one only accepts pointers to floats c_float_p = ctypes.POINTER(ctypes.c_float) class FakeGeom2(GEOSBase): ptr_type = c_float_p # Default ptr_type is `c_void_p`. fg1 = FakeGeom1() # Default ptr_type is C float pointer fg2 = FakeGeom2() # These assignments are OK -- None is allowed because # it's equivalent to the NULL pointer. fg1.ptr = ctypes.c_void_p() fg1.ptr = None fg2.ptr = c_float_p(ctypes.c_float(5.23)) fg2.ptr = None # Because pointers have been set to NULL, an exception should be # raised when we try to access it. Raising an exception is # preferable to a segmentation fault that commonly occurs when # a C method is given a NULL memory reference. for fg in (fg1, fg2): # Equivalent to `fg.ptr` self.assertRaises(GEOSException, fg._get_ptr) # Anything that is either not None or the acceptable pointer type will # result in a TypeError when trying to assign it to the `ptr` property. 
# Thus, memmory addresses (integers) and pointers of the incorrect type # (in `bad_ptrs`) will not be allowed. bad_ptrs = (5, ctypes.c_char_p(b'foobar')) for bad_ptr in bad_ptrs: # Equivalent to `fg.ptr = bad_ptr` self.assertRaises(TypeError, fg1._set_ptr, bad_ptr) self.assertRaises(TypeError, fg2._set_ptr, bad_ptr) def test_wkt(self): "Testing WKT output." for g in self.geometries.wkt_out: geom = fromstr(g.wkt) if geom.hasz and geos_version_info()['version'] >= '3.3.0': self.assertEqual(g.ewkt, geom.wkt) def test_hex(self): "Testing HEX output." for g in self.geometries.hex_wkt: geom = fromstr(g.wkt) self.assertEqual(g.hex, geom.hex.decode()) def test_hexewkb(self): "Testing (HEX)EWKB output." # For testing HEX(EWKB). ogc_hex = b'01010000000000000000000000000000000000F03F' ogc_hex_3d = b'01010000800000000000000000000000000000F03F0000000000000040' # `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));` hexewkb_2d = b'0101000020E61000000000000000000000000000000000F03F' # `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));` hexewkb_3d = b'01010000A0E61000000000000000000000000000000000F03F0000000000000040' pnt_2d = Point(0, 1, srid=4326) pnt_3d = Point(0, 1, 2, srid=4326) # OGC-compliant HEX will not have SRID value. self.assertEqual(ogc_hex, pnt_2d.hex) self.assertEqual(ogc_hex_3d, pnt_3d.hex) # HEXEWKB should be appropriate for its dimension -- have to use an # a WKBWriter w/dimension set accordingly, else GEOS will insert # garbage into 3D coordinate if there is none. self.assertEqual(hexewkb_2d, pnt_2d.hexewkb) self.assertEqual(hexewkb_3d, pnt_3d.hexewkb) self.assertEqual(True, GEOSGeometry(hexewkb_3d).hasz) # Same for EWKB. self.assertEqual(six.memoryview(a2b_hex(hexewkb_2d)), pnt_2d.ewkb) self.assertEqual(six.memoryview(a2b_hex(hexewkb_3d)), pnt_3d.ewkb) # Redundant sanity check. self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid) def test_kml(self): "Testing KML output." 
for tg in self.geometries.wkt_out: geom = fromstr(tg.wkt) kml = getattr(tg, 'kml', False) if kml: self.assertEqual(kml, geom.kml) def test_errors(self): "Testing the Error handlers." # string-based for err in self.geometries.errors: with self.assertRaises((GEOSException, ValueError)): fromstr(err.wkt) # Bad WKB self.assertRaises(GEOSException, GEOSGeometry, six.memoryview(b'0')) class NotAGeometry(object): pass # Some other object self.assertRaises(TypeError, GEOSGeometry, NotAGeometry()) # None self.assertRaises(TypeError, GEOSGeometry, None) def test_wkb(self): "Testing WKB output." for g in self.geometries.hex_wkt: geom = fromstr(g.wkt) wkb = geom.wkb self.assertEqual(b2a_hex(wkb).decode().upper(), g.hex) def test_create_hex(self): "Testing creation from HEX." for g in self.geometries.hex_wkt: geom_h = GEOSGeometry(g.hex) # we need to do this so decimal places get normalized geom_t = fromstr(g.wkt) self.assertEqual(geom_t.wkt, geom_h.wkt) def test_create_wkb(self): "Testing creation from WKB." for g in self.geometries.hex_wkt: wkb = six.memoryview(a2b_hex(g.hex.encode())) geom_h = GEOSGeometry(wkb) # we need to do this so decimal places get normalized geom_t = fromstr(g.wkt) self.assertEqual(geom_t.wkt, geom_h.wkt) def test_ewkt(self): "Testing EWKT." srids = (-1, 32140) for srid in srids: for p in self.geometries.polygons: ewkt = 'SRID=%d;%s' % (srid, p.wkt) poly = fromstr(ewkt) self.assertEqual(srid, poly.srid) self.assertEqual(srid, poly.shell.srid) self.assertEqual(srid, fromstr(poly.ewkt).srid) # Checking export @skipUnless(HAS_GDAL, "GDAL is required.") def test_json(self): "Testing GeoJSON input/output (via GDAL)." 
for g in self.geometries.json_geoms: geom = GEOSGeometry(g.wkt) if not hasattr(g, 'not_equal'): # Loading jsons to prevent decimal differences self.assertEqual(json.loads(g.json), json.loads(geom.json)) self.assertEqual(json.loads(g.json), json.loads(geom.geojson)) self.assertEqual(GEOSGeometry(g.wkt), GEOSGeometry(geom.json)) def test_fromfile(self): "Testing the fromfile() factory." ref_pnt = GEOSGeometry('POINT(5 23)') wkt_f = BytesIO() wkt_f.write(force_bytes(ref_pnt.wkt)) wkb_f = BytesIO() wkb_f.write(bytes(ref_pnt.wkb)) # Other tests use `fromfile()` on string filenames so those # aren't tested here. for fh in (wkt_f, wkb_f): fh.seek(0) pnt = fromfile(fh) self.assertEqual(ref_pnt, pnt) def test_eq(self): "Testing equivalence." p = fromstr('POINT(5 23)') self.assertEqual(p, p.wkt) self.assertNotEqual(p, 'foo') ls = fromstr('LINESTRING(0 0, 1 1, 5 5)') self.assertEqual(ls, ls.wkt) self.assertNotEqual(p, 'bar') # Error shouldn't be raise on equivalence testing with # an invalid type. for g in (p, ls): self.assertNotEqual(g, None) self.assertNotEqual(g, {'foo': 'bar'}) self.assertNotEqual(g, False) def test_points(self): "Testing Point objects." 
prev = fromstr('POINT(0 0)') for p in self.geometries.points: # Creating the point from the WKT pnt = fromstr(p.wkt) self.assertEqual(pnt.geom_type, 'Point') self.assertEqual(pnt.geom_typeid, 0) self.assertEqual(p.x, pnt.x) self.assertEqual(p.y, pnt.y) self.assertEqual(True, pnt == fromstr(p.wkt)) self.assertEqual(False, pnt == prev) # Making sure that the point's X, Y components are what we expect self.assertAlmostEqual(p.x, pnt.tuple[0], 9) self.assertAlmostEqual(p.y, pnt.tuple[1], 9) # Testing the third dimension, and getting the tuple arguments if hasattr(p, 'z'): self.assertEqual(True, pnt.hasz) self.assertEqual(p.z, pnt.z) self.assertEqual(p.z, pnt.tuple[2], 9) tup_args = (p.x, p.y, p.z) set_tup1 = (2.71, 3.14, 5.23) set_tup2 = (5.23, 2.71, 3.14) else: self.assertEqual(False, pnt.hasz) self.assertEqual(None, pnt.z) tup_args = (p.x, p.y) set_tup1 = (2.71, 3.14) set_tup2 = (3.14, 2.71) # Centroid operation on point should be point itself self.assertEqual(p.centroid, pnt.centroid.tuple) # Now testing the different constructors pnt2 = Point(tup_args) # e.g., Point((1, 2)) pnt3 = Point(*tup_args) # e.g., Point(1, 2) self.assertEqual(True, pnt == pnt2) self.assertEqual(True, pnt == pnt3) # Now testing setting the x and y pnt.y = 3.14 pnt.x = 2.71 self.assertEqual(3.14, pnt.y) self.assertEqual(2.71, pnt.x) # Setting via the tuple/coords property pnt.tuple = set_tup1 self.assertEqual(set_tup1, pnt.tuple) pnt.coords = set_tup2 self.assertEqual(set_tup2, pnt.coords) prev = pnt # setting the previous geometry def test_multipoints(self): "Testing MultiPoint objects." 
for mp in self.geometries.multipoints: mpnt = fromstr(mp.wkt) self.assertEqual(mpnt.geom_type, 'MultiPoint') self.assertEqual(mpnt.geom_typeid, 4) self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9) self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9) self.assertRaises(GEOSIndexError, mpnt.__getitem__, len(mpnt)) self.assertEqual(mp.centroid, mpnt.centroid.tuple) self.assertEqual(mp.coords, tuple(m.tuple for m in mpnt)) for p in mpnt: self.assertEqual(p.geom_type, 'Point') self.assertEqual(p.geom_typeid, 0) self.assertEqual(p.empty, False) self.assertEqual(p.valid, True) def test_linestring(self): "Testing LineString objects." prev = fromstr('POINT(0 0)') for l in self.geometries.linestrings: ls = fromstr(l.wkt) self.assertEqual(ls.geom_type, 'LineString') self.assertEqual(ls.geom_typeid, 1) self.assertEqual(ls.empty, False) self.assertEqual(ls.ring, False) if hasattr(l, 'centroid'): self.assertEqual(l.centroid, ls.centroid.tuple) if hasattr(l, 'tup'): self.assertEqual(l.tup, ls.tuple) self.assertEqual(True, ls == fromstr(l.wkt)) self.assertEqual(False, ls == prev) self.assertRaises(GEOSIndexError, ls.__getitem__, len(ls)) prev = ls # Creating a LineString from a tuple, list, and numpy array self.assertEqual(ls, LineString(ls.tuple)) # tuple self.assertEqual(ls, LineString(*ls.tuple)) # as individual arguments self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple])) # as list # Point individual arguments self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt) if numpy: self.assertEqual(ls, LineString(numpy.array(ls.tuple))) # as numpy array def test_multilinestring(self): "Testing MultiLineString objects." 
prev = fromstr('POINT(0 0)') for l in self.geometries.multilinestrings: ml = fromstr(l.wkt) self.assertEqual(ml.geom_type, 'MultiLineString') self.assertEqual(ml.geom_typeid, 5) self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9) self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9) self.assertEqual(True, ml == fromstr(l.wkt)) self.assertEqual(False, ml == prev) prev = ml for ls in ml: self.assertEqual(ls.geom_type, 'LineString') self.assertEqual(ls.geom_typeid, 1) self.assertEqual(ls.empty, False) self.assertRaises(GEOSIndexError, ml.__getitem__, len(ml)) self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt) self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml))) def test_linearring(self): "Testing LinearRing objects." for rr in self.geometries.linearrings: lr = fromstr(rr.wkt) self.assertEqual(lr.geom_type, 'LinearRing') self.assertEqual(lr.geom_typeid, 2) self.assertEqual(rr.n_p, len(lr)) self.assertEqual(True, lr.valid) self.assertEqual(False, lr.empty) # Creating a LinearRing from a tuple, list, and numpy array self.assertEqual(lr, LinearRing(lr.tuple)) self.assertEqual(lr, LinearRing(*lr.tuple)) self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple])) if numpy: self.assertEqual(lr, LinearRing(numpy.array(lr.tuple))) def test_polygons_from_bbox(self): "Testing `from_bbox` class method." bbox = (-180, -90, 180, 90) p = Polygon.from_bbox(bbox) self.assertEqual(bbox, p.extent) # Testing numerical precision x = 3.14159265358979323 bbox = (0, 0, 1, x) p = Polygon.from_bbox(bbox) y = p.extent[-1] self.assertEqual(format(x, '.13f'), format(y, '.13f')) def test_polygons(self): "Testing Polygon objects." prev = fromstr('POINT(0 0)') for p in self.geometries.polygons: # Creating the Polygon, testing its properties. 
poly = fromstr(p.wkt) self.assertEqual(poly.geom_type, 'Polygon') self.assertEqual(poly.geom_typeid, 3) self.assertEqual(poly.empty, False) self.assertEqual(poly.ring, False) self.assertEqual(p.n_i, poly.num_interior_rings) self.assertEqual(p.n_i + 1, len(poly)) # Testing __len__ self.assertEqual(p.n_p, poly.num_points) # Area & Centroid self.assertAlmostEqual(p.area, poly.area, 9) self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9) self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9) # Testing the geometry equivalence self.assertEqual(True, poly == fromstr(p.wkt)) self.assertEqual(False, poly == prev) # Should not be equal to previous geometry self.assertEqual(True, poly != prev) # Testing the exterior ring ring = poly.exterior_ring self.assertEqual(ring.geom_type, 'LinearRing') self.assertEqual(ring.geom_typeid, 2) if p.ext_ring_cs: self.assertEqual(p.ext_ring_cs, ring.tuple) self.assertEqual(p.ext_ring_cs, poly[0].tuple) # Testing __getitem__ # Testing __getitem__ and __setitem__ on invalid indices self.assertRaises(GEOSIndexError, poly.__getitem__, len(poly)) self.assertRaises(GEOSIndexError, poly.__setitem__, len(poly), False) self.assertRaises(GEOSIndexError, poly.__getitem__, -1 * len(poly) - 1) # Testing __iter__ for r in poly: self.assertEqual(r.geom_type, 'LinearRing') self.assertEqual(r.geom_typeid, 2) # Testing polygon construction. self.assertRaises(TypeError, Polygon, 0, [1, 2, 3]) self.assertRaises(TypeError, Polygon, 'foo') # Polygon(shell, (hole1, ... holeN)) rings = tuple(r for r in poly) self.assertEqual(poly, Polygon(rings[0], rings[1:])) # Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN) ring_tuples = tuple(r.tuple for r in poly) self.assertEqual(poly, Polygon(*ring_tuples)) # Constructing with tuples of LinearRings. 
self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt) self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt) def test_polygon_comparison(self): p1 = Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0))) p2 = Polygon(((0, 0), (0, 1), (1, 0), (0, 0))) self.assertGreater(p1, p2) self.assertLess(p2, p1) p3 = Polygon(((0, 0), (0, 1), (1, 1), (2, 0), (0, 0))) p4 = Polygon(((0, 0), (0, 1), (2, 2), (1, 0), (0, 0))) self.assertGreater(p4, p3) self.assertLess(p3, p4) def test_multipolygons(self): "Testing MultiPolygon objects." fromstr('POINT (0 0)') for mp in self.geometries.multipolygons: mpoly = fromstr(mp.wkt) self.assertEqual(mpoly.geom_type, 'MultiPolygon') self.assertEqual(mpoly.geom_typeid, 6) self.assertEqual(mp.valid, mpoly.valid) if mp.valid: self.assertEqual(mp.num_geom, mpoly.num_geom) self.assertEqual(mp.n_p, mpoly.num_coords) self.assertEqual(mp.num_geom, len(mpoly)) self.assertRaises(GEOSIndexError, mpoly.__getitem__, len(mpoly)) for p in mpoly: self.assertEqual(p.geom_type, 'Polygon') self.assertEqual(p.geom_typeid, 3) self.assertEqual(p.valid, True) self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt) def test_memory_hijinks(self): "Testing Geometry __del__() on rings and polygons." #### Memory issues with rings and polygons # These tests are needed to ensure sanity with writable geometries. # Getting a polygon with interior rings, and pulling out the interior rings poly = fromstr(self.geometries.polygons[1].wkt) ring1 = poly[0] ring2 = poly[1] # These deletes should be 'harmless' since they are done on child geometries del ring1 del ring2 ring1 = poly[0] ring2 = poly[1] # Deleting the polygon del poly # Access to these rings is OK since they are clones. str(ring1) str(ring2) def test_coord_seq(self): "Testing Coordinate Sequence objects." 
for p in self.geometries.polygons: if p.ext_ring_cs: # Constructing the polygon and getting the coordinate sequence poly = fromstr(p.wkt) cs = poly.exterior_ring.coord_seq self.assertEqual(p.ext_ring_cs, cs.tuple) # done in the Polygon test too. self.assertEqual(len(p.ext_ring_cs), len(cs)) # Making sure __len__ works # Checks __getitem__ and __setitem__ for i in xrange(len(p.ext_ring_cs)): c1 = p.ext_ring_cs[i] # Expected value c2 = cs[i] # Value from coordseq self.assertEqual(c1, c2) # Constructing the test value to set the coordinate sequence with if len(c1) == 2: tset = (5, 23) else: tset = (5, 23, 8) cs[i] = tset # Making sure every set point matches what we expect for j in range(len(tset)): cs[i] = tset self.assertEqual(tset[j], cs[i][j]) def test_relate_pattern(self): "Testing relate() and relate_pattern()." g = fromstr('POINT (0 0)') self.assertRaises(GEOSException, g.relate_pattern, 0, 'invalid pattern, yo') for rg in self.geometries.relate_geoms: a = fromstr(rg.wkt_a) b = fromstr(rg.wkt_b) self.assertEqual(rg.result, a.relate_pattern(b, rg.pattern)) self.assertEqual(rg.pattern, a.relate(b)) def test_intersection(self): "Testing intersects() and intersection()." for i in xrange(len(self.geometries.topology_geoms)): a = fromstr(self.geometries.topology_geoms[i].wkt_a) b = fromstr(self.geometries.topology_geoms[i].wkt_b) i1 = fromstr(self.geometries.intersect_geoms[i].wkt) self.assertEqual(True, a.intersects(b)) i2 = a.intersection(b) self.assertEqual(i1, i2) self.assertEqual(i1, a & b) # __and__ is intersection operator a &= b # testing __iand__ self.assertEqual(i1, a) def test_union(self): "Testing union()." 
for i in xrange(len(self.geometries.topology_geoms)): a = fromstr(self.geometries.topology_geoms[i].wkt_a) b = fromstr(self.geometries.topology_geoms[i].wkt_b) u1 = fromstr(self.geometries.union_geoms[i].wkt) u2 = a.union(b) self.assertEqual(u1, u2) self.assertEqual(u1, a | b) # __or__ is union operator a |= b # testing __ior__ self.assertEqual(u1, a) def test_difference(self): "Testing difference()." for i in xrange(len(self.geometries.topology_geoms)): a = fromstr(self.geometries.topology_geoms[i].wkt_a) b = fromstr(self.geometries.topology_geoms[i].wkt_b) d1 = fromstr(self.geometries.diff_geoms[i].wkt) d2 = a.difference(b) self.assertEqual(d1, d2) self.assertEqual(d1, a - b) # __sub__ is difference operator a -= b # testing __isub__ self.assertEqual(d1, a) def test_symdifference(self): "Testing sym_difference()." for i in xrange(len(self.geometries.topology_geoms)): a = fromstr(self.geometries.topology_geoms[i].wkt_a) b = fromstr(self.geometries.topology_geoms[i].wkt_b) d1 = fromstr(self.geometries.sdiff_geoms[i].wkt) d2 = a.sym_difference(b) self.assertEqual(d1, d2) self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator a ^= b # testing __ixor__ self.assertEqual(d1, a) def test_buffer(self): "Testing buffer()." for bg in self.geometries.buffer_geoms: g = fromstr(bg.wkt) # The buffer we expect exp_buf = fromstr(bg.buffer_wkt) quadsegs = bg.quadsegs width = bg.width # Can't use a floating-point for the number of quadsegs. 
self.assertRaises(ctypes.ArgumentError, g.buffer, width, float(quadsegs)) # Constructing our buffer buf = g.buffer(width, quadsegs) self.assertEqual(exp_buf.num_coords, buf.num_coords) self.assertEqual(len(exp_buf), len(buf)) # Now assuring that each point in the buffer is almost equal for j in xrange(len(exp_buf)): exp_ring = exp_buf[j] buf_ring = buf[j] self.assertEqual(len(exp_ring), len(buf_ring)) for k in xrange(len(exp_ring)): # Asserting the X, Y of each point are almost equal (due to floating point imprecision) self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9) self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9) def test_srid(self): "Testing the SRID property and keyword." # Testing SRID keyword on Point pnt = Point(5, 23, srid=4326) self.assertEqual(4326, pnt.srid) pnt.srid = 3084 self.assertEqual(3084, pnt.srid) self.assertRaises(ctypes.ArgumentError, pnt.set_srid, '4326') # Testing SRID keyword on fromstr(), and on Polygon rings. poly = fromstr(self.geometries.polygons[1].wkt, srid=4269) self.assertEqual(4269, poly.srid) for ring in poly: self.assertEqual(4269, ring.srid) poly.srid = 4326 self.assertEqual(4326, poly.shell.srid) # Testing SRID keyword on GeometryCollection gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021) self.assertEqual(32021, gc.srid) for i in range(len(gc)): self.assertEqual(32021, gc[i].srid) # GEOS may get the SRID from HEXEWKB # 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS # using `SELECT GeomFromText('POINT (5 23)', 4326);`. hex = '0101000020E610000000000000000014400000000000003740' p1 = fromstr(hex) self.assertEqual(4326, p1.srid) p2 = fromstr(p1.hex) self.assertIsNone(p2.srid) p3 = fromstr(p1.hex, srid=-1) # -1 is intended. 
self.assertEqual(-1, p3.srid) @skipUnless(HAS_GDAL, "GDAL is required.") def test_custom_srid(self): """ Test with a srid unknown from GDAL """ pnt = Point(111200, 220900, srid=999999) self.assertTrue(pnt.ewkt.startswith("SRID=999999;POINT (111200.0")) self.assertIsInstance(pnt.ogr, gdal.OGRGeometry) self.assertIsNone(pnt.srs) # Test conversion from custom to a known srid c2w = gdal.CoordTransform( gdal.SpatialReference( '+proj=mill +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +R_A +ellps=WGS84 ' '+datum=WGS84 +units=m +no_defs' ), gdal.SpatialReference(4326)) new_pnt = pnt.transform(c2w, clone=True) self.assertEqual(new_pnt.srid, 4326) self.assertAlmostEqual(new_pnt.x, 1, 3) self.assertAlmostEqual(new_pnt.y, 2, 3) def test_mutable_geometries(self): "Testing the mutability of Polygons and Geometry Collections." ### Testing the mutability of Polygons ### for p in self.geometries.polygons: poly = fromstr(p.wkt) # Should only be able to use __setitem__ with LinearRing geometries. self.assertRaises(TypeError, poly.__setitem__, 0, LineString((1, 1), (2, 2))) # Constructing the new shell by adding 500 to every point in the old shell. shell_tup = poly.shell.tuple new_coords = [] for point in shell_tup: new_coords.append((point[0] + 500., point[1] + 500.)) new_shell = LinearRing(*tuple(new_coords)) # Assigning polygon's exterior ring w/the new shell poly.exterior_ring = new_shell str(new_shell) # new shell is still accessible self.assertEqual(poly.exterior_ring, new_shell) self.assertEqual(poly[0], new_shell) ### Testing the mutability of Geometry Collections for tg in self.geometries.multipoints: mp = fromstr(tg.wkt) for i in range(len(mp)): # Creating a random point. 
pnt = mp[i] new = Point(random.randint(21, 100), random.randint(21, 100)) # Testing the assignment mp[i] = new str(new) # what was used for the assignment is still accessible self.assertEqual(mp[i], new) self.assertEqual(mp[i].wkt, new.wkt) self.assertNotEqual(pnt, mp[i]) # MultiPolygons involve much more memory management because each # Polygon w/in the collection has its own rings. for tg in self.geometries.multipolygons: mpoly = fromstr(tg.wkt) for i in xrange(len(mpoly)): poly = mpoly[i] old_poly = mpoly[i] # Offsetting the each ring in the polygon by 500. for j in xrange(len(poly)): r = poly[j] for k in xrange(len(r)): r[k] = (r[k][0] + 500., r[k][1] + 500.) poly[j] = r self.assertNotEqual(mpoly[i], poly) # Testing the assignment mpoly[i] = poly str(poly) # Still accessible self.assertEqual(mpoly[i], poly) self.assertNotEqual(mpoly[i], old_poly) # Extreme (!!) __setitem__ -- no longer works, have to detect # in the first object that __setitem__ is called in the subsequent # objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)? #mpoly[0][0][0] = (3.14, 2.71) #self.assertEqual((3.14, 2.71), mpoly[0][0][0]) # Doing it more slowly.. #self.assertEqual((3.14, 2.71), mpoly[0].shell[0]) #del mpoly def test_threed(self): "Testing three-dimensional geometries." # Testing a 3D Point pnt = Point(2, 3, 8) self.assertEqual((2., 3., 8.), pnt.coords) self.assertRaises(TypeError, pnt.set_coords, (1., 2.)) pnt.coords = (1., 2., 3.) self.assertEqual((1., 2., 3.), pnt.coords) # Testing a 3D LineString ls = LineString((2., 3., 8.), (50., 250., -117.)) self.assertEqual(((2., 3., 8.), (50., 250., -117.)), ls.tuple) self.assertRaises(TypeError, ls.__setitem__, 0, (1., 2.)) ls[0] = (1., 2., 3.) self.assertEqual((1., 2., 3.), ls[0]) def test_distance(self): "Testing the distance() function." # Distance to self should be 0. 
pnt = Point(0, 0) self.assertEqual(0.0, pnt.distance(Point(0, 0))) # Distance should be 1 self.assertEqual(1.0, pnt.distance(Point(0, 1))) # Distance should be ~ sqrt(2) self.assertAlmostEqual(1.41421356237, pnt.distance(Point(1, 1)), 11) # Distances are from the closest vertex in each geometry -- # should be 3 (distance from (2, 2) to (5, 2)). ls1 = LineString((0, 0), (1, 1), (2, 2)) ls2 = LineString((5, 2), (6, 1), (7, 0)) self.assertEqual(3, ls1.distance(ls2)) def test_length(self): "Testing the length property." # Points have 0 length. pnt = Point(0, 0) self.assertEqual(0.0, pnt.length) # Should be ~ sqrt(2) ls = LineString((0, 0), (1, 1)) self.assertAlmostEqual(1.41421356237, ls.length, 11) # Should be circumference of Polygon poly = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0))) self.assertEqual(4.0, poly.length) # Should be sum of each element's length in collection. mpoly = MultiPolygon(poly.clone(), poly) self.assertEqual(8.0, mpoly.length) def test_emptyCollections(self): "Testing empty geometries and collections." gc1 = GeometryCollection([]) gc2 = fromstr('GEOMETRYCOLLECTION EMPTY') pnt = fromstr('POINT EMPTY') ls = fromstr('LINESTRING EMPTY') poly = fromstr('POLYGON EMPTY') mls = fromstr('MULTILINESTRING EMPTY') mpoly1 = fromstr('MULTIPOLYGON EMPTY') mpoly2 = MultiPolygon(()) for g in [gc1, gc2, pnt, ls, poly, mls, mpoly1, mpoly2]: self.assertEqual(True, g.empty) # Testing len() and num_geom. 
if isinstance(g, Polygon): self.assertEqual(1, len(g)) # Has one empty linear ring self.assertEqual(1, g.num_geom) self.assertEqual(0, len(g[0])) elif isinstance(g, (Point, LineString)): self.assertEqual(1, g.num_geom) self.assertEqual(0, len(g)) else: self.assertEqual(0, g.num_geom) self.assertEqual(0, len(g)) # Testing __getitem__ (doesn't work on Point or Polygon) if isinstance(g, Point): self.assertRaises(GEOSIndexError, g.get_x) elif isinstance(g, Polygon): lr = g.shell self.assertEqual('LINEARRING EMPTY', lr.wkt) self.assertEqual(0, len(lr)) self.assertEqual(True, lr.empty) self.assertRaises(GEOSIndexError, lr.__getitem__, 0) else: self.assertRaises(GEOSIndexError, g.__getitem__, 0) def test_collections_of_collections(self): "Testing GeometryCollection handling of other collections." # Creating a GeometryCollection WKT string composed of other # collections and polygons. coll = [mp.wkt for mp in self.geometries.multipolygons if mp.valid] coll.extend(mls.wkt for mls in self.geometries.multilinestrings) coll.extend(p.wkt for p in self.geometries.polygons) coll.extend(mp.wkt for mp in self.geometries.multipoints) gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll) # Should construct ok from WKT gc1 = GEOSGeometry(gc_wkt) # Should also construct ok from individual geometry arguments. gc2 = GeometryCollection(*tuple(g for g in gc1)) # And, they should be equal. self.assertEqual(gc1, gc2) @skipUnless(HAS_GDAL, "GDAL is required.") def test_gdal(self): "Testing `ogr` and `srs` properties." 
g1 = fromstr('POINT(5 23)') self.assertIsInstance(g1.ogr, gdal.OGRGeometry) self.assertIsNone(g1.srs) g1_3d = fromstr('POINT(5 23 8)') self.assertIsInstance(g1_3d.ogr, gdal.OGRGeometry) self.assertEqual(g1_3d.ogr.z, 8) g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326) self.assertIsInstance(g2.ogr, gdal.OGRGeometry) self.assertIsInstance(g2.srs, gdal.SpatialReference) self.assertEqual(g2.hex, g2.ogr.hex) self.assertEqual('WGS 84', g2.srs.name) def test_copy(self): "Testing use with the Python `copy` module." import copy poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))') cpy1 = copy.copy(poly) cpy2 = copy.deepcopy(poly) self.assertNotEqual(poly._ptr, cpy1._ptr) self.assertNotEqual(poly._ptr, cpy2._ptr) @skipUnless(HAS_GDAL, "GDAL is required to transform geometries") def test_transform(self): "Testing `transform` method." orig = GEOSGeometry('POINT (-104.609 38.255)', 4326) trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774) # Using a srid, a SpatialReference object, and a CoordTransform object # for transformations. t1, t2, t3 = orig.clone(), orig.clone(), orig.clone() t1.transform(trans.srid) t2.transform(gdal.SpatialReference('EPSG:2774')) ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774)) t3.transform(ct) # Testing use of the `clone` keyword. 
k1 = orig.clone() k2 = k1.transform(trans.srid, clone=True) self.assertEqual(k1, orig) self.assertNotEqual(k1, k2) prec = 3 for p in (t1, t2, t3, k2): self.assertAlmostEqual(trans.x, p.x, prec) self.assertAlmostEqual(trans.y, p.y, prec) @skipUnless(HAS_GDAL, "GDAL is required to transform geometries") def test_transform_3d(self): p3d = GEOSGeometry('POINT (5 23 100)', 4326) p3d.transform(2774) self.assertEqual(p3d.z, 100) @skipUnless(HAS_GDAL, "GDAL is required.") def test_transform_noop(self): """ Testing `transform` method (SRID match) """ # transform() should no-op if source & dest SRIDs match, # regardless of whether GDAL is available. if gdal.HAS_GDAL: g = GEOSGeometry('POINT (-104.609 38.255)', 4326) gt = g.tuple g.transform(4326) self.assertEqual(g.tuple, gt) self.assertEqual(g.srid, 4326) g = GEOSGeometry('POINT (-104.609 38.255)', 4326) g1 = g.transform(4326, clone=True) self.assertEqual(g1.tuple, g.tuple) self.assertEqual(g1.srid, 4326) self.assertIsNot(g1, g, "Clone didn't happen") old_has_gdal = gdal.HAS_GDAL try: gdal.HAS_GDAL = False g = GEOSGeometry('POINT (-104.609 38.255)', 4326) gt = g.tuple g.transform(4326) self.assertEqual(g.tuple, gt) self.assertEqual(g.srid, 4326) g = GEOSGeometry('POINT (-104.609 38.255)', 4326) g1 = g.transform(4326, clone=True) self.assertEqual(g1.tuple, g.tuple) self.assertEqual(g1.srid, 4326) self.assertIsNot(g1, g, "Clone didn't happen") finally: gdal.HAS_GDAL = old_has_gdal def test_transform_nosrid(self): """ Testing `transform` method (no SRID or negative SRID) """ g = GEOSGeometry('POINT (-104.609 38.255)', srid=None) self.assertRaises(GEOSException, g.transform, 2774) g = GEOSGeometry('POINT (-104.609 38.255)', srid=None) self.assertRaises(GEOSException, g.transform, 2774, clone=True) g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1) self.assertRaises(GEOSException, g.transform, 2774) g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1) self.assertRaises(GEOSException, g.transform, 2774, clone=True) 
@skipUnless(HAS_GDAL, "GDAL is required.") def test_transform_nogdal(self): """ Testing `transform` method (GDAL not available) """ old_has_gdal = gdal.HAS_GDAL try: gdal.HAS_GDAL = False g = GEOSGeometry('POINT (-104.609 38.255)', 4326) self.assertRaises(GEOSException, g.transform, 2774) g = GEOSGeometry('POINT (-104.609 38.255)', 4326) self.assertRaises(GEOSException, g.transform, 2774, clone=True) finally: gdal.HAS_GDAL = old_has_gdal def test_extent(self): "Testing `extent` method." # The xmin, ymin, xmax, ymax of the MultiPoint should be returned. mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50)) self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent) pnt = Point(5.23, 17.8) # Extent of points is just the point itself repeated. self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent) # Testing on the 'real world' Polygon. poly = fromstr(self.geometries.polygons[3].wkt) ring = poly.shell x, y = ring.x, ring.y xmin, ymin = min(x), min(y) xmax, ymax = max(x), max(y) self.assertEqual((xmin, ymin, xmax, ymax), poly.extent) def test_pickle(self): "Testing pickling and unpickling support." # Using both pickle and cPickle -- just 'cause. from django.utils.six.moves import cPickle import pickle # Creating a list of test geometries for pickling, # and setting the SRID on some of them. def get_geoms(lst, srid=None): return [GEOSGeometry(tg.wkt, srid) for tg in lst] tgeoms = get_geoms(self.geometries.points) tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326)) tgeoms.extend(get_geoms(self.geometries.polygons, 3084)) tgeoms.extend(get_geoms(self.geometries.multipolygons, 3857)) for geom in tgeoms: s1, s2 = cPickle.dumps(geom), pickle.dumps(geom) g1, g2 = cPickle.loads(s1), pickle.loads(s2) for tmpg in (g1, g2): self.assertEqual(geom, tmpg) self.assertEqual(geom.srid, tmpg.srid) def test_prepared(self): "Testing PreparedGeometry support." # Creating a simple multipolygon and getting a prepared version. 
mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))') prep = mpoly.prepared # A set of test points. pnts = [Point(5, 5), Point(7.5, 7.5), Point(2.5, 7.5)] covers = [True, True, False] # No `covers` op for regular GEOS geoms. for pnt, c in zip(pnts, covers): # Results should be the same (but faster) self.assertEqual(mpoly.contains(pnt), prep.contains(pnt)) self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt)) self.assertEqual(c, prep.covers(pnt)) if geos_version_info()['version'] > '3.3.0': self.assertTrue(prep.crosses(fromstr('LINESTRING(1 1, 15 15)'))) self.assertTrue(prep.disjoint(Point(-5, -5))) poly = Polygon(((-1, -1), (1, 1), (1, 0), (-1, -1))) self.assertTrue(prep.overlaps(poly)) poly = Polygon(((-5, 0), (-5, 5), (0, 5), (-5, 0))) self.assertTrue(prep.touches(poly)) poly = Polygon(((-1, -1), (-1, 11), (11, 11), (11, -1), (-1, -1))) self.assertTrue(prep.within(poly)) # Original geometry deletion should not crash the prepared one (#21662) del mpoly self.assertTrue(prep.covers(Point(5, 5))) def test_line_merge(self): "Testing line merge support" ref_geoms = (fromstr('LINESTRING(1 1, 1 1, 3 3)'), fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'), ) ref_merged = (fromstr('LINESTRING(1 1, 3 3)'), fromstr('LINESTRING (1 1, 3 3, 4 2)'), ) for geom, merged in zip(ref_geoms, ref_merged): self.assertEqual(merged, geom.merged) def test_valid_reason(self): "Testing IsValidReason support" g = GEOSGeometry("POINT(0 0)") self.assertTrue(g.valid) self.assertIsInstance(g.valid_reason, six.string_types) self.assertEqual(g.valid_reason, "Valid Geometry") g = GEOSGeometry("LINESTRING(0 0, 0 0)") self.assertFalse(g.valid) self.assertIsInstance(g.valid_reason, six.string_types) self.assertTrue(g.valid_reason.startswith("Too few points in geometry component")) @skipUnless(HAS_GEOS, "Geos is required.") def test_linearref(self): "Testing linear referencing" ls = fromstr('LINESTRING(0 0, 0 10, 10 10, 10 0)') mls = 
fromstr('MULTILINESTRING((0 0, 0 10), (10 0, 10 10))') self.assertEqual(ls.project(Point(0, 20)), 10.0) self.assertEqual(ls.project(Point(7, 6)), 24) self.assertEqual(ls.project_normalized(Point(0, 20)), 1.0 / 3) self.assertEqual(ls.interpolate(10), Point(0, 10)) self.assertEqual(ls.interpolate(24), Point(10, 6)) self.assertEqual(ls.interpolate_normalized(1.0 / 3), Point(0, 10)) self.assertEqual(mls.project(Point(0, 20)), 10) self.assertEqual(mls.project(Point(7, 6)), 16) self.assertEqual(mls.interpolate(9), Point(0, 9)) self.assertEqual(mls.interpolate(17), Point(10, 7)) def test_geos_version(self): """Testing the GEOS version regular expression.""" from django.contrib.gis.geos.libgeos import version_regex versions = [('3.0.0rc4-CAPI-1.3.3', '3.0.0', '1.3.3'), ('3.0.0-CAPI-1.4.1', '3.0.0', '1.4.1'), ('3.4.0dev-CAPI-1.8.0', '3.4.0', '1.8.0'), ('3.4.0dev-CAPI-1.8.0 r0', '3.4.0', '1.8.0')] for v_init, v_geos, v_capi in versions: m = version_regex.match(v_init) self.assertTrue(m, msg="Unable to parse the version string '%s'" % v_init) self.assertEqual(m.group('version'), v_geos) self.assertEqual(m.group('capi_version'), v_capi)
import unittest if __name__ == '__main__': import sys import os sys.path.append(os.path.abspath("..")) from streak_client import * del sys del os elif len(__name__.split('.')) > 2: #nose tests compatibility from ..streak_client import * else: #py.test from streak_client import * class StreakClientTestBase(unittest.TestCase): @classmethod def setUpClass(cls): #borrowed from: https://github.com/kredei/streak_client/ key = '0b6359c686584bc3b610a640e2e7eb9f' cls.client = StreakClient(key) @classmethod def tearDownClass(cls): cls.client.delete_all_pipelines() del cls.client class StreakClientUserAPITest(StreakClientTestBase): ### #Unittest basics ### def test_get_user_me(self): code, data = self.client.get_user() self.assertEqual(code, 200, "Response is not OK. Code: {}".format(code)) def test_get_user_by_key(self): code, data = self.client.get_user() user_key = data['userKey'] code, data = self.client.get_user(user_key) self.assertEqual(code, 200, "Response is not OK. Code: {}".format(code)) def test_get_user_by_key_invalid_key(self): code, data = self.client.get_user(' ') self.assertEqual(code, 400, "Response is not '400'. Code: {}".format(code)) class StreakClientPipelineAPITest(StreakClientTestBase): '''Touches most API related to Pipelines. This is not exactly unittesting though. ''' def setUp(self): #do nothing unless we want to create stuff here in the future pass def tearDown(self): #delete all pipelines. #assumes get_all and delete work fine. pass def test_create_update_get_delete_one_pipeline(self): #create entry code, data = self.client.create_pipeline('my_name', 'my_description') self.assertEqual(code, 200, "Create response is not OK. 
Code: {}".format(code)) self.assertTrue('name' in data and data['name'] == 'my_name', 'Create response content is missing!') #update entry pl = StreakPipeline(**data) pl.attributes['name'] = "new_name" pl.attributes['description'] = "new_description" #update the pipeline code, data = self.client.update_pipeline(pl) self.assertEqual(code, 200, "Update response is not OK. Code: {}".format(code)) new_pl = StreakPipeline(**data) #check the new values. self.assertDictEqual(pl.to_dict(rw=True), new_pl.to_dict(rw=True), "Update data failed!") #get entry code, data = self.client.get_pipeline(data['pipelineKey']) self.assertEqual(code, 200, "Get response is not OK. Code: {}".format(code)) self.assertTrue('name' in data and data['name'] == 'new_name', 'Get response content is missing!') #delete entry code, data = self.client.delete_pipeline(data['pipelineKey']) self.assertEqual(code, 200, "Delete response is not OK. Code: {}".format(code)) def test_delete_all_pipelines(self): code, data = self.client.delete_all_pipelines() self.assertEqual(code, 200, "Delete all response not OK. Code: {}".format(code)) code, data = self.client.get_pipeline() self.assertEqual(len(data), 0, "Expected: 0, Read: {}".format(len(data))) def test_get_delete_all_pipelines(self): num_pl = 10 for i in range(num_pl): code, data = self.client.create_pipeline('my_name' + str(i), 'my_description' + str(i)) self.assertEqual(code, 200, "Create response is not OK. Code: {}".format(code)) self.assertTrue('name' in data and data['name'] == 'my_name' + str(i), 'Create response content is missing!') code, data = self.client.get_pipeline() self.assertEqual(len(data), num_pl, "Created: {}, Read: {}".format(num_pl, len(data))) code, data = self.client.delete_all_pipelines() self.assertEqual(code, 200, "Delete all response not OK. Code: {}".format(code)) code, data = self.client.get_pipeline() self.assertEqual(len(data), 0, "Expected: 0, Read: {}".format(len(data))) ''' ############ #Temporary test routines. 
############################################################
# Manual smoke tests for the Streak API client (Python 2 script:
# uses raw_input/xrange).  Each *_api_test drives one API area
# end-to-end against a live client `s_client`; raw_input() pauses
# let the operator inspect server-side state between steps.
# Model classes (StreakUser, StreakPipeline, ...) and `pprint` are
# defined/imported elsewhere in this file.
############################################################


def user_api_test(s_client):
    """Exercise the user endpoints: current user, then lookup by key."""
    code, user_data = s_client.get_user()
    print('---ME---')
    user = StreakUser(**user_data)
    user.show()
    print('---USER BY ID---')
    code, user_data = s_client.get_user(user.attributes['userKey'])
    user = StreakUser(**user_data)
    user.show()


def pipeline_api_test(s_client):
    """Exercise pipeline CRUD: create, update x5, delete, list, get one."""
    print('---Create PIPELINE---')
    code, data = s_client.create_pipeline("1", "desc")
    o = StreakPipeline(**data)
    o.show()
    print("---------")
    for i in xrange(5):
        raw_input()
        print('---Update PIPE---')
        # pipeline name is a stringified counter; bump it each round
        o.attributes['name'] = str(int(o.attributes['name']) + 1)
        code, data = s_client.update_pipeline(o)
        o = StreakPipeline(**data)
        o.show()
        print("---------")
    raw_input()
    print('---Delete PIPELINE---')
    code, data = s_client.delete_pipeline(o.to_dict()['pipelineKey'])
    print(data)
    print("---------")
    raw_input()
    print('---GET ALL PIPELINES---')
    code, data = s_client.get_pipelines()
    for item in data:
        o = StreakPipeline(**item)
        o.show()
        print("---------")
    print('---GET ONE PIPELINE---')
    code, data = s_client.get_pipeline(
        "agxzfm1haWxmb29nYWVyOAsSDE9yZ2FuaXphdGlvbiIRbWVobWV0Z0BnbWFpbC5jb20MCxIIV29ya2Zsb3cYgICAgIC5hAoM")
    o = StreakPipeline(**data)
    o.show()
    print("---------")


def box_api_test(s_client):
    """Exercise box endpoints: list by pipeline, get one, list all, create, update."""
    print('---ONE PIPE, ALL BOXES---')
    code, data = s_client.get_pipeline_boxes(
        "agxzfm1haWxmb29nYWVyOAsSDE9yZ2FuaXphdGlvbiIRbWVobWV0Z0BnbWFpbC5jb20MCxIIV29ya2Zsb3cYgICAgIC5hAoM")
    for item in data:
        o = StreakBox(**item)
        o.show()
        print("---------")
    print("---ONE BOX---")
    code, data = s_client.get_box(
        'agxzfm1haWxmb29nYWVyLwsSDE9yZ2FuaXphdGlvbiIRbWVobWV0Z0BnbWFpbC5jb20MCxIEQ2FzZRjh1AMM')
    o = StreakBox(**data)
    o.show()
    print("---------")
    print("---ALL BOXES---")
    code, data = s_client.get_all_boxes()
    for item in data:
        o = StreakBox(**item)
        o.show()
        print("---------")
    print("---Create BOX---")
    code, data = s_client.create_box(
        'agxzfm1haWxmb29nYWVyOAsSDE9yZ2FuaXphdGlvbiIRbWVobWV0Z0BnbWFpbC5jb20MCxIIV29ya2Zsb3cYgICAgIC5hAoM', "1")
    o = StreakBox(**data)
    o.show()
    for i in xrange(5):
        raw_input()
        print('---Update BOX---')
        o.attributes['name'] = str(int(o.attributes['name']) + 1)
        code, data = s_client.update_box(o)
        o = StreakBox(**data)
        o.show()
        print("---------")


def search_api_test(s_client):
    """Exercise search, then delete the last box found."""
    print("---Search BOXES---")
    code, data = s_client.search("6")
    for item in data['results']:
        o = StreakBox(**item)
        o.show()
        print("---------")
    raw_input()
    print("---Delete BOX---")
    # NOTE(review): relies on `o` from the loop above — raises NameError
    # when the search returns no results; acceptable for a manual smoke test.
    code, data = s_client.delete_box(o.to_dict()['boxKey'])
    print(data)


def snippet_api_test(s_client):
    """Exercise snippet endpoints: list all, get one."""
    print("---ALL SNIPPETS---")
    code, data = s_client.get_snippets()
    for item in data:
        o = StreakSnippet(**item)
        o.show()
        print("---------")
    print("---ONE SNIPPET---")
    code, data = s_client.get_snippet("sss")
    if code == 200:
        # BUGFIX: was StreakSnippet(**item) — `item` is the stale loop
        # variable from the listing above, not the fetched snippet.
        o = StreakSnippet(**data)
        o.show()
        print("---------")


def stage_api_test(s_client):
    """Exercise stage CRUD within a fixed pipeline."""
    print('---ONE PIPE, ALL STAGES---')
    code, data = s_client.get_pipeline_stages(
        "agxzfm1haWxmb29nYWVyOAsSDE9yZ2FuaXphdGlvbiIRbWVobWV0Z0BnbWFpbC5jb20MCxIIV29ya2Zsb3cYgICAgIDyiAoM")
    raw_input()
    for item in data:
        o = StreakStage(**item)
        o.show()
        print("------------------")
    print("---ONE STAGE---")
    code, data = s_client.get_pipeline_stage(
        'agxzfm1haWxmb29nYWVyOAsSDE9yZ2FuaXphdGlvbiIRbWVobWV0Z0BnbWFpbC5jb20MCxIIV29ya2Zsb3cYgICAgIDyiAoM', '5001')
    pprint(data)
    o = StreakStage(**data)
    o.show()
    print("------------------")
    print("---Create STAGE---")
    code, data = s_client.create_pipeline_stage(
        'agxzfm1haWxmb29nYWVyOAsSDE9yZ2FuaXphdGlvbiIRbWVobWV0Z0BnbWFpbC5jb20MCxIIV29ya2Zsb3cYgICAgIDyiAoM', "1")
    # server response omits the parent pipeline key; graft it on so the
    # later update/delete calls can find it
    data.update({'pipelineKey':
                 'agxzfm1haWxmb29nYWVyOAsSDE9yZ2FuaXphdGlvbiIRbWVobWV0Z0BnbWFpbC5jb20MCxIIV29ya2Zsb3cYgICAgIDyiAoM'})
    o = StreakStage(**data)
    o.show()
    for i in xrange(5):
        raw_input()
        print("---Update STAGE---")
        o.attributes['name'] = str(int(o.attributes['name']) + 1)
        code, data = s_client.update_pipeline_stage(o)
        data.update({'pipelineKey':
                     'agxzfm1haWxmb29nYWVyOAsSDE9yZ2FuaXphdGlvbiIRbWVobWV0Z0BnbWFpbC5jb20MCxIIV29ya2Zsb3cYgICAgIDyiAoM'})
        o = StreakStage(**data)
        o.show()
        print("------------------")
    raw_input()
    # BUGFIX: label said "Delete BOX" but this deletes a stage.
    print("---Delete STAGE---")
    code, data = s_client.delete_pipeline_stage(o.attributes['pipelineKey'], o.attributes['key'])
    print(data)


def pipeline_field_api_test(s_client):
    """Exercise pipeline-field CRUD within a fixed pipeline."""
    pipeline_key = 'agxzfm1haWxmb29nYWVyOAsSDE9yZ2FuaXphdGlvbiIRbWVobWV0Z0BnbWFpbC5jb20MCxIIV29ya2Zsb3cYgICAgIDyiAoM'
    print('---ONE PIPE, CREATE FIELD---')
    code, data = s_client.create_pipeline_field(pipeline_key, 'myField', 'TEXT_INPUT')
    if code == 200:
        o = StreakField(**data)
        o.show()
    print("------------------")
    raw_input()
    print('---ONE PIPE, CREATE FIELD---')
    code, data = s_client.create_pipeline_field(pipeline_key, 'myField2', 'PERSON')
    if code == 200:
        o = StreakField(**data)
        o.show()
    print("------------------")
    raw_input()
    print('---ONE PIPE, ALL FIELDS---')
    code, data = s_client.get_pipeline_field(pipeline_key)
    if code == 200:
        for item in data:
            o = StreakField(**item)
            o.show()
    print("------------------")
    raw_input()
    print("---ONE FIELD---")
    code, data = s_client.get_pipeline_field(pipeline_key, o.attributes['key'])
    if code == 200:
        o = StreakField(**data)
        o.show()
    print("------------------")
    raw_input()
    print("---UPDATE FIELD---")
    for i in xrange(5):
        o.attributes['name'] += str(i)
        code, data = s_client.update_pipeline_field(pipeline_key, o)
        if code == 200:
            o = StreakField(**data)
            o.show()
        print("------------------")
        raw_input()
    print('---ONE PIPE, DELETE ALL FIELDS---')
    code, data = s_client.get_pipeline_field(pipeline_key)
    if code == 200:
        for item in data:
            o = StreakField(**item)
            o.attributes['pipelineKey'] = pipeline_key
            o.show()
            print("---Delete FIELD---")
            code, data = s_client.delete_pipeline_field(o.attributes['pipelineKey'], o.attributes['key'])
            pprint(data)
            print("------------------")
            raw_input()


def box_field_api_test(s_client):
    """Exercise box-field CRUD within a fixed box."""
    box_key = 'agxzfm1haWxmb29nYWVyLwsSDE9yZ2FuaXphdGlvbiIRbWVobWV0Z0BnbWFpbC5jb20MCxIEQ2FzZRjhxQgM'
    print('---ONE BOX, CREATE FIELD---')
    code, data = s_client.create_box_field(box_key, 'myField', 'TEXT_INPUT')
    if code == 200:
        o = StreakField(**data)
        o.show()
    print("------------------")
    raw_input()
    print('---ONE BOX, CREATE FIELD---')
    code, data = s_client.create_box_field(box_key, 'myField2', 'PERSON')
    if code == 200:
        o = StreakField(**data)
        o.show()
    print("------------------")
    raw_input()
    print('---ONE BOX, ALL FIELDS---')
    code, data = s_client.get_box_field(box_key)
    if code == 200:
        for item in data:
            o = StreakField(**item)
            o.show()
    print("------------------")
    raw_input()
    print("---ONE FIELD---")
    code, data = s_client.get_box_field(box_key, o.attributes['key'])
    if code == 200:
        o = StreakField(**data)
        o.show()
    print("------------------")
    raw_input()
    print('---ONE BOX, DELETE ALL FIELDS---')
    code, data = s_client.get_box_field(box_key)
    if code == 200:
        for item in data:
            o = StreakField(**item)
            o.attributes['boxKey'] = box_key
            o.show()
            print("---Delete FIELD---")
            # BUGFIX: was o.attributes['pipelineKey'] — only 'boxKey' is
            # set two lines above, so the old code raised KeyError.
            code, data = s_client.delete_box_field(o.attributes['boxKey'], o.attributes['key'])
            pprint(data)
            print("------------------")
            raw_input()


def newsfeed_api_test(s_client):
    """Exercise newsfeed endpoints for one pipeline and one box."""
    box_key = 'agxzfm1haWxmb29nYWVyLwsSDE9yZ2FuaXphdGlvbiIRbWVobWV0Z0BnbWFpbC5jb20MCxIEQ2FzZRjhxQgM'
    pipeline_key = 'agxzfm1haWxmb29nYWVyOAsSDE9yZ2FuaXphdGlvbiIRbWVobWV0Z0BnbWFpbC5jb20MCxIIV29ya2Zsb3cYgICAgIDyiAoM'
    code, data = s_client.get_pipeline_newsfeeds(pipeline_key, "ALL")
    pprint("ALL PIPELINE NEWSFEED: {}".format(data))
    raw_input()
    code, data = s_client.get_pipeline_newsfeeds(pipeline_key, "CONDENSED")
    pprint("CONDENSED PIPELINE NEWSFEED: {}".format(data))
    raw_input()
    code, data = s_client.get_box_newsfeeds(box_key, "ALL")
    pprint("ALL BOX NEWSFEED: {}".format(data))
    raw_input()
    code, data = s_client.get_box_newsfeeds(box_key, "CONDENSED")
    pprint("CONDENSED BOX NEWSFEED: {}".format(data))


def threads_api_test(s_client):
    """Exercise thread endpoints: list by box, then get the last one."""
    box_key = 'agxzfm1haWxmb29nYWVyLwsSDE9yZ2FuaXphdGlvbiIRbWVobWV0Z0BnbWFpbC5jb20MCxIEQ2FzZRjhxQgM'
    print("---BOX THREADS---")
    code, data = s_client.get_box_threads(box_key)
    if code == 200:
        for item in data:
            o = StreakThread(**item)
            o.show()
            print("-------------")
    print("---ONE THREAD---")
    code, data = s_client.get_thread(o.attributes['key'])
    if code == 200:
        o = StreakThread(**data)
        o.show()
        print("-------------")


def comments_api_test(s_client):
    """Exercise box comment endpoints: create one, list all."""
    box_key = 'agxzfm1haWxmb29nYWVyLwsSDE9yZ2FuaXphdGlvbiIRbWVobWV0Z0BnbWFpbC5jb20MCxIEQ2FzZRjhxQgM'
    print("---CREATE BOX COMMENT---")
    code, data = s_client.create_box_comments(box_key, "Hello World!")
    if code == 200:
        o = StreakComment(**data)
        o.show()
    print("------------------------")
    print("---GET BOX COMMENTS---")
    code, data = s_client.get_box_comments(box_key)
    if code == 200:
        for item in data:
            o = StreakComment(**item)
            o.show()
            # deletion deliberately disabled for this smoke test:
            # print(o.attributes['boxKey'], o.attributes['key'])
            # print("---DELETE BOX COMMENT---")
            # code, data = s_client.delete_box_comment(o.attributes['boxKey'], o.attributes['key'])
            # pprint(data)
            print("------------------------")


def files_api_test(s_client):
    """Exercise file endpoints: list by box, get one, link, contents."""
    box_key = 'agxzfm1haWxmb29nYWVyLwsSDE9yZ2FuaXphdGlvbiIRbWVobWV0Z0BnbWFpbC5jb20MCxIEQ2FzZRjhxQgM'
    print("---GET BOX FILES---")
    code, data = s_client.get_box_files(box_key)
    if code == 200:
        for item in data:
            o = StreakFile(**item)
            o.show()
            # deletion deliberately disabled for this smoke test:
            # print(o.attributes['boxKey'], o.attributes['key'])
            # print("---DELETE BOX COMMENT---")
            # code, data = s_client.delete_box_comment(o.attributes['boxKey'], o.attributes['key'])
            # pprint(data)
            print("------------------------")
    print("---GET FILE---")
    code, data = s_client.get_file(o.attributes['fileKey'])
    if code == 200:
        o = StreakFile(**data)
        o.show()
    print("------------------------")
    print("---GET FILE LINK---")
    code, data = s_client.get_file_link(o.attributes['fileKey'])
    pprint(data)
    print("------------------------")
    print("---GET FILE CONTENTS---")
    code, data = s_client.get_file_contents(o.attributes['fileKey'])
    pprint(data)
    print("------------------------")


def box_reminder_api_test(s_client):
    """Exercise reminder CRUD within a fixed box."""
    import time
    box_key = 'agxzfm1haWxmb29nYWVyLwsSDE9yZ2FuaXphdGlvbiIRbWVobWV0Z0BnbWFpbC5jb20MCxIEQ2FzZRjhxQgM'
    print('---ONE BOX, CREATE REMINDER---')
    code, data = s_client.create_box_reminder(box_key, 'hai!', str(int(time.time()) + 100000), True)
    if code == 200:
        o = StreakReminder(**data)
        o.show()
    print("------------------")
    raw_input()
    print('---ONE BOX, CREATE REMINDER---')
    code, data = s_client.create_box_reminder(box_key, 'Moo!', str(int(time.time()) + 200000), False)
    if code == 200:
        o = StreakReminder(**data)
        o.show()
    print("------------------")
    raw_input()
    print('---ONE BOX, ALL REMINDERS---')
    code, data = s_client.get_box_reminders(box_key)
    if code == 200:
        for item in data:
            o = StreakReminder(**item)
            o.show()
    print("------------------")
    raw_input()
    print("---ONE REMINDER---")
    code, data = s_client.get_reminder(o.attributes['reminderKey'])
    if code == 200:
        o = StreakReminder(**data)
        o.show()
    print("------------------")
    print("---UPDATE REMINDER---")
    o.attributes['message'] = "updated!"
    code, data = s_client.update_reminder(o)
    if code == 200:
        o = StreakReminder(**data)
        o.show()
    print("------------------")
    raw_input()
    print('---ONE BOX, DELETE ALL REMINDERS---')
    code, data = s_client.get_box_reminders(box_key)
    if code == 200:
        for item in data:
            # BUGFIX: was StreakField(**item) — these are reminders, and a
            # field object has no 'reminderKey' attribute for the delete call.
            o = StreakReminder(**item)
            o.show()
            print("---Delete Reminder---")
            code, data = s_client.delete_reminder(o.attributes['reminderKey'])
            pprint(data)
            print("------------------")
            raw_input()


def main():
    """Run the Streak client test suite.

    BUGFIX: the old trailer called main() but the only definition of main
    lived inside a commented-out triple-quoted string, so running this
    script directly raised NameError.
    """
    # Alternative: unittest.main(verbosity=2) to auto-discover all cases.
    suite = unittest.TestSuite()
    # StreakClientTestUserAPI is defined earlier in this module
    suite.addTest(unittest.makeSuite(StreakClientTestUserAPI))
    unittest.TextTestRunner(verbosity=2).run(suite)


if __name__ == '__main__':
    main()
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ======================================================================
# Copyright 2018 Julien LE CLEACH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================================================

# Unit tests for supvisors.viewapplication.ApplicationView: every
# collaborator (view context, meld elements, RPC interfaces) is mocked
# and the tests pin exact call sequences via call_args_list.

import pytest

from unittest.mock import call, Mock

from supervisor.http import NOT_DONE_YET
from supervisor.web import MeldView
from supervisor.xmlrpc import RPCError

from supvisors.ttypes import ApplicationStates, StartingStrategies
from supvisors.viewapplication import ApplicationView
from supvisors.viewcontext import APPLI, AUTO, PROCESS, STRATEGY
from supvisors.viewhandler import ViewHandler
from supvisors.webutils import APPLICATION_PAGE

from .base import DummyHttpContext
from .conftest import create_element


@pytest.fixture
def view(supvisors):
    """ Fixture for the instance to test. """
    http_context = DummyHttpContext('ui/application.html')
    http_context.supervisord.supvisors = supvisors
    view = ApplicationView(http_context)
    # replace the real view context by a mock that records format_url calls
    view.view_ctx = Mock(parameters={}, **{'format_url.return_value': 'an url'})
    return view


def test_init(view):
    """ Test the values set at construction. """
    # create instance
    assert isinstance(view, ViewHandler)
    assert isinstance(view, MeldView)
    assert view.application_name == ''
    assert view.application is None


def test_handle_parameters(mocker, view):
    """ Test the handle_parameters method. """
    mocker.patch('supvisors.viewapplication.error_message', return_value='an error')
    mocked_handle = mocker.patch('supvisors.viewhandler.ViewHandler.handle_parameters')
    # patch context
    view.view_ctx.parameters[APPLI] = None
    # test with no application selected
    view.handle_parameters()
    assert mocked_handle.call_args_list == [call(view)]
    assert view.application is None
    assert view.view_ctx.store_message == 'an error'
    assert view.view_ctx.redirect
    mocked_handle.reset_mock()
    # test with application selected
    view.view_ctx = Mock(parameters={APPLI: 'dummy_appli'}, store_message=None, redirect=False)
    view.sup_ctx.applications['dummy_appli'] = 'dummy_appli'
    view.handle_parameters()
    assert mocked_handle.call_args_list == [call(view)]
    assert view.application == 'dummy_appli'
    assert view.view_ctx.store_message is None
    assert not view.view_ctx.redirect


def test_write_navigation(mocker, view):
    """ Test the write_navigation method. """
    mocked_handle = mocker.patch('supvisors.viewhandler.ViewHandler.write_nav')
    view.application_name = 'dummy_appli'
    # test with no application selected
    view.write_navigation('root')
    assert mocked_handle.call_args_list == [call('root', appli='dummy_appli')]


def test_write_header(mocker, view):
    """ Test the write_header method. """
    mocked_action = mocker.patch('supvisors.viewapplication.ApplicationView.write_application_actions')
    mocked_period = mocker.patch('supvisors.viewhandler.ViewHandler.write_periods')
    mocked_strategy = mocker.patch('supvisors.viewapplication.ApplicationView.write_starting_strategy')
    view.application_name = 'dummy_appli'
    view.application = Mock(state=ApplicationStates.STOPPED, major_failure=False, minor_failure=False,
                            **{'running.return_value': False})
    # patch the meld elements
    led_mid = create_element()
    state_mid = create_element()
    application_mid = create_element()
    mocked_root = create_element({'application_mid': application_mid, 'state_mid': state_mid,
                                  'state_led_mid': led_mid})
    # test call with stopped application
    view.write_header(mocked_root)
    assert application_mid.content.call_args_list == [call('dummy_appli')]
    assert state_mid.content.call_args_list == [call('STOPPED')]
    assert led_mid.attrib['class'] == 'status_empty'
    assert mocked_strategy.call_args_list == [call(mocked_root)]
    assert mocked_period.call_args_list == [call(mocked_root)]
    assert mocked_action.call_args_list == [call(mocked_root)]
    mocked_root.reset_all()
    mocker.resetall()
    # test call with running application and no failure
    view.application = Mock(state=ApplicationStates.STARTING, major_failure=False, minor_failure=False,
                            **{'running.return_value': True})
    view.write_header(mocked_root)
    assert application_mid.content.call_args_list == [call('dummy_appli')]
    assert state_mid.content.call_args_list == [call('STARTING')]
    assert led_mid.attrib['class'] == 'status_green'
    assert mocked_strategy.call_args_list == [call(mocked_root)]
    assert mocked_period.call_args_list == [call(mocked_root)]
    assert mocked_action.call_args_list == [call(mocked_root)]
    mocked_root.reset_all()
    mocker.resetall()
    # test call with running application and minor failure
    view.application.minor_failure = True
    view.write_header(mocked_root)
    assert application_mid.content.call_args_list == [call('dummy_appli')]
    assert state_mid.content.call_args_list == [call('STARTING')]
    assert led_mid.attrib['class'] == 'status_yellow'
    assert mocked_strategy.call_args_list == [call(mocked_root)]
    assert mocked_period.call_args_list == [call(mocked_root)]
    assert mocked_action.call_args_list == [call(mocked_root)]
    mocked_root.reset_all()
    mocker.resetall()
    # test call with running application and major failure
    view.application.major_failure = True
    view.write_header(mocked_root)
    assert application_mid.content.call_args_list == [call('dummy_appli')]
    assert state_mid.content.call_args_list == [call('STARTING')]
    assert led_mid.attrib['class'] == 'status_red'
    assert mocked_strategy.call_args_list == [call(mocked_root)]
    assert mocked_period.call_args_list == [call(mocked_root)]
    assert mocked_action.call_args_list == [call(mocked_root)]


def test_write_starting_strategy(view):
    """ Test the write_starting_strategy method. """
    # patch the view context
    view.view_ctx = Mock(parameters={STRATEGY: 'CONFIG'}, **{'format_url.return_value': 'an url'})
    # patch the meld elements
    strategy_mids = [Mock(attrib={'class': ''}) for _ in StartingStrategies]
    # repeat the side_effect list so findmeld can be exhausted once per strategy
    mocked_root = Mock(**{'findmeld.side_effect': strategy_mids * len(strategy_mids)})
    # test all strategies in loop
    for index, strategy in enumerate(StartingStrategies._member_names_):
        view.view_ctx.parameters[STRATEGY] = strategy
        view.write_starting_strategy(mocked_root)
        # other strategy_mids are not selected
        for idx in range(len(strategy_mids)):
            if idx == index:
                # strategy_mid at same index is selected
                assert strategy_mids[idx].attrib['class'] == 'button off active'
                assert strategy_mids[idx].attributes.call_args_list == []
            else:
                assert strategy_mids[idx].attrib['class'] == ''
                assert strategy_mids[idx].attributes.call_args_list == [call(href='an url')]
            # reset mocks
            strategy_mids[idx].attrib['class'] = ''
            strategy_mids[idx].attributes.reset_mock()


def test_write_application_actions(view):
    """ Test the write_application_actions method. """
    # patch the view context
    view.view_ctx = Mock(**{'format_url.side_effect': ['a start url', 'a stop url', 'a restart url']})
    # patch the meld elements
    actions_mid = (Mock(), Mock(), Mock())
    mocked_root = Mock(**{'findmeld.side_effect': actions_mid})
    # test call
    view.write_application_actions(mocked_root)
    assert view.view_ctx.format_url.call_args_list == [call('', APPLICATION_PAGE, action='startapp'),
                                                       call('', APPLICATION_PAGE, action='stopapp'),
                                                       call('', APPLICATION_PAGE, action='restartapp')]
    assert actions_mid[0].attributes.call_args_list == [call(href='a start url')]
    assert actions_mid[1].attributes.call_args_list == [call(href='a stop url')]
    assert actions_mid[2].attributes.call_args_list == [call(href='a restart url')]


def test_write_contents(mocker, view):
    """ Test the write_contents method. """
    mocked_stats = mocker.patch('supvisors.viewhandler.ViewHandler.write_process_statistics')
    mocked_table = mocker.patch('supvisors.viewapplication.ApplicationView.write_process_table')
    mocked_data = mocker.patch('supvisors.viewapplication.ApplicationView.get_process_data',
                               side_effect=([{'namespec': 'dummy'}], [{'namespec': 'dummy'}],
                                            [{'namespec': 'dummy'}], [{'namespec': 'dummy_proc'}],
                                            [{'namespec': 'dummy_proc'}]))
    view.application_name = 'dummy_appli'
    view.application = Mock()
    # patch context
    view.view_ctx = Mock(parameters={PROCESS: None}, **{'get_process_status.return_value': None})
    # patch the meld elements
    mocked_root = Mock()
    # test call with no process selected
    view.write_contents(mocked_root)
    assert mocked_data.call_args_list == [call()]
    assert mocked_table.call_args_list == [call(mocked_root, [{'namespec': 'dummy'}])]
    assert mocked_stats.call_args_list == [call(mocked_root, {})]
    mocked_data.reset_mock()
    mocked_table.reset_mock()
    mocked_stats.reset_mock()
    # test call with process selected and no corresponding status
    view.view_ctx.parameters[PROCESS] = 'dummy_proc'
    view.write_contents(mocked_root)
    assert mocked_data.call_args_list == [call()]
    assert mocked_table.call_args_list == [call(mocked_root, [{'namespec': 'dummy'}])]
    assert view.view_ctx.parameters[PROCESS] == ''
    assert mocked_stats.call_args_list == [call(mocked_root, {})]
    mocked_data.reset_mock()
    mocked_table.reset_mock()
    mocked_stats.reset_mock()
    # test call with process selected but belonging to another application
    view.view_ctx.parameters[PROCESS] = 'dummy_proc'
    view.view_ctx.get_process_status.return_value = Mock(application_name='dumb_appli')
    view.write_contents(mocked_root)
    assert mocked_data.call_args_list == [call()]
    assert mocked_table.call_args_list == [call(mocked_root, [{'namespec': 'dummy'}])]
    assert view.view_ctx.parameters[PROCESS] == ''
    assert mocked_stats.call_args_list == [call(mocked_root, {})]
    mocked_data.reset_mock()
    mocked_table.reset_mock()
    mocked_stats.reset_mock()
    # test call with process selected and belonging to the application but stopped
    view.view_ctx.parameters[PROCESS] = 'dummy_proc'
    view.view_ctx.get_process_status.return_value = Mock(application_name='dummy_appli',
                                                         **{'stopped.return_value': True})
    view.write_contents(mocked_root)
    assert mocked_data.call_args_list == [call()]
    assert mocked_table.call_args_list == [call(mocked_root, [{'namespec': 'dummy_proc'}])]
    assert view.view_ctx.parameters[PROCESS] == ''
    assert mocked_stats.call_args_list == [call(mocked_root, {})]
    mocked_data.reset_mock()
    mocked_table.reset_mock()
    mocked_stats.reset_mock()
    # test call with process selected and belonging to the application and running
    view.view_ctx.parameters[PROCESS] = 'dummy_proc'
    view.view_ctx.get_process_status.return_value = Mock(application_name='dummy_appli',
                                                         **{'stopped.return_value': False})
    view.write_contents(mocked_root)
    assert mocked_data.call_args_list == [call()]
    assert mocked_table.call_args_list == [call(mocked_root, [{'namespec': 'dummy_proc'}])]
    assert view.view_ctx.parameters[PROCESS] == 'dummy_proc'
    assert mocked_stats.call_args_list == [call(mocked_root, {'namespec': 'dummy_proc'})]


def test_get_process_last_desc(mocker, view):
    """ Test the ViewApplication.get_process_last_desc method. """
    # build common Mock
    mocked_process = Mock(**{'get_last_description.return_value': ('10.0.0.1', 'the latest comment')})
    view.view_ctx = Mock(**{'get_process_status.return_value': mocked_process})
    # test method return on non-running process
    assert view.get_process_last_desc('dummy_proc') == ('10.0.0.1', 'the latest comment')


def test_get_process_data(mocker, view):
    """ Test the ViewApplication.get_process_data method. """
    # patch the selected application
    process_1 = Mock(application_name='appli_1', process_name='process_1', namespec='namespec_1',
                     running_identifiers=set(), state='stopped', rules=Mock(expected_load=20),
                     **{'state_string.return_value': 'stopped', 'has_crashed.return_value': False})
    process_2 = Mock(application_name='appli_2', process_name='process_2', namespec='namespec_2',
                     running_identifiers=['10.0.0.1', '10.0.0.3'],  # should be a set but hard to test afterwards
                     state='running', rules=Mock(expected_load=1),
                     **{'state_string.return_value': 'running', 'has_crashed.return_value': True})
    view.application = Mock(processes={process_1.process_name: process_1,
                                       process_2.process_name: process_2})
    # patch context
    mocked_stats = Mock()
    view.view_ctx = Mock(**{'get_process_stats.return_value': (4, mocked_stats)})
    mocker.patch.object(view, 'get_process_last_desc', return_value=('10.0.0.1', 'something'))
    # test call
    data1 = {'application_name': 'appli_1', 'process_name': 'process_1', 'namespec': 'namespec_1',
             'disabled': False, 'identifier': '10.0.0.1', 'statename': 'stopped', 'statecode': 'stopped',
             'gravity': 'stopped', 'has_crashed': False, 'running_identifiers': [],
             'description': 'something', 'expected_load': 20, 'nb_cores': 4, 'proc_stats': mocked_stats}
    data2 = {'application_name': 'appli_2', 'process_name': 'process_2', 'namespec': 'namespec_2',
             'disabled': False, 'identifier': '10.0.0.1', 'statename': 'running', 'statecode': 'running',
             'gravity': 'running', 'has_crashed': True, 'running_identifiers': ['10.0.0.1', '10.0.0.3'],
             'description': 'something', 'expected_load': 1, 'nb_cores': 4, 'proc_stats': mocked_stats}
    assert view.get_process_data() == [data1, data2]


def test_write_process(view):
    """ Test the write_process method. """
    # create a process-like dict
    info = {'process_name': 'proc1', 'namespec': 'dummy_appli:dummy_proc',
            'running_identifiers': [], 'identifier': '10.0.0.2'}
    # patch the view context
    view.view_ctx = Mock(**{'format_url.return_value': 'an url'})
    # patch the meld elements
    running_ul_mid = Mock()
    running_a_mid = Mock(attrib={'class': 'button'})
    running_li_elt = Mock(**{'findmeld.return_value': running_a_mid})
    running_li_mid = Mock(**{'repeat.return_value': [(running_li_elt, '10.0.0.1')]})
    tr_elt = Mock(**{'findmeld.side_effect': [running_ul_mid, running_li_mid]})
    # test call with stopped process
    view.write_process(tr_elt, info)
    assert tr_elt.findmeld.call_args_list == [call('running_ul_mid')]
    assert running_ul_mid.replace.call_args_list == [call('')]
    assert running_a_mid.attributes.call_args_list == []
    assert running_a_mid.content.call_args_list == []
    # reset mock elements
    view.view_ctx.format_url.reset_mock()
    running_ul_mid.replace.reset_mock()
    # test call with running process
    info['running_identifiers'] = {'10.0.0.1'}
    info['identifier'] = '10.0.0.1'
    view.write_process(tr_elt, info)
    assert tr_elt.findmeld.call_args_list == [call('running_ul_mid'), call('running_li_mid')]
    assert running_ul_mid.replace.call_args_list == []
    assert running_a_mid.attributes.call_args_list == [call(href='an url')]
    assert running_a_mid.content.call_args_list == [call('10.0.0.1')]


def test_write_process_table(mocker, view):
    """ Test the write_process_table method. """
    mocked_process = mocker.patch('supvisors.viewapplication.ApplicationView.write_process')
    # write_common_process_status returns whether the row is "selected",
    # which drives the brightened/shaded alternation below
    mocked_common = mocker.patch('supvisors.viewhandler.ViewHandler.write_common_process_status',
                                 side_effect=[True, False, False])
    # patch the meld elements
    table_mid = Mock()
    tr_elt_1 = Mock(attrib={'class': ''})
    tr_elt_2 = Mock(attrib={'class': ''})
    tr_elt_3 = Mock(attrib={'class': ''})
    tr_mid = Mock(**{'repeat.return_value': [(tr_elt_1, 'info_1'), (tr_elt_2, 'info_2'),
                                             (tr_elt_3, 'info_3')]})
    mocked_root = Mock(**{'findmeld.side_effect': [table_mid, tr_mid]})
    # test call with no data
    view.write_process_table(mocked_root, {})
    assert table_mid.replace.call_args_list == [call('No programs to manage')]
    assert mocked_common.replace.call_args_list == []
    assert mocked_process.replace.call_args_list == []
    assert tr_elt_1.attrib['class'] == ''
    assert tr_elt_2.attrib['class'] == ''
    assert tr_elt_3.attrib['class'] == ''
    table_mid.replace.reset_mock()
    # test call with data and line selected
    view.write_process_table(mocked_root, True)
    assert table_mid.replace.call_args_list == []
    assert mocked_common.call_args_list == [call(tr_elt_1, 'info_1'), call(tr_elt_2, 'info_2'),
                                            call(tr_elt_3, 'info_3')]
    assert mocked_process.call_args_list == [call(tr_elt_1, 'info_1'), call(tr_elt_2, 'info_2'),
                                             call(tr_elt_3, 'info_3')]
    assert tr_elt_1.attrib['class'] == 'brightened'
    assert tr_elt_2.attrib['class'] == 'shaded'
    assert tr_elt_3.attrib['class'] == 'brightened'


def test_make_callback(mocker, view):
    """ Test the make_callback method. """
    mocker.patch('supvisors.viewapplication.delayed_error', return_value='Delayed')
    mocked_clear_proc = mocker.patch.object(view, 'clearlog_process_action', return_value='Clear process logs')
    mocked_restart_proc = mocker.patch.object(view, 'restart_process_action', return_value='Restart process')
    mocked_stop_proc = mocker.patch.object(view, 'stop_process_action', return_value='Stop process')
    mocked_start_proc = mocker.patch.object(view, 'start_process_action', return_value='Start process')
    mocked_restart_app = mocker.patch.object(view, 'restart_application_action', return_value='Restart application')
    mocked_stop_app = mocker.patch.object(view, 'stop_application_action', return_value='Stop application')
    mocked_start_app = mocker.patch.object(view, 'start_application_action', return_value='Start application')
    # patch view context
    view.view_ctx = Mock(parameters={STRATEGY: 'LOCAL'}, **{'get_process_status.return_value': None})
    view.application = Mock()
    # test calls for different actions
    assert view.make_callback('', 'startapp') == 'Start application'
    assert mocked_start_app.call_args_list == [call(StartingStrategies.LOCAL)]
    assert view.make_callback('', 'stopapp') == 'Stop application'
    assert mocked_stop_app.call_args_list == [call()]
    assert view.make_callback('', 'restartapp') == 'Restart application'
    assert mocked_restart_app.call_args_list == [call(StartingStrategies.LOCAL)]
    # unknown process -> delayed error
    assert view.make_callback('dummy', 'anything') == 'Delayed'
    # change view context for the remaining actions
    view.view_ctx.get_process_status.return_value = 'None'
    # test start process
    assert view.make_callback('dummy', 'start') == 'Start process'
    assert mocked_start_proc.call_args_list == [call(StartingStrategies.LOCAL, 'dummy')]
    # test stop process
    assert view.make_callback('dummy', 'stop') == 'Stop process'
    assert mocked_stop_proc.call_args_list == [call('dummy')]
    # test restart process
    assert view.make_callback('dummy', 'restart') == 'Restart process'
    assert mocked_restart_proc.call_args_list == [call(StartingStrategies.LOCAL, 'dummy')]
    # test clear logs process
    assert view.make_callback('dummy', 'clearlog') == 'Clear process logs'
    assert mocked_clear_proc.call_args_list == [call('dummy')]


@pytest.fixture
def messages(mocker):
    """ Install patches on all message functions"""
    patches = [mocker.patch('supvisors.viewapplication.delayed_error', return_value='Delay err'),
               mocker.patch('supvisors.viewapplication.delayed_warn', return_value='Delay warn'),
               mocker.patch('supvisors.viewapplication.delayed_info', return_value='Delay info'),
               mocker.patch('supvisors.viewapplication.error_message', return_value='Msg err'),
               mocker.patch('supvisors.viewapplication.warn_message', return_value='Msg warn'),
               mocker.patch('supvisors.viewapplication.info_message', return_value='Msg info')]
    # NOTE(review): mocker.patch returns the Mock (already started), not the
    # patcher, so these start/stop calls are no-ops on auto-created attributes;
    # mocker undoes the patches itself at teardown — confirm and simplify.
    [p.start() for p in patches]
    yield
    [p.stop() for p in patches]


def check_start_action(view, rpc_name, action_name, *args):
    """ Test the method named action_name. """
    for auto in [True, False]:
        view.view_ctx = Mock(parameters={AUTO: auto})
        # get methods involved
        rpc_call = getattr(view.supvisors.supervisor_data.supvisors_rpc_interface, rpc_name)
        action = getattr(view, action_name)
        # test call with error on main RPC call
        rpc_call.side_effect = RPCError('failed RPC')
        assert action(StartingStrategies.CONFIG, *args) == 'Delay err'
        # test call with direct result (application started)
        rpc_call.side_effect = None
        rpc_call.return_value = True
        assert action(StartingStrategies.CONFIG, *args) == 'Delay info'
        # test call with direct result (application NOT started)
        rpc_call.return_value = False
        assert action(StartingStrategies.CONFIG, *args) == 'Delay warn'
        # test call with indirect result leading to internal RPC error
        rpc_call.return_value = lambda: (_ for _ in ()).throw(RPCError(''))
        result = action(StartingStrategies.CONFIG, *args)
        assert callable(result)
        assert result() == 'Msg err'
        # test call with indirect result leading to unfinished job
        rpc_call.return_value = lambda: NOT_DONE_YET
        result = action(StartingStrategies.CONFIG, *args)
        assert callable(result)
        assert result() is NOT_DONE_YET
        # test call with indirect result leading to failure
        rpc_call.return_value = lambda: False
        result = action(StartingStrategies.CONFIG, *args)
        assert callable(result)
        assert result() == 'Msg warn'
        # test call with indirect result leading to success
        rpc_call.return_value = lambda: True
        result = action(StartingStrategies.CONFIG, *args)
        assert callable(result)
        assert result() == 'Msg info'


def test_start_application_action(view, messages):
    """ Test the start_application_action method. """
    check_start_action(view, 'start_application', 'start_application_action')


def test_restart_application_action(view, messages):
    """ Test the restart_application_action method. """
    check_start_action(view, 'restart_application', 'restart_application_action')


def test_start_process_action(view, messages):
    """ Test the start_process_action method. """
    check_start_action(view, 'start_process', 'start_process_action', 'dummy_proc')


def test_restart_process_action(view, messages):
    """ Test the restart_process_action method. """
    check_start_action(view, 'restart_process', 'restart_process_action', 'dummy_proc')


def check_stop_action(view, rpc_name, action_name, *args):
    """ Test the stop-like method named action_name. """
    for auto in [True, False]:
        view.view_ctx = Mock(parameters={AUTO: auto})
        # get methods involved
        rpc_call = getattr(view.supvisors.supervisor_data.supvisors_rpc_interface, rpc_name)
        action = getattr(view, action_name)
        # test call with error on main RPC call
        rpc_call.side_effect = RPCError('failed RPC')
        assert action(*args) == 'Delay err'
        # test call with direct result (application started)
        rpc_call.side_effect = None
        rpc_call.return_value = True
        assert action(*args) == 'Delay info'
        # test call with direct result (application NOT started)
        rpc_call.return_value = False
        assert action(*args) == 'Delay warn'
        # test call with indirect result leading to internal RPC error
        rpc_call.return_value = lambda: (_ for _ in ()).throw(RPCError(''))
        result = action(*args)
        assert callable(result)
        assert result() == 'Msg err'
        # test call with indirect result leading to unfinished job
        rpc_call.return_value = lambda: NOT_DONE_YET
        result = action(*args)
        assert callable(result)
        assert result() is NOT_DONE_YET
        # test call with indirect result leading to success
        rpc_call.return_value = lambda: True
        result = action(*args)
        assert callable(result)
        assert result() == 'Msg info'


def test_stop_application_action(view, messages):
    """ Test the stop_application_action method. """
    check_stop_action(view, 'stop_application', 'stop_application_action')


def test_stop_process_action(view, messages):
    """ Test the stop_process_action method. """
    check_stop_action(view, 'stop_process', 'stop_process_action', 'dummy_proc')


def test_clearlog_process_action(view, messages):
    """ Test the clearlog_process_action method. """
    # get rpc involved (mock)
    rpc_call = view.supvisors.supervisor_data.supervisor_rpc_interface.clearProcessLogs
    # test call with error on main RPC call
    rpc_call.side_effect = RPCError(777, 'failed RPC')
    assert view.clearlog_process_action('namespec') == 'Delay err'
    # test call with direct result (application started)
    rpc_call.side_effect = None
    assert view.clearlog_process_action('namespec') == 'Delay info'
""" Turn compiler.ast structures back into executable python code. The unparse method takes a compiler.ast tree and transforms it back into valid python code. It is incomplete and currently only works for import statements, function calls, function definitions, assignments, and basic expressions. Inspired by python-2.5-svn/Demo/parser/unparse.py fixme: We may want to move to using _ast trees because the compiler for them is about 6 times faster than compiler.compile. """ from __future__ import division, absolute_import, print_function import sys from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add if sys.version_info[0] >= 3: from io import StringIO else: from StringIO import StringIO def unparse(ast, single_line_functions=False): s = StringIO() UnparseCompilerAst(ast, s, single_line_functions) return s.getvalue().lstrip() op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2, 'compiler.ast.Add':1, 'compiler.ast.Sub':1 } class UnparseCompilerAst: """ Methods in this class recursively traverse an AST and output source code for the abstract syntax; original formatting is disregarged. """ ######################################################################### # object interface. ######################################################################### def __init__(self, tree, file = sys.stdout, single_line_functions=False): """ Unparser(tree, file=sys.stdout) -> None. Print the source for tree to file. """ self.f = file self._single_func = single_line_functions self._do_indent = True self._indent = 0 self._dispatch(tree) self._write("\n") self.f.flush() ######################################################################### # Unparser private interface. 
######################################################################### ### format, output, and dispatch methods ################################ def _fill(self, text = ""): "Indent a piece of text, according to the current indentation level" if self._do_indent: self._write("\n"+" "*self._indent + text) else: self._write(text) def _write(self, text): "Append a piece of text to the current line." self.f.write(text) def _enter(self): "Print ':', and increase the indentation." self._write(": ") self._indent += 1 def _leave(self): "Decrease the indentation level." self._indent -= 1 def _dispatch(self, tree): "_dispatcher function, _dispatching tree type T to method _T." if isinstance(tree, list): for t in tree: self._dispatch(t) return meth = getattr(self, "_"+tree.__class__.__name__) if tree.__class__.__name__ == 'NoneType' and not self._do_indent: return meth(tree) ######################################################################### # compiler.ast unparsing methods. # # There should be one method per concrete grammar type. They are # organized in alphabetical order. ######################################################################### def _Add(self, t): self.__binary_op(t, '+') def _And(self, t): self._write(" (") for i, node in enumerate(t.nodes): self._dispatch(node) if i != len(t.nodes)-1: self._write(") and (") self._write(")") def _AssAttr(self, t): """ Handle assigning an attribute of an object """ self._dispatch(t.expr) self._write('.'+t.attrname) def _Assign(self, t): """ Expression Assignment such as "a = 1". This only handles assignment in expressions. Keyword assignment is handled separately. """ self._fill() for target in t.nodes: self._dispatch(target) self._write(" = ") self._dispatch(t.expr) if not self._do_indent: self._write('; ') def _AssName(self, t): """ Name on left hand side of expression. Treat just like a name on the right side of an expression. """ self._Name(t) def _AssTuple(self, t): """ Tuple on left hand side of an expression. 
""" # _write each elements, separated by a comma. for element in t.nodes[:-1]: self._dispatch(element) self._write(", ") # Handle the last one without writing comma last_element = t.nodes[-1] self._dispatch(last_element) def _AugAssign(self, t): """ +=,-=,*=,/=,**=, etc. operations """ self._fill() self._dispatch(t.node) self._write(' '+t.op+' ') self._dispatch(t.expr) if not self._do_indent: self._write(';') def _Bitand(self, t): """ Bit and operation. """ for i, node in enumerate(t.nodes): self._write("(") self._dispatch(node) self._write(")") if i != len(t.nodes)-1: self._write(" & ") def _Bitor(self, t): """ Bit or operation """ for i, node in enumerate(t.nodes): self._write("(") self._dispatch(node) self._write(")") if i != len(t.nodes)-1: self._write(" | ") def _CallFunc(self, t): """ Function call. """ self._dispatch(t.node) self._write("(") comma = False for e in t.args: if comma: self._write(", ") else: comma = True self._dispatch(e) if t.star_args: if comma: self._write(", ") else: comma = True self._write("*") self._dispatch(t.star_args) if t.dstar_args: if comma: self._write(", ") else: comma = True self._write("**") self._dispatch(t.dstar_args) self._write(")") def _Compare(self, t): self._dispatch(t.expr) for op, expr in t.ops: self._write(" " + op + " ") self._dispatch(expr) def _Const(self, t): """ A constant value such as an integer value, 3, or a string, "hello". """ self._dispatch(t.value) def _Decorators(self, t): """ Handle function decorators (eg. @has_units) """ for node in t.nodes: self._dispatch(node) def _Dict(self, t): self._write("{") for i, (k, v) in enumerate(t.items): self._dispatch(k) self._write(": ") self._dispatch(v) if i < len(t.items)-1: self._write(", ") self._write("}") def _Discard(self, t): """ Node for when return value is ignored such as in "foo(a)". 
""" self._fill() self._dispatch(t.expr) def _Div(self, t): self.__binary_op(t, '/') def _Ellipsis(self, t): self._write("...") def _From(self, t): """ Handle "from xyz import foo, bar as baz". """ # fixme: Are From and ImportFrom handled differently? self._fill("from ") self._write(t.modname) self._write(" import ") for i, (name,asname) in enumerate(t.names): if i != 0: self._write(", ") self._write(name) if asname is not None: self._write(" as "+asname) def _Function(self, t): """ Handle function definitions """ if t.decorators is not None: self._fill("@") self._dispatch(t.decorators) self._fill("def "+t.name + "(") defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults) for i, arg in enumerate(zip(t.argnames, defaults)): self._write(arg[0]) if arg[1] is not None: self._write('=') self._dispatch(arg[1]) if i < len(t.argnames)-1: self._write(', ') self._write(")") if self._single_func: self._do_indent = False self._enter() self._dispatch(t.code) self._leave() self._do_indent = True def _Getattr(self, t): """ Handle getting an attribute of an object """ if isinstance(t.expr, (Div, Mul, Sub, Add)): self._write('(') self._dispatch(t.expr) self._write(')') else: self._dispatch(t.expr) self._write('.'+t.attrname) def _If(self, t): self._fill() for i, (compare,code) in enumerate(t.tests): if i == 0: self._write("if ") else: self._write("elif ") self._dispatch(compare) self._enter() self._fill() self._dispatch(code) self._leave() self._write("\n") if t.else_ is not None: self._write("else") self._enter() self._fill() self._dispatch(t.else_) self._leave() self._write("\n") def _IfExp(self, t): self._dispatch(t.then) self._write(" if ") self._dispatch(t.test) if t.else_ is not None: self._write(" else (") self._dispatch(t.else_) self._write(")") def _Import(self, t): """ Handle "import xyz.foo". 
""" self._fill("import ") for i, (name,asname) in enumerate(t.names): if i != 0: self._write(", ") self._write(name) if asname is not None: self._write(" as "+asname) def _Keyword(self, t): """ Keyword value assignment within function calls and definitions. """ self._write(t.name) self._write("=") self._dispatch(t.expr) def _List(self, t): self._write("[") for i,node in enumerate(t.nodes): self._dispatch(node) if i < len(t.nodes)-1: self._write(", ") self._write("]") def _Module(self, t): if t.doc is not None: self._dispatch(t.doc) self._dispatch(t.node) def _Mul(self, t): self.__binary_op(t, '*') def _Name(self, t): self._write(t.name) def _NoneType(self, t): self._write("None") def _Not(self, t): self._write('not (') self._dispatch(t.expr) self._write(')') def _Or(self, t): self._write(" (") for i, node in enumerate(t.nodes): self._dispatch(node) if i != len(t.nodes)-1: self._write(") or (") self._write(")") def _Pass(self, t): self._write("pass\n") def _Printnl(self, t): self._fill("print ") if t.dest: self._write(">> ") self._dispatch(t.dest) self._write(", ") comma = False for node in t.nodes: if comma: self._write(', ') else: comma = True self._dispatch(node) def _Power(self, t): self.__binary_op(t, '**') def _Return(self, t): self._fill("return ") if t.value: if isinstance(t.value, Tuple): text = ', '.join([ name.name for name in t.value.asList() ]) self._write(text) else: self._dispatch(t.value) if not self._do_indent: self._write('; ') def _Slice(self, t): self._dispatch(t.expr) self._write("[") if t.lower: self._dispatch(t.lower) self._write(":") if t.upper: self._dispatch(t.upper) #if t.step: # self._write(":") # self._dispatch(t.step) self._write("]") def _Sliceobj(self, t): for i, node in enumerate(t.nodes): if i != 0: self._write(":") if not (isinstance(node, Const) and node.value is None): self._dispatch(node) def _Stmt(self, tree): for node in tree.nodes: self._dispatch(node) def _Sub(self, t): self.__binary_op(t, '-') def _Subscript(self, t): 
self._dispatch(t.expr) self._write("[") for i, value in enumerate(t.subs): if i != 0: self._write(",") self._dispatch(value) self._write("]") def _TryExcept(self, t): self._fill("try") self._enter() self._dispatch(t.body) self._leave() for handler in t.handlers: self._fill('except ') self._dispatch(handler[0]) if handler[1] is not None: self._write(', ') self._dispatch(handler[1]) self._enter() self._dispatch(handler[2]) self._leave() if t.else_: self._fill("else") self._enter() self._dispatch(t.else_) self._leave() def _Tuple(self, t): if not t.nodes: # Empty tuple. self._write("()") else: self._write("(") # _write each elements, separated by a comma. for element in t.nodes[:-1]: self._dispatch(element) self._write(", ") # Handle the last one without writing comma last_element = t.nodes[-1] self._dispatch(last_element) self._write(")") def _UnaryAdd(self, t): self._write("+") self._dispatch(t.expr) def _UnarySub(self, t): self._write("-") self._dispatch(t.expr) def _With(self, t): self._fill('with ') self._dispatch(t.expr) if t.vars: self._write(' as ') self._dispatch(t.vars.name) self._enter() self._dispatch(t.body) self._leave() self._write('\n') def _int(self, t): self._write(repr(t)) def __binary_op(self, t, symbol): # Check if parenthesis are needed on left side and then dispatch has_paren = False left_class = str(t.left.__class__) if (left_class in op_precedence.keys() and op_precedence[left_class] < op_precedence[str(t.__class__)]): has_paren = True if has_paren: self._write('(') self._dispatch(t.left) if has_paren: self._write(')') # Write the appropriate symbol for operator self._write(symbol) # Check if parenthesis are needed on the right side and then dispatch has_paren = False right_class = str(t.right.__class__) if (right_class in op_precedence.keys() and op_precedence[right_class] < op_precedence[str(t.__class__)]): has_paren = True if has_paren: self._write('(') self._dispatch(t.right) if has_paren: self._write(')') def _float(self, t): # if t is 
0.1, str(t)->'0.1' while repr(t)->'0.1000000000001' # We prefer str here. self._write(str(t)) def _str(self, t): self._write(repr(t)) def _tuple(self, t): self._write(str(t)) ######################################################################### # These are the methods from the _ast modules unparse. # # As our needs to handle more advanced code increase, we may want to # modify some of the methods below so that they work for compiler.ast. ######################################################################### # # stmt # def _Expr(self, tree): # self._fill() # self._dispatch(tree.value) # # def _Import(self, t): # self._fill("import ") # first = True # for a in t.names: # if first: # first = False # else: # self._write(", ") # self._write(a.name) # if a.asname: # self._write(" as "+a.asname) # ## def _ImportFrom(self, t): ## self._fill("from ") ## self._write(t.module) ## self._write(" import ") ## for i, a in enumerate(t.names): ## if i == 0: ## self._write(", ") ## self._write(a.name) ## if a.asname: ## self._write(" as "+a.asname) ## # XXX(jpe) what is level for? 
## # # def _Break(self, t): # self._fill("break") # # def _Continue(self, t): # self._fill("continue") # # def _Delete(self, t): # self._fill("del ") # self._dispatch(t.targets) # # def _Assert(self, t): # self._fill("assert ") # self._dispatch(t.test) # if t.msg: # self._write(", ") # self._dispatch(t.msg) # # def _Exec(self, t): # self._fill("exec ") # self._dispatch(t.body) # if t.globals: # self._write(" in ") # self._dispatch(t.globals) # if t.locals: # self._write(", ") # self._dispatch(t.locals) # # def _Print(self, t): # self._fill("print ") # do_comma = False # if t.dest: # self._write(">>") # self._dispatch(t.dest) # do_comma = True # for e in t.values: # if do_comma:self._write(", ") # else:do_comma=True # self._dispatch(e) # if not t.nl: # self._write(",") # # def _Global(self, t): # self._fill("global") # for i, n in enumerate(t.names): # if i != 0: # self._write(",") # self._write(" " + n) # # def _Yield(self, t): # self._fill("yield") # if t.value: # self._write(" (") # self._dispatch(t.value) # self._write(")") # # def _Raise(self, t): # self._fill('raise ') # if t.type: # self._dispatch(t.type) # if t.inst: # self._write(", ") # self._dispatch(t.inst) # if t.tback: # self._write(", ") # self._dispatch(t.tback) # # # def _TryFinally(self, t): # self._fill("try") # self._enter() # self._dispatch(t.body) # self._leave() # # self._fill("finally") # self._enter() # self._dispatch(t.finalbody) # self._leave() # # def _excepthandler(self, t): # self._fill("except ") # if t.type: # self._dispatch(t.type) # if t.name: # self._write(", ") # self._dispatch(t.name) # self._enter() # self._dispatch(t.body) # self._leave() # # def _ClassDef(self, t): # self._write("\n") # self._fill("class "+t.name) # if t.bases: # self._write("(") # for a in t.bases: # self._dispatch(a) # self._write(", ") # self._write(")") # self._enter() # self._dispatch(t.body) # self._leave() # # def _FunctionDef(self, t): # self._write("\n") # for deco in t.decorators: # self._fill("@") # 
self._dispatch(deco) # self._fill("def "+t.name + "(") # self._dispatch(t.args) # self._write(")") # self._enter() # self._dispatch(t.body) # self._leave() # # def _For(self, t): # self._fill("for ") # self._dispatch(t.target) # self._write(" in ") # self._dispatch(t.iter) # self._enter() # self._dispatch(t.body) # self._leave() # if t.orelse: # self._fill("else") # self._enter() # self._dispatch(t.orelse) # self._leave # # def _While(self, t): # self._fill("while ") # self._dispatch(t.test) # self._enter() # self._dispatch(t.body) # self._leave() # if t.orelse: # self._fill("else") # self._enter() # self._dispatch(t.orelse) # self._leave # # # expr # def _Str(self, tree): # self._write(repr(tree.s)) ## # def _Repr(self, t): # self._write("`") # self._dispatch(t.value) # self._write("`") # # def _Num(self, t): # self._write(repr(t.n)) # # def _ListComp(self, t): # self._write("[") # self._dispatch(t.elt) # for gen in t.generators: # self._dispatch(gen) # self._write("]") # # def _GeneratorExp(self, t): # self._write("(") # self._dispatch(t.elt) # for gen in t.generators: # self._dispatch(gen) # self._write(")") # # def _comprehension(self, t): # self._write(" for ") # self._dispatch(t.target) # self._write(" in ") # self._dispatch(t.iter) # for if_clause in t.ifs: # self._write(" if ") # self._dispatch(if_clause) # # def _IfExp(self, t): # self._dispatch(t.body) # self._write(" if ") # self._dispatch(t.test) # if t.orelse: # self._write(" else ") # self._dispatch(t.orelse) # # unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"} # def _UnaryOp(self, t): # self._write(self.unop[t.op.__class__.__name__]) # self._write("(") # self._dispatch(t.operand) # self._write(")") # # binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%", # "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&", # "FloorDiv":"//", "Pow": "**"} # def _BinOp(self, t): # self._write("(") # self._dispatch(t.left) # self._write(")" + 
self.binop[t.op.__class__.__name__] + "(") # self._dispatch(t.right) # self._write(")") # # boolops = {_ast.And: 'and', _ast.Or: 'or'} # def _BoolOp(self, t): # self._write("(") # self._dispatch(t.values[0]) # for v in t.values[1:]: # self._write(" %s " % self.boolops[t.op.__class__]) # self._dispatch(v) # self._write(")") # # def _Attribute(self,t): # self._dispatch(t.value) # self._write(".") # self._write(t.attr) # ## def _Call(self, t): ## self._dispatch(t.func) ## self._write("(") ## comma = False ## for e in t.args: ## if comma: self._write(", ") ## else: comma = True ## self._dispatch(e) ## for e in t.keywords: ## if comma: self._write(", ") ## else: comma = True ## self._dispatch(e) ## if t.starargs: ## if comma: self._write(", ") ## else: comma = True ## self._write("*") ## self._dispatch(t.starargs) ## if t.kwargs: ## if comma: self._write(", ") ## else: comma = True ## self._write("**") ## self._dispatch(t.kwargs) ## self._write(")") # # # slice # def _Index(self, t): # self._dispatch(t.value) # # def _ExtSlice(self, t): # for i, d in enumerate(t.dims): # if i != 0: # self._write(': ') # self._dispatch(d) # # # others # def _arguments(self, t): # first = True # nonDef = len(t.args)-len(t.defaults) # for a in t.args[0:nonDef]: # if first:first = False # else: self._write(", ") # self._dispatch(a) # for a,d in zip(t.args[nonDef:], t.defaults): # if first:first = False # else: self._write(", ") # self._dispatch(a), # self._write("=") # self._dispatch(d) # if t.vararg: # if first:first = False # else: self._write(", ") # self._write("*"+t.vararg) # if t.kwarg: # if first:first = False # else: self._write(", ") # self._write("**"+t.kwarg) # ## def _keyword(self, t): ## self._write(t.arg) ## self._write("=") ## self._dispatch(t.value) # # def _Lambda(self, t): # self._write("lambda ") # self._dispatch(t.args) # self._write(": ") # self._dispatch(t.body)
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. from __future__ import division, unicode_literals, print_function import numpy as np from pymatgen.core import Structure, Lattice from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine from pymatgen.phonon.dos import PhononDos, CompletePhononDos from monty.serialization import loadfn from monty.dev import requires try: from phonopy import Phonopy from phonopy.structure.atoms import PhonopyAtoms from phonopy.file_IO import write_disp_yaml except ImportError: Phonopy = None @requires(Phonopy, "phonopy not installed!") def get_pmg_structure(phonopy_structure): """ Convert a PhonopyAtoms object to pymatgen Structure object. Args: phonopy_structure (PhonopyAtoms): A phonopy structure object. """ lattice = phonopy_structure.get_cell() frac_coords = phonopy_structure.get_scaled_positions() symbols = phonopy_structure.get_chemical_symbols() masses = phonopy_structure.get_masses() mms = phonopy_structure.get_magnetic_moments() mms = mms or [0] * len(symbols) return Structure(lattice, symbols, frac_coords, site_properties={"phonopy_masses": masses, "magnetic_moments": mms}) @requires(Phonopy, "phonopy not installed!") def get_phonopy_structure(pmg_structure): """ Convert a pymatgen Structure object to a PhonopyAtoms object. Args: pmg_structure (pymatgen Structure): A Pymatgen structure object. """ symbols = [site.specie.symbol for site in pmg_structure] return PhonopyAtoms(symbols=symbols, cell=pmg_structure.lattice.matrix, scaled_positions=pmg_structure.frac_coords) def get_structure_from_dict(d): """ Extracts a structure from the dictionary extracted from the output files of phonopy like phonopy.yaml or band.yaml. Adds "phonopy_masses" in the site_properties of the structures. Compatible with older phonopy versions. 
""" species = [] frac_coords = [] masses = [] if 'points' in d: for p in d['points']: species.append(p['symbol']) frac_coords.append(p['coordinates']) masses.append(p['mass']) elif 'atoms' in d: for p in d['atoms']: species.append(p['symbol']) frac_coords.append(p['position']) masses.append(p['mass']) else: raise ValueError('The dict does not contain structural information') return Structure(d['lattice'], species, frac_coords, site_properties={"phonopy_masses": masses}) def eigvec_to_eigdispl(v, q, frac_coords, mass): """ Converts a single eigenvector to an eigendisplacement in the primitive cell according to the formula:: exp(2*pi*i*(frac_coords \\dot q) / sqrt(mass) * v Compared to the modulation option in phonopy, here all the additional multiplicative and phase factors are set to 1. Args: v: the vector that should be converted. A 3D complex numpy array. q: the q point in fractional coordinates frac_coords: the fractional coordinates of the atom mass: the mass of the atom """ c = np.exp(2j * np.pi * np.dot(frac_coords, q)) / np.sqrt(mass) return c*v def get_ph_bs_symm_line_from_dict(bands_dict, has_nac=False, labels_dict=None): """ Creates a pymatgen PhononBandStructure object from the dictionary extracted by the band.yaml file produced by phonopy. The labels will be extracted from the dictionary, if present. If the 'eigenvector' key is found the eigendisplacements will be calculated according to the formula:: exp(2*pi*i*(frac_coords \\dot q) / sqrt(mass) * v and added to the object. Args: bands_dict: the dictionary extracted from the band.yaml file has_nac: True if the data have been obtained with the option --nac option. Default False. labels_dict: dict that links a qpoint in frac coords to a label. Its value will replace the data contained in the band.yaml. 
""" structure = get_structure_from_dict(bands_dict) qpts = [] frequencies = [] eigendisplacements = [] phonopy_labels_dict = {} for p in bands_dict['phonon']: q = p['q-position'] qpts.append(q) bands = [] eig_q = [] for b in p['band']: bands.append(b['frequency']) if 'eigenvector' in b: eig_b = [] for i, eig_a in enumerate(b['eigenvector']): v = np.zeros(3, np.complex) for x in range(3): v[x] = eig_a[x][0] + eig_a[x][1]*1j eig_b.append(eigvec_to_eigdispl( v, q, structure[i].frac_coords, structure.site_properties['phonopy_masses'][i])) eig_q.append(eig_b) frequencies.append(bands) if 'label' in p: phonopy_labels_dict[p['label']] = p['q-position'] if eig_q: eigendisplacements.append(eig_q) qpts = np.array(qpts) # transpose to match the convention in PhononBandStructure frequencies = np.transpose(frequencies) if eigendisplacements: eigendisplacements = np.transpose(eigendisplacements, (1, 0, 2, 3)) rec_latt = Lattice(bands_dict['reciprocal_lattice']) labels_dict = labels_dict or phonopy_labels_dict ph_bs = PhononBandStructureSymmLine( qpts, frequencies, rec_latt, has_nac=has_nac, labels_dict=labels_dict, structure=structure, eigendisplacements=eigendisplacements) return ph_bs def get_ph_bs_symm_line(bands_path, has_nac=False, labels_dict=None): """ Creates a pymatgen PhononBandStructure from a band.yaml file. The labels will be extracted from the dictionary, if present. If the 'eigenvector' key is found the eigendisplacements will be calculated according to the formula: \\exp(2*pi*i*(frac_coords \\dot q) / sqrt(mass) * v and added to the object. Args: bands_path: path to the band.yaml file has_nac: True if the data have been obtained with the option --nac option. Default False. labels_dict: dict that links a qpoint in frac coords to a label. """ return get_ph_bs_symm_line_from_dict(loadfn(bands_path), has_nac, labels_dict) def get_ph_dos(total_dos_path): """ Creates a pymatgen PhononDos from a total_dos.dat file. Args: total_dos_path: path to the total_dos.dat file. 
""" a = np.loadtxt(total_dos_path) return PhononDos(a[:, 0], a[:, 1]) def get_complete_ph_dos(partial_dos_path, phonopy_yaml_path): """ Creates a pymatgen CompletePhononDos from a partial_dos.dat and phonopy.yaml files. The second is produced when generating a Dos and is needed to extract the structure. Args: partial_dos_path: path to the partial_dos.dat file. phonopy_yaml_path: path to the phonopy.yaml file. """ a = np.loadtxt(partial_dos_path).transpose() d = loadfn(phonopy_yaml_path) structure = get_structure_from_dict(d['primitive_cell']) total_dos = PhononDos(a[0], a[1:].sum(axis=0)) pdoss = {} for site, pdos in zip(structure, a[1:]): pdoss[site] = pdos.tolist() return CompletePhononDos(structure, total_dos, pdoss) @requires(Phonopy, "phonopy not installed!") def get_displaced_structures(pmg_structure, atom_disp=0.01, supercell_matrix=None, yaml_fname=None, **kwargs): """ Generate a set of symmetrically inequivalent displaced structures for phonon calculations. Args: pmg_structure (Structure): A pymatgen structure object. atom_disp (float): Atomic displacement. Default is 0.01 $\AA$. supercell_matrix (3x3 array): Scaling matrix for supercell. yaml_fname (string): If not None, it represents the full path to the outputting displacement yaml file, e.g. disp.yaml. **kwargs: Parameters used in Phonopy.generate_displacement method. Return: A list of symmetrically inequivalent structures with displacements, in which the first element is the perfect supercell structure. 
""" is_plusminus = kwargs.get("is_plusminus", "auto") is_diagonal = kwargs.get("is_diagonal", True) is_trigonal = kwargs.get("is_trigonal", False) ph_structure = get_phonopy_structure(pmg_structure) if supercell_matrix is None: supercell_matrix = np.eye(3) * np.array((1, 1, 1)) phonon = Phonopy(unitcell=ph_structure, supercell_matrix=supercell_matrix) phonon.generate_displacements(distance=atom_disp, is_plusminus=is_plusminus, is_diagonal=is_diagonal, is_trigonal=is_trigonal) if yaml_fname is not None: displacements = phonon.get_displacements() directions = phonon.get_displacement_directions() write_disp_yaml(displacements=displacements, supercell=phonon.get_supercell(), directions=directions, filename=yaml_fname) # Supercell structures with displacement disp_supercells = phonon.get_supercells_with_displacements() # Perfect supercell structure init_supercell = phonon.get_supercell() # Structure list to be returned structure_list = [get_pmg_structure(init_supercell)] for c in disp_supercells: if c is not None: structure_list.append(get_pmg_structure(c)) return structure_list
#
#   Builtin Definitions
#
# Declares Cython's knowledge of Python builtin functions, methods and types,
# mapping each to its C-level implementation (CPython C API call or a
# utility-code helper shipped in Builtins.c / ObjectHandling.c / Optimize.c).

from Symtab import BuiltinScope, StructOrUnionScope
from Code import UtilityCode
from TypeSlots import Signature
import PyrexTypes
import Options


# C-level implementations of builtin types, functions and methods

iter_next_utility_code = UtilityCode.load("IterNext", "ObjectHandling.c")
getattr_utility_code = UtilityCode.load("GetAttr", "ObjectHandling.c")
getattr3_utility_code = UtilityCode.load("GetAttr3", "Builtins.c")
pyexec_utility_code = UtilityCode.load("PyExec", "Builtins.c")
pyexec_globals_utility_code = UtilityCode.load("PyExecGlobals", "Builtins.c")
globals_utility_code = UtilityCode.load("Globals", "Builtins.c")

py_set_utility_code = UtilityCode.load("pyset_compat", "Builtins.c")

# Extra utility code required when a given builtin type is used.
builtin_utility_code = {
    'set'       : py_set_utility_code,
    'frozenset' : py_set_utility_code,
}


# mapping from builtins to their C-level equivalents

class _BuiltinOverride(object):
    """Common base of BuiltinFunction/BuiltinMethod: one builtin name plus
    the C function (and optional utility code) that implements it.
    """
    def __init__(self, py_name, args, ret_type, cname, py_equiv="*",
                 utility_code=None, sig=None, func_type=None,
                 is_strict_signature=False, builtin_return_type=None):
        self.py_name, self.cname, self.py_equiv = py_name, cname, py_equiv
        self.args, self.ret_type = args, ret_type
        self.func_type, self.sig = func_type, sig
        self.builtin_return_type = builtin_return_type
        self.is_strict_signature = is_strict_signature
        self.utility_code = utility_code

    def build_func_type(self, sig=None, self_arg=None):
        # Build the CFuncType, either from an explicit Signature or from the
        # (args, ret_type) signature codes given at construction time.
        if sig is None:
            sig = Signature(self.args, self.ret_type)
            sig.exception_check = False # not needed for the current builtins
        func_type = sig.function_type(self_arg)
        if self.is_strict_signature:
            func_type.is_strict_signature = True
        if self.builtin_return_type:
            func_type.return_type = builtin_types[self.builtin_return_type]
        return func_type


class BuiltinAttribute(object):
    """A C-level attribute of a builtin type (e.g. complex.real)."""
    def __init__(self, py_name, cname=None, field_type=None, field_type_name=None):
        self.py_name = py_name
        self.cname = cname or py_name
        self.field_type_name = field_type_name # can't do the lookup before the type is declared!
        self.field_type = field_type

    def declare_in_type(self, self_type):
        if self.field_type_name is not None:
            # lazy type lookup
            field_type = builtin_scope.lookup(self.field_type_name).type
        else:
            field_type = self.field_type or PyrexTypes.py_object_type
        entry = self_type.scope.declare(self.py_name, self.cname, field_type, None, 'private')
        entry.is_variable = True


class BuiltinFunction(_BuiltinOverride):
    """A free-standing builtin function (len, abs, getattr, ...)."""
    def declare_in_scope(self, scope):
        func_type, sig = self.func_type, self.sig
        if func_type is None:
            func_type = self.build_func_type(sig)
        scope.declare_builtin_cfunction(self.py_name, func_type, self.cname,
                                        self.py_equiv, self.utility_code)


class BuiltinMethod(_BuiltinOverride):
    """A method on a builtin type (list.append, dict.items, ...)."""
    def declare_in_type(self, self_type):
        method_type, sig = self.func_type, self.sig
        if method_type is None:
            # override 'self' type (first argument)
            self_arg = PyrexTypes.CFuncTypeArg("", self_type, None)
            self_arg.not_none = True
            self_arg.accept_builtin_subtypes = True
            method_type = self.build_func_type(sig, self_arg)
        self_type.scope.declare_builtin_cfunction(
            self.py_name, method_type, self.cname,
            utility_code=self.utility_code)


# Table of overridden builtin functions.  Commented-out entries are builtins
# Cython does not (yet) accelerate.  Duplicate names (e.g. three 'abs'
# entries) provide type-specialised overloads tried in declaration order.
builtin_function_table = [
    # name,        args,   return,  C API func,           py equiv = "*"
    BuiltinFunction('abs',        "d",    "d",     "fabs",
                    is_strict_signature = True),
    BuiltinFunction('abs',        "f",    "f",     "fabsf",
                    is_strict_signature = True),
    BuiltinFunction('abs',        None,    None,   "__Pyx_abs_int",
                    utility_code = UtilityCode.load("abs_int", "Builtins.c"),
                    func_type = PyrexTypes.CFuncType(
                        PyrexTypes.c_uint_type, [
                            PyrexTypes.CFuncTypeArg("arg", PyrexTypes.c_int_type, None)
                            ],
                        is_strict_signature = True)),
    BuiltinFunction('abs',        None,    None,   "__Pyx_abs_long",
                    utility_code = UtilityCode.load("abs_long", "Builtins.c"),
                    func_type = PyrexTypes.CFuncType(
                        PyrexTypes.c_ulong_type, [
                            PyrexTypes.CFuncTypeArg("arg", PyrexTypes.c_long_type, None)
                            ],
                        is_strict_signature = True)),
    BuiltinFunction('abs',        None,    None,   "__Pyx_abs_longlong",
                    utility_code = UtilityCode.load("abs_longlong", "Builtins.c"),
                    func_type = PyrexTypes.CFuncType(
                        PyrexTypes.c_ulonglong_type, [
                            PyrexTypes.CFuncTypeArg("arg", PyrexTypes.c_longlong_type, None)
                            ],
                        is_strict_signature = True)),
    BuiltinFunction('abs',        "O",    "O",     "PyNumber_Absolute"),
    BuiltinFunction('callable',   "O",    "b",     "__Pyx_PyCallable_Check",
                    utility_code = UtilityCode.load("CallableCheck", "ObjectHandling.c")),
    #('chr',       "",     "",      ""),
    #('cmp', "",   "",     "",      ""), # int PyObject_Cmp(PyObject *o1, PyObject *o2, int *result)
    #('compile',   "",     "",      ""), # PyObject* Py_CompileString(    char *str, char *filename, int start)
    BuiltinFunction('delattr',    "OO",   "r",     "PyObject_DelAttr"),
    BuiltinFunction('dir',        "O",    "O",     "PyObject_Dir"),
    BuiltinFunction('divmod',     "OO",   "O",     "PyNumber_Divmod"),
    BuiltinFunction('exec',       "O",    "O",     "__Pyx_PyExecGlobals",
                    utility_code = pyexec_globals_utility_code),
    BuiltinFunction('exec',       "OO",   "O",     "__Pyx_PyExec2",
                    utility_code = pyexec_utility_code),
    BuiltinFunction('exec',       "OOO",  "O",     "__Pyx_PyExec3",
                    utility_code = pyexec_utility_code),
    #('eval',      "",     "",      ""),
    #('execfile',  "",     "",      ""),
    #('filter',    "",     "",      ""),
    BuiltinFunction('getattr3',   "OOO",  "O",     "__Pyx_GetAttr3",     "getattr",
                    utility_code=getattr3_utility_code),  # Pyrex legacy
    BuiltinFunction('getattr',    "OOO",  "O",     "__Pyx_GetAttr3",
                    utility_code=getattr3_utility_code),
    BuiltinFunction('getattr',    "OO",   "O",     "__Pyx_GetAttr",
                    utility_code=getattr_utility_code),
    BuiltinFunction('hasattr',    "OO",   "b",     "PyObject_HasAttr"),
    BuiltinFunction('hash',       "O",    "h",     "PyObject_Hash"),
    #('hex',       "",     "",      ""),
    #('id',        "",     "",      ""),
    #('input',     "",     "",      ""),
    BuiltinFunction('intern',     "O",    "O",     "__Pyx_Intern",
                    utility_code = UtilityCode.load("Intern", "Builtins.c")),
    BuiltinFunction('isinstance', "OO",   "b",     "PyObject_IsInstance"),
    BuiltinFunction('issubclass', "OO",   "b",     "PyObject_IsSubclass"),
    BuiltinFunction('iter',       "OO",   "O",     "PyCallIter_New"),
    BuiltinFunction('iter',       "O",    "O",     "PyObject_GetIter"),
    BuiltinFunction('len',        "O",    "z",     "PyObject_Length"),
    BuiltinFunction('locals',     "",     "O",     "__pyx_locals"),
    #('map',       "",     "",      ""),
    #('max',       "",     "",      ""),
    #('min',       "",     "",      ""),
    BuiltinFunction('next',       "O",    "O",     "__Pyx_PyIter_Next",
                    utility_code = iter_next_utility_code),   # not available in Py2 => implemented here
    BuiltinFunction('next',       "OO",   "O",     "__Pyx_PyIter_Next2",
                    utility_code = iter_next_utility_code),   # not available in Py2 => implemented here
    #('oct',       "",     "",      ""),
    #('open',      "ss",   "O",     "PyFile_FromString"),   # not in Py3
    #('ord',       "",     "",      ""),
    BuiltinFunction('pow',        "OOO",  "O",     "PyNumber_Power"),
    BuiltinFunction('pow',        "OO",   "O",     "__Pyx_PyNumber_Power2",
                    utility_code = UtilityCode.load("pow2", "Builtins.c")),
    #('range',     "",     "",      ""),
    #('raw_input', "",     "",      ""),
    #('reduce',    "",     "",      ""),
    BuiltinFunction('reload',     "O",    "O",     "PyImport_ReloadModule"),
    BuiltinFunction('repr',       "O",    "O",     "PyObject_Repr", builtin_return_type='str'),
    #('round',     "",     "",      ""),
    BuiltinFunction('setattr',    "OOO",  "r",     "PyObject_SetAttr"),
    #('sum',       "",     "",      ""),
    #('type',      "O",    "O",     "PyObject_Type"),
    #('unichr',    "",     "",      ""),
    #('unicode',   "",     "",      ""),
    #('vars',      "",     "",      ""),
    #('zip',       "",     "",      ""),
    #  Can't do these easily until we have builtin type entries.
    #('typecheck', "OO",   "i",     "PyObject_TypeCheck", False),
    #('issubtype', "OO",   "i",     "PyType_IsSubtype",   False),

    # Put in namespace append optimization.
    BuiltinFunction('__Pyx_PyObject_Append', "OO",  "O",     "__Pyx_PyObject_Append"),
]

if not Options.old_style_globals:
    builtin_function_table.append(
        BuiltinFunction('globals',    "",     "O",     "__Pyx_Globals",
                        utility_code=globals_utility_code))

# Builtin types
#  bool
#  buffer
#  classmethod
#  dict
#  enumerate
#  file
#  float
#  int
#  list
#  long
#  object
#  property
#  slice
#  staticmethod
#  super
#  str
#  tuple
#  type
#  xrange

# Each entry: (python name, CPython type object cname, list of
# BuiltinMethod/BuiltinAttribute declarations for that type).
builtin_types_table = [

    ("type",    "PyType_Type",     []),

# This conflicts with the C++ bool type, and unfortunately
# C++ is too liberal about PyObject* <-> bool conversions,
# resulting in unintuitive runtime behavior and segfaults.
#    ("bool",    "PyBool_Type",     []),

    ("int",     "PyInt_Type",      []),
    ("long",    "PyLong_Type",     []),
    ("float",   "PyFloat_Type",    []),

    ("complex", "PyComplex_Type",  [BuiltinAttribute('cval', field_type_name = 'Py_complex'),
                                    BuiltinAttribute('real', 'cval.real', field_type = PyrexTypes.c_double_type),
                                    BuiltinAttribute('imag', 'cval.imag', field_type = PyrexTypes.c_double_type),
                                    ]),

    ("basestring", "PyBaseString_Type", [
                                    BuiltinMethod("join",  "TO",   "T", "__Pyx_PyBaseString_Join",
                                                  utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
                                    ]),
    ("bytearray", "PyByteArray_Type", [
                                    ]),
    ("bytes",   "PyBytes_Type",    [BuiltinMethod("__contains__",  "TO",   "b", "PySequence_Contains"),
                                    BuiltinMethod("join",  "TO",   "O", "__Pyx_PyBytes_Join",
                                                  utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
                                    ]),
    ("str",     "PyString_Type",   [BuiltinMethod("__contains__",  "TO",   "b", "PySequence_Contains"),
                                    BuiltinMethod("join",  "TO",   "O", "__Pyx_PyString_Join",
                                                  builtin_return_type='basestring',
                                                  utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
                                    ]),
    ("unicode", "PyUnicode_Type",  [BuiltinMethod("__contains__",  "TO",   "b", "PyUnicode_Contains"),
                                    BuiltinMethod("join",  "TO",   "T", "PyUnicode_Join"),
                                    ]),

    ("tuple",   "PyTuple_Type",    [BuiltinMethod("__contains__",  "TO",   "b", "PySequence_Contains"),
                                    ]),

    ("list",    "PyList_Type",     [BuiltinMethod("__contains__",  "TO",   "b", "PySequence_Contains"),
                                    BuiltinMethod("insert",  "TzO",  "r", "PyList_Insert"),
                                    BuiltinMethod("reverse", "T",    "r", "PyList_Reverse"),
                                    BuiltinMethod("append",  "TO",   "r", "__Pyx_PyList_Append",
                                                  utility_code=UtilityCode.load("ListAppend", "Optimize.c")),
                                    BuiltinMethod("extend",  "TO",   "r", "__Pyx_PyList_Extend",
                                                  utility_code=UtilityCode.load("ListExtend", "Optimize.c")),
                                    ]),

    ("dict",    "PyDict_Type",     [BuiltinMethod("__contains__",  "TO",   "b", "PyDict_Contains"),
                                    BuiltinMethod("has_key",       "TO",   "b", "PyDict_Contains"),
                                    BuiltinMethod("items",  "T",   "O", "__Pyx_PyDict_Items",
                                                  utility_code=UtilityCode.load("py_dict_items", "Builtins.c")),
                                    BuiltinMethod("keys",   "T",   "O", "__Pyx_PyDict_Keys",
                                                  utility_code=UtilityCode.load("py_dict_keys", "Builtins.c")),
                                    BuiltinMethod("values", "T",   "O", "__Pyx_PyDict_Values",
                                                  utility_code=UtilityCode.load("py_dict_values", "Builtins.c")),
                                    BuiltinMethod("iteritems",  "T",   "O", "__Pyx_PyDict_IterItems",
                                                  utility_code=UtilityCode.load("py_dict_iteritems", "Builtins.c")),
                                    BuiltinMethod("iterkeys",   "T",   "O", "__Pyx_PyDict_IterKeys",
                                                  utility_code=UtilityCode.load("py_dict_iterkeys", "Builtins.c")),
                                    BuiltinMethod("itervalues", "T",   "O", "__Pyx_PyDict_IterValues",
                                                  utility_code=UtilityCode.load("py_dict_itervalues", "Builtins.c")),
                                    BuiltinMethod("viewitems",  "T",   "O", "__Pyx_PyDict_ViewItems",
                                                  utility_code=UtilityCode.load("py_dict_viewitems", "Builtins.c")),
                                    BuiltinMethod("viewkeys",   "T",   "O", "__Pyx_PyDict_ViewKeys",
                                                  utility_code=UtilityCode.load("py_dict_viewkeys", "Builtins.c")),
                                    BuiltinMethod("viewvalues", "T",   "O", "__Pyx_PyDict_ViewValues",
                                                  utility_code=UtilityCode.load("py_dict_viewvalues", "Builtins.c")),
                                    BuiltinMethod("clear",  "T",   "r", "__Pyx_PyDict_Clear",
                                                  utility_code=UtilityCode.load("py_dict_clear", "Optimize.c")),
                                    BuiltinMethod("copy",   "T",   "T", "PyDict_Copy")]),

    ("slice",   "PySlice_Type",    [BuiltinAttribute('start'),
                                    BuiltinAttribute('stop'),
                                    BuiltinAttribute('step'),
                                    ]),
#    ("file",    "PyFile_Type",     []),  # not in Py3

    ("set",       "PySet_Type",    [BuiltinMethod("__contains__",  "TO",   "b", "PySequence_Contains"),
                                    BuiltinMethod("clear",   "T",  "r", "PySet_Clear",
                                                  utility_code = py_set_utility_code),
                    # discard() and remove() have a special treatment for unhashable values
#                                    BuiltinMethod("discard", "TO", "r", "PySet_Discard",
#                                                  utility_code = py_set_utility_code),
                                    BuiltinMethod("add",     "TO", "r", "PySet_Add",
                                                  utility_code = py_set_utility_code),
                                    BuiltinMethod("pop",     "T",  "O", "PySet_Pop",
                                                  utility_code = py_set_utility_code)]),
    ("frozenset", "PyFrozenSet_Type", []),
]

types_that_construct_their_instance = set([
    # some builtin types do not always return an instance of
    # themselves -
these do: 'type', 'bool', 'long', 'float', 'complex', 'bytes', 'unicode', 'bytearray', 'tuple', 'list', 'dict', 'set', 'frozenset' # 'str', # only in Py3.x # 'file', # only in Py2.x ]) builtin_structs_table = [ ('Py_buffer', 'Py_buffer', [("buf", PyrexTypes.c_void_ptr_type), ("obj", PyrexTypes.py_object_type), ("len", PyrexTypes.c_py_ssize_t_type), ("itemsize", PyrexTypes.c_py_ssize_t_type), ("readonly", PyrexTypes.c_bint_type), ("ndim", PyrexTypes.c_int_type), ("format", PyrexTypes.c_char_ptr_type), ("shape", PyrexTypes.c_py_ssize_t_ptr_type), ("strides", PyrexTypes.c_py_ssize_t_ptr_type), ("suboffsets", PyrexTypes.c_py_ssize_t_ptr_type), ("smalltable", PyrexTypes.CArrayType(PyrexTypes.c_py_ssize_t_type, 2)), ("internal", PyrexTypes.c_void_ptr_type), ]), ('Py_complex', 'Py_complex', [('real', PyrexTypes.c_double_type), ('imag', PyrexTypes.c_double_type), ]) ] # set up builtin scope builtin_scope = BuiltinScope() def init_builtin_funcs(): for bf in builtin_function_table: bf.declare_in_scope(builtin_scope) builtin_types = {} def init_builtin_types(): global builtin_types for name, cname, methods in builtin_types_table: utility = builtin_utility_code.get(name) if name == 'frozenset': objstruct_cname = 'PySetObject' elif name == 'bool': objstruct_cname = None else: objstruct_cname = 'Py%sObject' % name.capitalize() the_type = builtin_scope.declare_builtin_type(name, cname, utility, objstruct_cname) builtin_types[name] = the_type for method in methods: method.declare_in_type(the_type) def init_builtin_structs(): for name, cname, attribute_types in builtin_structs_table: scope = StructOrUnionScope(name) for attribute_name, attribute_type in attribute_types: scope.declare_var(attribute_name, attribute_type, None, attribute_name, allow_pyobject=True) builtin_scope.declare_struct_or_union( name, "struct", scope, 1, None, cname = cname) def init_builtins(): init_builtin_structs() init_builtin_types() init_builtin_funcs() builtin_scope.declare_var( '__debug__', 
PyrexTypes.c_const_type(PyrexTypes.c_bint_type), pos=None, cname='(!Py_OptimizeFlag)', is_cdef=True) global list_type, tuple_type, dict_type, set_type, frozenset_type global bytes_type, str_type, unicode_type, basestring_type, slice_type global float_type, bool_type, type_type, complex_type, bytearray_type type_type = builtin_scope.lookup('type').type list_type = builtin_scope.lookup('list').type tuple_type = builtin_scope.lookup('tuple').type dict_type = builtin_scope.lookup('dict').type set_type = builtin_scope.lookup('set').type frozenset_type = builtin_scope.lookup('frozenset').type slice_type = builtin_scope.lookup('slice').type bytes_type = builtin_scope.lookup('bytes').type str_type = builtin_scope.lookup('str').type unicode_type = builtin_scope.lookup('unicode').type basestring_type = builtin_scope.lookup('basestring').type bytearray_type = builtin_scope.lookup('bytearray').type float_type = builtin_scope.lookup('float').type bool_type = builtin_scope.lookup('bool').type complex_type = builtin_scope.lookup('complex').type init_builtins()
""" Titanic service Implements server side of http:#rfc.zeromq.org/spec:9 Author: Min RK <benjaminrk@gmail.com> """ import cPickle as pickle import os import sys import threading import time from uuid import uuid4 import zmq from mdwrkapi import MajorDomoWorker from mdcliapi import MajorDomoClient from zhelpers import zpipe TITANIC_DIR = ".titanic" def request_filename (uuid): """Returns freshly allocated request filename for given UUID""" return os.path.join(TITANIC_DIR, "%s.req" % uuid) # def reply_filename (uuid): """Returns freshly allocated reply filename for given UUID""" return os.path.join(TITANIC_DIR, "%s.rep" % uuid) # --------------------------------------------------------------------- # Titanic request service def titanic_request (pipe): worker = MajorDomoWorker("tcp://localhost:5555", "titanic.request") reply = None while True: # Send reply if it's not null # And then get next request from broker request = worker.recv(reply) if not request: break # Interrupted, exit # Ensure message directory exists if not os.path.exists(TITANIC_DIR): os.mkdir(TITANIC_DIR) # Generate UUID and save message to disk uuid = uuid4().hex filename = request_filename (uuid) with open(filename, 'w') as f: pickle.dump(request, f) # Send UUID through to message queue pipe.send(uuid) # Now send UUID back to client # Done by the worker.recv() at the top of the loop reply = ["200", uuid] # --------------------------------------------------------------------- # Titanic reply service def titanic_reply (): worker = MajorDomoWorker("tcp://localhost:5555", "titanic.reply") reply = None while True: request = worker.recv(reply) if not request: break # Interrupted, exit uuid = request.pop(0) req_filename = request_filename(uuid) rep_filename = reply_filename(uuid) if os.path.exists(rep_filename): with open(rep_filename, 'r') as f: reply = pickle.load(f) reply = ["200"] + reply else: if os.path.exists(req_filename): reply = ["300"] # pending else: reply = ["400"] # unknown # 
--------------------------------------------------------------------- # Titanic close service def titanic_close(): worker = MajorDomoWorker("tcp://localhost:5555", "titanic.close") reply = None while True: request = worker.recv(reply) if not request: break # Interrupted, exit uuid = request.pop(0) req_filename = request_filename(uuid) rep_filename = reply_filename(uuid) # should these be protected? Does zfile_delete ignore files # that have already been removed? That's what we are doing here. if os.path.exists(req_filename): os.remove(req_filename) if os.path.exists(rep_filename): os.remove(rep_filename) reply = ["200"] def service_success(client, uuid): """Attempt to process a single request, return True if successful""" # Load request message, service will be first frame filename = request_filename (uuid) # If the client already closed request, treat as successful if not os.path.exists(filename): return True with open(filename, 'r') as f: request = pickle.load(f) service = request.pop(0) # Use MMI protocol to check if service is available mmi_request = [service] mmi_reply = client.send("mmi.service", mmi_request) service_ok = mmi_reply and mmi_reply[0] == "200" if service_ok: reply = client.send(service, request) if reply: filename = reply_filename (uuid) with open(filename, "w") as f: pickle.dump(reply, f) return True return False def main(): verbose = '-v' in sys.argv ctx = zmq.Context() # Create MDP client session with short timeout client = MajorDomoClient("tcp://localhost:5555", verbose) client.timeout = 1000 # 1 sec client.retries = 1 # only 1 retry request_pipe, peer = zpipe(ctx) request_thread = threading.Thread(target=titanic_request, args=(peer,)) request_thread.daemon = True request_thread.start() reply_thread = threading.Thread(target=titanic_reply) reply_thread.daemon = True reply_thread.start() close_thread = threading.Thread(target=titanic_close) close_thread.daemon = True close_thread.start() poller = zmq.Poller() poller.register(request_pipe, 
zmq.POLLIN) # Main dispatcher loop while True: # Ensure message directory exists if not os.path.exists(TITANIC_DIR): os.mkdir(TITANIC_DIR) # We'll dispatch once per second, if there's no activity try: items = poller.poll(1000) except KeyboardInterrupt: break; # Interrupted if items: # Append UUID to queue, prefixed with '-' for pending uuid = request_pipe.recv() with open(os.path.join(TITANIC_DIR, 'queue'), 'a') as f: f.write("-%s\n" % uuid) # Brute-force dispatcher # with open(os.path.join(TITANIC_DIR, 'queue'), 'r+b') as f: for entry in f.readlines(): # UUID is prefixed with '-' if still waiting if entry[0] == '-': uuid = entry[1:].rstrip() # rstrip '\n' etc. print "I: processing request %s" % uuid if service_success(client, uuid): # mark queue entry as processed here = f.tell() f.seek(-1*len(entry), os.SEEK_CUR) f.write('+') f.seek(here, os.SEEK_SET) if __name__ == '__main__': main()
"""Weight Boosting This module contains weight boosting estimators for both classification and regression. The module structure is the following: - The ``BaseWeightBoosting`` base class implements a common ``fit`` method for all the estimators in the module. Regression and classification only differ from each other in the loss function that is optimized. - ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for classification problems. - ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for regression problems. """ # Authors: Noel Dawe <noel@dawe.me> # Gilles Louppe <g.louppe@gmail.com> # Hamzeh Alsalhi <ha258@cornell.edu> # Arnaud Joly <arnaud.v.joly@gmail.com> # # Licence: BSD 3 clause from abc import ABCMeta, abstractmethod import numpy as np from numpy.core.umath_tests import inner1d from .base import BaseEnsemble from ..base import ClassifierMixin, RegressorMixin from ..externals import six from ..externals.six.moves import zip from ..externals.six.moves import xrange as range from .forest import BaseForest from ..tree import DecisionTreeClassifier, DecisionTreeRegressor from ..tree.tree import BaseDecisionTree from ..tree._tree import DTYPE from ..utils import check_array, check_X_y, check_random_state from ..metrics import accuracy_score, r2_score from sklearn.utils.validation import has_fit_parameter, check_is_fitted __all__ = [ 'AdaBoostClassifier', 'AdaBoostRegressor', ] class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)): """Base class for AdaBoost estimators. Warning: This class should not be used directly. Use derived classes instead. 
""" @abstractmethod def __init__(self, base_estimator=None, n_estimators=50, estimator_params=tuple(), learning_rate=1., random_state=None): super(BaseWeightBoosting, self).__init__( base_estimator=base_estimator, n_estimators=n_estimators, estimator_params=estimator_params) self.learning_rate = learning_rate self.random_state = random_state def fit(self, X, y, sample_weight=None): """Build a boosted classifier/regressor from the training set (X, y). Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is forced to DTYPE from tree._tree if the base classifier of this ensemble weighted boosting classifier is a tree or forest. y : array-like of shape = [n_samples] The target values (class labels in classification, real numbers in regression). sample_weight : array-like of shape = [n_samples], optional Sample weights. If None, the sample weights are initialized to 1 / n_samples. Returns ------- self : object Returns self. """ # Check parameters if self.learning_rate <= 0: raise ValueError("learning_rate must be greater than zero") if (self.base_estimator is None or isinstance(self.base_estimator, (BaseDecisionTree, BaseForest))): dtype = DTYPE accept_sparse = 'csc' else: dtype = None accept_sparse = ['csr', 'csc'] X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype) if sample_weight is None: # Initialize weights to 1 / n_samples sample_weight = np.empty(X.shape[0], dtype=np.float) sample_weight[:] = 1. 
/ X.shape[0] else: # Normalize existing weights sample_weight = sample_weight / sample_weight.sum(dtype=np.float64) # Check that the sample weights sum is positive if sample_weight.sum() <= 0: raise ValueError( "Attempting to fit with a non-positive " "weighted number of samples.") # Check parameters self._validate_estimator() # Clear any previous fit results self.estimators_ = [] self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float) self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float) for iboost in range(self.n_estimators): # Boosting step sample_weight, estimator_weight, estimator_error = self._boost( iboost, X, y, sample_weight) # Early termination if sample_weight is None: break self.estimator_weights_[iboost] = estimator_weight self.estimator_errors_[iboost] = estimator_error # Stop if error is zero if estimator_error == 0: break sample_weight_sum = np.sum(sample_weight) # Stop if the sum of sample weights has become non-positive if sample_weight_sum <= 0: break if iboost < self.n_estimators - 1: # Normalize sample_weight /= sample_weight_sum return self @abstractmethod def _boost(self, iboost, X, y, sample_weight): """Implement a single boost. Warning: This method needs to be overriden by subclasses. Parameters ---------- iboost : int The index of the current boost iteration. X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. y : array-like of shape = [n_samples] The target values (class labels). sample_weight : array-like of shape = [n_samples] The current sample weights. Returns ------- sample_weight : array-like of shape = [n_samples] or None The reweighted sample weights. If None then boosting has terminated early. estimator_weight : float The weight for the current boost. If None then boosting has terminated early. error : float The classification error for the current boost. 
If None then boosting has terminated early. """ pass def staged_score(self, X, y, sample_weight=None): """Return staged scores for X, y. This generator method yields the ensemble score after each iteration of boosting and therefore allows monitoring, such as to determine the score on a test set after each boost. Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. y : array-like, shape = [n_samples] Labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns ------- z : float """ for y_pred in self.staged_predict(X): if isinstance(self, ClassifierMixin): yield accuracy_score(y, y_pred, sample_weight=sample_weight) else: yield r2_score(y, y_pred, sample_weight=sample_weight) @property def feature_importances_(self): """Return the feature importances (the higher, the more important the feature). Returns ------- feature_importances_ : array, shape = [n_features] """ if self.estimators_ is None or len(self.estimators_) == 0: raise ValueError("Estimator not fitted, " "call `fit` before `feature_importances_`.") try: norm = self.estimator_weights_.sum() return (sum(weight * clf.feature_importances_ for weight, clf in zip(self.estimator_weights_, self.estimators_)) / norm) except AttributeError: raise AttributeError( "Unable to compute feature importances " "since base_estimator does not have a " "feature_importances_ attribute") def _check_sample_weight(self): if not has_fit_parameter(self.base_estimator_, "sample_weight"): raise ValueError("%s doesn't support sample_weight." 
% self.base_estimator_.__class__.__name__) def _validate_X_predict(self, X): """Ensure that X is in the proper format""" if (self.base_estimator is None or isinstance(self.base_estimator, (BaseDecisionTree, BaseForest))): X = check_array(X, accept_sparse='csr', dtype=DTYPE) else: X = check_array(X, accept_sparse=['csr', 'csc', 'coo']) return X def _samme_proba(estimator, n_classes, X): """Calculate algorithm 4, step 2, equation c) of Zhu et al [1]. References ---------- .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009. """ proba = estimator.predict_proba(X) # Displace zero probabilities so the log is defined. # Also fix negative elements which may occur with # negative sample weights. proba[proba <= 0] = 1e-5 log_proba = np.log(proba) return (n_classes - 1) * (log_proba - (1. / n_classes) * log_proba.sum(axis=1)[:, np.newaxis]) class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin): """An AdaBoost classifier. An AdaBoost [1] classifier is a meta-estimator that begins by fitting a classifier on the original dataset and then fits additional copies of the classifier on the same dataset but where the weights of incorrectly classified instances are adjusted such that subsequent classifiers focus more on difficult cases. This class implements the algorithm known as AdaBoost-SAMME [2]. Read more in the :ref:`User Guide <adaboost>`. Parameters ---------- base_estimator : object, optional (default=DecisionTreeClassifier) The base estimator from which the boosted ensemble is built. Support for sample weighting is required, as well as proper `classes_` and `n_classes_` attributes. n_estimators : integer, optional (default=50) The maximum number of estimators at which boosting is terminated. In case of perfect fit, the learning procedure is stopped early. learning_rate : float, optional (default=1.) Learning rate shrinks the contribution of each classifier by ``learning_rate``. There is a trade-off between ``learning_rate`` and ``n_estimators``. 
algorithm : {'SAMME', 'SAMME.R', 'LOGIT'}, optional (default='SAMME.R') If 'SAMME.R' then use the SAMME.R real boosting algorithm. ``base_estimator`` must support calculation of class probabilities. If 'SAMME' then use the SAMME discrete boosting algorithm. The SAMME.R algorithm typically converges faster than SAMME, achieving a lower test error with fewer boosting iterations. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- estimators_ : list of classifiers The collection of fitted sub-estimators. classes_ : array of shape = [n_classes] The classes labels. n_classes_ : int The number of classes. estimator_weights_ : array of floats Weights for each estimator in the boosted ensemble. estimator_errors_ : array of floats Classification error for each estimator in the boosted ensemble. feature_importances_ : array of shape = [n_features] The feature importances if supported by the ``base_estimator``. See also -------- AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier References ---------- .. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of on-Line Learning and an Application to Boosting", 1995. .. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009. """ def __init__(self, base_estimator=None, n_estimators=50, learning_rate=1., algorithm='SAMME.R', random_state=None): super(AdaBoostClassifier, self).__init__( base_estimator=base_estimator, n_estimators=n_estimators, learning_rate=learning_rate, random_state=random_state) self.algorithm = algorithm def fit(self, X, y, sample_weight=None): """Build a boosted classifier from the training set (X, y). 
Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. y : array-like of shape = [n_samples] The target values (class labels). sample_weight : array-like of shape = [n_samples], optional Sample weights. If None, the sample weights are initialized to ``1 / n_samples``. Returns ------- self : object Returns self. """ # Check that algorithm is supported if self.algorithm not in ('SAMME', 'SAMME.R', 'LOGIT'): raise ValueError("algorithm %s is not supported" % self.algorithm) # Fit return super(AdaBoostClassifier, self).fit(X, y, sample_weight) def _validate_estimator(self): """Check the estimator and set the base_estimator_ attribute.""" super(AdaBoostClassifier, self)._validate_estimator( default=DecisionTreeClassifier(max_depth=1)) # SAMME-R requires predict_proba-enabled base estimators if self.algorithm == 'SAMME.R': if not hasattr(self.base_estimator_, 'predict_proba'): raise TypeError( "AdaBoostClassifier with algorithm='SAMME.R' requires " "that the weak learner supports the calculation of class " "probabilities with a predict_proba method.\n" "Please change the base estimator or set " "algorithm='SAMME' instead.") self._check_sample_weight() def _boost(self, iboost, X, y, sample_weight): """Implement a single boost. Perform a single boost according to the real multi-class SAMME.R algorithm or to the discrete SAMME algorithm and return the updated sample weights. Parameters ---------- iboost : int The index of the current boost iteration. X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. DOK and LIL are converted to CSR. y : array-like of shape = [n_samples] The target values (class labels). sample_weight : array-like of shape = [n_samples] The current sample weights. 
Returns ------- sample_weight : array-like of shape = [n_samples] or None The reweighted sample weights. If None then boosting has terminated early. estimator_weight : float The weight for the current boost. If None then boosting has terminated early. estimator_error : float The classification error for the current boost. If None then boosting has terminated early. """ if self.algorithm == 'SAMME.R': return self._boost_real(iboost, X, y, sample_weight) else: # elif self.algorithm == "SAMME": return self._boost_discrete(iboost, X, y, sample_weight) def _boost_real(self, iboost, X, y, sample_weight): """Implement a single boost using the SAMME.R real algorithm.""" estimator = self._make_estimator() try: estimator.set_params(random_state=self.random_state) except ValueError: pass estimator.fit(X, y, sample_weight=sample_weight) y_predict_proba = estimator.predict_proba(X) if iboost == 0: self.classes_ = getattr(estimator, 'classes_', None) self.n_classes_ = len(self.classes_) y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1), axis=0) # Instances incorrectly classified incorrect = y_predict != y # Error fraction estimator_error = np.mean( np.average(incorrect, weights=sample_weight, axis=0)) # Stop if classification is perfect if estimator_error <= 0: return sample_weight, 1., 0. # Construct y coding as described in Zhu et al [2]: # # y_k = 1 if c == k else -1 / (K - 1) # # where K == n_classes_ and c, k in [0, K) are indices along the second # axis of the y coding with c being the index corresponding to the true # class label. n_classes = self.n_classes_ classes = self.classes_ y_codes = np.array([-1. / (n_classes - 1), 1.]) y_coding = y_codes.take(classes == y[:, np.newaxis]) # Displace zero probabilities so the log is defined. # Also fix negative elements which may occur with # negative sample weights. y_predict_proba[y_predict_proba <= 0] = 1e-5 # Boost weight using multi-class AdaBoost SAMME.R alg estimator_weight = (-1. 
* self.learning_rate * (((n_classes - 1.) / n_classes) * inner1d(y_coding, np.log(y_predict_proba)))) # Only boost the weights if it will fit again if not iboost == self.n_estimators - 1: # Only boost positive weights sample_weight *= np.exp(estimator_weight * ((sample_weight > 0) | (estimator_weight < 0))) return sample_weight, 1., estimator_error def _boost_discrete(self, iboost, X, y, sample_weight): """Implement a single boost using the SAMME discrete algorithm.""" estimator = self._make_estimator() try: estimator.set_params(random_state=self.random_state) except ValueError: pass estimator.fit(X, y, sample_weight=sample_weight) y_predict = estimator.predict(X) if iboost == 0: self.classes_ = getattr(estimator, 'classes_', None) self.n_classes_ = len(self.classes_) # Instances incorrectly classified incorrect = y_predict != y # Error fraction estimator_error = np.mean( np.average(incorrect, weights=sample_weight, axis=0)) # Stop if classification is perfect if estimator_error <= 0: return sample_weight, 1., 0. n_classes = self.n_classes_ # Stop if the error is at least as bad as random guessing if estimator_error >= 1. - (1. / n_classes): self.estimators_.pop(-1) if len(self.estimators_) == 0: raise ValueError('BaseClassifier in AdaBoostClassifier ' 'ensemble is worse than random, ensemble ' 'can not be fit.') return None, None, None # Boost weight using multi-class AdaBoost SAMME alg estimator_weight = self.learning_rate * ( np.log((1. 
            - estimator_error) / estimator_error) +
            np.log(n_classes - 1.))

        # Only boost the weights if I will fit again
        if not iboost == self.n_estimators - 1:
            # Only boost positive weights
            if self.algorithm == 'LOGIT':
                # NOTE(review): non-standard 'LOGIT' variant — log-damped
                # version of the SAMME reweighting; confirm against the
                # paper/spec this file implements.
                sample_weight *= np.log(1 + np.exp(estimator_weight * incorrect *
                                                   ((sample_weight > 0) |
                                                    (estimator_weight < 0))))
            else:
                sample_weight *= np.exp(estimator_weight * incorrect *
                                        ((sample_weight > 0) |
                                         (estimator_weight < 0)))

        return sample_weight, estimator_weight, estimator_error

    def predict(self, X):
        """Predict classes for X.

        The predicted class of an input sample is computed as the weighted
        mean prediction of the classifiers in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted classes.
        """
        pred = self.decision_function(X)

        if self.n_classes_ == 2:
            # Binary case: decision_function returns one column; its sign
            # selects the class.
            return self.classes_.take(pred > 0, axis=0)

        return self.classes_.take(np.argmax(pred, axis=1), axis=0)

    def staged_predict(self, X):
        """Return staged predictions for X.

        The predicted class of an input sample is computed as the weighted
        mean prediction of the classifiers in the ensemble.

        This generator method yields the ensemble prediction after each
        iteration of boosting and therefore allows monitoring, such as to
        determine the prediction on a test set after each boost.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : generator of array, shape = [n_samples]
            The predicted classes.
        """
        n_classes = self.n_classes_
        classes = self.classes_

        if n_classes == 2:
            for pred in self.staged_decision_function(X):
                yield np.array(classes.take(pred > 0, axis=0))

        else:
            for pred in self.staged_decision_function(X):
                yield np.array(classes.take(
                    np.argmax(pred, axis=1), axis=0))

    def decision_function(self, X):
        """Compute the decision function of ``X``.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        score : array, shape = [n_samples, k]
            The decision function of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
            Binary classification is a special cases with ``k == 1``,
            otherwise ``k==n_classes``. For binary classification,
            values closer to -1 or 1 mean more like the first or second
            class in ``classes_``, respectively.
        """
        check_is_fitted(self, "n_classes_")
        X = self._validate_X_predict(X)

        n_classes = self.n_classes_
        classes = self.classes_[:, np.newaxis]
        pred = None

        if self.algorithm == 'SAMME.R':
            # The weights are all 1. for SAMME.R
            pred = sum(_samme_proba(estimator, n_classes, X)
                       for estimator in self.estimators_)
        else:   # self.algorithm == "SAMME"
            # One-vs-rest vote per estimator, scaled by estimator weight.
            pred = sum((estimator.predict(X) == classes).T * w
                       for estimator, w in zip(self.estimators_,
                                               self.estimator_weights_))

        pred /= self.estimator_weights_.sum()
        if n_classes == 2:
            pred[:, 0] *= -1
            return pred.sum(axis=1)
        return pred

    def staged_decision_function(self, X):
        """Compute decision function of ``X`` for each boosting iteration.

        This method allows monitoring (i.e. determine error on testing set)
        after each boosting iteration.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        score : generator of array, shape = [n_samples, k]
            The decision function of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
            Binary classification is a special cases with ``k == 1``,
            otherwise ``k==n_classes``. For binary classification,
            values closer to -1 or 1 mean more like the first or second
            class in ``classes_``, respectively.
        """
        check_is_fitted(self, "n_classes_")
        X = self._validate_X_predict(X)

        n_classes = self.n_classes_
        classes = self.classes_[:, np.newaxis]
        pred = None
        norm = 0.

        for weight, estimator in zip(self.estimator_weights_,
                                     self.estimators_):
            # Running normalizer over the estimators used so far.
            norm += weight

            if self.algorithm == 'SAMME.R':
                # The weights are all 1. for SAMME.R
                current_pred = _samme_proba(estimator, n_classes, X)
            else:  # elif self.algorithm == "SAMME":
                current_pred = estimator.predict(X)
                current_pred = (current_pred == classes).T * weight

            if pred is None:
                pred = current_pred
            else:
                pred += current_pred

            if n_classes == 2:
                # Copy before sign-flipping so the accumulator is untouched.
                tmp_pred = np.copy(pred)
                tmp_pred[:, 0] *= -1
                yield (tmp_pred / norm).sum(axis=1)
            else:
                yield pred / norm

    def predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample is computed as
        the weighted mean predicted class probabilities of the classifiers
        in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        p : array of shape = [n_samples]
            The class probabilities of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
        """
        check_is_fitted(self, "n_classes_")

        n_classes = self.n_classes_
        X = self._validate_X_predict(X)

        if self.algorithm == 'SAMME.R':
            # The weights are all 1. for SAMME.R
            proba = sum(_samme_proba(estimator, n_classes, X)
                        for estimator in self.estimators_)
        else:   # self.algorithm == "SAMME"
            proba = sum(estimator.predict_proba(X) * w
                        for estimator, w in zip(self.estimators_,
                                                self.estimator_weights_))

        proba /= self.estimator_weights_.sum()
        # Map the additive scores back to the probability simplex.
        proba = np.exp((1. / (n_classes - 1)) * proba)
        normalizer = proba.sum(axis=1)[:, np.newaxis]
        # Guard against an all-zero row producing a division by zero.
        normalizer[normalizer == 0.0] = 1.0
        proba /= normalizer

        return proba

    def staged_predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample is computed as
        the weighted mean predicted class probabilities of the classifiers
        in the ensemble.

        This generator method yields the ensemble predicted class
        probabilities after each iteration of boosting and therefore
        allows monitoring, such as to determine the predicted class
        probabilities on a test set after each boost.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        p : generator of array, shape = [n_samples]
            The class probabilities of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
        """
        # NOTE(review): unlike predict_proba, this method does not call
        # check_is_fitted — confirm whether that asymmetry is intentional.
        X = self._validate_X_predict(X)

        n_classes = self.n_classes_
        proba = None
        norm = 0.

        for weight, estimator in zip(self.estimator_weights_,
                                     self.estimators_):
            norm += weight

            if self.algorithm == 'SAMME.R':
                # The weights are all 1. for SAMME.R
                current_proba = _samme_proba(estimator, n_classes, X)
            else:  # elif self.algorithm == "SAMME":
                current_proba = estimator.predict_proba(X) * weight

            if proba is None:
                proba = current_proba
            else:
                proba += current_proba

            real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
            normalizer = real_proba.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            real_proba /= normalizer

            yield real_proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.

        The predicted class log-probabilities of an input sample is computed
        as the weighted mean predicted class log-probabilities of the
        classifiers in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        p : array of shape = [n_samples]
            The class probabilities of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
        """
        return np.log(self.predict_proba(X))


class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
    """An AdaBoost regressor.

    An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
    regressor on the original dataset and then fits additional copies of the
    regressor on the same dataset but where the weights of instances are
    adjusted according to the error of the current prediction. As such,
    subsequent regressors focus more on difficult cases.

    This class implements the algorithm known as AdaBoost.R2 [2].

    Read more in the :ref:`User Guide <adaboost>`.

    Parameters
    ----------
    base_estimator : object, optional (default=DecisionTreeRegressor)
        The base estimator from which the boosted ensemble is built.
        Support for sample weighting is required.

    n_estimators : integer, optional (default=50)
        The maximum number of estimators at which boosting is terminated.
        In case of perfect fit, the learning procedure is stopped early.

    learning_rate : float, optional (default=1.)
        Learning rate shrinks the contribution of each regressor by
        ``learning_rate``. There is a trade-off between ``learning_rate`` and
        ``n_estimators``.

    loss : {'linear', 'square', 'exponential'}, optional (default='linear')
        The loss function to use when updating the weights after each
        boosting iteration.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    estimators_ : list of classifiers
        The collection of fitted sub-estimators.

    estimator_weights_ : array of floats
        Weights for each estimator in the boosted ensemble.

    estimator_errors_ : array of floats
        Regression error for each estimator in the boosted ensemble.

    feature_importances_ : array of shape = [n_features]
        The feature importances if supported by the ``base_estimator``.

    See also
    --------
    AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor

    References
    ----------
    .. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
           on-Line Learning and an Application to Boosting", 1995.

    .. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.

    """
    def __init__(self,
                 base_estimator=None,
                 n_estimators=50,
                 learning_rate=1.,
                 loss='linear',
                 random_state=None):

        super(AdaBoostRegressor, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            learning_rate=learning_rate,
            random_state=random_state)

        self.loss = loss
        # NOTE(review): random_state is already stored by the super() call
        # above — this re-assignment looks redundant; confirm before removal.
        self.random_state = random_state

    def fit(self, X, y, sample_weight=None):
        """Build a boosted regressor from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        y : array-like of shape = [n_samples]
            The target values (real numbers).

        sample_weight : array-like of shape = [n_samples], optional
            Sample weights. If None, the sample weights are initialized to
            1 / n_samples.

        Returns
        -------
        self : object
            Returns self.
        """
        # Check loss
        if self.loss not in ('linear', 'square', 'exponential'):
            raise ValueError(
                "loss must be 'linear', 'square', or 'exponential'")

        # Fit
        return super(AdaBoostRegressor, self).fit(X, y, sample_weight)

    def _validate_estimator(self):
        """Check the estimator and set the base_estimator_ attribute."""
        super(AdaBoostRegressor, self)._validate_estimator(
            default=DecisionTreeRegressor(max_depth=3))
        self._check_sample_weight()

    def _boost(self, iboost, X, y, sample_weight):
        """Implement a single boost for regression

        Perform a single boost according to the AdaBoost.R2 algorithm and
        return the updated sample weights.

        Parameters
        ----------
        iboost : int
            The index of the current boost iteration.

        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        y : array-like of shape = [n_samples]
            The target values (class labels in classification, real numbers in
            regression).

        sample_weight : array-like of shape = [n_samples]
            The current sample weights.

        Returns
        -------
        sample_weight : array-like of shape = [n_samples] or None
            The reweighted sample weights.
            If None then boosting has terminated early.

        estimator_weight : float
            The weight for the current boost.
            If None then boosting has terminated early.

        estimator_error : float
            The regression error for the current boost.
            If None then boosting has terminated early.
        """
        estimator = self._make_estimator()

        try:
            # Not every estimator accepts a random_state parameter.
            estimator.set_params(random_state=self.random_state)
        except ValueError:
            pass

        generator = check_random_state(self.random_state)

        # Weighted sampling of the training set with replacement
        # For NumPy >= 1.7.0 use np.random.choice
        cdf = sample_weight.cumsum()
        cdf /= cdf[-1]
        uniform_samples = generator.random_sample(X.shape[0])
        bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
        # searchsorted returns a scalar
        bootstrap_idx = np.array(bootstrap_idx, copy=False)

        # Fit on the bootstrapped sample and obtain a prediction
        # for all samples in the training set
        estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
        y_predict = estimator.predict(X)

        error_vect = np.abs(y_predict - y)
        error_max = error_vect.max()

        # Normalize errors to [0, 1] before applying the chosen loss.
        if error_max != 0.:
            error_vect /= error_max

        if self.loss == 'square':
            error_vect **= 2
        elif self.loss == 'exponential':
            error_vect = 1. - np.exp(- error_vect)

        # Calculate the average loss
        estimator_error = (sample_weight * error_vect).sum()

        if estimator_error <= 0:
            # Stop if fit is perfect
            return sample_weight, 1., 0.

        elif estimator_error >= 0.5:
            # Discard current estimator only if it isn't the only one
            if len(self.estimators_) > 1:
                self.estimators_.pop(-1)
            return None, None, None

        beta = estimator_error / (1. - estimator_error)

        # Boost weight using AdaBoost.R2 alg
        estimator_weight = self.learning_rate * np.log(1. / beta)

        if not iboost == self.n_estimators - 1:
            sample_weight *= np.power(
                beta,
                (1. - error_vect) * self.learning_rate)

        return sample_weight, estimator_weight, estimator_error

    def _get_median_predict(self, X, limit):
        # Evaluate predictions of all estimators
        predictions = np.array([
            est.predict(X) for est in self.estimators_[:limit]]).T

        # Sort the predictions
        sorted_idx = np.argsort(predictions, axis=1)

        # Find index of median prediction for each sample
        weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
        median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
        median_idx = median_or_above.argmax(axis=1)

        median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]

        # Return median predictions
        return predictions[np.arange(X.shape[0]), median_estimators]

    def predict(self, X):
        """Predict regression value for X.

        The predicted regression value of an input sample is computed
        as the weighted median prediction of the classifiers in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted regression values.
        """
        check_is_fitted(self, "estimator_weights_")
        X = self._validate_X_predict(X)

        return self._get_median_predict(X, len(self.estimators_))

    def staged_predict(self, X):
        """Return staged predictions for X.

        The predicted regression value of an input sample is computed
        as the weighted median prediction of the classifiers in the ensemble.

        This generator method yields the ensemble prediction after each
        iteration of boosting and therefore allows monitoring, such as to
        determine the prediction on a test set after each boost.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        y : generator of array, shape = [n_samples]
            The predicted regression values.
        """
        check_is_fitted(self, "estimator_weights_")
        X = self._validate_X_predict(X)

        for i, _ in enumerate(self.estimators_, 1):
            yield self._get_median_predict(X, limit=i)
""" Tools for density estimation See also: - sklearn.mixture.gmm : gaussian mixture models - sklearn.neighbors.KernelDensity : Kernel Density Estimation (version 0.14+) - astroML.density_estimation.XDGMM : extreme deconvolution - scipy.spatial.gaussian_kde : a gaussian KDE implementation """ import warnings import numpy as np from scipy import special from sklearn.metrics import pairwise_kernels, pairwise_distances from sklearn.neighbors import BallTree # TODO: # - KDE with errors (chp 6.1.2) def n_volume(r, n): """compute the n-volume of a sphere of radius r in n dimensions""" return np.pi ** (0.5 * n) / special.gamma(0.5 * n + 1) * (r ** n) class KDE(object): """Kernel Density Estimate .. note:: Deprecated in astroML 0.2 Scikit-learn version 0.14 added a KernelDensity estimator class which has much better performance than this class. The ``KDE`` class will be removed in astroML version 0.3. Parameters ---------- metric : string or callable ['gaussian'|'tophat'|'exponential'] or one of the options in sklearn.metrics.pairwise_kernels. See pairwise_kernels documentation for more information. For 'gaussian' or 'tophat', 'exponential', and 'quadratic', the results will be properly normalized in D dimensions. This may not be the case for other metrics. h : float (optional) if metric is 'gaussian' or 'tophat', h gives the width of the kernel. Otherwise, h is not referenced. **kwargs : other keywords will be passed to the sklearn.metrics.pairwise_kernels function. Notes ----- Kernel forms are as follows: - 'gaussian' : K(x, y) ~ exp( -0.5 (x - y)^2 / h^2 ) - 'tophat' : K(x, y) ~ 1 if abs(x - y) < h ~ 0 otherwise - 'exponential' : K(x, y) ~ exp(- abs(x - y) / h) - 'quadratic' : K(x, y) ~ (1 - (x - y)^2) if abs(x) < 1 ~ 0 otherwise All are properly normalized, so that their integral over all space is 1. 
See Also -------- - sklearn.mixture.gmm : gaussian mixture models - KNeighborsDenstiy: nearest neighbors density estimation - scipy.spatial.gaussian_kde : a gaussian KDE implementation """ def __init__(self, metric='gaussian', h=None, **kwargs): warnings.warn("The KDE class is deprecated as of version 0.2 and will " "be removed in version 0.3. Upgrade to scikit-learn " "version >0.14 and use sklearn.neighbors.KernelDensity " "instead.") self.metric = metric self.kwargs = kwargs self.h = h self.factor = lambda ndim: 1 def fit(self, X): """Train the kernel density estimator Parameters ---------- X : array_like array of points to use to train the KDE. Shape is (n_points, n_dim) """ self.X_ = np.atleast_2d(X) if self.X_.ndim != 2: raise ValueError('X must be two-dimensional') return self def eval(self, X): """Evaluate the kernel density estimation Parameters ---------- X : array_like array of points at which to evaluate the KDE. Shape is (n_points, n_dim), where n_dim matches the dimension of the training points. Returns ------- dens : ndarray array of shape (n_points,) giving the density at each point. The density will be normalized for metric='gaussian' or metric='tophat', and will be unnormalized otherwise. 
""" X = np.atleast_2d(X) if X.ndim != 2: raise ValueError('X must be two-dimensional') if X.shape[1] != self.X_.shape[1]: raise ValueError('dimensions of X do not match training dimension') if self.metric == 'gaussian': # wrangle gaussian into scikit-learn's 'rbf' kernel gamma = 0.5 / self.h / self.h D = pairwise_kernels(X, self.X_, metric='rbf', gamma=gamma) D /= np.sqrt(2 * np.pi * self.h ** (2 * X.shape[1])) dens = D.sum(1) elif self.metric == 'tophat': # use Ball Tree to efficiently count neighbors bt = BallTree(self.X_) counts = bt.query_radius(X, self.h, count_only=True) dens = counts / n_volume(self.h, X.shape[1]) elif self.metric == 'exponential': D = pairwise_distances(X, self.X_) dens = np.exp(-abs(D) / self.h) dens = dens.sum(1) dens /= n_volume(self.h, X.shape[1]) * special.gamma(X.shape[1]) elif self.metric == 'quadratic': D = pairwise_distances(X, self.X_) dens = (1 - (D / self.h) ** 2) dens[D > self.h] = 0 dens = dens.sum(1) dens /= 2. * n_volume(self.h, X.shape[1]) / (X.shape[1] + 2) else: D = pairwise_kernels(X, self.X_, metric=self.metric, **self.kwargs) dens = D.sum(1) return dens class KNeighborsDensity(object): """K-neighbors density estimation Parameters ---------- method : string method to use. Must be one of ['simple'|'bayesian'] (see below) n_neighbors : int number of neighbors to use Notes ----- The two methods are as follows: - simple: The density at a point x is estimated by n(x) ~ k / r_k^n - bayesian: The density at a point x is estimated by n(x) ~ sum_{i=1}^k[1 / r_i^n]. See Also -------- KDE : kernel density estimation """ def __init__(self, method='bayesian', n_neighbors=10): if method not in ['simple', 'bayesian']: raise ValueError("method = %s not recognized" % method) self.n_neighbors = n_neighbors self.method = method def fit(self, X): """Train the K-neighbors density estimator Parameters ---------- X : array_like array of points to use to train the KDE. 
Shape is (n_points, n_dim) """ self.X_ = np.atleast_2d(X) if self.X_.ndim != 2: raise ValueError('X must be two-dimensional') self.bt_ = BallTree(self.X_) return self def eval(self, X): """Evaluate the kernel density estimation Parameters ---------- X : array_like array of points at which to evaluate the KDE. Shape is (n_points, n_dim), where n_dim matches the dimension of the training points. Returns ------- dens : ndarray array of shape (n_points,) giving the density at each point. The density will be normalized for metric='gaussian' or metric='tophat', and will be unnormalized otherwise. """ X = np.atleast_2d(X) if X.ndim != 2: raise ValueError('X must be two-dimensional') if X.shape[1] != self.X_.shape[1]: raise ValueError('dimensions of X do not match training dimension') dist, ind = self.bt_.query(X, self.n_neighbors, return_distance=True) k = float(self.n_neighbors) ndim = X.shape[1] if self.method == 'simple': return k / n_volume(dist[:, -1], ndim) elif self.method == 'bayesian': # XXX this may be wrong in more than 1 dimension! return (k * (k + 1) * 0.5 / n_volume(1, ndim) / (dist ** ndim).sum(1)) else: raise ValueError("Unrecognized method '%s'" % self.method) return dens
from scipy import misc
import numpy as np
import matplotlib.pyplot as plt
import time
from scipy.ndimage.filters import gaussian_filter1d, gaussian_filter


class Gaussianfilter2D():
    r'''
    2D gaussian filter on Black and White images

    The filtered image satisfies the relation

    $$ C_{i,j} = \sum_{m=0}^{2 l_w} \sum_{n=0}^{2 l_w}
       K_{m,n} \, I_{i - l_w + m, j - l_w + n} $$

    where C is the new image, K the gaussian kernel and I the original image

    Parameters
    ----------
    sigma : float
        standard deviation for the gaussian filter
    truncate : float, optional
        truncate the filter at this many standard deviations
        Default is 4.0
    mode : str, optional
        here are the various mode supported by the class

        mode       |   Ext   |         Input          |   Ext
        -----------+---------+------------------------+---------
        'reflect'  | 3  2  1 | 1  2  3  4  5  6  7  8 | 8  7  6
        'nearest'  | 1  1  1 | 1  2  3  4  5  6  7  8 | 8  8  8
        'constant' | 0  0  0 | 1  2  3  4  5  6  7  8 | 0  0  0
        'wrap'     | 6  7  8 | 1  2  3  4  5  6  7  8 | 1  2  3
    cval : float, optional
        fill value used when mode == 'constant'

    Attributes
    ----------
    kernel_ : 2d-array
        this is the 2D gaussian filter kernel used in the convolution with
        the provided image
    image_benchmark_ : 2d-array
        this is the image filtered with the 2D gaussian filter provided by
        the scipy library
    run_time_benchmark_ : float
        this is the run time of the 2D gaussian filter provided by the
        scipy library
    image_ : 2d-array
        this is the image filtered by the 2D gaussian filter implemented
        in python
    run_time_ : float
        this is the run time of the 2D gaussian filter implemented in python
    error_ : float
        this is the norm-2 error of the python function compared with the
        scipy function
    '''

    def __init__(self, sigma, truncate=4.0, mode='reflect', cval=0.0):
        self.sigma = sigma
        self.truncate = truncate
        self.mode = mode
        self.cval = cval
        # lw is the number of adjacent pixels to consider in 1D
        # when using the filter
        self.lw = int(truncate * sigma + 0.5)
        # pan is the size of the 1D window used to convolute with the
        # gaussian filter: lw pixels on both sides + the pixel in the middle
        self.pan = 2 * self.lw + 1
        # for now we take values on the right until it fills a multiple of 8
        # AVX is in x direction (in the matrix)
        self.span = (self.pan // 8 + 1) * 8

    @property
    def kernel_(self):
        '''Generate the (pan x pan) 2D gaussian kernel, normalized to sum 1.

        Note: recomputed (and self._kernel rebound) on every access.
        '''
        # initialize the size of the gaussian kernel
        # kernel size: pan * pan (33 * 33 when truncate = 4.0 and sigma = 4.0)
        self._kernel = np.zeros((self.pan, self.pan))
        # find the squared distance to the center for all pixels in the
        # kernel; fill all four quadrants by symmetry
        for i in range(0, self.lw + 1):
            for j in range(0, self.lw + 1):
                if i == 0 and j == 0:
                    # pixel at the center: the distance is 0
                    self._kernel[self.lw, self.lw] = 0
                else:
                    sq = np.linalg.norm([i, j]) ** 2
                    self._kernel[i + self.lw, j + self.lw] = sq
                    self._kernel[-i + self.lw, -j + self.lw] = sq
                    self._kernel[-i + self.lw, j + self.lw] = sq
                    self._kernel[i + self.lw, -j + self.lw] = sq
        # compute the gaussian kernel: exp(-0.5 * d^2 / sigma^2)
        self._kernel *= -.5 / self.sigma ** 2
        self._kernel = np.exp(self._kernel)
        self._kernel /= 2 * np.pi * self.sigma ** 2
        # renormalize so the discrete kernel sums to exactly 1
        self._kernel /= np.sum(self._kernel)
        return self._kernel

    def filter_scipy(self, f):
        '''Filter image f with scipy's gaussian_filter (timed reference).'''
        start = time.time()
        self.image_benchmark_ = gaussian_filter(f, self.sigma,
                                                mode=self.mode,
                                                cval=self.cval)
        self.run_time_benchmark_ = time.time() - start
        return self.image_benchmark_, self.run_time_benchmark_

    def _python_convolution(self, lx, ly, image):
        '''Direct 2D convolution of the padded image with the kernel.'''
        for i in range(0, lx):
            for j in range(0, ly):
                local_input = image[i: i + 2 * self.lw + 1,
                                    j: j + 2 * self.lw + 1]
                self.image_[i, j] = np.sum(local_input * self._kernel)
        return self.image_

    def _other(self, local_input):
        '''Experimental row-wise weighting of a single (pan x pan) window.

        Weights only the middle column plus 8-wide strips, mimicking an
        AVX-style layout.
        '''
        toplot = local_input * 0.0
        sumg = 0
        for k in range(self.pan):
            # middle column
            # Fix: exponent uses sigma**2 (as in kernel_ and the class
            # docstring); the side-strip weights below previously divided
            # by sigma only, inconsistent with this center-column weight.
            gaussianc = np.exp(-0.5 / self.sigma ** 2 *
                               np.linalg.norm([k - self.lw, 0]) ** 2)
            sumg += gaussianc
            toplot[k, self.lw] = local_input[k, self.lw] * gaussianc
            # columns on the right side, in 8-wide strips
            for l in range(1, self.lw + 1, 8):
                toplot[k, self.lw + l:self.lw + l + 8] = [
                    local_input[k, self.lw + _] *
                    np.exp(-.5 / self.sigma ** 2 *
                           np.linalg.norm([k - self.lw, _]) ** 2)
                    for _ in range(l, l + 8)]
                gaussianr = [np.exp(-.5 / self.sigma ** 2 *
                                    np.linalg.norm([k - self.lw, _]) ** 2)
                             for _ in range(l, l + 8)]
                sumg += np.sum(gaussianr)
            # columns on the left side, in 8-wide strips
            for l in range(0, self.lw, 8):
                toplot[k, l:l + 8] = [
                    local_input[k, _] *
                    np.exp(-.5 / self.sigma ** 2 *
                           np.linalg.norm([k - self.lw, _ - self.lw]) ** 2)
                    for _ in range(l, l + 8)]
                gaussianl = [np.exp(-.5 / self.sigma ** 2 *
                                    np.linalg.norm([k - self.lw,
                                                    _ - self.lw]) ** 2)
                             for _ in range(l, l + 8)]
                sumg += np.sum(gaussianl)
        toplot /= sumg
        self.image_ = toplot
        # run the filter with scipy to get error and run time difference
        self.filter_scipy(local_input)
        self.error_ = np.linalg.norm(self.image_benchmark_ - self.image_)
        return self.image_

    def filter_python(self, f):
        '''Filter image f with the pure-python convolution and benchmark it
        against scipy.

        Raises
        ------
        ValueError
            if self.mode is not one of the supported padding modes.
        '''
        start = time.time()
        self.image_ = f * 0.0
        lx, ly = f.shape
        # create the gaussian filter kernel
        self._kernel = self.kernel_
        # pad the image according to the requested edge mode
        if self.mode == 'constant':
            image = np.lib.pad(f, self.lw, 'constant',
                               constant_values=self.cval)
        elif self.mode == 'reflect':
            image = np.lib.pad(f, self.lw, 'reflect')
        elif self.mode == 'wrap':
            image = np.lib.pad(f, self.lw, 'wrap')
        elif self.mode == 'nearest':
            image = np.lib.pad(f, self.lw, 'edge')
        else:
            # Fix: previously an unsupported mode fell through and raised
            # NameError on `image`; fail with an explicit message instead.
            raise ValueError("unsupported mode '%s'" % self.mode)
        # convolution with the gaussian kernel for filtering
        self.image_ = self._python_convolution(lx, ly, image)
        self.run_time_ = time.time() - start
        # run the filter with scipy to get error and run time difference
        self.filter_scipy(f)
        self.error_ = np.linalg.norm(self.image_benchmark_ - self.image_)
        return self
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception

from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util

# NOTE: generated NITRO API binding — keep the property/classmethod shapes
# in sync with the NITRO payload formatter, which maps fields by name.
class autoscaleprofile(base_resource) :
    """ Configuration for autoscale profile resource. """
    def __init__(self) :
        self._name = ""
        self._type = ""
        self._url = ""
        self._apikey = ""
        self._sharedsecret = ""
        # populated by count/count_filtered responses (note: three leading
        # underscores on purpose — accessed via __dict__['___count'] below)
        self.___count = 0

    @property
    def name(self) :
        """AutoScale profile name.<br/>Minimum length =  1.
        """
        try :
            return self._name
        except Exception as e:
            raise e

    @name.setter
    def name(self, name) :
        """AutoScale profile name.<br/>Minimum length =  1
        """
        try :
            self._name = name
        except Exception as e:
            raise e

    @property
    def type(self) :
        """The type of profile.<br/>Possible values = CLOUDSTACK.
        """
        try :
            return self._type
        except Exception as e:
            raise e

    @type.setter
    def type(self, type) :
        """The type of profile.<br/>Possible values = CLOUDSTACK
        """
        try :
            self._type = type
        except Exception as e:
            raise e

    @property
    def url(self) :
        """URL providing the service.<br/>Minimum length =  1.
        """
        try :
            return self._url
        except Exception as e:
            raise e

    @url.setter
    def url(self, url) :
        """URL providing the service.<br/>Minimum length =  1
        """
        try :
            self._url = url
        except Exception as e:
            raise e

    @property
    def apikey(self) :
        """api key for authentication with service.<br/>Minimum length =  1.
        """
        try :
            return self._apikey
        except Exception as e:
            raise e

    @apikey.setter
    def apikey(self, apikey) :
        """api key for authentication with service.<br/>Minimum length =  1
        """
        try :
            self._apikey = apikey
        except Exception as e:
            raise e

    @property
    def sharedsecret(self) :
        """shared secret for authentication with service.<br/>Minimum length =  1.
        """
        try :
            return self._sharedsecret
        except Exception as e:
            raise e

    @sharedsecret.setter
    def sharedsecret(self, sharedsecret) :
        """shared secret for authentication with service.<br/>Minimum length =  1
        """
        try :
            self._sharedsecret = sharedsecret
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        """ converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(autoscaleprofile_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # errorcode 444 means the session has expired on the appliance
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.autoscaleprofile
        except Exception as e :
            raise e

    def _get_object_name(self) :
        """ Returns the value of object identifier argument
        """
        try :
            if (self.name) :
                return str(self.name)
            return None
        except Exception as e :
            raise e

    @classmethod
    def add(cls, client, resource) :
        """ Use this API to add autoscaleprofile.
        """
        try :
            if type(resource) is not list :
                addresource = autoscaleprofile()
                addresource.name = resource.name
                addresource.type = resource.type
                addresource.url = resource.url
                addresource.apikey = resource.apikey
                addresource.sharedsecret = resource.sharedsecret
                return addresource.add_resource(client)
            else :
                # bulk add: mirror each input resource into a fresh object
                if (resource and len(resource) > 0) :
                    addresources = [ autoscaleprofile() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        addresources[i].name = resource[i].name
                        addresources[i].type = resource[i].type
                        addresources[i].url = resource[i].url
                        addresources[i].apikey = resource[i].apikey
                        addresources[i].sharedsecret = resource[i].sharedsecret
                result = cls.add_bulk_request(client, addresources)
                return result
        except Exception as e :
            raise e

    @classmethod
    def delete(cls, client, resource) :
        """ Use this API to delete autoscaleprofile.
        """
        try :
            if type(resource) is not list :
                deleteresource = autoscaleprofile()
                # accept either a bare name or a resource object
                if type(resource) !=  type(deleteresource):
                    deleteresource.name = resource
                else :
                    deleteresource.name = resource.name
                return deleteresource.delete_resource(client)
            else :
                # list of names vs list of resource objects
                if type(resource[0]) != cls :
                    if (resource and len(resource) > 0) :
                        deleteresources = [ autoscaleprofile() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            deleteresources[i].name = resource[i]
                else :
                    if (resource and len(resource) > 0) :
                        deleteresources = [ autoscaleprofile() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            deleteresources[i].name = resource[i].name
                result = cls.delete_bulk_request(client, deleteresources)
                return result
        except Exception as e :
            raise e

    @classmethod
    def update(cls, client, resource) :
        """ Use this API to update autoscaleprofile.
        """
        try :
            if type(resource) is not list :
                updateresource = autoscaleprofile()
                # `type` is not updatable, so it is intentionally omitted here
                updateresource.name = resource.name
                updateresource.url = resource.url
                updateresource.apikey = resource.apikey
                updateresource.sharedsecret = resource.sharedsecret
                return updateresource.update_resource(client)
            else :
                if (resource and len(resource) > 0) :
                    updateresources = [ autoscaleprofile() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        updateresources[i].name = resource[i].name
                        updateresources[i].url = resource[i].url
                        updateresources[i].apikey = resource[i].apikey
                        updateresources[i].sharedsecret = resource[i].sharedsecret
                result = cls.update_bulk_request(client, updateresources)
                return result
        except Exception as e :
            raise e

    @classmethod
    def get(cls, client, name="", option_="") :
        """ Use this API to fetch all the autoscaleprofile resources that are configured on netscaler.
        """
        try :
            if not name :
                # no name: fetch every configured resource
                obj = autoscaleprofile()
                response = obj.get_resources(client, option_)
            else :
                if type(name) != cls :
                    if type(name) is not list :
                        obj = autoscaleprofile()
                        obj.name = name
                        response = obj.get_resource(client, option_)
                    else :
                        # list of names: one request per name
                        if name and len(name) > 0 :
                            response = [autoscaleprofile() for _ in range(len(name))]
                            obj = [autoscaleprofile() for _ in range(len(name))]
                            for i in range(len(name)) :
                                obj[i] = autoscaleprofile()
                                obj[i].name = name[i]
                                response[i] = obj[i].get_resource(client, option_)
            return response
        except Exception as e :
            raise e

    @classmethod
    def get_filtered(cls, client, filter_) :
        """ Use this API to fetch filtered set of autoscaleprofile resources.
        filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = autoscaleprofile()
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(client, option_)
            return response
        except Exception as e :
            raise e

    @classmethod
    def count(cls, client) :
        """ Use this API to count the autoscaleprofile resources configured on NetScaler.
        """
        try :
            obj = autoscaleprofile()
            option_ = options()
            option_.count = True
            response = obj.get_resources(client, option_)
            if response :
                # count is carried on the first element of the response
                return response[0].__dict__['___count']
            return 0
        except Exception as e :
            raise e

    @classmethod
    def count_filtered(cls, client, filter_) :
        """ Use this API to count filtered the set of autoscaleprofile resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = autoscaleprofile()
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(client, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e :
            raise e

    class Type:
        # allowed values for the `type` property
        CLOUDSTACK = "CLOUDSTACK"

class autoscaleprofile_response(base_response) :
    """ Response envelope deserialized by the NITRO payload formatter. """
    def __init__(self, length=1) :
        self.autoscaleprofile = []
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        self.autoscaleprofile = [autoscaleprofile() for _ in range(length)]
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

"""Tests for ``mxnet.gluon.Trainer``: kvstore selection, sparse parameters,
state save/load, LR scheduling, and gradient allreduce across contexts."""

import mxnet as mx
import unittest
import os
import numpy as np
from mxnet import gluon
from mxnet.gluon import nn
from mxnet.test_utils import assert_almost_equal
from common import assertRaises, xfail_when_nonstandard_decimal_separator
from copy import deepcopy
import pytest


def dict_equ(a, b):
    """Assert two dicts have the same keys and element-wise equal NDArray values."""
    assert set(a) == set(b)
    for k in a:
        assert (a[k].asnumpy() == b[k].asnumpy()).all()


def test_multi_trainer():
    """A row_sparse Parameter may be owned by at most one Trainer at a time."""
    x = gluon.Parameter('x', shape=(10,), stype='row_sparse')
    x.initialize()
    # test set trainer
    trainer0 = gluon.Trainer([x], 'sgd')
    # NOTE(review): `x._trainer()` is called here but compared with
    # `x._trainer is None` below — presumably `_trainer` is a weakref; confirm.
    assert(x._trainer() is trainer0)
    # test unset trainer
    x._set_trainer(None)
    assert(x._trainer is None)
    x._set_trainer(trainer0)
    with pytest.raises(RuntimeError):
        # multiple trainers for a sparse Parameter is not allowed
        trainer1 = gluon.Trainer([x], 'sgd')


def test_trainer_with_sparse_grad_on_single_context():
    """On a single device with sparse grads, no kvstore should be created."""
    x = gluon.Parameter('x', shape=(10,), grad_stype='row_sparse')
    x.initialize(ctx=[mx.cpu(0)], init='zeros')
    trainer = gluon.Trainer([x], 'sgd', {'learning_rate': 1.0, 'momentum': 0.5})
    with mx.autograd.record():
        for w in x.list_data():
            y = w + 1
            y.backward()
    trainer.step(1)
    assert trainer._update_on_kvstore is None
    assert trainer._kvstore is None  # No kvstore created for single-device training
    # grad of (w + 1) w.r.t. w is 1; lr=1.0 from zeros gives w == -1
    assert (x.data(mx.cpu(0)).asnumpy() == -1).all()


def test_trainer_with_teststore():
    """A kvstore that cannot update on the server forces update_on_kvstore=False."""
    x = gluon.Parameter('x', shape=(10,))
    x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    kv = mx.kv.create('teststore')
    trainer = gluon.Trainer([x], 'sgd', {'learning_rate': 1.0, 'momentum': 0.5},
                            kvstore=kv)
    with mx.autograd.record():
        for w in x.list_data():
            y = w + 1
            y.backward()
    trainer.step(1)
    assert trainer._update_on_kvstore == False
    # two contexts contribute gradients, so the step is doubled: w == -2
    assert (x.data(mx.cpu(1)).asnumpy() == -2).all()
    # Expect exceptions if update_on_kvstore is set to True,
    # because TestStore does not support that
    invalid_trainer = gluon.Trainer([x], 'sgd', kvstore=kv,
                                    update_on_kvstore=True)
    pytest.raises(ValueError, invalid_trainer._init_kvstore)


def test_trainer():
    """End-to-end Trainer behavior: stepping, lr_mult, state save/load,
    and allreduce_grads/update when update_on_kvstore is False."""
    x = gluon.Parameter('x', shape=(10,))
    x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    trainer = gluon.Trainer([x], 'sgd', {'learning_rate': 1.0, 'momentum': 0.5})
    with mx.autograd.record():
        for w in x.list_data():
            y = w + 1
            y.backward()
    trainer.step(1)
    # NOTE(review): this assertion compares a value with itself and is
    # always true — likely meant to compare against the kvstore updater's
    # optimizer param_dict; confirm intent before changing.
    assert trainer._optimizer.param_dict == trainer._optimizer.param_dict
    assert (x.data(mx.cpu(1)).asnumpy() == -2).all()
    x.lr_mult = 0.5
    with mx.autograd.record():
        for w in x.list_data():
            y = w + 1
            y.backward()
    trainer.step(1)
    # lr_mult=0.5 halves the effective lr; with momentum the total delta is -2
    assert (x.data(mx.cpu(1)).asnumpy() == -4).all()

    trainer.save_states('test_trainer.states')
    states = deepcopy(trainer._kvstore._updater.states) if trainer._update_on_kvstore \
        else deepcopy(trainer._updaters[0].states)
    trainer.load_states('test_trainer.states')
    if trainer._update_on_kvstore:
        dict_equ(trainer._kvstore._updater.states, states)
        assert trainer._optimizer == trainer._kvstore._updater.optimizer
        # invalid usage of update and allreduce_grads if update_on_kvstore
        pytest.raises(AssertionError, trainer.update, 1)
        pytest.raises(AssertionError, trainer.allreduce_grads)
    else:
        for updater in trainer._updaters:
            dict_equ(updater.states, states)
        assert trainer._optimizer == trainer._updaters[0].optimizer

    # second phase: explicit allreduce_grads/update with update_on_kvstore=False
    x = gluon.Parameter('x', shape=(10,))
    x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    trainer2 = gluon.Trainer([x], 'sgd', {'learning_rate': 1.0, 'momentum': 0.5},
                             update_on_kvstore=False)
    with mx.autograd.record():
        for i, w in enumerate(x.list_data()):
            y = i*w
            y.backward()
    # per-context grads (0 and 1) differ before the allreduce
    assert (x.grad(mx.cpu(0)).asnumpy() != x.grad(mx.cpu(1)).asnumpy()).all()
    trainer2.allreduce_grads()
    assert (x.grad(mx.cpu(0)).asnumpy() == x.grad(mx.cpu(1)).asnumpy()).all()
    trainer2.update(1)
    assert (x.data(mx.cpu(1)).asnumpy() == -1).all(), x.data(mx.cpu(1)).asnumpy()


def test_trainer_save_load():
    """load_states must re-associate the parameter dict so later lr_mult
    changes are honored by the (kvstore) optimizer."""
    previous_update_on_kvstore = os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")
    os.putenv('MXNET_UPDATE_ON_KVSTORE', '1')
    x = gluon.Parameter('x', shape=(10,), lr_mult=1.0)
    x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    trainer = gluon.Trainer([x], 'sgd', {'learning_rate': 0.1})
    with mx.autograd.record():
        for w in x.list_data():
            y = w + 1
            y.backward()
    trainer.step(1)
    assert trainer._kvstore._updater.optimizer._get_lr(0) == 0.1
    trainer.save_states('test_trainer_save_load.states')
    trainer.load_states('test_trainer_save_load.states')
    x.lr_mult = 2.0
    # check if parameter dict is correctly associated with optimizer after load_state
    assert trainer._kvstore._updater.optimizer._get_lr(0) == 0.2
    os.putenv('MXNET_UPDATE_ON_KVSTORE', previous_update_on_kvstore)


def test_trainer_sparse_save_load():
    """Same as test_trainer_save_load, but with a row_sparse parameter."""
    x = gluon.Parameter('x', shape=(10, 1), lr_mult=1.0,
                        stype='row_sparse', grad_stype='row_sparse')
    x.initialize(ctx=[mx.cpu(0)], init='zeros')
    trainer = gluon.Trainer([x], 'sgd', {'learning_rate': 0.1})
    all_rows = mx.nd.arange(0, 10, ctx=mx.cpu(0))
    with mx.autograd.record():
        for w in x.list_row_sparse_data(all_rows):
            y = w * 1
            y.backward()
    trainer.step(1)
    assert trainer._kvstore._updater.optimizer._get_lr(0) == 0.1
    trainer.save_states('test_trainer_sparse_save_load.states')
    trainer.load_states('test_trainer_sparse_save_load.states')
    x.lr_mult = 2.0
    # check if parameter dict is correctly associated with optimizer after load_state
    assert trainer._kvstore._updater.optimizer._get_lr(0) == 0.2


def test_trainer_multi_layer_init():
    """Lazily-initialized sparse + dense params in one net must all be
    picked up by the Trainer and updated correctly."""
    class Net(gluon.Block):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            # sparse param
            self.embed_weight = gluon.Parameter('embed_weight', stype='row_sparse',
                                                shape=(4,3), grad_stype='row_sparse')
            # dense param from a hybrid block
            self.dense0 = nn.Dense(2)

        def forward(self, x):
            embed_weight = self.embed_weight.row_sparse_data(x)
            embed = mx.nd.Embedding(data=x, weight=embed_weight,
                                    input_dim=4, output_dim=3, sparse_grad=True)
            return self.dense0(embed)

    def check_init(ctxes):
        net = Net()
        net.initialize(mx.init.One(), ctx=ctxes)
        trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 1})
        data = mx.nd.array([[0,2], [1,2]])
        xs = gluon.utils.split_and_load(data, ctxes)
        ys = []
        with mx.autograd.record():
            for x in xs:
                y = net(x)
                ys.append(y)
        for y in ys:
            y.backward()
        trainer.step(1)
        # all parameters should be initialized
        assert not trainer._params_to_init
        all_rows = mx.nd.arange(0, 4, ctx=mx.cpu(1))
        # check the updated weights
        weight = net.embed_weight.row_sparse_data(all_rows).asnumpy()
        # rows 0 and 1 appear once each, row 2 twice, row 3 never
        assert (weight[0] == -1).all()
        assert (weight[1] == -1).all()
        assert (weight[2] == -3).all()
        assert (weight[3] == 1).all()

    check_init([mx.cpu(1), mx.cpu(2)])
    check_init([mx.cpu(1)])


@xfail_when_nonstandard_decimal_separator
def test_trainer_reset_kv():
    """Loading new parameter values must reset the trainer's kvstore state."""
    def check_trainer_reset_kv(kv):
        x = gluon.Parameter('x', shape=(10,), lr_mult=1.0)
        params = {'x': x}
        x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
        trainer = gluon.Trainer(params, 'sgd', {'learning_rate': 0.1}, kvstore=kv)
        mx.nd.save('test_trainer_reset_kv.params',
                   {k: v._reduce() for k, v in params.items()})
        with mx.autograd.record():
            for w in x.list_data():
                y = w + 1
                y.backward()
        trainer.step(1)
        assert trainer._kvstore.type == kv
        # load would reset kvstore
        mx.nd.waitall()
        params = mx.nd.load('test_trainer_reset_kv.params')
        x._load_init(params['x'], None)
        if trainer._update_on_kvstore:
            # drop kvstore state if new parameters are loaded
            assert trainer._kvstore is None
            assert trainer._kv_initialized is False
        with mx.autograd.record():
            for w in x.list_data():
                y = w + 1
                y.backward()
        trainer.step(1)
        # the updated parameter should be based on the loaded checkpoint
        assert (x.data(mx.cpu()) == -0.2).asnumpy().all()

    kvs = ['local', 'device']
    for kv in kvs:
        check_trainer_reset_kv(kv)


@xfail_when_nonstandard_decimal_separator
def test_trainer_sparse_kv():
    """Matrix of (stype, grad_stype, update_on_kvstore) combinations;
    `expected` is either the resolved update_on_kvstore flag or the
    exception type the combination must raise."""
    def check_trainer_sparse_kv(kv, stype, grad_stype, update_on_kv, expected):
        x = mx.gluon.Parameter('x', shape=(10,1), lr_mult=1.0,
                               stype=stype, grad_stype=grad_stype)
        x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
        trainer = gluon.Trainer([x], 'sgd', {'learning_rate': 0.1},
                                kvstore=kv, update_on_kvstore=update_on_kv)
        all_rows = mx.nd.arange(0, 10, ctx=mx.cpu(0))
        try:
            ws = x.list_data() if stype == 'default' else x.list_row_sparse_data(all_rows)
            with mx.autograd.record():
                for w in ws:
                    y = w + 1
                    y.backward()
            trainer.step(1)
            assert trainer._kvstore.type == kv
            assert trainer._kv_initialized
            assert trainer._update_on_kvstore is expected
            # the updated parameter should be based on the loaded checkpoint
            mx.nd.waitall()
            updated_w = x.data(mx.cpu(0)) if stype == 'default' else x.row_sparse_data(all_rows)
            assert (updated_w == -0.2).asnumpy().all(), updated_w
        except Exception as err:
            # invalid combinations must raise the expected exception type
            assert isinstance(err, expected)

    kvs = ['local', 'device']
    global_update_on_kvstore = bool(int(os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")))
    for kv in kvs:
        check_trainer_sparse_kv(kv, 'default', 'default', True, True)
        check_trainer_sparse_kv(kv, 'default', 'default', False, False)
        check_trainer_sparse_kv(kv, 'default', 'default', None, global_update_on_kvstore)
        check_trainer_sparse_kv(kv, 'default', 'row_sparse', None, False)
        check_trainer_sparse_kv(kv, 'default', 'row_sparse', True, True)
        check_trainer_sparse_kv(kv, 'default', 'row_sparse', False, False)
        check_trainer_sparse_kv(kv, 'row_sparse', 'row_sparse', None, True)
        check_trainer_sparse_kv(kv, 'row_sparse', 'row_sparse', False, ValueError)


def test_trainer_lr_sched():
    """FactorScheduler must drive trainer.learning_rate on both the
    kvstore and non-kvstore update paths."""
    x = gluon.Parameter('x', shape=(10,))
    x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    freq = 2
    factor = 0.1
    lr = 1
    lr_sched = mx.lr_scheduler.FactorScheduler(freq, factor=factor, base_lr=lr)
    trainer = gluon.Trainer([x], 'sgd', {'learning_rate': lr,
                                         'lr_scheduler': lr_sched})
    for i in range(10):
        with mx.autograd.record():
            for w in x.list_data():
                y = w + 1
                y.backward()
        trainer.step(1)
        if i % freq == 0:
            assert trainer.learning_rate == lr, (lr, trainer.learning_rate, i)
            lr *= factor
    mx.nd.waitall()

    # Update on kvstore = False
    x = gluon.Parameter('x', shape=(10,))
    x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    freq = 2
    factor = 0.1
    lr = 1
    lr_sched = mx.lr_scheduler.FactorScheduler(freq, factor=factor, base_lr=lr)
    trainer = gluon.Trainer([x], 'sgd', {'learning_rate': lr,
                                         'lr_scheduler': lr_sched},
                            update_on_kvstore=False)
    for i in range(10):
        with mx.autograd.record():
            for w in x.list_data():
                y = w + 1
                y.backward()
        trainer.step(1)
        if i % freq == 0:
            assert trainer.learning_rate == lr, (lr, trainer.learning_rate, i)
            lr *= factor
    mx.nd.waitall()


def test_gluon_trainer_param_order():
    """Trainer must keep parameters in collect_params() order regardless of
    the (dict) order layers were declared in."""
    net = mx.gluon.nn.Sequential()
    # layers may be added in a random order for all workers
    layers = {'ones_': 1, 'zeros_': 0}
    for name, init in layers.items():
        net.add(mx.gluon.nn.Dense(10, in_units=10,
                                  weight_initializer=mx.init.Constant(init),
                                  use_bias=False))
    net.initialize()
    params = net.collect_params()
    trainer = gluon.Trainer(params, 'sgd')
    for name, init in layers.items():
        expected_idx = 0 if name == 'ones_' else 1
        expected_name = '{}.weight'.format(expected_idx)
        assert trainer._params[expected_idx].name == params[expected_name].name


def test_trainer_allreduce_hybridsequential():
    """allreduce_grads must run cleanly on a hybridized multi-layer net."""
    contexts = [mx.cpu(0), mx.cpu(1)]
    net = mx.gluon.nn.HybridSequential()
    for _ in range(8):
        # Create a network with 8 layers
        net.add(mx.gluon.nn.Dense(1, weight_initializer='ones',
                                  bias_initializer='ones'))
    net.initialize(ctx=contexts)
    net.hybridize()
    trainer = mx.gluon.Trainer(net.collect_params(), 'sgd',
                               update_on_kvstore=False)
    for ctx in contexts:
        with mx.autograd.record():
            out = net(mx.nd.ones((1, 1), ctx=ctx))
        out.backward()
    trainer.allreduce_grads()


def test_trainer_share_parameters():
    """Layers sharing parameters via share_parameters must stay identical
    after a Trainer step."""
    class Net(gluon.Block):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            self.dense1 = gluon.nn.Dense(5, in_units=2, use_bias=False)
            params = self.dense1.collect_params()
            # dense2 shares dense1's weight
            self.dense2 = gluon.nn.Dense(5, in_units=2,
                                         use_bias=False).share_parameters(params)
            self.dense3 = gluon.nn.Dense(5, in_units=5, use_bias=False)

        def forward(self, x):
            hidden = self.dense1(x) + self.dense2(x)
            out = self.dense3(hidden)
            return out

    net = Net()
    ctxes = [mx.cpu(0), mx.cpu(1)]
    net.initialize(mx.init.One(), ctx=ctxes)
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 1})
    data = mx.nd.array([[1, 1], [1, 1]])
    xs = gluon.utils.split_and_load(data, ctxes)
    ys = []
    with mx.autograd.record():
        for x in xs:
            y = net(x)
            ys.append(y)
    for y in ys:
        y.backward()
    trainer.step(1)
    params = net.collect_params()
    shared_params = []
    for param in params.values():
        p = param.data(mx.cpu(0)).asnumpy()
        # the two (2-input) shared weights must have received the same update
        if p.shape[1] == 2:
            shared_params.append(p)
    assert((shared_params[0] == shared_params[1]).all())
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
#          Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause

"""Tests for ``sklearn.feature_extraction.image``: pixel-graph construction
(img_to_graph / grid_to_graph) and 2D patch extraction/reconstruction."""

import numpy as np
import scipy as sp
from scipy import ndimage
from scipy.sparse.csgraph import connected_components
import pytest

from sklearn.feature_extraction.image import (
    img_to_graph,
    grid_to_graph,
    extract_patches_2d,
    reconstruct_from_patches_2d,
    PatchExtractor,
    _extract_patches,
)
from sklearn.utils._testing import ignore_warnings


def test_img_to_graph():
    """Gradient edge weights must be identical for x- and y-gradient images."""
    x, y = np.mgrid[:4, :4] - 10
    grad_x = img_to_graph(x)
    grad_y = img_to_graph(y)
    assert grad_x.nnz == grad_y.nnz
    # Negative elements are the diagonal: the elements of the original
    # image. Positive elements are the values of the gradient, they
    # should all be equal on grad_x and grad_y
    np.testing.assert_array_equal(
        grad_x.data[grad_x.data > 0], grad_y.data[grad_y.data > 0]
    )


def test_img_to_graph_sparse():
    # Check that the edges are in the right position
    # when using a sparse image with a singleton component
    mask = np.zeros((2, 3), dtype=bool)
    mask[0, 0] = 1
    mask[:, 2] = 1
    x = np.zeros((2, 3))
    x[0, 0] = 1
    x[0, 2] = -1
    x[1, 2] = -2
    grad_x = img_to_graph(x, mask=mask).todense()
    desired = np.array([[1, 0, 0], [0, -1, 1], [0, 1, -2]])
    np.testing.assert_array_equal(grad_x, desired)


def test_grid_to_graph():
    # Checking that the function works with graphs containing no edges
    size = 2
    roi_size = 1
    # Generating two convex parts with one vertex
    # Thus, edges will be empty in _to_graph
    mask = np.zeros((size, size), dtype=bool)
    mask[0:roi_size, 0:roi_size] = True
    mask[-roi_size:, -roi_size:] = True
    mask = mask.reshape(size ** 2)
    A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
    assert connected_components(A)[0] == 2

    # check ordering
    mask = np.zeros((2, 3), dtype=bool)
    mask[0, 0] = 1
    mask[:, 2] = 1
    graph = grid_to_graph(2, 3, 1, mask=mask.ravel()).todense()
    desired = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]])
    np.testing.assert_array_equal(graph, desired)

    # Checking that the function works whatever the type of mask is
    mask = np.ones((size, size), dtype=np.int16)
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
    assert connected_components(A)[0] == 1

    # Checking dtype of the graph
    mask = np.ones((size, size))
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=bool)
    assert A.dtype == bool
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=int)
    assert A.dtype == int
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float64)
    assert A.dtype == np.float64


@ignore_warnings(category=DeprecationWarning)  # scipy deprecation inside face
def test_connect_regions():
    """Graph components of a thresholded image must match ndimage labels."""
    try:
        face = sp.face(gray=True)
    except AttributeError:
        # Newer versions of scipy have face in misc
        from scipy import misc

        face = misc.face(gray=True)
    # subsample by 4 to reduce run time
    face = face[::4, ::4]
    for thr in (50, 150):
        mask = face > thr
        graph = img_to_graph(face, mask=mask)
        assert ndimage.label(mask)[1] == connected_components(graph)[0]


@ignore_warnings(category=DeprecationWarning)  # scipy deprecation inside face
def test_connect_regions_with_grid():
    """Same as test_connect_regions, but via grid_to_graph (with and
    without an explicit dtype)."""
    try:
        face = sp.face(gray=True)
    except AttributeError:
        # Newer versions of scipy have face in misc
        from scipy import misc

        face = misc.face(gray=True)

    # subsample by 4 to reduce run time
    face = face[::4, ::4]

    mask = face > 50
    graph = grid_to_graph(*face.shape, mask=mask)
    assert ndimage.label(mask)[1] == connected_components(graph)[0]

    mask = face > 150
    graph = grid_to_graph(*face.shape, mask=mask, dtype=None)
    assert ndimage.label(mask)[1] == connected_components(graph)[0]


def _downsampled_face():
    """Return the scipy 'face' test image, grayscale, downsampled 4x by
    2x2 block averaging (two 2x averaging passes, then /16)."""
    try:
        face = sp.face(gray=True)
    except AttributeError:
        # Newer versions of scipy have face in misc
        from scipy import misc

        face = misc.face(gray=True)
    face = face.astype(np.float32)
    face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]
    face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]
    face = face.astype(np.float32)
    face /= 16.0
    return face


def _orange_face(face=None):
    """Build a 3-channel color version of the downsampled face."""
    face = _downsampled_face() if face is None else face
    face_color = np.zeros(face.shape + (3,))
    face_color[:, :, 0] = 256 - face
    face_color[:, :, 1] = 256 - face / 2
    face_color[:, :, 2] = 256 - face / 4
    return face_color


def _make_images(face=None):
    """Stack three shifted copies of the face into an image collection."""
    face = _downsampled_face() if face is None else face
    # make a collection of faces
    images = np.zeros((3,) + face.shape)
    images[0] = face
    images[1] = face + 1
    images[2] = face + 2
    return images


# shared fixtures, computed once at import time
downsampled_face = _downsampled_face()
orange_face = _orange_face(downsampled_face)
face_collection = _make_images(downsampled_face)


def test_extract_patches_all():
    face = downsampled_face
    i_h, i_w = face.shape
    p_h, p_w = 16, 16
    expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
    patches = extract_patches_2d(face, (p_h, p_w))
    assert patches.shape == (expected_n_patches, p_h, p_w)


def test_extract_patches_all_color():
    face = orange_face
    i_h, i_w = face.shape[:2]
    p_h, p_w = 16, 16
    expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
    patches = extract_patches_2d(face, (p_h, p_w))
    assert patches.shape == (expected_n_patches, p_h, p_w, 3)


def test_extract_patches_all_rect():
    face = downsampled_face
    face = face[:, 32:97]
    i_h, i_w = face.shape
    p_h, p_w = 16, 12
    expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
    patches = extract_patches_2d(face, (p_h, p_w))
    assert patches.shape == (expected_n_patches, p_h, p_w)


def test_extract_patches_max_patches():
    """max_patches as an int caps the count; as a float it is a fraction;
    out-of-range values must raise ValueError."""
    face = downsampled_face
    i_h, i_w = face.shape
    p_h, p_w = 16, 16

    patches = extract_patches_2d(face, (p_h, p_w), max_patches=100)
    assert patches.shape == (100, p_h, p_w)

    expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
    patches = extract_patches_2d(face, (p_h, p_w), max_patches=0.5)
    assert patches.shape == (expected_n_patches, p_h, p_w)

    with pytest.raises(ValueError):
        extract_patches_2d(face, (p_h, p_w), max_patches=2.0)
    with pytest.raises(ValueError):
        extract_patches_2d(face, (p_h, p_w), max_patches=-1.0)


def test_extract_patch_same_size_image():
    face = downsampled_face
    # Request patches of the same size as image
    # Should return just the single patch a.k.a. the image
    patches = extract_patches_2d(face, face.shape, max_patches=2)
    assert patches.shape[0] == 1


def test_extract_patches_less_than_max_patches():
    face = downsampled_face
    i_h, i_w = face.shape
    p_h, p_w = 3 * i_h // 4, 3 * i_w // 4
    # this is 3185
    expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
    patches = extract_patches_2d(face, (p_h, p_w), max_patches=4000)
    assert patches.shape == (expected_n_patches, p_h, p_w)


def test_reconstruct_patches_perfect():
    face = downsampled_face
    p_h, p_w = 16, 16

    patches = extract_patches_2d(face, (p_h, p_w))
    face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
    np.testing.assert_array_almost_equal(face, face_reconstructed)


def test_reconstruct_patches_perfect_color():
    face = orange_face
    p_h, p_w = 16, 16

    patches = extract_patches_2d(face, (p_h, p_w))
    face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
    np.testing.assert_array_almost_equal(face, face_reconstructed)


def test_patch_extractor_fit():
    faces = face_collection
    extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
    # fit is a no-op and must return self
    assert extr == extr.fit(faces)


def test_patch_extractor_max_patches():
    faces = face_collection
    i_h, i_w = faces.shape[1:3]
    p_h, p_w = 8, 8

    max_patches = 100
    expected_n_patches = len(faces) * max_patches
    extr = PatchExtractor(
        patch_size=(p_h, p_w), max_patches=max_patches, random_state=0
    )
    patches = extr.transform(faces)
    assert patches.shape == (expected_n_patches, p_h, p_w)

    max_patches = 0.5
    expected_n_patches = len(faces) * int(
        (i_h - p_h + 1) * (i_w - p_w + 1) * max_patches
    )
    extr = PatchExtractor(
        patch_size=(p_h, p_w), max_patches=max_patches, random_state=0
    )
    patches = extr.transform(faces)
    assert patches.shape == (expected_n_patches, p_h, p_w)


def test_patch_extractor_max_patches_default():
    faces = face_collection
    extr = PatchExtractor(max_patches=100, random_state=0)
    patches = extr.transform(faces)
    # default patch_size is 1/10 of the image in each dimension
    assert patches.shape == (len(faces) * 100, 19, 25)


def test_patch_extractor_all_patches():
    faces = face_collection
    i_h, i_w = faces.shape[1:3]
    p_h, p_w = 8, 8
    expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
    extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
    patches = extr.transform(faces)
    assert patches.shape == (expected_n_patches, p_h, p_w)


def test_patch_extractor_color():
    faces = _make_images(orange_face)
    i_h, i_w = faces.shape[1:3]
    p_h, p_w = 8, 8
    expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
    extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
    patches = extr.transform(faces)
    assert patches.shape == (expected_n_patches, p_h, p_w, 3)


def test_extract_patches_strided():
    """Exhaustive 1D/2D/3D cases for the strided view produced by
    _extract_patches: expected view shape and content of the last patch."""
    image_shapes_1D = [(10,), (10,), (11,), (10,)]
    patch_sizes_1D = [(1,), (2,), (3,), (8,)]
    patch_steps_1D = [(1,), (1,), (4,), (2,)]

    expected_views_1D = [(10,), (9,), (3,), (2,)]
    last_patch_1D = [(10,), (8,), (8,), (2,)]

    image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
    patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
    patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]

    expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
    last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]

    image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
    patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
    patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]

    expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
    last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]

    image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
    patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
    patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
    expected_views = expected_views_1D + expected_views_2D + expected_views_3D
    last_patches = last_patch_1D + last_patch_2D + last_patch_3D

    for (image_shape, patch_size, patch_step, expected_view, last_patch) in zip(
        image_shapes, patch_sizes, patch_steps, expected_views, last_patches
    ):
        image = np.arange(np.prod(image_shape)).reshape(image_shape)
        patches = _extract_patches(
            image, patch_shape=patch_size, extraction_step=patch_step
        )

        ndim = len(image_shape)

        assert patches.shape[:ndim] == expected_view
        last_patch_slices = tuple(
            slice(i, i + j, None) for i, j in zip(last_patch, patch_size)
        )
        assert (
            patches[(-1, None, None) * ndim] == image[last_patch_slices].squeeze()
        ).all()


def test_extract_patches_square():
    # test same patch size for all dimensions
    face = downsampled_face
    i_h, i_w = face.shape
    p = 8
    expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
    patches = _extract_patches(face, patch_shape=p)
    assert patches.shape == (expected_n_patches[0], expected_n_patches[1], p, p)


def test_width_patch():
    # width and height of the patch should be less than the image
    x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    with pytest.raises(ValueError):
        extract_patches_2d(x, (4, 1))
    with pytest.raises(ValueError):
        extract_patches_2d(x, (1, 4))
# -*- coding: utf-8 -*-

"""Shared helpers for the docker-registry Flask app (Python 2): token and
signature auth against the index, request parsing, and response shaping."""

import base64
import functools
import hashlib
import logging
import os
import random
import re
import string
import time
import urllib

import flask
from M2Crypto import RSA
import requests

from docker_registry.core import compat
json = compat.json

from . import storage
from .lib import config


cfg = config.load()

logger = logging.getLogger(__name__)
# matches e.g. "docker/1.5.0" in a User-Agent string
_re_docker_version = re.compile('docker/([^\s]+)')
# parses `key=value` / `key:value` pairs out of a Token auth header
_re_authorization = re.compile(r'(\w+)[:=][\s"]?([^",]+)"?')
# short (16-hex) or full (64-hex) image IDs
_re_hex_image_id = re.compile(r'^([a-f0-9]{16}|[a-f0-9]{64})$')


def valid_image_id(f):
    """Decorator: 404 unless the `image_id` kwarg is a valid hex image ID."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        image_id = kwargs.get('image_id', '')
        if _re_hex_image_id.match(image_id):
            return f(*args, **kwargs)
        return api_error("Invalid image ID", 404)
    return wrapper


def docker_client_version():
    """Try and extract the client version from the User-Agent string

    So we can warn older versions of the Docker engine/daemon about
    incompatible APIs. If we can't figure out the version (e.g. the
    client is not a Docker engine), just return None.
    """
    ua = flask.request.headers.get('user-agent', '')
    m = _re_docker_version.search(ua)
    if not m:
        return
    version = m.group(1)
    if '-' in version:
        version = version.split('-')[0]
    try:
        # NOTE(review): this iterates the *characters* of e.g. "1.5.0", so
        # int('.') raises and any dotted version returns None — probably
        # meant `version.split('.')`; confirm before relying on the result.
        return tuple(int(x) for x in version)
    except ValueError:
        return


class SocketReader(object):
    """Wraps a file-like object or requests.Response; feeds every chunk
    read through registered handler callbacks (e.g. for checksumming)."""

    def __init__(self, fp):
        self._fp = fp
        self.handlers = []

    def __iter__(self):
        return self.iterate()

    def iterate(self, chunk_size=-1):
        if isinstance(self._fp, requests.Response):
            if chunk_size == -1:
                chunk_size = 1024
            for chunk in self._fp.iter_content(chunk_size):
                for handler in self.handlers:
                    handler(chunk)
                yield chunk
        else:
            chunk = self._fp.read(chunk_size)
            while chunk:
                for handler in self.handlers:
                    handler(chunk)
                yield chunk
                chunk = self._fp.read(chunk_size)

    def add_handler(self, handler):
        self.handlers.append(handler)

    def read(self, n=-1):
        buf = self._fp.read(n)
        if not buf:
            return ''
        for handler in self.handlers:
            handler(buf)
        return buf


def response(data=None, code=200, headers=None, raw=False):
    """Build a Flask JSON response with no-cache headers.

    `data` is JSON-encoded unless `raw` is True; un-serializable data
    falls back to str().
    """
    if data is None:
        data = True
    h = {
        'Cache-Control': 'no-cache',
        'Expires': '-1',
        'Content-Type': 'application/json'
    }
    if headers:
        h.update(headers)
    if h['Cache-Control'] == 'no-cache':
        h['Pragma'] = 'no-cache'
    try:
        if raw is False:
            data = json.dumps(data, sort_keys=True, skipkeys=True)
    except TypeError:
        data = str(data)
    return flask.current_app.make_response((data, code, h))


def validate_parent_access(parent_id):
    """Ask the index whether the current token grants access to a parent
    layer. Always True in standalone mode."""
    if cfg.standalone:
        return True
    auth = _parse_auth_header()
    if not auth:
        return False
    full_repos_name = auth.get('repository', '').split('/')
    if len(full_repos_name) != 2:
        logger.debug('validate_parent: Invalid repository field')
        return False
    url = '{0}/v1/repositories/{1}/{2}/layer/{3}/access'.format(
        cfg.index_endpoint, full_repos_name[0], full_repos_name[1], parent_id
    )
    headers = {'Authorization': flask.request.headers.get('authorization')}
    resp = requests.get(url, verify=True, headers=headers)
    if resp.status_code != 200:
        logger.debug('validate_parent: index returns status {0}'.format(
            resp.status_code
        ))
        return False
    try:
        # Note(dmp): unicode patch XXX not applied! Assuming requests does it
        logger.debug('validate_parent: Content: {0}'.format(resp.text))
        return json.loads(resp.text).get('access', False)
    except ValueError:
        logger.debug('validate_parent: Wrong response format')
        return False


def validate_token(auth):
    """Validate the token's repository against the index; on success,
    cache the repository's images list in local storage."""
    full_repos_name = auth.get('repository', '').split('/')
    if len(full_repos_name) != 2:
        logger.debug('validate_token: Invalid repository field')
        return False
    url = '{0}/v1/repositories/{1}/{2}/images'.format(cfg.index_endpoint,
                                                      full_repos_name[0],
                                                      full_repos_name[1])
    headers = {'Authorization': flask.request.headers.get('authorization')}
    resp = requests.get(url, verify=True, headers=headers)
    logger.debug('validate_token: Index returned {0}'.format(resp.status_code))
    if resp.status_code != 200:
        return False
    store = storage.load()
    try:
        # Note(dmp): unicode patch XXX not applied (requests)
        images_list = [i['id'] for i in json.loads(resp.text)]
        store.put_content(store.images_list_path(*full_repos_name),
                          json.dumps(images_list))
    except ValueError:
        logger.debug('validate_token: Wrong format for images_list')
        return False
    return True


def get_remote_ip():
    """Client IP, honoring X-Forwarded-For / X-Real-Ip proxy headers."""
    if 'X-Forwarded-For' in flask.request.headers:
        return flask.request.headers.getlist('X-Forwarded-For')[0]
    if 'X-Real-Ip' in flask.request.headers:
        return flask.request.headers.getlist('X-Real-Ip')[0]
    return flask.request.remote_addr


def is_ssl():
    """True if a proxy header declares the original request was HTTPS."""
    for header in ('X-Forwarded-Proto', 'X-Forwarded-Protocol'):
        if header in flask.request.headers and (
                flask.request.headers[header].lower() in ('https', 'ssl')
        ):
            return True
    return False


def _parse_auth_header():
    """Parse a `Token ...` Authorization header into a dict, or None."""
    auth = flask.request.headers.get('authorization', '')
    if auth.split(' ')[0].lower() != 'token':
        logger.debug('check_token: Invalid token format')
        return None
    logger.debug('Auth Token = {0}'.format(auth))
    auth = dict(_re_authorization.findall(auth))
    logger.debug('auth = {0}'.format(auth))
    return auth


def check_token(args):
    """Check the request's token: repository name match, access level vs
    HTTP method, then validation against the index. True in standalone
    mode or when token auth is disabled."""
    logger.debug('args = {0}'.format(args))
    if cfg.disable_token_auth is True or cfg.standalone is True:
        return True
    auth = _parse_auth_header()
    if not auth:
        return False
    if 'namespace' in args and 'repository' in args:
        # We're authorizing an action on a repository,
        # let's check that it matches the repos name provided in the token
        full_repos_name = '{namespace}/{repository}'.format(**args)
        logger.debug('full_repos_name = {0}'.format(full_repos_name))
        if full_repos_name != auth.get('repository'):
            logger.debug('check_token: Wrong repository name in the token:'
                         '{0} != {1}'.format(full_repos_name,
                                             auth.get('repository')))
            return False
    # Check that the token `access' variable is aligned with the HTTP method
    access = auth.get('access')
    if access == 'write' and flask.request.method not in ['POST', 'PUT']:
        logger.debug('check_token: Wrong access value in the token')
        return False
    if access == 'read' and flask.request.method != 'GET':
        logger.debug('check_token: Wrong access value in the token')
        return False
    if access == 'delete' and flask.request.method != 'DELETE':
        logger.debug('check_token: Wrong access value in the token')
        return False
    if validate_token(auth) is False:
        return False
    # Token is valid
    return True


def check_signature():
    """Verify the X-Signature header (RSA/SHA1 over method, path and the
    sorted X-Docker-* headers) against the configured privileged key."""
    pkey = cfg.privileged_key
    if not pkey:
        return False
    headers = flask.request.headers
    signature = headers.get('X-Signature')
    if not signature:
        logger.debug('No X-Signature header in request')
        return False
    sig = parse_content_signature(signature)
    logger.debug('Parsed signature: {}'.format(sig))
    sigdata = base64.b64decode(sig['data'])
    header_keys = sorted([
        x for x in headers.iterkeys() if x.startswith('X-Docker')
    ])
    message = ','.join([flask.request.method, flask.request.path] +
                       ['{}:{}'.format(k, headers[k]) for k in header_keys])
    logger.debug('Signed message: {}'.format(message))
    try:
        return pkey.verify(message_digest(message), sigdata, 'sha1') == 1
    except RSA.RSAError as e:
        logger.exception(e)
        return False


def parse_content_signature(s):
    """Parse `key=value;key=value` signature headers into a dict."""
    lst = [x.strip().split('=', 1) for x in s.split(';')]
    ret = {}
    for k, v in lst:
        ret[k] = v
    return ret


def message_digest(s):
    """SHA-1 digest (raw bytes) of `s`."""
    m = hashlib.new('sha1')
    m.update(s)
    return m.digest()


def requires_auth(f):
    """Decorator: allow the request if either the privileged-key signature
    or the index token checks out; otherwise 401 with WWW-Authenticate."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        if check_signature() is True or check_token(kwargs) is True:
            return f(*args, **kwargs)
        headers = {'WWW-Authenticate': 'Token'}
        return api_error('Requires authorization', 401, headers)
    return wrapper


def api_error(message, code=400, headers=None):
    """JSON error response: {"error": message} with the given status."""
    logger.debug('api_error: {0}'.format(message))
    return response({'error': message}, code, headers)


def gen_random_string(length=16):
    # NOTE(review): uses the non-cryptographic `random` module — if these
    # strings are ever security-sensitive (tokens, secrets), switch to a
    # CSPRNG (`random.SystemRandom` on Python 2).
    return ''.join([random.choice(string.ascii_uppercase + string.digits)
                    for x in range(length)])


def parse_repository_name(f):
    """Decorator: split a `namespace/repository` path (defaulting the
    namespace to 'library') and pass both as kwargs."""
    @functools.wraps(f)
    def wrapper(repository, *args, **kwargs):
        parts = repository.rstrip('/').split('/', 1)
        if len(parts) < 2:
            namespace = 'library'
            repository = parts[0]
        else:
            (namespace, repository) = parts
        repository = urllib.quote_plus(repository)
        return f(namespace=namespace, repository=repository, *args, **kwargs)
    return wrapper


def exclusive_lock(f):
    """Decorator: crude cross-process mutex via a lock file named after
    the wrapped function.

    NOTE(review): the exists()-then-open sequence is not atomic (two
    processes can both pass the check), and a waiter that sees the lock
    released before 100 tries returns None *without* calling `f`.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        lock_path = os.path.join(
            './', 'registry.{0}.lock'.format(f.func_name)
        )
        if os.path.exists(lock_path):
            x = 0
            while os.path.exists(lock_path) and x < 100:
                logger.warn('Another process is creating the search database')
                x += 1
                time.sleep(1)
            if x == 100:
                raise Exception('Timedout waiting for db init')
            return
        lock_file = open(lock_path, 'w')
        lock_file.close()
        try:
            result = f(*args, **kwargs)
        finally:
            os.remove(lock_path)
        return result
    return wrapper


def get_repository():
    """(namespace, repository) from the request token, or None if there is
    no Authorization header, or ('', '') if the token has no repository."""
    auth = flask.request.headers.get('authorization', '')
    if not auth:
        return
    auth = dict(_re_authorization.findall(auth))
    repository = auth.get('repository')
    if repository is None:
        return ('', '')
    parts = repository.rstrip('/').split('/', 1)
    if len(parts) < 2:
        return ('library', parts[0])
    return (parts[0], parts[1])


def get_endpoints(overcfg=None):
    """Registry endpoints from config, falling back to the request Host."""
    registry_endpoints = (overcfg or cfg).registry_endpoints
    if not registry_endpoints:
        # registry_endpoints = socket.gethostname()
        registry_endpoints = flask.request.environ['HTTP_HOST']
    return registry_endpoints
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for paasta_tools.cleanup_kubernetes_jobs."""
import mock
from kubernetes.client import V1Deployment
from kubernetes.client import V1StatefulSet
from pytest import fixture
from pytest import raises

from paasta_tools.cleanup_kubernetes_jobs import cleanup_unused_apps
from paasta_tools.cleanup_kubernetes_jobs import DontKillEverythingError
from paasta_tools.cleanup_kubernetes_jobs import main
from paasta_tools.kubernetes.application.controller_wrappers import DeploymentWrapper


@fixture
def fake_deployment():
    """A zero-replica V1Deployment carrying a full set of valid paasta labels."""
    fake_deployment = V1Deployment(
        metadata=mock.Mock(
            namespace="paasta",
            labels={
                "yelp.com/paasta_service": "service",
                "yelp.com/paasta_instance": "instance-1",
                "yelp.com/paasta_git_sha": "1234",
                "yelp.com/paasta_config_sha": "1234",
                "paasta.yelp.com/service": "service",
                "paasta.yelp.com/instance": "instance-1",
                "paasta.yelp.com/git_sha": "1234",
                "paasta.yelp.com/config_sha": "1234",
            },
        ),
        spec=mock.Mock(replicas=0),
    )
    # `name` is reserved by Mock's constructor, so it has to be attached as a
    # PropertyMock on the mock's type.
    type(fake_deployment.metadata).name = mock.PropertyMock(
        return_value="service-instance-1"
    )
    return fake_deployment


@fixture
def fake_stateful_set():
    """A zero-replica V1StatefulSet carrying a full set of valid paasta labels."""
    fake_stateful_set = V1StatefulSet(
        metadata=mock.Mock(
            namespace="paasta",
            labels={
                "yelp.com/paasta_service": "service",
                "yelp.com/paasta_instance": "instance-2",
                "yelp.com/paasta_git_sha": "1234",
                "yelp.com/paasta_config_sha": "1234",
                "paasta.yelp.com/service": "service",
                "paasta.yelp.com/instance": "instance-2",
                "paasta.yelp.com/git_sha": "1234",
                "paasta.yelp.com/config_sha": "1234",
            },
        ),
        spec=mock.Mock(replicas=0),
    )
    # FIX: the PropertyMock was previously assigned wrapped in a one-element
    # tuple (stray trailing comma), so `metadata.name` resolved to a tuple
    # rather than the intended string.  Assign it directly, matching
    # fake_deployment above.
    type(fake_stateful_set.metadata).name = mock.PropertyMock(
        return_value="service-instance-2"
    )
    return fake_stateful_set


@fixture
def invalid_app():
    """A deployment with no paasta labels -- should be ignored by cleanup."""
    invalid_app = V1Deployment(
        metadata=mock.Mock(namespace="paasta", labels={}), spec=mock.Mock(replicas=0)
    )
    # FIX: drop the stray one-element tuple here as well (see fake_stateful_set).
    type(invalid_app.metadata).name = mock.PropertyMock(return_value="invalid_app")
    return invalid_app


def test_main(fake_deployment, fake_stateful_set, invalid_app):
    # main() should forward the soa_dir and default thresholds to
    # cleanup_unused_apps.
    soa_dir = "paasta_maaaachine"
    with mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.cleanup_unused_apps", autospec=True
    ) as cleanup_patch:
        main(("--soa-dir", soa_dir))
        cleanup_patch.assert_called_once_with(soa_dir, kill_threshold=0.5, force=False)


def test_list_apps(fake_deployment, fake_stateful_set, invalid_app):
    # cleanup_unused_apps should list both deployments and stateful sets
    # exactly once.
    mock_kube_client = mock.MagicMock()
    with mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.KubeClient",
        return_value=mock_kube_client,
        autospec=True,
    ), mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.get_services_for_cluster",
        return_value={},
        autospec=True,
    ), mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.alert_state_change", autospec=True
    ) as mock_alert_state_change:
        mock_alert_state_change.__enter__ = mock.Mock(return_value=(mock.Mock(), None))
        mock_alert_state_change.__exit__ = mock.Mock(return_value=None)
        cleanup_unused_apps("soa_dir", kill_threshold=1, force=False)
        assert mock_kube_client.deployments.list_namespaced_deployment.call_count == 1
        assert mock_kube_client.deployments.list_namespaced_stateful_set.call_count == 1


def test_cleanup_unused_apps(fake_deployment, fake_stateful_set, invalid_app):
    # An app that is not in the cluster's service list gets deleted.
    mock_kube_client = mock.MagicMock()
    with mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.KubeClient",
        return_value=mock_kube_client,
        autospec=True,
    ), mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.list_namespaced_applications",
        return_value=[DeploymentWrapper(fake_deployment)],
        autospec=True,
    ), mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.get_services_for_cluster",
        return_value={},
        autospec=True,
    ), mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.alert_state_change", autospec=True
    ) as mock_alert_state_change:
        mock_alert_state_change.__enter__ = mock.Mock(return_value=(mock.Mock(), None))
        mock_alert_state_change.__exit__ = mock.Mock(return_value=None)
        cleanup_unused_apps("soa_dir", kill_threshold=1, force=False)
        assert mock_kube_client.deployments.delete_namespaced_deployment.call_count == 1


def test_cleanup_unused_apps_does_not_delete(
    fake_deployment, fake_stateful_set, invalid_app
):
    # Apps still present in the cluster's service list must be kept.
    mock_kube_client = mock.MagicMock()
    with mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.KubeClient",
        return_value=mock_kube_client,
        autospec=True,
    ), mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.list_namespaced_applications",
        return_value=[DeploymentWrapper(fake_deployment)],
        autospec=True,
    ), mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.get_services_for_cluster",
        return_value={("service", "instance-1"), ("service", "instance-2")},
        autospec=True,
    ), mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.alert_state_change", autospec=True
    ) as mock_alert_state_change:
        mock_alert_state_change.__enter__ = mock.Mock(return_value=(mock.Mock(), None))
        mock_alert_state_change.__exit__ = mock.Mock(return_value=None)
        cleanup_unused_apps("soa_dir", kill_threshold=1, force=False)
        assert mock_kube_client.deployments.delete_namespaced_deployment.call_count == 0


def test_cleanup_unused_apps_dont_kill_everything(
    fake_deployment, fake_stateful_set, invalid_app
):
    # With kill_threshold=0 and no force, cleanup must refuse to delete.
    mock_kube_client = mock.MagicMock()
    with mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.KubeClient",
        return_value=mock_kube_client,
        autospec=True,
    ), mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.list_namespaced_applications",
        return_value=[DeploymentWrapper(fake_deployment)],
        autospec=True,
    ), mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.get_services_for_cluster",
        return_value={},
        autospec=True,
    ), mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.alert_state_change", autospec=True
    ) as mock_alert_state_change:
        mock_alert_state_change.__enter__ = mock.Mock(return_value=(mock.Mock(), None))
        mock_alert_state_change.__exit__ = mock.Mock(return_value=None)
        with raises(DontKillEverythingError):
            cleanup_unused_apps("soa_dir", kill_threshold=0, force=False)
        assert mock_kube_client.deployments.delete_namespaced_deployment.call_count == 0


def test_cleanup_unused_apps_force(fake_deployment, fake_stateful_set, invalid_app):
    # force=True overrides the kill threshold safety check.
    mock_kube_client = mock.MagicMock()
    with mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.KubeClient",
        return_value=mock_kube_client,
        autospec=True,
    ), mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.list_namespaced_applications",
        return_value=[DeploymentWrapper(fake_deployment)],
        autospec=True,
    ), mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.get_services_for_cluster",
        return_value={},
        autospec=True,
    ), mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.alert_state_change", autospec=True
    ) as mock_alert_state_change:
        mock_alert_state_change.__enter__ = mock.Mock(return_value=(mock.Mock(), None))
        mock_alert_state_change.__exit__ = mock.Mock(return_value=None)
        cleanup_unused_apps("soa_dir", kill_threshold=0, force=True)
        assert mock_kube_client.deployments.delete_namespaced_deployment.call_count == 1


def test_cleanup_unused_apps_ignore_invalid_apps(
    fake_deployment, fake_stateful_set, invalid_app
):
    # Apps without paasta labels must never be deleted.
    mock_kube_client = mock.MagicMock()
    with mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.KubeClient",
        return_value=mock_kube_client,
        autospec=True,
    ), mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.get_services_for_cluster",
        return_value={},
        autospec=True,
    ), mock.patch(
        "paasta_tools.cleanup_kubernetes_jobs.alert_state_change", autospec=True
    ) as mock_alert_state_change:
        mock_alert_state_change.__enter__ = mock.Mock(return_value=(mock.Mock(), None))
        mock_alert_state_change.__exit__ = mock.Mock(return_value=None)
        mock_kube_client.deployments.list_namespaced_deployment.return_value = mock.MagicMock(
            items=[invalid_app]
        )
        cleanup_unused_apps("soa_dir", kill_threshold=0, force=True)
        assert mock_kube_client.deployments.delete_namespaced_deployment.call_count == 0
# -*- coding: utf-8 -*-
from __future__ import absolute_import

from celery import task

from datetime import datetime
from hashlib import md5
try:
    from urlparse import urlparse
except ImportError:
    from urllib.parse import urlparse
import feedparser
import time
import mimetypes

from bs4 import BeautifulSoup

from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model

from tagging.models import Tag

from planet.models import (Blog, Generator, Feed, FeedLink, Post, PostLink,
    Author, PostAuthorData, Enclosure, Category)
from planet.signals import feeds_updated
from planet.signals import post_created


class PostAlreadyExists(Exception):
    # Raised (and caught locally) as control flow to skip duplicate posts.
    pass


@task
def process_feed(feed_url, owner_id=None, create=False, category_title=None):
    """
    Stores a feed, its related data, its entries and their related data.
    If create=True then it creates the feed, otherwise it only stores new
    entries and their related data.

    :param feed_url: URL of the RSS/Atom feed to fetch.
    :param owner_id: optional pk of the User owning the blog (create mode).
    :param create: True to register a new feed, False to update an existing one.
    :param category_title: optional Category title to attach (create mode).
    :returns: number of new posts stored (None if aborted early via exit()).
    """
    print("[process_feed] URL={}".format(feed_url))

    def normalize_tag(tag):
        """
        converts things like "-noise-" to "noise" and "- noise -" to "noise"
        """
        if tag.startswith("-"):
            tag = tag[1:]
        if tag.endswith("-"):
            tag = tag[:-1]
        # fix for HTML entities
        tag = BeautifulSoup(tag).prettify(formatter="html")
        tag = tag.strip().lower()
        return tag

    try:
        USER_AGENT = settings.PLANET["USER_AGENT"]
    except (KeyError, AttributeError):
        print(
            """Please set PLANET = {" USER_AGENT": <string>} in your settings.py""")
        exit(0)

    feed_url = str(feed_url).strip()

    try:
        planet_feed = Feed.objects.get(url=feed_url)
    except Feed.DoesNotExist:
        planet_feed = None

    print("*" * 20)
    print("Feed: {}".format(feed_url))

    if create and planet_feed:
        # can't create it due to it already exists
        print("This feed already exists!")
        exit(0)

    if not create and not planet_feed:
        # can't update it due to it does not exist
        print("This feed does not exist!")
        exit(0)

    # retrieve and parse feed using conditional GET method
    if not create:
        modified = datetime.timetuple(planet_feed.last_modified)
        etag = planet_feed.etag
        # update last checked datetime
        planet_feed.last_checked = datetime.now()
        planet_feed.save()
    else:
        modified = etag = None

    document = feedparser.parse(feed_url, agent=USER_AGENT,
                                modified=modified, etag=etag)

    current_site = Site.objects.get(pk=settings.SITE_ID)

    if create:
        # then create blog, feed, generator, feed links and feed tags
        title = document.feed.get("title", "--")
        subtitle = document.feed.get("subtitle")
        blog_url = document.feed.get("link")
        rights = document.feed.get("rights") or document.feed.get("license")
        info = document.feed.get("info")
        try:
            # Python 2: unicode() exists; Python 3 falls through to NameError.
            guid = unicode(md5(document.feed.get("link")).hexdigest())
        except NameError:
            guid = md5(document.feed.get("link").encode('utf-8')).hexdigest()
        image_url = document.feed.get("image", {}).get("href")
        icon_url = document.feed.get("icon")
        language = document.feed.get("language")
        etag = document.get("etag", '')
        updated_parsed = document.get("updated_parsed")
        if updated_parsed:
            last_modified = datetime.fromtimestamp(time.mktime(updated_parsed))
        else:
            last_modified = datetime.now()

        feed_links = document.feed.get("links", [])
        if not blog_url:
            # fall back to the feed's "alternate" link as the blog URL
            link = [item for item in feed_links if item["rel"] == "alternate"]
            if link:
                blog_url = link[0]["href"]

        User = get_user_model()
        try:
            owner = User.objects.get(pk=owner_id)
        except User.DoesNotExist:
            owner = None

        blog, created = Blog.objects.get_or_create(
            url=blog_url, defaults={"title": title}, owner=owner)

        generator_dict = document.feed.get("generator_detail", {})
        if generator_dict:
            generator, created = Generator.objects.get_or_create(
                name=generator_dict.get("name", "--"),
                link=generator_dict.get("link"),
                version=generator_dict.get("version"))
        else:
            generator = None

        if category_title:
            # TODO: site_objects!
            category = Category.objects.get(title=category_title)
        else:
            category = None

        planet_feed = Feed(title=title, subtitle=subtitle, blog=blog,
            url=feed_url, rights=rights, info=info, guid=guid,
            image_url=image_url, icon_url=icon_url, language=language,
            etag=etag, last_modified=last_modified, generator=generator,
            is_active=True, last_checked=datetime.now(), site=current_site,
            category=category
        )
        planet_feed.save()

        # NOTE(review): this loop reads each feed tag's term but stores
        # nothing; kept as-is to preserve behavior -- confirm whether feed
        # tags were meant to be persisted.
        for tag_dict in document.feed.get("tags", []):
            name = tag_dict.get("term")

        for link_dict in feed_links:
            feed_link, created = FeedLink.objects.get_or_create(
                feed=planet_feed,
                rel=link_dict.get("rel", "--"),
                mime_type=link_dict.get("type", "text/html"),
                link=link_dict.get("href", blog_url)
            )

    entries = []

    total_results = int(
        document.feed.get("opensearch_totalresults", len(document.entries)))
    items_per_page = int(document.feed.get("opensearch_itemsperpage", 25))
    new_posts_count = 0

    if total_results == 0:
        print("No entries to store. status: {} {}".format(
            document.get("status"), document.get("debug_message")))
    else:
        print("Entries total count: {}".format(total_results))
        stop_retrieving = False
        while (total_results > len(entries)) and not stop_retrieving:
            # retrieve and store feed posts
            entries.extend(document.entries)
            print("Processing {} entries".format(len(document.entries)))
            for entry in document.entries:
                title = entry.get("title", "")
                url = entry.get("link")
                try:
                    guid = unicode(md5(entry.get("link")).hexdigest())
                except NameError:
                    guid = md5(entry.get("link").encode('utf-8')).hexdigest()
                content = entry.get('description') or entry.get(
                    "content", [{"value": ""}])[0]["value"]
                comments_url = entry.get("comments")
                date_modified = entry.get("updated_parsed") or\
                    entry.get("published_parsed")
                try:
                    date_modified = datetime.fromtimestamp(
                        time.mktime(date_modified))
                except Exception:
                    # missing/invalid timestamp: fall back to the feed's
                    # last_modified or now
                    date_modified = planet_feed.last_modified or datetime.now()

                try:
                    if len(Post.objects.filter(url=url, guid=guid)):
                        raise PostAlreadyExists
                    post = Post(title=title, url=url, guid=guid,
                        content=content, comments_url=comments_url,
                        date_modified=date_modified, feed=planet_feed)
                    # To have the feed entry in the pre_save signal
                    post.entry = entry
                    post.save()
                except PostAlreadyExists:
                    print("Skipping post {} ({}) because already exists"
                        .format(guid, url))
                    if not create:
                        # if it is in update-mode then stop retrieving when
                        # it finds repeated posts
                        stop_retrieving = True
                else:
                    new_posts_count += 1

                    # create post tags...
                    for tag_dict in entry.get("tags", []):
                        tag_name = tag_dict.get(
                            "term") or tag_dict.get("label")
                        # FIX: guard against entries whose tag has neither
                        # "term" nor "label" -- normalize_tag(None) would
                        # raise AttributeError and abort the whole entry.
                        if not tag_name:
                            continue
                        tag_name = normalize_tag(tag_name)
                        if len(tag_name) > 50:
                            continue
                        try:
                            if "/" in tag_name:
                                # For path based categories
                                for subtag in tag_name.split("/"):
                                    if subtag:
                                        # empty string if starts/ends with
                                        # slash
                                        Tag.objects.add_tag(
                                            post, '"%s"' % subtag)
                            else:
                                Tag.objects.add_tag(post, '"%s"' % tag_name)
                        except AttributeError as e:
                            print("Ignoring tag error: {}".format(e))

                    # create post links...
                    for link_dict in entry.get("links", []):
                        post_link, created = PostLink.objects.get_or_create(
                            post=post,
                            rel=link_dict.get("rel", "--"),
                            mime_type=link_dict.get("type", "text/html"),
                            link=link_dict.get("href", "--"),
                            title=link_dict.get("title", "--")
                        )

                    # create and store enclosures...
                    if entry.get('media_thumbnail', False):
                        try:
                            # media_thumbnail may be a single object with a
                            # .href attribute or a list of dicts
                            media_url = entry.get('media_thumbnail').href
                            media_list = [{"url": media_url}]
                        except AttributeError:
                            media_list = entry.get(
                                'media_thumbnail', [{"url": None}])
                        for media in media_list:
                            media_url = media["url"]
                            mime_type, enc = mimetypes.guess_type(
                                urlparse(media_url).path)
                            post_enclosure, created = Enclosure.objects.get_or_create(
                                post=post,
                                length=0,
                                mime_type=mime_type,
                                link=media_url
                            )

                    for enclosure_dict in entry.get("enclosures", []):
                        post_enclosure = Enclosure(
                            post=post,
                            length=enclosure_dict.get("length", 0),
                            mime_type=enclosure_dict.get("type", ""),
                            link=enclosure_dict.get("href")
                        )
                        post_enclosure.save()

                    # create and store author...
                    author_dict = entry.get("author_detail")
                    if author_dict:
                        author, created = Author.objects.get_or_create(
                            name=author_dict.get("name", ""),
                            email=author_dict.get("email", ""),
                            profile_url=author_dict.get("href")
                        )
                        try:
                            PostAuthorData.objects.get(
                                author=author, post=post)
                        except PostAuthorData.DoesNotExist:
                            pad = PostAuthorData(author=author, post=post)
                            pad.save()

                    # create and store contributors...
                    for contributor_dict in entry.get("contributors", []):
                        # FIX: name/email were previously read from
                        # author_dict (copy-paste bug), which both attributed
                        # contributors to the wrong person and crashed with
                        # AttributeError when the entry had no author_detail.
                        contributor, created = Author.objects.get_or_create(
                            name=contributor_dict.get("name", ""),
                            email=contributor_dict.get("email", ""),
                            profile_url=contributor_dict.get("href")
                        )
                        try:
                            PostAuthorData.objects.get(
                                author=contributor, post=post)
                        except PostAuthorData.DoesNotExist:
                            pad = PostAuthorData(author=contributor,
                                post=post, is_contributor=True)
                            pad.save()

                    # We send a post_created signal
                    print('post_created.send(sender=post)', post)
                    post_created.send(sender=post, instance=post)

            if not stop_retrieving:
                # paginate through OpenSearch-style result pages
                opensearch_url = "{}?start-index={}&max-results={}".format(
                    feed_url, len(entries) + 1, items_per_page)

                print("retrieving {}...".format(opensearch_url))
                document = feedparser.parse(opensearch_url, agent=USER_AGENT)

    if new_posts_count:
        # update last modified datetime
        planet_feed.last_modified = datetime.now()
        planet_feed.save()
    print("{} posts were created. Done.".format(new_posts_count))

    return new_posts_count


@task(ignore_results=True)
def update_feeds():
    """
    Task for running on celery beat!

    CELERYBEAT_SCHEDULE = {
        'update_feeds': {
            'task': 'planet.tasks.update_feeds',
            'schedule': timedelta(hours=1)
        }
    }
    """
    for feed_url in Feed.site_objects.all().values_list("url", flat=True):
        print("Scheduling feed URL={}...".format(feed_url))
        process_feed.delay(feed_url, create=False)
    print("Done!")
    feeds_updated.send(sender=None, instance=None)
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import numpy as np from ..... import compat as cpt from .... import core from ....framework import IrGraph from ....framework import IrNode from .... import unique_name __all__ = [ 'QuantizationTransformPass', 'QuantizationFreezePass', 'ConvertToInt8Pass', 'TransformForMobilePass' ] def _init_var_node(var_node, value, scope, place): assert isinstance(value, np.ndarray), 'The type of value should be numpy array.' assert scope is not None, \ 'The scope cannot be set None.' assert place is not None, \ 'The place cannot be set None.' tensor = scope.var(var_node.name()).get_tensor() tensor.set(value, place) class QuantizationTransformPass(object): def __init__(self, scope=None, place=None, weight_bits=8, activation_bits=8, activation_quantize_type='abs_max', weight_quantize_type='abs_max', window_size=10000, moving_rate=0.9): """ Convert and rewrite the IrGraph according to weight and activation quantization type. Args: scope(fluid.Scope): When activation use 'range_abs_max' as the quantize type, this pass will create some new parameters. The scope is used to initialize these new parameters. place(fluid.CPUPlace|fluid.CUDAPlace): place is used to initialize new parameters described above. weight_bits (int): quantization bit number for weights, the bias is not quantized. activation_bits (int): quantization bit number for activation. 
activation_quantize_type (str): quantization type for activation, now support 'abs_max', 'range_abs_max' and 'moving_average_abs_max'. If use 'abs_max' mode, the quantization scale will be calculated dynamically each step in both training and testing period. If use 'range_abs_max', a static quantization scale will be calculated during training and used in inference. weight_quantize_type (str): quantization type for weights, support 'abs_max' and 'channel_wise_abs_max'. The 'range_abs_max' usually is not used for weight, since weights are fixed once the model is well trained. window_size (int): the window size for 'range_abs_max' quantization. Examples: .. code-block:: python # The original graph will be rewrite. import paddle.fluid as fluid from paddle.fluid.contrib.slim.quantization \ import QuantizationTransformPass from paddle.fluid.contrib.slim.graph import IrGraph from paddle.fluid import core graph = IrGraph(core.Graph(program.desc), for_test=False) place = fluid.CPUPlace() transform_pass = QuantizationTransformPass(fluid.global_scope(), place) transform_pass.apply(graph) """ self._scope = scope self._place = place self._weight_bits = weight_bits self._activation_bits = activation_bits quant_type = [ 'abs_max', 'channel_wise_abs_max', 'range_abs_max', 'moving_average_abs_max' ] assert activation_quantize_type != 'channel_wise_abs_max', "The activation quantization type does not support 'channel_wise_abs_max'." if activation_quantize_type not in quant_type: raise ValueError( "Unknown activation_quantize_type : '%s'. It can only be " "'abs_max' or 'range_abs_max' or 'moving_average_abs_max'." % (str(activation_quantize_type))) if weight_quantize_type not in quant_type: raise ValueError( "Unknown weight_quantize_type: '%s'. It can only be " "'abs_max' or 'channel_wise_abs_max' or 'range_abs_max' or 'moving_average_abs_max'." 
% (str(weight_quantize_type))) self._activation_quantize_type = activation_quantize_type self._weight_quantize_type = weight_quantize_type self._window_size = window_size self._moving_rate = moving_rate self._quantizable_ops = ['conv2d', 'depthwise_conv2d', 'mul'] self._conv_ops = ['conv2d', 'depthwise_conv2d'] self._quantizable_grad_ops = [ '%s_grad' % (op) for op in self._quantizable_ops ] self._is_test = None self._global_step = None def apply(self, graph): """ Quantize the graph for training process. According to weight and activation quantization type, the graph will be added some fake quantize operators and fake dequantize operators. Args: graph(IrGraph): the applied graph. """ assert isinstance(graph, IrGraph), 'graph must be the instance of IrGraph.' self._is_test = graph.is_test() # marked the variable which has been dequantized. dequantized_vars = collections.OrderedDict() persistable_vars = [p.name() for p in graph.all_persistable_nodes()] def _transform_forward(graph, op): for var_node in op.inputs: if var_node.name() not in op.input_arg_names(): continue if var_node.name() in dequantized_vars: dequant_var_node = dequantized_vars[var_node.name()] else: quant_bits = self._weight_bits if var_node.name() in persistable_vars \ else self._activation_bits quant_type = self._weight_quantize_type if var_node.name() \ in persistable_vars else self._activation_quantize_type if quant_type == 'channel_wise_abs_max': assert var_node.name( ) in persistable_vars, "'channel_wise_abs_max' can only be applied on weights." 
if op.name() in self._conv_ops: quant_var_node, scale_var_node = self._insert_channel_quant_op( graph, var_node, quant_bits) dequant_var_node = self._insert_channel_dequant_op( graph, quant_var_node, [scale_var_node], [quant_bits]) else: quant_var_node, scale_var_node = self._insert_quant_op( graph, var_node, quant_bits, 'abs_max') dequant_var_node = self._insert_dequant_op( graph, quant_var_node, scale_var_node, quant_bits) else: quant_var_node, scale_var_node = self._insert_quant_op( graph, var_node, quant_bits, quant_type) dequant_var_node = self._insert_dequant_op( graph, quant_var_node, scale_var_node, quant_bits) dequantized_vars[var_node.name()] = dequant_var_node graph.update_input_link(var_node, dequant_var_node, op) def _transform_backward(graph, op): no_dequanted_input_vars = True for var_node in op.inputs: if var_node.name() not in op.input_arg_names(): continue if var_node.name() in dequantized_vars: dequant_var_node = dequantized_vars[var_node.name()] graph.update_input_link(var_node, dequant_var_node, op) no_dequanted_input_vars = False if no_dequanted_input_vars: raise ValueError("There is no dequanted inputs for op %s." % (op.name())) if not self._is_test: self._create_global_step(graph) ops = graph.all_op_nodes() # The process of _transform_forward and _transform_backward is needed in two for loops. # The loop for transforming the forward graph: for op in ops: if op.name() in self._quantizable_ops: _transform_forward(graph, op) # The loop for renaming the inputs of backward op. 
for op in ops: if op.name() in self._quantizable_grad_ops: _transform_backward(graph, op) graph.resolve_hazard() return graph def _create_global_step(self, graph): if self._weight_quantize_type == 'range_abs_max' or \ self._activation_quantize_type == 'range_abs_max': counter_name = cpt.to_text('@STEP_COUNTER@') for node in graph.all_var_nodes(): if node.name() == counter_name: self._global_step = node if self._global_step is None: global_step_in = graph.create_persistable_node( name=counter_name, var_type=core.VarDesc.VarType.LOD_TENSOR, shape=[1], var_dtype=core.VarDesc.VarType.INT64) _init_var_node( global_step_in, np.zeros( [1], dtype='int64'), self._scope, self._place) global_step_out = graph.create_var_node_from_desc( global_step_in.var()) # The attribute of `op_role` is needed by ParallelExecutor. increment_op = graph.create_op_node( op_type='increment', attrs={ 'step': 1.0, 'op_role': core.op_proto_and_checker_maker.OpRole.Forward }, inputs={'X': global_step_in}, outputs={'Out': global_step_out}) graph.link_to(global_step_in, increment_op) graph.link_to(increment_op, global_step_out) self._global_step = global_step_out def _insert_quant_op(self, graph, var_node, quant_bits, quant_type): """ Insert fake_quantize_op in the graph. """ if quant_type == 'abs_max': return self._insert_quant_abs_max_op(graph, var_node, quant_bits) elif quant_type == 'range_abs_max': return self._insert_quant_range_abs_max_op(graph, var_node, quant_bits) elif quant_type == 'moving_average_abs_max': return self._insert_quant_moving_average_abs_max_op(graph, var_node, quant_bits) def _insert_quant_abs_max_op(self, graph, var_node, quant_bits): """ Insert fake_quantize_abs_max op in the graph. 
""" assert var_node.is_var(), '{} is not a var'.format(var_node.name()) quant_var_node = graph.create_var_node( name=self._quantized_var_name(var_node.name()), var_type=var_node.type(), shape=var_node.shape(), var_dtype=var_node.dtype()) scale_var_node = graph.create_var_node( name=self._quantized_scale_name(var_node.name()), var_type=var_node.type(), shape=[1], var_dtype=var_node.dtype()) quant_op_node = graph.create_op_node( op_type='fake_quantize_abs_max', attrs={ 'bit_length': quant_bits, 'op_role': core.op_proto_and_checker_maker.OpRole.Forward }, inputs={'X': var_node}, outputs={'Out': quant_var_node, 'OutScale': scale_var_node}) graph.link_to(var_node, quant_op_node) graph.link_to(quant_op_node, quant_var_node) graph.link_to(quant_op_node, scale_var_node) return quant_var_node, scale_var_node def _insert_quant_range_abs_max_op(self, graph, var_node, quant_bits): """ Insert fake_quantize_range_abs_max on the graph. """ assert var_node.is_var(), '{} is not a var'.format(var_node.name()) quant_var_node = graph.create_var_node( name=self._quantized_var_name(var_node.name()), var_type=var_node.type(), shape=var_node.shape(), var_dtype=var_node.dtype()) scale_in_node = graph.create_persistable_node( name=self._quantized_scale_name(var_node.name()), var_type=core.VarDesc.VarType.LOD_TENSOR, shape=[1], var_dtype=var_node.dtype()) data_type = 'float64' if var_node.dtype( ) == core.VarDesc.VarType.FP64 else 'float32' _init_var_node( scale_in_node, np.array( [0.001], dtype=data_type), self._scope, self._place) scale_out_node = graph.create_var_node_from_desc(scale_in_node.var()) inputs = {'X': var_node, 'InScale': scale_in_node} outputs = {'Out': quant_var_node, 'OutScale': scale_out_node} if not self._is_test: # The name of scales_var_node maybe 'scales_0', 'scales_1', etc. 
scales_node = graph.create_persistable_node( name=unique_name.generate('scales'), var_type=core.VarDesc.VarType.LOD_TENSOR, shape=[self._window_size], var_dtype=var_node.dtype()) data_type = 'float64' if var_node.dtype( ) == core.VarDesc.VarType.FP64 else 'float32' _init_var_node( scales_node, np.zeros( [self._window_size], dtype=data_type), self._scope, self._place) inputs['Iter'] = self._global_step outputs['OutScales'] = scales_node attrs = { 'window_size': self._window_size, 'bit_length': quant_bits, 'is_test': self._is_test, 'op_role': core.op_proto_and_checker_maker.OpRole.Forward } quant_op_node = graph.create_op_node( op_type='fake_quantize_range_abs_max', attrs=attrs, inputs=inputs, outputs=outputs) graph.link_to(var_node, quant_op_node) graph.link_to(scale_in_node, quant_op_node) graph.link_to(quant_op_node, quant_var_node) graph.link_to(quant_op_node, scale_out_node) if not self._is_test: graph.link_to(self._global_step, quant_op_node) graph.link_to(quant_op_node, scales_node) return quant_var_node, scale_out_node def _insert_quant_moving_average_abs_max_op(self, graph, var_node, quant_bits): """Insert fake_quantize_moving_average_abs_max """ quant_var_node = graph.create_var_node( name=self._quantized_var_name(var_node.name()), var_type=var_node.type(), shape=var_node.shape(), var_dtype=var_node.dtype()) scale_in_node = graph.create_persistable_node( name=self._quantized_scale_name(var_node.name()), var_type=core.VarDesc.VarType.LOD_TENSOR, shape=[1], var_dtype=var_node.dtype()) data_type = 'float64' if var_node.dtype( ) == core.VarDesc.VarType.FP64 else 'float32' _init_var_node( scale_in_node, np.array( [0.001], dtype=data_type), self._scope, self._place) scale_out_node = graph.create_var_node_from_desc(scale_in_node.var()) ins = {'X': var_node, 'InScale': scale_in_node} outs = {'Out': quant_var_node, 'OutScale': scale_out_node} if not self._is_test: state_in_node = graph.create_persistable_node( name=unique_name.generate('state'), 
var_type=core.VarDesc.VarType.LOD_TENSOR, var_dtype=var_node.dtype(), shape=[1]) data_type = 'float64' if var_node.dtype( ) == core.VarDesc.VarType.FP64 else 'float32' _init_var_node( scale_in_node, np.ones( [1], dtype=data_type), self._scope, self._place) accum_in_node = graph.create_persistable_node( name=unique_name.generate('accum'), var_type=core.VarDesc.VarType.LOD_TENSOR, var_dtype=var_node.dtype(), shape=[1]) _init_var_node( accum_in_node, np.ones( [1], dtype=data_type), self._scope, self._place) state_out_node = graph.create_var_node_from_desc(state_in_node.var( )) accum_out_node = graph.create_var_node_from_desc(accum_in_node.var( )) ins['InState'] = state_in_node ins['InAccum'] = accum_in_node outs['OutState'] = state_out_node outs['OutAccum'] = accum_out_node attrs = { 'bit_length': quant_bits, 'moving_rate': self._moving_rate, 'is_test': self._is_test, 'op_role': core.op_proto_and_checker_maker.OpRole.Forward } quant_op_node = graph.create_op_node( op_type='fake_quantize_moving_average_abs_max', attrs=attrs, inputs=ins, outputs=outs) graph.link_to(var_node, quant_op_node) graph.link_to(scale_in_node, quant_op_node) graph.link_to(quant_op_node, quant_var_node) graph.link_to(quant_op_node, scale_out_node) if not self._is_test: graph.link_to(state_in_node, quant_op_node) graph.link_to(accum_in_node, quant_op_node) graph.link_to(quant_op_node, state_out_node) graph.link_to(quant_op_node, accum_out_node) return quant_var_node, scale_out_node def _insert_channel_quant_op(self, graph, var_node, quant_bits): """ Insert fake_channel_wise_quantize_abs_max op in the graph. 
""" assert var_node.is_var(), '{} is not a var'.format(var_node.name()) quant_var_node = graph.create_var_node( name=self._quantized_var_name(var_node.name()), var_type=var_node.type(), shape=var_node.shape(), var_dtype=var_node.dtype()) scale_var_node = graph.create_var_node( name=self._quantized_scale_name(var_node.name()), var_type=var_node.type(), shape=[var_node.shape()[0]], var_dtype=var_node.dtype()) quant_op_node = graph.create_op_node( op_type='fake_channel_wise_quantize_abs_max', attrs={ 'bit_length': quant_bits, 'op_role': core.op_proto_and_checker_maker.OpRole.Forward }, inputs={'X': var_node}, outputs={'Out': quant_var_node, 'OutScale': scale_var_node}) graph.link_to(var_node, quant_op_node) graph.link_to(quant_op_node, quant_var_node) graph.link_to(quant_op_node, scale_var_node) return quant_var_node, scale_var_node def _insert_dequant_op(self, graph, var_node, scale_var_node, quant_bits): """ Insert fake_dequantize_op in the graph. """ assert var_node.is_var(), '{} is not a var'.format(var_node.name()) dequant_var_node = graph.create_var_node( name=self._dequantized_var_name(var_node.name()), var_type=var_node.type(), shape=var_node.shape(), var_dtype=var_node.dtype()) max_range = (1 << (quant_bits - 1)) - 1 dequant_op_node = graph.create_op_node( op_type='fake_dequantize_max_abs', attrs={ 'max_range': float(max_range), 'op_role': core.op_proto_and_checker_maker.OpRole.Forward }, inputs={'X': var_node, 'Scale': scale_var_node}, outputs={'Out': dequant_var_node}) graph.link_to(var_node, dequant_op_node) graph.link_to(scale_var_node, dequant_op_node) graph.link_to(dequant_op_node, dequant_var_node) return dequant_var_node def _insert_channel_dequant_op(self, graph, var_node, scale_var_nodes, quant_bits): """ Insert fake_channel_wise_dequantize_max_abs in the graph. 
""" assert var_node.is_var(), '{} is not a var'.format(var_node.name()) dequant_var_node = graph.create_var_node( name=self._dequantized_var_name(var_node.name()), var_type=var_node.type(), shape=var_node.shape(), var_dtype=var_node.dtype()) dequant_op_node = graph.create_op_node( op_type='fake_channel_wise_dequantize_max_abs', attrs={ 'quant_bits': quant_bits, 'op_role': core.op_proto_and_checker_maker.OpRole.Forward }, inputs={'X': var_node, 'Scales': scale_var_nodes}, outputs={'Out': dequant_var_node}) graph.link_to(var_node, dequant_op_node) for scale_n in scale_var_nodes: graph.link_to(scale_n, dequant_op_node) graph.link_to(dequant_op_node, dequant_var_node) return dequant_var_node def _quantized_var_name(self, var_name): """ Return quantized variable name for the input `var_name`. """ return "%s.quantized" % (var_name) def _dequantized_var_name(self, var_name): """ Return dequantized variable name for the input `var_name`. """ return "%s.dequantized" % (var_name) def _quantized_scale_name(self, var_name): """ Return the scale name of quantized variable for the input `var_name`. """ return "%s.scale" % (var_name) class QuantizationFreezePass(object): """ The freeze pass is used to adjust the quantize operator order, for example: 1) `activation -> quant -> dequant -> conv2d` will be freezed into `activation -> quant -> conv2d -> dequant` 2) `weight -> quant -> dequant -> conv2d` will be freezed into `weight -> conv2d`, and weight will be sacled offline. Args: scope(fluid.Scope): scope is used to get the weight tensor values. place(fluid.CPUPlace|fluid.CUDAPlace): place is used to restore the weight tensors. weight_bits (int): quantization bit number for weights. activation_bits (int): quantization bit number for activation. weight_quantize_type (str): quantization type for weights, support 'abs_max' and 'channel_wise_abs_max'. The 'range_abs_max' usually is not used for weight, since weights are fixed once the model is well trained. 
""" def __init__(self, scope, place, weight_bits=8, activation_bits=8, weight_quantize_type='abs_max'): assert scope is not None, \ 'The scope cannot be set None.' assert place is not None, \ 'The place cannot be set None.' self._scope = scope self._place = place self._weight_bits = weight_bits self._activation_bits = activation_bits self._weight_quantize_type = weight_quantize_type self._quantizable_ops = ['conv2d', 'depthwise_conv2d', 'mul'] self._conv_ops = ['conv2d', 'depthwise_conv2d'] self._fake_quant_op_names = [ 'fake_quantize_abs_max', 'fake_quantize_range_abs_max', 'fake_quantize_moving_average_abs_max', 'fake_channel_wise_quantize_abs_max' ] self._fake_dequant_op_names = [ 'fake_dequantize_max_abs', 'fake_channel_wise_dequantize_max_abs' ] self._op_input_rename_map = collections.OrderedDict() self._op_output_rename_map = collections.OrderedDict() self._var_scale_map = collections.OrderedDict() def apply(self, graph): """ Adjust quantize/dequantize operators order for the inference process. Args: graph(IrGraph): the applied graph. 
""" persistable_vars = [p.name() for p in graph.all_persistable_nodes()] ops = graph.all_op_nodes() for op_node in ops: op_name = op_node.name() if op_name in self._fake_quant_op_names: input_arg_name = op_node.input('X')[0] if input_arg_name in persistable_vars: if self._weight_quantize_type == 'abs_max': param = self._load_var(input_arg_name) scale_v = np.max(np.abs(param)) elif self._weight_quantize_type == 'channel_wise_abs_max': param = self._load_var(input_arg_name) if len(param.shape) == 4: # conv2d or depthwise_conv2d scale_v = [] for i in range(param.shape[0]): scale_v.append(np.max(np.abs(param[i]))) else: scale_v = np.max(np.abs(param)) else: scale_v = self._load_var( op_node.output('OutScale')[0])[0] self._var_scale_map[input_arg_name] = scale_v self._remove_fake_quant_and_dequant_op(graph, op_node) # quantize weight and restore param_v = self._load_var(input_arg_name) quantized_param_v = self._quant(param_v, scale_v, self._weight_bits) self._restore_var(input_arg_name, quantized_param_v) else: scale_v = graph._find_node_by_name( op_node.outputs, op_node.output('OutScale')[0]) self._var_scale_map[input_arg_name] = scale_v ops = graph.all_op_nodes() for op_node in ops: op_name = op_node.name() if op_name in self._fake_dequant_op_names: self._remove_fake_quant_and_dequant_op(graph, op_node) ops = graph.all_op_nodes() for op_node in ops: op_name = op_node.name() if op_name in self._quantizable_ops: if self._weight_quantize_type == 'channel_wise_abs_max' and op_name in self._conv_ops: self._insert_post_channel_dequant_op(graph, op_node) else: self._insert_post_dequant_op(graph, op_node) for op_node in ops: # insert dequant_op after fc/conv, need to rename inputs of the followed ops for var_node in op_node.inputs: if var_node.node in self._op_output_rename_map: old_in = var_node new_in = self._op_output_rename_map[var_node.node] graph.update_input_link(old_in, new_in, op_node) # remove the unused var node in the graph self._remove_unused_var_nodes(graph) 
graph.resolve_hazard() return graph def _remove_fake_quant_and_dequant_op(self, graph, op_node): k = graph._find_node_by_name(op_node.outputs, op_node.output('Out')[0]) v = graph._find_node_by_name(op_node.inputs, op_node.input('X')[0]) if v.node not in self._op_input_rename_map: self._op_input_rename_map[k.node] = v else: self._op_input_rename_map[k.node] = self._op_input_rename_map[ v.node] graph.safe_remove_nodes(op_node) def _insert_post_channel_dequant_op(self, graph, op_node): persistable_vars = [p.name() for p in graph.all_persistable_nodes()] for var_node in op_node.inputs: name = var_node.name() if name not in op_node.input_arg_names(): continue if var_node.node in self._op_input_rename_map: old_in = var_node new_in = self._op_input_rename_map[var_node.node] new_in.clear_outputs() graph.update_input_link(old_in, new_in, op_node) original_var_name = self._original_var_name(name) scale_v = self._var_scale_map[original_var_name] if original_var_name in persistable_vars: assert isinstance( scale_v, list), 'The scale of parameter %s is not a list.' % ( original_var_name) channel_scale = np.array(scale_v) else: assert isinstance(scale_v, IrNode) scale_var_node = self._var_scale_map[original_var_name] if len(op_node.output_arg_names()) != 1: raise ValueError("Only support one output, but op %s has" " more than one output." 
% (op_node.name())) output_var_node = graph._find_node_by_name( op_node.outputs, op_node.output_arg_names()[0]) weight_scale_node = graph.create_persistable_node( name=unique_name.generate('channel_scale'), var_type=core.VarDesc.VarType.LOD_TENSOR, shape=[channel_scale.shape[0]], var_dtype=output_var_node.dtype()) data_type = 'float64' if output_var_node.dtype( ) == core.VarDesc.VarType.FP64 else 'float32' _init_var_node(weight_scale_node, channel_scale.astype(data_type), self._scope, self._place) dequant_var_node = graph.create_var_node( name=self._dequantized_var_name(output_var_node.name()), var_type=output_var_node.type(), shape=output_var_node.shape(), var_dtype=output_var_node.dtype()) dequant_op_node = graph.create_op_node( op_type='fake_channel_wise_dequantize_max_abs', attrs={ 'quant_bits': [self._weight_bits, self._activation_bits], 'op_role': core.op_proto_and_checker_maker.OpRole.Forward }, inputs={ 'X': output_var_node, 'Scales': [weight_scale_node, scale_var_node] }, outputs={'Out': dequant_var_node}) graph.link_to(output_var_node, dequant_op_node) graph.link_to(scale_var_node, dequant_op_node) graph.link_to(weight_scale_node, dequant_op_node) graph.link_to(dequant_op_node, dequant_var_node) self._op_output_rename_map[output_var_node.node] = dequant_var_node return dequant_var_node def _insert_post_dequant_op(self, graph, op_node): persistable_vars = [p.name() for p in graph.all_persistable_nodes()] for var_node in op_node.inputs: name = var_node.name() if name not in op_node.input_arg_names(): continue if var_node.node in self._op_input_rename_map: old_in = var_node new_in = self._op_input_rename_map[var_node.node] new_in.clear_outputs() graph.update_input_link(old_in, new_in, op_node) original_var_name = self._original_var_name(name) scale_v = self._var_scale_map[original_var_name] if original_var_name in persistable_vars: param_range = (1 << (self._weight_bits - 1)) - 1 act_range = (1 << (self._activation_bits - 1)) - 1 assert self._is_float( 
scale_v), 'The scale of parameter %s is not a float.' % ( original_var_name) max_range = param_range * act_range / scale_v else: assert isinstance(scale_v, IrNode) scale_var_node = self._var_scale_map[original_var_name] if len(op_node.output_arg_names()) != 1: raise ValueError("Only support one output, but op %s has" " more than one output." % (op_node.name())) output_var_node = graph._find_node_by_name( op_node.outputs, op_node.output_arg_names()[0]) dequant_var_node = graph.create_var_node( name=self._dequantized_var_name(output_var_node.name()), var_type=output_var_node.type(), shape=output_var_node.shape(), var_dtype=output_var_node.dtype()) dequant_op_node = graph.create_op_node( op_type='fake_dequantize_max_abs', attrs={ 'max_range': float(max_range), 'op_role': core.op_proto_and_checker_maker.OpRole.Forward }, inputs={'X': output_var_node, 'Scale': scale_var_node}, outputs={'Out': dequant_var_node}) graph.link_to(output_var_node, dequant_op_node) graph.link_to(scale_var_node, dequant_op_node) graph.link_to(dequant_op_node, dequant_var_node) self._op_output_rename_map[output_var_node.node] = dequant_var_node return dequant_var_node def _load_var(self, name): return np.array(self._scope.find_var(name).get_tensor()) def _restore_var(self, name, array): tensor = self._scope.find_var(name).get_tensor() tensor.set(array, self._place) def _remove_unused_var_nodes(self, graph): all_used_vars = set() ops = graph.all_op_nodes() for op_node in ops: for input_node in op_node.inputs: all_used_vars.add(input_node) for output_node in op_node.outputs: all_used_vars.add(output_node) all_used_vars = {n.node for n in all_used_vars} all_unused_vars = { n for n in filter(lambda node: node.node not in all_used_vars, graph.all_var_nodes()) } graph.safe_remove_nodes(all_unused_vars) def _original_var_name(self, var_name): """ Return the original variable name. 
""" if var_name.endswith('.quantized.dequantized'): return var_name[:-len('.quantized.dequantized')] if var_name.endswith('.quantized'): return var_name[:-len('.quantized')] if var_name.endswith('.dequantized'): return var_name[:-len('.dequantized')] if var_name.endswith('.scale'): return var_name[:-len('.scale')] else: return var_name def _dequantized_var_name(self, var_name): """ Return dequantized variable name for the input `var_name`. """ return "%s.dequantized" % (var_name) def _is_float(self, v): return isinstance(v, float) or isinstance(v, np.float32) \ or isinstance(v, np.float64) def _quant(self, x, scale, num_bits): if isinstance(scale, list): for i, s in enumerate(scale): x[i] = np.round(x[i] / s * ((1 << (num_bits - 1)) - 1)) return x else: return np.round(x / scale * ((1 << (num_bits - 1)) - 1)) class ConvertToInt8Pass(object): """ Convert the weights into int8_t type. Args: scope(fluid.Scope): scope is used to get the weight tensor values. place(fluid.CPUPlace|fluid.CUDAPlace): place is used to restore the 8bits weight tensors. """ def __init__(self, scope, place): assert scope is not None, \ 'The scope cannot be set None.' assert place is not None, \ 'The place cannot be set None.' self._scope = scope self._place = place self._quantizable_ops = ['conv2d', 'depthwise_conv2d', 'mul'] def apply(self, graph): """ Convert weights' tpye of the graph. After that, the data type of the graph weigths is int8_t. Args: graph(IrGraph): the applied graph. 
""" persistable_vars = [p.name() for p in graph.all_persistable_nodes()] ops = graph.all_op_nodes() input_map = {} for op_node in ops: op_name = op_node.name() if op_name in self._quantizable_ops: for var_node in op_node.inputs: name = var_node.name() if name in persistable_vars: if name not in input_map: int8_var_node = self._convert_to_int8(graph, var_node) input_map[name] = int8_var_node graph.update_input_link(var_node, input_map[name], op_node) # remove the unused var node in the graph self._remove_unused_var_nodes(graph) graph.resolve_hazard() return graph def _convert_to_int8(self, graph, var_node): int8_var_node_name = var_node.name() + ".int8" int8_var_node = graph.create_persistable_node( name=cpt.to_text(int8_var_node_name), var_type=var_node.type(), shape=var_node.shape(), var_dtype=core.VarDesc.VarType.INT8) array = self._load_var(var_node.name()) self._scope.var(int8_var_node_name) self._store_var(int8_var_node_name, array, np.int8) return int8_var_node def _load_var(self, name): return np.array(self._scope.find_var(name).get_tensor()) def _store_var(self, name, array, dtype): tensor = self._scope.find_var(name).get_tensor() tensor.set(array.astype(dtype), self._place) def _remove_unused_var_nodes(self, graph): all_used_vars = set() ops = graph.all_op_nodes() for op_node in ops: for input_node in op_node.inputs: all_used_vars.add(input_node) for output_node in op_node.outputs: all_used_vars.add(output_node) all_used_vars = {n.node for n in all_used_vars} all_unused_vars = { n for n in filter(lambda node: node.node not in all_used_vars, graph.all_var_nodes()) } graph.safe_remove_nodes(all_unused_vars) class TransformForMobilePass(object): """ This pass is used to convert the freezed graph for paddle-mobile execution. 
""" def __init__(self): self._fake_quant_op_names = [ 'fake_quantize_abs_max', 'fake_quantize_range_abs_max', 'fake_quantize_moving_average_abs_max', 'fake_channel_wise_quantize_abs_max' ] self._fake_dequant_op_names = [ 'fake_dequantize_max_abs', 'fake_channel_wise_dequantize_max_abs' ] def apply(self, graph): """ Because paddle-mobile use `quantize` an `dequantize` as the names of quantize operator and dequantize operator, the `apply` function just realize this logic. Args: graph(IrGraph): the graph will be transformed. """ ops = graph.all_op_nodes() for op_node in ops: name = op_node.name() if name in self._fake_quant_op_names: op_node.set_type('quantize') quant_node = graph.create_op_node_from_desc(op_node.op()) for input_node in op_node.inputs: graph.link_to(input_node, quant_node) for output_node in op_node.outputs: graph.link_to(quant_node, output_node) graph.safe_remove_nodes(op_node) if name in self._fake_dequant_op_names: op_node.set_type('dequantize') dequant_node = graph.create_op_node_from_desc(op_node.op()) for input_node in op_node.inputs: graph.link_to(input_node, dequant_node) for output_node in op_node.outputs: graph.link_to(dequant_node, output_node) graph.safe_remove_nodes(op_node) graph.resolve_hazard() return graph
# -*- coding: utf-8 -*- """ :mod:`pambox.distort` regroups various types of distortions and processings that can be applied to signals. """ from __future__ import absolute_import, division, print_function import numpy as np import scipy as sp from six.moves import zip from scipy.io import wavfile from pambox import utils from pambox.utils import fftfilt, hilbert import six try: _ = np.use_fastnumpy # MKL FFT optimizations from Enthought. from numpy.fft import fft, ifft, rfft, irfft except AttributeError: try: import mklfft # MKL FFT optimizations from Continuum Analytics from numpy.fft import fft, ifft, rfft, irfft except ImportError: from scipy.fftpack import fft, ifft from numpy.fft import rfft, irfft def mix_noise(clean, noise, sent_level, snr=None): """Mix a signal signal noise at a given signal-to-noise ratio. Parameters ---------- clean : ndarray Clean signal. noise : ndarray Noise signal. sent_level : float Sentence level, in dB SPL. snr : Signal-to-noise ratio at which to mix the signals, in dB. If snr is `None`, no noise is mixed with the signal (Default value = None) Returns ------- tuple of ndarrays Returns the clean signal, the mixture, and the noise. """ # Pick a random section of the noise n_clean = len(clean) n_noise = len(noise) if n_noise > n_clean: start_idx = np.random.randint(n_noise - n_clean) noise = noise[start_idx:start_idx + n_clean] if snr is not None: # Get speech level and set noise level accordingly # clean_level = utils.dbspl(clean) # noise = utils.setdbspl(noise, clean_level - snr) noise = noise / utils.rms(noise) * 10 ** ((sent_level - snr) / 20) mix = clean + noise else: mix = clean return clean, mix, noise def phase_jitter(x, a): """ Apply phase jitter to a signal. The expression of phase jitter is: .. math:: y(t) = s(t) * cos(\Phi(t)), where :math:`\Phi(t)` is a random process uniformly distributed over :math:`[0, 2\pi\\alpha]`. 
    The effect of the jitter when :math:`\\alpha` is 0.5 or 1 is to
    completely destroy the carrier signal, effectively yielding modulated
    white noise.

    Parameters
    ----------
    x : ndarray
        Signal
    a : float
        Phase jitter parameter, typically between 0 and 1, but it can be
        anything.

    Returns
    -------
    ndarray
        Processed signal of the same dimension as the input signal.
    """
    # One independent uniform phase sample per input sample, scaled by `a`.
    n = len(x)
    return x * np.cos(2 * np.pi * a * np.random.random_sample(n))


def reverb(x, rt):
    """ Applies reverberation to a signal.

    NOTE(review): not implemented — this stub returns None.

    Parameters
    ----------
    x : ndarray
        Input signal.
    rt : float
        Reverberation time

    Returns
    -------
    ndarray
        Processed signal.
    """
    pass


def spec_sub(x, noise, factor, w=1024 / 2., padz=1024 / 2., shift_p=0.5):
    """ Apply spectral subtraction to a signal.

    The default values of the parameters are typical for a sampling
    frequency of 44100 Hz. Note that (W+padz) is the final frame window and
    hence the fft length (it is normally chosen as a power of 2).

    Parameters
    ----------
    x : ndarray
        Input signal
    noise : ndarray
        Input noise signal
    factor : float
        Noise subtraction factor, must be larger than 0.
    w : int
        Frame length, in samples. (Default value = 1024 / 2.)
    padz : int
        Zero padding (pad with padz/2 from the left and the right)
        (Default value = 1024 / 2.)
    shift_p : float
        Shift percentage (overlap) between each window, in fraction of the
        window size (Default value = 0.5)

    Returns
    -------
    clean_estimate : ndarray
        Estimate of the clean signal.
    noise_estimate : ndarray
        Estimate of the noisy signal.
    """
    # np.hanning(w + 2) includes the zero endpoints; trimming them leaves a
    # length-w window with non-zero ends.
    wnd = np.hanning(w + 2)  # create hanning window with length = W
    wnd = wnd[1:-1]

    # Stack clean and noise so both are framed/processed identically.
    stim = np.vstack((x, noise))

    len_signal = stim.shape[-1]  # Signal length
    shift_p_indexes = np.floor(w * shift_p)
    n_segments = np.floor((len_signal - w) / shift_p_indexes + 1)
    len_segment = w + padz * 2 * shift_p
    # NOTE(review): n_segments/len_segment are floats here; np.empty with
    # float shapes only works on old numpy — TODO confirm target version.
    y = np.empty((2, n_segments, len_segment))
    # Initialize arrays for spectral subtraction. Use only positive
    # frequencies.
    Y_hat = np.empty((n_segments, len_segment / 2 + 1))
    PN_hat = Y_hat.copy()

    # For each signal (index 0 = clean+noise mixture input, 1 = noise)
    for k in range(2):
        # CUT THE APPROPRIATE SIGNAL FRAMES
        # Build an (n_segments, w) index matrix: row i selects the samples of
        # frame i, shifted by i * shift_p_indexes.
        indexes = np.tile(np.arange(w), (n_segments, 1))
        index_shift = np.arange(n_segments) * shift_p_indexes
        indexes = indexes + index_shift[:, np.newaxis]
        y_tmp = stim[k]
        y_tmp = y_tmp[indexes.astype('int')] * wnd
        # PAD WITH ZEROS
        pad = np.zeros((n_segments, padz / 2))
        y_pad = np.hstack((pad, y_tmp, pad))
        y[k, :, :] = y_pad

    # FREQUENCY DOMAIN
    # signal:
    Y = fft(y[0])
    # YY = Y(1:round(end/2)+1,:);  # Half window (exploit the symmetry)
    YY = Y[:, :(len_segment / 2 + 1)]  # Half window (exploit the symmetry)
    YPhase = np.angle(YY)  # Phase
    Y1 = np.abs(YY)  # Spectrum
    Y2 = Y1 ** 2  # Power Spectrum

    # noise:
    Y_N = fft(y[1])
    YY_N = Y_N[:, :(len_segment / 2 + 1)]  # Half window (exploit the symmetry)
    Y_NPhase = np.angle(YY_N)  # Phase
    Y_N1 = np.abs(YY_N)  # Spectrum
    Y_N2 = Y_N1 ** 2  # Power Spectrum

    # The noise "estimate" is simply the average of the noise power
    # spectral density in the frame:
    P_N = Y_N2.mean(axis=-1)
    Y_hat = Y2 - factor * P_N[:, np.newaxis]  # subtraction
    Y_hat = np.maximum(Y_hat, 0)  # Make the minima equal zero
    PN_hat = Y_N2 - factor * P_N[:, np.newaxis]  # subtraction for noise alone
    # PN_hat = np.maximum(PN_hat, 0)
    # Zero the noise estimate wherever the signal estimate was floored.
    PN_hat[Y_hat == 0] = 0

    # Zero out the DC and first bin of both estimates.
    Y_hat[0:2, :] = 0
    PN_hat[0:2, :] = 0

    # Combining the estimated power spectrum with the original noisy phase,
    # and add the frames using an overlap-add technique
    output_Y = overlap_and_add(np.sqrt(Y_hat), YPhase, (w + padz),
                               shift_p * w)
    output_N = overlap_and_add(np.sqrt(PN_hat.astype('complex')), Y_NPhase,
                               (w + padz), shift_p * w)

    return output_Y, output_N


def overlap_and_add(powers, phases, len_window, shift_size):
    """Reconstruct a signal with the overlap and add method.

    Parameters
    ----------
    powers : ndarray
        Magnitude of the power spectrum of the signal to reconstruct.
    phases : ndarray
        Phase of the signal to reconstruct.
    len_window : int
        Frame length, in samples.
    shift_size : int
        Shift length. For non overlapping signals, in would equal
        `len_window`. For 50% overlapping signals, it would be
        `len_window/2`.

    Returns
    -------
    ndarray
        Reconstructed time-domain signal.
    """
    len_window = int(len_window)
    shift_size = int(shift_size)
    n_frames, len_frame = powers.shape
    # Recombine magnitude and phase into the positive-frequency spectrum.
    spectrum = powers * np.exp(1j * phases)
    # NOTE(review): this first `signal` allocation is dead code — it is
    # unconditionally overwritten below before any use.
    signal = np.zeros(n_frames * shift_size + len_window - shift_size)

    # Create full spectrum, by joining conjugated positive spectrum
    if len_window % 2:
        # Odd-length window: mirror everything except the DC bin.
        spectrum = np.hstack((spectrum,
                              np.conj(np.fliplr(spectrum[:, 1:]))))
    else:
        # Even-length window: mirror everything except the DC and FS/2 bins.
        spectrum = np.hstack((spectrum,
                              np.conj(np.fliplr(spectrum[:, 1:-1]))))

    signal = np.zeros((n_frames - 1) * shift_size + len_window)

    # Inverse-transform each frame and add it at its hop position.
    for i_frame, hop in enumerate(range(0,
                                        len(signal) - int(len_window) + 1,
                                        int(shift_size))):
        signal[hop:hop + len_window] \
            += np.real(ifft(spectrum[i_frame], len_window))
    return signal


class WestermannCrm(object):
    """Applies HRTF and BRIR for a given target and masker distance.

    Parameters
    ----------
    fs : int
        Sampling frequency of the process. (Default value = 40000)

    Attributes
    ----------
    brir : dict
        Binaural room impulse responses for each distance.
    delays : dict
        Delay until the first peak in the BRIR for each distance.
    dist : ndarray
        List of the valid distances (0.5, 2, 5, and 10 meters).

    References
    ----------
    .. [1] A. Westermann and J. M. Buchholz: Release from masking through
       spatial separation in distance in hearing impaired listeners.
       Proceedings of Meetings on Acoustics 19 (2013) 050156.
""" def __init__(self, fs=40000): self.dist = np.asarray([0.5, 2, 5, 10]) self.fs = fs self.brir = self._load_brirs() self.delays = self._find_delay() def _load_brirs(self): """Loads BRIRs from file.""" brirs = {} for d in self.dist: fname = '../stimuli/crm/brirs_{fs}/aud{d_str}m.wav'.format( fs=self.fs, d_str=self._normalize_fname(d) ) wav = wavfile.read(fname) brirs[d] = np.array(wav[1].astype('float') / 2. ** 15).T return brirs def _find_delay(self): """Calculates the delay of the direct sound, in samples.""" delays = {} for k, v in six.iteritems(self.brir): x = np.mean(v, axis=0) delays[k] = np.abs(x).argmax() return delays @staticmethod def _normalize_fname(d): """ Parameters ---------- d : float Returns ------- """ if d > 1: d_str = str('%d' % d) else: d_str = str(d).replace('.', '') return d_str def _load_eqfilt(self, tdist, mdist): """ Returns the equalization filter for the pair of target and masker. Parameters ---------- tdist : float Target distance in meters. Must be in the set (0.5, 2, 5, 10). mdist : Masker distance in meters. Must be in the set (0.5, 2, 5, 10). Returns ------- ndarray Equalization filter. """ eqfilt_name = 't{}m_m{}m.mat'.format(self._normalize_fname(tdist), self._normalize_fname(mdist)) eqfilt_path = '../stimuli/crm/eqfilts_{}/{}'.format(self.fs, eqfilt_name) try: eqfilt = sp.io.loadmat(eqfilt_path, squeeze_me=True) except IOError: raise IOError('Cannot file file %s' % eqfilt_path) return eqfilt def apply(self, x, m, tdist, mdist, align=True): """Applies the "Westermann" distortion to a target and masker. target and masker are not co-located, the masker is equalized before applying the BRIR, so that both the target and masker will have the same average spectrum after the BRIR filtering. By default, the delay introduced by the BRIR is compensated for, such that the maxiumum of the BRIR happen simulatenously. Parameters ---------- x : ndarray Mono clean speech signal of length `N`. m : ndarray Mono masker signal of length `N`. 
        tdist : float
            Target distance, in meters.
        mdist : float
            Masker distance, in meters.
        align : bool
            Compensate for the delay in the BRIRs with distance (default is
            `True`).

        Returns
        -------
        mix : (2, N) ndarray
            Mixture processed by the BRIRs.
        noise : (2, N)
            Noise alone processed by the BRIRs.
        """
        if tdist not in self.dist or mdist not in self.dist:
            raise ValueError('The distance values are incorrect.')

        # NOTE(review): n_orig is never used below.
        n_orig = x.shape[-1]
        # Filter target with BRIR only
        out_x = np.asarray([fftfilt(b, x) for b in self.brir[tdist]])

        # Equalized masker and then apply the BRIR
        if tdist == mdist:
            # Co-located: no equalization, just duplicate to both channels.
            m = [m, m]
        else:
            eqfilt = self._load_eqfilt(tdist, mdist)
            # 'bl'/'br' are the left/right equalization filters from the .mat
            # file — TODO confirm key names against the eqfilt generator.
            m = [fftfilt(b, m) for b in [eqfilt['bl'], eqfilt['br']]]

        out_m = np.asarray([fftfilt(b, chan) for b, chan
                            in zip(self.brir[mdist], m)])

        if align:
            i_x, i_m = self._calc_aligned_idx(tdist, mdist)
        else:
            i_x = 0
            i_m = 0

        # Pad with zeros if necessary, so that the lengths stay the same
        out_x, out_m = utils.make_same_length(out_x[:, i_x:],
                                              out_m[:, i_m:])
        return out_x, out_m

    def _calc_aligned_idx(self, tdist, mdist):
        """Calculates the index of the required delay to align the max of
        the BRIRs

        Parameters
        ----------
        tdist : float, distance to target, in meters
        mdist : float, distance to masker, in meters

        Returns
        -------
        i_x : int
            Index of earliest peak in the signal.
        i_m : int
            Index of the earliest peak in the maskers.
        """
        # location of earliest peak: argmin is 1 when the masker BRIR peaks
        # first, 0 otherwise.  The signal with the later peak is trimmed by
        # the delay difference so both peaks coincide.
        m_is_shortest = np.argmin([self.delays[tdist], self.delays[mdist]])
        if m_is_shortest:
            i_x = self.delays[tdist] - self.delays[mdist]
            i_m = 0
        else:
            i_x = 0
            i_m = self.delays[mdist] - self.delays[tdist]
        return i_x, i_m


def noise_from_signal(x, fs=40000, keep_env=False):
    """Create a noise with same spectrum as the input signal.

    Parameters
    ----------
    x : array_like
        Input signal.
    fs : int
        Sampling frequency of the input signal. (Default value = 40000)
    keep_env : bool
        Apply the envelope of the original signal to the noise.
        (Default value = False)

    Returns
    -------
    ndarray
        Noise signal.
    """
    x = np.asarray(x)
    n_x = x.shape[-1]
    n_fft = utils.next_pow_2(n_x)

    # NOTE(review): next_pow_2 is applied to n_fft again here, which is
    # already a power of two — presumably a harmless no-op; confirm.
    X = rfft(x, utils.next_pow_2(n_fft))
    # Randomize phase: keep the magnitude spectrum, draw uniform phases.
    noise_mag = np.abs(X) * np.exp(
        2 * np.pi * 1j * np.random.random(X.shape[-1]))
    noise = np.real(irfft(noise_mag, n_fft))
    # Trim the zero-padding back to the original signal length.
    out = noise[:n_x]

    if keep_env:
        # Impose the original temporal envelope (Hilbert magnitude,
        # low-pass filtered at 50 Hz) onto the noise.
        env = np.abs(hilbert(x))
        [bb, aa] = sp.signal.butter(6, 50 / (fs / 2))  # 50 Hz LP filter
        env = sp.signal.filtfilt(bb, aa, env)
        out *= env

    return out
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests dense attention layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.eager import context from tensorflow.python.keras import combinations from tensorflow.python.keras import testing_utils from tensorflow.python.keras.layers import core from tensorflow.python.keras.layers import dense_attention from tensorflow.python.keras.mixed_precision import policy from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.platform import test @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class BaseDenseAttentionTest(test.TestCase, parameterized.TestCase): def test_one_dim_with_mask(self): # Scores tensor of shape [1, 1, 1] scores = np.array([[[1.1]]], dtype=np.float32) # Value tensor of shape [1, 1, 1] v = np.array([[[1.6]]], dtype=np.float32) # Scores mask tensor of shape [1, 1, 1] scores_mask = np.array([[[True]]], dtype=np.bool_) actual, actual_scores = dense_attention.BaseDenseAttention()._apply_scores( scores=scores, value=v, scores_mask=scores_mask) # Expected softmax_scores = [[[1]]] 
    expected_scores = np.array([[[1.]]], dtype=np.float32)
    self.assertAllClose(expected_scores, actual_scores)
    # Expected tensor of shape [1, 1, 1].
    # expected000 = softmax_scores[0, 0] * 1.6 = 1.6
    expected = np.array([[[1.6]]], dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_one_dim_no_mask(self):
    # Scores tensor of shape [1, 1, 1]
    scores = np.array([[[1.1]]], dtype=np.float32)
    # Value tensor of shape [1, 1, 1]
    v = np.array([[[1.6]]], dtype=np.float32)
    actual, actual_scores = dense_attention.BaseDenseAttention()._apply_scores(
        scores=scores, value=v)

    # Expected softmax_scores = [[[1]]]
    expected_scores = np.array([[[1.]]], dtype=np.float32)
    self.assertAllClose(expected_scores, actual_scores)
    # Expected tensor of shape [1, 1, 1].
    # expected000 = softmax_scores[0, 0] * 1.6 = 1.6
    expected = np.array([[[1.6]]], dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_multi_dim_with_mask(self):
    # Scores tensor of shape [1, 1, 3]
    scores = np.array([[[1., 0., 1.]]], dtype=np.float32)
    # Value tensor of shape [1, 3, 1]
    v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
    # Scores mask tensor of shape [1, 1, 3]
    scores_mask = np.array([[[True, True, False]]], dtype=np.bool_)
    actual, actual_scores = dense_attention.BaseDenseAttention()._apply_scores(
        scores=scores, value=v, scores_mask=scores_mask)

    # Expected softmax scores = softmax(scores) with zeros in positions where
    # v_mask == False.
    # => softmax_scores000 = exp(1)/(exp(1) + exp(0)) = 0.73105857863
    #    softmax_scores001 = exp(0)/(exp(1) + exp(0)) = 0.26894142137
    #    softmax_scores002 = 0
    expected_scores = np.array([[[0.73105857863, 0.26894142137, 0.]]],
                               dtype=np.float32)
    self.assertAllClose(expected_scores, actual_scores)
    # Expected tensor of shape [1, 1, 1].
    # expected000 = 0.73105857863 * 1.6 + 0.26894142137 * 0.7 - 0 * 0.8
    #             = 1.35795272077
    expected = np.array([[[1.35795272077]]], dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_multi_dim_no_mask(self):
    # Scores tensor of shape [1, 1, 3]
    scores = np.array([[[1., 0., 1.]]], dtype=np.float32)
    # Value tensor of shape [1, 3, 1]
    v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
    actual, actual_scores = dense_attention.BaseDenseAttention()._apply_scores(
        scores=scores, value=v)

    # Expected softmax_scores = softmax(scores).
    # => softmax_scores000 = exp(1)/(exp(1) + exp(0) + exp(1))
    #                      = 0.42231879825
    #    softmax_scores001 = exp(0)/(exp(1) + exp(0) + exp(1))
    #                      = 0.15536240349
    #    softmax_scores002 = exp(1)/(exp(1) + exp(0) + exp(1))
    #                      = 0.42231879825
    expected_scores = np.array(
        [[[0.42231879825, 0.15536240349, 0.42231879825]]], dtype=np.float32)
    self.assertAllClose(expected_scores, actual_scores)
    # Expected tensor of shape [1, 1, 1].
    # expected000 = 0.42231879825 * 1.6 + 0.15536240349 * 0.7
    #               - 0.42231879825 * 0.8
    #             = 0.44660872104
    expected = np.array([[[0.44660872104]]], dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_one_dim_batch_size_two(self):
    # Scores tensor of shape [2, 1, 1]
    scores = np.array([[[1.1]], [[2.1]]], dtype=np.float32)
    # Value tensor of shape [2, 1, 1]
    v = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
    # Scores mask tensor of shape [2, 1, 1]
    scores_mask = np.array([[[True]], [[True]]], dtype=np.bool_)
    actual, actual_scores = dense_attention.BaseDenseAttention()._apply_scores(
        scores=scores, value=v, scores_mask=scores_mask)

    # Expected softmax_scores = [[[1]], [[1]]]
    expected_scores = np.array([[[1.]], [[1.]]], dtype=np.float32)
    self.assertAllClose(expected_scores, actual_scores)
    # Expected tensor of shape [2, 1, 1].
    # expected000 = softmax_scores[0, 0] * 1.6 = 1.6
    # expected100 = softmax_scores[1, 0] * 2.6 = 2.6
    expected = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_shape_with_dropout(self):
    # scores: Scores float tensor of shape `[batch_size, tq, tv]`.
    # value: Value tensor of shape `[batch_size, tv, dim]`.
    batch_size = 4
    tq = 5
    tv = 6
    dim = 7
    scores = np.ones((batch_size, tq, tv))
    value = np.ones((batch_size, tv, dim))
    actual, actual_scores = dense_attention.BaseDenseAttention(
        dropout=0.1)._apply_scores(
            scores=scores, value=value, training=False)

    # Expected Tensor of shape `[batch_size, tq, tv]`.
    expected_scores_shape = [batch_size, tq, tv]
    self.assertAllEqual(expected_scores_shape, array_ops.shape(actual_scores))
    # Expected Tensor of shape `[batch_size, tq, dim]`.
    expected_shape = [batch_size, tq, dim]
    self.assertAllEqual(expected_shape, array_ops.shape(actual))

  def test_serialization(self):
    # Test serialization with causal
    layer = dense_attention.BaseDenseAttention(causal=True)

    config = keras.layers.serialize(layer)
    new_layer = keras.layers.deserialize(config)
    self.assertEqual(new_layer.causal, True)

    config = layer.get_config()
    new_layer = dense_attention.BaseDenseAttention.from_config(config)
    self.assertEqual(new_layer.causal, True)


@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class AttentionTest(test.TestCase, parameterized.TestCase):
  """Tests for the dot-product `Attention` layer (scores, masks, causality)."""

  def test_calculate_scores_one_dim(self):
    # Query tensor of shape [1, 1, 1]
    q = np.array([[[1.1]]], dtype=np.float32)
    # Key tensor of shape [1, 1, 1]
    k = np.array([[[1.6]]], dtype=np.float32)
    attention_layer = dense_attention.Attention()
    attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
    actual = attention_layer._calculate_scores(query=q, key=k)

    # Expected tensor of shape [1, 1, 1].
    # expected000 = 1.1*1.6 = 1.76
    expected = np.array([[[1.76]]], dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_calculate_scores_multi_dim(self):
    # Query tensor of shape [1, 2, 4]
    q = np.array([[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]],
                 dtype=np.float32)
    # Key tensor of shape [1, 3, 4]
    k = np.array(
        [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
        dtype=np.float32)
    attention_layer = dense_attention.Attention()
    attention_layer.build(input_shape=([1, 2, 4], [1, 3, 4]))
    actual = attention_layer._calculate_scores(query=q, key=k)

    # Expected tensor of shape [1, 2, 3].
    # expected000 = 1.*1.5+1.1*1.6+1.2*1.7+1.3*1.8 = 7.64
    # expected001 = 1.*2.5+1.1*2.6+1.2*2.7+1.3*2.8 = 12.24
    # expected002 = 1.*3.5+1.1*3.6+1.2*3.7+1.3*3.8 = 16.84
    # expected010 = 2.*1.5+2.1*1.6+2.2*1.7+2.3*1.8 = 14.24
    # expected011 = 2.*2.5+2.1*2.6+2.2*2.7+2.3*2.8 = 22.84
    # expected012 = 2.*3.5+2.1*3.6+2.2*3.7+2.3*3.8 = 31.44
    expected = np.array([[[7.64, 12.24, 16.84], [14.24, 22.84, 31.44]]],
                        dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_calculate_scores_one_dim_batch_size_two(self):
    # Query tensor of shape [2, 1, 1]
    q = np.array([[[1.1]], [[2.1]]], dtype=np.float32)
    # Key tensor of shape [2, 1, 1]
    k = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
    attention_layer = dense_attention.Attention()
    attention_layer.build(input_shape=([2, 1, 1], [2, 1, 1]))
    actual = attention_layer._calculate_scores(query=q, key=k)

    # Expected tensor of shape [2, 1, 1].
    # expected000 = 1.1*1.6 = 1.76
    # expected100 = 2.1*2.6 = 5.46
    expected = np.array([[[1.76]], [[5.46]]], dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_calculate_scores_one_dim_with_scale(self):
    """Tests that scores are multiplied by scale."""
    # Query tensor of shape [1, 1, 1]
    q = np.array([[[1.1]]], dtype=np.float32)
    # Key tensor of shape [1, 1, 1]
    k = np.array([[[1.6]]], dtype=np.float32)
    attention_layer = dense_attention.Attention(use_scale=True)
    attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
    attention_layer.scale = -2.
    actual = attention_layer._calculate_scores(query=q, key=k)

    # Expected tensor of shape [1, 1, 1].
    # expected000 = -2*1.1*1.6 = -3.52
    expected = np.array([[[-3.52]]], dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_shape(self):
    # Query tensor of shape [1, 2, 4]
    q = np.array([[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]],
                 dtype=np.float32)
    # Value tensor of shape [1, 3, 4]
    v = np.array(
        [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
        dtype=np.float32)
    # Value mask tensor of shape [1, 3]
    v_mask = np.array([[True, True, False]], dtype=np.bool_)
    attention_layer = dense_attention.Attention()
    actual = attention_layer([q, v], mask=[None, v_mask])

    expected_shape = [1, 2, 4]
    self.assertAllEqual(expected_shape, array_ops.shape(actual))

  def test_shape_with_key(self):
    # Query tensor of shape [1, 2, 4]
    q = np.array([[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]],
                 dtype=np.float32)
    # Value tensor of shape [1, 3, 4]
    v = np.array(
        [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
        dtype=np.float32)
    # Key tensor of shape [1, 3, 4]
    k = np.array(
        [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
        dtype=np.float32)
    # Value mask tensor of shape [1, 3]
    v_mask = np.array([[True, True, False]], dtype=np.bool_)
    attention_layer = dense_attention.Attention()
    actual = attention_layer([q, v, k], mask=[None, v_mask])

    expected_shape = [1, 2, 4]
    self.assertAllEqual(expected_shape, array_ops.shape(actual))

  def test_multi_dim(self):
    # Query tensor of shape [1, 1, 1]
    q = np.array([[[1.1]]], dtype=np.float32)
    # Value tensor of shape [1, 3, 1]
    v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
    # Value mask tensor of shape [1, 3]
    v_mask = np.array([[True, True, False]], dtype=np.bool_)
    attention_layer = dense_attention.Attention()
    actual = attention_layer([q, v], mask=[None, v_mask])

    # Expected scores of shape [1, 1, 3]
    # scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8]]] = [[[1.76, 0.77, -0.88]]]
    # Expected attention distribution = softmax(scores) with zeros in
    # positions where v_mask == False.
    # => attention_distribution000 = exp(1.76)/(exp(1.76) + exp(0.77))
    #                              = 0.72908792234
    #    attention_distribution001 = exp(0.77)/(exp(1.76) + exp(0.77))
    #                              = 0.27091207765
    #    attention_distribution002 = 0
    #
    # Expected tensor of shape [1, 1, 1].
    # expected000 = 0.72908792234 * 1.6 + 0.27091207765 * 0.7 - 0 * 0.8
    #             = 1.3561791301
    expected = np.array([[[1.3561791301]]], dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_multi_dim_with_key(self):
    # Query tensor of shape [1, 1, 1]
    q = np.array([[[1.1]]], dtype=np.float32)
    # Value tensor of shape [1, 3, 1]
    v = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32)
    # Key tensor of shape [1, 3, 1]
    k = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
    # Value mask tensor of shape [1, 3]
    v_mask = np.array([[True, True, False]], dtype=np.bool_)
    attention_layer = dense_attention.Attention()
    actual = attention_layer([q, v, k], mask=[None, v_mask])

    # Expected scores of shape [1, 1, 3]
    # scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8]]] = [[[1.76, 0.77, -0.88]]]
    # Expected attention distribution = softmax(scores) with zeros in
    # positions where v_mask == False.
    # => attention_distribution000 = exp(1.76)/(exp(1.76) + exp(0.77))
    #                              = 0.72908792234
    #    attention_distribution001 = exp(0.77)/(exp(1.76) + exp(0.77))
    #                              = 0.27091207765
    #    attention_distribution002 = 0
    #
    # Expected tensor of shape [1, 1, 1].
    # expected000 = 0.72908792234 * 0.5 + 0.27091207765 * 0.8 - 0 * 0.3
    #             = 0.58127362329
    expected = np.array([[[0.58127362329]]], dtype=np.float32)
    self.assertAllClose(expected, actual)

  @parameterized.named_parameters(
      ('', False),
      ('return_attention_scores', True),
  )
  def test_multi_dim_with_query_mask(self, return_attention_scores):
    # Query tensor of shape [1, 2, 1]
    q = np.array([[[1.1], [-0.5]]], dtype=np.float32)
    # Value tensor of shape [1, 3, 1]
    v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
    # Query mask tensor of shape [1, 2]
    q_mask = np.array([[True, False]], dtype=np.bool_)
    # Value mask tensor of shape [1, 3]
    v_mask = np.array([[True, True, False]], dtype=np.bool_)
    attention_layer = dense_attention.Attention()
    if return_attention_scores:
      actual, actual_scores = attention_layer(
          [q, v],
          mask=[q_mask, v_mask],
          return_attention_scores=return_attention_scores)
    else:
      actual = attention_layer([q, v],
                               mask=[q_mask, v_mask],
                               return_attention_scores=return_attention_scores)

    # Expected scores of shape [1, 2, 3]
    # scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8], [-0.5*1.6, -0.5*0.7, 0.5*0.8]]]
    #        = [[[1.76, 0.77, -0.88], [-0.8, -0.35, 0.4]]]
    # Expected attention distribution = softmax(scores) with zeros in
    # positions where v_mask == False.
    # => attention_distribution000 = exp(1.76)/(exp(1.76) + exp(0.77))
    #                              = 0.72908792234
    #    attention_distribution001 = exp(0.77)/(exp(1.76) + exp(0.77))
    #                              = 0.27091207765
    #    attention_distribution002 = 0
    # => attention_distribution010 = exp(-0.8)/(exp(-0.8) + exp(-0.35))
    #                              = 0.38936076605
    #    attention_distribution011 = exp(-0.35)/(exp(-0.8) + exp(-0.35))
    #                              = 0.61063923394
    #    attention_distribution012 = 0
    if return_attention_scores:
      expected_scores = np.array([[[0.72908792234, 0.27091207765, 0.],
                                   [0.38936076605, 0.61063923394, 0.]]],
                                 dtype=np.float32)
      self.assertAllClose(expected_scores, actual_scores)
    # Expected tensor of shape [1, 2, 1] with zeros where q_mask == False.
    # expected000 = 0.72908792234 * 1.6 + 0.27091207765 * 0.7 - 0 * 0.8
    #             = 1.3561791301
    # expected010 = 0
    expected = np.array([[[1.3561791301], [0.]]], dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_scale_None(self):
    """Tests that scale is None by default."""
    attention_layer = dense_attention.Attention()
    attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
    self.assertIsNone(attention_layer.scale)

  def test_scale_init_eager(self):
    """Tests that scale initializes to 1 when use_scale=True."""
    if not context.executing_eagerly():
      self.skipTest('Only run in eager mode')
    attention_layer = dense_attention.Attention(use_scale=True)
    attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
    self.assertAllClose(1., attention_layer.scale.value())

  def test_scale_init_graph(self):
    """Tests that scale initializes to 1 when use_scale=True."""
    with self.cached_session() as sess:
      attention_layer = dense_attention.Attention(use_scale=True)
      attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
      sess.run(attention_layer.scale.initializer)
      self.assertAllClose(1., attention_layer.scale.value())

  @parameterized.named_parameters(
      ('', False),
      ('return_attention_scores', True),
  )
  def test_self_attention_causal(self, return_attention_scores):
    # Query-value tensor of shape [1, 3, 1]
    q = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32)
    attention_layer = dense_attention.Attention(causal=True)
    if return_attention_scores:
      actual, actual_scores = attention_layer(
          [q, q], return_attention_scores=return_attention_scores)
    else:
      actual = attention_layer([q, q],
                               return_attention_scores=return_attention_scores)

    # Expected scores of shape [1, 3, 3]
    # scores = [[0.25, 0.4, -0.15], [0.4, 0.64, -0.24], [-0.15, -0.24, 0.09]]
    # Expected attention distribution = softmax(scores) lower triangular
    # => attention_distribution00 = [1., 0., 0.]
    #    attention_distribution01
    #      = [exp(0.4), exp(0.64), 0.] / (exp(0.4) + exp(0.64))
    #      = [0.44028635073, 0.55971364926, 0.]
    #    attention_distribution02
    #      = [exp(-0.15), exp(-0.24), exp(0.09)]
    #        / (exp(-0.15) + exp(-0.24) + exp(0.09))
    #      = [0.31395396638, 0.28693232061, 0.399113713]
    if return_attention_scores:
      expected_scores = np.array(
          [[[1., 0., 0.], [0.44028635073, 0.55971364926, 0.],
            [0.31395396638, 0.28693232061, 0.399113713]]],
          dtype=np.float32)
      self.assertAllClose(expected_scores, actual_scores)
    # Expected tensor of shape [1, 3, 1].
    # expected000 = 0.5
    # expected010 = 0.44028635073 * 0.5 + 0.55971364926 * 0.8
    #             = 0.66791409477
    # expected020 = 0.31395396638 * 0.5 +0.28693232061 * 0.8 -0.399113713 * 0.3
    #             = 0.26678872577
    expected = np.array([[[0.5], [0.66791409477], [0.26678872577]]],
                        dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_inputs_not_list(self):
    attention_layer = dense_attention.Attention()
    q = np.array([[[1.1]]], dtype=np.float32)
    with self.assertRaisesRegex(
        ValueError, 'Attention layer must be called on a list of inputs'):
      attention_layer(q)

  def test_inputs_too_short(self):
    attention_layer = dense_attention.Attention()
    q = np.array([[[1.1]]], dtype=np.float32)
    with self.assertRaisesRegex(
        ValueError, 'Attention layer accepts inputs list of length 2 or 3'):
      attention_layer([q])

  def test_inputs_too_long(self):
    attention_layer = dense_attention.Attention()
    q = np.array([[[1.1]]], dtype=np.float32)
    with self.assertRaisesRegex(
        ValueError, 'Attention layer accepts inputs list of length 2 or 3'):
      attention_layer([q, q, q, q])

  def test_mask_not_list(self):
    attention_layer = dense_attention.Attention()
    q = np.array([[[1.1]]], dtype=np.float32)
    mask = np.array([[True]], dtype=np.bool_)
    with self.assertRaisesRegex(ValueError,
                                'Attention layer mask must be a list'):
      attention_layer([q, q], mask=mask)

  def test_mask_too_short(self):
    attention_layer = dense_attention.Attention()
    q = np.array([[[1.1]]], dtype=np.float32)
    mask = np.array([[True]], dtype=np.bool_)
    with self.assertRaisesRegex(
        ValueError, 'Attention layer mask must be a list of length 2'):
      attention_layer([q, q], mask=[mask])

  def test_mask_too_long(self):
    attention_layer = dense_attention.Attention()
    q = np.array([[[1.1]]], dtype=np.float32)
    mask = np.array([[True]], dtype=np.bool_)
    with self.assertRaisesRegex(
        ValueError, 'Attention layer mask must be a list of length 2'):
      attention_layer([q, q], mask=[mask, mask, mask])

  def test_override_mask(self):
    attention_layer = dense_attention.Attention()
    q = core.Masking()(np.array([[[1.1]]], dtype=np.float32))
    mask = np.array([[False]], dtype=np.bool_)
    actual = attention_layer([q, q], mask=[mask, mask])
    self.assertAllClose([[[0]]], actual)

  def test_implicit_mask(self):
    attention_layer = dense_attention.Attention()
    q = core.Masking(1.1)(np.array([[[1.1], [1]]], dtype=np.float32))
    v = core.Masking(1.2)(np.array([[[1.2], [1]]], dtype=np.float32))
    actual = attention_layer([q, v])
    self.assertAllClose([[[0], [1]]], actual)

  @parameterized.named_parameters(
      ('', False),
      ('use_scale', True),
  )
  def test_serialization(self, use_scale):
    # Test serialization with use_scale
    layer = dense_attention.Attention(use_scale=use_scale)

    config = keras.layers.serialize(layer)
    new_layer = keras.layers.deserialize(config)
    self.assertEqual(new_layer.use_scale, use_scale)

    config = layer.get_config()
    new_layer = dense_attention.Attention.from_config(config)
    self.assertEqual(new_layer.use_scale, use_scale)


@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class AdditiveAttentionTest(test.TestCase, parameterized.TestCase):
  """Tests for the Bahdanau-style `AdditiveAttention` layer."""

  def test_calculate_scores_one_dim(self):
    # Query tensor of shape [1, 1, 1]
    q = np.array([[[1.1]]], dtype=np.float32)
    # Key tensor of shape [1, 1, 1]
    k = np.array([[[1.6]]], dtype=np.float32)
    attention_layer = dense_attention.AdditiveAttention()
    attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
    # Scale tensor of shape [1]
    attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
    actual = attention_layer._calculate_scores(query=q, key=k)

    # Expected tensor of shape [1, 1, 1].
    # expected000 = 0.5 * tanh(1.1 + 1.6) = 0.49550372683
    expected = np.array([[[0.49550372683]]], dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_calculate_scores_multi_dim(self):
    # Query tensor of shape [1, 2, 4]
    q = np.array([[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]],
                 dtype=np.float32)
    # Key tensor of shape [1, 3, 4]
    k = np.array(
        [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
        dtype=np.float32)
    attention_layer = dense_attention.AdditiveAttention()
    attention_layer.build(input_shape=([1, 2, 4], [1, 3, 4]))
    # Scale tensor of shape [4]
    attention_layer.scale = np.array([[[0.5, 0.6, 0.7, 0.8]]],
                                     dtype=np.float32)
    actual = attention_layer._calculate_scores(query=q, key=k)

    # pylint:disable=line-too-long
    # expected000 = 0.5*tanh(1.+1.5) + 0.6*tanh(1.1+1.6) + 0.7*tanh(1.2+1.7) + 0.8*tanh(1.3+1.8) = 2.58044532581
    # expected001 = 0.5*tanh(1.+2.5) + 0.6*tanh(1.1+2.6) + 0.7*tanh(1.2+2.7) + 0.8*tanh(1.3+2.8) = 2.59734317449
    # expected002 = 0.5*tanh(1.+3.5) + 0.6*tanh(1.1+3.6) + 0.7*tanh(1.2+3.7) + 0.8*tanh(1.3+3.8) = 2.59964024652
    # expected010 = 0.5*tanh(2.+1.5) + 0.6*tanh(2.1+1.6) + 0.7*tanh(2.2+1.7) + 0.8*tanh(2.3+1.8) = 2.59734317449
    # expected011 = 0.5*tanh(2.+2.5) + 0.6*tanh(2.1+2.6) + 0.7*tanh(2.2+2.7) + 0.8*tanh(2.3+2.8) = 2.59964024652
    # expected012 = 0.5*tanh(2.+3.5) + 0.6*tanh(2.1+3.6) + 0.7*tanh(2.2+3.7) + 0.8*tanh(2.3+3.8) = 2.59995130916
    # pylint:enable=line-too-long
    expected = np.array([[[2.58044532581, 2.59734317449, 2.59964024652],
                          [2.59734317449, 2.59964024652, 2.59995130916]]],
                        dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_calculate_scores_one_dim_batch_size_two(self):
    # Query tensor of shape [2, 1, 1]
    q = np.array([[[1.1]], [[2.1]]], dtype=np.float32)
    # Key tensor of shape [2, 1, 1]
    k = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
    attention_layer = dense_attention.AdditiveAttention()
    attention_layer.build(input_shape=([2, 1, 1], [2, 1, 1]))
    # Scale tensor of shape [1]
    attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
    actual = attention_layer._calculate_scores(query=q, key=k)

    # Expected tensor of shape [2, 1, 1].
    # expected000 = 0.5 * tanh(1.1 + 1.6) = 0.49550372683
    # expected100 = 0.5 * tanh(2.1 + 2.6) = 0.49991728277
    expected = np.array([[[0.49550372683]], [[0.49991728277]]],
                        dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_shape(self):
    # Query tensor of shape [1, 2, 4]
    q = np.array([[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]],
                 dtype=np.float32)
    # Value tensor of shape [1, 3, 4]
    v = np.array(
        [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
        dtype=np.float32)
    # Value mask tensor of shape [1, 3]
    v_mask = np.array([[True, True, False]], dtype=np.bool_)
    attention_layer = dense_attention.AdditiveAttention()
    actual = attention_layer([q, v], mask=[None, v_mask])

    expected_shape = [1, 2, 4]
    self.assertAllEqual(expected_shape, array_ops.shape(actual))

  def test_shape_no_scale(self):
    # Query tensor of shape [1, 2, 4]
    q = np.array([[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]],
                 dtype=np.float32)
    # Value tensor of shape [1, 3, 4]
    v = np.array(
        [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
        dtype=np.float32)
    # Value mask tensor of shape [1, 3]
    v_mask = np.array([[True, True, False]], dtype=np.bool_)
    attention_layer = dense_attention.AdditiveAttention(use_scale=False)
    actual = attention_layer([q, v], mask=[None, v_mask])

    expected_shape = [1, 2, 4]
    self.assertAllEqual(expected_shape, array_ops.shape(actual))

  def test_shape_with_key(self):
    # Query tensor of shape [1, 2, 4]
    q = np.array([[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]],
                 dtype=np.float32)
    # Value tensor of shape [1, 3, 4]
    v = np.array(
        [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
        dtype=np.float32)
    # Key tensor of shape [1, 3, 4]
    k = np.array(
        [[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
        dtype=np.float32)
    # Value mask tensor of shape [1, 3]
    v_mask = np.array([[True, True, False]], dtype=np.bool_)
    attention_layer = dense_attention.AdditiveAttention()
    actual = attention_layer([q, v, k], mask=[None, v_mask])

    expected_shape = [1, 2, 4]
    self.assertAllEqual(expected_shape, array_ops.shape(actual))

  def test_multi_dim(self):
    # Query tensor of shape [1, 1, 1]
    q = np.array([[[1.1]]], dtype=np.float32)
    # Value tensor of shape [1, 3, 1]
    v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
    # Value mask tensor of shape [1, 3]
    v_mask = np.array([[True, True, False]], dtype=np.bool_)
    attention_layer = dense_attention.AdditiveAttention()
    attention_layer.build(input_shape=([1, 1, 1], [1, 3, 1]))
    # Scale tensor of shape [1]
    attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
    actual = attention_layer([q, v], mask=[None, v_mask])

    # pylint:disable=line-too-long
    # Expected scores of shape [1, 1, 3]
    # scores = [[[0.5 * tanh(1.1 + 1.6), 0.5 * tanh(1.1 + 0.7), 0.5 * tanh(1.1 - 0.8)]]]
    #        = [[[0.49550372683, 0.47340300642, 0.14565630622]]]
    # Expected attention distribution = softmax(scores) with zeros in
    # positions where v_mask == False.
    # => attention_distribution000
    #      = exp(0.49550372683)/(exp(0.49550372683) + exp(0.47340300642))
    #      = 0.50552495521
    #    attention_distribution001
    #      = exp(0.47340300642)/(exp(0.49550372683) + exp(0.47340300642))
    #      = 0.49447504478
    #    attention_distribution002 = 0
    #
    # Expected tensor of shape [1, 1, 1].
    # expected000 = 0.50552495521 * 1.6 + 0.49447504478 * 0.7 - 0 * 0.8
    #             = 1.15497245968
    # pylint:enable=line-too-long
    expected = np.array([[[1.15497245968]]], dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_multi_dim_with_key(self):
    # Query tensor of shape [1, 1, 1]
    q = np.array([[[1.1]]], dtype=np.float32)
    # Value tensor of shape [1, 3, 1]
    v = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32)
    # Key tensor of shape [1, 3, 1]
    k = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
    # Value mask tensor of shape [1, 3]
    v_mask = np.array([[True, True, False]], dtype=np.bool_)
    attention_layer = dense_attention.AdditiveAttention()
    attention_layer.build(input_shape=([1, 1, 1], [1, 3, 1]))
    # Scale tensor of shape [1]
    attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
    actual = attention_layer([q, v, k], mask=[None, v_mask])

    # pylint:disable=line-too-long
    # Expected scores of shape [1, 1, 3]
    # scores = [[[0.5 * tanh(1.1 + 1.6), 0.5 * tanh(1.1 + 0.7), 0.5 * tanh(1.1 - 0.8)]]]
    #        = [[[0.49550372683, 0.47340300642, 0.14565630622]]]
    # Expected attention distribution = softmax(scores) with zeros in
    # positions where v_mask == False.
    # => attention_distribution000
    #      = exp(0.49550372683)/(exp(0.49550372683) + exp(0.47340300642))
    #      = 0.50552495521
    #    attention_distribution001
    #      = exp(0.47340300642)/(exp(0.49550372683) + exp(0.47340300642))
    #      = 0.49447504478
    #    attention_distribution002 = 0
    #
    # Expected tensor of shape [1, 1, 1].
    # expected000 = 0.50552495521 * 0.5 + 0.49447504478 * 0.8 - 0 * 0.3
    #             = 0.64834251342
    # pylint:enable=line-too-long
    expected = np.array([[[0.64834251342]]], dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_multi_dim_with_query_mask(self):
    # Query tensor of shape [1, 2, 1]
    q = np.array([[[1.1], [-0.5]]], dtype=np.float32)
    # Value tensor of shape [1, 3, 1]
    v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
    # Query mask tensor of shape [1, 2]
    q_mask = np.array([[True, False]], dtype=np.bool_)
    # Value mask tensor of shape [1, 3]
    v_mask = np.array([[True, True, False]], dtype=np.bool_)
    attention_layer = dense_attention.AdditiveAttention()
    attention_layer.build(input_shape=([1, 1, 1], [1, 3, 1]))
    # Scale tensor of shape [1]
    attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
    actual = attention_layer([q, v], mask=[q_mask, v_mask])

    # pylint:disable=line-too-long
    # Expected scores of shape [1, 2, 3]
    # scores = [[[0.5 * tanh(1.1 + 1.6), 0.5 * tanh(1.1 + 0.7), 0.5 * tanh(1.1 - 0.8)],
    #            [0.5 * tanh(-0.5 + 1.6), 0.5 * tanh(-0.5 + 0.7), 0.5 * tanh(-0.5 - 0.8)]]]
    #        = [[[0.49550372683, 0.47340300642, 0.14565630622],
    #            [0.40024951088, 0.09868766011, -0.43086157965]]]
    # Expected attention distribution = softmax(scores) with zeros in
    # positions where v_mask == False.
    # => attention_distribution000
    #      = exp(0.49550372683)/(exp(0.49550372683) + exp(0.47340300642))
    #      = 0.50552495521
    #    attention_distribution001
    #      = exp(0.47340300642)/(exp(0.49550372683) + exp(0.47340300642))
    #      = 0.49447504478
    #    attention_distribution002 = 0
    # => attention_distribution010
    #      = exp(0.40024951088)/(exp(0.40024951088) + exp(0.09868766011))
    #      = 0.57482427975
    #    attention_distribution011
    #      = exp(0.09868766011)/(exp(0.40024951088) + exp(0.09868766011))
    #      = 0.42517572025
    #    attention_distribution012 = 0
    #
    # Expected tensor of shape [1, 2, 1] with zeros where q_mask == False.
    # expected000 = 0.50552495521 * 1.6 + 0.49447504478 * 0.7 - 0 * 0.8
    #             = 1.15497245968
    # expected010 = 0
    # pylint:enable=line-too-long
    expected = np.array([[[1.15497245968], [0.]]], dtype=np.float32)
    self.assertAllClose(expected, actual)

  def test_serialization(self):
    # Test serialization with use_scale
    layer = dense_attention.AdditiveAttention(use_scale=True)

    config = keras.layers.serialize(layer)
    new_layer = keras.layers.deserialize(config)
    self.assertEqual(new_layer.use_scale, True)

    config = layer.get_config()
    new_layer = dense_attention.AdditiveAttention.from_config(config)
    self.assertEqual(new_layer.use_scale, True)

  @testing_utils.enable_v2_dtype_behavior
  def test_mixed_float16_policy(self):
    # Test case for GitHub issue:
    # https://github.com/tensorflow/tensorflow/issues/46064
    with policy.policy_scope('mixed_float16'):
      q = math_ops.cast(random_ops.random_uniform((2, 3, 4), seed=1),
                        'float16')
      v = math_ops.cast(random_ops.random_uniform((2, 3, 4), seed=2),
                        'float16')
      k = math_ops.cast(random_ops.random_uniform((2, 3, 4), seed=3),
                        'float16')
      layer = dense_attention.AdditiveAttention(causal=True)
      _ = layer([q, v, k])


@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class LowerTriangularMaskTest(test.TestCase, parameterized.TestCase):
  """Tests for the `_lower_triangular_mask` helper used for causal masking."""

  def test_square_shape(self):
    actual = dense_attention._lower_triangular_mask([3, 3])
    expected = np.array(
        [[True, False, False], [True, True, False], [True, True, True]],
        dtype=np.bool_)
    self.assertAllEqual(expected, actual)

  def test_orthogonal_shape(self):
    actual = dense_attention._lower_triangular_mask([3, 2])
    expected = np.array([[True, False], [True, True], [True, True]],
                        dtype=np.bool_)
    self.assertAllEqual(expected, actual)

  def test_three_dim(self):
    actual = dense_attention._lower_triangular_mask([1, 3, 3])
    expected = np.array(
        [[[True, False, False], [True, True, False], [True, True, True]]],
        dtype=np.bool_)
    self.assertAllEqual(expected, actual)


if __name__ == '__main__':
  test.main()
#!/usr/bin/env python
# Copyright (C) 2013 Hewlett-Packard.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Provides local yaml parsing classes and extend yaml module

"""Custom application specific yamls tags are supported to provide
enhancements when reading yaml configuration.

These allow inclusion of arbitrary files as a method of having blocks of data
managed separately to the yaml job configurations. A specific usage of this is
inlining scripts contained in separate files, although such tags may also be
used to simplify usage of macros or job templates.

The tag ``!include:`` will treat the following string as file which should be
parsed as yaml configuration data.

Example:

    .. literalinclude:: /../../tests/localyaml/fixtures/include001.yaml

    contents of include001.yaml.inc:

    .. literalinclude:: /../../tests/yamlparser/fixtures/include001.yaml.inc


The tag ``!include-raw:`` will treat the given string or list of strings as
filenames to be opened as one or more data blob, which should be read into
the calling yaml construct without any further parsing. Any data in a file
included through this tag, will be treated as string data.

Examples:

    .. literalinclude:: /../../tests/localyaml/fixtures/include-raw001.yaml

    contents of include-raw001-hello-world.sh:

    .. literalinclude::
        /../../tests/localyaml/fixtures/include-raw001-hello-world.sh

    contents of include-raw001-vars.sh:

    .. literalinclude::
        /../../tests/localyaml/fixtures/include-raw001-vars.sh

    using a list of files:

    .. literalinclude::
        /../../tests/localyaml/fixtures/include-raw-multi001.yaml

The tag ``!include-raw-escape:`` treats the given string or list of strings
as filenames to be opened as one or more data blobs, which should be escaped
before being read in as string data. This allows job-templates to use this
tag to include scripts from files without needing to escape braces in the
original file.

Examples:

    .. literalinclude::
        /../../tests/localyaml/fixtures/include-raw-escaped001.yaml

    contents of include-raw001-hello-world.sh:

    .. literalinclude::
        /../../tests/localyaml/fixtures/include-raw001-hello-world.sh

    contents of include-raw001-vars.sh:

    .. literalinclude::
        /../../tests/localyaml/fixtures/include-raw001-vars.sh

    using a list of files:

    .. literalinclude::
        /../../tests/localyaml/fixtures/include-raw-escaped-multi001.yaml


For all the multi file includes, the files are simply appended using a newline
character.
"""

import functools
import io
import logging
import os
import re

import yaml
from yaml.constructor import BaseConstructor
from yaml import YAMLObject

from collections import OrderedDict


logger = logging.getLogger(__name__)


class OrderedConstructor(BaseConstructor):
    """The default constructor class for PyYAML loading uses standard python
    dictionaries which can have randomized ordering enabled (default in
    CPython from version 3.3). The order of the XML elements being outputted
    is both important for tests and for ensuring predictable generation based
    on the source. This subclass overrides this behaviour to ensure that all
    dict's created make use of OrderedDict to have iteration of keys to
    always follow the order in which the keys were inserted/created.
    """

    def construct_yaml_map(self, node):
        """Construct a mapping node as an OrderedDict.

        Yields the (initially empty) OrderedDict first so that recursive /
        anchored structures can reference it, then fills it in.

        :raises yaml.constructor.ConstructorError: if *node* is not a
            mapping node or contains an unhashable key.
        """
        data = OrderedDict()
        yield data
        # NOTE: a redundant ``self.construct_mapping(node)`` call used to sit
        # here; it built the entire mapping a second time only to discard the
        # result, so it has been removed.
        if isinstance(node, yaml.MappingNode):
            # resolve any merge keys ('<<') before iterating the pairs
            self.flatten_mapping(node)
        else:
            raise yaml.constructor.ConstructorError(
                None, None,
                'expected a mapping node, but found %s' % node.id,
                node.start_mark)

        mapping = OrderedDict()
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=False)
            try:
                # keys must be hashable to be usable as dict keys
                hash(key)
            except TypeError as exc:
                raise yaml.constructor.ConstructorError(
                    'while constructing a mapping', node.start_mark,
                    'found unacceptable key (%s)' % exc, key_node.start_mark)
            value = self.construct_object(value_node, deep=False)
            mapping[key] = value
        data.update(mapping)


class LocalAnchorLoader(yaml.Loader):
    """Subclass for yaml.Loader which keeps Alias between calls"""
    anchors = {}

    def __init__(self, *args, **kwargs):
        super(LocalAnchorLoader, self).__init__(*args, **kwargs)
        # share a single anchor table across loader instances so aliases
        # defined in one document remain visible in later ones
        self.anchors = LocalAnchorLoader.anchors

    @classmethod
    def reset_anchors(cls):
        """Drop all remembered anchors (call before loading a fresh set)."""
        cls.anchors = {}

    # override the default composer to skip resetting the anchors at the
    # end of the current document
    def compose_document(self):
        # Drop the DOCUMENT-START event.
        self.get_event()

        # Compose the root node.
        node = self.compose_node(None, None)

        # Drop the DOCUMENT-END event.
        self.get_event()

        return node


class LocalLoader(OrderedConstructor, LocalAnchorLoader):
    """Subclass for yaml.Loader which handles storing the search_path and
    escape_callback functions for use by the custom YAML objects to find
    files and escape the content where required.

    Constructor access a list of search paths to look under for the given
    file following each tag, taking the first match found. Search path by
    default will include the same directory as the yaml file and the current
    working directory.


    Loading::

        # use the load function provided in this module
        import local_yaml
        data = local_yaml.load(io.open(fn, 'r', encoding='utf-8'))


        # Loading by providing the alternate class to the default yaml load
        from local_yaml import LocalLoader
        data = yaml.load(io.open(fn, 'r', encoding='utf-8'), LocalLoader)

        # Loading with a search path
        from local_yaml import LocalLoader
        import functools
        data = yaml.load(io.open(fn, 'r', encoding='utf-8'),
                         functools.partial(LocalLoader, search_path=['path']))

    """

    def __init__(self, *args, **kwargs):
        # make sure to pop off any local settings before passing to
        # the parent constructor as any unknown args may cause errors.
        self.search_path = list()
        if 'search_path' in kwargs:
            for p in kwargs.pop('search_path'):
                logger.debug("Adding '{0}' to search path for include tags"
                             .format(p))
                self.search_path.append(os.path.normpath(p))

        # default escaping doubles any braces so include-raw-escape output
        # is safe to feed through job-template formatting
        self.escape_callback = kwargs.pop('escape_callback', self._escape)

        super(LocalLoader, self).__init__(*args, **kwargs)

        # constructor to preserve order of maps and ensure that the order of
        # keys returned is consistent across multiple python versions
        self.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
                             type(self).construct_yaml_map)

        if hasattr(self.stream, 'name'):
            self.search_path.append(os.path.normpath(
                os.path.dirname(self.stream.name)))
        self.search_path.append(os.path.normpath(os.path.curdir))

    def _escape(self, data):
        """Double all braces so the data survives str.format expansion."""
        return re.sub(r'({|})', r'\1\1', data)


class BaseYAMLObject(YAMLObject):
    """Common base tying custom YAML tags to the LocalLoader."""
    yaml_loader = LocalLoader
    yaml_dumper = yaml.Dumper


class YamlInclude(BaseYAMLObject):
    """Implements ``!include:`` - include a file parsed as YAML."""
    yaml_tag = u'!include:'

    @classmethod
    def _find_file(cls, filename, search_path):
        """Return the first existing candidate for *filename* under
        *search_path*, or *filename* unchanged if none is found."""
        for dirname in search_path:
            candidate = os.path.expanduser(os.path.join(dirname, filename))
            if os.path.isfile(candidate):
                logger.info("Including file '{0}' from path '{1}'"
                            .format(filename, dirname))
                return candidate
        return filename

    @classmethod
    def _open_file(cls, loader, scalar_node):
        """Read and return the contents of the file named by *scalar_node*."""
        filename = cls._find_file(loader.construct_yaml_str(scalar_node),
                                  loader.search_path)
        try:
            with io.open(filename, 'r', encoding='utf-8') as f:
                return f.read()
        except Exception:
            # was a bare ``except:``; narrowed so KeyboardInterrupt and
            # SystemExit propagate without being logged as include failures
            logger.error("Failed to include file using search path: '{0}'"
                         .format(':'.join(loader.search_path)))
            raise

    @classmethod
    def _from_file(cls, loader, node):
        """Parse the referenced file as YAML using the same search path."""
        data = yaml.load(cls._open_file(loader, node),
                         functools.partial(cls.yaml_loader,
                                           search_path=loader.search_path))
        return data

    @classmethod
    def from_yaml(cls, loader, node):
        if isinstance(node, yaml.ScalarNode):
            return cls._from_file(loader, node)
        elif isinstance(node, yaml.SequenceNode):
            # multiple files are joined with newlines
            return u'\n'.join(cls._from_file(loader, scalar_node)
                              for scalar_node in node.value)
        else:
            raise yaml.constructor.ConstructorError(
                None, None, "expected either a sequence or scalar node, but "
                "found %s" % node.id, node.start_mark)


class YamlIncludeRaw(YamlInclude):
    """Implements ``!include-raw:`` - include file contents verbatim."""
    yaml_tag = u'!include-raw:'

    @classmethod
    def _from_file(cls, loader, node):
        return cls._open_file(loader, node)


class YamlIncludeRawEscape(YamlIncludeRaw):
    """Implements ``!include-raw-escape:`` - raw include, braces escaped."""
    yaml_tag = u'!include-raw-escape:'

    @classmethod
    def from_yaml(cls, loader, node):
        return loader.escape_callback(YamlIncludeRaw.from_yaml(loader, node))


class DeprecatedTag(BaseYAMLObject):
    """Base for legacy tag spellings: warn, then delegate to the new tag."""

    @classmethod
    def from_yaml(cls, loader, node):
        # logging.Logger.warn is a deprecated alias of warning
        logger.warning("tag '%s' is deprecated, switch to using '%s'",
                       cls.yaml_tag, cls._new.yaml_tag)
        return cls._new.from_yaml(loader, node)


class YamlIncludeDeprecated(DeprecatedTag):
    yaml_tag = u'!include'
    _new = YamlInclude


class YamlIncludeRawDeprecated(DeprecatedTag):
    yaml_tag = u'!include-raw'
    _new = YamlIncludeRaw


class YamlIncludeRawEscapeDeprecated(DeprecatedTag):
    yaml_tag = u'!include-raw-escape'
    _new = YamlIncludeRawEscape


def load(stream, **kwargs):
    """Load *stream* as YAML with the custom include tags enabled.

    Resets the shared anchor table first so anchors from previous load
    calls do not leak into this one. Extra keyword arguments (e.g.
    ``search_path``) are forwarded to :class:`LocalLoader`.
    """
    LocalAnchorLoader.reset_anchors()
    return yaml.load(stream, functools.partial(LocalLoader, **kwargs))
""" Make api awesomeness """ import copy import inspect import logging import os import salt.auth import salt.client import salt.client.ssh.client import salt.config import salt.daemons.masterapi import salt.exceptions import salt.log # pylint: disable=W0611 import salt.runner import salt.syspaths import salt.utils.args import salt.utils.minions import salt.wheel from salt.defaults import DEFAULT_TARGET_DELIM log = logging.getLogger(__name__) class NetapiClient: """ Provide a uniform method of accessing the various client interfaces in Salt in the form of low-data data structures. For example: >>> client = NetapiClient(__opts__) >>> lowstate = {'client': 'local', 'tgt': '*', 'fun': 'test.ping', 'arg': ''} >>> client.run(lowstate) """ def __init__(self, opts): self.opts = opts apiopts = copy.deepcopy(self.opts) apiopts["enable_ssh_minions"] = True apiopts["cachedir"] = os.path.join(opts["cachedir"], "saltapi") if not os.path.exists(apiopts["cachedir"]): os.makedirs(apiopts["cachedir"]) self.resolver = salt.auth.Resolver(apiopts) self.loadauth = salt.auth.LoadAuth(apiopts) self.key = salt.daemons.masterapi.access_keys(apiopts) self.ckminions = salt.utils.minions.CkMinions(apiopts) def _is_master_running(self): """ Perform a lightweight check to see if the master daemon is running Note, this will return an invalid success if the master crashed or was not shut down cleanly. """ # Windows doesn't have IPC. Assume the master is running. # At worse, it will error 500. 
if salt.utils.platform.is_windows(): return True if self.opts["transport"] == "tcp": ipc_file = "publish_pull.ipc" else: ipc_file = "workers.ipc" return os.path.exists(os.path.join(self.opts["sock_dir"], ipc_file)) def _prep_auth_info(self, clear_load): sensitive_load_keys = [] key = None if "token" in clear_load: auth_type = "token" err_name = "TokenAuthenticationError" sensitive_load_keys = ["token"] return auth_type, err_name, key, sensitive_load_keys elif "eauth" in clear_load: auth_type = "eauth" err_name = "EauthAuthenticationError" sensitive_load_keys = ["username", "password"] return auth_type, err_name, key, sensitive_load_keys raise salt.exceptions.EauthAuthenticationError( "No authentication credentials given" ) def _authorize_ssh(self, low): auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(low) auth_check = self.loadauth.check_authentication(low, auth_type, key=key) auth_list = auth_check.get("auth_list", []) error = auth_check.get("error") if error: raise salt.exceptions.EauthAuthenticationError(error) delimiter = low.get("kwargs", {}).get("delimiter", DEFAULT_TARGET_DELIM) _res = self.ckminions.check_minions( low["tgt"], low.get("tgt_type", "glob"), delimiter ) minions = _res.get("minions", list()) missing = _res.get("missing", list()) authorized = self.ckminions.auth_check( auth_list, low["fun"], low.get("arg", []), low["tgt"], low.get("tgt_type", "glob"), minions=minions, ) if not authorized: raise salt.exceptions.EauthAuthenticationError( "Authorization error occurred." ) def run(self, low): """ Execute the specified function in the specified client by passing the lowstate """ # Eauth currently requires a running daemon and commands run through # this method require eauth so perform a quick check to raise a # more meaningful error. 
if not self._is_master_running(): raise salt.exceptions.SaltDaemonNotRunning("Salt Master is not available.") if low.get("client") not in CLIENTS: raise salt.exceptions.SaltInvocationError( "Invalid client specified: '{}'".format(low.get("client")) ) if not ("token" in low or "eauth" in low): raise salt.exceptions.EauthAuthenticationError( "No authentication credentials given" ) if low.get("raw_shell") and not self.opts.get("netapi_allow_raw_shell"): raise salt.exceptions.EauthAuthenticationError( "Raw shell option not allowed." ) if low["client"] == "ssh": self._authorize_ssh(low) l_fun = getattr(self, low["client"]) f_call = salt.utils.args.format_call(l_fun, low) return l_fun(*f_call.get("args", ()), **f_call.get("kwargs", {})) def local_async(self, *args, **kwargs): """ Run :ref:`execution modules <all-salt.modules>` asynchronously Wraps :py:meth:`salt.client.LocalClient.run_job`. :return: job ID """ with salt.client.get_local_client(mopts=self.opts) as client: return client.run_job(*args, **kwargs) def local(self, *args, **kwargs): """ Run :ref:`execution modules <all-salt.modules>` synchronously See :py:meth:`salt.client.LocalClient.cmd` for all available parameters. Sends a command from the master to the targeted minions. This is the same interface that Salt's own CLI uses. Note the ``arg`` and ``kwarg`` parameters are sent down to the minion(s) and the given function, ``fun``, is called with those parameters. :return: Returns the result from the execution module """ with salt.client.get_local_client(mopts=self.opts) as client: return client.cmd(*args, **kwargs) def local_subset(self, *args, **kwargs): """ Run :ref:`execution modules <all-salt.modules>` against subsets of minions .. 
versionadded:: 2016.3.0 Wraps :py:meth:`salt.client.LocalClient.cmd_subset` """ with salt.client.get_local_client(mopts=self.opts) as client: return client.cmd_subset(*args, **kwargs) def local_batch(self, *args, **kwargs): """ Run :ref:`execution modules <all-salt.modules>` against batches of minions .. versionadded:: 0.8.4 Wraps :py:meth:`salt.client.LocalClient.cmd_batch` :return: Returns the result from the exeuction module for each batch of returns """ with salt.client.get_local_client(mopts=self.opts) as client: return client.cmd_batch(*args, **kwargs) def ssh(self, *args, **kwargs): """ Run salt-ssh commands synchronously Wraps :py:meth:`salt.client.ssh.client.SSHClient.cmd_sync`. :return: Returns the result from the salt-ssh command """ with salt.client.ssh.client.SSHClient( mopts=self.opts, disable_custom_roster=True ) as client: return client.cmd_sync(kwargs) def runner(self, fun, timeout=None, full_return=False, **kwargs): """ Run `runner modules <all-salt.runners>` synchronously Wraps :py:meth:`salt.runner.RunnerClient.cmd_sync`. Note that runner functions must be called using keyword arguments. Positional arguments are not supported. :return: Returns the result from the runner module """ kwargs["fun"] = fun runner = salt.runner.RunnerClient(self.opts) return runner.cmd_sync(kwargs, timeout=timeout, full_return=full_return) def runner_async(self, fun, **kwargs): """ Run `runner modules <all-salt.runners>` asynchronously Wraps :py:meth:`salt.runner.RunnerClient.cmd_async`. Note that runner functions must be called using keyword arguments. Positional arguments are not supported. :return: event data and a job ID for the executed function. """ kwargs["fun"] = fun runner = salt.runner.RunnerClient(self.opts) return runner.cmd_async(kwargs) def wheel(self, fun, **kwargs): """ Run :ref:`wheel modules <all-salt.wheel>` synchronously Wraps :py:meth:`salt.wheel.WheelClient.master_call`. Note that wheel functions must be called using keyword arguments. 
Positional arguments are not supported. :return: Returns the result from the wheel module """ kwargs["fun"] = fun wheel = salt.wheel.WheelClient(self.opts) return wheel.cmd_sync(kwargs) def wheel_async(self, fun, **kwargs): """ Run :ref:`wheel modules <all-salt.wheel>` asynchronously Wraps :py:meth:`salt.wheel.WheelClient.master_call`. Note that wheel functions must be called using keyword arguments. Positional arguments are not supported. :return: Returns the result from the wheel module """ kwargs["fun"] = fun wheel = salt.wheel.WheelClient(self.opts) return wheel.cmd_async(kwargs) CLIENTS = [ name for name, _ in inspect.getmembers(NetapiClient, predicate=None) if not (name == "run" or name.startswith("_")) ]
# use this if you want to include modules from a subfolder.
# used for the unit tests to import the struct module
import os, sys, inspect
# Prepend the parent directory so sibling modules (structs, const) resolve
# when this file is run directly (e.g. by the unit tests).
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(
    os.path.split(inspect.getfile(inspect.currentframe()))[0], "../")))
if cmd_subfolder not in sys.path:
    sys.path.insert(0, cmd_subfolder)

import structs
import re
import const
import xml.etree.ElementTree as ET

# Mapping from the textual node type used in the rr_graph files to the
# numeric type codes used throughout the node graph.
_NODE_TYPES = {
    'SINK': 1,
    'SOURCE': 2,
    'OPIN': 3,
    'IPIN': 4,
    'CHANX': 5,
    'CHANY': 6,
}


# parse the type string and return the proper int value
def getNodeType(typeString):
    # Raise ValueError for unknown types instead of assert(0): asserts are
    # silently stripped when Python runs with -O.
    try:
        return _NODE_TYPES[typeString]
    except KeyError:
        raise ValueError("unknown rr_graph node type: %r" % (typeString,))


## parse the rr_graph.echo file
# @param filename the path to the rr_graph file.
# @return a tuple (clusterx,clustery,nodegraph) where
# clusterx,clustery are the maximal x and y local coordinates and nodegraph
# is a NodeGraph object
def parseGraph(filename):
    clusterx = 0
    clustery = 0
    nodeGraph = structs.NodeGraph()

    # Records look like (6 lines each):
    #   Node: 0 SINK (0, 1) Ptc_num: 0 Direction: OPEN Drivers: OPEN
    #   <edges> / <switch types> / <occupancy+capacity> / <R and C> /
    #   <cost index> / <blank divider>
    with open(filename, "r") as fh:
        while True:
            line = fh.readline()  # read node type, location, ...
            if not line:
                break
            fields = line.split()

            n = structs.Node()
            n.id = int(fields[1])
            n.type = getNodeType(fields[2])

            nums = [int(i) for i in re.findall(r'\d+', line)]
            # The index is the pad position, pin position or track number
            # depending on whether this is a pin on an I/O block, a cluster
            # or a channel, so the values sit at different positions.
            if n.type < 5 or len(nums) < 5:
                n.location = (nums[1], nums[2])
                n.index = nums[3]
            else:
                n.location = (nums[1], nums[2], nums[3], nums[4])
                n.index = nums[5]

            # set the direction for channel nodes (CHANX == 5, CHANY == 6);
            # note: compare with ==, not "is" (int identity is a CPython
            # small-int accident, not a guarantee)
            if n.type > 4:
                direction = line.split(' ')[-3]
                if direction == 'INC_DIRECTION':
                    # north or east
                    n.dir = const.E if n.type == 5 else const.N
                else:
                    n.dir = const.W if n.type == 5 else const.S

            # read the edge ids and append them to the edge list of the node
            line = fh.readline()  # read edges
            n.edges = [int(i) for i in re.findall(r'\d+', line)[1:]]

            # skip the rest of the information
            fh.readline()  # skip switch types
            fh.readline()  # skip (occupancy?) and capacity
            fh.readline()  # skip R and C
            fh.readline()  # skip cost index
            fh.readline()  # skip newline dividing records

            # clusterx,clustery are the maximal value of location coords
            clusterx = max(clusterx, n.location[0])
            clustery = max(clustery, n.location[1])

            # append the node to the node graph, supplying an id to check
            # if this id was added before
            nodeGraph.add(n, n.id)

    return (clusterx, clustery, nodeGraph)


## parse the rr_graph xml file
# @param filename the path to the rr_graph file.
# @return a tuple (clusterx,clustery,nodegraph) where
# clusterx,clustery are the maximal x and y local coordinates and nodegraph
# is a NodeGraph object
def parseGraphXml(filename):
    clusterx = 0
    clustery = 0
    nodeGraph = structs.NodeGraph()

    tree = ET.parse(filename)
    root = tree.getroot()

    for node in root.findall('./rr_nodes/node'):
        # create a node for the NodeGraph and copy the attributes
        n = structs.Node()
        n.id = int(node.get('id'))
        n.type = getNodeType(node.get('type'))

        # The index is the pad position, pin position or track number
        # depending on the node type. Channels with length greater than one
        # have different start and end locations.
        location = node.find('loc')
        n.index = int(location.get('ptc'))
        if n.type < 5:
            xCoord = int(location.get('xlow'))
            yCoord = int(location.get('ylow'))
            n.location = (xCoord, yCoord)
        else:
            # we have a channel: get start and end location
            xStart = int(location.get('xlow'))
            yStart = int(location.get('ylow'))
            xEnd = int(location.get('xhigh'))
            yEnd = int(location.get('yhigh'))
            n.location = (xStart, yStart, xEnd, yEnd)

        # set the direction of the node.
        # TODO: there is no bidirectional support implemented yet
        if n.type > 4:
            direction = node.get('direction')
            if direction == 'INC_DIR':
                # north or east
                n.dir = const.E if n.type == 5 else const.N
            else:
                n.dir = const.W if n.type == 5 else const.S

        # set the edges: search all edges that have this node as source
        edges = root.findall(
            './rr_edges/edge[@src_node=\'' + str(n.id) + '\']')
        for edge in edges:
            n.edges.append(int(edge.get('sink_node')))

        # clusterx,clustery are the maximal value of location coords
        clusterx = max(clusterx, n.location[0])
        clustery = max(clustery, n.location[1])

        # append the node to the node graph, providing an id to check
        # if this id was added before
        nodeGraph.add(n, n.id)

    return (clusterx, clustery, nodeGraph)


# some simple unit test (print(x) is valid on both Python 2 and 3 for a
# single argument, unlike the former "print x" statements)
def simpleTest():
    (clusterx, clustery, nodeGraph) = parseGraphXml('rr_graph.echo')
    print(clusterx)
    print(clustery)
    for node in nodeGraph.nodes:
        print(node.id)


def main():
    simpleTest()


if __name__ == '__main__':
    main()
from great_expectations.expectations.expectation import MulticolumnMapExpectation
from great_expectations.expectations.util import render_evaluation_parameter_string
from great_expectations.render.renderer.renderer import renderer
from great_expectations.render.types import RenderedStringTemplateContent
from great_expectations.render.util import (
    num_to_str,
    parse_row_condition_string_pandas_engine,
    substitute_none_for_missing,
)


class ExpectCompoundColumnsToBeUnique(MulticolumnMapExpectation):
    # This dictionary contains metadata for display in the public gallery
    library_metadata = {
        "maturity": "production",
        "package": "great_expectations",
        "tags": [
            "core expectation",
            "multi-column expectation",
            "needs migration to modular expectations api",
        ],
        "contributors": [
            "@great_expectations",
        ],
        "requirements": [],
    }

    map_metric = "compound_columns.unique"
    default_kwarg_values = {
        "row_condition": None,
        "condition_parser": None,  # we expect this to be explicitly set whenever a row_condition is passed
        "ignore_row_if": "all_values_are_missing",
        "result_format": "BASIC",
        "include_config": True,
        "catch_exceptions": False,
    }
    args_keys = ("column_list",)

    @classmethod
    def _atomic_prescriptive_template(
        cls,
        configuration=None,
        result=None,
        language=None,
        runtime_configuration=None,
        **kwargs,
    ):
        # Build the (template, params-with-schema, styling) triple used by
        # the atomic prescriptive renderer.
        runtime_configuration = runtime_configuration or {}
        styling = runtime_configuration.get("styling")
        params = substitute_none_for_missing(
            configuration.kwargs,
            [
                "column_list",
                "ignore_row_if",
                "row_condition",
                "condition_parser",
                "mostly",
            ],
        )
        params_with_json_schema = {
            "column_list": {
                "schema": {"type": "array"},
                "value": params.get("column_list"),
            },
            "ignore_row_if": {
                "schema": {"type": "string"},
                "value": params.get("ignore_row_if"),
            },
            "row_condition": {
                "schema": {"type": "string"},
                "value": params.get("row_condition"),
            },
            "condition_parser": {
                "schema": {"type": "string"},
                "value": params.get("condition_parser"),
            },
            "mostly": {
                "schema": {"type": "number"},
                "value": params.get("mostly"),
            },
            "mostly_pct": {
                "schema": {"type": "number"},
                "value": params.get("mostly_pct"),
            },
        }

        if params["mostly"] is not None:
            params_with_json_schema["mostly_pct"]["value"] = num_to_str(
                params["mostly"] * 100, precision=15, no_scientific=True
            )
        mostly_str = (
            ""
            if params.get("mostly") is None
            else ", at least $mostly_pct % of the time"
        )

        template_str = (
            f"Values for given compound columns must be unique together{mostly_str}: "
        )

        column_list = params.get("column_list") if params.get("column_list") else []
        if len(column_list) > 0:
            for idx, val in enumerate(column_list[:-1]):
                param = f"$column_list_{idx}"
                template_str += f"{param}, "
                params[param] = val

            last_idx = len(column_list) - 1
            last_param = f"$column_list_{last_idx}"
            template_str += last_param
            params[last_param] = column_list[last_idx]

        if params["row_condition"] is not None:
            (
                conditional_template_str,
                conditional_params,
            ) = parse_row_condition_string_pandas_engine(
                params["row_condition"], with_schema=True
            )
            template_str = (
                conditional_template_str
                + ", then "
                + template_str[0].lower()
                + template_str[1:]
            )
            params_with_json_schema.update(conditional_params)

        return (template_str, params_with_json_schema, styling)

    @classmethod
    @renderer(renderer_type="renderer.prescriptive")
    @render_evaluation_parameter_string
    def _prescriptive_renderer(
        cls,
        configuration=None,
        result=None,
        language=None,
        runtime_configuration=None,
        **kwargs,
    ):
        runtime_configuration = runtime_configuration or {}
        styling = runtime_configuration.get("styling")
        params = substitute_none_for_missing(
            configuration.kwargs,
            [
                "column_list",
                "ignore_row_if",
                "row_condition",
                "condition_parser",
                "mostly",
            ],
        )

        if params["mostly"] is not None:
            params["mostly_pct"] = num_to_str(
                params["mostly"] * 100, precision=15, no_scientific=True
            )
        mostly_str = (
            ""
            if params.get("mostly") is None
            else ", at least $mostly_pct % of the time"
        )

        template_str = (
            f"Values for given compound columns must be unique together{mostly_str}: "
        )

        # FIX: guard against a missing/empty column_list, matching the
        # behavior of _atomic_prescriptive_template above. Previously this
        # indexed params["column_list"] unconditionally and crashed
        # (TypeError on None, IndexError on []).
        column_list = params.get("column_list") if params.get("column_list") else []
        if len(column_list) > 0:
            for idx in range(len(column_list) - 1):
                template_str += f"$column_list_{str(idx)}, "
                params[f"column_list_{str(idx)}"] = column_list[idx]

            last_idx = len(column_list) - 1
            template_str += f"$column_list_{str(last_idx)}"
            params[f"column_list_{str(last_idx)}"] = column_list[last_idx]

        if params["row_condition"] is not None:
            (
                conditional_template_str,
                conditional_params,
            ) = parse_row_condition_string_pandas_engine(params["row_condition"])
            template_str = (
                conditional_template_str
                + ", then "
                + template_str[0].lower()
                + template_str[1:]
            )
            params.update(conditional_params)

        return [
            RenderedStringTemplateContent(
                **{
                    "content_block_type": "string_template",
                    "string_template": {
                        "template": template_str,
                        "params": params,
                        "styling": styling,
                    },
                }
            )
        ]
#!/usr/bin/env python
"""
Copyright 2016 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import asyncore
import gc
import logging
import platform
try:
    from Queue import Queue
    from Queue import Empty
except ImportError:
    from queue import Queue
    from queue import Empty
import re
import signal
import socket
import sys
import threading
import time

server = None
in_pipe = None
out_pipe = None
must_exit = False
options = None
dest_addresses = None
connections = {}
dns_cache = {}
port_mappings = None
map_localhost = False
needs_flush = False
flush_pipes = False
last_activity = None
last_client_disconnected = None
REMOVE_TCP_OVERHEAD = 1460.0 / 1500.0
lock = threading.Lock()
background_activity_count = 0
# FIX: time.clock was removed in Python 3.8, so referencing it directly
# crashed at import time on Windows under modern Pythons; fall back to
# time.perf_counter there. A monotonic clock still overrides this below.
if sys.platform == "win32":
    current_time = getattr(time, "clock", None) or time.perf_counter
else:
    current_time = time.time
try:
    import monotonic
    current_time = monotonic.monotonic
except Exception:
    pass


def PrintMessage(msg):
    # Print the message to stdout & flush to make sure that the message is not
    # buffered when tsproxy is run as a subprocess.
    sys.stdout.write(msg)
    sys.stdout.flush()


########################################################################################################################
#   Traffic-shaping pipe (just passthrough for now)
########################################################################################################################
class TSPipe():
    PIPE_IN = 0
    PIPE_OUT = 1

    def __init__(self, direction, latency, kbps):
        self.direction = direction
        self.latency = latency
        self.kbps = kbps
        self.queue = Queue()
        self.last_tick = current_time()
        self.next_message = None
        self.available_bytes = .0
        # The "peer" is the side this pipe delivers to: the inbound pipe
        # delivers to the client, the outbound pipe to the server.
        self.peer = 'server'
        if self.direction == self.PIPE_IN:
            self.peer = 'client'

    def SendMessage(self, message, main_thread = True):
        global connections, in_pipe, out_pipe
        message_sent = False
        now = current_time()
        if message['message'] == 'closed':
            message['time'] = now
        else:
            message['time'] = current_time() + self.latency
        message['size'] = .0
        if 'data' in message:
            message['size'] = float(len(message['data']))
        try:
            connection_id = message['connection']
            # Send messages directly, bypassing the queues, if throttling is
            # disabled and we are on the main thread
            if main_thread and connection_id in connections and \
                    self.peer in connections[connection_id] and \
                    self.latency == 0 and self.kbps == .0:
                message_sent = self.SendPeerMessage(message)
        except:
            pass
        if not message_sent:
            try:
                self.queue.put(message)
            except:
                pass

    def SendPeerMessage(self, message):
        global last_activity, last_client_disconnected
        last_activity = current_time()
        message_sent = False
        connection_id = message['connection']
        if connection_id in connections:
            if self.peer in connections[connection_id]:
                try:
                    connections[connection_id][self.peer].handle_message(message)
                    message_sent = True
                except:
                    # Clean up any disconnected connections
                    try:
                        connections[connection_id]['server'].close()
                    except:
                        pass
                    try:
                        connections[connection_id]['client'].close()
                    except:
                        pass
                    del connections[connection_id]
                    if not connections:
                        last_client_disconnected = current_time()
                        # FIX: TSPipe has no client_id attribute (the old
                        # self.client_id raised AttributeError here, silently
                        # swallowed upstream); log the connection id instead.
                        logging.info('[{0:d}] Last connection closed'.format(connection_id))
        return message_sent

    def tick(self):
        global connections
        global flush_pipes
        next_packet_time = None
        now = current_time()
        try:
            if self.next_message is None:
                self.next_message = self.queue.get_nowait()

            # Accumulate bandwidth if an available packet/message was waiting
            # since our last tick
            if self.next_message is not None and self.kbps > .0 and \
                    self.next_message['time'] <= now:
                elapsed = now - self.last_tick
                accumulated_bytes = elapsed * self.kbps * 1000.0 / 8.0
                self.available_bytes += accumulated_bytes

            # process messages as long as the next message is sendable
            # (latency or available bytes)
            while (self.next_message is not None) and \
                    (flush_pipes or ((self.next_message['time'] <= now) and
                                     (self.kbps <= .0 or
                                      self.next_message['size'] <= self.available_bytes))):
                if self.kbps > .0:
                    self.available_bytes -= self.next_message['size']
                self.SendPeerMessage(self.next_message)
                self.next_message = None
                self.next_message = self.queue.get_nowait()
        except Empty:
            pass
        except Exception:
            logging.exception('Tick Exception')

        # Only accumulate bytes while we have messages that are ready to send
        if self.next_message is None or self.next_message['time'] > now:
            self.available_bytes = .0
            self.last_tick = now

        # Figure out how long until the next packet can be sent
        if self.next_message is not None:
            # First, just the latency
            next_packet_time = self.next_message['time'] - now
            # Additional time for bandwidth
            if self.kbps > .0:
                accumulated_bytes = self.available_bytes + \
                    next_packet_time * self.kbps * 1000.0 / 8.0
                needed_bytes = self.next_message['size'] - accumulated_bytes
                if needed_bytes > 0:
                    needed_time = needed_bytes / (self.kbps * 1000.0 / 8.0)
                    next_packet_time += needed_time

        return next_packet_time


########################################################################################################################
#   Threaded DNS resolver
######################################################################################################################## class AsyncDNS(threading.Thread): def __init__(self, client_id, hostname, port, is_localhost, result_pipe): threading.Thread.__init__(self) self.hostname = hostname self.port = port self.client_id = client_id self.is_localhost = is_localhost self.result_pipe = result_pipe def run(self): global lock, background_activity_count try: logging.debug('[{0:d}] AsyncDNS - calling getaddrinfo for {1}:{2:d}'.format(self.client_id, self.hostname, self.port)) addresses = socket.getaddrinfo(self.hostname, self.port) logging.info('[{0:d}] Resolving {1}:{2:d} Completed'.format(self.client_id, self.hostname, self.port)) except: addresses = () logging.info('[{0:d}] Resolving {1}:{2:d} Failed'.format(self.client_id, self.hostname, self.port)) message = {'message': 'resolved', 'connection': self.client_id, 'addresses': addresses, 'localhost': self.is_localhost} self.result_pipe.SendMessage(message, False) lock.acquire() if background_activity_count > 0: background_activity_count -= 1 lock.release() # open and close a local socket which will interrupt the long polling loop to process the message s = socket.socket() s.connect((server.ipaddr, server.port)) s.close() ######################################################################################################################## # TCP Client ######################################################################################################################## class TCPConnection(asyncore.dispatcher): STATE_ERROR = -1 STATE_IDLE = 0 STATE_RESOLVING = 1 STATE_CONNECTING = 2 STATE_CONNECTED = 3 def __init__(self, client_id): global options asyncore.dispatcher.__init__(self) self.client_id = client_id self.state = self.STATE_IDLE self.buffer = '' self.addr = None self.dns_thread = None self.hostname = None self.port = None self.needs_config = True self.needs_close = False self.did_resolve = False def 
SendMessage(self, type, message): message['message'] = type message['connection'] = self.client_id in_pipe.SendMessage(message) def handle_message(self, message): if message['message'] == 'data' and 'data' in message and len(message['data']): self.buffer += message['data'] if self.state == self.STATE_CONNECTED: self.handle_write() elif message['message'] == 'resolve': self.HandleResolve(message) elif message['message'] == 'connect': self.HandleConnect(message) elif message['message'] == 'closed': if len(self.buffer) == 0: self.handle_close() else: self.needs_close = True def handle_error(self): logging.warning('[{0:d}] Error'.format(self.client_id)) if self.state == self.STATE_CONNECTING: self.SendMessage('connected', {'success': False, 'address': self.addr}) def handle_close(self): global last_client_disconnected logging.info('[{0:d}] Server Connection Closed'.format(self.client_id)) self.state = self.STATE_ERROR self.close() try: if self.client_id in connections: if 'server' in connections[self.client_id]: del connections[self.client_id]['server'] if 'client' in connections[self.client_id]: self.SendMessage('closed', {}) else: del connections[self.client_id] if not connections: last_client_disconnected = current_time() logging.info('[{0:d}] Last Browser disconnected'.format(self.client_id)) except: pass def handle_connect(self): if self.state == self.STATE_CONNECTING: self.state = self.STATE_CONNECTED self.SendMessage('connected', {'success': True, 'address': self.addr}) logging.info('[{0:d}] Connected'.format(self.client_id)) self.handle_write() def writable(self): if self.state == self.STATE_CONNECTING: return True return len(self.buffer) > 0 def handle_write(self): if self.needs_config: self.needs_config = False self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 128 * 1024) self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 128 * 1024) if len(self.buffer) > 0: sent = 
self.send(self.buffer) logging.debug('[{0:d}] TCP => {1:d} byte(s)'.format(self.client_id, sent)) self.buffer = self.buffer[sent:] if self.needs_close and len(self.buffer) == 0: self.needs_close = False self.handle_close() def handle_read(self): try: while True: data = self.recv(1460) if data: if self.state == self.STATE_CONNECTED: logging.debug('[{0:d}] TCP <= {1:d} byte(s)'.format(self.client_id, len(data))) self.SendMessage('data', {'data': data}) else: return except: pass def HandleResolve(self, message): global in_pipe, map_localhost, lock, background_activity_count self.did_resolve = True is_localhost = False if 'hostname' in message: self.hostname = message['hostname'] self.port = 0 if 'port' in message: self.port = message['port'] logging.info('[{0:d}] Resolving {1}:{2:d}'.format(self.client_id, self.hostname, self.port)) if self.hostname == 'localhost': self.hostname = '127.0.0.1' if self.hostname == '127.0.0.1': logging.info('[{0:d}] Connection to localhost detected'.format(self.client_id)) is_localhost = True if (dest_addresses is not None) and (not is_localhost or map_localhost): logging.info('[{0:d}] Resolving {1}:{2:d} to mapped address {3}'.format(self.client_id, self.hostname, self.port, dest_addresses)) self.SendMessage('resolved', {'addresses': dest_addresses, 'localhost': False}) else: lock.acquire() background_activity_count += 1 lock.release() self.state = self.STATE_RESOLVING self.dns_thread = AsyncDNS(self.client_id, self.hostname, self.port, is_localhost, in_pipe) self.dns_thread.start() def HandleConnect(self, message): global map_localhost if 'addresses' in message and len(message['addresses']): self.state = self.STATE_CONNECTING is_localhost = False if 'localhost' in message: is_localhost = message['localhost'] elif not self.did_resolve and message['addresses'][0] == '127.0.0.1': logging.info('[{0:d}] Connection to localhost detected'.format(self.client_id)) is_localhost = True if (dest_addresses is not None) and (not is_localhost or 
map_localhost): self.addr = dest_addresses[0] else: self.addr = message['addresses'][0] self.create_socket(self.addr[0], socket.SOCK_STREAM) addr = self.addr[4][0] if not is_localhost or map_localhost: port = GetDestPort(message['port']) else: port = message['port'] logging.info('[{0:d}] Connecting to {1}:{2:d}'.format(self.client_id, addr, port)) self.connect((addr, port)) ######################################################################################################################## # Socks5 Server ######################################################################################################################## class Socks5Server(asyncore.dispatcher): def __init__(self, host, port): asyncore.dispatcher.__init__(self) self.create_socket(socket.AF_INET, socket.SOCK_STREAM) try: self.set_reuse_addr() self.bind((host, port)) self.listen(socket.SOMAXCONN) self.ipaddr, self.port = self.socket.getsockname() self.current_client_id = 0 except: PrintMessage("Unable to listen on {0}:{1}. 
Is the port already in use?".format(host, port)) exit(1) def handle_accept(self): global connections, last_client_disconnected pair = self.accept() if pair is not None: last_client_disconnected = None sock, addr = pair self.current_client_id += 1 logging.info('[{0:d}] Incoming connection from {1}'.format(self.current_client_id, repr(addr))) connections[self.current_client_id] = { 'client' : Socks5Connection(sock, self.current_client_id), 'server' : None } # Socks5 reference: https://en.wikipedia.org/wiki/SOCKS#SOCKS5 class Socks5Connection(asyncore.dispatcher): STATE_ERROR = -1 STATE_WAITING_FOR_HANDSHAKE = 0 STATE_WAITING_FOR_CONNECT_REQUEST = 1 STATE_RESOLVING = 2 STATE_CONNECTING = 3 STATE_CONNECTED = 4 def __init__(self, connected_socket, client_id): global options asyncore.dispatcher.__init__(self, connected_socket) self.client_id = client_id self.state = self.STATE_WAITING_FOR_HANDSHAKE self.ip = None self.addresses = None self.hostname = None self.port = None self.requested_address = None self.buffer = '' self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 128 * 1024) self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 128 * 1024) self.needs_close = False def SendMessage(self, type, message): message['message'] = type message['connection'] = self.client_id out_pipe.SendMessage(message) def handle_message(self, message): if message['message'] == 'data' and 'data' in message and len(message['data']) > 0: self.buffer += message['data'] if self.state == self.STATE_CONNECTED: self.handle_write() elif message['message'] == 'resolved': self.HandleResolved(message) elif message['message'] == 'connected': self.HandleConnected(message) self.handle_write() elif message['message'] == 'closed': if len(self.buffer) == 0: logging.info('[{0:d}] Server connection close being processed, closing Browser connection'.format(self.client_id)) self.handle_close() else: logging.info('[{0:d}] Server 
connection close being processed, queuing browser connection close'.format(self.client_id)) self.needs_close = True def writable(self): return len(self.buffer) > 0 def handle_write(self): if len(self.buffer) > 0: sent = self.send(self.buffer) logging.debug('[{0:d}] SOCKS <= {1:d} byte(s)'.format(self.client_id, sent)) self.buffer = self.buffer[sent:] if self.needs_close and len(self.buffer) == 0: logging.info('[{0:d}] queued browser connection close being processed, closing Browser connection'.format(self.client_id)) self.needs_close = False self.handle_close() def handle_read(self): global connections global dns_cache try: while True: # Consume in up-to packet-sized chunks (TCP packet payload as 1460 bytes from 1500 byte ethernet frames) data = self.recv(1460) if data: data_len = len(data) if self.state == self.STATE_CONNECTED: logging.debug('[{0:d}] SOCKS => {1:d} byte(s)'.format(self.client_id, data_len)) self.SendMessage('data', {'data': data}) elif self.state == self.STATE_WAITING_FOR_HANDSHAKE: self.state = self.STATE_ERROR #default to an error state, set correctly if things work out if data_len >= 2 and ord(data[0]) == 0x05: supports_no_auth = False auth_count = ord(data[1]) if data_len == auth_count + 2: for i in range(auth_count): offset = i + 2 if ord(data[offset]) == 0: supports_no_auth = True if supports_no_auth: # Respond with a message that "No Authentication" was agreed to logging.info('[{0:d}] New Socks5 client'.format(self.client_id)) response = chr(0x05) + chr(0x00) self.state = self.STATE_WAITING_FOR_CONNECT_REQUEST self.buffer += response self.handle_write() elif self.state == self.STATE_WAITING_FOR_CONNECT_REQUEST: self.state = self.STATE_ERROR #default to an error state, set correctly if things work out if data_len >= 10 and ord(data[0]) == 0x05 and ord(data[2]) == 0x00: if ord(data[1]) == 0x01: #TCP connection (only supported method for now) connections[self.client_id]['server'] = TCPConnection(self.client_id) self.requested_address = 
data[3:] port_offset = 0 if ord(data[3]) == 0x01: port_offset = 8 self.ip = '{0:d}.{1:d}.{2:d}.{3:d}'.format(ord(data[4]), ord(data[5]), ord(data[6]), ord(data[7])) elif ord(data[3]) == 0x03: name_len = ord(data[4]) if data_len >= 6 + name_len: port_offset = 5 + name_len self.hostname = data[5:5 + name_len] elif ord(data[3]) == 0x04 and data_len >= 22: port_offset = 20 self.ip = '' for i in range(16): self.ip += '{0:02x}'.format(ord(data[4 + i])) if i % 2 and i < 15: self.ip += ':' if port_offset and connections[self.client_id]['server'] is not None: self.port = 256 * ord(data[port_offset]) + ord(data[port_offset + 1]) if self.port: if self.ip is None and self.hostname is not None: if dns_cache is not None and self.hostname in dns_cache: self.state = self.STATE_CONNECTING cache_entry = dns_cache[self.hostname] self.addresses = cache_entry['addresses'] self.SendMessage('connect', {'addresses': self.addresses, 'port': self.port, 'localhost': cache_entry['localhost']}) else: self.state = self.STATE_RESOLVING self.SendMessage('resolve', {'hostname': self.hostname, 'port': self.port}) elif self.ip is not None: self.state = self.STATE_CONNECTING logging.debug('[{0:d}] Socks Connect - calling getaddrinfo for {1}:{2:d}'.format(self.client_id, self.ip, self.port)) self.addresses = socket.getaddrinfo(self.ip, self.port) self.SendMessage('connect', {'addresses': self.addresses, 'port': self.port}) else: return except: pass def handle_close(self): global last_client_disconnected logging.info('[{0:d}] Browser Connection Closed by browser'.format(self.client_id)) self.state = self.STATE_ERROR self.close() try: if self.client_id in connections: if 'client' in connections[self.client_id]: del connections[self.client_id]['client'] if 'server' in connections[self.client_id]: self.SendMessage('closed', {}) else: del connections[self.client_id] if not connections: last_client_disconnected = current_time() logging.info('[{0:d}] Last Browser disconnected'.format(self.client_id)) except: 
pass def HandleResolved(self, message): global dns_cache if self.state == self.STATE_RESOLVING: if 'addresses' in message and len(message['addresses']): self.state = self.STATE_CONNECTING self.addresses = message['addresses'] if dns_cache is not None: dns_cache[self.hostname] = {'addresses': self.addresses, 'localhost': message['localhost']} logging.debug('[{0:d}] Resolved {1}, Connecting'.format(self.client_id, self.hostname)) self.SendMessage('connect', {'addresses': self.addresses, 'port': self.port, 'localhost': message['localhost']}) else: # Send host unreachable error self.state = self.STATE_ERROR self.buffer += chr(0x05) + chr(0x04) + self.requested_address self.handle_write() def HandleConnected(self, message): if 'success' in message and self.state == self.STATE_CONNECTING: response = chr(0x05) if message['success']: response += chr(0x00) logging.debug('[{0:d}] Connected to {1}'.format(self.client_id, self.hostname)) self.state = self.STATE_CONNECTED else: response += chr(0x04) self.state = self.STATE_ERROR response += chr(0x00) response += self.requested_address self.buffer += response self.handle_write() ######################################################################################################################## # stdin command processor ######################################################################################################################## class CommandProcessor(): def __init__(self): thread = threading.Thread(target = self.run, args=()) thread.daemon = True thread.start() def run(self): global must_exit while not must_exit: for line in iter(sys.stdin.readline, ''): self.ProcessCommand(line.strip()) def ProcessCommand(self, input): global in_pipe global out_pipe global needs_flush global REMOVE_TCP_OVERHEAD global port_mappings global server if len(input): ok = False try: command = input.split() if len(command) and len(command[0]): if command[0].lower() == 'flush': ok = True elif command[0].lower() == 'set' and len(command) >= 
3: if command[1].lower() == 'rtt' and len(command[2]): rtt = float(command[2]) latency = rtt / 2000.0 in_pipe.latency = latency out_pipe.latency = latency ok = True elif command[1].lower() == 'inkbps' and len(command[2]): in_pipe.kbps = float(command[2]) * REMOVE_TCP_OVERHEAD ok = True elif command[1].lower() == 'outkbps' and len(command[2]): out_pipe.kbps = float(command[2]) * REMOVE_TCP_OVERHEAD ok = True elif command[1].lower() == 'mapports' and len(command[2]): SetPortMappings(command[2]) ok = True elif command[0].lower() == 'reset' and len(command) >= 2: if command[1].lower() == 'rtt' or command[1].lower() == 'all': in_pipe.latency = 0 out_pipe.latency = 0 ok = True if command[1].lower() == 'inkbps' or command[1].lower() == 'all': in_pipe.kbps = 0 ok = True if command[1].lower() == 'outkbps' or command[1].lower() == 'all': out_pipe.kbps = 0 ok = True if command[1].lower() == 'mapports' or command[1].lower() == 'all': port_mappings = {} ok = True if ok: needs_flush = True except: pass if not ok: PrintMessage('ERROR') # open and close a local socket which will interrupt the long polling loop to process the flush if needs_flush: s = socket.socket() s.connect((server.ipaddr, server.port)) s.close() ######################################################################################################################## # Main Entry Point ######################################################################################################################## def main(): global server global options global in_pipe global out_pipe global dest_addresses global port_mappings global map_localhost global dns_cache import argparse global REMOVE_TCP_OVERHEAD parser = argparse.ArgumentParser(description='Traffic-shaping socks5 proxy.', prog='tsproxy') parser.add_argument('-v', '--verbose', action='count', help="Increase verbosity (specify multiple times for more). 
-vvvv for full debug output.") parser.add_argument('--logfile', help="Write log messages to given file instead of stdout.") parser.add_argument('-b', '--bind', default='localhost', help="Server interface address (defaults to localhost).") parser.add_argument('-p', '--port', type=int, default=1080, help="Server port (defaults to 1080, use 0 for randomly assigned).") parser.add_argument('-r', '--rtt', type=float, default=.0, help="Round Trip Time Latency (in ms).") parser.add_argument('-i', '--inkbps', type=float, default=.0, help="Download Bandwidth (in 1000 bits/s - Kbps).") parser.add_argument('-o', '--outkbps', type=float, default=.0, help="Upload Bandwidth (in 1000 bits/s - Kbps).") parser.add_argument('-w', '--window', type=int, default=10, help="Emulated TCP initial congestion window (defaults to 10).") parser.add_argument('-d', '--desthost', help="Redirect all outbound connections to the specified host.") parser.add_argument('-m', '--mapports', help="Remap outbound ports. Comma-separated list of original:new with * as a wildcard. 
--mapports '443:8443,*:8080'") parser.add_argument('-l', '--localhost', action='store_true', default=False, help="Include connections already destined for localhost/127.0.0.1 in the host and port remapping.") parser.add_argument('-n', '--nodnscache', action='store_true', default=False, help="Disable internal DNS cache.") parser.add_argument('-f', '--flushdnscache', action='store_true', default=False, help="Automatically flush the DNS cache 500ms after the last client disconnects.") options = parser.parse_args() # Set up logging log_level = logging.CRITICAL if options.verbose == 1: log_level = logging.ERROR elif options.verbose == 2: log_level = logging.WARNING elif options.verbose == 3: log_level = logging.INFO elif options.verbose >= 4: log_level = logging.DEBUG if options.logfile is not None: logging.basicConfig(filename=options.logfile, level=log_level, format="%(asctime)s.%(msecs)03d - %(message)s", datefmt="%H:%M:%S") else: logging.basicConfig(level=log_level, format="%(asctime)s.%(msecs)03d - %(message)s", datefmt="%H:%M:%S") # Parse any port mappings if options.mapports: SetPortMappings(options.mapports) if options.nodnscache: dns_cache = None map_localhost = options.localhost # Resolve the address for a rewrite destination host if one was specified if options.desthost: logging.debug('Startup - calling getaddrinfo for {0}:{1:d}'.format(options.desthost, GetDestPort(80))) dest_addresses = socket.getaddrinfo(options.desthost, GetDestPort(80)) # Set up the pipes. 
1/2 of the latency gets applied in each direction (and /1000 to convert to seconds) in_pipe = TSPipe(TSPipe.PIPE_IN, options.rtt / 2000.0, options.inkbps * REMOVE_TCP_OVERHEAD) out_pipe = TSPipe(TSPipe.PIPE_OUT, options.rtt / 2000.0, options.outkbps * REMOVE_TCP_OVERHEAD) signal.signal(signal.SIGINT, signal_handler) server = Socks5Server(options.bind, options.port) command_processor = CommandProcessor() PrintMessage('Started Socks5 proxy server on {0}:{1:d}\nHit Ctrl-C to exit.'.format(server.ipaddr, server.port)) run_loop() def signal_handler(signal, frame): global server global must_exit logging.error('Exiting...') must_exit = True del server # Wrapper around the asyncore loop that lets us poll the in/out pipes every 1ms def run_loop(): global must_exit global in_pipe global out_pipe global needs_flush global flush_pipes global last_activity global last_client_disconnected global dns_cache winmm = None # increase the windows timer resolution to 1ms if platform.system() == "Windows": try: import ctypes winmm = ctypes.WinDLL('winmm') winmm.timeBeginPeriod(1) except: pass last_activity = current_time() last_check = current_time() # disable gc to avoid pauses during traffic shaping/proxying gc.disable() out_interval = None in_interval = None while not must_exit: # Tick every 1ms if traffic-shaping is enabled and we have data or are doing background dns lookups, every 1 second otherwise lock.acquire() tick_interval = 0.001 if out_interval is not None: tick_interval = max(tick_interval, out_interval) if in_interval is not None: tick_interval = max(tick_interval, in_interval) if background_activity_count == 0: if in_pipe.next_message is None and in_pipe.queue.empty() and out_pipe.next_message is None and out_pipe.queue.empty(): tick_interval = 1.0 elif in_pipe.kbps == .0 and in_pipe.latency == 0 and out_pipe.kbps == .0 and out_pipe.latency == 0: tick_interval = 1.0 lock.release() logging.debug("Tick Time: %0.3f", tick_interval) asyncore.poll(tick_interval, 
asyncore.socket_map) if needs_flush: flush_pipes = True dns_cache = {} needs_flush = False out_interval = out_pipe.tick() in_interval = in_pipe.tick() if flush_pipes: PrintMessage('OK') flush_pipes = False now = current_time() # Clear the DNS cache 500ms after the last client disconnects if options.flushdnscache and last_client_disconnected is not None and dns_cache: if now - last_client_disconnected >= 0.5: dns_cache = {} last_client_disconnected = None logging.debug("Flushed DNS cache") # Every 500 ms check to see if it is a good time to do a gc if now - last_check >= 0.5: last_check = now # manually gc after 5 seconds of idle if now - last_activity >= 5: last_activity = now logging.debug("Triggering manual GC") gc.collect() if winmm is not None: winmm.timeEndPeriod(1) def GetDestPort(port): global port_mappings if port_mappings is not None: src_port = str(port) if src_port in port_mappings: return port_mappings[src_port] elif 'default' in port_mappings: return port_mappings['default'] return port def SetPortMappings(map_string): global port_mappings port_mappings = {} map_string = map_string.strip('\'" \t\r\n') for pair in map_string.split(','): (src, dest) = pair.split(':') if src == '*': port_mappings['default'] = int(dest) logging.debug("Default port mapped to port {0}".format(dest)) else: logging.debug("Port {0} mapped to port {1}".format(src, dest)) port_mappings[src] = int(dest) if '__main__' == __name__: main()
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012, Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Rohit Agarwalla, Cisco Systems, Inc.

from sqlalchemy import func
from sqlalchemy.orm import exc

from quantum.common import exceptions as q_exc
from quantum.plugins.linuxbridge import plugin_configuration as conf
from quantum.plugins.linuxbridge.common import exceptions as c_exc
from quantum.plugins.linuxbridge.db import l2network_models

import logging
import quantum.db.api as db


LOG = logging.getLogger(__name__)


def initialize():
    'Establish database connection and load models'
    # Build the connection string from plugin configuration; sqlite is
    # in-memory only and used for tests.
    if conf.DB_CONNECTION == 'sqlite':
        options = {"sql_connection": "sqlite://"}
    else:
        options = {"sql_connection": "mysql://%s:%s@%s:%s/%s" % (conf.DB_USER,
        conf.DB_PASS, conf.DB_HOST, conf.DB_PORT, conf.DB_NAME)}
    db.configure_db(options)
    create_vlanids()


def create_vlanids():
    """Prepopulates the vlan_bindings table"""
    LOG.debug("create_vlanids() called")
    session = db.get_session()
    start = int(conf.VLAN_START)
    end = int(conf.VLAN_END)
    try:
        # .one() raises MultipleResultsFound when the table is already
        # populated, and NoResultFound when it is empty.
        vlanid = session.query(l2network_models.VlanID).\
          one()
    except exc.MultipleResultsFound:
        """
        TODO (Sumit): Salvatore rightly points out that this will not handle
        change in VLAN ID range across server reboots. This is currently not
        a supported feature. This logic will need to change if this feature
        has to be supported.
        Per Dan's suggestion we just throw a server exception for now.
        """
        current_start = \
            int(session.query(func.min(l2network_models.VlanID.vlan_id)).
                one()[0])
        current_end = \
            int(session.query(func.max(l2network_models.VlanID.vlan_id)).
                one()[0])
        if current_start != start or current_end != end:
            LOG.debug("Old VLAN range %s-%s" % (current_start, current_end))
            LOG.debug("New VLAN range %s-%s" % (start, end))
            raise c_exc.UnableToChangeVlanRange(range_start=current_start,
                                                range_end=current_end)
    except exc.NoResultFound:
        # Empty table: seed one row per VLAN id in the configured range.
        LOG.debug("Setting VLAN range to %s-%s" % (start, end))
        while start <= end:
            vlanid = l2network_models.VlanID(start)
            session.add(vlanid)
            start += 1
        session.flush()
    return


def get_all_vlanids():
    """Gets all the vlanids"""
    LOG.debug("get_all_vlanids() called")
    session = db.get_session()
    try:
        vlanids = session.query(l2network_models.VlanID).\
          all()
        return vlanids
    except exc.NoResultFound:
        # NOTE(review): Query.all() never raises NoResultFound (it returns
        # an empty list), so this branch appears to be dead code.
        return []


def is_vlanid_used(vlan_id):
    """Checks if a vlanid is in use"""
    LOG.debug("is_vlanid_used() called")
    session = db.get_session()
    try:
        vlanid = session.query(l2network_models.VlanID).\
          filter_by(vlan_id=vlan_id).\
          one()
        return vlanid["vlan_used"]
    except exc.NoResultFound:
        raise c_exc.VlanIDNotFound(vlan_id=vlan_id)


def release_vlanid(vlan_id):
    """Sets the vlanid state to be unused"""
    LOG.debug("release_vlanid() called")
    session = db.get_session()
    try:
        vlanid = session.query(l2network_models.VlanID).\
          filter_by(vlan_id=vlan_id).\
          one()
        vlanid["vlan_used"] = False
        session.merge(vlanid)
        session.flush()
        return vlanid["vlan_used"]
    except exc.NoResultFound:
        raise c_exc.VlanIDNotFound(vlan_id=vlan_id)
    # NOTE(review): this trailing return is unreachable — both paths above
    # either return or raise.
    return


def delete_vlanid(vlan_id):
    """Deletes a vlanid entry from db"""
    LOG.debug("delete_vlanid() called")
    session = db.get_session()
    try:
        vlanid = session.query(l2network_models.VlanID).\
          filter_by(vlan_id=vlan_id).\
          one()
        session.delete(vlanid)
        session.flush()
        return vlanid
    except exc.NoResultFound:
        raise c_exc.VlanIDNotFound(vlan_id=vlan_id)


def reserve_vlanid():
    """Reserves the first unused vlanid"""
    LOG.debug("reserve_vlanid() called")
    session = db.get_session()
    try:
        # If the table has never been seeded, populate it first.
        rvlan = session.query(l2network_models.VlanID).\
          first()
        if not rvlan:
            create_vlanids()

        rvlan = session.query(l2network_models.VlanID).\
          filter_by(vlan_used=False).\
          first()
        if not rvlan:
            raise c_exc.VlanIDNotAvailable()

        # Re-fetch by id and mark it used.
        rvlanid = session.query(l2network_models.VlanID).\
          filter_by(vlan_id=rvlan["vlan_id"]).\
          one()
        rvlanid["vlan_used"] = True
        session.merge(rvlanid)
        session.flush()
        return rvlan["vlan_id"]
    except exc.NoResultFound:
        raise c_exc.VlanIDNotAvailable()


def get_all_vlanids_used():
    """Gets all the vlanids used"""
    LOG.debug("get_all_vlanids() called")
    session = db.get_session()
    try:
        vlanids = session.query(l2network_models.VlanID).\
          filter_by(vlan_used=True).\
          all()
        return vlanids
    except exc.NoResultFound:
        # NOTE(review): dead branch — Query.all() returns [] instead of raising.
        return []


def get_all_vlan_bindings():
    """Lists all the vlan to network associations"""
    LOG.debug("get_all_vlan_bindings() called")
    session = db.get_session()
    try:
        bindings = session.query(l2network_models.VlanBinding).\
          all()
        return bindings
    except exc.NoResultFound:
        return []


def get_vlan_binding(netid):
    """Lists the vlan given a network_id"""
    LOG.debug("get_vlan_binding() called")
    session = db.get_session()
    try:
        binding = session.query(l2network_models.VlanBinding).\
          filter_by(network_id=netid).\
          one()
        return binding
    except exc.NoResultFound:
        raise c_exc.NetworkVlanBindingNotFound(network_id=netid)


def add_vlan_binding(vlanid, netid):
    """Adds a vlan to network association"""
    LOG.debug("add_vlan_binding() called")
    session = db.get_session()
    try:
        # A hit here means the VLAN is already bound: reject the request.
        binding = session.query(l2network_models.VlanBinding).\
          filter_by(vlan_id=vlanid).\
          one()
        raise c_exc.NetworkVlanBindingAlreadyExists(vlan_id=vlanid,
                                                    network_id=netid)
    except exc.NoResultFound:
        binding = l2network_models.VlanBinding(vlanid, netid)
        session.add(binding)
        session.flush()
        return binding


def remove_vlan_binding(netid):
    """Removes a vlan to network association"""
    LOG.debug("remove_vlan_binding() called")
    session = db.get_session()
    try:
        binding = session.query(l2network_models.VlanBinding).\
          filter_by(network_id=netid).\
          one()
        session.delete(binding)
        session.flush()
        return binding
    except exc.NoResultFound:
        # Removing a non-existent binding is treated as a no-op.
        pass


def update_vlan_binding(netid, newvlanid=None):
    """Updates a vlan to network association"""
    LOG.debug("update_vlan_binding() called")
    session = db.get_session()
    try:
        binding = session.query(l2network_models.VlanBinding).\
          filter_by(network_id=netid).\
          one()
        if newvlanid:
            binding["vlan_id"] = newvlanid
        session.merge(binding)
        session.flush()
        return binding
    except exc.NoResultFound:
        raise q_exc.NetworkNotFound(net_id=netid)
#! /usr/bin/env python

import numpy
import six

from landlab.grid.voronoi import VoronoiDelaunayGrid


class HexModelGrid(VoronoiDelaunayGrid):
    """A grid of hexagonal cells.

    This inherited class implements a regular 2D grid with hexagonal cells and
    triangular patches. It is a special type of VoronoiDelaunay grid in which
    the initial set of points is arranged in a triangular/hexagonal lattice.

    Examples
    --------
    >>> from landlab import HexModelGrid
    >>> hmg = HexModelGrid(3, 2, 1.0)
    >>> hmg.number_of_nodes
    7
    """

    def __init__(self, base_num_rows=0, base_num_cols=0, dx=1.0,
                 orientation='horizontal', shape='hex', reorient_links=False,
                 **kwds):
        """Create a grid of hexagonal cells.

        Create a regular 2D grid with hexagonal cells and triangular patches.
        It is a special type of VoronoiDelaunay grid in which the initial set
        of points is arranged in a triangular/hexagonal lattice.

        Parameters
        ----------
        base_num_rows : int
            Number of rows of nodes in the left column.
        base_num_cols : int
            Number of nodes on the first row.
        dx : float, optional
            Node spacing.
        orientation : string, optional
            One of the 3 cardinal directions in the grid, either 'horizontal'
            (default) or 'vertical'

        Returns
        -------
        HexModelGrid
            A newly-created grid.

        Examples
        --------
        Create a hex grid with 2 rows of nodes. The first and third rows will
        have 2 nodes, and the second row will have 3 nodes.

        >>> from landlab import HexModelGrid
        >>> hmg = HexModelGrid(3, 2, 1.0)
        >>> hmg.number_of_nodes
        7
        """
        # Set number of nodes, and initialize if caller has given dimensions
        #self._num_nodes = num_rows * num_cols
        if base_num_rows * base_num_cols > 0:
            self._initialize(base_num_rows, base_num_cols, dx, orientation,
                             shape, reorient_links)
        super(HexModelGrid, self).__init__(**kwds)

    def _initialize(self, base_num_rows, base_num_cols, dx, orientation,
                    shape, reorient_links=False):
        """
        Sets up a hexagonal grid with cell spacing dx and
        (by default) regular boundaries (that is, all perimeter cells are
        boundaries and all interior cells are active).

        Parameters
        ----------
        base_num_rows : int
            Number of rows along left side of grid
        base_num_cols : int
            Number of columns along bottom side of grid
        dx : float
            Distance between nodes
        orientation : string
            Either 'horizontal' (default in __init__) or 'vertical'
        shape : string
            Either 'hex' (default in __init__) or 'rect'
        reorient_links : bool
            Whether or not to re-orient all links to point between -45 deg
            and +135 deg clockwise from "north" (i.e., along y axis)

        Returns
        -------
        (none)

        Creates/modifies
        ----------------
        Creates and initializes self._num_nodes and self._dx

        Notes
        -----
        To be consistent with unstructured grids, the hex grid is managed
        not as a 2D array but rather as a set of arrays that describe
        connectivity information between nodes, links, cells, faces, patches,
        corners, and junctions.

        'Horizontal' orientation means that one of the 3 axes of the grid is
        horizontal, whereas the other two are at 30 degree angles to the
        horizontal, like:

            \ /
           -----
            / \

        'Vertical' means that one axis is vertical, with the other
        two at 30 degree angles to the vertical, more like:

           \   |   /
             \ | /
             / | \
           /   |   \

        (of course, these keyboard characters don't represent the angles quite
        right)

        Numbers of rows and columns: a hex grid with a rectangular shape will
        have a fixed number of rows and columns, and so for rectangular shaped
        grids we record this information in self._nrows and self._ncols. With
        a hex-shaped grid, either the number of columns (if 'horizontal') or
        the number of rows (if 'vertical') will vary across the grid.
        Therefore, for hex-shaped grids we record only self._nrows for
        'horizontal' grids, and only self._ncols for 'vertical' grids.
        """
        if self._DEBUG_TRACK_METHODS:
            six.print_('HexModelGrid._initialize('+str(base_num_rows) +
                       ', ' + str(base_num_cols) + ', ' + str(dx) + ')')

        # Make sure the parameter *orientation* is correct
        assert (orientation[0].lower()=='h' or orientation[0].lower()=='v'), \
               'orientation must be either "horizontal" (default) or "vertical"'

        # Make sure the parameter *shape* is correct
        assert (shape[0].lower()=='h' or shape[0].lower()=='r'), \
               'shape must be either "hex" (default) or "rect"'

        # Create a set of hexagonally arranged points. These will be our nodes.
        # Each combination of orientation and shape uses its own lattice
        # generator and records only the row/column counts that stay fixed.
        if orientation=='horizontal' and shape=='hex':
            [pts, self._num_nodes] = HexModelGrid.make_hex_points_horizontal_hex(base_num_rows, base_num_cols, dx)
            self.orientation = 'horizontal'
            self._nrows = base_num_rows
        elif orientation=='horizontal' and shape=='rect':
            [pts, self._num_nodes] = HexModelGrid.make_hex_points_horizontal_rect(base_num_rows, base_num_cols, dx)
            self.orientation = 'horizontal'
            self._nrows = base_num_rows
            self._ncols = base_num_cols
        elif orientation=='vertical' and shape=='hex':
            [pts, self._num_nodes] = HexModelGrid.make_hex_points_vertical_hex(base_num_rows, base_num_cols, dx)
            self.orientation = 'vertical'
            self._ncols = base_num_cols
        else:
            [pts, self._num_nodes] = HexModelGrid.make_hex_points_vertical_rect(base_num_rows, base_num_cols, dx)
            self.orientation = 'vertical'
            self._nrows = base_num_rows
            self._ncols = base_num_cols

        # Call the VoronoiDelaunayGrid constructor to triangulate/Voronoi
        # the nodes into a grid.
        super(HexModelGrid, self)._initialize(pts[:,0], pts[:,1],
                                              reorient_links)

        # Remember grid spacing
        self._dx = dx

    def _setup_cell_areas_array(self):
        """
        Creates and returns an array containing the surface areas of the
        hexagonal (Voronoi) cells.

        These cells are perfect hexagons in which the apothem is dx/2. The
        formula for area is:

        .. math::

            A = 3 dx^2 / 2 \sqrt{3} \approx 0.866 dx^2
        """
        # Every cell has the same area, so broadcast the constant.
        self._cell_areas = 0.8660254*self._dx**2 + numpy.zeros(self.number_of_cells)
        return self._cell_areas

    @staticmethod
    def make_hex_points_horizontal_hex(num_rows, base_num_cols, dxh):
        """
        Creates and returns a set of (x,y) points in a staggered grid in which
        the points represent the centers of regular hexagonal cells, and the
        points could be connected to form equilateral triangles. The overall
        shape of the lattice is hexagonal, and one of the 3 axes is horizontal.

        Inputs: num_rows = number of rows in lattice
                base_num_cols = number of columns in the bottom and top rows
                                (middle rows have more)
                dxh = horizontal and diagonal spacing between points

        Return: 2D numpy array containing point (x,y) coordinates, and total
                number of points.

        Examples
        --------
        >>> from landlab import HexModelGrid
        >>> [p, npt] = HexModelGrid.make_hex_points_horizontal_hex(3, 2, 1.0)
        >>> npt
        7
        >>> p[1,:]
        array([ 1.,  0.])
        >>> p[:3,0]
        array([ 0. ,  1. , -0.5])
        """
        # Vertical spacing for an equilateral-triangle lattice.
        dxv = dxh * numpy.sqrt(3.) / 2.
        half_dxh = dxh / 2.

        if numpy.mod(num_rows, 2) == 0:  # even number of rows
            npts = num_rows * base_num_cols + (num_rows * num_rows) // 4
        else:  # odd number of rows
            npts = num_rows * base_num_cols + ((num_rows - 1) // 2) * ((num_rows - 1) // 2)
        pts = numpy.zeros((npts, 2))
        middle_row = num_rows // 2
        extra_cols = 0
        xshift = 0.
        i = 0
        for r in range(num_rows):
            for c in range(base_num_cols + extra_cols):
                pts[i,0] = c * dxh + xshift
                pts[i,1] = r * dxv
                i += 1
            # Rows widen up to the middle row, then narrow again; each row is
            # shifted left by half a spacing per extra column to stay centered.
            if r < middle_row:
                extra_cols += 1
            else:
                extra_cols -= 1
            xshift = - half_dxh * extra_cols

        return pts, npts

    @staticmethod
    def make_hex_points_horizontal_rect(num_rows, num_cols, dxh):
        """
        Creates and returns a set of (x,y) points in a staggered grid in which
        the points represent the centers of regular hexagonal cells, and the
        points could be connected to form equilateral triangles. The overall
        shape of the lattice is rectangular, and one of the 3 axes is
        horizontal.

        Inputs: num_rows = number of rows in lattice
                num_cols = number of columns in lattice
                dxh = horizontal and diagonal spacing between points

        Return: 2D numpy array containing point (x,y) coordinates, and total
                number of points.

        Examples
        --------
        >>> from landlab import HexModelGrid
        >>> [p, npt] = HexModelGrid.make_hex_points_horizontal_rect(3, 3, 1.0)
        >>> npt
        9
        >>> p[1,:]
        array([ 1.,  0.])
        >>> p[:3,0]
        array([ 0.,  1.,  2.])
        """
        dxv = dxh * numpy.sqrt(3.) / 2.
        half_dxh = dxh / 2.

        npts = num_rows * num_cols
        pts = numpy.zeros((npts, 2))
        xshift = 0.
        i = 0
        for r in range(num_rows):
            for c in range(num_cols):
                # Odd rows are offset half a spacing to stagger the lattice.
                xshift = half_dxh * (r%2)
                pts[i,0] = c * dxh + xshift
                pts[i,1] = r * dxv
                i += 1

        return pts, npts

    @staticmethod
    def make_hex_points_vertical_hex(base_num_rows, num_cols, dxv):
        """
        Creates and returns a set of (x,y) points in a staggered grid in which
        the points represent the centers of regular hexagonal cells, and the
        points could be connected to form equilateral triangles. The overall
        shape of the lattice is hexagonal.

        Inputs: base_num_rows = number of rows in the left and right
                                columns (middle columns have more)
                num_cols = number of columns in lattice
                dxv = vertical and diagonal spacing between points

        Return: 2D numpy array containing point (x,y) coordinates, and total
                number of points.

        Examples
        --------
        >>> from landlab import HexModelGrid
        >>> [p, npt] = HexModelGrid.make_hex_points_vertical_hex(2, 3, 1.0)
        >>> npt
        7
        >>> p[1,:]
        array([ 0.,  1.])
        >>> p[:3,1]
        array([ 0. ,  1. , -0.5])
        """
        # Horizontal spacing for an equilateral-triangle lattice.
        dxh = dxv * numpy.sqrt(3.) / 2.
        half_dxv = dxv / 2.

        if numpy.mod(num_cols, 2) == 0:  # even number of columns
            npts = base_num_rows * num_cols + (num_cols * num_cols) // 4
        else:  # odd number of columns
            npts = base_num_rows * num_cols + ((num_cols - 1) // 2) * ((num_cols - 1) // 2)
        pts = numpy.zeros((npts, 2))
        middle_col = num_cols // 2
        extra_rows = 0
        yshift = 0.
        i = 0
        for c in range(num_cols):
            for r in range(base_num_rows + extra_rows):
                pts[i,1] = r * dxv + yshift
                pts[i,0] = c * dxh
                i += 1
            # Columns lengthen up to the middle column, then shorten; each
            # column shifts down half a spacing per extra row to stay centered.
            if c < middle_col:
                extra_rows += 1
            else:
                extra_rows -= 1
            yshift = - half_dxv * extra_rows

        return pts, npts

    @staticmethod
    def make_hex_points_vertical_rect(num_rows, num_cols, dxv):
        """
        Creates and returns a set of (x,y) points in a staggered grid in which
        the points represent the centers of regular hexagonal cells, and the
        points could be connected to form equilateral triangles. The overall
        shape of the lattice is rectangular.

        Inputs: num_rows = number of rows in lattice
                num_cols = number of columns in lattice
                dxv = vertical and diagonal spacing between points

        Return: 2D numpy array containing point (x,y) coordinates, and total
                number of points.

        Examples
        --------
        >>> from landlab import HexModelGrid
        >>> [p, npt] = HexModelGrid.make_hex_points_vertical_rect(3, 3, 1.0)
        >>> npt
        9
        >>> p[1,:]
        array([ 0.,  1.])
        >>> p[:3,1]
        array([ 0.,  1.,  2.])
        """
        dxh = dxv * numpy.sqrt(3.) / 2.
        half_dxv = dxv / 2.

        npts = num_rows * num_cols
        pts = numpy.zeros((npts, 2))
        yshift = 0.
        i = 0
        for c in range(num_cols):
            for r in range(num_rows):
                # Odd columns are offset half a spacing to stagger the lattice.
                yshift = half_dxv * (c%2)
                pts[i,1] = r * dxv + yshift
                pts[i,0] = c * dxh
                i += 1

        return pts, npts

    @property
    def number_of_node_columns(self):
        """Number of node columns in a rectangular-shaped and/or
        vertically oriented hex grid.

        Returns the number of columns, including boundaries.

        Notes
        -----
        Will generate an error if called with a hex-shaped, horizontally
        aligned grid.

        Examples
        --------
        >>> from landlab import HexModelGrid
        >>> grid = HexModelGrid(5, 5, shape='rect')
        >>> grid.number_of_node_columns
        5
        """
        return self._ncols

    @property
    def number_of_node_rows(self):
        """Number of node rows in a rectangular-shaped and/or
        horizontally oriented hex grid.

        Returns the number of rows, including boundaries.

        Notes
        -----
        Will generate an error if called with a hex-shaped, vertically
        aligned grid.

        Examples
        --------
        >>> from landlab import HexModelGrid
        >>> grid = HexModelGrid(5, 5, shape='rect')
        >>> grid.number_of_node_rows
        5
        """
        return self._nrows

    def configure_hexplot(self, data, data_label=None, color_map=None):
        """
        Sets up necessary information for making plots of the hexagonal grid
        colored by a given data element.

        Parameters
        ----------
        data : str OR node array (1d numpy array with number_of_nodes entries)
            Data field to be colored
        data_label : str, optional
            Label for colorbar
        color_map : matplotlib colormap object, None
            Color map to apply (defaults to "jet")

        Returns
        -------
        (none)

        Notes
        -----
        Creates and stores a PatchCollection representing the hexagons. Also
        stores a handle to the current plotting axis. Both of these are then
        used by hexplot().
        """
        from numpy import array, sqrt, zeros
        import matplotlib
        from matplotlib.patches import Polygon
        from matplotlib.collections import PatchCollection
        #import matplotlib.pyplot as plt

        # color
        if color_map is None:
            color_map = matplotlib.cm.jet

        # geometry
        apothem = self._dx/2.0
        radius = 2.0*apothem / sqrt(3.0)  # distance from node to each hexagon cell vertex

        # offsets from node x,y position
        offsets = zeros((6,2))
        poly_verts = zeros((6,2))

        # Figure out whether the orientation is horizontal or vertical.
        # NOTE(review): this infers orientation from the first two node
        # y-coordinates rather than self.orientation — presumably equivalent
        # for lattices built by this class; confirm for edge cases.
        if self.node_y[0]==self.node_y[1]:   # horizontal
            offsets[:,0] = array([0., apothem, apothem, 0., -apothem, -apothem])
            offsets[:,1] = array([radius, radius/2.0, -radius/2.0, -radius, -radius/2.0, radius/2.0])
        else:   # vertical
            offsets[:,0] = array([radius/2.0, radius, radius/2.0, -radius/2.0, -radius, -radius/2.0])
            offsets[:,1] = array([apothem, 0., -apothem, -apothem, 0., apothem])

        patches = []
        for i in range(self.number_of_nodes):
            poly_verts[:,0] = self.node_x[i]+offsets[:,0]
            poly_verts[:,1] = self.node_y[i]+offsets[:,1]
            p = Polygon(poly_verts, True)
            patches.append(p)

        self._hexplot_pc = PatchCollection(patches, cmap=color_map,
                                           edgecolor='none', linewidth=0.0)

        self._hexplot_configured=True

    def hexplot(self, data, data_label=None, color_map=None):
        """
        Creates a plot of the grid and one node-data field, showing hexagonal
        cells colored by values in the field.

        Parameters
        ----------
        data : str OR node array (1d numpy array with number_of_nodes entries)
            Data field to be colored
        data_label : str, optional
            Label for colorbar

        Returns
        -------
        (none)
        """
        from numpy import array, amin, amax
        import matplotlib.pyplot as plt

        # Lazily build the patch collection on first use; the attribute probe
        # raises AttributeError (caught below) when not yet configured.
        try:
            self._hexplot_configured is True
        except:
            self.configure_hexplot(data, data_label, color_map)

        # Handle *data*: if it's a numpy array, then we consider it the
        # data to be plotted. If it's a string, we consider it the name of the
        # node-field to plot, and we fetch it.
        if type(data) is str:
            data_label = data
            data = self.at_node[data]

        ax = plt.gca()
        self._hexplot_pc.set_array(array(data))
        ax.add_collection(self._hexplot_pc)
        plt.xlim([amin(self.node_x)-self._dx, amax(self.node_x)+self._dx])
        plt.ylim([amin(self.node_y)-self._dx, amax(self.node_y)+self._dx])
        #cb = plt.colorbar(self._hexplot_pc)
        #if data_label is not None:
        #    cb.set_label(data_label)
        #plt.show()

        return ax


def from_dict(param_dict):
    """
    Create a HexModelGrid from the dictionary-like object, *param_dict*.
    Required keys of the dictionary are NUM_ROWS, NUM_COLS. Raises a KeyError
    if either of these are missing. If GRID_SPACING is given, use it as the
    HexModelGrid *dx* parameter, otherwise default to unit spacing.
    """
    # Read and create a basic HexModelGrid
    try:
        n_rows = int(param_dict['NUM_ROWS'])
        n_cols = int(param_dict['NUM_COLS'])
        dx = float(param_dict.get('GRID_SPACING', 1.))
    except KeyError as e:
        raise
    except ValueError as e:
        raise
    else:
        hg = HexModelGrid(n_rows, n_cols, dx)

    return hg
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Runner for neural clustering process experiments.
"""
import itertools
import os
import sys
import timeit

from . import em
from . import gmm
from . import ncp as ncp_models
from . import plotting
from absl import app
from absl import flags
import jax
from jax import jit
from jax import vmap
import jax.experimental.optimizers
import jax.numpy as np
import matplotlib.pyplot as plt

# Supported synthetic mixture-mode shapes.
MODE_SHAPE_GAUSSIAN = "gaussian"
MODE_SHAPE_BANANA = "banana"
MODE_SHAPES = [MODE_SHAPE_GAUSSIAN, MODE_SHAPE_BANANA]

flags.DEFINE_integer("data_dim", 2,
                     "The dimension of the points to cluster.")
flags.DEFINE_integer("num_data_points", 25,
                     "The number of points to sample per MM instance.")
flags.DEFINE_integer("num_modes", 2,
                     "The true number of modes in the data.")
flags.DEFINE_enum("mode_shape", MODE_SHAPE_GAUSSIAN, MODE_SHAPES,
                  "The shape of modes.")
flags.DEFINE_float("mu_prior_mean", 0,
                   "The mean of the prior distribution over mixture means.")
flags.DEFINE_float("mu_prior_scale", 1.,
                   "The mean of the prior distribution over mixture means.")
flags.DEFINE_float("mode_scale", 0.2,
                   "The true scale of each mixture mode.")
flags.DEFINE_integer("h_dim", 16,
                     "The dimension of NCP's h representation.")
flags.DEFINE_integer("u_dim", 16,
                     "The dimension of NCP's u representation.")
flags.DEFINE_integer("g_dim", 16,
                     "The dimension of NCP's g representation.")
flags.DEFINE_integer("hidden_layer_dim", 256,
                     "The number of features for the hidden layers in NCP.")
flags.DEFINE_integer("num_hidden_layers", 3,
                     "The number of hidden layers in NCP.")
flags.DEFINE_integer("batch_size", 64,
                     "The batch size.")
flags.DEFINE_integer("eval_batch_size", 128,
                     "The batch size to use for computing average accuracies.")
flags.DEFINE_list("eval_num_data_points", [25, 50, 100],
                  "A list of numbers of data points to use for computing "
                  "accuracies.")
flags.DEFINE_integer("num_steps", int(1e6),
                     "The number of steps to train for.")
flags.DEFINE_float("lr", 1e-3,
                   "The learning rate for ADAM.")
flags.DEFINE_integer("summarize_every", 100,
                     "Number of steps between summaries.")
flags.DEFINE_string("logdir", "/tmp/ncp",
                    "The directory to put summaries and checkpoints.")

FLAGS = flags.FLAGS


def train_ncp(data_dim=2,
              num_data_points=25,
              num_modes=2,
              mode_shape=MODE_SHAPE_GAUSSIAN,
              mu_prior_mean=0.,
              mu_prior_scale=1.,
              mode_scale=0.2,
              h_dim=16,
              u_dim=16,
              g_dim=16,
              hidden_layer_dim=256,
              num_hidden_layers=3,
              batch_size=16,
              eval_batch_size=128,
              eval_num_data_points=[25],
              lr=1e-4,
              num_steps=100000,
              summarize_every=100):
  # NOTE(review): eval_num_data_points is a mutable default argument; it is
  # only read here, so behavior is unchanged, but it should be None-guarded.
  key = jax.random.PRNGKey(0)
  key, subkey = jax.random.split(key)
  ncp = ncp_models.NCP(h_dim, u_dim, g_dim, data_dim, hidden_layer_dim,
                       num_hidden_layers, subkey)

  # Sample a batch of synthetic mixture-model instances: mixture means are
  # drawn from a Normal prior, then points/labels from gmm.sample (gaussian)
  # or gmm.sample_bananas (banana-shaped modes).
  def sample_batch(key, num_data_points, batch_size):
    keys = jax.random.split(key, num=(1 + batch_size))
    mus = jax.random.normal(
        keys[0], shape=[batch_size, num_modes, data_dim]
    ) * mu_prior_scale + mu_prior_mean
    if mode_shape == MODE_SHAPE_GAUSSIAN:
      xs, cs = vmap(
          gmm.sample, in_axes=(0, None, None, None, 0))(
              mus, mode_scale, np.ones([num_modes]) / num_modes,
              num_data_points, keys[1:])
    else:
      xs, cs = vmap(
          gmm.sample_bananas, in_axes=(0, None, None, None, None, 0))(
              mus, mode_scale, 1., np.ones([num_modes]) / num_modes,
              num_data_points, keys[1:])
    return xs, cs

  sample_batch = jit(sample_batch, static_argnums=(1, 2))

  # Training objective: negative mean log-likelihood of the true cluster
  # assignments under the NCP.
  def kl(params, key):
    xs, cs = sample_batch(key, num_data_points, batch_size)
    lls = vmap(
        lambda x, c, p: ncp._ll(x, c, p), in_axes=(0, 0, None))(xs, cs, params)
    return -np.mean(lls)

  kl_and_grad = jit(jax.value_and_grad(kl, argnums=0))

  # Label-permutation-invariant accuracy of NCP's sampled clustering.
  def ncp_accuracy(xs, cs, params, key):
    ncp_predicted_cs = ncp._sample(xs, params, key)
    # NOTE(review): range(7) is hard-coded while em_accuracy below uses
    # range(num_modes); 5040 permutations are searched regardless of the
    # true mode count -- confirm this is intentional headroom for NCP
    # over-segmenting.
    permutations = np.array(list(itertools.permutations(range(7))))
    ncp_permuted_cs = jax.lax.map(lambda p: p[ncp_predicted_cs], permutations)
    ncp_acc = np.max(jax.lax.map(
        lambda pcs: np.mean(cs == pcs), ncp_permuted_cs))
    return ncp_acc

  def avg_ncp_accuracy(num_data_points, params, key):
    keys = jax.random.split(key, num=eval_batch_size+1)
    xs, cs = sample_batch(keys[0], num_data_points, eval_batch_size)
    ncp_acc = vmap(ncp_accuracy, in_axes=(0, 0, None, 0))(
        xs, cs, params, keys[1:])
    return np.mean(ncp_acc)

  avg_ncp_accuracy = jit(avg_ncp_accuracy, static_argnums=0)

  # Baseline: permutation-invariant accuracy of EM run for 25 iterations.
  def em_accuracy(xs, cs, key):
    _, _, em_log_membership_weights = em.em(xs, num_modes, 25, key)
    em_predicted_cs = np.argmax(em_log_membership_weights, axis=1)
    permutations = np.array(list(itertools.permutations(range(num_modes))))
    permuted_cs = jax.lax.map(lambda p: p[cs], permutations)
    em_acc = np.max(jax.lax.map(
        lambda pcs: np.mean(pcs == em_predicted_cs), permuted_cs))
    return em_acc

  def avg_em_accuracy(batch_size, key):
    keys = jax.random.split(key, num=batch_size+1)
    xs, cs = sample_batch(keys[0], num_data_points, batch_size)
    em_acc = vmap(em_accuracy)(xs, cs, keys[1:])
    return np.mean(em_acc)

  avg_em_accuracy = jit(avg_em_accuracy, static_argnums=0)

  # Render a side-by-side scatter plot of NCP vs EM clusterings of one
  # freshly-sampled instance, and log it to the summary writer.
  def plot(num_data_points, writer, step, params, key):
    keys = jax.random.split(key, num=4)
    mus = jax.random.normal(
        keys[0], shape=[num_modes, data_dim]) * mu_prior_scale + mu_prior_mean
    if mode_shape == MODE_SHAPE_GAUSSIAN:
      xs, _ = gmm.sample(mus, mode_scale, np.ones([num_modes]) / num_modes,
                         num_data_points, keys[1])
    else:
      xs, _ = gmm.sample_bananas(mus, mode_scale, 1.,
                                 np.ones([num_modes]) / num_modes,
                                 num_data_points, keys[1])
    ncp_predicted_cs = ncp._sample(xs, params, keys[2])
    num_predicted_modes = np.max(ncp_predicted_cs) + 1
    writer.scalar(
        "num_predicted_modes_%d_points" % num_data_points,
        num_predicted_modes, step=step)
    _, _, em_log_membership_weights = em.em(xs, num_modes, 25, keys[3])
    em_predicted_cs = np.argmax(em_log_membership_weights, axis=1)
    fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))
    for i in range(num_predicted_modes):
      ncp_mode_i_xs = xs[ncp_predicted_cs == i]
      ax[0].plot(ncp_mode_i_xs[:, 0], ncp_mode_i_xs[:, 1], "o")
    for i in range(num_modes):
      em_mode_i_xs = xs[em_predicted_cs == i]
      ax[1].plot(em_mode_i_xs[:, 0], em_mode_i_xs[:, 1], "o")
      ax[1].plot(mus[i, 0], mus[i, 1], "r*")
      ax[0].plot(mus[i, 0], mus[i, 1], "r*")
    ax[0].set_title("NCP")
    ax[1].set_title("EM")
    plot_img = plotting.plot_to_numpy_image(plt)
    writer.image("plot_%d_points" % num_data_points, plot_img, step=step)
    plt.close(fig)

  # Periodic evaluation: EM baseline accuracy once at step 0, NCP accuracy
  # at each requested number of data points, and (in 2-D) a plot.
  def summarize(writer, step, params, key):
    if step == 0:
      key, subkey = jax.random.split(key)
      em_acc = avg_em_accuracy(10*eval_batch_size, subkey)
      writer.scalar("em_accuracy", em_acc, step=step)
      print("EM Accuracy: %0.2f" % (em_acc * 100))
    for num_pts in eval_num_data_points:
      key, subkey = jax.random.split(key)
      ncp_acc = avg_ncp_accuracy(num_pts, params, subkey)
      writer.scalar("ncp_accuracy_at_%d_points" % num_pts, ncp_acc, step=step)
      print("NCP Accuracy @ %d: %0.2f" % (num_pts, ncp_acc * 100))
    if data_dim == 2:
      plot(eval_num_data_points[-1], writer, step, params, key)

  # One ADAM update on the KL objective.
  def train_step(t, opt_state, key):
    params = opt_get_params(opt_state)
    kl_val, kl_grad = kl_and_grad(params, key)
    opt_state = opt_update(t, kl_grad, opt_state)
    return kl_val, opt_state

  # Run num_steps updates inside a single jitted fori_loop; returns the new
  # optimizer state and the last step's KL value.
  def train_many_steps(t, num_steps, opt_state, key):

    def step(i, state):
      key, opt_state, _ = state
      key, subkey = jax.random.split(key)
      kl, new_opt_state = train_step(t + i, opt_state, subkey)
      return (key, new_opt_state, kl)

    _, new_opt_state, kl = jax.lax.fori_loop(
        0, num_steps, step, (key, opt_state, 0.))
    return new_opt_state, kl

  train_many_steps = jit(train_many_steps, static_argnums=1)

  # NOTE(review): sw is never assigned a real summary writer, yet
  # sw.scalar(...)/sw.flush()/sw.close() are called below -- this will raise
  # AttributeError on the first summary. It looks like the writer
  # construction was removed; restore it or guard the sw calls.
  sw = None
  opt_init, opt_update, opt_get_params = jax.experimental.optimizers.adam(lr)
  opt_state = opt_init(ncp.params)
  start = timeit.default_timer()
  t = 0
  while t < num_steps:
    key, subkey1, subkey2 = jax.random.split(key, num=3)
    opt_state, kl_val = train_many_steps(t, summarize_every, opt_state,
                                         subkey1)
    t += summarize_every
    print("Step %d KL: %0.4f" % (t, kl_val))
    sw.scalar("kl", kl_val, step=t)
    summarize(sw, t, opt_get_params(opt_state), subkey2)
    end = timeit.default_timer()
    steps_per_sec = summarize_every / (end - start)
    print("Steps/sec: %0.2f" % steps_per_sec)
    sw.scalar("steps_per_sec", steps_per_sec, step=t)
    start = end
    sw.flush()
    sys.stdout.flush()
  sw.close()


def make_logdir(config):
  """Build the experiment log directory path from the key hyperparameters."""
  basedir = config.logdir
  exp_dir = "num_pts_%d_data_dim_%d_num_modes_%d_mode_scale_%0.2f" % (
      FLAGS.num_data_points, FLAGS.data_dim, FLAGS.num_modes, FLAGS.mode_scale)
  return os.path.join(basedir, exp_dir)


def main(unused_argv):
  # Forward all flag values into train_ncp.
  train_ncp(
      data_dim=FLAGS.data_dim,
      num_data_points=FLAGS.num_data_points,
      num_modes=FLAGS.num_modes,
      mode_shape=FLAGS.mode_shape,
      mu_prior_mean=FLAGS.mu_prior_mean,
      mu_prior_scale=FLAGS.mu_prior_scale,
      mode_scale=FLAGS.mode_scale,
      h_dim=FLAGS.h_dim,
      u_dim=FLAGS.u_dim,
      g_dim=FLAGS.g_dim,
      hidden_layer_dim=FLAGS.hidden_layer_dim,
      num_hidden_layers=FLAGS.num_hidden_layers,
      batch_size=FLAGS.batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      eval_num_data_points=[int(x) for x in FLAGS.eval_num_data_points],
      lr=FLAGS.lr,
      num_steps=FLAGS.num_steps,
      summarize_every=FLAGS.summarize_every)


if __name__ == "__main__":
  app.run(main)
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

#
# Helpful routines for regression testing
#
# NOTE(review): this module is Python 2 (print statements, urllib-era APIs).

# Add python-bitcoinrpc to module search path:
import os
import sys

from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re

from authproxy import AuthServiceProxy, JSONRPCException
from util import *

def p2p_port(n):
    # Offset ports by PID so parallel test runs don't collide.
    return 11000 + n + os.getpid()%999

def rpc_port(n):
    return 12000 + n + os.getpid()%999

def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    n = Decimal("20000000.00000003")
    satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")

def sync_blocks(rpc_connections, wait=1):
    """
    Wait until everybody has the same block count
    """
    # Polls forever; there is no timeout.
    while True:
        counts = [ x.getblockcount() for x in rpc_connections ]
        if counts == [ counts[0] ]*len(counts):
            break
        time.sleep(wait)

def sync_mempools(rpc_connections, wait=1):
    """
    Wait until everybody has the same transactions in their
    memory pools
    """
    while True:
        pool = set(rpc_connections[0].getrawmempool())
        num_match = 1
        for i in range(1, len(rpc_connections)):
            if set(rpc_connections[i].getrawmempool()) == pool:
                num_match = num_match+1
        if num_match == len(rpc_connections):
            break
        time.sleep(wait)

# Map of node index -> running bitcoind subprocess.Popen handle.
bitcoind_processes = {}

def initialize_datadir(dirname, n):
    """Create node n's datadir and write a regtest testcoin.conf into it."""
    datadir = os.path.join(dirname, "node"+str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    with open(os.path.join(datadir, "testcoin.conf"), 'w') as f:
        f.write("regtest=1\n");
        f.write("rpcuser=rt\n");
        f.write("rpcpassword=rt\n");
        f.write("port="+str(p2p_port(n))+"\n");
        f.write("rpcport="+str(rpc_port(n))+"\n");
    return datadir

def initialize_chain(test_dir):
    """
    Create (or copy from cache) a 200-block-long chain and
    4 wallets.
    bitcoind and testcoin-cli must be in search path.
    """
    # Only (re)build the cache if any of the four cached datadirs is missing.
    if (not os.path.isdir(os.path.join("cache","node0"))
        or not os.path.isdir(os.path.join("cache","node1"))
        or not os.path.isdir(os.path.join("cache","node2"))
        or not os.path.isdir(os.path.join("cache","node3"))):

        #find and delete old cache directories if any exist
        for i in range(4):
            if os.path.isdir(os.path.join("cache","node"+str(i))):
                shutil.rmtree(os.path.join("cache","node"+str(i)))

        devnull = open(os.devnull, "w")
        # Create cache directories, run bitcoinds:
        for i in range(4):
            datadir=initialize_datadir("cache", i)
            args = [ os.getenv("BITCOIND", "bitcoind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
            if i > 0:
                args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
            bitcoind_processes[i] = subprocess.Popen(args)
            if os.getenv("PYTHON_DEBUG", ""):
                print "initialize_chain: bitcoind started, calling testcoin-cli -rpcwait getblockcount"
            # Block until the node's RPC interface is up.
            subprocess.check_call([ os.getenv("BITCOINCLI", "testcoin-cli"), "-datadir="+datadir,
                                    "-rpcwait", "getblockcount"], stdout=devnull)
            if os.getenv("PYTHON_DEBUG", ""):
                print "initialize_chain: testcoin-cli -rpcwait getblockcount completed"
        devnull.close()
        rpcs = []
        for i in range(4):
            try:
                url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
                rpcs.append(AuthServiceProxy(url))
            except:
                sys.stderr.write("Error connecting to "+url+"\n")
                sys.exit(1)

        # Create a 200-block-long chain; each of the 4 nodes
        # gets 25 mature blocks and 25 immature.
        # blocks are created with timestamps 10 minutes apart, starting
        # at 1 Jan 2014
        block_time = 1388534400
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].generate(1)
                    block_time += 10*60
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)

        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        wait_bitcoinds()
        for i in range(4):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))

    # Copy the cached chain into the test directory for each node.
    for i in range(4):
        from_dir = os.path.join("cache", "node"+str(i))
        to_dir = os.path.join(test_dir, "node"+str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i) # Overwrite port/rpcport in testcoin.conf

def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    for i in range(num_nodes):
        datadir=initialize_datadir(test_dir, i)


def _rpchost_to_args(rpchost):
    '''Convert optional IP:port spec to rpcconnect/rpcport args'''
    if rpchost is None:
        return []

    match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
    if not match:
        raise ValueError('Invalid RPC host spec ' + rpchost)

    rpcconnect = match.group(1)
    rpcport = match.group(2)

    if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
        rpcconnect = rpcconnect[1:-1]

    rv = ['-rpcconnect=' + rpcconnect]
    if rpcport:
        rv += ['-rpcport=' + rpcport]
    return rv

def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """
    Start a bitcoind and return RPC connection to it
    """
    datadir = os.path.join(dirname, "node"+str(i))
    if binary is None:
        binary = os.getenv("BITCOIND", "bitcoind")
    args = [ binary, "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
    if extra_args is not None: args.extend(extra_args)
    bitcoind_processes[i] = subprocess.Popen(args)
    devnull = open(os.devnull, "w")
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: bitcoind started, calling testcoin-cli -rpcwait getblockcount"
    subprocess.check_call([ os.getenv("BITCOINCLI", "testcoin-cli"), "-datadir="+datadir] +
                          _rpchost_to_args(rpchost)  +
                          ["-rpcwait", "getblockcount"], stdout=devnull)
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: calling testcoin-cli -rpcwait getblockcount returned"
    devnull.close()
    url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
    if timewait is not None:
        proxy = AuthServiceProxy(url, timeout=timewait)
    else:
        proxy = AuthServiceProxy(url)
    proxy.url = url # store URL on proxy for info
    return proxy

def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
    """
    Start multiple bitcoinds, return RPC connections to them
    """
    if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
    if binary is None: binary = [ None for i in range(num_nodes) ]
    return [ start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]) for i in range(num_nodes) ]

def log_filename(dirname, n_node, logname):
    return os.path.join(dirname, "node"+str(n_node), "regtest", logname)

def stop_node(node, i):
    node.stop()
    bitcoind_processes[i].wait()
    del bitcoind_processes[i]

def stop_nodes(nodes):
    for node in nodes:
        node.stop()
    del nodes[:] # Emptying array closes connections as a side effect

def set_node_times(nodes, t):
    for node in nodes:
        node.setmocktime(t)
def wait_bitcoinds():
    # Wait for all bitcoinds to cleanly exit
    for bitcoind in bitcoind_processes.values():
        bitcoind.wait()
    bitcoind_processes.clear()

def connect_nodes(from_connection, node_num):
    ip_port = "127.0.0.1:"+str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
        time.sleep(0.1)

def connect_nodes_bi(nodes, a, b):
    # Connect in both directions so relaying works either way.
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)

def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1)
    for i in range(len(txdata["vout"])):
        if txdata["vout"][i]["value"] == amount:
            return i
    raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))

def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed
    """
    assert(confirmations_required >=0)
    utxo = from_node.listunspent(confirmations_required)
    random.shuffle(utxo)
    inputs = []
    total_in = Decimal("0.00000000")
    while total_in < amount_needed and len(utxo) > 0:
        t = utxo.pop()
        total_in += t["amount"]
        inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
    return (total_in, inputs)

def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them
    """
    outputs = {}
    amount = amount_out+fee
    change = amount_in - amount
    if change > amount*2:
        # Create an extra change output to break up big inputs
        change_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - amount - outputs[change_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs

def send_zeropri_transaction(from_node, to_node, amount, fee):
    """
    Create&broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata)
    Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
    """

    # Create a send-to-self with confirmed inputs:
    self_address = from_node.getnewaddress()
    (total_in, inputs) = gather_inputs(from_node, amount+fee*2)
    outputs = make_change(from_node, total_in, amount+fee, fee)
    outputs[self_address] = float(amount+fee)

    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)

    vout = find_output(from_node, self_txid, amount+fee)
    # Now immediately spend the output to create a 1-input, 1-output
    # zero-priority transaction:
    inputs = [ { "txid" : self_txid, "vout" : vout } ]
    outputs = { to_node.getnewaddress() : float(amount) }

    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)

    return (txid, signresult["hex"])

def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random zero-priority transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    fee = min_fee + fee_increment*random.randint(0,fee_variants)
    (txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
    return (txid, txhex, fee)

def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    fee = min_fee + fee_increment*random.randint(0,fee_variants)

    (total_in, inputs) = gather_inputs(from_node, amount+fee)
    outputs = make_change(from_node, total_in, amount, fee)
    outputs[to_node.getnewaddress()] = float(amount)

    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)

    return (txid, signresult["hex"], fee)

def assert_equal(thing1, thing2):
    if thing1 != thing2:
        raise AssertionError("%s != %s"%(str(thing1),str(thing2)))

def assert_greater_than(thing1, thing2):
    if thing1 <= thing2:
        raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))

def assert_raises(exc, fun, *args, **kwds):
    try:
        fun(*args, **kwds)
    except exc:
        pass
    except Exception as e:
        raise AssertionError("Unexpected exception raised: "+type(e).__name__)
    else:
        raise AssertionError("No exception raised")
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE(review): this module is Python 2 (urllib2/urlparse).

import logging
import string
import urllib2
import urlparse

from command import Command
import utils

LOGGER = logging.getLogger(__name__)


class Request(urllib2.Request):
    """Extends the urllib2.Request to support all HTTP request types."""

    def __init__(self, url, data=None, method=None):
        """Initialise a new HTTP request.

        Args:
          url - String for the URL to send the request to.
          data - Data to send with the request.
        """
        if method is None:
            method = data is not None and 'POST' or 'GET'
        elif method != 'POST' and method != 'PUT':
            # Only POST/PUT carry a body; drop data for other verbs.
            data = None
        self._method = method
        urllib2.Request.__init__(self, url, data=data)

    def get_method(self):
        """Returns the HTTP method used by this request."""
        return self._method


class Response(object):
    """Represents an HTTP response.

    Attributes:
      fp - File object for the response body.
      code - The HTTP status code returned by the server.
      headers - A dictionary of headers returned by the server.
      url - URL of the retrieved resource represented by this Response.
    """

    def __init__(self, fp, code, headers, url):
        """Initialise a new Response.

        Args:
          fp - The response body file object.
          code - The HTTP status code returned by the server.
          headers - A dictionary of headers returned by the server.
          url - URL of the retrieved resource represented by this Response.
        """
        self.fp = fp
        self.read = fp.read
        self.code = code
        self.headers = headers
        self.url = url

    def close(self):
        """Close the response body file object."""
        self.read = None
        self.fp = None

    def info(self):
        """Returns the response headers."""
        return self.headers

    def geturl(self):
        """Returns the URL for the resource returned in this response."""
        return self.url


class HttpErrorHandler(urllib2.HTTPDefaultErrorHandler):
    """A custom HTTP error handler.

    Used to return Response objects instead of raising an HTTPError exception.
    """

    def http_error_default(self, req, fp, code, msg, headers):
        """Default HTTP error handler.

        Args:
          req - The original Request object.
          fp - The response body file object.
          code - The HTTP status code returned by the server.
          msg - The HTTP status message returned by the server.
          headers - The response headers.

        Returns:
          A new Response object.
        """
        return Response(fp, code, headers, req.get_full_url())


class RemoteConnection(object):
    """A connection with the Remote WebDriver server.

    Communicates with the server using the WebDriver wire protocol:
    http://code.google.com/p/selenium/wiki/JsonWireProtocol
    """

    def __init__(self, remote_server_addr):
        self._url = remote_server_addr
        # Maps each Command constant to its (HTTP method, URL template) pair;
        # $-placeholders in the template are filled from command parameters.
        self._commands = {
            Command.NEW_SESSION: ('POST', '/session'),
            Command.QUIT: ('DELETE', '/session/$sessionId'),
            Command.GET_CURRENT_WINDOW_HANDLE:
                ('GET', '/session/$sessionId/window_handle'),
            Command.GET_WINDOW_HANDLES:
                ('GET', '/session/$sessionId/window_handles'),
            Command.GET: ('POST', '/session/$sessionId/url'),
            Command.GO_FORWARD: ('POST', '/session/$sessionId/forward'),
            Command.GO_BACK: ('POST', '/session/$sessionId/back'),
            Command.REFRESH: ('POST', '/session/$sessionId/refresh'),
            Command.EXECUTE_SCRIPT: ('POST', '/session/$sessionId/execute'),
            Command.GET_CURRENT_URL: ('GET', '/session/$sessionId/url'),
            Command.GET_TITLE: ('GET', '/session/$sessionId/title'),
            Command.GET_PAGE_SOURCE: ('GET', '/session/$sessionId/source'),
            Command.SCREENSHOT: ('GET', '/session/$sessionId/screenshot'),
            Command.SET_BROWSER_VISIBLE:
                ('POST', '/session/$sessionId/visible'),
            Command.IS_BROWSER_VISIBLE:
                ('GET', '/session/$sessionId/visible'),
            Command.FIND_ELEMENT: ('POST', '/session/$sessionId/element'),
            Command.FIND_ELEMENTS: ('POST', '/session/$sessionId/elements'),
            Command.GET_ACTIVE_ELEMENT:
                ('POST', '/session/$sessionId/element/active'),
            Command.FIND_CHILD_ELEMENT:
                ('POST', '/session/$sessionId/element/$id/element'),
            Command.FIND_CHILD_ELEMENTS:
                ('POST', '/session/$sessionId/element/$id/elements'),
            Command.CLICK_ELEMENT:
                ('POST', '/session/$sessionId/element/$id/click'),
            Command.CLEAR_ELEMENT:
                ('POST', '/session/$sessionId/element/$id/clear'),
            Command.SUBMIT_ELEMENT:
                ('POST', '/session/$sessionId/element/$id/submit'),
            Command.GET_ELEMENT_TEXT:
                ('GET', '/session/$sessionId/element/$id/text'),
            Command.SEND_KEYS_TO_ELEMENT:
                ('POST', '/session/$sessionId/element/$id/value'),
            Command.GET_ELEMENT_VALUE:
                ('GET', '/session/$sessionId/element/$id/value'),
            Command.GET_ELEMENT_TAG_NAME:
                ('GET', '/session/$sessionId/element/$id/name'),
            Command.IS_ELEMENT_SELECTED:
                ('GET', '/session/$sessionId/element/$id/selected'),
            Command.SET_ELEMENT_SELECTED:
                ('POST', '/session/$sessionId/element/$id/selected'),
            Command.TOGGLE_ELEMENT:
                ('POST', '/session/$sessionId/element/$id/toggle'),
            Command.IS_ELEMENT_ENABLED:
                ('GET', '/session/$sessionId/element/$id/enabled'),
            Command.IS_ELEMENT_DISPLAYED:
                ('GET', '/session/$sessionId/element/$id/displayed'),
            Command.HOVER_OVER_ELEMENT:
                ('POST', '/session/$sessionId/element/$id/hover'),
            Command.GET_ELEMENT_LOCATION:
                ('GET', '/session/$sessionId/element/$id/location'),
            Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW:
                ('GET', '/session/$sessionId/element/$id/location_in_view'),
            Command.GET_ELEMENT_SIZE:
                ('GET', '/session/$sessionId/element/$id/size'),
            Command.GET_ELEMENT_ATTRIBUTE:
                ('GET', '/session/$sessionId/element/$id/attribute/$name'),
            Command.ELEMENT_EQUALS:
                ('GET', '/session/$sessionId/element/$id/equals/$other'),
            Command.GET_ALL_COOKIES: ('GET', '/session/$sessionId/cookie'),
            Command.ADD_COOKIE: ('POST', '/session/$sessionId/cookie'),
            Command.DELETE_ALL_COOKIES:
                ('DELETE', '/session/$sessionId/cookie'),
            Command.DELETE_COOKIE:
                ('DELETE', '/session/$sessionId/cookie/$name'),
            Command.SWITCH_TO_FRAME: ('POST', '/session/$sessionId/frame'),
            Command.SWITCH_TO_WINDOW: ('POST', '/session/$sessionId/window'),
            Command.CLOSE: ('DELETE', '/session/$sessionId/window'),
            Command.DRAG_ELEMENT:
                ('POST', '/session/$sessionId/element/$id/drag'),
            Command.GET_SPEED: ('GET', '/session/$sessionId/speed'),
            Command.SET_SPEED: ('POST', '/session/$sessionId/speed'),
            # NOTE(review): GET_ELEMENT_VALUE_OF_CSS_PROPERTY appears twice
            # in this literal; the second (identical) entry silently wins.
            # The duplicate should be removed.
            Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY:
                ('GET', '/session/$sessionId/element/$id/css/$propertyName'),
            Command.IMPLICIT_WAIT:
                ('POST', '/session/$sessionId/timeouts/implicit_wait'),
            Command.EXECUTE_ASYNC_SCRIPT:
                ('POST', '/session/$sessionId/execute_async'),
            Command.SET_SCRIPT_TIMEOUT:
                ('POST', '/session/$sessionId/timeouts/async_script'),
            Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY:
                ('GET', '/session/$sessionId/element/$id/css/$propertyName'),
            Command.DISMISS_ALERT:
                ('POST', '/session/$sessionId/dismiss_alert'),
            Command.ACCEPT_ALERT:
                ('POST', '/session/$sessionId/accept_alert'),
            Command.SET_ALERT_VALUE:
                ('POST', '/session/$sessionId/alert_text'),
            Command.GET_ALERT_TEXT:
                ('GET', '/session/$sessionId/alert_text')}

    def execute(self, command, params):
        """Send a command to the remote server.

        Any path subtitutions required for the URL mapped to the command should
        be included in the command parameters.

        Args:
          command - A string specifying the command to execute.
          params - A dictionary of named parameters to send with the command as
              its JSON payload.
        """
        command_info = self._commands[command]
        assert command_info is not None, 'Unrecognised command %s' % command
        data = utils.dump_json(params)
        path = string.Template(command_info[1]).substitute(params)
        url = '%s%s' % (self._url, path)
        return self._request(url, method=command_info[0], data=data)

    def _request(self, url, data=None, method=None):
        """Send an HTTP request to the remote server.

        Args:
          method - A string for the HTTP method to send the request with.
          url - The URL to send the request to.
          body - The message body to send.

        Returns:
          A dictionary with the server's parsed JSON response.
        """
        LOGGER.debug('%s %s %s' % (method, url, data))

        parsed_url = urlparse.urlparse(url)
        auth = None
        password_manager = None
        if parsed_url.username:
            # Strip the credentials out of the URL and register them with a
            # password manager for HTTP basic auth instead.
            netloc = parsed_url.hostname
            if parsed_url.port:
                netloc += ":%s" % parsed_url.port
            cleaned_url = urlparse.urlunparse((parsed_url.scheme,
                                               netloc,
                                               parsed_url.path,
                                               parsed_url.params,
                                               parsed_url.query,
                                               parsed_url.fragment))
            password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
            password_manager.add_password(None,
                                          "%s://%s" % (parsed_url.scheme, netloc),
                                          parsed_url.username,
                                          parsed_url.password)
            request = Request(cleaned_url, data=data, method=method)
        else:
            request = Request(url, data=data, method=method)

        request.add_header('Accept', 'application/json')

        if password_manager:
            opener = urllib2.build_opener(urllib2.HTTPRedirectHandler(),
                                          HttpErrorHandler(),
                                          urllib2.HTTPBasicAuthHandler(password_manager))
        else:
            opener = urllib2.build_opener(urllib2.HTTPRedirectHandler(),
                                          HttpErrorHandler())
        response = opener.open(request)
        try:
            if response.code > 399 and response.code < 500:
                # 4xx: pass the raw body through as the error value.
                return {'status': response.code, 'value': response.read()}
            body = response.read().replace('\x00', '').strip()
            content_type = response.info().getheader('Content-Type') or []
            if 'application/json' in content_type:
                data = utils.load_json(body.strip())
                assert type(data) is dict, (
                    'Invalid server response body: %s' % body)
                assert 'status' in data, (
                    'Invalid server response; no status: %s' % body)
                # Some of the drivers incorrectly return a response
                # with no 'value' field when they should return null.
                if 'value' not in data:
                    data['value'] = None
                return data
            elif 'image/png' in content_type:
                data = {'status': 0, 'value': body.strip()}
                return data
        finally:
            response.close()
#-------------------------------------------------------------------------------
# Copyright 2011 Ryan W Sims (rwsims@gmail.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#-------------------------------------------------------------------------------
# Admin views: vendor creation and the materialized-view style counters that
# keep Vendor/VendorCategory counts in sync via task-queue tasks.
import logging

import simplejson

from django import http
from django.views.generic import simple

from google.appengine.api import taskqueue
from google.appengine.ext import db

import forms
from vendors import models
from util import counters


def _count_orphans():
    """Return the number of vendors with no categories (capped at 1000)."""
    q = models.Vendor.all()
    q.filter('category_count = ', 0)
    return q.count(1000)


def _count_empty_cats():
    """Return the number of categories with no vendors (capped at 100)."""
    q = models.VendorCategory.all()
    q.filter('vendor_count = ', 0)
    return q.count(100)


def index(request):
    """Render the admin dashboard with vendor/category summary counts."""
    vendor_count = counters.get_count('vendors')
    orphan_vendors = _count_orphans()
    category_count = counters.get_count('categories')
    empty_cats = _count_empty_cats()
    return simple.direct_to_template(request, 'sye_admin/index.html', {
        'category_count': category_count,
        'vendor_count': vendor_count,
        'orphan_vendors': orphan_vendors,
        'empty_cats': empty_cats,
    })


def all_valid(*forms):
    """Validate every form/formset, returning True only if all are valid.

    Note: deliberately calls is_valid() on every form (no short-circuit) so
    each one collects its own errors for redisplay.
    """
    valid = True
    for form in forms:
        valid &= form.is_valid()
    return valid


def _link_map(data):
    """Convert link-formset cleaned_data into a list of link dicts.

    Empty formset entries (extra blank forms) are skipped.
    """
    maps = []
    for m in data:
        if len(m) == 0:
            continue
        d = {'title': m['title'],
             'url': m['url'],
             'class': models.LinkClasses[m['title']]}
        maps.append(d)
    return maps


def _update_category_txn(vendor, new, old):
    """Transaction body: persist the vendor and enqueue count updates.

    Bumps the vendor's VendorMarker sequence number so out-of-order task
    executions can be detected, then enqueues one task for the added
    categories (present=True) and one for the removed ones (present=False).
    """
    id = vendor.key().id()
    marker_key = db.Key.from_path('VendorMarker', id, parent = vendor.key())
    marker = db.get(marker_key)
    if not marker:
        marker = models.VendorMarker(key = marker_key)
    marker.sequence += 1
    db.put([vendor, marker])
    if len(new) > 0:
        taskqueue.Task(url = '/admin/tasks/update-cats',
                       params = {
                           'vendor_id': id,
                           'categories': '|'.join(new),
                           'sequence': marker.sequence,
                           # Serialized as the string 'True' on the wire;
                           # update_category_task must parse it accordingly.
                           'present': True
                       }).add(transactional = True)
    if len(old) > 0:
        taskqueue.Task(url = '/admin/tasks/update-cats',
                       params = {
                           'vendor_id': id,
                           'categories': '|'.join(old),
                           'sequence': marker.sequence,
                           'present': False
                       }).add(transactional = True)


def _update_vendor_categories(vendor, new_cats):
    """Replace vendor's categories and enqueue the count-maintenance tasks.

    Raises:
        TypeError: if the vendor has not been put() yet (no key).
    """
    if vendor.key() is None:
        raise TypeError('Must persist vendor before updating categories.')
    old_cats = vendor.categories
    vendor.set_categories(new_cats)
    db.run_in_transaction(_update_category_txn, vendor, new_cats, old_cats)


def _put_new_vendor(info, social_links, trackbacks, review_links, sye_reviews):
    """Create and persist a Vendor plus its PremiumVendorInfo and reviews.

    Args are the cleaned_data of the corresponding forms/formsets.
    Returns the new vendor's numeric id.
    """
    vendor = models.Vendor(account_email = info['account_email'],
                           name = info['name'])
    logging.info("Creating vendor " + vendor.name)
    # basic vendor info
    vendor.public_email = info['public_email']
    vendor.website = info['vendor_website']
    vendor.zipcode = info['zip_code']
    vendor.lgbt_owned = info['owned']
    vendor.experienced = info['experienced']
    vendor.put()  # must persist vendor first
    _update_vendor_categories(vendor, info['categories'].split(', '))
    # premium info
    premium = models.PremiumVendorInfo(parent = vendor)
    premium.phone = info['public_phone']
    premium.blog_url = info['vendor_blog']
    premium.social_links = _link_map(social_links)
    premium.trackbacks = _link_map(trackbacks)
    premium.review_links = _link_map(review_links)
    premium.awards = info['awards'].split('\n')
    premium.description = info['description']
    premium.promo_text = info['promo']
    premium.put()
    vendor.premium_info = premium
    vendor.put()
    for rm in sye_reviews:
        if len(rm) == 0:
            continue
        review = models.SyeReview(parent = premium)
        review.names = rm['names'].split(", ")
        review.review = rm['review']
        r_key = review.put()
        premium.sye_reviews.append(r_key)
        premium.put()
    counters.increment('vendors')
    return vendor.key().id()


def create_new_vendor(request):
    """Show (GET) or process (POST) the new-vendor admin form."""
    if request.method == 'POST':
        info_form = forms.NewVendorInfoForm(request.POST)
        social_links = forms.LinkFormset(request.POST, request.FILES,
                                         prefix = 'social')
        blog_links = forms.LinkFormset(request.POST, request.FILES,
                                       prefix = 'blog')
        review_links = forms.LinkFormset(request.POST, request.FILES,
                                         prefix = 'review')
        sye_reviews = forms.SyeReviewFormset(request.POST, request.FILES,
                                             prefix = 'syereview')
        if all_valid(info_form, social_links, blog_links, review_links,
                     sye_reviews):
            vendor_id = _put_new_vendor(info_form.cleaned_data,
                                        social_links.cleaned_data,
                                        blog_links.cleaned_data,
                                        review_links.cleaned_data,
                                        sye_reviews.cleaned_data)
            logging.info("Created vendor id %d"%(vendor_id,))
            return http.HttpResponseRedirect(
                "/admin/vendors/%s/wordpress"%(vendor_id,))
    else:
        info_form = forms.NewVendorInfoForm()
        social_links = forms.LinkFormset(prefix = 'social')
        blog_links = forms.LinkFormset(prefix = 'blog')
        review_links = forms.LinkFormset(prefix = 'review')
        sye_reviews = forms.SyeReviewFormset(prefix = 'syereview')
    # Fall through on GET or on validation failure: redisplay the forms.
    return simple.direct_to_template(request, "sye_admin/vendor-new.html", {
        'info_form': info_form,
        'social_link_form': social_links,
        'blog_link_form': blog_links,
        'review_link_form': review_links,
        'sye_review_form': sye_reviews,
    })


def list_categories(request):
    """Return up to 100 category names, sorted, as a JSON payload."""
    query = models.VendorCategory.all()
    query.order('name')
    cats = query.fetch(100)
    d = {'categories': [c.name for c in cats]}
    json = simplejson.dumps(d)
    return http.HttpResponse(json, mimetype = 'application/json')


def show_vendor_wordpress(request, id):
    """Render the wordpress-export view for one vendor."""
    vendor = models.Vendor.get_by_id(int(id))
    if vendor.premium_info.sye_reviews:
        sye_reviews = models.SyeReview.get(vendor.premium_info.sye_reviews)
    else:
        sye_reviews = []
    return simple.direct_to_template(request,
                                     "sye_admin/vendor-wordpress.html",
                                     {'vendor': vendor,
                                      'premium': vendor.premium_info,
                                      'sye_reviews': sye_reviews})


def _update_vendor_count(id, category, sequence, present):
    """Adjust one category's vendor_count for vendor `id`.

    Uses a per-(category, vendor) VendorMarker to make the update idempotent
    and to drop out-of-order task executions (sequence check).
    """
    cat_key = db.Key.from_path('VendorCategory', category)
    marker_key = db.Key.from_path('VendorMarker', id, parent = cat_key)
    cat, marker = db.get([cat_key, marker_key])
    if not cat:
        counters.increment('categories')
        cat = models.VendorCategory(key = cat_key, name = category)
    if not marker:
        marker = models.VendorMarker(key = marker_key)
    if marker.sequence >= sequence:
        raise db.Rollback('Ignore out-of-order')
    old, marker.present = marker.present, present
    marker.sequence = sequence
    marker.put()
    # Decrement for the previous state, increment for the new one; a no-op
    # re-delivery (old == present) nets out to zero.
    if old:
        cat.vendor_count -= 1
    if present:
        cat.vendor_count += 1
    db.put(cat)


def update_category_task(request):
    """Task-queue handler: apply queued category-count updates.

    POST params: vendor_id, sequence, '|'-joined categories, and present
    ('True'/'False' — booleans are stringified by the task queue).
    """
    cats = request.POST['categories'].split('|')
    logging.info("updating categories: %s"%(cats,))
    id = int(request.POST['vendor_id'])
    sequence = int(request.POST['sequence'])
    # BUG FIX: this was bool(request.POST['present']), but POST values are
    # strings and the producer serializes the boolean as 'True'/'False';
    # bool('False') is True, so category *removals* were being counted as
    # additions. Compare against the literal string instead.
    present = request.POST['present'] == 'True'
    for cat in cats:
        # this throws an error if we try and run it in a transaction, although
        # that's on the slides for materialized views
        _update_vendor_count(id, cat, sequence, present)
    return http.HttpResponse("OK")
# Copyright 2021 Proyectos y Sistemas de Mantenimiento SL (eProsima)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Integration test: spawns two Discovery Servers and two clients of the
# DDSParticipantDiscovery example, then edits FASTDDS_ENVIRONMENT_FILE JSON
# files on disk and checks (by scraping the processes' stdout) that the
# dynamic ROS_DISCOVERY_SERVER changes are picked up in five ordered steps.
import glob
import os
import queue
import subprocess
import sys
import threading
import time

script_dir = os.path.dirname(os.path.realpath(__file__))

# Test executable
process_command = os.environ.get(
    'CLIENT_SERVER_DYNAMIC_DISCOVERY_BIN')
if not process_command:
    # Fall back to searching for the built example binary next to this script.
    process_files = glob.glob(
        os.path.join(
            script_dir,
            '**/DDSParticipantDiscovery*'),
        recursive=True)
    pf = iter(process_files)
    process_command = next(pf, None)
    # Skip candidates that are not regular executable files.
    while process_command and \
            (not os.path.isfile(process_command) or
             not os.access(process_command, os.X_OK)):
        process_command = next(pf, None)
# NOTE(review): assert on a function-call style expression; fails the whole
# script if no executable was found.
assert(process_command)


# Thread that read process output and push it into a queue
def output_reader(proc, outq):
    # Reads until the process closes stdout (b'' sentinel from readline).
    for line in iter(proc.stdout.readline, b''):
        outq.put(line.decode('utf-8'))


def first_step(outq):
    """Wait until the override client reports discovering Server 1 exactly twice."""
    first_step_fulfilled = False
    server_1_discover_client = False
    count = 0
    initial_time = time.time()
    while not first_step_fulfilled:
        global stop_threads
        if stop_threads:
            break
        try:
            line = outq.get(block=False).rstrip()
            print(line)
            sys.stdout.flush()
            # Every line at this stage must mention Server 1's GUID prefix.
            assert '44.53.00.5f.45.50.52.4f.53.49.4d.41' in line
            assert 'discovered participant' in line
            count = count + 1
            if 'discovered participant 44.53.00.5f.45.50.52.4f.53.49.4d.41|0.0.1.c1: 1' in line:
                print('CLIENT OVERRIDE discovered SERVER 1')
                server_1_discover_client = True
        except queue.Empty:
            # Ensure that 2 s has passed so the file watch can detect that the
            # file has changed
            if server_1_discover_client and count >= 2 and (time.time() - initial_time) > 2:
                if count == 2:
                    first_step_fulfilled = True
                else:
                    print('ERROR: More discoveries than expected')
                    stop_threads = True
                    sys.exit(1)
            sys.stdout.flush()
        except AssertionError:
            print('ASSERTION ERROR: ' + line)
            stop_threads = True
            sys.exit(1)
        time.sleep(0.1)


def second_step(outq):
    """Wait for Server 2 discovery by the override client plus Client 2's warning."""
    second_step_fulfilled = False
    server_2_discover_client = False
    client_2_warning = False
    count = 0
    initial_time = time.time()
    while not second_step_fulfilled:
        global stop_threads
        if stop_threads:
            break
        try:
            line = outq.get(block=False).rstrip()
            print(line)
            sys.stdout.flush()
            if 'discovered participant' in line:
                # Only Server 2 (GUID prefix 44.53.01...) may be discovered now.
                assert '44.53.01.5f.45.50.52.4f.53.49.4d.41' in line
                count = count + 1
                if 'discovered participant 44.53.01.5f.45.50.52.4f.53.49.4d.41|0.0.1.c1: 2' in line:
                    print('CLIENT OVERRIDE discovered SERVER 2')
                    server_2_discover_client = True
            elif 'Warning' in line:
                # Client 2 does not discover anyone
                assert 'Trying to add Discovery Servers to a participant which is not a SERVER, BACKUP or an' in line
                assert 'overriden CLIENT (SIMPLE participant transformed into CLIENT with the environment variable)' in line
                client_2_warning = True
            else:
                assert 'detected changes on participant' in line
        except queue.Empty:
            # Ensure that 2 s has passed so the file watch can detect that the file has changed
            if server_2_discover_client and client_2_warning and count >= 2 and (time.time() - initial_time) > 2:
                if count == 2:
                    second_step_fulfilled = True
                else:
                    print('ERROR: More discoveries than expected')
                    stop_threads = True
                    sys.exit(1)
            sys.stdout.flush()
        except AssertionError:
            print('ASSERTION ERROR: ' + line)
            stop_threads = True
            sys.exit(1)
        time.sleep(0.1)


def third_step(outq):
    """Wait until both servers report having discovered each other."""
    third_step_fulfilled = False
    server_1_discover_server_2 = False
    server_2_discover_server_1 = False
    initial_time = time.time()
    while not third_step_fulfilled:
        global stop_threads
        if stop_threads:
            break
        try:
            line = outq.get(block=False).rstrip()
            print(line)
            sys.stdout.flush()
            if 'Participant 44.53.01.5f.45.50.52.4f.53.49.4d.41|0.0.1.c1 discovered participant' in line and \
                    '44.53.00.5f.45.50.52.4f.53.49.4d.41|0.0.1.c1: 2' in line:
                print ('SERVER 2 discovers SERVER 1')
                server_2_discover_server_1 = True
            elif 'Participant 44.53.00.5f.45.50.52.4f.53.49.4d.41|0.0.1.c1 discovered participant' in line and \
                    '44.53.01.5f.45.50.52.4f.53.49.4d.41|0.0.1.c1: 2' in line:
                print ('SERVER 1 discovers SERVER 2')
                server_1_discover_server_2 = True
            else:
                assert 'detected changes on participant' in line
        except queue.Empty:
            # Ensure that 2 s has passed so the file watch can detect that the file has changed
            if server_1_discover_server_2 and server_2_discover_server_1 and (time.time() - initial_time) > 2:
                third_step_fulfilled = True
            sys.stdout.flush()
        except AssertionError:
            print('ASSERTION ERROR: ' + line)
            stop_threads = True
            sys.exit(1)
        time.sleep(0.1)


def fourth_step(outq):
    """Expect both clients' warnings and *no* new discoveries after a removal."""
    fourth_step_fulfilled = False
    warning_client_1 = False
    warning_client_2 = False
    count = 0
    initial_time = time.time()
    while not fourth_step_fulfilled:
        global stop_threads
        if stop_threads:
            break
        try:
            line = outq.get(block=False).rstrip()
            print(line)
            sys.stdout.flush()
            if 'Trying to add Discovery Servers to a participant which is not a SERVER, BACKUP or an' in line \
                    and 'overriden CLIENT (SIMPLE participant transformed into CLIENT with the environment variable)' in line:
                warning_client_2 = True
            elif 'Discovery Servers cannot be removed from the list; they can only be added' in line:
                warning_client_1 = True
            elif 'discovered participant' in line:
                # Any discovery here is an error; counted and checked below.
                count = count + 1
        except queue.Empty:
            # Ensure that 2 s has passed so the file watch can detect that the file has changed
            if warning_client_1 and warning_client_2 and count == 0 and (time.time() - initial_time) > 2:
                fourth_step_fulfilled = True
            elif count > 0:
                print('ERROR: More discoveries than expected')
                stop_threads = True
                sys.exit(1)
            sys.stdout.flush()
        except AssertionError:
            print('ASSERTION ERROR: ' + line)
            stop_threads = True
            sys.exit(1)
        time.sleep(0.1)


def fifth_step(outq):
    """Expect the server's removal warning and no new discoveries."""
    fifth_step_fulfilled = False
    warning = False
    count = 0
    initial_time = time.time()
    while not fifth_step_fulfilled:
        global stop_threads
        if stop_threads:
            break
        try:
            line = outq.get(block=False).rstrip()
            print(line)
            sys.stdout.flush()
            if 'Discovery Servers cannot be removed from the list; they can only be added' in line:
                warning = True
            elif 'discovered participant' in line:
                count = count + 1
        except queue.Empty:
            # Ensure that 2 s has passed so the file watch can detect that the file has changed
            if warning and count == 0 and (time.time() - initial_time) > 2:
                fifth_step_fulfilled = True
            elif count > 0:
                print('ERROR: More discoveries than expected')
                stop_threads = True
                sys.exit(1)
            sys.stdout.flush()
        except AssertionError:
            print('ASSERTION ERROR: ' + line)
            stop_threads = True
            sys.exit(1)
        time.sleep(0.1)


# NOTE(review): this shadows the builtin exit(); consider renaming (e.g.
# fail_and_cleanup). It assumes the caller holds `cv` (releases it first).
def exit(cv):
    cv.release()
    print('ERROR: timeout without expected discovery happening')
    global stop_threads
    stop_threads = True
    os.remove(server_1_env_file)
    os.remove(server_2_env_file)
    os.remove(client_env_file)
    sys.exit(1)


def communication(proc, outq, outt, cv):
    """Pump one subprocess's output and run the step checkers.

    Spawns an output_reader thread for `proc`, then consumes step
    announcements from `outt` and runs the matching *_step() verifier on
    `outq`, notifying `cv` when a step completes. On exit, terminates the
    process and drains the remaining output.
    """
    t = threading.Thread(target=output_reader, args=(proc,outq))
    t.start()
    try:
        time.sleep(0.2)
        while True:
            global stop_threads
            if stop_threads:
                break
            try:
                line = outt.get(block=False).rstrip()
                print(line)
                sys.stdout.flush()
                if "FIRST STEP" in line:
                    first_step(outq)
                    cv.acquire()
                    cv.notify()
                    cv.release()
                elif "SECOND STEP" in line:
                    second_step(outq)
                    cv.acquire()
                    cv.notify()
                    cv.release()
                elif "THIRD STEP" in line:
                    third_step(outq)
                    cv.acquire()
                    cv.notify()
                    cv.release()
                elif "FOURTH STEP" in line:
                    fourth_step(outq)
                    cv.acquire()
                    cv.notify()
                    cv.release()
                elif "FIFTH STEP" in line:
                    fifth_step(outq)
                    cv.acquire()
                    cv.notify()
                    cv.release()
            except queue.Empty:
                sys.stdout.flush()
                time.sleep(0.1)
    finally:
        proc.terminate()
        # Drain whatever the reader thread has already queued.
        while outq.empty() != True:
            line = outq.get(block=False).rstrip()
            print(line)
        t.join()


# Random unicast port
random_port_server_1 = os.environ.get(
    'W_UNICAST_PORT_RANDOM_NUMBER')
random_port_server_2 = str(int(random_port_server_1) + 1)

# Condition variable
# NOTE(review): one cv and one outt queue are shared by all four
# communication threads — each step announcement is consumed by whichever
# thread polls first. Presumably intentional; verify.
cv = threading.Condition()

# Environment files
server_1_env_file = "server_1_env_file.json"
server_2_env_file = "server_2_env_file.json"
client_env_file = "client_env_file.json"

# Both server environment files are created empty
open(server_1_env_file, 'w+').close()
open(server_2_env_file, 'w+').close()

# Client environment file should include the locator for the first server
f = open(client_env_file, 'w+')
f.write('{"ROS_DISCOVERY_SERVER": "localhost:')
f.write(random_port_server_1)
f.write('"}')
f.close()

outq = queue.Queue()
outt = queue.Queue()

outt.put("TEST RUNNING\n")
outt.put("FIRST STEP: Override Client discovers Server 1\n")

server_1_process = subprocess.Popen(
    [process_command,
     "--discovery_protocol", "SERVER",
     "--guid_prefix", "44.53.00.5F.45.50.52.4F.53.49.4D.41",
     "--unicast_metatraffic_locator", random_port_server_1],
    env={"FASTDDS_ENVIRONMENT_FILE": server_1_env_file},
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT)

server_2_process = subprocess.Popen(
    [process_command,
     "--discovery_protocol", "SERVER",
     "--guid_prefix", "44.53.01.5F.45.50.52.4F.53.49.4D.41",
     "--unicast_metatraffic_locator", random_port_server_2],
    env={"FASTDDS_ENVIRONMENT_FILE": server_2_env_file},
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT)

# The client must be DiscoveryProtocol::SIMPLE to use the environment variable
client_override_process = subprocess.Popen(
    process_command,
    env={"FASTDDS_ENVIRONMENT_FILE": client_env_file},
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT)

# DiscoveryProtocol::CLIENT, environment variable does not apply either initializing as updating
client_process = subprocess.Popen(
    [process_command, "--discovery_protocol", "CLIENT"],
    env={"FASTDDS_ENVIRONMENT_FILE": client_env_file},
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT)

stop_threads = False

t_0 = threading.Thread(target=communication,
                       args=(server_1_process,outq,outt,cv))
t_1 = threading.Thread(target=communication,
                       args=(server_2_process,outq,outt,cv))
t_2 = threading.Thread(target=communication,
                       args=(client_override_process,outq,outt,cv))
t_3 = threading.Thread(target=communication,
                       args=(client_process,outq,outt,cv))

t_0.start()
t_1.start()
t_2.start()
t_3.start()

# Wait 10 seconds for the condition variable to be notified
cv.acquire()
result = cv.wait(10)
if result == False:
    exit(cv)

outt.put("SECOND STEP: Override Client discovers both Servers. Normal Client does not discover anyone\n")

# Add second server to client
f = open(client_env_file, 'w+')
f.write('{"ROS_DISCOVERY_SERVER": "localhost:')
f.write(random_port_server_1)
f.write(';localhost:')
f.write(random_port_server_2)
f.write('"}')
f.close()

result = cv.wait(10)
if result == False:
    exit(cv)

outt.put("THIRD STEP: Both Servers discover each other\n")

# Add second server to first server
f = open(server_1_env_file, 'w+')
f.write('{"ROS_DISCOVERY_SERVER": ";localhost:')
f.write(random_port_server_2)
f.write('"}')
f.close()

result = cv.wait(10)
if result == False:
    exit(cv)

outt.put("FOURTH STEP: Removing a Server from the Client list outputs a Log Warning\n")

# Remove first server from client list
f = open(client_env_file, 'w+')
f.write('{"ROS_DISCOVERY_SERVER": ";localhost:')
f.write(random_port_server_2)
f.write('"}')
f.close()

result = cv.wait(10)
if result == False:
    exit(cv)

outt.put("FIFTH STEP: Removing a Server from the Server list outputs a Log Warning\n")

# Remove server from the server 1 list
f = open(server_1_env_file, 'w+')
f.write('{"ROS_DISCOVERY_SERVER": ""}')
f.close()

result = cv.wait(10)
if result == False:
    exit(cv)

outt.put("Killing processes\n")
cv.release()

# Kill processes
stop_threads = True
t_0.join()
t_1.join()
t_2.join()
t_3.join()

# Delete files
os.remove(server_1_env_file)
os.remove(server_2_env_file)
os.remove(client_env_file)
#!/usr/bin/python
# NYPL Labs Map Vectorizer: thresholds a scanned map TIFF with GIMP,
# georeferences and polygonizes it with GDAL/OGR, simplifies the polygons
# with an external R script, classifies each polygon's dominant color, and
# emits a traced shapefile + GeoJSON. Python 2 script; shells out heavily.
# NOTE(review): `sys` and `os` are imported twice below — harmless but redundant.
import re, sys, getopt, subprocess, shlex, os, datetime, ogr, glob, csv
import cv2, sys
from cv2 import cv
import os
import numpy as np

tempgdalfile = ''

instructions = 'vectorize_map.py <input file or dir>'
defaultgimp = '/Applications/Gimp.app/Contents/MacOS/gimp-2.8'
gimp_path = defaultgimp

chunksize = 50000 # how to split the mega polygon file
currentchunk = 0
totalsubsets = 0

# colors sh/could be an external config file
basecolors = [  [206,202,185]   # paper
              , [199,179,173]   # pink
              , [179,155,157]   # dark red
              , [149,156,141]   # green
              , [199,195,163]   # light yellow
              , [195,189,154]   # yellow
              , [255,225,40]    # bright yellow
              , [137,174,163]   # greenish blue
              , [187,194,192]   # light blue
              , [161,175,190]   # "navy" blue
              ]

brightness = -50
contrast = 95
thresholdblack = 145
thresholdwhite = 255

def main(argv):
    """Parse CLI args / config file and process one TIFF or a directory of them."""
    global instructions
    global defaultgimp
    global gimp_path
    global basecolors
    global brightness
    global contrast
    global thresholdblack
    global thresholdwhite
    try:
        opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
    except getopt.GetoptError:
        print instructions
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print instructions
            sys.exit()
        elif opt in ("-i"):
            inputfile = arg
    # A single bare argument is also accepted as the input path.
    if len(argv) == 1:
        inputfile = argv[0]
    if inputfile == '':
        print instructions
        sys.exit(2)
    print ""
    print ""
    print ""
    print ""
    print ""
    print "NYPL Labs Map Vectorizer v0.1"
    print "============================="
    print "By: Mauricio Giraldo Arteaga @mgiraldo / @nypl_labs"
    print ""
    gimp_path = raw_input("GIMP executable path [" + defaultgimp + "]: ")
    starttime = datetime.datetime.now()
    if gimp_path == '':
        gimp_path = defaultgimp
    # test for config file
    # TODO: integer checking
    # FIRST LINE SHOULD ALWAYS BE PAPER COLOR
    config_file = "vectorize_config.txt"
    if os.path.isfile(config_file):
        tempcolors = []
        index = 0
        with open(config_file, 'r') as configcsv:
            configdata = csv.reader(configcsv, delimiter=',')
            for row in configdata:
                if index > 0:
                    tempcolors.append([int(row[0]), int(row[1]), int(row[2])])
                else:
                    # brightness/contrast/threshold values
                    brightness = int(row[0])
                    contrast = int(row[1])
                    thresholdblack = int(row[2])
                    thresholdwhite = int(row[3])
                index = index + 1
        # Only replace the built-in palette if the config defines enough colors.
        if len(tempcolors) > 2:
            basecolors = tempcolors
    totalfiles = 0
    # if input is a directory iterate through it
    if os.path.isdir(inputfile) == True:
        for ff in os.listdir(inputfile):
            if ff.endswith(".tif"):
                totalfiles = totalfiles + 1
                processfile(ff, inputfile)
    else:
        # if input is a file, process it
        processfile(inputfile, "")
        totalfiles = 1
    endtime = datetime.datetime.now()
    deltatime = endtime-starttime
    print "Processed " + str(totalfiles) + " files\n"
    print "Operation took " + str(deltatime.seconds) + " seconds"

def processfile(inputfile, basedir):
    """Run the full vectorization pipeline on one TIFF.

    Stages: GIMP threshold -> GDAL georeference/warp -> gdal_polygonize ->
    chunked OGR split -> R simplification -> per-polygon color/feature
    classification -> merged traced shapefile -> GeoJSON -> cleanup.
    NOTE(review): shell commands are built by string concatenation with
    unquoted paths — a path containing spaces (or shell metacharacters)
    breaks these os.system calls.
    """
    #
    # NOTE
    #
    # This still needs a lot of work for when dealing with subfolders and such.
    # Best case is image file is located right next to vectorizer_map.py
    #
    global tempgdalfile
    global instructions
    global defaultgimp
    global gimp_path
    global chunksize
    global currentchunk
    global totalsubsets
    currentchunk = 0
    totalsubsets = 0
    print "\n\nProcessing file: " + inputfile
    # right now assuming vectorizer, simplifier and input are in the same folder
    fullpath = os.path.abspath(__file__)
    base_name = inputfile[:inputfile.find(".tif")]
    base_name = base_name[base_name.rfind("/")+1:]
    # create a folder to store all this
    if basedir != '':
        directory = basedir + '/' + base_name
        inputfile = basedir + '/' + inputfile
    else:
        directory = base_name
    if not os.path.exists(directory):
        os.makedirs(directory)
    path = fullpath[:fullpath.find("/vectorize_map.py")] + '/' + directory
    # GIMP processing
    dir_base_name = directory + "/" + base_name
    # create a log file
    logfile = open(directory + "/py-log.txt", "w")
    logfile.write("Log file for " + inputfile + " with colors:\n\n")
    logfile.write(str(basecolors) + "\n\n")
    thresholdfile = dir_base_name + "-threshold-tmp.tif"
    comparativefile = dir_base_name + "-comparative-tmp.tif"
    print "\n\n"
    print "Thresholdizing:"
    print "---------------"
    print inputfile + " into threshold file: " + thresholdfile
    # Script-Fu batch program: load TIFF, apply brightness/contrast then
    # threshold, and save the result back out.
    contraststring = '(gimp-brightness-contrast drawable ' + str(brightness) + ' ' + str(contrast) + ')'
    thresholdstring = '(gimp-threshold drawable ' + str(thresholdblack) + ' ' + str(thresholdwhite) + ')'
    gimpcommand = '(let* ((image (car (file-tiff-load RUN-NONINTERACTIVE "' + inputfile + '" "' + inputfile + '"))) (drawable (car (gimp-image-get-layer-by-name image "Background")))) (gimp-selection-none image) ' + contraststring + ' ' + thresholdstring + ' (gimp-file-save RUN-NONINTERACTIVE image drawable "' + thresholdfile + '" "' + thresholdfile + '") (gimp-image-delete image))'
    # Skip stages whose output file already exists (cheap resume support).
    if (not os.path.isfile(thresholdfile)):
        command = gimp_path + ' -i -b \'' + gimpcommand + '\' -b \'(gimp-quit 0)\''
        logfile.write(command + "\n")
        # print command
        os.system(command)
    # print inputfile + " into comparative file: " + comparativefile
    # command = gimp_path + ' -i -b \'(nypl-create-comparative "' + inputfile + '" "' + comparativefile + '")\' -b \'(gimp-quit 0)\''
    # logfile.write(command + "\n")
    # # print command
    # os.system(command)
    tempgdalfile = dir_base_name + "-tmp.tif"
    # GDAL transformation
    print "\n"
    print 'Origin GeoTIFF :', inputfile
    print 'Destination    :', tempgdalfile
    # BETTER (SOME) ERROR HANDLING SHOULD BE DONE!!!!!
    # first get geotiff data from original
    geoText = subprocess.Popen(["gdalinfo", inputfile], stdout=subprocess.PIPE).communicate()[0]
    pattern = re.compile(r"Upper Left\s*\(\s*([0-9\-\.]*),\s*([0-9\-\.]*).*\n.*\n.*\nLower Right\s*\(\s*([0-9\-\.]*),\s*([0-9\-\.]*).*")
    geoMatch = pattern.findall(geoText)
    # print pattern
    print "\n"
    print "Geodata obtained:"
    print "-----------------"
    print "W", geoMatch[0][0]
    print "N", geoMatch[0][1]
    print "E", geoMatch[0][2]
    print "S", geoMatch[0][3]
    print "\n"
    W = geoMatch[0][0]
    N = geoMatch[0][1]
    E = geoMatch[0][2]
    S = geoMatch[0][3]
    print "Applying to destination:"
    print "------------------------"
    # print outputgdal
    # Stamp the thresholded image with the original's corner coordinates...
    outputwsg = dir_base_name + "-wsg-tmp.tif"
    if (not os.path.isfile(outputwsg)):
        command = 'gdal_translate -a_srs "+proj=latlong +datum=WGS84" -of GTiff -co "INTERLEAVE=PIXEL" -a_ullr ' + W + ' ' + N + ' ' + E + ' ' + S + ' ' + thresholdfile + ' ' + outputwsg
        logfile.write(command + "\n")
        # print command
        os.system(command)
    print ""
    # ...then warp it into web-mercator (EPSG:3785).
    outputgdal = dir_base_name + "-gdal-tmp.tif"
    if (not os.path.isfile(outputgdal)):
        command = 'gdalwarp -s_srs EPSG:4326 -t_srs EPSG:3785 -r bilinear ' + outputwsg + ' ' + outputgdal
        logfile.write(command + "\n")
        # print command
        os.system(command)
    # # transform comparative
    # comparativewsg = dir_base_name + "-comparative-wsg-tmp.tif"
    # command = 'gdal_translate -a_srs "+proj=latlong +datum=WGS84" -of GTiff -co "INTERLEAVE=PIXEL" -a_ullr ' + W + ' ' + N + ' ' + E + ' ' + S + ' ' + comparativefile + ' ' + comparativewsg
    # logfile.write(command + "\n")
    # # print command
    # os.system(command)
    # print ""
    # comparativegdal = dir_base_name + "-comparative-gdal-tmp.tif"
    # command = 'gdalwarp -s_srs EPSG:4326 -t_srs EPSG:3785 -r bilinear ' + comparativewsg + ' ' + comparativegdal
    # logfile.write(command + "\n")
    # # print command
    # os.system(command)
    # QGIS POLYGONIZE
    print ""
    print "Polygonizing (coarse):"
    print "----------------------"
    shapefile = dir_base_name + '.shp'
    if (not os.path.isfile(shapefile)):
        command = 'gdal_polygonize.py ' + outputgdal + ' -f "ESRI Shapefile" ' + shapefile + ' ' + base_name
        logfile.write(command + "\n")
        # print command
        os.system(command)
    # Split resulting megapolygon file into smaller chunks
    # most code from: http://cosmicproject.org/OGR/cris_example_write.html
    print ""
    print "Splitting megapolygon file into chunks"
    print "--------------------------------------"
    #####
    # 2 get the shapefile driver
    driver = ogr.GetDriverByName('ESRI Shapefile')
    # 3 open the input data source and get the layer
    inDS = driver.Open(shapefile, 0) #shows cover at given points
    if inDS is None:
        print 'Could not open shapefile'
        sys.exit(1)
    inLayer = inDS.GetLayer()
    # 5 get the FieldDefn's for the id and cover fields in the input shapefile
    feature = inLayer.GetFeature(0)
    idFieldDefn = feature.GetFieldDefnRef('DN')
    # 7 loop through the input features
    inFeature = inLayer.GetNextFeature()
    while inFeature:
        # Start a fresh chunk shapefile every `chunksize` features.
        if currentchunk == 0 or currentchunk >= chunksize:
            currentchunk = 0
            totalsubsets = totalsubsets + 1
            # this is a new temp file
            # 4 create a new data source and layer
            fn = dir_base_name + '-tmp-' + str(totalsubsets) + '.shp'
            if os.path.exists(fn):
                driver.DeleteDataSource(fn)
            outDS = driver.CreateDataSource(fn)
            if outDS is None:
                print 'Could not create temp shapefile'
                sys.exit(1)
            outLayer = outDS.CreateLayer(base_name, geom_type=ogr.wkbPolygon)
            #create new field in the output shapefile
            outLayer.CreateField(idFieldDefn)
            # 6 get the FeatureDefn for the output layer
            featureDefn = outLayer.GetLayerDefn()
        # create a new feature
        outFeature = ogr.Feature(featureDefn) #using featureDefn created in step 6
        # set the geometry
        geom = inFeature.GetGeometryRef()
        outFeature.SetGeometry(geom) #move it to the new feature
        # set the attributes
        DN = inFeature.GetField('DN')
        outFeature.SetField('DN', DN) #move it to the new feature
        # add the feature to the output layer
        outLayer.CreateFeature(outFeature)
        # destroy the output feature
        outFeature.Destroy()
        # destroy the input feature and get a new one
        inFeature.Destroy()
        inFeature = inLayer.GetNextFeature()
        currentchunk = currentchunk + 1
    # close the data sources
    inDS.Destroy()
    outDS.Destroy() #flush out the last changes here
    print ""
    print "Produced " + str(totalsubsets) + " temporary shapefiles"
    print ""
    #####
    # R Simplification
    print ""
    print "Polygonizing (simplify):"
    print "------------------------"
    # First simplify each temporary shapefile
    currentsubset = 1
    while currentsubset <= totalsubsets:
        rinput = path + '/' + base_name + '-tmp-' + str(currentsubset) + '.shp'
        routput = path + '/' + base_name + '-tmp-' # + str(currentsubset)
        layer = base_name + '-tmp-' + str(currentsubset)
        command = 'R --vanilla --silent --slave -f simplify_map.R --args ' + rinput + ' ' + layer + ' ' + routput + ' ' + path + ' ' + str(currentsubset)
        logfile.write(command + "\n")
        # print command
        os.system(command)
        currentsubset = currentsubset + 1
    # Now combine all subsets into a macroset
    # 4 create a new data source and layer
    fn = dir_base_name + '-traced.shp'
    if os.path.exists(fn):
        driver.DeleteDataSource(fn)
    outDS = driver.CreateDataSource(fn)
    if outDS is None:
        print 'Could not create final shapefile'
        sys.exit(1)
    outLayer = outDS.CreateLayer(base_name, geom_type=ogr.wkbPolygon)
    #create new field in the output shapefile
    outLayer.CreateField(idFieldDefn)
    # 6 get the FeatureDefn for the output layer
    featureDefn = outLayer.GetLayerDefn()
    # new field definitions for this shapefile
    # color definition
    colorDefn = ogr.FieldDefn("Color", ogr.OFTInteger)
    colorDefn.SetWidth(2)
    colorDefn.SetPrecision(0)
    outLayer.CreateField( colorDefn )
    # dot count definition
    dotCountDefn = ogr.FieldDefn("DotCount", ogr.OFTInteger)
    dotCountDefn.SetWidth(2)
    dotCountDefn.SetPrecision(0)
    outLayer.CreateField( dotCountDefn )
    # dot type definition
    dotTypeDefn = ogr.FieldDefn("DotType", ogr.OFTInteger)
    dotTypeDefn.SetWidth(1)
    dotTypeDefn.SetPrecision(0)
    outLayer.CreateField( dotTypeDefn )
    # cross count definition
    crossCountDefn = ogr.FieldDefn("CrossCount", ogr.OFTInteger)
    crossCountDefn.SetWidth(2)
    crossCountDefn.SetPrecision(0)
    outLayer.CreateField( crossCountDefn )
    # cross data definition
    crossDataDefn = ogr.FieldDefn("CrossData", ogr.OFTString)
    crossDataDefn.SetWidth(255)
    outLayer.CreateField( crossDataDefn )
    polygonfiles = []
    for files in os.listdir(path):
        if files.endswith(".shp") and files.find('-polygon') != -1:
            polygonfile = path + "/" + files
            # apply a projection so gdalwarp doesnt complain
            polygonfilename = files[:files.find(".shp")]
            os.system("cp " + dir_base_name + ".prj " + path + "/" + polygonfilename + ".prj")
            extractedfile = path + "/" + polygonfilename + "-extracted.tif"
            # extract bitmap from original
            command = "gdalwarp -q -t_srs EPSG:3785 -cutline " + polygonfile + " -crop_to_cutline -of GTiff " + inputfile + " " + extractedfile
            logfile.write(command + "\n")
            # print command
            os.system(command)
            # calculate color
            # shrink to 1x1 and find value (ImageMagick averages the polygon's pixels)
            pixelvalue = subprocess.Popen(["convert", "-quiet", extractedfile, "-resize", "1x1","txt:-"], stdout=subprocess.PIPE).communicate()[0]
            pattern = re.compile(r"0,0: \(([\s0-9]*),([\s0-9]*),([\s0-9]*).*")
            values = pattern.findall(pixelvalue)
            if len(values) > 0:
                red = int(values[0][0])
                green = int(values[0][1])
                blue = int(values[0][2])
                # Nearest palette color by squared Euclidean RGB distance.
                nearest = 100000
                nearestcolor = []
                nearestcolorindex = -1
                for i, color in enumerate(basecolors):
                    dred = (color[0] - red) * (color[0] - red)
                    dgreen = (color[1] - green) * (color[1] - green)
                    dblue = (color[2] - blue) * (color[2] - blue)
                    dist = dred + dgreen + dblue
                    if dist < nearest:
                        nearest = dist
                        nearestcolor = color
                        nearestcolorindex = i
                # only add if NOT paper
                if nearestcolor != basecolors[0]:
                    # check for dots
                    circle_data = cvFeatureDetect(extractedfile)
                    # add to array
                    polygonfiles.append([polygonfile, nearestcolorindex, circle_data])
                else:
                    logfile.write("Ignored (paper color): " + polygonfilename + "\n")
            else:
                logfile.write("Ignored (regex match error): " + polygonfilename + "\n")
    for files in polygonfiles:
        # 3 open the input data source and get the layer
        tempfile = files[0] #dir_base_name + '-tmp-' + str(currentsubset) + '-traced.shp'
        inDS = driver.Open(tempfile, 0) #shows cover at given points
        if inDS is None:
            print 'Could not open temporary shapefile'
            break
        inLayer = inDS.GetLayer()
        # 7 loop through the input features
        inFeature = inLayer.GetNextFeature()
        while inFeature:
            # create a new feature
            outFeature = ogr.Feature(featureDefn) #using featureDefn created in step 6
            # set the geometry
            geom = inFeature.GetGeometryRef()
            outFeature.SetGeometry(geom) #move it to the new feature
            DN = inFeature.GetField('DN')
            outFeature.SetField('DN', DN ) #move it to the new feature
            outFeature.SetField('Color', int(files[1]) )
            outFeature.SetField('DotCount', int(files[2]["count"]) )
            outFeature.SetField('DotType', int(files[2]["is_outline"]) )
            outFeature.SetField('CrossCount', int(files[2]["cross_count"]) )
            outFeature.SetField('CrossData', str(files[2]["cross_data"]) )
            # outFeature.SetField('circle_count', files[2]["circle_count"])
            # outFeature.SetField('circle_type', files[2]["is_outline"])
            # add the feature to the output layer
            outLayer.CreateFeature(outFeature)
            # destroy the output feature
            outFeature.Destroy()
            # destroy the input feature and get a new one
            inFeature.Destroy()
            inFeature = inLayer.GetNextFeature()
        # close the data sources
        inDS.Destroy()
    outDS.Destroy() #flush out the last changes here
    print ""
    print "Applying projection file to result..."
    print "-------------------------------------"
    os.system("cp " + dir_base_name + ".prj " + dir_base_name + "-traced.prj")
    print ""
    print "Creating GeoJSON output..."
    print "--------------------------"
    jsonfile = dir_base_name + '-traced.json'
    command = 'ogr2ogr -t_srs EPSG:4326 -s_srs EPSG:3857 -f "GeoJSON" ' + jsonfile + ' ' + fn
    logfile.write(command + "\n")
    # print command
    os.system(command)
    # Cleaning
    print ""
    print "Cleaning..."
    print "-----------"
    os.system("rm " + outputgdal)
    os.system("rm " + outputwsg)
    os.system("rm " + thresholdfile)
    os.system("rm " + dir_base_name + "-tmp-*.shp")
    os.system("rm " + dir_base_name + "-tmp-*.dbf")
    os.system("rm " + dir_base_name + "-tmp-*.shx")
    os.system("rm " + dir_base_name + "-tmp-*.prj")
    os.system("rm " + dir_base_name + "-tmp*.tif")
    os.system("rm " + dir_base_name + ".*")
    # close log file
    logfile.close()

def cvFeatureDetect(inputfile):
    """Detect circle features (dots) in a polygon's extracted bitmap.

    Returns a dict with at least "count", "is_outline" and "circles".
    NOTE(review): `outline_circles` is set but unused in the visible portion;
    assumes HoughCircles result shape is (1, n, 3) — TODO confirm.
    """
    max_dist = 20 # distance between circles to consider it an empty circle
    retval = {}
    im=cv2.imread(inputfile)
    gray=cv2.cvtColor(im,cv.CV_RGB2GRAY)
    circles = cv2.HoughCircles(gray, cv.CV_HOUGH_GRADIENT, 1, 2, np.array([]), 200, 8, 4, 8)
    total_circles = 0
    outline_circles = 1
    unique_circles = []
    if not (isinstance(circles, np.ndarray) and circles.shape[1] > 0):
        retval = {"count":0, "is_outline": 0, "circles":circles}
    else:
        total_circles = circles.shape[1]
        if total_circles == 1:
            # only one circle and it is filled
            retval = {"count":total_circles, "is_outline": 0, "circles":circles}
        else :
            # this is wrong...
use for now outline_circles = 0 if total_circles > 0: current_circle = -1 current_x = circles[0][0][0] current_y = circles[0][0][1] # an array of circles with distance less than max_dist # starts with the first circle unique_circles = [[current_x, current_y]] delta_x = 0 delta_y = 0 for n in range(1, total_circles): circle = circles[0][n] current_x = circle[0] current_y = circle[1] # distance to all the unique circles last_unique = circle is_inside = False for unique in unique_circles: last_unique = unique delta_x = unique[0] - current_x delta_y = unique[1] - current_y square_dist = (delta_x*delta_x) + (delta_y*delta_y) if square_dist <= max_dist: # circle is inside another unique is_inside = True # we assume all are outlines if at least one is outline outline_circles = 1 break if not is_inside: unique_circles.append([current_x, current_y]) # cv2.circle(im,(circle[0],circle[1]),circle[2],(0,0,255), 1) retval = {"count":len(unique_circles), "is_outline": outline_circles, "circles":circles} # NOW DETECT CROSSES # code based on http://nbviewer.ipython.org/5861365 score_threshold = 0.954 # certainty there IS a cross cross1 = cv2.imread("cross1.jpg") cross_count = 0 cross_data = {} if cross1.shape[0] < im.shape[0] and cross1.shape[1] < im.shape[1]: graycross1 = cv2.cvtColor(cross1,cv.CV_RGB2GRAY) match1 = cv2.matchTemplate(gray, graycross1, cv2.TM_CCORR_NORMED) min_score, max_score, (min_x, min_y), (max_x, max_y) = cv2.minMaxLoc(match1) if (max_score >= score_threshold): # only testing 1 cross for now cross_count = 1 corner_topL = (max_x, max_y) corner_botR = (corner_topL[0]+cross1.shape[1], corner_topL[1]+cross1.shape[0]) cross_data = {"top_left":corner_topL, "bottom_right":corner_botR, "score": max_score} retval["cross_count"] = cross_count retval["cross_data"] =cross_data return retval if __name__ == "__main__": main(sys.argv[1:])
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from openstack_dashboard.test.integration_tests import decorators
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages


class TestImagesBasic(helpers.TestCase):
    """Login as demo user"""
    IMAGE_NAME = helpers.gen_random_resource_name("image")

    @property
    def images_page(self):
        return self.home_pg.go_to_compute_imagespage()

    def image_create(self, local_file=None):
        """Create an image (from a local file when given) and verify it.

        Returns the images page so callers can continue interacting with it.
        """
        images_page = self.images_page
        if local_file:
            images_page.create_image(self.IMAGE_NAME,
                                     image_source_type='file',
                                     image_file=local_file)
        else:
            images_page.create_image(self.IMAGE_NAME)
        self.assertTrue(images_page.find_message_and_dismiss(messages.INFO))
        self.assertFalse(images_page.find_message_and_dismiss(messages.ERROR))
        self.assertTrue(images_page.is_image_present(self.IMAGE_NAME))
        self.assertTrue(images_page.is_image_active(self.IMAGE_NAME))
        return images_page

    def image_delete(self):
        """Delete the test image and verify it is gone."""
        images_page = self.images_page
        images_page.delete_image(self.IMAGE_NAME)
        self.assertTrue(images_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(images_page.find_message_and_dismiss(messages.ERROR))
        self.assertFalse(images_page.is_image_present(self.IMAGE_NAME))

    def test_image_create_delete(self):
        """tests the image creation and deletion functionalities:

        * creates a new image from horizon.conf http_image
        * verifies the image appears in the images table as active
        * deletes the newly created image
        * verifies the image does not appear in the table after deletion
        """
        self.image_create()
        self.image_delete()

    def test_image_create_delete_from_local_file(self):
        """tests the image creation and deletion functionalities:

        * downloads image from horizon.conf stated in http_image
        * creates the image from the downloaded file
        * verifies the image appears in the images table as active
        * deletes the newly created image
        * verifies the image does not appear in the table after deletion
        """
        with helpers.gen_temporary_file() as file_name:
            self.image_create(local_file=file_name)
            self.image_delete()

    def test_images_pagination(self):
        """This test checks images pagination

        Steps:
        1) Login to Horizon Dashboard as horizon user
        2) Navigate to user settings page
        3) Change 'Items Per Page' value to 1
        4) Go to Project -> Compute -> Images page
        5) Check that only 'Next' link is available, only one image is
           available (and it has correct name)
        6) Click 'Next' and check that both 'Prev' and 'Next' links are
           available, only one image is available (and it has correct name)
        7) Click 'Next' and check that only 'Prev' link is available,
           only one image is visible (and it has correct name)
        8) Click 'Prev' and check results (should be the same as for step6)
        9) Click 'Prev' and check results (should be the same as for step5)
        10) Go to user settings page and restore 'Items Per Page'
        """
        default_image_list = self.CONFIG.image.images_list
        items_per_page = 1
        first_page_definition = {'Next': True, 'Prev': False,
                                 'Count': items_per_page,
                                 'Names': [default_image_list[0]]}
        second_page_definition = {'Next': True, 'Prev': True,
                                  'Count': items_per_page,
                                  'Names': [default_image_list[1]]}
        third_page_definition = {'Next': False, 'Prev': True,
                                 'Count': items_per_page,
                                 'Names': [default_image_list[2]]}

        settings_page = self.home_pg.go_to_settings_usersettingspage()
        settings_page.change_pagesize(items_per_page)
        settings_page.find_message_and_dismiss(messages.SUCCESS)

        images_page = self.images_page
        images_page.images_table.assert_definition(first_page_definition)

        images_page.images_table.turn_next_page()
        images_page.images_table.assert_definition(second_page_definition)

        images_page.images_table.turn_next_page()
        images_page.images_table.assert_definition(third_page_definition)

        images_page.images_table.turn_prev_page()
        images_page.images_table.assert_definition(second_page_definition)

        images_page.images_table.turn_prev_page()
        images_page.images_table.assert_definition(first_page_definition)

        # restore the default page size so later tests are unaffected
        settings_page = self.home_pg.go_to_settings_usersettingspage()
        settings_page.change_pagesize()
        settings_page.find_message_and_dismiss(messages.SUCCESS)

    def test_update_image_metadata(self):
        """Test update image metadata

        * logs in as admin user
        * creates image from locally downloaded file
        * verifies the image appears in the images table as active
        * invokes action 'Update Metadata' for the image
        * adds custom field 'metadata'
        * adds value 'image' for the custom field 'metadata'
        * gets the actual description of the image
        * verifies that custom field is present in the image description
        * deletes the image
        * verifies the image does not appear in the table after deletion
        """
        new_metadata = {'metadata1': helpers.gen_random_resource_name("value"),
                        'metadata2': helpers.gen_random_resource_name("value")}

        with helpers.gen_temporary_file() as file_name:
            images_page = self.image_create(local_file=file_name)
            images_page.add_custom_metadata(self.IMAGE_NAME, new_metadata)
            results = images_page.check_image_details(self.IMAGE_NAME,
                                                      new_metadata)
        self.image_delete()
        self.assertSequenceTrue(results)  # custom matcher

    def test_remove_protected_image(self):
        """tests that protected image is not deletable

        * logs in as admin user
        * creates image from locally downloaded file
        * verifies the image appears in the images table as active
        * marks 'Protected' checkbox
        * verifies that edit action was successful
        * verifies that delete action is not available in the list
        * tries to delete the image
        * verifies that exception is generated for the protected image
        * unmarks 'Protected' checkbox
        * deletes the image
        * verifies the image does not appear in the table after deletion
        """
        with helpers.gen_temporary_file() as file_name:
            images_page = self.image_create(local_file=file_name)
            images_page.edit_image(self.IMAGE_NAME, protected=True)
            self.assertTrue(
                images_page.find_message_and_dismiss(messages.SUCCESS))

            # Check that Delete action is not available in the action list.
            # The below action will generate exception since the bind fails.
            # But only ValueError with message below is expected here.
            with self.assertRaisesRegexp(ValueError, 'Could not bind method'):
                images_page.delete_image_via_row_action(self.IMAGE_NAME)

            # Try to delete image. That should not be possible now.
            images_page.delete_image(self.IMAGE_NAME)
            self.assertFalse(
                images_page.find_message_and_dismiss(messages.SUCCESS))
            self.assertTrue(
                images_page.find_message_and_dismiss(messages.ERROR))

            self.assertTrue(images_page.is_image_present(self.IMAGE_NAME))
            images_page.edit_image(self.IMAGE_NAME, protected=False)
            self.assertTrue(
                images_page.find_message_and_dismiss(messages.SUCCESS))

            self.image_delete()


class TestImagesAdvanced(helpers.TestCase):
    """Login as demo user"""
    IMAGE_NAME = helpers.gen_random_resource_name("image")

    @property
    def images_page(self):
        return self.home_pg.go_to_compute_imagespage()

    def test_create_volume_from_image(self):
        """This test case checks create volume from image functionality:

        Steps:
        1. Login to Horizon Dashboard as regular user
        2. Navigate to Project -> Compute -> Images
        3. Create new volume from image
        4. Check that volume is created with expected name
        5. Check that volume status is Available
        """
        images_page = self.images_page
        source_image = self.CONFIG.image.images_list[0]
        target_volume = "created_from_{0}".format(source_image)

        volumes_page = images_page.create_volume_from_image(
            source_image, volume_name=target_volume)
        self.assertTrue(
            volumes_page.find_message_and_dismiss(messages.INFO))
        self.assertFalse(
            volumes_page.find_message_and_dismiss(messages.ERROR))
        self.assertTrue(volumes_page.is_volume_present(target_volume))
        self.assertTrue(volumes_page.is_volume_status(target_volume,
                                                      'Available'))
        volumes_page.delete_volume(target_volume)
        volumes_page.find_message_and_dismiss(messages.SUCCESS)
        volumes_page.find_message_and_dismiss(messages.ERROR)
        self.assertTrue(volumes_page.is_volume_deleted(target_volume))

    def test_launch_instance_from_image(self):
        """This test case checks launch instance from image functionality:

        Steps:
        1. Login to Horizon Dashboard as regular user
        2. Navigate to Project -> Compute -> Images
        3. Launch new instance from image
        4. Check that instance is created
        5. Check that status of newly created instance is Active
        6. Check that image_name is correct in the instances table
        """
        images_page = self.images_page
        source_image = self.CONFIG.image.images_list[0]
        target_instance = "created_from_{0}".format(source_image)
        instances_page = images_page.launch_instance_from_image(
            source_image, target_instance)
        self.assertTrue(
            instances_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(
            instances_page.find_message_and_dismiss(messages.ERROR))
        self.assertTrue(instances_page.is_instance_active(target_instance))
        actual_image_name = instances_page.get_image_name(target_instance)
        self.assertEqual(source_image, actual_image_name)

        instances_page.delete_instance(target_instance)
        self.assertTrue(
            instances_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(
            instances_page.find_message_and_dismiss(messages.ERROR))
        self.assertTrue(instances_page.is_instance_deleted(target_instance))


class TestImagesAdmin(helpers.AdminTestCase, TestImagesBasic):
    """Login as admin user"""
    IMAGE_NAME = helpers.gen_random_resource_name("image")

    @property
    def images_page(self):
        return self.home_pg.go_to_system_imagespage()

    @decorators.skip_because(bugs=['1584057'])
    def test_image_create_delete(self):
        super(TestImagesAdmin, self).test_image_create_delete()

    def test_filter_images(self):
        """This test checks filtering of images

        Steps:
        1) Login to Horizon dashboard as admin user
        2) Go to Admin -> System -> Images
        3) Use filter by Image Name
        4) Check that filtered table has one image only (which name is
           equal to filter value)
        5) Check that no other images in the table
        6) Clear filter and set nonexistent image name. Check that 0 rows
           are displayed
        """
        images_list = self.CONFIG.image.images_list
        images_page = self.images_page

        images_page.images_table.filter(images_list[0])
        self.assertTrue(images_page.is_image_present(images_list[0]))
        for image in images_list[1:]:
            self.assertFalse(images_page.is_image_present(image))

        nonexistent_image_name = "{0}_test".format(self.IMAGE_NAME)
        images_page.images_table.filter(nonexistent_image_name)
        self.assertEqual(images_page.images_table.rows, [])

        images_page.images_table.filter('')
#!/usr/bin/python # -*- coding: utf-8 -*- # pylint: disable=invalid-name """Script to automate generation of wiki pages of the libyal libraries.""" import abc import argparse import io import os import re import string import sys from yaldevtools import configuration class WikiPageGenerator(object): """Generates wiki pages.""" def __init__(self, template_directory): """Initializes a wiki page generator. Args: template_directory (str): path of the template directory. """ super(WikiPageGenerator, self).__init__() self._template_directory = template_directory def _GenerateSection( self, template_filename, template_mappings, output_writer): """Generates a section from template filename. Args: template_filename (str): path of the template file. template_mpppings (dict[str, str]): the template mappings, where the key maps to the name of a template variable. output_writer (OutputWriter): output writer. """ template_string = self._ReadTemplateFile(template_filename) output_data = template_string.substitute(template_mappings) output_writer.Write(output_data) def _GetCygwinBuildDependencies(self, project_configuration): """Retrieves the Cygwin build dependencies. Args: project_configuration (ProjectConfiguration): project configuration. Returns: list[str]: Cygwin build dependencies. 
""" dependencies = [ 'autoconf', 'automake', 'binutils', 'gcc-core', 'gcc-g++', 'gettext-devel', 'libiconv', 'libtool', 'make', 'pkg-config'] if ('lex' in project_configuration.library_build_dependencies or 'lex' in project_configuration.tools_build_dependencies): dependencies.append('flex') if ('yacc' in project_configuration.library_build_dependencies or 'yacc' in project_configuration.tools_build_dependencies): dependencies.append('byacc') if ('zlib' in project_configuration.library_build_dependencies or 'zlib' in project_configuration.tools_build_dependencies): dependencies.append( 'zlib-devel (for DEFLATE compression support) (optional but ' 'recommended, can be disabled by --with-zlib=no)') if project_configuration.HasDependencyBzip2(): dependencies.append( 'libbz2-devel (required for bzip2 compression support)') if ('crypto' in project_configuration.library_build_dependencies or 'crypto' in project_configuration.tools_build_dependencies): dependencies.append( 'libssl-devel (optional but recommended, can be disabled by ' '--with-openssl=no)') dependencies.extend(project_configuration.cygwin_build_dependencies) return dependencies def _GetCygwinDLLDependencies(self, project_configuration): """Retrieves the Cygwin DLL dependencies. Args: project_configuration (ProjectConfiguration): project configuration. Returns: list[str]: Cygwin DLL dependencies. """ dependencies = ['cygwin1.dll'] dependencies.extend(project_configuration.cygwin_dll_dependencies) return dependencies def _GetDpkgBuildDependencies(self, project_configuration): """Retrieves the dpkg build dependencies. Args: project_configuration (ProjectConfiguration): project configuration. Returns: list[str]: dpkg build dependencies. 
""" dependencies = list(project_configuration.dpkg_build_dependencies) if ('lex' in project_configuration.library_build_dependencies or 'lex' in project_configuration.tools_build_dependencies): dependencies.append('flex') if ('yacc' in project_configuration.library_build_dependencies or 'yacc' in project_configuration.tools_build_dependencies): dependencies.append('bison') if ('zlib' in project_configuration.library_build_dependencies or 'zlib' in project_configuration.tools_build_dependencies): dependencies.append('zlib1g-dev') if project_configuration.HasDependencyBzip2(): dependencies.append('bzip2-dev') if ('crypto' in project_configuration.library_build_dependencies or 'crypto' in project_configuration.tools_build_dependencies): dependencies.append('libssl-dev') if 'fuse' in project_configuration.tools_build_dependencies: dependencies.append('libfuse-dev') if project_configuration.HasPythonModule(): dependencies.extend([ 'python-dev', 'python-setuptools', 'python3-dev', 'python3-setuptools']) return dependencies def _GetDpkgFilenames(self, project_configuration): """Retrieves the dpkg filenames. Args: project_configuration (ProjectConfiguration): project configuration. Returns: list[str]: dpkg filenames. 
""" filenames = [] dpkg_library_filename = '{0:s}_<version>-1_<arch>.deb'.format( project_configuration.project_name) filenames.append(dpkg_library_filename) dpkg_development_filename = '{0:s}-dev_<version>-1_<arch>.deb'.format( project_configuration.project_name) filenames.append(dpkg_development_filename) if project_configuration.HasPythonModule(): dpkg_python2_filename = '{0:s}-python_<version>-1_<arch>.deb'.format( project_configuration.project_name) filenames.append(dpkg_python2_filename) dpkg_python3_filename = '{0:s}-python3_<version>-1_<arch>.deb'.format( project_configuration.project_name) filenames.append(dpkg_python3_filename) if project_configuration.HasTools(): dpkg_tools_filename = '{0:s}-tools_<version>-1_<arch>.deb'.format( project_configuration.project_name) filenames.append(dpkg_tools_filename) return filenames def _GetMinGWBuildDependencies(self, project_configuration): """Retrieves the MinGW build dependencies. Args: project_configuration (ProjectConfiguration): project configuration. Returns: list[str]: MinGW build dependencies. 
""" dependencies = [] if ('lex' in project_configuration.library_build_dependencies or 'lex' in project_configuration.tools_build_dependencies): dependencies.append('flex') if ('yacc' in project_configuration.library_build_dependencies or 'yacc' in project_configuration.tools_build_dependencies): dependencies.append('byacc') if ('zlib' in project_configuration.library_build_dependencies or 'zlib' in project_configuration.tools_build_dependencies): dependencies.append( 'MinGW build of zlib library and source headers (for DEFLATE ' 'compression support) (optional but recommended, can be disabled ' 'by --with-zlib=no)') if project_configuration.HasDependencyBzip2(): dependencies.append( 'MinGW build of bzip2 library and source headers (required for ' 'bzip2 compression support)') dependencies.extend(project_configuration.mingw_build_dependencies) return dependencies def _GetMinGWDLLDependencies(self, project_configuration): """Retrieves the MinGW DLL dependencies. Args: project_configuration (ProjectConfiguration): project configuration. Returns: list[str]: MinGW DLL dependencies. """ dependencies = ['libgcc_s_dw2-1.dll (or equivalent)'] dependencies.extend(project_configuration.mingw_dll_dependencies) return dependencies def _GetRpmBuildDependencies(self, project_configuration): """Retrieves the rpm build dependencies. Args: project_configuration (ProjectConfiguration): project configuration. Returns: list[str]: rpm build dependencies. 
""" dependencies = list(project_configuration.rpm_build_dependencies) if ('lex' in project_configuration.library_build_dependencies or 'lex' in project_configuration.tools_build_dependencies): dependencies.append('flex') if ('yacc' in project_configuration.library_build_dependencies or 'yacc' in project_configuration.tools_build_dependencies): dependencies.append('byacc') if ('zlib' in project_configuration.library_build_dependencies or 'zlib' in project_configuration.tools_build_dependencies): dependencies.append('zlib-devel') if project_configuration.HasDependencyBzip2(): dependencies.append('bzip2-devel') if ('crypto' in project_configuration.library_build_dependencies or 'crypto' in project_configuration.tools_build_dependencies): dependencies.append('openssl-devel') if 'fuse' in project_configuration.tools_build_dependencies: dependencies.append('fuse-devel') if project_configuration.HasPythonModule(): dependencies.append('python-devel') dependencies.append('python3-devel') return dependencies def _GetRpmFilenames(self, project_configuration): """Retrieves the rpm filenames. Args: project_configuration (ProjectConfiguration): project configuration. Returns: list[str]: rpm filenames. 
""" filenames = [] rpm_library_filename = ( '~/rpmbuild/RPMS/<arch>/{0:s}-<version>-1.<arch>.rpm').format( project_configuration.project_name) filenames.append(rpm_library_filename) rpm_development_filename = ( '~/rpmbuild/RPMS/<arch>/{0:s}-devel-<version>-1.<arch>.rpm').format( project_configuration.project_name) filenames.append(rpm_development_filename) if project_configuration.HasPythonModule(): rpm_python2_filename = ( '~/rpmbuild/RPMS/<arch>/{0:s}-python-<version>-1.<arch>.rpm').format( project_configuration.project_name) filenames.append(rpm_python2_filename) rpm_python3_filename = ( '~/rpmbuild/RPMS/<arch>/{0:s}-python3-<version>-1.<arch>.rpm').format( project_configuration.project_name) filenames.append(rpm_python3_filename) if project_configuration.HasTools(): rpm_tools_filename = ( '~/rpmbuild/RPMS/<arch>/{0:s}-tools-<version>-1.<arch>.rpm').format( project_configuration.project_name) filenames.append(rpm_tools_filename) rpm_source_filename = '~/rpmbuild/SRPMS/{0:s}-<version>-1.src.rpm'.format( project_configuration.project_name) filenames.append(rpm_source_filename) return filenames def _GetTemplateMappings(self, project_configuration): """Retrieves the template mappings. Args: project_configuration (ProjectConfiguration): project configuration. Returns: dict[str, str]: string template mappings, where the key maps to the name of a template variable. 
""" building_table_of_contents = '' project_status = '' build_dependencies = '' documentation = '' development_table_of_contents = '' development_main_object_pre_open_python = '' development_main_object_post_open_python = '' development_main_object_post_open_file_object_python = '' tests_profiles = '' troubleshooting_example = '' cygwin_executables = '' gcc_mount_tool = '' mingw_executables = '' msvscpp_build_git = '' msvscpp_mount_tool = '' rpm_rename_source_package = '' mount_tool_additional_arguments = '' mount_tool_source_description_long = '' mount_tool_file_entry_example = '' development_prefix = project_configuration.project_name[3:] python_bindings_name = 'py{0:s}'.format( project_configuration.project_name[3:]) mount_tool_name = '{0:s}mount'.format( project_configuration.project_name[3:]) tools_name = '{0:s}tools'.format(project_configuration.project_name[3:]) if project_configuration.project_status: project_status += '-{0:s}'.format(project_configuration.project_status) if project_configuration.project_documentation_url: documentation = '* [Documentation]({0:s})\n'.format( project_configuration.project_documentation_url) if project_configuration.library_build_dependencies: for dependency in project_configuration.library_build_dependencies: build_dependencies += '* {0:s}\n'.format(dependency) if (project_configuration.HasTests() and project_configuration.tests_profiles): for profile in project_configuration.tests_profiles: tests_profiles += '* {0:s}\n'.format(profile) if project_configuration.troubleshooting_example: troubleshooting_example = project_configuration.troubleshooting_example building_table_of_contents += ( 'The {0:s} source code can be build with different compilers:\n' '\n').format(project_configuration.project_name) # Git support. 
git_apt_dependencies = [ 'git', 'autoconf', 'automake', 'autopoint', 'libtool', 'pkg-config'] if ('lex' in project_configuration.library_build_dependencies or 'lex' in project_configuration.tools_build_dependencies): git_apt_dependencies.append('flex') if ('yacc' in project_configuration.library_build_dependencies or 'yacc' in project_configuration.tools_build_dependencies): git_apt_dependencies.append('bison') git_apt_dependencies = ' '.join(git_apt_dependencies) git_dnf_dependencies = [ 'git', 'autoconf', 'automake', 'gettext-devel', 'libtool', 'pkg-config'] if ('lex' in project_configuration.library_build_dependencies or 'lex' in project_configuration.tools_build_dependencies): git_dnf_dependencies.append('flex') if ('yacc' in project_configuration.library_build_dependencies or 'yacc' in project_configuration.tools_build_dependencies): git_dnf_dependencies.append('byacc') git_dnf_dependencies = ' '.join(git_dnf_dependencies) git_build_dependencies = [ 'git', 'aclocal', 'autoconf', 'automake', 'autopoint or gettextize', 'libtoolize', 'pkg-config'] if ('lex' in project_configuration.library_build_dependencies or 'lex' in project_configuration.tools_build_dependencies): git_build_dependencies.append('flex') if ('yacc' in project_configuration.library_build_dependencies or 'yacc' in project_configuration.tools_build_dependencies): git_build_dependencies.append('byacc') git_build_dependencies = '\n'.join([ '* {0:s}'.format(dependency) for dependency in git_build_dependencies]) git_macports_dependencies = [ 'git', 'autoconf', 'automake', 'gettext', 'libtool', 'pkgconfig'] if ('lex' in project_configuration.library_build_dependencies or 'lex' in project_configuration.tools_build_dependencies): git_macports_dependencies.append('flex') if ('yacc' in project_configuration.library_build_dependencies or 'yacc' in project_configuration.tools_build_dependencies): git_macports_dependencies.append('byacc') git_macports_dependencies = ' '.join(git_macports_dependencies) 
git_msvscpp_dependencies = ['.\\synclibs.ps1'] if ('lex' in project_configuration.library_build_dependencies or 'yacc' in project_configuration.tools_build_dependencies or 'yacc' in project_configuration.library_build_dependencies or 'yacc' in project_configuration.tools_build_dependencies): git_msvscpp_dependencies.append('.\\syncwinflexbison.ps1') if ('zlib' in project_configuration.library_build_dependencies or 'zlib' in project_configuration.tools_build_dependencies): git_msvscpp_dependencies.append('.\\synczlib.ps1') git_msvscpp_dependencies = '\n'.join(git_msvscpp_dependencies) # GCC support. building_table_of_contents += ( '* [Using GNU Compiler Collection (GCC)]' '(Building#using-gnu-compiler-collection-gcc)\n') gcc_build_dependencies = [] gcc_static_build_dependencies = [] if ('lex' in project_configuration.library_build_dependencies or 'lex' in project_configuration.tools_build_dependencies): gcc_build_dependencies.append('flex') if ('yacc' in project_configuration.library_build_dependencies or 'yacc' in project_configuration.tools_build_dependencies): gcc_build_dependencies.append('byacc') if ('zlib' in project_configuration.library_build_dependencies or 'zlib' in project_configuration.tools_build_dependencies): gcc_build_dependencies.append( 'zlib (for DEFLATE compression support) (optional but recommended, ' 'can be disabled by --with-zlib=no)') gcc_static_build_dependencies.append( 'zlib (for DEFLATE compression support) (optional but recommended, ' 'can be disabled by --with-zlib=no)') if project_configuration.HasDependencyBzip2(): gcc_build_dependencies.append( 'bzip2 (required for bzip2 compression support)') gcc_static_build_dependencies.append( 'bzip2 (required for bzip2 compression support)') if ('crypto' in project_configuration.library_build_dependencies or 'crypto' in project_configuration.tools_build_dependencies): gcc_build_dependencies.append( 'libcrypto (part of OpenSSL) (optional but recommended, can be ' 'disabled by 
--with-openssl=no)') gcc_static_build_dependencies.append( 'libcrypto (part of OpenSSL) (optional but recommended, can be ' 'disabled by --with-openssl=no)') if 'fuse' in project_configuration.tools_build_dependencies: gcc_static_build_dependencies.append( 'fuse (optional, can be disabled by --with-libfuse=no)') gcc_build_dependencies.extend(project_configuration.gcc_build_dependencies) gcc_static_build_dependencies.extend( project_configuration.gcc_build_dependencies) gcc_build_dependencies = '\n'.join([ '* {0:s}'.format(dependency) for dependency in gcc_build_dependencies]) if gcc_build_dependencies: gcc_build_dependencies = ( '\n' 'Also make sure to have the following dependencies including ' 'source headers installed:\n' '{0:s}\n').format(gcc_build_dependencies) gcc_static_build_dependencies = '\n'.join([ '* {0:s}'.format(dependency) for dependency in gcc_static_build_dependencies]) # Cygwin support. building_table_of_contents += ' * [Using Cygwin](Building#cygwin)\n' cygwin_build_dependencies = self._GetCygwinBuildDependencies( project_configuration) cygwin_build_dependencies = '\n'.join([ '* {0:s}'.format(dependency) for dependency in cygwin_build_dependencies]) cygwin_dll_dependencies = self._GetCygwinDLLDependencies( project_configuration) cygwin_dll_dependencies = '\n'.join([ '* {0:s}'.format(dependency) for dependency in cygwin_dll_dependencies]) if project_configuration.HasTools(): cygwin_executables += ( 'And the following executables:\n' '```\n') for name in project_configuration.tools_names: cygwin_executables += ( '{0:s}/.libs/{1:s}.exe\n'.format( project_configuration.tools_directory, name)) cygwin_executables += ( '```\n') # Fuse support. 
if 'fuse' in project_configuration.tools_build_dependencies: gcc_mount_tool += ( '\n' 'If you want to be able to use {0:s}, make sure that:\n' '\n' '* on a Linux system you have libfuse-dev (Debian-based) or ' 'fuse-devel (RedHat-based) installed.\n' '* on a macOS system, you have OSXFuse ' '(http://osxfuse.github.com/) installed.\n').format( mount_tool_name) # MinGW support. building_table_of_contents += ( '* [Using Minimalist GNU for Windows (MinGW)]' '(Building#using-minimalist-gnu-for-windows-mingw)\n') mingw_build_dependencies = self._GetMinGWBuildDependencies( project_configuration) mingw_build_dependencies = '\n'.join([ '* {0:s}'.format(dependency) for dependency in mingw_build_dependencies]) mingw_dll_dependencies = self._GetMinGWDLLDependencies( project_configuration) mingw_dll_dependencies = '\n'.join([ '* {0:s}'.format(dependency) for dependency in mingw_dll_dependencies]) if project_configuration.HasTools(): mingw_executables += ( 'And the following executables:\n' '```\n') for name in project_configuration.tools_names: mingw_executables += ( '{0:s}/.libs/{1:s}.exe\n'.format( project_configuration.tools_directory, name)) mingw_executables += ( '```\n' '\n') # Visual Studio support. 
building_table_of_contents += ( '* [Using Microsoft Visual Studio]' '(Building#using-microsoft-visual-studio)\n') msvscpp_build_dependencies = self._GetVisualStudioBuildDependencies( project_configuration) msvscpp_build_dependencies = '\n'.join([ '* {0:s}'.format(dependency) for dependency in msvscpp_build_dependencies]) if msvscpp_build_dependencies: msvscpp_build_dependencies = ( '\n' 'To compile {0:s} using Microsoft Visual Studio you\'ll ' 'need:\n' '\n' '{1:s}\n').format( project_configuration.project_name, msvscpp_build_dependencies) msvscpp_dll_dependencies = self._GetVisualStudioDLLDependencies( project_configuration) msvscpp_dll_dependencies = '\n'.join([ '* {0:s}'.format(dependency) for dependency in msvscpp_dll_dependencies]) if msvscpp_dll_dependencies: msvscpp_dll_dependencies = ( '{0:s}.dll is dependent on:\n' '{1:s}\n' '\n' 'These DLLs can be found in the same directory as ' '{0:s}.dll.\n').format( project_configuration.project_name, msvscpp_dll_dependencies) msvscpp_build_git = ( '\n' 'Note that if you want to build {0:s} from source checked out of ' 'git with Visual Studio make sure the autotools are able to make ' 'a distribution package of {0:s} before trying to build it.\n' 'You can create distribution package by running: ' '"make dist".\n').format(project_configuration.project_name) if project_configuration.HasDependencyDokan(): msvscpp_mount_tool += ( '\n' 'If you want to be able to use {0:s} you\'ll need Dokan library ' 'see the corresponding section below.\n' 'Otherwise ignore or remove the dokan_dll and {0:s} Visual Studio ' 'project files.\n').format(mount_tool_name) building_table_of_contents += '\n' building_table_of_contents += ( 'Or directly packaged with different package managers:\n\n') # Dpkg support. 
dpkg_build_dependencies = '' dpkg_filenames = '' if project_configuration.HasDpkg(): building_table_of_contents += ( '* [Using Debian package tools (DEB)]' '(Building#using-debian-package-tools-deb)\n') dpkg_build_dependencies = self._GetDpkgBuildDependencies( project_configuration) dpkg_build_dependencies = ' '.join(dpkg_build_dependencies) dpkg_filenames = self._GetDpkgFilenames(project_configuration) dpkg_filenames = '\n'.join(dpkg_filenames) # Rpm support. rpm_build_dependencies = '' rpm_filenames = '' rpm_rename_source_package = '' if project_configuration.HasRpm(): building_table_of_contents += ( '* [Using RedHat package tools (RPM)]' '(Building#using-redhat-package-tools-rpm)\n') rpm_build_dependencies = self._GetRpmBuildDependencies( project_configuration) rpm_build_dependencies = ' '.join(rpm_build_dependencies) if project_configuration.project_status != 'stable': rpm_rename_source_package += ( 'mv {0:s}-{1:s}-<version>.tar.gz {0:s}-<version>.tar.gz\n'.format( project_configuration.project_name, project_configuration.project_status)) rpm_filenames = self._GetRpmFilenames(project_configuration) rpm_filenames = '\n'.join(rpm_filenames) # macOS pkgbuild support. 
building_table_of_contents += ( '* [Using macOS pkgbuild](Building#using-macos-pkgbuild)\n') macos_pkg_configure_options = '' if project_configuration.HasPythonModule(): macos_pkg_configure_options = ' --enable-python --with-pyprefix' if project_configuration.HasPythonModule(): building_table_of_contents += ( '* [Using setup.py](Building#using-setuppy)\n') development_table_of_contents += ( '* [C/C++ development](C-development)\n') if project_configuration.HasPythonModule(): development_table_of_contents += ( '* [Python development](Python-development)\n') development_item_path = project_configuration.development_item_path or '' if development_item_path: development_item_path = development_item_path.replace('\\', '\\\\') if project_configuration.development_main_object_pre_open_python: development_main_object_pre_open_python = '{0:s}\n'.format( project_configuration.development_main_object_pre_open_python) if project_configuration.development_main_object_post_open_python: development_main_object_post_open_python = '{0:s}\n'.format( '\n'.join(project_configuration.development_main_object_post_open_python)) if project_configuration.development_main_object_post_open_file_object_python: development_main_object_post_open_file_object_python = '{0:s}\n'.format( '\n'.join(project_configuration.development_main_object_post_open_file_object_python)) elif project_configuration.development_main_object_post_open_python: development_main_object_post_open_file_object_python = '{0:s}\n'.format( '\n'.join(project_configuration.development_main_object_post_open_python)) if project_configuration.mount_tool_file_entry_example: mount_tool_file_entry_example = ( project_configuration.mount_tool_file_entry_example) else: mount_tool_file_entry_example = '{0:s}1'.format( project_configuration.project_name[3:]) if project_configuration.mount_tool_additional_arguments: mount_tool_additional_arguments = ( project_configuration.mount_tool_additional_arguments) if 
project_configuration.mount_tool_source_description_long: mount_tool_source_description_long = ( project_configuration.mount_tool_source_description_long) else: mount_tool_source_description_long = ( project_configuration.mount_tool_source_description) if project_configuration.library_name == 'libewf': shared_object_version = '3' else: shared_object_version = '1' template_mappings = { 'building_table_of_contents': building_table_of_contents, 'project_name': project_configuration.project_name, 'project_name_suffix': project_configuration.project_name[3:], 'project_name_suffix_upper_case': ( project_configuration.project_name[3:].upper()), 'project_name_upper_case': project_configuration.project_name.upper(), 'project_status': project_status, 'project_description': project_configuration.project_description, 'project_git_url': project_configuration.project_git_url, 'project_downloads_url': project_configuration.project_downloads_url, 'build_dependencies': build_dependencies, 'python_bindings_name': python_bindings_name, 'mount_tool_file_entry_example': mount_tool_file_entry_example, 'mount_tool_file_entry_example_windows': ( mount_tool_file_entry_example.replace('\\x', '^x')), 'mount_tool_name': mount_tool_name, 'tools_name': tools_name, 'documentation': documentation, 'development_table_of_contents': development_table_of_contents, 'development_prefix': development_prefix, 'development_item_object': ( project_configuration.development_item_object), 'development_item_path': development_item_path, 'development_main_object': ( project_configuration.development_main_object), 'development_main_object_filename': ( project_configuration.development_main_object_filename), 'development_main_object_pre_open_python': ( development_main_object_pre_open_python), 'development_main_object_post_open_python': ( development_main_object_post_open_python), 'development_main_object_post_open_file_object_python': ( development_main_object_post_open_file_object_python), 
'development_main_object_size': ( project_configuration.development_main_object_size), 'tests_profiles': tests_profiles, 'tests_example_filename1': ( project_configuration.tests_example_filename1), 'tests_example_filename2': ( project_configuration.tests_example_filename2), 'troubleshooting_example': troubleshooting_example, 'cygwin_build_dependencies': cygwin_build_dependencies, 'cygwin_dll_dependencies': cygwin_dll_dependencies, 'cygwin_dll_filename': project_configuration.cygwin_dll_filename, 'cygwin_executables': cygwin_executables, 'gcc_build_dependencies': gcc_build_dependencies, 'gcc_static_build_dependencies': gcc_static_build_dependencies, 'gcc_mount_tool': gcc_mount_tool, 'git_apt_dependencies': git_apt_dependencies, 'git_build_dependencies': git_build_dependencies, 'git_dnf_dependencies': git_dnf_dependencies, 'git_macports_dependencies': git_macports_dependencies, 'git_msvscpp_dependencies': git_msvscpp_dependencies, 'mingw_build_dependencies': mingw_build_dependencies, 'mingw_dll_dependencies': mingw_dll_dependencies, 'mingw_dll_filename': project_configuration.mingw_dll_filename, 'mingw_executables': mingw_executables, 'msvscpp_build_dependencies': msvscpp_build_dependencies, 'msvscpp_build_git': msvscpp_build_git, 'msvscpp_dll_dependencies': msvscpp_dll_dependencies, 'msvscpp_mount_tool': msvscpp_mount_tool, 'dpkg_build_dependencies': dpkg_build_dependencies, 'dpkg_filenames': dpkg_filenames, 'macos_pkg_configure_options': macos_pkg_configure_options, 'rpm_build_dependencies': rpm_build_dependencies, 'rpm_filenames': rpm_filenames, 'rpm_rename_source_package': rpm_rename_source_package, 'mount_tool_additional_arguments': mount_tool_additional_arguments, 'mount_tool_mounted_description': ( project_configuration.mount_tool_mounted_description), 'mount_tool_source': project_configuration.mount_tool_source, 'mount_tool_source_description': ( project_configuration.mount_tool_source_description), 'mount_tool_source_description_long': ( 
            mount_tool_source_description_long),
        'shared_object_version': shared_object_version,
    }

    return template_mappings

  def _GetVisualStudioBuildDependencies(self, project_configuration):
    """Retrieves the Visual Studio build dependencies.

    Args:
      project_configuration (ProjectConfiguration): project configuration.

    Returns:
      list[str]: Visual Studio build dependencies.
    """
    dependencies = []
    if 'zlib' in project_configuration.library_build_dependencies:
      dependencies.append(
          'zlib (for DEFLATE compression support)')

    if project_configuration.HasDependencyBzip2():
      dependencies.append(
          'bzip2 (required for bzip2 compression support)')

    # Project-specific dependencies are appended after the common ones.
    dependencies.extend(project_configuration.msvscpp_build_dependencies)
    return dependencies

  def _GetVisualStudioDLLDependencies(self, project_configuration):
    """Retrieves the Visual Studio DLL dependencies.

    Args:
      project_configuration (ProjectConfiguration): project configuration.

    Returns:
      list[str]: Visual Studio DLL dependencies.
    """
    dependencies = []
    if 'zlib' in project_configuration.library_build_dependencies:
      dependencies.append('zlib.dll')

    if project_configuration.HasDependencyBzip2():
      dependencies.append('bzip2.dll')

    # Project-specific DLL dependencies are appended after the common ones.
    dependencies.extend(project_configuration.msvscpp_dll_dependencies)
    return dependencies

  def _ReadTemplateFile(self, filename):
    """Reads a template string from file.

    Args:
      filename (str): path of the file containing the template string.

    Returns:
      string.Template: template string.
    """
    path = os.path.join(self._template_directory, filename)
    with io.open(path, 'r', encoding='utf8') as file_object:
      file_data = file_object.read()
    return string.Template(file_data)

  @abc.abstractmethod
  def Generate(self, project_configuration, output_writer):
    """Generates a wiki page.

    Args:
      project_configuration (ProjectConfiguration): project configuration.
      output_writer (OutputWriter): output writer.
    """

  @abc.abstractmethod
  def HasContent(self, project_configuration):
    """Determines if the generator will generate content.

    Args:
      project_configuration (ProjectConfiguration): project configuration.

    Returns:
      bool: True if the generator will generate content.
    """


class BuildingPageGenerator(WikiPageGenerator):
  """Class that generates the "Building from source" wiki page."""

  def Generate(self, project_configuration, output_writer):
    """Generates a wiki page.

    Args:
      project_configuration (ProjectConfiguration): project configuration.
      output_writer (OutputWriter): output writer.
    """
    template_mappings = self._GetTemplateMappings(project_configuration)
    self._GenerateSection('introduction.txt', template_mappings, output_writer)
    self._GenerateSection('source.txt', template_mappings, output_writer)
    self._GenerateSection(
        'source_distribution_package.txt', template_mappings, output_writer)
    self._GenerateSection('source_git.txt', template_mappings, output_writer)

    # GCC support.
    self._GenerateSection('gcc.txt', template_mappings, output_writer)
    if project_configuration.HasDebugOutput():
      self._GenerateSection(
          'gcc_debug_output.txt', template_mappings, output_writer)

    self._GenerateSection(
        'gcc_static_library.txt', template_mappings, output_writer)

    if project_configuration.HasTools():
      self._GenerateSection(
          'gcc_static_executables.txt', template_mappings, output_writer)

    if project_configuration.HasPythonModule():
      self._GenerateSection(
          'gcc_python.txt', template_mappings, output_writer)

    self._GenerateSection('cygwin.txt', template_mappings, output_writer)
    self._GenerateSection('gcc_macos.txt', template_mappings, output_writer)
    if project_configuration.HasPythonModule():
      self._GenerateSection(
          'gcc_macos_python.txt', template_mappings, output_writer)

    self._GenerateSection(
        'gcc_solaris.txt', template_mappings, output_writer)

    # MinGW support.
    self._GenerateSection('mingw.txt', template_mappings, output_writer)
    self._GenerateSection('mingw_msys.txt', template_mappings, output_writer)
    self._GenerateSection('mingw_dll.txt', template_mappings, output_writer)
    self._GenerateSection(
        'mingw_troubleshooting.txt', template_mappings, output_writer)

    # Visual Studio support.
    self._GenerateSection('msvscpp.txt', template_mappings, output_writer)
    if project_configuration.HasDebugOutput():
      self._GenerateSection(
          'msvscpp_debug.txt', template_mappings, output_writer)

    if 'zlib' in project_configuration.library_build_dependencies:
      self._GenerateSection(
          'msvscpp_zlib.txt', template_mappings, output_writer)

    if project_configuration.HasDependencyDokan():
      self._GenerateSection(
          'msvscpp_dokan.txt', template_mappings, output_writer)

    if project_configuration.HasPythonModule():
      self._GenerateSection(
          'msvscpp_python.txt', template_mappings, output_writer)

    self._GenerateSection(
        'msvscpp_build.txt', template_mappings, output_writer)
    self._GenerateSection(
        'msvscpp_dll.txt', template_mappings, output_writer)
    self._GenerateSection(
        'msvscpp_2010.txt', template_mappings, output_writer)

    # Package manager support (dpkg, rpm, macOS pkgbuild, setup.py).
    if project_configuration.HasDpkg():
      self._GenerateSection('dpkg.txt', template_mappings, output_writer)

    if project_configuration.HasRpm():
      self._GenerateSection('rpm.txt', template_mappings, output_writer)

    self._GenerateSection('macos_pkg.txt', template_mappings, output_writer)

    if project_configuration.HasPythonModule():
      self._GenerateSection('setup_py.txt', template_mappings, output_writer)

  def HasContent(self, unused_project_configuration):
    """Determines if the generator will generate content.

    Args:
      project_configuration (ProjectConfiguration): project configuration.

    Returns:
      bool: True if the generator will generate content.
    """
    return True


class DevelopmentPageGenerator(WikiPageGenerator):
  """Class that generates the "Development" wiki page."""

  def Generate(self, project_configuration, output_writer):
    """Generates a wiki page.

    Args:
      project_configuration (ProjectConfiguration): project configuration.
      output_writer (OutputWriter): output writer.
    """
    template_mappings = self._GetTemplateMappings(project_configuration)
    self._GenerateSection('main.txt', template_mappings, output_writer)

  def HasContent(self, project_configuration):
    """Determines if the generator will generate content.

    Args:
      project_configuration (ProjectConfiguration): project configuration.

    Returns:
      bool: True if the generator will generate content.
    """
    # The development landing page only exists for projects with Python
    # bindings.
    return project_configuration.HasPythonModule()


class CDevelopmentPageGenerator(WikiPageGenerator):
  """Class that generates the "C/C++ development" wiki page."""

  def Generate(self, project_configuration, output_writer):
    """Generates a wiki page.

    Args:
      project_configuration (ProjectConfiguration): project configuration.
      output_writer (OutputWriter): output writer.
    """
    # TODO: add support for also_see.txt, main_object.txt
    template_mappings = self._GetTemplateMappings(project_configuration)
    self._GenerateSection('main.txt', template_mappings, output_writer)

    if project_configuration.development_main_object:
      if project_configuration.development_glob:
        self._GenerateSection(
            'main_object_with_glob.txt', template_mappings, output_writer)
      else:
        self._GenerateSection(
            'main_object.txt', template_mappings, output_writer)

    self._GenerateSection('also_see.txt', template_mappings, output_writer)

  def HasContent(self, unused_project_configuration):
    """Determines if the generator will generate content.

    Args:
      project_configuration (ProjectConfiguration): project configuration.

    Returns:
      bool: True if the generator will generate content.
    """
    return True


class PythonDevelopmentPageGenerator(WikiPageGenerator):
  """Class that generates the "Python development" wiki page."""

  def Generate(self, project_configuration, output_writer):
    """Generates a wiki page.

    Args:
      project_configuration (ProjectConfiguration): project configuration.
      output_writer (OutputWriter): output writer.
    """
    template_mappings = self._GetTemplateMappings(project_configuration)
    self._GenerateSection('main.txt', template_mappings, output_writer)

    if project_configuration.development_main_object:
      if project_configuration.development_glob:
        self._GenerateSection(
            'main_object_with_glob.txt', template_mappings, output_writer)
      else:
        self._GenerateSection(
            'main_object.txt', template_mappings, output_writer)

    if project_configuration.development_item_object:
      self._GenerateSection('item_object.txt', template_mappings, output_writer)

    if project_configuration.development_pytsk3:
      if project_configuration.development_glob:
        self._GenerateSection(
            'pytsk3_with_glob.txt', template_mappings, output_writer)
      else:
        self._GenerateSection('pytsk3.txt', template_mappings, output_writer)

    # TODO: move main object out of this template and create on demand.
    self._GenerateSection('also_see.txt', template_mappings, output_writer)

  def HasContent(self, project_configuration):
    """Determines if the generator will generate content.

    Args:
      project_configuration (ProjectConfiguration): project configuration.

    Returns:
      bool: True if the generator will generate content.
    """
    return project_configuration.HasPythonModule()


class HomePageGenerator(WikiPageGenerator):
  """Class that generates the "Home" wiki page."""

  def Generate(self, project_configuration, output_writer):
    """Generates a wiki page.

    Args:
      project_configuration (ProjectConfiguration): project configuration.
      output_writer (OutputWriter): output writer.
    """
    template_mappings = self._GetTemplateMappings(project_configuration)
    self._GenerateSection('introduction.txt', template_mappings, output_writer)

  def HasContent(self, unused_project_configuration):
    """Determines if the generator will generate content.

    Args:
      project_configuration (ProjectConfiguration): project configuration.

    Returns:
      bool: True if the generator will generate content.
    """
    return True


class MountingPageGenerator(WikiPageGenerator):
  """Class that generates the "Mounting a ..."
  wiki page."""

  def Generate(self, project_configuration, output_writer):
    """Generates a wiki page.

    Args:
      project_configuration (ProjectConfiguration): project configuration.
      output_writer (OutputWriter): output writer.
    """
    template_mappings = self._GetTemplateMappings(project_configuration)
    # Only projects with a Dokan (Windows) or fuse mount tool get content;
    # mirrors the condition in HasContent() below.
    if (project_configuration.HasDependencyDokan() or
        'fuse' in project_configuration.tools_build_dependencies):
      self._GenerateSection(
          'introduction.txt', template_mappings, output_writer)

      if project_configuration.mount_tool_source_type in (
          'file', 'image', 'volume'):
        template_file = 'mounting_{0:s}.txt'.format(
            project_configuration.mount_tool_source_type)
        self._GenerateSection(template_file, template_mappings, output_writer)

      self._GenerateSection(
          'mounting_missing_backend.txt', template_mappings, output_writer)

      if project_configuration.mount_tool_source_type in (
          'image', 'volume'):
        template_file = 'mounting_{0:s}_contents.txt'.format(
            project_configuration.mount_tool_source_type)
        self._GenerateSection(template_file, template_mappings, output_writer)

      if project_configuration.mount_tool_source_type == 'volume':
        self._GenerateSection(
            'obtaining_volume_offset.txt', template_mappings, output_writer)

      self._GenerateSection(
          'mounting_root_access.txt', template_mappings, output_writer)

      if project_configuration.HasDependencyDokan():
        if project_configuration.mount_tool_source_type in (
            'file', 'image', 'volume'):
          template_file = 'mounting_{0:s}_windows.txt'.format(
              project_configuration.mount_tool_source_type)
          self._GenerateSection(
              template_file, template_mappings, output_writer)

      self._GenerateSection(
          'unmounting.txt', template_mappings, output_writer)

      if project_configuration.HasDependencyDokan():
        self._GenerateSection(
            'unmounting_windows.txt', template_mappings, output_writer)

      self._GenerateSection(
          'troubleshooting.txt', template_mappings, output_writer)

  def HasContent(self, project_configuration):
    """Determines if the generator will generate content.

    Args:
      project_configuration (ProjectConfiguration): project configuration.

    Returns:
      bool: True if the generator will generate content.
    """
    if (project_configuration.HasDependencyDokan() or
        'fuse' in project_configuration.tools_build_dependencies):
      return True

    return False


class TestingPageGenerator(WikiPageGenerator):
  """Class that generates the "Testing" wiki page."""

  def Generate(self, project_configuration, output_writer):
    """Generates a wiki page.

    Args:
      project_configuration (ProjectConfiguration): project configuration.
      output_writer (OutputWriter): output writer.
    """
    # TODO: implement testing page without input files.
    template_mappings = self._GetTemplateMappings(project_configuration)
    if project_configuration.HasTests():
      self._GenerateSection('tests.txt', template_mappings, output_writer)

      if project_configuration.tests_profiles:
        if (project_configuration.tests_example_filename1 and
            project_configuration.tests_example_filename2):
          self._GenerateSection(
              'tests_files.txt', template_mappings, output_writer)

        self._GenerateSection(
            'tests_profiles.txt', template_mappings, output_writer)

        if (project_configuration.tests_example_filename1 and
            project_configuration.tests_example_filename2):
          self._GenerateSection(
              'tests_profiles_files.txt', template_mappings, output_writer)

      # TODO: add section about ASAN
      self._GenerateSection(
          'tests_valgrind.txt', template_mappings, output_writer)

  def HasContent(self, project_configuration):
    """Determines if the generator will generate content.

    Args:
      project_configuration (ProjectConfiguration): project configuration.

    Returns:
      bool: True if the generator will generate content.
    """
    if project_configuration.HasTests():
      return True

    return False


class TroubleshootingPageGenerator(WikiPageGenerator):
  """Class that generates the "Troubleshooting" wiki page."""

  def Generate(self, project_configuration, output_writer):
    """Generates a wiki page.

    Args:
      project_configuration (ProjectConfiguration): project configuration.
output_writer (OutputWriter): output writer. """ template_mappings = self._GetTemplateMappings(project_configuration) self._GenerateSection( 'introduction.txt', template_mappings, output_writer) self._GenerateSection( 'build_errors.txt', template_mappings, output_writer) self._GenerateSection( 'runtime_errors.txt', template_mappings, output_writer) if project_configuration.HasDebugOutput(): self._GenerateSection( 'format_errors.txt', template_mappings, output_writer) if project_configuration.HasTools(): self._GenerateSection( 'crashes.txt', template_mappings, output_writer) def HasContent(self, unused_project_configuration): """Determines if the generator will generate content. Args: project_configuration (ProjectConfiguration): project configuration. Returns: bool: True if the generator will generate content. """ return True class FileWriter(object): """Class that defines a file output writer.""" def __init__(self, name): """Initializes an output writer. Args: name (str): name of the output. """ super(FileWriter, self).__init__() self._file_object = None self._name = name def Open(self): """Opens the output writer object. Returns: bool: True if successful or False if not. """ self._file_object = io.open(self._name, 'w', encoding='utf8') return True def Close(self): """Closes the output writer object.""" self._file_object.close() def Write(self, data): """Writes the data to file. Args: data (bytes): data to write. """ self._file_object.write(data) class StdoutWriter(object): """Class that defines a stdout output writer.""" def Open(self): """Opens the output writer object. Returns: bool: True if successful or False if not. """ return True def Close(self): """Closes the output writer object.""" return def Write(self, data): """Writes the data to stdout (without the default trailing newline). Args: data (bytes): data to write. """ print(data, end='') def Main(): """The main program function. Returns: bool: True if successful or False if not. 
""" argument_parser = argparse.ArgumentParser(description=( 'Generates wiki pages of the libyal libraries.')) argument_parser.add_argument( 'configuration_file', action='store', metavar='CONFIGURATION_FILE', default='project-wiki.ini', help=( 'The wiki generation configuration file.')) argument_parser.add_argument( '-o', '--output', dest='output_directory', action='store', metavar='OUTPUT_DIRECTORY', default=None, help='path of the output files to write to.') options = argument_parser.parse_args() if not options.configuration_file: print('Configuration file missing.') print('') argument_parser.print_help() print('') return False if not os.path.exists(options.configuration_file): print('No such configuration file: {0:s}.'.format( options.configuration_file)) print('') return False if options.output_directory and not os.path.exists(options.output_directory): print('No such output directory: {0:s}.'.format(options.output_directory)) print('') return False project_configuration = configuration.ProjectConfiguration() project_configuration.ReadFromFile(options.configuration_file) readme_file = os.path.join( os.path.dirname(options.configuration_file), 'README') LINK_RE = re.compile(r'\* (.*): (http[s]://.*)') last_line_was_header = False project_description = [] if os.path.exists(readme_file): with io.open(readme_file, 'r', encoding='utf8') as file_object: for line in file_object.readlines(): if line.startswith('For more information see:'): project_description.pop() break if last_line_was_header: last_line_was_header = False if line != '\n': # Add an empty line to make sure unnumbered list are formatted # correctly by most markdown parsers. 
project_description.append('\n') line = LINK_RE.sub(r'* [\1](\2)', line) project_description.append(line) if line.endswith(':\n'): last_line_was_header = True project_configuration.project_description = ''.join(project_description) libyal_directory = os.path.abspath(__file__) libyal_directory = os.path.dirname(libyal_directory) libyal_directory = os.path.dirname(libyal_directory) # TODO: generate more wiki pages. wiki_pages = [ ('Building', BuildingPageGenerator), ('Development', DevelopmentPageGenerator), ('Home', HomePageGenerator), ('Mounting', MountingPageGenerator), ('C-development', CDevelopmentPageGenerator), ('Python-development', PythonDevelopmentPageGenerator), ('Testing', TestingPageGenerator), ('Troubleshooting', TroubleshootingPageGenerator), ] for page_name, page_generator_class in wiki_pages: template_directory = os.path.join( libyal_directory, 'data', 'wiki', page_name) wiki_page = page_generator_class(template_directory) if not wiki_page.HasContent(project_configuration): continue if options.output_directory: filename = '{0:s}.md'.format(page_name) output_file = os.path.join(options.output_directory, filename) output_writer = FileWriter(output_file) else: output_writer = StdoutWriter() if not output_writer.Open(): print('Unable to open output writer.') print('') return False wiki_page.Generate(project_configuration, output_writer) output_writer.Close() # TODO: add support for Unicode templates. return True if __name__ == '__main__': if not Main(): sys.exit(1) else: sys.exit(0)
from pandac.PandaModules import *
from otp.otpbase import OTPGlobals
from direct.gui.DirectGui import *
from otp.otpgui import OTPDialog
from direct.directnotify import DirectNotifyGlobal
from otp.otpbase import OTPLocalizer
from direct.task.Task import Task


class GuiScreen:
    """Mixin that adds tab/enter keyboard focus management over a
    `self.focusList` of DirectGui widgets (DirectEntry / DirectScrolledList),
    plus a "waiting for database" timeout dialog.

    NOTE(review): this class assumes the host object also provides
    `self.focusList`, `self.doneEvent` and the DirectObject accept/ignore
    methods -- none of those are defined here; confirm against subclasses.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('GuiScreen')
    # Enter-press behaviors, monkey-patched onto DGG so other modules can
    # reference them as DGG.ENTERPRESS_*.  The integer values double as
    # indices into the commandHandlers tuple built in startFocusMgmt().
    DGG.ENTERPRESS_ADVANCE = 0
    DGG.ENTERPRESS_ADVANCE_IFNOTEMPTY = 1
    DGG.ENTERPRESS_DONT_ADVANCE = 2
    DGG.ENTERPRESS_REMOVE_FOCUS = 3
    ENTRY_WIDTH = 20

    def __init__(self):
        self.waitingForDatabase = None
        # focusIndex is None whenever focus management is inactive.
        self.focusIndex = None
        # Counter: while > 0, focus-change click sounds are swallowed.
        self.suppressClickSound = 0
        return

    def startFocusMgmt(self, startFocus = 0, enterPressBehavior = DGG.ENTERPRESS_ADVANCE_IFNOTEMPTY, overrides = {}, globalFocusHandler = None):
        """Begin managing keyboard focus over self.focusList.

        startFocus: index (or item) that receives focus first.
        enterPressBehavior: default DGG.ENTERPRESS_* constant, or a callable.
        overrides: per-item {item: behavior-or-callable} exceptions.
        globalFocusHandler: called with the newly-focused item (or None).
        """
        GuiScreen.notify.debug('startFocusMgmt:\nstartFocus=%s,\nenterPressBehavior=%s\noverrides=%s' % (startFocus, enterPressBehavior, overrides))
        self.accept('tab', self.__handleTab)
        self.accept('shift-tab', self.__handleShiftTab)
        self.accept('enter', self.__handleEnter)
        self.__startFrameStartTask()
        self.userGlobalFocusHandler = globalFocusHandler
        # Absorb counters let us programmatically set entry focus without
        # re-triggering our own focus-in callback (see __setFocusIndex).
        self.focusHandlerAbsorbCounts = {}
        for i in xrange(len(self.focusList)):
            item = self.focusList[i]
            if isinstance(item, DirectEntry):
                self.focusHandlerAbsorbCounts[item] = 0

        # Hijack each widget's focus/command callbacks, saving the user's
        # originals so stopFocusMgmt() can restore them.
        self.userFocusHandlers = {}
        self.userCommandHandlers = {}
        for i in xrange(len(self.focusList)):
            item = self.focusList[i]
            if isinstance(item, DirectEntry):
                self.userFocusHandlers[item] = (item['focusInCommand'], item['focusInExtraArgs'])
                item['focusInCommand'] = self.__handleFocusChangeAbsorb
                item['focusInExtraArgs'] = [i]
                self.userCommandHandlers[item] = (item['command'], item['extraArgs'])
                item['command'] = None
                item['extraArgs'] = []
            elif isinstance(item, DirectScrolledList):
                self.userCommandHandlers[item] = (item['command'], item['extraArgs'])
                item['command'] = self.__handleDirectScrolledListCommand
                item['extraArgs'] = [i]

        self.enterPressHandlers = {}
        for i in xrange(len(self.focusList)):
            item = self.focusList[i]
            behavior = enterPressBehavior
            if item in overrides:
                behavior = overrides[item]
            if callable(behavior):
                self.enterPressHandlers[item] = behavior
            else:
                # BUGFIX: the original referenced undefined module-level names
                # GuiScreen_ENTERPRESS_ADVANCE_IFNOTEMPTY / _ADVANCE, which
                # raised NameError here; the constants live on DGG (set in the
                # class body above).  "if not empty" only makes sense for text
                # entries, so downgrade it to plain ADVANCE for other widgets.
                if not isinstance(item, DirectEntry) and behavior == DGG.ENTERPRESS_ADVANCE_IFNOTEMPTY:
                    behavior = DGG.ENTERPRESS_ADVANCE
                # Tuple index matches the DGG.ENTERPRESS_* integer values.
                commandHandlers = (self.__alwaysAdvanceFocus,
                 self.__advanceFocusIfNotEmpty,
                 self.__neverAdvanceFocus,
                 self.__ignoreEnterPress)
                self.enterPressHandlers[item] = commandHandlers[behavior]

        self.setFocus(startFocus)
        return

    def focusMgmtActive(self):
        """Return true while startFocusMgmt() is in effect."""
        return self.focusIndex != None

    def stopFocusMgmt(self):
        """Undo startFocusMgmt(): restore user callbacks and drop state."""
        GuiScreen.notify.debug('stopFocusMgmt')
        if not self.focusMgmtActive():
            return
        self.ignore('tab')
        self.ignore('shift-tab')
        self.ignore('enter')
        self.__stopFrameStartTask()
        self.userGlobalFocusHandler = None
        self.focusIndex = None
        self.focusHandlerAbsorbCounts = {}
        for item in self.focusList:
            if isinstance(item, DirectEntry):
                userHandler, userHandlerArgs = self.userFocusHandlers[item]
                item['focusInCommand'] = userHandler
                item['focusInExtraArgs'] = userHandlerArgs
                userHandler, userHandlerArgs = self.userCommandHandlers[item]
                item['command'] = userHandler
                item['extraArgs'] = userHandlerArgs
            elif isinstance(item, DirectScrolledList):
                userHandler, userHandlerArgs = self.userCommandHandlers[item]
                item['command'] = userHandler
                item['extraArgs'] = userHandlerArgs

        self.userFocusHandlers = {}
        self.userCommandHandlers = {}
        self.enterPressHandlers = {}
        return

    def setFocus(self, arg, suppressSound = 1):
        """Give focus to an item, by index or by the item itself."""
        if type(arg) == type(0):
            index = arg
        else:
            index = self.focusList.index(arg)
        if suppressSound:
            self.suppressClickSound += 1
        self.__setFocusIndex(index)

    def advanceFocus(self, condition = 1):
        """Move focus to the next item (wraps); audible click allowed."""
        index = self.getFocusIndex()
        if condition:
            index += 1
        self.setFocus(index, suppressSound=0)

    def getFocusIndex(self):
        """Current focus index, or None if focus mgmt is inactive."""
        if not self.focusMgmtActive():
            return None
        return self.focusIndex

    def getFocusItem(self):
        """Currently focused widget, or None if focus mgmt is inactive."""
        if not self.focusMgmtActive():
            return None
        return self.focusList[self.focusIndex]

    def removeFocus(self):
        """Take keyboard focus away from the current item."""
        focusItem = self.getFocusItem()
        if isinstance(focusItem, DirectEntry):
            focusItem['focus'] = 0
        if self.userGlobalFocusHandler:
            self.userGlobalFocusHandler(None)
        return

    def restoreFocus(self):
        """Re-assert focus on the current item (e.g. after removeFocus)."""
        self.setFocus(self.getFocusItem())

    def __setFocusIndex(self, index):
        # Wrap so tabbing past either end cycles through the list.
        focusIndex = index % len(self.focusList)
        focusItem = self.focusList[focusIndex]
        if isinstance(focusItem, DirectEntry):
            # Setting 'focus' fires the entry's focusInCommand; bump the
            # absorb count so __handleFocusChangeAbsorb swallows it and we
            # drive __handleFocusChange exactly once, below.
            focusItem['focus'] = 1
            self.focusHandlerAbsorbCounts[focusItem] += 1
        self.__handleFocusChange(focusIndex)

    def __chainToUserCommandHandler(self, item):
        # Invoke the user's saved 'command' callback for this widget.
        userHandler, userHandlerArgs = self.userCommandHandlers[item]
        if userHandler:
            if isinstance(item, DirectEntry):
                enteredText = item.get()
                apply(userHandler, [enteredText] + userHandlerArgs)
            elif isinstance(item, DirectScrolledList):
                apply(userHandler, userHandlerArgs)

    def __chainToUserFocusHandler(self, item):
        # Invoke the user's saved focusInCommand callback, entries only.
        if isinstance(item, DirectEntry):
            userHandler, userHandlerArgs = self.userFocusHandlers[item]
            if userHandler:
                apply(userHandler, userHandlerArgs)

    def __handleTab(self):
        self.tabPressed = 1
        self.focusDirection = 1
        self.__setFocusIndex(self.getFocusIndex() + self.focusDirection)

    def __handleShiftTab(self):
        self.tabPressed = 1
        self.focusDirection = -1
        self.__setFocusIndex(self.getFocusIndex() + self.focusDirection)

    def __handleFocusChangeAbsorb(self, index):
        # focusInCommand hook: swallow callbacks we caused ourselves.
        item = self.focusList[index]
        if self.focusHandlerAbsorbCounts[item] > 0:
            self.focusHandlerAbsorbCounts[item] -= 1
        else:
            self.__handleFocusChange(index)

    def playFocusChangeSound(self):
        base.playSfx(DGG.getDefaultClickSound())

    def __handleFocusChange(self, index):
        # Central focus-transition handler: updates state, plays the click
        # sound (unless suppressed), and notifies user handlers.
        if index != self.focusIndex:
            self.removeFocus()
            self.__focusChangedThisFrame = 1
            if hasattr(self, 'tabPressed'):
                del self.tabPressed
            else:
                self.focusDirection = 1
            self.focusIndex = index
            if self.suppressClickSound > 0:
                self.suppressClickSound -= 1
            else:
                self.playFocusChangeSound()
        focusItem = self.getFocusItem()
        if self.userGlobalFocusHandler:
            self.userGlobalFocusHandler(focusItem)
            if self.getFocusItem() != focusItem:
                GuiScreen.notify.debug('focus changed by global focus handler')
        if self.focusMgmtActive():
            self.__chainToUserFocusHandler(focusItem)

    def __startFrameStartTask(self):
        # Sort -100 runs before normal tasks so the flag is reset for the
        # frame before any enter-press events are processed.
        self.__focusChangedThisFrame = 0
        self.frameStartTaskName = 'GuiScreenFrameStart'
        taskMgr.add(self.__handleFrameStart, self.frameStartTaskName, -100)

    def __stopFrameStartTask(self):
        taskMgr.remove(self.frameStartTaskName)
        del self.frameStartTaskName
        del self.__focusChangedThisFrame

    def __handleFrameStart(self, task):
        self.__focusChangedThisFrame = 0
        return Task.cont

    def __handleDirectScrolledListCommand(self, index):
        self.__chainToUserCommandHandler(self.focusList[index])
        # Suppress the click if the list already had focus.
        self.setFocus(index, suppressSound=self.getFocusIndex() == index)

    def __handleEnter(self):
        # Ignore enter in the same frame the focus moved (e.g. via tab).
        if self.__focusChangedThisFrame:
            return
        focusItem = self.getFocusItem()
        if isinstance(focusItem, DirectEntry):
            self.__chainToUserCommandHandler(focusItem)
        # The user's command handler may have stopped focus mgmt or moved
        # focus; only run the enter-press behavior if nothing changed.
        if self.focusMgmtActive() and focusItem == self.getFocusItem():
            self.enterPressHandlers[focusItem]()

    def __alwaysAdvanceFocus(self):
        # DGG.ENTERPRESS_ADVANCE
        self.advanceFocus()

    def __advanceFocusIfNotEmpty(self):
        # DGG.ENTERPRESS_ADVANCE_IFNOTEMPTY
        focusItem = self.getFocusItem()
        enteredText = focusItem.get()
        if enteredText != '':
            self.advanceFocus()
        else:
            self.setFocus(self.getFocusIndex())

    def __neverAdvanceFocus(self):
        # DGG.ENTERPRESS_DONT_ADVANCE: re-assert focus in place.
        self.setFocus(self.getFocusIndex())

    def __ignoreEnterPress(self):
        # DGG.ENTERPRESS_REMOVE_FOCUS / no-op behavior.
        pass

    def waitForDatabaseTimeout(self, requestName = 'unknown'):
        """Schedule the 'waiting for database' dialog to appear after the
        configured timeout; cancel with cleanupWaitingForDatabase()."""
        GuiScreen.notify.debug('waiting for database timeout %s at %s' % (requestName, globalClock.getFrameTime()))
        globalClock.tick()
        taskMgr.doMethodLater(OTPGlobals.DatabaseDialogTimeout, self.__showWaitingForDatabase, 'waitingForDatabase', extraArgs=[requestName])

    def __showWaitingForDatabase(self, requestName):
        # Timeout fired: show the dialog and arm the give-up timer.
        GuiScreen.notify.info('timed out waiting for %s at %s' % (requestName, globalClock.getFrameTime()))
        dialogClass = OTPGlobals.getDialogClass()
        self.waitingForDatabase = dialogClass(text=OTPLocalizer.GuiScreenToontownUnavailable, dialogName='WaitingForDatabase', buttonTextList=[OTPLocalizer.GuiScreenCancel], style=OTPDialog.Acknowledge, command=self.__handleCancelWaiting)
        self.waitingForDatabase.show()
        taskMgr.doMethodLater(OTPGlobals.DatabaseGiveupTimeout, self.__giveUpWaitingForDatabase, 'waitingForDatabase', extraArgs=[requestName])
        return Task.done

    def __giveUpWaitingForDatabase(self, requestName):
        # Give-up timer fired: tear down and report failure to the owner.
        GuiScreen.notify.info('giving up waiting for %s at %s' % (requestName, globalClock.getFrameTime()))
        self.cleanupWaitingForDatabase()
        messenger.send(self.doneEvent, [{'mode': 'failure'}])
        return Task.done

    def cleanupWaitingForDatabase(self):
        """Dismiss the dialog (if shown) and cancel any pending timers."""
        if self.waitingForDatabase != None:
            self.waitingForDatabase.cleanup()
            self.waitingForDatabase = None
        taskMgr.remove('waitingForDatabase')
        return

    def __handleCancelWaiting(self, value):
        # Dialog button callback: user chose to stop waiting.
        self.cleanupWaitingForDatabase()
        messenger.send(self.doneEvent, [{'mode': 'quit'}])
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Create sample PR curve summary data. We have 3 classes: R, G, and B. We generate colors within RGB space from 3 normal distributions (1 at each corner of the color triangle: [255, 0, 0], [0, 255, 0], and [0, 0, 255]). The true label of each random color is associated with the normal distribution that generated it. Using 3 other normal distributions (over the distance each color is from a corner of the color triangle - RGB), we then compute the probability that each color belongs to the class. We use those probabilities to generate PR curves. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os.path from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf from tensorboard.plugins.pr_curve import summary FLAGS = tf.flags.FLAGS tf.flags.DEFINE_string('logdir', '/tmp/pr_curve_demo', 'Directory into which to write TensorBoard data.') tf.flags.DEFINE_integer('steps', 10, 'Number of steps to generate for each PR curve.') def start_runs( logdir, steps, run_name, thresholds, mask_every_other_prediction=False): """Generate a PR curve with precision and recall evenly weighted. Arguments: logdir: The directory into which to store all the runs' data. steps: The number of steps to run for. 
run_name: The name of the run. thresholds: The number of thresholds to use for PR curves. mask_every_other_prediction: Whether to mask every other prediction by alternating weights between 0 and 1. """ tf.reset_default_graph() tf.set_random_seed(42) # Create a normal distribution layer used to generate true color labels. channel_distribution = tf.distributions.Normal(loc=0., scale=142.) # Sample the distribution to generate colors. Lets generate different numbers # of each color. The first dimension is the count of examples. # Generate reds. number_of_reds = 100 true_reds = tf.clip_by_value( tf.concat([ 255 - tf.abs(channel_distribution.sample([number_of_reds, 1])), tf.abs(channel_distribution.sample([number_of_reds, 2])) ], axis=1), 0, 255) # Generate greens. number_of_greens = 200 true_greens = tf.clip_by_value( tf.concat([ tf.abs(channel_distribution.sample([number_of_greens, 1])), 255 - tf.abs(channel_distribution.sample([number_of_greens, 1])), tf.abs(channel_distribution.sample([number_of_greens, 1])) ], axis=1), 0, 255) # Generate blues. number_of_blues = 150 true_blues = tf.clip_by_value( tf.concat([ tf.abs(channel_distribution.sample([number_of_blues, 2])), 255 - tf.abs(channel_distribution.sample([number_of_blues, 1])) ], axis=1), 0, 255) # Assign each color a vector of 3 booleans based on its true label. labels = tf.concat([ tf.tile(tf.constant([[True, False, False]]), (number_of_reds, 1)), tf.tile(tf.constant([[False, True, False]]), (number_of_greens, 1)), tf.tile(tf.constant([[False, False, True]]), (number_of_blues, 1)), ], axis=0) # We introduce 3 normal distributions. They are used to predict whether a # color falls under a certain class (based on distances from corners of the # color triangle). The distributions vary per color. We have the distributions # narrow over time. 
initial_standard_deviations = [v + FLAGS.steps for v in (158, 200, 242)] iteration = tf.placeholder(tf.int32, shape=[]) red_predictor = tf.distributions.Normal( loc=0., scale=tf.cast( initial_standard_deviations[0] - iteration, dtype=tf.float32)) green_predictor = tf.distributions.Normal( loc=0., scale=tf.cast( initial_standard_deviations[1] - iteration, dtype=tf.float32)) blue_predictor = tf.distributions.Normal( loc=0., scale=tf.cast( initial_standard_deviations[2] - iteration, dtype=tf.float32)) # Make predictions (assign 3 probabilities to each color based on each color's # distance to each of the 3 corners). We seek double the area in the right # tail of the normal distribution. examples = tf.concat([true_reds, true_greens, true_blues], axis=0) probabilities_colors_are_red = (1 - red_predictor.cdf( tf.norm(examples - tf.constant([255., 0, 0]), axis=1))) * 2 probabilities_colors_are_green = (1 - green_predictor.cdf( tf.norm(examples - tf.constant([0, 255., 0]), axis=1))) * 2 probabilities_colors_are_blue = (1 - blue_predictor.cdf( tf.norm(examples - tf.constant([0, 0, 255.]), axis=1))) * 2 predictions = ( probabilities_colors_are_red, probabilities_colors_are_green, probabilities_colors_are_blue ) # This is the crucial piece. We write data required for generating PR curves. # We create 1 summary per class because we create 1 PR curve per class. for i, color in enumerate(('red', 'green', 'blue')): description = ('The probabilities used to create this PR curve are ' 'generated from a normal distribution. Its standard ' 'deviation is initially %0.0f and decreases over time.' % initial_standard_deviations[i]) weights = None if mask_every_other_prediction: # Assign a weight of 0 to every even-indexed prediction. Odd-indexed # predictions are assigned a default weight of 1. 
consecutive_indices = tf.reshape( tf.range(tf.size(predictions[i])), tf.shape(predictions[i])) weights = tf.cast(consecutive_indices % 2, dtype=tf.float32) summary.op( tag=color, labels=labels[:, i], predictions=predictions[i], num_thresholds=thresholds, weights=weights, display_name='classifying %s' % color, description=description) merged_summary_op = tf.summary.merge_all() events_directory = os.path.join(logdir, run_name) sess = tf.Session() writer = tf.summary.FileWriter(events_directory, sess.graph) for step in xrange(steps): feed_dict = { iteration: step, } merged_summary = sess.run(merged_summary_op, feed_dict=feed_dict) writer.add_summary(merged_summary, step) writer.close() def run_all(logdir, steps, thresholds, verbose=False): """Generate PR curve summaries. Arguments: logdir: The directory into which to store all the runs' data. steps: The number of steps to run for. verbose: Whether to print the names of runs into stdout during execution. thresholds: The number of thresholds to use for PR curves. """ # First, we generate data for a PR curve that assigns even weights for # predictions of all classes. run_name = 'colors' if verbose: print('--- Running: %s' % run_name) start_runs( logdir=logdir, steps=steps, run_name=run_name, thresholds=thresholds) # Next, we generate data for a PR curve that assigns arbitrary weights to # predictions. run_name = 'mask_every_other_prediction' if verbose: print('--- Running: %s' % run_name) start_runs( logdir=logdir, steps=steps, run_name=run_name, thresholds=thresholds, mask_every_other_prediction=True) def main(unused_argv): print('Saving output to %s.' % FLAGS.logdir) run_all(FLAGS.logdir, FLAGS.steps, 50, verbose=True) print('Done. Output saved to %s.' % FLAGS.logdir) if __name__ == '__main__': tf.app.run()
# coding: utf-8 from __future__ import division, unicode_literals """ This module defines classes for point defects """ import os import abc import json from bisect import bisect_left from pymatgen.core.periodic_table import Specie, Element from pymatgen.core.sites import PeriodicSite from pymatgen.symmetry.analyzer import SpacegroupAnalyzer from pymatgen.io.zeoio import get_voronoi_nodes, get_void_volume_surfarea, \ get_high_accuracy_voronoi_nodes from pymatgen.command_line.gulp_caller import get_energy_buckingham, \ get_energy_relax_structure_buckingham from pymatgen.analysis.structure_analyzer import VoronoiCoordFinder, \ RelaxationAnalyzer from pymatgen.analysis.structure_matcher import StructureMatcher from pymatgen.analysis.bond_valence import BVAnalyzer import six from six.moves import filter from six.moves import map from six.moves import zip file_dir = os.path.dirname(__file__) rad_file = os.path.join(file_dir, 'ionic_radii.json') with open(rad_file, 'r') as fp: _ion_radii = json.load(fp) class ValenceIonicRadiusEvaluator(object): """ Computes site valences and ionic radii for a structure using bond valence analyzer Args: structure: pymatgen.core.structure.Structure """ def __init__(self, structure): self._structure = structure.copy() self._valences = self._get_valences() self._ionic_radii = self._get_ionic_radii() @property def radii(self): """ List of ionic radii of elements in the order of sites. """ el = [site.species_string for site in self._structure.sites] radii_dict = dict(zip(el, self._ionic_radii)) #print radii_dict return radii_dict @property def valences(self): """ List of oxidation states of elements in the order of sites. """ el = [site.species_string for site in self._structure.sites] valence_dict = dict(zip(el, self._valences)) return valence_dict @property def structure(self): """ Returns oxidation state decorated structurel. 
""" return self._structure.copy() def _get_ionic_radii(self): """ Computes ionic radii of elements for all sites in the structure. If valence is zero, atomic radius is used. """ radii = [] coord_finder = VoronoiCoordFinder(self._structure) def nearest_key(sorted_vals, key): i = bisect_left(sorted_vals, key) if i == len(sorted_vals): return sorted_vals[-1] if i == 0: return sorted_vals[0] before = sorted_vals[i-1] after = sorted_vals[i] if after-key < key-before: return after else: return before for i in range(len(self._structure.sites)): site = self._structure.sites[i] if isinstance(site.specie,Element): radius = site.specie.atomic_radius radii.append(radius) continue el = site.specie.symbol oxi_state = int(round(site.specie.oxi_state)) coord_no = int(round(coord_finder.get_coordination_number(i))) try: tab_oxi_states = sorted(map(int, _ion_radii[el].keys())) oxi_state = nearest_key(tab_oxi_states, oxi_state) radius = _ion_radii[el][str(oxi_state)][str(coord_no)] except KeyError: if coord_finder.get_coordination_number(i)-coord_no > 0: new_coord_no = coord_no + 1 else: new_coord_no = coord_no - 1 try: radius = _ion_radii[el][str(oxi_state)][str(new_coord_no)] coord_no = new_coord_no except: tab_coords = sorted(map(int, _ion_radii[el][str(oxi_state)].keys())) new_coord_no = nearest_key(tab_coords, coord_no) i = 0 for val in tab_coords: if val > coord_no: break i = i + 1 if i == len(tab_coords): key = str(tab_coords[-1]) radius = _ion_radii[el][str(oxi_state)][key] elif i == 0: key = str(tab_coords[0]) radius = _ion_radii[el][str(oxi_state)][key] else: key = str(tab_coords[i-1]) radius1 = _ion_radii[el][str(oxi_state)][key] key = str(tab_coords[i]) radius2 = _ion_radii[el][str(oxi_state)][key] radius = (radius1+radius2)/2 #implement complex checks later radii.append(radius) return radii def _get_valences(self): """ Computes ionic valences of elements for all sites in the structure. 
""" try: bv = BVAnalyzer() self._structure = bv.get_oxi_state_decorated_structure(self._structure) valences = bv.get_valences(self._structure) except: try: bv = BVAnalyzer(symm_tol=0.0) self._structure = bv.get_oxi_state_decorated_structure(self._structure) valences = bv.get_valences(self._structure) except: valences = [] for site in self._structure.sites: valences.append(site.specie.common_oxidation_states[0]) if sum(valences): valences = [0]*self._structure.num_sites else: self._structure.add_oxidation_state_by_site(valences) #raise #el = [site.specie.symbol for site in self._structure.sites] #el = [site.species_string for site in self._structure.sites] #el = [site.specie for site in self._structure.sites] #valence_dict = dict(zip(el, valences)) #print valence_dict return valences class Defect(six.with_metaclass(abc.ABCMeta, object)): """ Abstract class for point defects """ @abc.abstractmethod def enumerate_defectsites(self): """ Enumerates all the symmetrically distinct defects. """ raise NotImplementedError() @property def structure(self): """ Returns the structure without any defects Useful for Mott-Littleton calculations. """ return self._structure @property def struct_radii(self): """ Radii of elements in the structure """ return self._rad_dict @property def struct_valences(self): """ Valence of elements in the structure """ return self._valence_dict def defectsite_count(self): """ Returns the number of symmetrically distinct defect sites """ return len(self._defect_sites) def get_defectsite(self, n): """ Returns the defect site at the index. """ return self._defect_sites[n] def get_defectsite_multiplicity(self, n): """ Returns the symmtric multiplicity of the defect site at the index. """ return self._defect_site_multiplicity[n] def get_defectsite_coordination_number(self, n): """ Coordination number of interstitial site. 
Args: n: Index of interstitial list """ return self._defectsite_coord_no[n] def get_coordinated_sites(self, n): """ The sites in structure surrounding the defect site. Args: n: Index of defects list """ return self._defect_coord_sites[n] def get_coordinated_elements(self, n): """ Elements of sites in structure surrounding the defect site. Args: n: Index of defect list """ coordinated_species = [] for site in self._defect_coord_sites[n]: coordinated_species.append(site.specie.symbol) return list(set(coordinated_species)) @abc.abstractmethod def make_supercells_with_defects(self, scaling_matrix): """ Generate the supercell with input multipliers and create the defect. First supercell has no defects. To create unit cell with defect pass unit matrix. """ raise NotImplementedError() class Vacancy(Defect): """ Subclass of Defect to generate vacancies and their analysis. Args: structure: pymatgen.core.structure.Structure valences: valences of elements as a dictionary radii: Radii of elements as a dictionary """ def __init__(self, structure, valences, radii): self._structure = structure self._valence_dict = valences self._rad_dict = radii # Store symmetrically distinct sites, their coordination numbers # coordinated_sites, effective charge symm_finder = SpacegroupAnalyzer(self._structure) symm_structure = symm_finder.get_symmetrized_structure() equiv_site_seq = symm_structure.equivalent_sites self._defect_sites = [] self._defect_site_multiplicity = [] for equiv_sites in equiv_site_seq: self._defect_sites.append(equiv_sites[0]) self._defect_site_multiplicity.append(len(equiv_sites)) self._vac_site_indices = [] for site in self._defect_sites: for i in range(len(self._structure.sites)): if site == self._structure[i]: self._vac_site_indices.append(i) coord_finder = VoronoiCoordFinder(self._structure) self._defectsite_coord_no = [] self._defect_coord_sites = [] for i in self._vac_site_indices: self._defectsite_coord_no.append( coord_finder.get_coordination_number(i) ) 
self._defect_coord_sites.append( coord_finder.get_coordinated_sites(i) ) # Store the ionic radii for the elements in the structure # (Used to computing the surface are and volume) # Computed based on valence of each element self._vac_eff_charges = None self._vol = None self._sa = None #@property #def valence_dict(self): # return self._valence_dict def enumerate_defectsites(self): """ Returns symmetrically distinct vacancy sites """ return self._defect_sites def get_defectsite_structure_indices(self): """ Returns indices of symmetrically distinct vacancy sites """ return self._vac_site_indices def get_defectsite_structure_index(self, n): """ index of the vacacy site in the structure.sites list Args: n: Index of vacancy list """ return self._vac_site_indices[n] def get_defectsite_effective_charge(self, n): """ Effective charge (In Kroger-Vink notation, cation vacancy has effectively -ve charge and anion vacancy has +ve charge.) Args: n: Index of vacancy list Returns: Effective charnge of defect site """ # Effective charge (In Kroger-Vink notation, cation vacancy has # effectively -ve charge and anion vacancy has +ve charge.) Inverse # the BVAnalyzer.get_valences result. el = self.get_defectsite(n).species_string return -self._valence_dict[el] #if not self._vac_eff_charges: # self._vac_eff_charges = [] # for site in self.enumerate_defectsites(): # specie = site.specie.symbol # self._vac_eff_charges.append(-self._valence_dict[specie]) #return self._vac_eff_charges[n] def get_coordsites_min_max_charge(self, n): """ Minimum and maximum charge of sites surrounding the vacancy site. 
Args: n: Index of vacancy list """ bv = BVAnalyzer() struct_valences = bv.get_valences(self._structure) coordinated_site_valences = [] def _get_index(site): for i in range(len(self._structure.sites)): if site.is_periodic_image(self._structure.sites[i]): return i raise ValueError("Site not found") for site in self._defect_coord_sites[n]: ind = _get_index(site) coordinated_site_valences.append(struct_valences[ind]) coordinated_site_valences.sort() return coordinated_site_valences[0], coordinated_site_valences[-1] # deprecated def get_volume(self, n): """ Volume of the nth vacancy Args: n: Index of symmetrically distinct vacancies list Returns: floating number representing volume of vacancy """ if not self._vol: self._vol = [] self._sa = [] um = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] sc = self.make_supercells_with_defects(um)[1:] rad_dict = self.struct_radii for i in range(len(sc)): vol, sa = get_void_volume_surfarea(sc[i], rad_dict) self._vol.append(vol) self._sa.append(sa) return self._vol[n] # deprecated def get_surface_area(self, n): """ Surface area of the nth vacancy Args: n: Index of symmetrically distinct vacancies list Returns: floating number representing volume of vacancy """ if not self._sa: self._vol = [] self._sa = [] um = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] supercells = self.make_supercells_with_defects(um)[1:] rad_dict = self.struct_radii for sc in supercells: vol, sa = get_void_volume_surfarea(sc, rad_dict) self._vol.append(vol) self._sa.append(sa) return self._sa[n] def _supercell_with_defect(self, scaling_matrix, defect_site): sc = self._structure.copy() sc.make_supercell(scaling_matrix) oldf_coords = defect_site.frac_coords coords = defect_site.lattice.get_cartesian_coords(oldf_coords) newf_coords = sc.lattice.get_fractional_coords(coords) sc_defect_site = PeriodicSite(defect_site.species_and_occu, newf_coords, sc.lattice, properties=defect_site.properties) for i in range(len(sc.sites)): #if sc_defect_site == sc.sites[i]: if 
sc_defect_site.distance(sc.sites[i]) < 1e-3: del sc[i] return sc raise ValueError('Something wrong if reached here') def make_supercells_with_defects(self, scaling_matrix, species=None, limit_return_structures=False): """ Generate sequence of supercells in pymatgen.core.structure.Structure format, with each supercell containing one vacancy. Args: scaling_matrix: super cell scale parameters in matrix forms species: Species in list format only for which vacancy supercells are required. If not specified all the species are considered. limit_return_structures: Boolean or positive number If number, only that many structures are returned. Returns: Supercells with vacancies. First supercell has no defects. """ sc_with_vac = [] sc = self._structure.copy() sc.make_supercell(scaling_matrix) sc_with_vac.append(sc) if not species: species = sc.symbol_set if not limit_return_structures: limit_return_structures = self.defectsite_count() for defect_site in self.enumerate_defectsites(): if len(sc_with_vac) <= limit_return_structures: if isinstance(defect_site.specie,Specie): site_specie = defect_site.specie.element.symbol elif isinstance(defect_site.specie,Element): site_specie = defect_site.specie.symbol else: raise TypeError("site specie is neither Specie nor Element") if site_specie in species: sc_with_vac.append(self._supercell_with_defect( scaling_matrix, defect_site)) return sc_with_vac class VacancyFormationEnergy(object): """ Using GULP compute the vacancy formation energy. Works only for binary metal oxides due to the use of Buckingham Potentials """ def __init__(self, vacancy): self._vacancy = vacancy self._energies = [] def get_energy(self, n, tol=0.5): """ Formation Energy for nth symmetrically distinct vacancy. 
""" #generate defect free structure energy if not self._energies: no_vac = self._vacancy.defectsite_count() prev_energies = [0.0] * no_vac tol_flg = [False] * no_vac vac_gulp_kw = ('optimise', 'conp', 'qok') val_dict = self._vacancy.struct_valences for sp in range(2, 6): if not (False in tol_flg): #print sp break scale_mat = [[sp, 0, 0], [0, sp, 0], [0, 0, sp]] sc = self._vacancy.make_supercells_with_defects(scale_mat) blk_energy = get_energy_buckingham(sc[0]) no = len(sc[0].sites) #print no for i in range(1, no_vac + 1): if not tol_flg[i - 1]: vac_energy = get_energy_buckingham( sc[i], keywords=vac_gulp_kw, valence_dict=val_dict ) form_energy = vac_energy - (no - 1) / no * blk_energy if abs(form_energy - prev_energies[i - 1]) < tol: tol_flg[i - 1] = True prev_energies[i - 1] = form_energy self._energies = prev_energies self._tol_flg = tol_flg if not self._tol_flg[n]: print("Caution: tolerance not reached for {0} vacancy".format(n)) return self._energies[n] class Interstitial(Defect): """ Subclass of Defect to generate interstitial sites """ def __init__(self, structure, valences, radii, site_type='voronoi_vertex', accuracy='Normal', symmetry_flag=True, oxi_state = False): """ Given a structure, generate symmetrically distinct interstitial sites. For a non-ionic structure, use oxi_state=True and give atomic radii. Args: structure: pymatgen.core.structure.Structure valences: Dictionary of oxidation states of elements in {el:valence} form radii: Radii of elemnts in the structure site_type: "voronoi_vertex" uses voronoi nodes "voronoi_edgecenter" uses voronoi polyhedra edge centers "voronoi_facecenter" uses voronoi polyhedra face centers "all" combines vertices, edgecenters and facecenters. Default is "voronoi_vertex" accuracy: Flag denoting whether to use high accuracy version of Zeo++. Options are "Normal" and "High". Default is normal. 
symmetry_flag: If True, only returns symmetrically distinct sites oxi_state: If False, input structure is considered devoid of oxidation-state decoration. And oxi-state for each site is determined. Use True, if input structure is oxi-state decorated. This option is useful when the structure is not electro-neutral after deleting/adding sites. In that case oxi-decorate the structure before deleting/adding the sites. """ if not oxi_state: self._structure = ValenceIonicRadiusEvaluator(structure).structure else: self._structure = structure self._valence_dict = valences self._rad_dict = radii """ Use Zeo++ to obtain the voronoi nodes. Apply symmetry reduction and the symmetry reduced voronoi nodes are possible candidates for interstitial sites. """ if accuracy == "Normal": high_accuracy_flag = False elif accuracy == "High": high_accuracy_flag = True else: raise NotImplementedError("Accuracy setting not implemented.") if accuracy == "High": if site_type in ('voronoi_facecenter','voronoi_edgecenter','all'): raise NotImplementedError( "Site type not implemented for the accuracy setting") vor_node_sites, vor_edgecenter_sites, vor_facecenter_sites = \ symmetry_reduced_voronoi_nodes(self._structure, self._rad_dict, high_accuracy_flag, symmetry_flag) if site_type == 'voronoi_vertex': possible_interstitial_sites = vor_node_sites elif site_type == 'voronoi_facecenter': possible_interstitial_sites = vor_facecenter_sites elif site_type == 'voronoi_edgecenter': possible_interstitial_sites = vor_edgecenter_sites elif site_type == "all": possible_interstitial_sites = vor_node_sites + \ vor_facecenter_sites + vor_edgecenter_sites else: raise ValueError("Input site type not implemented") #Do futher processing on possibleInterstitialSites to obtain #interstitial sites self._defect_sites = possible_interstitial_sites self._defectsite_coord_no = [] self._defect_coord_sites = [] self._defect_coord_charge = [] self._radii = [] for site in self._defect_sites: coord_no, coord_sites, chrg = 
self._get_coord_no_sites_chrg(site) self._defectsite_coord_no.append(coord_no) self._defect_coord_sites.append(coord_sites) self._defect_coord_charge.append(chrg) for site in self._defect_sites: vor_radius = site.properties.get('voronoi_radius',None) if vor_radius: vor_radius = float(vor_radius) self._radii.append(vor_radius) def _get_coord_no_sites_chrg(self, site): """ Compute the coordination number and coordination charge Args: site: pymatgen.core.sites.Site """ struct = self._structure.copy() struct.append(site.specie.symbol, site.frac_coords) coord_finder = VoronoiCoordFinder(struct) coord_no = coord_finder.get_coordination_number(-1) coord_sites = coord_finder.get_coordinated_sites(-1) # In some cases coordination sites to interstitials include # interstitials also. Filtering them. def no_inter(site): return not site.specie.symbol == 'X' coord_sites = filter(no_inter, coord_sites) coord_chrg = 0 for site, weight in coord_finder.get_voronoi_polyhedra(-1).items(): if not site.specie.symbol == 'X': coord_chrg += weight * self._valence_dict[site.species_string] return coord_no, coord_sites, coord_chrg def enumerate_defectsites(self): """ Enumerate all the symmetrically distinct interstitial sites. The defect site has "X" as occupied specie. """ return self._defect_sites def append_defectsite(self, site): """ Append a site to list of possible interstitials Args: site: pymatgen.core.sites.Site """ raise NotImplementedError() def delete_defectsite(self, n): """ Remove a symmetrically distinct interstitial site Args: n: Index of interstitial site """ del self._defect_sites[n] def get_coordsites_charge_sum(self, n): """ Total charge of the interstitial coordinated sites. Args: n: Index of interstitial list """ return self._defect_coord_charge[n] def get_coordsites_min_max_charge(self, n): """ Minimum and maximum charge of sites surrounding the interstitial site. 
Args: n: Index of symmetrical distinct interstitial site """ coord_site_valences = [] for site in self._defect_coord_sites[n]: coord_site_valences.append(self._valence_dict[site.specie.symbol]) coord_site_valences.sort() return coord_site_valences[0], coord_site_valences[-1] def get_radius(self, n): """ Volume of the nth interstitial Args: n: Index of symmetrically distinct vacancies list Returns: floating number representing radius of interstitial sphere """ return self._radii[n] def get_radii(self): return self._radii def reduce_defectsites(self): """ If multiple defect sites have same voronoi radius, only one is kept. Useful if the symmetry based reduction of initial sites returned from Zeo++ is not working properly due to deviation in ideal lattice coordinates. """ distinct_radii = list(set(self._radii)) for rad in distinct_radii: ind = self._radii.index(rad) # Index of first site with 'rad' for i in reversed(list(range(ind + 1, len(self._radii)))): # Backward search for remaining sites so index is not changed if self._radii[i] == rad: self._defect_sites.pop(i) self._defectsite_coord_no.pop(i) self._defect_coord_sites.pop(i) self._radii.pop(i) def radius_prune_defectsites(self, radius): """ Remove all the defect sites with voronoi radius less than input radius """ for i in reversed(list(range(len(self._radii)))): if self._radii[i] < radius: self._defect_sites.pop(i) self._defectsite_coord_no.pop(i) self._defect_coord_sites.pop(i) self._radii.pop(i) def prune_defectsites(self, el="C", oxi_state=4, dlta=0.1): """ Prune all the defect sites which can't acoomodate the input elment with the input oxidation state. """ rad = float(Specie(el, oxi_state).ionic_radius) - dlta self.radius_prune_defectsites(rad) def prune_close_defectsites(self, dist=0.2): """ Prune the sites that are very close. 
""" #print self.defectsite_count() ind = 0 while ind < self.defectsite_count(): #i = ind + 1 #while i < self.defectsite_count(): i = self.defectsite_count()-1 #print ind, i while i > ind: d = self._defect_sites[ind].distance(self._defect_sites[i]) #print d, dist if d < dist: self._defect_sites.pop(i) #self._defectsite_coord_no.pop(i) #self._defect_coord_sites.pop(i) #self._radii.pop(i) # i += 1 i -= 1 ind += 1 #print self.defectsite_count() def _supercell_with_defect(self, scaling_matrix, defect_site, element): sc = self._structure.copy() sc.make_supercell(scaling_matrix) oldf_coords = defect_site.frac_coords coords = defect_site.lattice.get_cartesian_coords(oldf_coords) #print coords newf_coords = sc.lattice.get_fractional_coords(coords) for i in range(3): coord = newf_coords[i] if coord < 0: while (coord < 0): coord = coord+1 newf_coords[i] = coord elif coord > 1: while (coord > 1): coord = coord-1 newf_coords[i] = coord #print newf_coords #sc_defect_site = PeriodicSite(element, newf_coords, # sc.lattice) try: sc.append(element, newf_coords, coords_are_cartesian=False, validate_proximity=True) except ValueError: sc = None finally: return sc def make_supercells_with_defects(self, scaling_matrix, element): """ Returns sequence of supercells in pymatgen.core.structure.Structure format, with each supercell containing an interstitial. First supercell has no defects. """ sc_list_with_interstitial = [] sc = self._structure.copy() sc.make_supercell(scaling_matrix) sc_list_with_interstitial.append(sc) for defect_site in self.enumerate_defectsites(): sc_with_inter = self._supercell_with_defect( scaling_matrix, defect_site, element ) if sc_with_inter: sc_list_with_interstitial.append(sc_with_inter) return sc_list_with_interstitial class InterstitialAnalyzer(object): """ Use GULP to compute the interstitial formation energy, relaxed structures. Works only for metal oxides due to the use of Buckingham Potentials. 
class InterstitialAnalyzer(object):
    """
    Use GULP to compute the interstitial formation energy, relaxed structures.
    Works only for metal oxides due to the use of Buckingham Potentials.

    Args:
        inter: pymatgen.defects.point_defects.Interstitial
        el: Element name in short hand notation ("El")
        oxi_state: Oxidtation state
        scd: Super cell dimension as number. The scaling is equal along xyz.
    """
    def __init__(self, inter, el, oxi_state, scd=2):
        self._inter = inter
        self._el = el
        self._oxi_state = oxi_state
        self._scd = scd
        self._relax_energies = []
        self._norelax_energies = []
        self._relax_struct = []

    def get_energy(self, n, relax=True):
        """
        Formation Energy for nth symmetrically distinct interstitial.
        """
        if relax and not self._relax_energies:
            self._relax_analysis()
        if not relax and not self._norelax_energies:
            no_inter = self._inter.defectsite_count()
            inter_gulp_kw = ('qok',)
            # BUG FIX: copy before injecting the interstitial element so the
            # Interstitial object's shared valence dict is not mutated.
            val_dict = dict(self._inter.struct_valences)
            val_dict[self._el] = self._oxi_state  # If element not in structure
            scd = self._scd
            scale_mat = [[scd, 0, 0], [0, scd, 0], [0, 0, scd]]
            sc = self._inter.make_supercells_with_defects(scale_mat, self._el)
            blk_energy = get_energy_buckingham(sc[0])
            for i in range(1, no_inter + 1):
                inter_energy = get_energy_buckingham(
                    sc[i], keywords=inter_gulp_kw, valence_dict=val_dict)
                form_energy = inter_energy - blk_energy
                self._norelax_energies.append(form_energy)
        if relax:
            return self._relax_energies[n]
        else:
            return self._norelax_energies[n]

    def _relax_analysis(self):
        """
        Optimize interstitial structures
        """
        no_inter = self._inter.defectsite_count()
        inter_gulp_kw = ('optimise', 'conp', 'qok')
        # Copy: see get_energy; don't mutate the shared valence dict.
        val_dict = dict(self._inter.struct_valences)
        scd = self._scd
        scale_mat = [[scd, 0, 0], [0, scd, 0], [0, 0, scd]]
        sc = self._inter.make_supercells_with_defects(scale_mat, self._el)
        blk_energy, rlx_struct = get_energy_relax_structure_buckingham(sc[0])
        self._relax_struct.append(rlx_struct)
        val_dict[self._el] = self._oxi_state  # If element not in structure
        for i in range(1, no_inter + 1):
            energy, rlx_struct = get_energy_relax_structure_buckingham(
                sc[i], keywords=inter_gulp_kw, valence_dict=val_dict)
            form_energy = energy - blk_energy
            self._relax_energies.append(form_energy)
            self._relax_struct.append(rlx_struct)

    def get_relaxed_structure(self, n):
        """
        Optimized interstitial structure

        Args:
            n: Symmetrically distinct interstitial index

        .. note::

            To get relaxed bulk structure pass -1.
            -ve index will not work as expected.
        """
        if not self._relax_struct:
            self._relax_analysis()
        return self._relax_struct[n + 1]

    def _defect_struct_copy(self, n):
        """Return a copy of the nth relaxed defect structure with the
        interstitial site removed.

        BUG FIX: the original used ``self._relax_struct[n+1:n+2][0]`` with a
        "# copy" comment — but slicing a list does NOT copy the Structure it
        contains, so deleting the last site mutated the cached relaxed
        structure in place.
        """
        def_struct = self._relax_struct[n + 1].copy()
        del def_struct.sites[-1]
        return def_struct

    def get_percentage_volume_change(self, n):
        """
        Volume change after the introduction of interstitial

        Args:
            n: Symmetrically distinct interstitial index
        """
        if not self._relax_struct:
            self._relax_analysis()
        blk_struct = self._relax_struct[0]
        rv = RelaxationAnalyzer(blk_struct, self._defect_struct_copy(n))
        return rv.get_percentage_volume_change()

    def get_percentage_lattice_parameter_change(self, n):
        """
        Lattice parameter change after the introduction of interstitial

        Args:
            n: Symmetrically distinct interstitial index
        """
        if not self._relax_struct:
            self._relax_analysis()
        blk_struct = self._relax_struct[0]
        rv = RelaxationAnalyzer(blk_struct, self._defect_struct_copy(n))
        return rv.get_percentage_lattice_parameter_changes()

    def get_percentage_bond_distance_change(self, n):
        """
        Bond distance change after the introduction of interstitial

        Args:
            n: Symmetrically distinct interstitial index
        """
        if not self._relax_struct:
            self._relax_analysis()
        blk_struct = self._relax_struct[0]
        rv = RelaxationAnalyzer(blk_struct, self._defect_struct_copy(n))
        return rv.get_percentage_bond_dist_changes()

    def relaxed_structure_match(self, i, j):
        """
        Check if the relaxed structures of two interstitials match

        Args:
            i: Symmetrically distinct interstitial index
            j: Symmetrically distinct interstitial index

        .. note::

            To use relaxed bulk structure pass -1.
            -ve index will not work as expected
        """
        if not self._relax_struct:
            self._relax_analysis()
        sm = StructureMatcher()
        struct1 = self._relax_struct[i + 1]
        struct2 = self._relax_struct[j + 1]
        return sm.fit(struct1, struct2)
class StructureRelaxer(object):
    """Relax a structure with GULP Buckingham potentials on construction."""
    def __init__(self, structure):
        self._unrelax_struct = structure
        self.relax()

    def relax(self):
        """Run the relaxation and cache the relaxed structure."""
        energy, rlx_struct = get_energy_relax_structure_buckingham(
            self._unrelax_struct)
        self._relax_struct = rlx_struct

    def get_relaxed_structure(self):
        """Return the cached relaxed structure."""
        return self._relax_struct


class InterstitialStructureRelaxer(object):
    """
    Performs structural relaxation for each interstitial supercell.

    Args:
        interstitial: Unrelaxed interstitial
        el: Species string in short notation
        oxi_state: Oxidation state of the element
        supercell_dim: Dimensions of super cell
    """

    def __init__(self, interstitial, el, oxi_state, supercell_dim=2):
        self._inter = interstitial
        self._scd = supercell_dim
        self._el = el
        self._oxi_state = oxi_state
        self._relax_structs = []
        self._relax_energies = []

    def relax(self):
        """
        Optimize interstitial structures
        """
        no_inter = self._inter.defectsite_count()
        inter_gulp_kw = ('optimise', 'conp', 'qok')
        # BUG FIX: copy before injecting the interstitial element so the
        # Interstitial object's shared valence dict is not mutated.
        val_dict = dict(self._inter.struct_valences)
        scd = self._scd
        scale_mat = [[scd, 0, 0], [0, scd, 0], [0, 0, scd]]
        sc = self._inter.make_supercells_with_defects(scale_mat, self._el)
        blk_energy, rlx_struct = get_energy_relax_structure_buckingham(sc[0])
        self._relax_structs.append(rlx_struct)
        self._relax_energies.append(blk_energy)
        val_dict[self._el] = self._oxi_state  # If element not in structure
        for i in range(1, no_inter + 1):
            # BUG FIX: narrowed the original bare ``except:`` which also
            # swallowed KeyboardInterrupt/SystemExit.  A failed relaxation
            # is still recorded as None so indices stay aligned.
            try:
                energy, rlx_struct = get_energy_relax_structure_buckingham(
                    sc[i], keywords=inter_gulp_kw, valence_dict=val_dict)
                self._relax_energies.append(energy)
                self._relax_structs.append(rlx_struct)
            except Exception:
                self._relax_energies.append(None)
                self._relax_structs.append(None)

        # Same semantics as the original hand-rolled is_empty(): fail only
        # when no relaxation produced a truthy energy.
        if not any(self._relax_energies):
            raise IOError('Relaxation failed')

    def relaxed_structure_match(self, i, j):
        """
        Check if the relaxed structures of two interstitials match

        Args:
            i: Symmetrically distinct interstitial index
            j: Symmetrically distinct interstitial index

        .. note::

            Index 0 corresponds to bulk.
        """
        if not self._relax_structs:
            self.relax()
        sm = StructureMatcher()
        struct1 = self._relax_structs[i]
        struct2 = self._relax_structs[j]
        return sm.fit(struct1, struct2)

    def relaxed_energy_match(self, i, j):
        """
        Check if the relaxed energies of two interstitials match

        Args:
            i: Symmetrically distinct interstitial index
            j: Symmetrically distinct interstitial index

        .. note::

            Index 0 corresponds to bulk.
        """
        if not self._relax_energies:
            self.relax()
        energy1 = self._relax_energies[i]
        energy2 = self._relax_energies[j]
        return energy1 == energy2

    def get_relaxed_structure(self, n):
        """
        Get the relaxed structure of nth symmetrically distinct interstitial.

        Args:
            n: Symmetrically distinct interstitial index

        .. note::

            0 corresponds to relaxed bulk structure
        """
        if not self._relax_structs:
            self.relax()
        return self._relax_structs[n]

    def get_relaxed_energy(self, n):
        """
        Get the relaxed energy of nth symmetrically distinct interstitial.

        Args:
            n: Symmetrically distinct interstitial index

        .. note::

            0 corresponds to relaxed bulk energy
        """
        if not self._relax_energies:
            self.relax()
        return self._relax_energies[n]

    def get_relaxed_interstitial(self):
        """
        Bundle the distinct relaxed interstitials (deduplicated by relaxed
        energy) into a RelaxedInterstitial object.
        """
        if not self._relax_energies:
            self.relax()
        energies = self._relax_energies[:]
        structs = self._relax_structs[:]
        distinct_energy_set = set(energies[1:])  # only interstitial energies
        if None in distinct_energy_set:
            distinct_energy_set.remove(None)
        distinct_structs = [structs[0]]  # bulk
        distinct_energies = [energies[0]]
        for energy in distinct_energy_set:
            ind = energies.index(energy)
            distinct_structs.append(structs[ind])
            distinct_energies.append(energies[ind])
        return RelaxedInterstitial(
            distinct_structs, distinct_energies, self._inter.struct_valences)
class RelaxedInterstitial(object):
    """
    Stores the relaxed supercell structures for each interstitial
    Used to compute formation energies, displacement of atoms near the
    the interstitial.

    Args:
        struct_list: List of structures(supercells). The first structure
            should represent relaxed bulk structure and the subsequent ones
            interstitial structures (with the extra interstitial site
            appended at the end).
        energy_list: List of energies for relaxed interstitial structures.
            The first energy should correspond to bulk structure
        valence_dict: Valences of elements in dictionary form
    """

    def __init__(self, struct_list, energy_list, valence_dict):
        # BUG FIX: the original popped element 0 off the caller's lists,
        # mutating the arguments.  Slicing leaves the inputs untouched.
        self._blk_struct = struct_list[0]
        self._structs = struct_list[1:]
        self._blk_energy = energy_list[0]
        self._energies = energy_list[1:]
        self._valence_dict = valence_dict

        self._coord_no = []
        self._coord_sites = []
        self._coord_charge_no = []

    def formation_energy(self, n, chem_pot=0):
        """
        Compute the interstitial formation energy

        Args:
            n: Index of interstitials
            chem_pot: Chemical potential of interstitial site element.
                If not given, assumed as zero. The user is strongly
                urged to supply the chemical potential value
        """
        return self._energies[n] - self._blk_energy - chem_pot

    def _defect_struct_copy(self, n):
        """Return a copy of the nth defect supercell with the interstitial
        site (last site) removed.

        BUG FIX: the original used ``self._structs[n:n+1][0]`` with a
        "# copy" comment — but slicing a list does NOT copy the contained
        Structure, so ``del def_struct.sites[-1]`` mutated the stored
        supercell and corrupted subsequent calls.
        """
        def_struct = self._structs[n].copy()
        del def_struct.sites[-1]
        return def_struct

    def get_percentage_volume_change(self, n):
        """
        Volume change after the introduction of interstitial

        Args:
            n: index of interstitials
        """
        rv = RelaxationAnalyzer(self._blk_struct, self._defect_struct_copy(n))
        return rv.get_percentage_volume_change()

    def get_percentage_lattice_parameter_change(self, n):
        """
        Lattice parameter change after the introduction of interstitial

        Args:
            n: index of interstitials
        """
        rv = RelaxationAnalyzer(self._blk_struct, self._defect_struct_copy(n))
        return rv.get_percentage_lattice_parameter_changes()

    def get_percentage_bond_distance_change(self, n):
        """
        Bond distance change after the introduction of interstitial.

        Args:
            n: index of interstitials
        """
        rv = RelaxationAnalyzer(self._blk_struct, self._defect_struct_copy(n))
        return rv.get_percentage_bond_dist_changes()

    def get_bulk_structure(self):
        """
        Return relaxed bulk structure
        """
        return self._blk_struct

    def get_interstitial_structure(self, n):
        """
        Return nth relaxed interstitial structure
        """
        return self._structs[n]

    def defect_count(self):
        """
        Returns the number of distinct interstitials
        """
        return len(self._structs)

    def get_defectsite(self, n):
        """
        Returns the defect site of nth interstitial.

        Args:
            n: Index of interstitial
        """
        return self._structs[n][-1]

    def get_coordination_number(self, n):
        """
        Coordination number for nth interstitial.

        Args:
            n: Index of interstitials
        """
        if not self._coord_no:
            self._coord_find()
        return self._coord_no[n]

    def get_charge_coordination_number(self, n):
        """
        Charge coordination number for nth interstitial.

        Args:
            n: Index of interstitials
        """
        if not self._coord_charge_no:
            self._coord_find()
        return self._coord_charge_no[n]

    def get_coordinated_sites(self, n):
        """
        Coordinated sites for nth interstitial.

        Args:
            n: Index of interstitials
        """
        if not self._coord_sites:
            self._coord_find()
        return self._coord_sites[n]

    def get_coordinated_bulk_sites(self, n):
        """
        Bulk sites corresponding to the coordinated sites for nth
        interstitial.

        Args:
            n: Index of interstitials
        """
        blk_sites = []
        for site in self.get_coordinated_sites(n):
            site_index = self._structs[n].sites.index(site)
            blk_sites.append(self._blk_struct[site_index])
        return blk_sites

    def get_coordinated_site_displacement(self, n):
        """
        Compute the total displacement of coordinated sites from the
        interstitial sites during the relaxation

        Args:
            n: Index of defect site
        """
        coord_sites = self.get_coordinated_sites(n)
        coord_blk_sites = self.get_coordinated_bulk_sites(n)
        dist_sum = 0
        for i in range(len(coord_sites)):
            dist_sum += coord_sites[i].distance_from_point(coord_blk_sites[i])
        # How to compute the average?
        return dist_sum

    def _coord_find(self):
        """
        calls VoronoiCoordFinder to compute the coordination number,
        coordination charge
        """
        for i in range(self.defect_count()):
            struct = self._structs[i].copy()
            coord_finder = VoronoiCoordFinder(struct)
            self._coord_no.append(coord_finder.get_coordination_number(-1))
            self._coord_sites.append(coord_finder.get_coordinated_sites(-1))
            coord_chrg = 0
            for site, weight in \
                    coord_finder.get_voronoi_polyhedra(-1).items():
                coord_chrg += weight * \
                    self._valence_dict[site.species_string]
            self._coord_charge_no.append(coord_chrg)
def symmetry_reduced_voronoi_nodes(
        structure, rad_dict, high_accuracy_flag=False, symm_flag=True):
    """
    Obtain symmetry reduced voronoi nodes using Zeo++ and
    pymatgen.symmetry.finder.SpacegroupAnalyzer

    Args:
        strucutre: pymatgen Structure object
        rad_dict: Dictionary containing radii of spcies in the structure
        high_accuracy_flag: Flag denotting whether to use high accuracy
            version of Zeo++
        symm_flag: Flag denoting whether to return symmetrically distinct
            sites only

    Returns:
        Symmetrically distinct voronoi nodes as pymatgen Strucutre
    """
    def add_closest_equiv_site(dist_sites, equiv_sites):
        # Append the equivalent site whose average distance to the already
        # selected sites is smallest.
        if not dist_sites:
            dist_sites.append(equiv_sites[0])
        else:
            avg_dists = []
            for site in equiv_sites:
                dists = [site.distance(dst_site, jimage=[0, 0, 0])
                         for dst_site in dist_sites]
                avg_dist = sum(dists) / len(dist_sites)
                avg_dists.append(avg_dist)
            min_avg_dist = min(avg_dists)
            ind = avg_dists.index(min_avg_dist)
            dist_sites.append(equiv_sites[ind])

    def cmp_memoize_last_site(f):  # Compares and stores last site
        # Decorator used as a filter predicate: keeps a site only if it is
        # not a near-duplicate (< 1e-5) of the previously kept site.  Relies
        # on the input being sorted so duplicates are adjacent.
        def not_duplicates(site1, site2):
            if site1.distance(site2) < 1e-5:
                return False
            else:
                return True

        cmp_memoize_last_site.cache = None

        def helper(x):
            if not cmp_memoize_last_site.cache:
                cmp_memoize_last_site.cache = f(x)
                return True
            y = f(x)
            if not_duplicates(cmp_memoize_last_site.cache, y):
                cmp_memoize_last_site.cache = y
                return True
            else:
                return False
        return helper

    @cmp_memoize_last_site
    def check_not_duplicates(site):
        return site

    if not symm_flag:
        if not high_accuracy_flag:
            vor_node_struct, vor_edgecenter_struct, vor_facecenter_struct = \
                get_voronoi_nodes(structure, rad_dict)
            return vor_node_struct.sites, vor_edgecenter_struct.sites, \
                vor_facecenter_struct.sites
        else:
            # Only the nodes are from high accuracy voronoi decomposition
            vor_node_struct = \
                get_high_accuracy_voronoi_nodes(structure, rad_dict)
            # Before getting the symmetry, remove the duplicates
            vor_node_struct.sites.sort(key=lambda site: site.voronoi_radius)
            # BUG FIX: materialize the filter — under Python 3 a bare
            # filter() is a one-shot iterator; the symm_flag branch below
            # already wraps it in list().
            dist_sites = list(filter(check_not_duplicates,
                                     vor_node_struct.sites))
            return dist_sites, None, None

    if not high_accuracy_flag:
        vor_node_struct, vor_edgecenter_struct, vor_facecenter_struct = \
            get_voronoi_nodes(structure, rad_dict)

        vor_node_symmetry_finder = SpacegroupAnalyzer(
            vor_node_struct, symprec=1e-1)
        vor_node_symm_struct = \
            vor_node_symmetry_finder.get_symmetrized_structure()
        node_equiv_sites_list = vor_node_symm_struct.equivalent_sites

        node_dist_sites = []
        for equiv_sites in node_equiv_sites_list:
            add_closest_equiv_site(node_dist_sites, equiv_sites)

        vor_edge_symmetry_finder = SpacegroupAnalyzer(
            vor_edgecenter_struct, symprec=1e-1)
        vor_edge_symm_struct = \
            vor_edge_symmetry_finder.get_symmetrized_structure()
        edgecenter_equiv_sites_list = vor_edge_symm_struct.equivalent_sites

        edgecenter_dist_sites = []
        for equiv_sites in edgecenter_equiv_sites_list:
            add_closest_equiv_site(edgecenter_dist_sites, equiv_sites)
        if not edgecenter_equiv_sites_list:  # Fix this so doesn't arise
            edgecenter_dist_sites = vor_edgecenter_struct.sites

        vor_fc_symmetry_finder = SpacegroupAnalyzer(
            vor_facecenter_struct, symprec=1e-1)
        vor_fc_symm_struct = \
            vor_fc_symmetry_finder.get_symmetrized_structure()
        facecenter_equiv_sites_list = vor_fc_symm_struct.equivalent_sites

        facecenter_dist_sites = []
        for equiv_sites in facecenter_equiv_sites_list:
            add_closest_equiv_site(facecenter_dist_sites, equiv_sites)
        if not facecenter_equiv_sites_list:  # Fix this so doesn't arise
            facecenter_dist_sites = vor_facecenter_struct.sites

        return node_dist_sites, edgecenter_dist_sites, facecenter_dist_sites
    else:
        # Only the nodes are from high accuracy voronoi decomposition
        vor_node_struct = \
            get_high_accuracy_voronoi_nodes(structure, rad_dict)
        # Before getting the symmetry, remove the duplicates
        vor_node_struct.sites.sort(key=lambda site: site.voronoi_radius)
        dist_sites = list(filter(check_not_duplicates,
                                 vor_node_struct.sites))
        # NOTE(review): symmetry reduction is deliberately skipped for the
        # high-accuracy nodes (deduplication only) — the previously
        # commented-out SpacegroupAnalyzer code has been removed.
        node_dist_sites = dist_sites
        return (node_dist_sites, None, None)
from benchmark.fortune_html_parser import FortuneHTMLParser from setup.linux import setup_util from benchmark.test_types import * import importlib import os import subprocess import time import re from pprint import pprint import sys import traceback import json import logging import csv import shlex import math from collections import OrderedDict from requests import ConnectionError from threading import Thread from threading import Event from utils import header # Cross-platform colored text from colorama import Fore, Back, Style from datetime import datetime from datetime import timedelta class FrameworkTest: headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'" # Used for test types that require no pipelining or query string params. concurrency_template = """ echo "" echo "---------------------------------------------------------" echo " Running Primer {name}" echo " {wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\"" echo "---------------------------------------------------------" echo "" {wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}" sleep 5 echo "" echo "---------------------------------------------------------" echo " Running Warmup {name}" echo " {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\"" echo "---------------------------------------------------------" echo "" {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}" sleep 5 echo "" echo "---------------------------------------------------------" echo " Synchronizing time" echo "---------------------------------------------------------" echo "" ntpdate -s pool.ntp.org for c in {levels} do echo "" echo "---------------------------------------------------------" echo " Concurrency: $c for {name}" echo " {wrk} {headers} 
--latency -d {duration} -c $c --timeout $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\"" echo "---------------------------------------------------------" echo "" STARTTIME=$(date +"%s") {wrk} {headers} --latency -d {duration} -c $c --timeout $c -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url} echo "STARTTIME $STARTTIME" echo "ENDTIME $(date +"%s")" sleep 2 done """ # Used for test types that require pipelining. pipeline_template = """ echo "" echo "---------------------------------------------------------" echo " Running Primer {name}" echo " {wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\"" echo "---------------------------------------------------------" echo "" {wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}" sleep 5 echo "" echo "---------------------------------------------------------" echo " Running Warmup {name}" echo " {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\"" echo "---------------------------------------------------------" echo "" {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}" sleep 5 echo "" echo "---------------------------------------------------------" echo " Synchronizing time" echo "---------------------------------------------------------" echo "" ntpdate -s pool.ntp.org for c in {levels} do echo "" echo "---------------------------------------------------------" echo " Concurrency: $c for {name}" echo " {wrk} {headers} --latency -d {duration} -c $c --timeout $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\" -s ~/pipeline.lua -- {pipeline}" echo "---------------------------------------------------------" echo "" STARTTIME=$(date +"%s") {wrk} {headers} --latency -d {duration} -c $c 
--timeout $c -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url} -s ~/pipeline.lua -- {pipeline} echo "STARTTIME $STARTTIME" echo "ENDTIME $(date +"%s")" sleep 2 done """ # Used for test types that require a database - # These tests run at a static concurrency level and vary the size of # the query sent with each request query_template = """ echo "" echo "---------------------------------------------------------" echo " Running Primer {name}" echo " wrk {headers} --latency -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}2\"" echo "---------------------------------------------------------" echo "" wrk {headers} --latency -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}2" sleep 5 echo "" echo "---------------------------------------------------------" echo " Running Warmup {name}" echo " wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\"" echo "---------------------------------------------------------" echo "" wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2" sleep 5 echo "" echo "---------------------------------------------------------" echo " Synchronizing time" echo "---------------------------------------------------------" echo "" ntpdate -s pool.ntp.org for c in {levels} do echo "" echo "---------------------------------------------------------" echo " Queries: $c for {name}" echo " wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\"" echo "---------------------------------------------------------" echo "" STARTTIME=$(date +"%s") wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c" echo "STARTTIME $STARTTIME" echo "ENDTIME $(date +"%s")" sleep 2 done """ 
############################################################ # start(benchmarker) # Start the test using it's setup file ############################################################ def start(self, out, err): # Setup environment variables logDir = os.path.join(self.fwroot, self.benchmarker.latest_results_directory, 'logs', self.name.lower()) bash_functions_path= os.path.join(self.fwroot, 'toolset/setup/linux/bash_functions.sh') setup_util.replace_environ(config='$FWROOT/config/benchmark_profile', command='''\ export TROOT=%s && \ export IROOT=%s && \ export DBHOST=%s && \ export LOGDIR=%s && \ export MAX_THREADS=%s && \ export MAX_CONCURRENCY=%s \ ''' % ( self.directory, self.install_root, self.database_host, logDir, self.benchmarker.threads, max(self.benchmarker.concurrency_levels))) # Always ensure that IROOT belongs to the runner_user if not os.path.exists(self.install_root): os.mkdir(self.install_root) chown = "sudo chown -R %s:%s %s" % (self.benchmarker.runner_user, self.benchmarker.runner_user, os.path.join(self.fwroot, self.install_root)) subprocess.check_call(chown, shell=True, cwd=self.fwroot, executable='/bin/bash') # Run the module start inside parent of TROOT # - we use the parent as a historical accident, a number of tests # refer to their TROOT maually still previousDir = os.getcwd() os.chdir(os.path.dirname(self.troot)) logging.info("Running setup module start (cwd=%s)", self.directory) # Run the start script for the test as the "testrunner" user # # `sudo` - Switching user requires superuser privs # -u [username] The username # -E Preserves the current environment variables # -H Forces the home var (~) to be reset to the user specified # `stdbuf` - Disable buffering, send output to python ASAP # -o0 zero-sized buffer for stdout # -e0 zero-sized buffer for stderr # `bash` - Run the setup.sh script using bash # -e Force bash to exit on first error # -x Turn on bash tracing e.g. 
print commands before running # # Most servers do not output to stdout/stderr while serving # requests so there is no performance hit from disabling # output buffering. This disabling is necessary to # a) allow TFB to show output in real time and b) avoid loosing # output in the buffer when the testrunner processes are forcibly # killed # # See http://www.pixelbeat.org/programming/stdio_buffering/ # See https://blogs.gnome.org/markmc/2013/06/04/async-io-and-python/ # See http://eyalarubas.com/python-subproc-nonblock.html command = 'sudo -u %s -E -H stdbuf -o0 -e0 bash -exc "source %s && source %s.sh"' % ( self.benchmarker.runner_user, bash_functions_path, os.path.join(self.troot, self.setup_file)) debug_command = '''\ export FWROOT=%s && \\ export TROOT=%s && \\ export IROOT=%s && \\ export DBHOST=%s && \\ export LOGDIR=%s && \\ export MAX_THREADS=%s && \\ export MAX_CONCURRENCY=%s && \\ cd %s && \\ %s''' % (self.fwroot, self.directory, self.install_root, self.database_host, logDir, self.benchmarker.threads, self.directory, max(self.benchmarker.concurrency_levels), command) logging.info("To run %s manually, copy/paste this:\n%s", self.name, debug_command) def tee_output(prefix, line): # Needs to be one atomic write # Explicitly use UTF-8 as it's the most common framework output # TODO improve encoding handling line = prefix.encode('utf-8') + line # Log to current terminal sys.stdout.write(line) sys.stdout.flush() # logging.error("".join([prefix, line])) out.write(line) out.flush() # Start the setup.sh command p = subprocess.Popen(command, cwd=self.directory, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) nbsr = setup_util.NonBlockingStreamReader(p.stdout, "%s: %s.sh and framework processes have terminated" % (self.name, self.setup_file)) # Set a limit on total execution time of setup.sh timeout = datetime.now() + timedelta(minutes = 105) time_remaining = timeout - datetime.now() # Need to print to stdout once every 10 minutes or Travis-CI will abort 
travis_timeout = datetime.now() + timedelta(minutes = 5) # Flush output until setup.sh work is finished. This is # either a) when setup.sh exits b) when the port is bound # c) when we run out of time. Note that 'finished' doesn't # guarantee setup.sh process is dead - the OS may choose to make # setup.sh a zombie process if it still has living children # # Note: child processes forked (using &) will remain alive # after setup.sh has exited. The will have inherited the # stdout/stderr descriptors and will be directing their # output to the pipes. # prefix = "Setup %s: " % self.name while (p.poll() is None and not self.benchmarker.is_port_bound(self.port) and not time_remaining.total_seconds() < 0): # The conditions above are slow to check, so # we will delay output substantially if we only # print one line per condition check. # Adding a tight loop here mitigates the effect, # ensuring that most of the output directly from # setup.sh is sent to tee_output before the outer # loop exits and prints things like "setup.sh exited" # for i in xrange(10): try: line = nbsr.readline(0.05) if line: tee_output(prefix, line) # Reset Travis-CI timer travis_timeout = datetime.now() + timedelta(minutes = 5) except setup_util.EndOfStream: tee_output(prefix, "Setup has terminated\n") break time_remaining = timeout - datetime.now() if (travis_timeout - datetime.now()).total_seconds() < 0: sys.stdout.write(prefix + 'Printing so Travis-CI does not time out\n') sys.stdout.write(prefix + "Status: Poll: %s, Port %s bound: %s, Time Left: %s\n" % ( p.poll(), self.port, self.benchmarker.is_port_bound(self.port), time_remaining)) sys.stdout.flush() travis_timeout = datetime.now() + timedelta(minutes = 5) # Did we time out? if time_remaining.total_seconds() < 0: tee_output(prefix, "%s.sh timed out!! Aborting...\n" % self.setup_file) p.kill() return 1 # What's our return code? 
# If setup.sh has terminated, use that code # Otherwise, detect if the port was bound tee_output(prefix, "Status: Poll: %s, Port %s bound: %s, Time Left: %s\n" % ( p.poll(), self.port, self.benchmarker.is_port_bound(self.port), time_remaining)) retcode = (p.poll() if p.poll() is not None else 0 if self.benchmarker.is_port_bound(self.port) else 1) if p.poll() is not None: tee_output(prefix, "%s.sh process exited naturally with %s\n" % (self.setup_file, p.poll())) elif self.benchmarker.is_port_bound(self.port): tee_output(prefix, "Bound port detected on %s\n" % self.port) # Before we return control to the benchmarker, spin up a # thread to keep an eye on the pipes in case the running # framework uses stdout/stderr. Once all processes accessing # the subprocess.PIPEs are dead, this thread will terminate. # Use a different prefix to indicate this is the framework # speaking prefix = "Server %s: " % self.name def watch_child_pipes(nbsr, prefix): while True: try: line = nbsr.readline(60) if line: tee_output(prefix, line) except setup_util.EndOfStream: tee_output(prefix, "Framework processes have terminated\n") return watch_thread = Thread(target = watch_child_pipes, args = (nbsr, prefix)) watch_thread.daemon = True watch_thread.start() logging.info("Executed %s.sh, returning %s", self.setup_file, retcode) os.chdir(previousDir) return retcode ############################################################ # End start ############################################################ ############################################################ # verify_urls # Verifys each of the URLs for this test. THis will sinply # curl the URL and check for it's return status. 
# For each url, a flag will be set on this object for whether # or not it passed # Returns True if all verifications succeeded ############################################################ def verify_urls(self, out, err): result = True def verify_type(test_type): test = self.runTests[test_type] test.setup_out_err(out, err) out.write(header("VERIFYING %s" % test_type.upper())) base_url = "http://%s:%s" % (self.benchmarker.server_host, self.port) try: results = test.verify(base_url) except ConnectionError as e: results = [('fail',"Server did not respond to request", base_url)] logging.warning("Verifying test %s for %s caused an exception: %s", test_type, self.name, e) except Exception as e: results = [('fail',"""Caused Exception in TFB This almost certainly means your return value is incorrect, but also that you have found a bug. Please submit an issue including this message: %s\n%s""" % (e, traceback.format_exc()), base_url)] logging.warning("Verifying test %s for %s caused an exception: %s", test_type, self.name, e) traceback.format_exc() test.failed = any(result == 'fail' for (result, reason, url) in results) test.warned = any(result == 'warn' for (result, reason, url) in results) test.passed = all(result == 'pass' for (result, reason, url) in results) def output_result(result, reason, url): specific_rules_url = "http://frameworkbenchmarks.readthedocs.org/en/latest/Project-Information/Framework-Tests/#specific-test-requirements" color = Fore.GREEN if result.upper() == "WARN": color = Fore.YELLOW elif result.upper() == "FAIL": color = Fore.RED out.write((" " + color + "%s" + Style.RESET_ALL + " for %s\n") % (result.upper(), url)) print (" " + color + "%s" + Style.RESET_ALL + " for %s\n") % (result.upper(), url) if reason is not None and len(reason) != 0: for line in reason.splitlines(): out.write(" " + line + '\n') print " " + line if not test.passed: out.write(" See %s\n" % specific_rules_url) print " See %s\n" % specific_rules_url [output_result(r1,r2,url) for 
(r1, r2, url) in results] if test.failed: self.benchmarker.report_verify_results(self, test_type, 'fail') elif test.warned: self.benchmarker.report_verify_results(self, test_type, 'warn') elif test.passed: self.benchmarker.report_verify_results(self, test_type, 'pass') else: raise Exception("Unknown error - test did not pass,warn,or fail") result = True for test_type in self.runTests: verify_type(test_type) if self.runTests[test_type].failed: result = False return result ############################################################ # End verify_urls ############################################################ ############################################################ # benchmark # Runs the benchmark for each type of test that it implements # JSON/DB/Query. ############################################################ def benchmark(self, out, err): def benchmark_type(test_type): out.write("BENCHMARKING %s ... " % test_type.upper()) test = self.runTests[test_type] test.setup_out_err(out, err) output_file = self.benchmarker.output_file(self.name, test_type) if not os.path.exists(output_file): # Open to create the empty file with open(output_file, 'w'): pass if not test.failed: if test_type == 'plaintext': # One special case remote_script = self.__generate_pipeline_script(test.get_url(), self.port, test.accept_header) elif test_type == 'query' or test_type == 'update': remote_script = self.__generate_query_script(test.get_url(), self.port, test.accept_header) else: remote_script = self.__generate_concurrency_script(test.get_url(), self.port, test.accept_header) # Begin resource usage metrics collection self.__begin_logging(test_type) # Run the benchmark with open(output_file, 'w') as raw_file: p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err) p.communicate(remote_script) err.flush() # End resource usage metrics collection self.__end_logging() results = self.__parse_test(test_type) print "Benchmark 
results:" pprint(results) self.benchmarker.report_benchmark_results(framework=self, test=test_type, results=results['results']) out.write( "Complete\n" ) out.flush() for test_type in self.runTests: benchmark_type(test_type) ############################################################ # End benchmark ############################################################ ############################################################ # parse_all # Method meant to be run for a given timestamp ############################################################ def parse_all(self): for test_type in self.runTests: if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)): results = self.__parse_test(test_type) self.benchmarker.report_benchmark_results(framework=self, test=test_type, results=results['results']) ########################################################################################## # Private Methods ########################################################################################## ############################################################ # __parse_test(test_type) ############################################################ def __parse_test(self, test_type): try: results = dict() results['results'] = [] stats = [] if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)): with open(self.benchmarker.output_file(self.name, test_type)) as raw_data: is_warmup = True rawData = None for line in raw_data: if "Queries:" in line or "Concurrency:" in line: is_warmup = False rawData = None continue if "Warmup" in line or "Primer" in line: is_warmup = True continue if not is_warmup: if rawData == None: rawData = dict() results['results'].append(rawData) #if "Requests/sec:" in line: # m = re.search("Requests/sec:\s+([0-9]+)", line) # rawData['reportedResults'] = m.group(1) # search for weighttp data such as succeeded and failed. 
if "Latency" in line: m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line) if len(m) == 4: rawData['latencyAvg'] = m[0] rawData['latencyStdev'] = m[1] rawData['latencyMax'] = m[2] # rawData['latencyStdevPercent'] = m[3] #if "Req/Sec" in line: # m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line) # if len(m) == 4: # rawData['requestsAvg'] = m[0] # rawData['requestsStdev'] = m[1] # rawData['requestsMax'] = m[2] # rawData['requestsStdevPercent'] = m[3] #if "requests in" in line: # m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line) # if m != None: # # parse out the raw time, which may be in minutes or seconds # raw_time = m.group(1) # if "ms" in raw_time: # rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0 # elif "s" in raw_time: # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) # elif "m" in raw_time: # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0 # elif "h" in raw_time: # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0 if "requests in" in line: m = re.search("([0-9]+) requests in", line) if m != None: rawData['totalRequests'] = int(m.group(1)) if "Socket errors" in line: if "connect" in line: m = re.search("connect ([0-9]+)", line) rawData['connect'] = int(m.group(1)) if "read" in line: m = re.search("read ([0-9]+)", line) rawData['read'] = int(m.group(1)) if "write" in line: m = re.search("write ([0-9]+)", line) rawData['write'] = int(m.group(1)) if "timeout" in line: m = re.search("timeout ([0-9]+)", line) rawData['timeout'] = int(m.group(1)) if "Non-2xx" in line: m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line) if m != None: rawData['5xx'] = int(m.group(1)) if "STARTTIME" in line: m = re.search("[0-9]+", line) rawData["startTime"] = int(m.group(0)) if "ENDTIME" in line: m = re.search("[0-9]+", line) rawData["endTime"] = int(m.group(0)) test_stats = self.__parse_stats(test_type, rawData["startTime"], rawData["endTime"], 1) # rawData["averageStats"] = 
self.__calculate_average_stats(test_stats) stats.append(test_stats) with open(self.benchmarker.stats_file(self.name, test_type) + ".json", "w") as stats_file: json.dump(stats, stats_file, indent=2) return results except IOError: return None ############################################################ # End benchmark ############################################################ ############################################################ # __generate_concurrency_script(url, port) # Generates the string containing the bash script that will # be run on the client to benchmark a single test. This # specifically works for the variable concurrency tests (JSON # and DB) ############################################################ def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk"): headers = self.headers_template.format(accept=accept_header) return self.concurrency_template.format(max_concurrency=max(self.benchmarker.concurrency_levels), max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration, levels=" ".join("{}".format(item) for item in self.benchmarker.concurrency_levels), server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command) ############################################################ # __generate_pipeline_script(url, port) # Generates the string containing the bash script that will # be run on the client to benchmark a single pipeline test. 
############################################################ def __generate_pipeline_script(self, url, port, accept_header, wrk_command="wrk"): headers = self.headers_template.format(accept=accept_header) return self.pipeline_template.format(max_concurrency=16384, max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration, levels=" ".join("{}".format(item) for item in [256,1024,4096,16384]), server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command, pipeline=16) ############################################################ # __generate_query_script(url, port) # Generates the string containing the bash script that will # be run on the client to benchmark a single test. This # specifically works for the variable query tests (Query) ############################################################ def __generate_query_script(self, url, port, accept_header): headers = self.headers_template.format(accept=accept_header) return self.query_template.format(max_concurrency=max(self.benchmarker.concurrency_levels), max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration, levels=" ".join("{}".format(item) for item in self.benchmarker.query_levels), server_host=self.benchmarker.server_host, port=port, url=url, headers=headers) ############################################################ # Returns True if any test type this this framework test will use a DB ############################################################ def requires_database(self): '''Returns True/False if this test requires a database''' return any(tobj.requires_db for (ttype,tobj) in self.runTests.iteritems()) ############################################################ # __begin_logging # Starts a thread to monitor the resource usage, to be synced with the client's time # TODO: MySQL and InnoDB are possible. Figure out how to implement them. 
############################################################ def __begin_logging(self, test_type): output_file = "{file_name}".format(file_name=self.benchmarker.get_stats_file(self.name, test_type)) dstat_string = "dstat -afilmprsT --aio --fs --ipc --lock --raw --socket --tcp \ --raw --socket --tcp --udp --unix --vm --disk-util \ --rpc --rpcd --output {output_file}".format(output_file=output_file) cmd = shlex.split(dstat_string) dev_null = open(os.devnull, "w") self.subprocess_handle = subprocess.Popen(cmd, stdout=dev_null) ############################################################## # Begin __end_logging # Stops the logger thread and blocks until shutdown is complete. ############################################################## def __end_logging(self): self.subprocess_handle.terminate() self.subprocess_handle.communicate() ############################################################## # Begin __parse_stats # For each test type, process all the statistics, and return a multi-layered dictionary # that has a structure as follows: # (timestamp) # | (main header) - group that the stat is in # | | (sub header) - title of the stat # | | | (stat) - the stat itself, usually a floating point number ############################################################## def __parse_stats(self, test_type, start_time, end_time, interval): stats_dict = dict() stats_file = self.benchmarker.stats_file(self.name, test_type) with open(stats_file) as stats: while(stats.next() != "\n"): # dstat doesn't output a completely compliant CSV file - we need to strip the header pass stats_reader = csv.reader(stats) main_header = stats_reader.next() sub_header = stats_reader.next() time_row = sub_header.index("epoch") int_counter = 0 for row in stats_reader: time = float(row[time_row]) int_counter+=1 if time < start_time: continue elif time > end_time: return stats_dict if int_counter % interval != 0: continue row_dict = dict() for nextheader in main_header: if nextheader != "": 
row_dict[nextheader] = dict() header = "" for item_num, column in enumerate(row): if(len(main_header[item_num]) != 0): header = main_header[item_num] row_dict[header][sub_header[item_num]] = float(column) # all the stats are numbers, so we want to make sure that they stay that way in json stats_dict[time] = row_dict return stats_dict ############################################################## # End __parse_stats ############################################################## def __getattr__(self, name): """For backwards compatibility, we used to pass benchmarker as the argument to the setup.sh files""" try: x = getattr(self.benchmarker, name) except AttributeError: print "AttributeError: %s not a member of FrameworkTest or Benchmarker" % name print "This is probably a bug" raise return x ############################################################## # Begin __calculate_average_stats # We have a large amount of raw data for the statistics that # may be useful for the stats nerds, but most people care about # a couple of numbers. For now, we're only going to supply: # * Average CPU # * Average Memory # * Total network use # * Total disk use # More may be added in the future. If they are, please update # the above list. # Note: raw_stats is directly from the __parse_stats method. # Recall that this consists of a dictionary of timestamps, # each of which contain a dictionary of stat categories which # contain a dictionary of stats ############################################################## def __calculate_average_stats(self, raw_stats): raw_stat_collection = dict() for timestamp, time_dict in raw_stats.items(): for main_header, sub_headers in time_dict.items(): item_to_append = None if 'cpu' in main_header: # We want to take the idl stat and subtract it from 100 # to get the time that the CPU is NOT idle. 
item_to_append = sub_headers['idl'] - 100.0 elif main_header == 'memory usage': item_to_append = sub_headers['used'] elif 'net' in main_header: # Network stats have two parts - recieve and send. We'll use a tuple of # style (recieve, send) item_to_append = (sub_headers['recv'], sub_headers['send']) elif 'dsk' or 'io' in main_header: # Similar for network, except our tuple looks like (read, write) item_to_append = (sub_headers['read'], sub_headers['writ']) if item_to_append is not None: if main_header not in raw_stat_collection: raw_stat_collection[main_header] = list() raw_stat_collection[main_header].append(item_to_append) # Simple function to determine human readable size # http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size def sizeof_fmt(num): # We'll assume that any number we get is convertable to a float, just in case num = float(num) for x in ['bytes','KB','MB','GB']: if num < 1024.0 and num > -1024.0: return "%3.1f%s" % (num, x) num /= 1024.0 return "%3.1f%s" % (num, 'TB') # Now we have our raw stats in a readable format - we need to format it for display # We need a floating point sum, so the built in sum doesn't cut it display_stat_collection = dict() for header, values in raw_stat_collection.items(): display_stat = None if 'cpu' in header: display_stat = sizeof_fmt(math.fsum(values) / len(values)) elif main_header == 'memory usage': display_stat = sizeof_fmt(math.fsum(values) / len(values)) elif 'net' in main_header: receive, send = zip(*values) # unzip display_stat = {'receive': sizeof_fmt(math.fsum(receive)), 'send': sizeof_fmt(math.fsum(send))} else: # if 'dsk' or 'io' in header: read, write = zip(*values) # unzip display_stat = {'read': sizeof_fmt(math.fsum(read)), 'write': sizeof_fmt(math.fsum(write))} display_stat_collection[header] = display_stat return display_stat ########################################################################################### # End __calculate_average_stats 
######################################################################################### ########################################################################################## # Constructor ########################################################################################## def __init__(self, name, directory, benchmarker, runTests, args): self.name = name self.directory = directory self.benchmarker = benchmarker self.runTests = runTests self.fwroot = benchmarker.fwroot self.approach = "" self.classification = "" self.database = "" self.framework = "" self.language = "" self.orm = "" self.platform = "" self.webserver = "" self.os = "" self.database_os = "" self.display_name = "" self.notes = "" self.versus = "" # setup logging logging.basicConfig(stream=sys.stderr, level=logging.INFO) self.install_root="%s/%s" % (self.fwroot, "installs") if benchmarker.install_strategy is 'pertest': self.install_root="%s/pertest/%s" % (self.install_root, name) # Used in setup.sh scripts for consistency with # the bash environment variables self.troot = self.directory self.iroot = self.install_root self.__dict__.update(args) ############################################################ # End __init__ ############################################################ ############################################################ # End FrameworkTest ############################################################ # Static methods def test_order(type_name): """ This sort ordering is set up specifically to return the length of the test name. There were SO many problems involved with 'plaintext' being run first (rather, just not last) that we needed to ensure that it was run last for every framework. """ return len(type_name) def validate_urls(test_name, test_keys): """ Separated from validate_test because urls are not required anywhere. We know a url is incorrect if it is empty or does not start with a "/" character. 
There is no validation done to ensure the url conforms to the suggested url specifications, although those suggestions are presented if a url fails validation here. """ example_urls = { "json_url": "/json", "db_url": "/mysql/db", "query_url": "/mysql/queries?queries= or /mysql/queries/", "fortune_url": "/mysql/fortunes", "update_url": "/mysql/updates?queries= or /mysql/updates/", "plaintext_url": "/plaintext" } for test_url in ["json_url","db_url","query_url","fortune_url","update_url","plaintext_url"]: key_value = test_keys.get(test_url, None) if key_value != None and not key_value.startswith('/'): errmsg = """`%s` field in test \"%s\" does not appear to be a valid url: \"%s\"\n Example `%s` url: \"%s\" """ % (test_url, test_name, key_value, test_url, example_urls[test_url]) raise Exception(errmsg) def validate_test(test_name, test_keys, directory): """ Validate benchmark config values for this test based on a schema """ # Ensure that each FrameworkTest has a framework property, inheriting from top-level if not if not test_keys['framework']: test_keys['framework'] = config['framework'] recommended_lang = directory.split('/')[-2] windows_url = "https://github.com/TechEmpower/FrameworkBenchmarks/milestones/Windows%%20Compatibility" schema = { 'language': { 'help': ('language', 'The language of the framework used, suggestion: %s' % recommended_lang) }, 'webserver': { 'help': ('webserver', 'Name of the webserver also referred to as the "front-end server"') }, 'classification': { 'allowed': [ ('Fullstack', '...'), ('Micro', '...'), ('Platform', '...') ] }, 'database': { 'allowed': [ ('MySQL', 'One of the most popular databases around the web and in TFB'), ('Postgres', 'An advanced SQL database with a larger feature set than MySQL'), ('MongoDB', 'A popular document-store database'), ('Cassandra', 'A highly performant and scalable NoSQL database'), ('Elasticsearch', 'A distributed RESTful search engine that is used as a database for TFB tests'), ('Redis', 'An 
open-sourced, BSD licensed, advanced key-value cache and store'), ('SQLite', 'A network-less database, still supported for backwards compatibility'), ('SQLServer', 'Microsoft\'s SQL implementation'), ('None', 'No database was used for these tests, as is the case with Json Serialization and Plaintext') ] }, 'approach': { 'allowed': [ ('Realistic', '...'), ('Stripped', '...') ] }, 'orm': { 'allowed': [ ('Full', 'Has a full suite of features like lazy loading, caching, multiple language support, sometimes pre-configured with scripts.'), ('Micro', 'Has basic database driver capabilities such as establishing a connection and sending queries.'), ('Raw', 'Tests that do not use an ORM will be classified as "raw" meaning they use the platform\'s raw database connectivity.') ] }, 'platform': { 'help': ('platform', 'Name of the platform this framework runs on, e.g. Node.js, Pypy, hhvm, JRuby ...') }, 'framework': { # Guranteed to be here and correct at this point # key is left here to produce the set of required keys }, 'os': { 'allowed': [ ('Linux', 'Our best-supported host OS, it is recommended that you build your tests for Linux hosts'), ('Windows', 'TFB is not fully-compatible on windows, contribute towards our work on compatibility: %s' % windows_url) ] }, 'database_os': { 'allowed': [ ('Linux', 'Our best-supported host OS, it is recommended that you build your tests for Linux hosts'), ('Windows', 'TFB is not fully-compatible on windows, contribute towards our work on compatibility: %s' % windows_url) ] } } # Confirm required keys are present required_keys = schema.keys() missing = list(set(required_keys) - set(test_keys)) if len(missing) > 0: missingstr = (", ").join(map(str, missing)) raise Exception("benchmark_config.json for test %s is invalid, please amend by adding the following required keys: [%s]" % (test_name, missingstr)) # Check the (all optional) test urls validate_urls(test_name, test_keys) # Check values of keys against schema for key in required_keys: val 
= test_keys.get(key, "").lower() has_predefined_acceptables = 'allowed' in schema[key] if has_predefined_acceptables: allowed = schema[key].get('allowed', []) acceptable_values, descriptors = zip(*allowed) acceptable_values = [a.lower() for a in acceptable_values] if val not in acceptable_values: msg = ("Invalid `%s` value specified for test \"%s\" in framework \"%s\"; suggestions:\n" % (key, test_name, config['framework'])) helpinfo = ('\n').join([" `%s` -- %s" % (v, desc) for (v, desc) in zip(acceptable_values, descriptors)]) fullerr = msg + helpinfo + "\n" raise Exception(fullerr) elif not has_predefined_acceptables and val == "": msg = ("Value for `%s` in test \"%s\" in framework \"%s\" was missing:\n" % (key, test_name, config['framework'])) helpinfo = " %s -- %s" % schema[key]['help'] fullerr = msg + helpinfo + '\n' raise Exception(fullerr) def parse_config(config, directory, benchmarker): """ Parses a config file into a list of FrameworkTest objects """ tests = [] # The config object can specify multiple tests # Loop over them and parse each into a FrameworkTest for test in config['tests']: tests_to_run = [name for (name,keys) in test.iteritems()] if "default" not in tests_to_run: logging.warn("Framework %s does not define a default test in benchmark_config.json", config['framework']) # Check that each test configuration is acceptable # Throw exceptions if a field is missing, or how to improve the field for test_name, test_keys in test.iteritems(): # Validates the benchmark_config entry validate_test(test_name, test_keys, directory) # Map test type to a parsed FrameworkTestType object runTests = dict() for type_name, type_obj in benchmarker.types.iteritems(): try: # Makes a FrameWorkTestType object using some of the keys in config # e.g. 
JsonTestType uses "json_url" runTests[type_name] = type_obj.copy().parse(test_keys) except AttributeError as ae: # This is quite common - most tests don't support all types # Quitely log it and move on (debug logging is on in travis and this causes # ~1500 lines of debug, so I'm totally ignoring it for now # logging.debug("Missing arguments for test type %s for framework test %s", type_name, test_name) pass # We need to sort by test_type to run sortedTestKeys = sorted(runTests.keys(), key=test_order) sortedRunTests = OrderedDict() for sortedTestKey in sortedTestKeys: sortedRunTests[sortedTestKey] = runTests[sortedTestKey] # Prefix all test names with framework except 'default' test # Done at the end so we may still refer to the primary test as `default` in benchmark config error messages if test_name == 'default': test_name = config['framework'] else: test_name = "%s-%s" % (config['framework'], test_name) # By passing the entire set of keys, each FrameworkTest will have a member for each key tests.append(FrameworkTest(test_name, directory, benchmarker, sortedRunTests, test_keys)) return tests
from datetime import date, datetime, timedelta
import pandas as pd

"""States between which we consider an issue to be being worked on
for the purposes of calculating cycletime"""
# NOTE(review): the string above is a statement, not a module docstring
# (it follows the imports) — it documents the constants below.
CREATED_STATE = 'Open'
START_STATE = 'In Progress'
END_STATE = 'Customer Approval'
REOPENED_STATE = 'Reopened'


def cycle_time(history,
               start_state=START_STATE,
               after_state=None,
               end_state=END_STATE,
               exit_state=None,
               reopened_state=REOPENED_STATE,
               include_states=None,
               exclude_states=None):
    """Calculate how long it has taken an issue to get from START_STATE
    to END_STATE.

    If we want to count from the date an issue was created we need to
    specify the date the issue was created and the CREATED_STATE if
    different from the default.

    `history` is a pandas Series of daily states indexed by date
    (presumably one entry per day — TODO confirm against callers).

    Modes:
    - include_states: just count the days spent in any of these states.
    - exclude_states: count the days spent outside all of these states.
    - otherwise: find the earliest start and the latest end and return
      the day difference (+1 unless exit_state is used), or None if the
      issue never reached an end state.
    """
    # Counting modes short-circuit the date-based calculation entirely.
    if include_states is not None:
        count = 0
        for day in history:
            if day in include_states:
                count += 1
        return count
    if exclude_states is not None:
        count = 0
        for day in history:
            if day not in exclude_states:
                count += 1
        return count
    start_date = None
    end_date = None
    for i, day in enumerate(history):
        new_start_date = None
        if after_state:
            # Start counting the day AFTER we first see after_state
            # (unless it is the final sample, in which case use it).
            if day == after_state:
                if i == len(history.index) - 1:
                    new_start_date = history.index[i]
                else:
                    new_start_date = history.index[i+1]
        else:
            if day == start_state:
                new_start_date = history.index[i]
        # NOTE(review): this repeats the start_state check above and also
        # fires even when after_state is supplied — looks like a leftover
        # duplicate; confirm whether start_state should really override
        # the after_state branch before removing.
        if day == start_state:
            new_start_date = history.index[i]
        if new_start_date is not None:
            if start_date is None:
                start_date = new_start_date
            else:
                # Keep the earliest candidate start date.
                if new_start_date < start_date:
                    start_date = new_start_date
        if exit_state is not None:
            # End counting the day after exit_state is first seen
            # (or on it, if it is the final sample).
            if day == exit_state:
                if i == len(history.index) - 1:
                    end_date = history.index[i]
                else:
                    end_date = history.index[i+1]
        else:
            if day == end_state:
                # We ignore transitions to end_state if
                # they are from reopened.
                # This is because we sometime have to re-open
                # tickets just to fix
                # details of ownership, component, type or resolution.
                # NOTE(review): `day == end_state` already holds here, so
                # `day != reopened_state` is always True unless
                # end_state == reopened_state — confirm this guard does
                # what the comment intends.
                if day != reopened_state:
                    end_date = history.index[i]
    if start_date is None:
        # Round up if we only ever saw the end state.
        # This means that the start state was on the same day.
        if end_date is not None:
            return 1
    if end_date is None:
        return None
    # exit_state semantics are exclusive, so no +1 in that mode.
    offset = 0
    if exit_state is None:
        offset = 1
    return ((end_date - start_date).days) + offset


def extract_date(created):
    # JIRA timestamps start with an ISO date; keep only the date part.
    return datetime.strptime(created[:10], '%Y-%m-%d').date()


def time_in_states(histories, from_date=None, until_date=None):
    """
    How long did an issue spend in each state in its history.

    For the first state it was in count 'from' the start of the period
    we are interested in, typically when the issue was created.

    For the last state it was in count from the time the state was
    entered until the date specified in 'until' - typically today's date.

    Returns a list of {'state': ..., 'days': ...} dicts in
    chronological order.
    """
    time_in_states = []
    current_state = u'Open'
    if from_date is None:
        # Epoch fallback so the first delta is still computable.
        from_date = date(1970, 01, 01)
    # Accept either a date or a datetime for from_date.
    if hasattr(from_date, 'date'):
        prev_state_change_date = from_date.date()
    else:
        prev_state_change_date = from_date
    # JIRA changelogs are newest-first; walk them oldest-first.
    for history in reversed(histories):
        for item in history.items:
            if item.field == 'status':
                state_change_date = extract_date(history.created)
                days_in_state = state_change_date - prev_state_change_date
                if current_state is None:
                    current_state = item.fromString
                time_in_states.append({'state': current_state,
                                       'days': days_in_state.days})
                current_state = item.toString
                prev_state_change_date = state_change_date
    if until_date is not None:
        # Close out the final state up to the requested horizon.
        final_state_days = until_date - prev_state_change_date
        time_in_states.append({'state': current_state,
                               'days': final_state_days.days})
    else:
        # No horizon given: credit the final state with a single day.
        time_in_states.append({'state': current_state,
                               'days': 1})
    return time_in_states


def history_from_jira_changelog(changelog, created_date, until_date=None):
    """Expand a JIRA changelog into a daily pandas Series of states,
    indexed by date starting at created_date."""
    issue_history = time_in_states(changelog.histories,
                                   from_date=created_date,
                                   until_date=until_date)
    issue_day_history = []
    history = None
    total_days = 0
    for state_days in issue_history:
        state = state_days['state']
        days = state_days['days']
        # One list entry per day spent in this state.
        days_in_state = [state] * days
        issue_day_history += days_in_state
        total_days += days
    dates = [created_date + timedelta(days=x) for x in range(0, total_days)]
    try:
        history = pd.Series(issue_day_history, index=dates)
    except AssertionError as e:
        # pandas raises if data and index lengths disagree; dump the
        # inputs for debugging and return None.
        print e
        print dates
        print issue_day_history
    return history


def arrivals(histories, add_to=None):
    """Count status-transition arrivals per day per target state.

    Returns (and optionally extends, via add_to) a dict of
    {date: {to_state: count}}.
    """
    if add_to is None:
        arrivals = {}
    else:
        arrivals = add_to
    for history in histories:
        day = extract_date(history.created)
        if not day in arrivals:
            arrivals[day] = {}
        for item in history.items:
            if item.field == 'status':
                if not item.toString in arrivals[day]:
                    arrivals[day][item.toString] = 1
                else:
                    arrivals[day][item.toString] += 1
    return arrivals


def history_from_state_transitions(start_date, state_transitions, end_date):
    """
    Get a daily history of states based on state transitions.

    state_transitions is an ordered iterable of dicts with 'timestamp',
    'from' and 'to' keys; returns a pandas Series indexed by every date
    from start_date to end_date inclusive.
    """
    history = []
    to_state = None
    last_date = start_date
    for state in state_transitions:
        # NOTE: shadows the builtin `date` imported above; harmless here
        # because `date(...)` is not called again in this function.
        date = state['timestamp'].date()
        num_days = (date - last_date).days
        # Fill every day before this transition with the 'from' state.
        for n in range(0, num_days):
            history.append(state['from'])
        last_date = date
        to_state = state['to']
    # Pad out to end_date (inclusive) with the final state.
    num_days = (end_date - last_date).days
    for n in range(0, num_days + 1):
        history.append(to_state)
    dates = [start_date + timedelta(days=x)
             for x in range(0, (end_date - start_date).days + 1)]
    return pd.Series(history, index=dates)
#----------------------------------------------------------------------
# Copyright (c) 2013-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------

# Update the user certs in the database ma_inside_keys tables to reflect the current ma_cert

import optparse
import os
import sys
import subprocess
import tempfile
import datetime
import OpenSSL

# Helper functions

# Get contents of file
def read_file(filename):
    f = open(filename, 'r')
    contents = f.read()
    f.close()
    return contents

# Run SQL query and dump output into given file.
# -t (tuples only) and -A (unaligned) keep the output machine-parseable.
def run_sql_to_file(sql, outfile):
    # The -A argument has the practical effect of stripping the trailing '+' from each line
    # in keys when run against postgresql 9.1 (not seen in 8.4). No harm in earlier DBs.
    cmd = ['psql', 'portal', '-U', 'portal', '-h', 'localhost', '-c', sql,
           '-o', outfile, '-t', '-q', '-A']
    output = subprocess.call(cmd);

# Run SQL query and return its output as a string (via a tempfile).
def run_sql(sql):
    file = get_tempfile()
    run_sql_to_file(sql, file)
    contents = read_file(file)
    os.remove(file)
    return contents

# Create a tempfile ready for writing to
def get_tempfile():
    (fd, filename) = tempfile.mkstemp()
    os.close(fd)
    return filename

# Strip leading spaces and trailing blank lines from key file
# (psql output pads lines; openssl needs a clean PEM file).
def fix_keyfile(keyfile):
    key_data = read_file(keyfile);
    key_data_lines = key_data.split('\n');
    new_key_data_lines = [line.strip() for line in key_data_lines if len(line.strip()) > 0]
    new_key_data = "\n".join(new_key_data_lines) + "\n"
    file = open(keyfile, 'w');
    file.write(new_key_data);
    file.close()


# Wraps the openssl CLI to create and sign a user certificate from a
# user's existing private key.
class UserCertGenerator:

    def __init__(self):
        pass

    # Constants
    # Path to the CA config used by 'openssl ca'.
    ca_config = '/usr/share/geni-ch/CA/openssl.cnf'
    # Name of the x509v3 extensions section written to the ext file.
    extname = 'v3_user'

    # Create a CSR from the user's private key; returns the CSR tempfile path.
    def create_csr_for_user_key(self, user_key_file):
        csr_file = get_tempfile()
        # Create the csr (certificate signing request)
        csr_cmd = ['openssl', 'req', \
                       '-new', \
                       '-key', user_key_file, \
                       '-batch', \
                       '-out', csr_file]
        # print "CMD = " + " ".join(csr_cmd)
        subprocess.call(csr_cmd)
        return csr_file

    # Write the extension file
    # Return 0 if no error
    # (returns a tuple of (ext_file_path, retval); user_email is unused —
    # the 'email:copy' directive pulls it from the CSR subject instead.)
    def write_ext_file(self, user_urn, user_uuid, user_email):
        extdata_template = "[ %s ]\n" \
            + "subjectKeyIdentifier=hash\n" \
            + "authorityKeyIdentifier=keyid:always,issuer:always\n" \
            + "basicConstraints = CA:FALSE\n" \
            + "subjectAltName=email:copy,URI:%s,URI:urn:uuid:%s\n"
        extdata = extdata_template % (self.extname, user_urn, user_uuid);
        retval = 0
        ext_file = ""
        try:
            ext_file = get_tempfile()
            f = open(ext_file, 'w');
            f.write(extdata);
            f.close()
        except Exception, e:
            print "WEF %s" % str(e)
            retval = 1
        return ext_file, retval

    # Sign the Certificate signing request, writing file to given filename
    def sign_csr(self, csr_file, cert_file, signer_cert_file, signer_key_file,
                 subject, user_urn, user_uuid, user_email):
        (ext_file, ext_failure) = self.write_ext_file(user_urn, user_uuid,
                                                      user_email)
        if ext_failure:
            print "Returning from error in write_ext_file %d" % ext_failure
            return 1
        sign_cmd = ['openssl', 'ca', \
                        '-config', self.ca_config, \
                        '-policy', 'policy_anything', \
                        '-in', csr_file, \
                        '-out', cert_file, \
                        '-extfile', ext_file, \
                        '-extensions', self.extname, \
                        '-batch', \
                        '-notext', \
                        '-cert', signer_cert_file, \
                        '-keyfile', signer_key_file, \
                        '-subj', subject]
        # print "CMD = " + " ".join(sign_cmd)
        retcode = subprocess.call(sign_cmd)
        if retcode == 0:
            os.remove(ext_file)
        else:
            # Keep the ext file around for debugging on failure.
            print "sign command failed. ext file is %s" % (ext_file)

    # Create a cert with the user's URN, UUID and email signed by
    # Signed by the user's private key and then certified by signer's signature
    def create_cert_for_user_key(self, signer_cert_file, signer_key_file, \
                                     user_urn, user_uuid, user_email, \
                                     user_key_file, cert_file):
        csr_file = self.create_csr_for_user_key(user_key_file)
        subject = "/CN=%s/emailAddress=%s" % (user_uuid, user_email)
        self.sign_csr(csr_file, cert_file, \
                          signer_cert_file, signer_key_file, \
                          subject, user_urn, user_uuid, user_email)
        os.remove(csr_file)


# Parse the notAfter field of a PEM certificate into a datetime.
def cert_expiration(pemcert):
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                           pemcert)
    not_after = cert.get_notAfter()
    expires = datetime.datetime.strptime(not_after, '%Y%m%d%H%M%SZ')
    return expires

# Update user certificate in database table
# (stores user cert + MA cert chained together, plus the expiration).
def update_certificate(user_id, table_name, user_cert, ma_cert):
    new_cert = user_cert + ma_cert
    expiration = cert_expiration(user_cert)
    # NOTE(review): SQL is built by string interpolation throughout this
    # script. The cert text is locally generated, but consider
    # parameterized queries if any of these values can be user-influenced.
    sql = ("update %s set certificate = '%s', expiration = '%s'" +
           " where member_id = '%s'")
    sql = sql % (table_name, new_cert, expiration, user_id)
    run_sql(sql)


# Class to update all certificates in a given database in ma_inside_key and ma_outside_cert tables
class UserCertificateUpdater:

    def __init__(self, argv):
        self._argv = argv
        self._options = self.parse_args()
        self._ma_cert_file = self._options.ma_cert_file
        self._ma_key_file = self._options.ma_key_file
        self._ma_cert = read_file(self._options.ma_cert_file)
        self._old_authority= self._options.old_authority
        self._new_authority= self._options.new_authority

    # Parse command-line options; all four options are required and the
    # script exits with usage output if any is missing.
    def parse_args(self):
        parser = optparse.OptionParser()
        parser.add_option("--ma_cert_file", help="location of MA cert")
        parser.add_option("--ma_key_file", help="location of MA private key")
        parser.add_option("--old_authority", help="name of old MA authority")
        parser.add_option("--new_authority", help="name of new MA authority")
        options, args = parser.parse_args(self._argv)
        if not options.ma_cert_file \
                or not options.ma_key_file \
                or not options.old_authority \
                or not options.new_authority:
            parser.print_help()
            sys.exit()
        return options

    # def update_certs_in_table(self, tablename):
    #     sql = "select member_id from %s" % tablename
    #     user_ids = run_sql(sql).split('\n')
    #
    #     for user_id in user_ids:
    #         user_id = user_id.strip()
    #         if len(user_id) == 0: continue
    #         user_id = int(user_id)
    #
    #         sql = "select certificate from %s where member_id = %d" % (tablename, user_id);
    #         cert = run_sql(sql)
    #         # Need to split into lines, take off the leading space and rejoin
    #         lines = cert.split('\n')
    #         trimmed_lines = [line[1:] for line in lines]
    #         cert = '\n'.join(trimmed_lines)
    #         end_certificate = 'END CERTIFICATE-----\n'
    #         cert_pieces = cert.split(end_certificate);
    #         user_cert = cert_pieces[0] + end_certificate
    #         ma_cert = cert_pieces[1] + end_certificate
    #
    #         if ma_cert == self._old_ma_cert:
    #             print "Replacing old MA cert with new MA cert: user ID %d table %s" % (user_id, tablename)
    #             update_certificate(user_id, tablename, user_cert, self._new_ma_cert)
    #         elif ma_cert == self._new_ma_cert:
    #             print "Already associated with new MA cert: user ID %d table %s" % (user_id, tablename)
    #         else:
    #             print "MA cert unknown: user ID %d table %s" % (user_id, tablename)

    # Regenerate certificates for every member in the given table.
    # Members without a private key are either an error
    # (send_email_if_no_private_key False) or are recorded so they can
    # be emailed and forced to re-submit a CSR.
    def create_certs_in_table(self, tablename, send_email_if_no_private_key):
        sql = "select member_id from %s" % tablename
        user_uuids = run_sql(sql).split('\n')
        uus = [uu.strip() for uu in user_uuids if len(uu.strip()) > 0]
        user_uuids = uus
        # Members that have a certificate but no stored private key.
        sql = "select member_id from %s where private_key is null and certificate is not null" % tablename;
        user_uuids_no_key = run_sql(sql).split('\n')
        uunks = [uunk.strip() for uunk in user_uuids_no_key if len(uunk.strip()) > 0]
        user_uuids_no_key = uunks
        user_emails_no_key = []
        user_emails_with_key = []
        # Members with neither a private key nor a certificate.
        sql = "select member_id from %s where private_key is null and certificate is null" % tablename;
        user_uuids_no_key_no_cert = run_sql(sql).split('\n')
        uunkncs = [uunknc.strip() for uunknc in user_uuids_no_key_no_cert if len(uunknc.strip()) > 0]
        user_uuids_no_key_no_cert = uunkncs
        if not send_email_if_no_private_key and len(user_uuids_no_key):
            print "Error: table with no private keys not allowed: %s" % tablename
            sys.exit(-1)
        for user_uuid in user_uuids:
            sql = "select value from ma_member_attribute where member_id = '%s' and name = 'email_address'" % user_uuid
            addresses = run_sql(sql).split('\n')
            user_email = addresses[0].strip()
            # Skip members with no recorded email address.
            if not user_email:
                continue
            sql = "select value from ma_member_attribute where member_id = '%s' and name = 'urn'" \
                % user_uuid
            urns = run_sql(sql).split('\n')
            old_user_urn = urns[0].strip()
            # Rewrite the URN from the old authority name to the new one.
            user_urn = old_user_urn.replace(self._old_authority,
                                            self._new_authority)
            # print "Old urn = %s New urn = %s" % (old_user_urn, user_urn)
            full_user_uuid = "urn:uuid:%s" % user_uuid
            if user_uuid in user_uuids_no_key_no_cert:
                # Nothing to regenerate: drop the row entirely.
                print "User has no private key or cert: %s" % user_uuid
                sql = "delete from %s where member_id = '%s'" % (tablename, user_uuid)
                run_sql(sql)
            elif user_uuid in user_uuids_no_key:
                # No private key: Send an email to generate a new CSR
                print "User has no private key: %s" % user_uuid
                if send_email_if_no_private_key:
                    # Note this in the DB
                    sql = "insert into ma_member_attribute (member_id, name, value, self_asserted) values ('%s', 'panther_outside_cert', 'no key', false)" % (user_uuid)
                    run_sql(sql)
                    # Delete the old outside cert, so they are forced to re generate
                    sql = "delete from %s where member_id = '%s'" % (tablename, user_uuid)
                    run_sql(sql)
                    # Record their email in a file so we can email all these people
                    user_emails_no_key.append(user_email)
            else:
                print "User has private key: %s" % user_uuid
                if send_email_if_no_private_key:
                    # Record their email in a file so we can email all these people
                    user_emails_with_key.append(user_email)
                    # Note this in the DB
                    sql = "insert into ma_member_attribute (member_id, name, value, self_asserted) values ('%s', 'panther_outside_cert', 'had key', false)" % (user_uuid)
                    run_sql(sql)
                # Re-generate their cert
                sql = "select private_key from %s where member_id='%s'" % (tablename, user_uuid)
                user_key_file = get_tempfile()
                run_sql_to_file(sql, user_key_file)
                fix_keyfile(user_key_file)
                cert_file = "/tmp/cert-%s.pem" % user_uuid
                cert_generator = UserCertGenerator()
                cert_generator.create_cert_for_user_key(self._ma_cert_file,
                                                        self._ma_key_file,
                                                        user_urn, user_uuid,
                                                        user_email,
                                                        user_key_file,
                                                        cert_file)
                user_cert = read_file(cert_file)
                update_certificate(user_uuid, tablename, user_cert,
                                   self._ma_cert)
                os.remove(user_key_file)
                os.remove(cert_file)
        # End of loop over user_uuids
        if send_email_if_no_private_key:
            # Record file of people who had outside cert no private key (did CSR)
            if len(user_emails_no_key):
                fname = "/tmp/%s-user-emails-no-key.txt" % tablename
                with open(fname, 'w') as file:
                    for email in user_emails_no_key:
                        file.write(email)
                        file.write('\n')
            # Record file of people who had outside cert with private key (need to re-download)
            if len(user_emails_with_key):
                fname = "/tmp/%s-user-emails-with-key.txt" % tablename
                with open(fname, 'w') as file:
                    for email in user_emails_with_key:
                        file.write(email);
                        file.write('\n')
    # end of create_certs_in_table

    # Inside keys must all have private keys; outside certs may not.
    def run(self):
        self.create_certs_in_table('ma_inside_key', False)
        self.create_certs_in_table('ma_outside_cert', True)


def main(argv=None):
    if not argv:
        argv = sys.argv
    updater = UserCertificateUpdater(argv)
    updater.run()
    # NOTE(review): main returns None, so sys.exit(main()) below always
    # exits with status 0 — confirm that is intended.

if __name__ == "__main__":
    sys.exit(main())
# End-to-end regression script for the `blog` command-line tool.
# Each section runs the CLI via Executor and checks its output with the
# Q template/matcher DSL from bloggertool.str_util.
# NOTE(review): the `Q(...) == out` statements appear to be assertions
# performed by Q's __eq__ (they raise on mismatch) — confirm against
# str_util before assuming these are no-ops.
import os
import shutil
import sys
from textwrap import dedent

import jinja2

from .util import Executor, Q
from bloggertool.str_util import qname
# NOTE(review): `dedent` and `qname` look unused in this module —
# confirm before removing.

# Account used against the live blogger service for the publish tests.
email = "andrew.svetlov@gmail.com"
blogid = "1016801571750880882"


def run():
    """Run the full CLI regression suite in a scratch 'regr-data' folder
    next to this script: init/info/add/html/label/post/ls/publish/rm/
    link/diff/push scenarios, in order (later sections depend on state
    created by earlier ones)."""
    folder = os.path.dirname(os.path.abspath(sys.argv[0]))
    regr_folder = os.path.join(folder, 'regr-data')
    blog_cmd = 'blog'
    exe = Executor(blog_cmd, regr_folder)
    # Start from a clean scratch directory every run.
    exe.rmtree()
    exe.mkdir('.')
    project_dir = exe.full_name('sample_blog')

    # Expected-output templates ({0!q} etc. are Q's quoting conversions).
    # NOTE(review): the exact leading whitespace of the multi-line
    # templates could not be recovered from the mangled source — assumes
    # Q normalizes/dedents template text; verify against str_util.
    CREATE_PROJECT = Q("INFO Create project {0!q}")
    ALREADY_IN_PROJECT = Q("ERROR Already a project: {0!q}")
    USER_INFO = Q("""
INFO User info:
email: {email!Q}
blogid: {blogid!Q}
template:
dir: {dir!Q}
file: {file!Q}
source-encoding: utf-8
""")
    USER_UPDATED = Q("INFO User updated")
    ADD = Q("INFO Add {name!q} -> {file!q}")
    HTML = Q("INFO Generate html for {name!q}")
    HTML_WARNING_NO_TEMPLATE = HTML + '\n' + Q("""
WARNING User settings has no template specified.
Use markdown output as html.
""")
    SKIP_FRESH = Q("INFO Skip fresh {name!q}")

    ############################################
    print '***** INIT ******'
    print 'init project'
    out = exe.go('init sample_blog')
    #import pdb;pdb.set_trace()
    CREATE_PROJECT(project_dir) == out

    print 'init same project'
    # 255 is the expected CLI exit status for the error cases below.
    out = exe.go('init sample_blog', 255)
    ALREADY_IN_PROJECT(project_dir) == out

    print 'init sub project'
    out = exe.go('init sample_blog/sample', 255)
    ALREADY_IN_PROJECT(project_dir) == out

    print "init sub project with cd"
    with exe.cd('sample_blog'):
        out = exe.go('init sample', 255)
        ALREADY_IN_PROJECT(project_dir) == out

    shutil.rmtree(project_dir)

    print "init project in current folder"
    exe.mkdir('sample_blog')
    with exe.cd('sample_blog'):
        out = exe.go('init .')
        CREATE_PROJECT(project_dir) == out

    with exe.cd('sample_blog'):
        out = exe.go('ls')
        Q("INFO No posts") == out

    #############################
    print "***** USER *****"
    print "info of empty project"
    with exe.cd('sample_blog'):
        out = exe.go('info')
        #import pdb;pdb.set_trace()
        USER_INFO() == out

    print "fail rinfo for empty user"
    with exe.cd('sample_blog'):
        out = exe.go('rinfo', 255)
        Q("ERROR Set user email first") == out

    print "setup project info"
    with exe.cd('sample_blog'):
        cmd = 'info --email %s --blogid %s' % (email, blogid)
        out = exe.go(cmd)
        USER_UPDATED == out

    print "info of filled project"
    with exe.cd('sample_blog'):
        out = exe.go('info')
        USER_INFO(email=email, blogid=blogid) == out

    ###############################
    print "*********** rinfo ************"
    print "simple"
    with exe.cd('sample_blog'):
        out = exe.go('rinfo')
        # rinfo lists remote blogs; make sure ours is among them.
        found = False
        for m in Q(r"[[](?P<blogid>\d+)]").ifind(out):
            if m['blogid'] == blogid:
                found = True
                break
        assert found

    ##############################
    print "****** SIMPLE HTML *******"
    TXT = 'Text of sample article'
    INNER_HTML = '<p>' + TXT + '</p>'

    print "add post"
    with exe.cd('sample_blog'):
        exe.write('article.md', TXT)
        md_fname = exe.full_name('article.md')
        out = exe.go('add article.md')
        ADD(name='article', file='article.md') == out

    print "generate html without template"
    with exe.cd('sample_blog'):
        out = exe.go('html article.md')
        Q(INNER_HTML) == exe.read('article.inner.html')
        HTML_WARNING_NO_TEMPLATE(name='article', file=md_fname) == out

    print "generate fresh html without template"
    # Second run with an up-to-date output file must be skipped.
    with exe.cd('sample_blog'):
        out = exe.go('html article.md')
        Q(INNER_HTML) == exe.read('article.inner.html')
        SKIP_FRESH(name='article') == out

    print "generate fresh html with --always parameter without template"
    with exe.cd('sample_blog'):
        out = exe.go('html article.md --always')
        Q(INNER_HTML) == exe.read('article.inner.html')
        HTML_WARNING_NO_TEMPLATE(name='article', file=md_fname) == out

    ###########################################
    print "******* TEMPLATED HTML *******"
    TEMPLATE_BODY = Q("""\
<html>
<head>
<title>{{title}}</title>
</head>
<body>
<h1>{{title}}</h1>
<p>Slug: <em>{{slug}}</em></p>
<p>Labels: <em>
{% for label in labels %}
{{label}},
{% endfor %}
<hr>
{{inner}}
</body>
</html>
""")
    # Render the same template locally to predict the tool's output.
    env = jinja2.Environment(loader=jinja2.DictLoader(
        {"template": TEMPLATE_BODY}))
    TEMPLATE = env.get_template("template")

    print "setup project template"
    with exe.cd('sample_blog'):
        exe.mkdir('template')
        exe.write('template/templ.html', TEMPLATE_BODY)
        out = exe.go('info --template template/templ.html')
        USER_UPDATED == out

    print "info of filled project"
    with exe.cd('sample_blog'):
        out = exe.go('info')
        USER_INFO(email=email, blogid=blogid,
                  dir='template', file='templ.html') == out

    print ("generate html with template without title and slug, "
           "slug derived from name")
    with exe.cd('sample_blog'):
        out = exe.go('html article.md --always')
        inner = exe.read('article.inner.html')
        INNER_HTML == inner
        TEMPLATE.render(title='', inner=inner, slug='article',
                        labels=[]) == exe.read('article.html')
        HTML(name='article') == out

    #########################
    print "******* TEMPLATED HTML WITH METAINFO **********"
    TXT2 = Q("""\
Title: Post Title
Slug: second-post
Labels: sample, other

Text of second article
""")
    INNER_HTML2 = '<p>Text of second article</p>'
    # NOTE(review): INNER_HTML2 is defined but the checks below compare
    # against INNER_HTML — possibly a latent bug in this script; confirm.

    print "add second post in subfolder"
    exe.mkdir('sample_blog/sub')
    with exe.cd('sample_blog/sub'):
        exe.write('second.md', TXT2)
        out = exe.go('add second.md --show-traceback')
        ADD(name='sub/second', file='sub/second.md') == out

    print "generate html with template with full metadata"
    with exe.cd('sample_blog/sub'):
        out = exe.go('html second --force', 0)
        inner = exe.read('second.inner.html')
        INNER_HTML == inner
        TEMPLATE.render(title='Post Title', inner=inner, slug='second-post',
                        labels='other, sample') == exe.read('second.html')
        HTML(name='sub/second') == out

    #########################
    print "******** LABELS ***********"
    LABEL = Q('INFO Labels for post {name!q}: {labels}')
    LABEL_UPDATE = Q('INFO Updated labels for post {name!q}: {labels}')

    print "show empty labels for article.md"
    with exe.cd('sample_blog'):
        out = exe.go('label article')
        LABEL(name='article', labels=None) == out

    print "show empty labels for article.md from sub folder"
    with exe.cd('sample_blog/sub'):
        out = exe.go('label ../article')
        LABEL(name='article', labels=None) == out

    print "show labels for sub/second.md"
    with exe.cd('sample_blog'):
        out = exe.go('label sub/second')
        LABEL(name='sub/second', labels='other, sample') == out

    print "show labels for sub/second.md from sub folder"
    with exe.cd('sample_blog/sub'):
        out = exe.go('label second')
        LABEL(name='sub/second', labels='other, sample') == out

    print "add label for article.md"
    with exe.cd('sample_blog'):
        out = exe.go('label article --add "a, b"')
        LABEL_UPDATE(name='article', labels='a, b') == out

    print "...check"
    with exe.cd('sample_blog'):
        out = exe.go('label article')
        LABEL(name='article', labels='a, b') == out

    print "remove label from article.md"
    with exe.cd('sample_blog'):
        out = exe.go('label article --rm "a"')
        LABEL_UPDATE(name='article', labels='b') == out

    print "...check"
    with exe.cd('sample_blog'):
        out = exe.go('label article')
        LABEL(name='article', labels='b') == out

    print "set label for article.md"
    with exe.cd('sample_blog'):
        out = exe.go('label article --set a')
        LABEL_UPDATE(name='article', labels='a') == out

    print "...check"
    with exe.cd('sample_blog'):
        out = exe.go('label article')
        LABEL(name='article', labels='a') == out

    #################################
    print "*********** POST **************"
    POST_SHOW = Q("""\
INFO Post {changed}{name!q}
title: {title}
link: {link}
slug: {slug}
labels: {labels}
postid: {postid}
published: {published}
updated: {updated}
localstamp: now
""")
    LOCALSTAMP = "(?P<localstamp>.+?)$"
    POST_UPDATE = Q('INFO Post {name} updated.')
    POST_UPDATE_WARNING = Q("WARNING Skip title modification for {name}")

    print 'show post article'
    with exe.cd('sample_blog'):
        out = exe.go('post article')
        POST_SHOW(name='article',
                  title='',
                  changed='[*]',  # screen * regexp spec symbol
                  link='',
                  slug='article',
                  labels='a',
                  postid='').match(out)

    print 'set title for post article'
    with exe.cd('sample_blog'):
        out = exe.go('post article --title "New Title" --show-traceback')
        POST_UPDATE(name='article') == out

    print '...check'
    with exe.cd('sample_blog'):
        out = exe.go('post article')
        POST_SHOW(name='article',
                  title='New Title',
                  changed='[*]',  # screen * regexp spec symbol
                  link='',
                  slug='article',
                  labels='a',
                  postid='').match(out)

    print 'cannot change existing title for post article'
    with exe.cd('sample_blog'):
        out = exe.go('post article --title "New Title 2"')
        POST_UPDATE_WARNING(name='article') == out

    print '...check'
    with exe.cd('sample_blog'):
        out = exe.go('post article')
        POST_SHOW(name='article',
                  title='New Title',
                  changed='[*]',  # screen * regexp spec symbol
                  link='',
                  slug='article',
                  labels='a',
                  postid='').match(out)

    print 'change existing title for post article with --force'
    with exe.cd('sample_blog'):
        out = exe.go('post article --title "New Title 2" --force')
        POST_UPDATE(name='article') == out

    print '...check'
    with exe.cd('sample_blog'):
        out = exe.go('post article')
        POST_SHOW(name='article',
                  title='New Title 2',
                  changed='[*]',  # screen * regexp spec symbol
                  link='',
                  slug='article',
                  labels='a',
                  postid='').match(out)

    #################################
    print "************ LS *************"
    print "ls of project root"
    # '*' marks posts with local modifications not yet published.
    with exe.cd('sample_blog'):
        out = exe.go('ls')
        Q("""\
INFO Posts:
*article
*sub/second
""") == out

    print "ls of sample/sub"
    with exe.cd('sample_blog/sub'):
        out = exe.go('ls')
        Q("""\
INFO Posts:
*sub/second
""") == out

    ###############################
    PUBLISH = Q("INFO Post {name!q} published as (?P<link>.+) "
                r"[[](?P<postid>\d+)]")
    print "*********** publish ************"
    print "publish article"
    with exe.cd('sample_blog'):
        out = exe.go('publish article')
        ret = PUBLISH(name='article').match(out)
        # Remember the server-assigned link/postid for later checks.
        link1 = ret['link']
        postid1 = ret['postid']
        slug1 = os.path.splitext(os.path.basename(link1))[0]

    print "...check"
    with exe.cd('sample_blog'):
        out = exe.go('ls')
        Q("""\
INFO Posts:
article
*sub/second
""") == out

    print '...check'
    with exe.cd('sample_blog'):
        out = exe.go('post article')
        POST_SHOW(name='article',
                  title='New Title 2',
                  changed='',
                  link=link1,
                  slug=slug1,
                  labels='a',
                  published='now',
                  updated='now',
                  postid=postid1).match(out)

    print '...check'
    with exe.cd('sample_blog'):
        out = exe.go('rls --show-traceback')
        tst = Q("{title!q} -> {link}")(title="New Title 2", link=link1)
        last_record = out.splitlines()[2].strip()
        tst == last_record

    ####
    print "publish sub/second"
    with exe.cd('sample_blog/sub'):
        out = exe.go('publish second')
        ret = PUBLISH(name='sub/second').match(out)
        link2 = ret['link']
        postid2 = ret['postid']
        slug2 = os.path.splitext(os.path.basename(link2))[0]

    print "...check"
    with exe.cd('sample_blog'):
        out = exe.go('ls')
        Q("""\
INFO Posts:
article
sub/second
""") == out

    print '...check'
    with exe.cd('sample_blog/sub'):
        out = exe.go('post second')
        POST_SHOW(name='sub/second',
                  title='Post Title',
                  changed='',
                  link=link2,
                  slug=slug2,
                  labels='other, sample',
                  published='now',
                  updated='now',
                  postid=postid2).match(out)

    print '...check'
    with exe.cd('sample_blog/sub'):
        out = exe.go('rls --show-traceback')
        tst = Q("{title!q} -> {link}")(title="Post Title", link=link2)
        last_record = out.splitlines()[2].strip()
        tst == last_record

    ######################
    print "************ RM *****************"
    RM = Q("INFO Remove {name!q}")
    print 'rm article and sub/second'
    with exe.cd('sample_blog/sub'):
        out = exe.go('rm ../article')
        RM(name='article') == out
        out = exe.go('rm second')
        RM(name='sub/second') == out

    with exe.cd('sample_blog'):
        out = exe.go('ls')
        Q("INFO No posts") == out

    ######################
    print "************ LINK *****************"
    LINK = Q("INFO Post {name!q} connected to {link}")
    # After re-linking, 'published' may no longer be exactly 'now'.
    published = r"(?P<published>now|\d+ minutes ago)"

    print 'add removed files'
    with exe.cd('sample_blog/sub'):
        exe.go('add ../article.md')
        exe.go('add second.md')

    print 'link article'
    with exe.cd('sample_blog/sub'):
        out = exe.go('link ../article %s' % link1)
        LINK(name='article', link=link1) == out

    print '...check'
    with exe.cd('sample_blog'):
        out = exe.go('post article')
        POST_SHOW(name='article',
                  title='New Title 2',
                  changed='',
                  link=link1,
                  slug=slug1,
                  labels='a',
                  published=published,
                  updated='now',
                  postid=postid1).match(out)

    print 'link sub/second'
    with exe.cd('sample_blog/sub'):
        out = exe.go('link second %s' % link2)
        LINK(name='sub/second', link=link2) == out

    print '...check'
    with exe.cd('sample_blog/sub'):
        out = exe.go('post second')
        POST_SHOW(name='sub/second',
                  title='Post Title',
                  changed='',
                  link=link2,
                  slug=slug2,
                  labels='other, sample',
                  published=published,
                  updated='now',
                  postid=postid2).match(out)

    print "**************** DIFF and PUSH **************"
    TXT3 = Q("""\
Title: Post Title
Slug: {slug}
Labels: sample, other

Text of second article.
Modified version.
""")(slug=slug2)
    DIFF = Q("""\
INFO Generate html for sub/second
INFO Difference:
--- {link}
+++ {inner}
@@ -1,1 +1,2 @@
-<p>Text of second article</p>
+<p>Text of second article.
+Modified version.</p>
""")
    PUSH = Q("INFO Post {name!q} updated")

    print "modify and diff"
    with exe.cd('sample_blog/sub'):
        exe.write('second.md', TXT3)
        out = exe.go('diff second --force')
        DIFF(link=link2 + ' ', inner='sub/second.inner.html ') == out

    print "push"
    with exe.cd('sample_blog/sub'):
        out = exe.go('push second')
        PUSH(name='sub/second') == out

    print "check diff again"
    # After pushing, local and remote content must match again.
    with exe.cd('sample_blog/sub'):
        out = exe.go('diff second')
        Q("INFO No differences") == out
import pytest
from mock import Mock, patch, call

from ramses import generators
from .fixtures import engine_mock, config_mock


class TestHelperFunctions(object):
    """Unit tests for the module-level helpers of ramses.generators.

    Each test patches ``get_static_parent`` (and, where needed,
    ``generate_resource``) so no real RAML parsing happens.
    """

    @patch.object(generators, 'get_static_parent')
    def test_get_nefertari_parent_resource_no_parent(self, mock_get):
        # No static parent at all -> the supplied default (3rd arg) wins.
        mock_get.return_value = None
        result = generators._get_nefertari_parent_resource(1, 2, 3)
        assert result == 3
        mock_get.assert_called_once_with(1)

    @patch.object(generators, 'get_static_parent')
    def test_get_nefertari_parent_resource_parent_not_defined(
            self, mock_get):
        # Parent exists but its path is absent from the mapping.
        mock_get.return_value = Mock(path='foo')
        result = generators._get_nefertari_parent_resource(1, {}, 3)
        assert result == 3
        mock_get.assert_called_once_with(1)

    @patch.object(generators, 'get_static_parent')
    def test_get_nefertari_parent_resource_parent_defined(
            self, mock_get):
        # Parent path present in the mapping -> mapped resource is used.
        mock_get.return_value = Mock(path='foo')
        result = generators._get_nefertari_parent_resource(
            1, {'foo': 'bar'}, 3)
        assert result == 'bar'
        mock_get.assert_called_once_with(1)

    @patch.object(generators, 'generate_resource')
    def test_generate_server_no_resources(self, mock_gen):
        # A RAML root without resources must not generate anything.
        generators.generate_server(Mock(resources=None), 'foo')
        assert not mock_gen.called

    @patch.object(generators, '_get_nefertari_parent_resource')
    @patch.object(generators, 'generate_resource')
    def test_generate_server_resources_generated(
            self, mock_gen, mock_get):
        # Two distinct paths -> one generate_resource call for each.
        config = Mock()
        first = Mock(path='/foo')
        second = Mock(path='/bar')
        resources = [first, second]
        generators.generate_server(Mock(resources=resources), config)
        assert mock_get.call_count == 2
        expected = [
            call(config, first, mock_get()),
            call(config, second, mock_get()),
        ]
        mock_gen.assert_has_calls(expected)

    @patch.object(generators, '_get_nefertari_parent_resource')
    @patch.object(generators, 'generate_resource')
    def test_generate_server_call_per_path(
            self, mock_gen, mock_get):
        # Duplicate paths collapse to a single generated resource.
        config = Mock()
        resources = [
            Mock(path='/foo'),
            Mock(path='/foo'),
        ]
        generators.generate_server(Mock(resources=resources), config)
        assert mock_get.call_count == 1
        mock_gen.assert_called_once_with(config, resources[0], mock_get())
@pytest.mark.usefixtures('engine_mock')
class TestGenerateModels(object):
    """Tests for ``generators.generate_models``.

    Verifies which RAML resources actually trigger model generation and
    how an auth model is registered on the config.
    """

    @patch('ramses.generators.is_dynamic_uri')
    def test_no_resources(self, mock_dyn):
        # Empty resource list -> nothing is even inspected.
        generators.generate_models(config=1, raml_resources=[])
        assert not mock_dyn.called

    @patch('ramses.models.handle_model_generation')
    def test_dynamic_uri(self, mock_handle):
        # Dynamic (item) URIs like /{id} do not define models.
        generators.generate_models(
            config=1, raml_resources=[Mock(path='/{id}')])
        assert not mock_handle.called

    @patch('ramses.models.handle_model_generation')
    def test_no_post_resources(self, mock_handle):
        # Only POST resources define models; other HTTP methods are ignored.
        generators.generate_models(config=1, raml_resources=[
            Mock(path='/stories', method='get'),
            Mock(path='/stories', method='options'),
            Mock(path='/stories', method='patch'),
        ])
        assert not mock_handle.called

    @patch('ramses.generators.attr_subresource')
    @patch('ramses.models.handle_model_generation')
    def test_attr_subresource(self, mock_handle, mock_attr):
        # Attribute subresources belong to the parent model -> no new model.
        mock_attr.return_value = True
        resource = Mock(path='/stories', method='POST')
        generators.generate_models(config=1, raml_resources=[resource])
        assert not mock_handle.called
        mock_attr.assert_called_once_with(resource, 'stories')

    @patch('ramses.generators.attr_subresource')
    @patch('ramses.models.handle_model_generation')
    def test_non_auth_model(self, mock_handle, mock_attr):
        # handle_model_generation returns (model, is_auth_model); when the
        # flag is False the registry's auth_model must not be replaced.
        mock_attr.return_value = False
        mock_handle.return_value = ('Foo', False)
        config = Mock()
        resource = Mock(path='/stories', method='POST')
        generators.generate_models(
            config=config, raml_resources=[resource])
        mock_attr.assert_called_once_with(resource, 'stories')
        mock_handle.assert_called_once_with(config, resource, 'stories')
        assert config.registry.auth_model != 'Foo'

    @patch('ramses.generators.attr_subresource')
    @patch('ramses.models.handle_model_generation')
    def test_auth_model(self, mock_handle, mock_attr):
        # When the auth flag is True, the generated model is stored as
        # config.registry.auth_model.
        mock_attr.return_value = False
        mock_handle.return_value = ('Foo', True)
        config = Mock()
        resource = Mock(path='/stories', method='POST')
        generators.generate_models(
            config=config, raml_resources=[resource])
        mock_attr.assert_called_once_with(resource, 'stories')
        mock_handle.assert_called_once_with(config, resource, 'stories')
        assert config.registry.auth_model == 'Foo'


class TestGenerateResource(object):
    """Tests for ``generators.generate_resource``."""

    def test_dynamic_root_parent(self):
        # A dynamic URI directly under the root is invalid and must raise.
        raml_resource = Mock(path='/foobar/{id}')
        parent_resource = Mock(is_root=True)
        config = config_mock()
        with pytest.raises(Exception) as ex:
            generators.generate_resource(
                config, raml_resource, parent_resource)
        expected = ("Top-level resources can't be dynamic and must "
                    "represent collections instead")
        assert str(ex.value) == expected

    def test_dynamic_not_root_parent(self):
        # Dynamic URIs below the root are silently skipped (handled by the
        # collection resource) and yield no new resource.
        raml_resource = Mock(path='/foobar/{id}')
        parent_resource = Mock(is_root=False)
        config = config_mock()
        new_resource = generators.generate_resource(
            config, raml_resource, parent_resource)
        assert new_resource is None

    @patch('ramses.generators.dynamic_part_name')
    @patch('ramses.generators.singular_subresource')
    @patch('ramses.generators.attr_subresource')
    @patch('ramses.models.get_existing_model')
    @patch('ramses.generators.generate_acl')
    @patch('ramses.generators.resource_view_attrs')
    @patch('ramses.generators.generate_rest_view')
    def test_full_run(
            self, generate_view, view_attrs, generate_acl, get_model,
            attr_res, singular_res, mock_dyn):
        # Happy path for a plain collection resource: model is looked up by
        # singularized/camelized name ('Story'), ACL and view generated, and
        # the child is added to the parent with an id_name.
        mock_dyn.return_value = 'fooid'
        model_cls = Mock()
        model_cls.pk_field.return_value = 'my_id'
        attr_res.return_value = False
        singular_res.return_value = False
        get_model.return_value = model_cls
        raml_resource = Mock(path='/stories')
        parent_resource = Mock(is_root=False, uid=1)
        config = config_mock()

        res = generators.generate_resource(
            config, raml_resource, parent_resource)

        get_model.assert_called_once_with('Story')
        generate_acl.assert_called_once_with(
            config, model_cls=model_cls,
            raml_resource=raml_resource)
        mock_dyn.assert_called_once_with(
            raml_resource=raml_resource,
            clean_uri='stories', pk_field='my_id')
        view_attrs.assert_called_once_with(raml_resource, False)
        generate_view.assert_called_once_with(
            config,
            model_cls=model_cls,
            attrs=view_attrs(),
            attr_view=False,
            singular=False
        )
        parent_resource.add.assert_called_once_with(
            'story', 'stories',
            id_name='fooid',
            factory=generate_acl(),
            view=generate_view()
        )
        assert res == parent_resource.add()

    @patch('ramses.generators.dynamic_part_name')
    @patch('ramses.generators.singular_subresource')
    @patch('ramses.generators.attr_subresource')
    @patch('ramses.models.get_existing_model')
    @patch('ramses.generators.generate_acl')
    @patch('ramses.generators.resource_view_attrs')
    @patch('ramses.generators.generate_rest_view')
    def test_full_run_singular(
            self, generate_view, view_attrs, generate_acl, get_model,
            attr_res, singular_res, mock_dyn):
        # Singular subresource: reuses the parent view's Model, no dynamic
        # part, and the child is added without the collection name/id_name.
        mock_dyn.return_value = 'fooid'
        model_cls = Mock()
        model_cls.pk_field.return_value = 'my_id'
        attr_res.return_value = False
        singular_res.return_value = True
        get_model.return_value = model_cls
        raml_resource = Mock(path='/stories')
        parent_resource = Mock(is_root=False, uid=1)
        parent_resource.view.Model.pk_field.return_value = 'other_id'
        config = config_mock()

        res = generators.generate_resource(
            config, raml_resource, parent_resource)

        get_model.assert_called_once_with('Story')
        generate_acl.assert_called_once_with(
            config, model_cls=parent_resource.view.Model,
            raml_resource=raml_resource)
        assert not mock_dyn.called
        view_attrs.assert_called_once_with(raml_resource, True)
        generate_view.assert_called_once_with(
            config,
            model_cls=parent_resource.view.Model,
            attrs=view_attrs(),
            attr_view=False,
            singular=True
        )
        parent_resource.add.assert_called_once_with(
            'story',
            factory=generate_acl(),
            view=generate_view()
        )
        assert res == parent_resource.add()
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings

from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling

from ... import models as _models

T = TypeVar('T')
# Optional callback applied to every successful response:
# (pipeline_response, deserialized_model, response_headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]

class ApplicationTypesOperations:
    """ApplicationTypesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~service_fabric_managed_clusters_management_client.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def get(
        self,
        resource_group_name: str,
        cluster_name: str,
        application_type_name: str,
        **kwargs
    ) -> "_models.ApplicationTypeResource":
        """Gets a Service Fabric managed application type name resource.

        Get a Service Fabric application type name resource created or in the process of being created
        in the Service Fabric managed cluster resource.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cluster_name: The name of the cluster resource.
        :type cluster_name: str
        :param application_type_name: The name of the application type name resource.
        :type application_type_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationTypeResource, or the result of cls(response)
        :rtype: ~service_fabric_managed_clusters_management_client.models.ApplicationTypeResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationTypeResource"]
        # Map auth/conflict status codes to specific exception types; a
        # caller-supplied 'error_map' can extend or override these.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-05-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
            'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ApplicationTypeResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/managedclusters/{clusterName}/applicationTypes/{applicationTypeName}'}  # type: ignore

    async def create_or_update(
        self,
        resource_group_name: str,
        cluster_name: str,
        application_type_name: str,
        parameters: "_models.ApplicationTypeResource",
        **kwargs
    ) -> "_models.ApplicationTypeResource":
        """Creates or updates a Service Fabric managed application type name resource.

        Create or update a Service Fabric managed application type name resource with the specified
        name.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cluster_name: The name of the cluster resource.
        :type cluster_name: str
        :param application_type_name: The name of the application type name resource.
        :type application_type_name: str
        :param parameters: The application type name resource.
        :type parameters: ~service_fabric_managed_clusters_management_client.models.ApplicationTypeResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationTypeResource, or the result of cls(response)
        :rtype: ~service_fabric_managed_clusters_management_client.models.ApplicationTypeResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationTypeResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-05-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.create_or_update.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
            'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        # Serialize the full resource into the PUT body.
        body_content = self._serialize.body(parameters, 'ApplicationTypeResource')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ApplicationTypeResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/managedclusters/{clusterName}/applicationTypes/{applicationTypeName}'}  # type: ignore

    async def update(
        self,
        resource_group_name: str,
        cluster_name: str,
        application_type_name: str,
        parameters: "_models.ApplicationTypeUpdateParameters",
        **kwargs
    ) -> "_models.ApplicationTypeResource":
        """Updates the tags of an application type resource of a given managed cluster.

        Updates the tags of an application type resource of a given managed cluster.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cluster_name: The name of the cluster resource.
        :type cluster_name: str
        :param application_type_name: The name of the application type name resource.
        :type application_type_name: str
        :param parameters: The application type resource updated tags.
        :type parameters: ~service_fabric_managed_clusters_management_client.models.ApplicationTypeUpdateParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationTypeResource, or the result of cls(response)
        :rtype: ~service_fabric_managed_clusters_management_client.models.ApplicationTypeResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationTypeResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-05-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.update.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
            'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        # PATCH only carries the updated tags, not the full resource.
        body_content = self._serialize.body(parameters, 'ApplicationTypeUpdateParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ApplicationTypeResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/managedclusters/{clusterName}/applicationTypes/{applicationTypeName}'}  # type: ignore

    async def _delete_initial(
        self,
        resource_group_name: str,
        cluster_name: str,
        application_type_name: str,
        **kwargs
    ) -> None:
        # Fire the initial DELETE request of the long-running operation;
        # polling to completion is handled by begin_delete.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-05-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
            'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 202 = accepted (async deletion in progress), 204 = already gone.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/managedclusters/{clusterName}/applicationTypes/{applicationTypeName}'}  # type: ignore

    async def begin_delete(
        self,
        resource_group_name: str,
        cluster_name: str,
        application_type_name: str,
        **kwargs
    ) -> AsyncLROPoller[None]:
        """Deletes a Service Fabric managed application type name resource.

        Delete a Service Fabric managed application type name resource with the specified name.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cluster_name: The name of the cluster resource.
        :type cluster_name: str
        :param application_type_name: The name of the application type name resource.
        :type application_type_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial DELETE when not resuming from a saved poller
        # state; raw_result is therefore defined exactly when it is used below.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                cluster_name=cluster_name,
                application_type_name=application_type_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
            'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/managedclusters/{clusterName}/applicationTypes/{applicationTypeName}'}  # type: ignore

    def list(
        self,
        resource_group_name: str,
        cluster_name: str,
        **kwargs
    ) -> AsyncIterable["_models.ApplicationTypeResourceList"]:
        """Gets the list of application type name resources created in the specified Service Fabric
        managed cluster resource.

        Gets all application type name resources created or in the process of being created in the
        Service Fabric managed cluster resource.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cluster_name: The name of the cluster resource.
        :type cluster_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ApplicationTypeResourceList or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~service_fabric_managed_clusters_management_client.models.ApplicationTypeResourceList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationTypeResourceList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-05-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # First page: build the URL from the operation template.
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Subsequent pages: the service-provided nextLink is already
                # a complete URL, so no query parameters are added.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize('ApplicationTypeResourceList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/managedclusters/{clusterName}/applicationTypes'}  # type: ignore
import chainer
import chainerx

from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
from chainerx_tests import op_utils


# Valid input dtypes for the n-step RNN ops under test: floats only.
n_step_lstm_dtypes_valid = dtype_utils._permutate_dtype_mapping([
    # Floats.
    (('float16', ), ()),
    (('float32', ), ()),
    (('float64', ), ()),
])


@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
    chainer.testing.product([
        chainer.testing.from_pytest_parameterize(
            'n_layers,hidden_size,input_size,batches', [
                (2, 2, 1, (1, 1, 1)),
                (2, 2, 3, (3, 2, 1)),
                (3, 8, 4, (4, 2, 1)),
                (4, 12, 4, (4, 3, 2)),
            ]),
        chainer.testing.from_pytest_parameterize(
            'in_dtypes, out_dtype', n_step_lstm_dtypes_valid)
    ])
))
class TestNStepLstm(op_utils.ChainerOpTest):
    """Compares chainerx.n_step_lstm against chainer.functions.n_step_lstm.

    Inputs (h, c, xs, and per-layer weights/biases) are flattened into one
    tuple for the test harness and re-sliced in process_input; the slicing
    arithmetic must stay in sync with generate_inputs.
    """

    def setup(self):
        self.check_forward_options.update({'rtol': 1e-2, 'atol': 1e-2})
        self.check_backward_options.update({'rtol': 1e-2, 'atol': 1e-2})
        self.check_double_backward_options.update({'rtol': 5e-3, 'atol': 5e-2})
        # float16 accumulates much more error -> loosen all tolerances.
        if self.in_dtypes[0] == 'float16':
            self.check_forward_options.update({'rtol': 1e-1, 'atol': 1e-1})
            self.check_backward_options.update({'rtol': 1e-1, 'atol': 1e-1})
            self.check_double_backward_options.update(
                {'rtol': 1e-1, 'atol': 1e-1})
        device = chainerx.get_default_device()
        # On CUDA only float32 gradients are checked here.
        if device.backend.name == 'cuda':
            if self.in_dtypes[0] != 'float32':
                self.skip_backward_test = True
                self.skip_double_backward_test = True

    def generate_inputs(self):
        h_shape = (self.n_layers, self.batches[0], self.hidden_size)
        dtype = self.in_dtypes[0]
        h = array_utils.uniform(h_shape, dtype)
        c = array_utils.uniform(h_shape, dtype)
        in_size = self.input_size
        out_size = self.hidden_size
        xs = [array_utils.uniform((self.batches[b], in_size), dtype)
              for b in range(len(self.batches))]

        def w_in(i, j):
            # First 4 of the 8 weights in layer 0 consume the layer input;
            # all others consume the hidden state.
            return in_size if i == 0 and j < 4 else out_size

        inputs = []
        inputs.append(h)
        inputs.append(c)
        for i in range(len(self.batches)):
            inputs.append(xs[i])
        # Per layer: 8 weight matrices followed by 8 bias vectors.
        for n in range(self.n_layers):
            for i in range(8):
                inputs.append(array_utils.uniform(
                    (out_size, w_in(n, i)), dtype))
            for i in range(8):
                inputs.append(array_utils.uniform((out_size,), dtype))
        return tuple(inputs)

    def process_input(self, inputs):
        # Inverse of generate_inputs: re-slice the flat tuple.
        h = inputs[0]
        c = inputs[1]
        xs = inputs[2:2 + len(self.batches)]
        ws = []
        bs = []
        index = 2 + len(self.batches)
        for n in range(self.n_layers):
            ws.append(inputs[index: index + 8])
            bs.append(inputs[index + 8: index + 16])
            index += 16
        return h, c, ws, bs, xs

    def forward_chainerx(self, inputs):
        h, c, ws, bs, xs = self.process_input(inputs)
        out = chainerx.n_step_lstm(self.n_layers, h, c, ws, bs, xs)
        rets = []
        rets.append(out[0])
        rets.append(out[1])
        for i in range(len(out[2])):
            rets.append(out[2][i])
        return tuple(rets)

    def forward_chainer(self, inputs):
        h, c, ws, bs, xs = self.process_input(inputs)
        # Reference implementation; dropout ratio fixed at 0.0.
        out = chainer.functions.n_step_lstm(
            self.n_layers, 0.0, h, c, ws, bs, xs)
        rets = []
        rets.append(out[0])
        rets.append(out[1])
        for i in range(len(out[2])):
            rets.append(out[2][i])
        return tuple(rets)


@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
    chainer.testing.product([
        chainer.testing.from_pytest_parameterize(
            'n_layers,hidden_size,input_size,batches', [
                (1, 2, 1, (1, 1, 1)),
                (2, 6, 8, (4, 2, 2)),
                (3, 8, 4, (4, 2, 1)),
                (4, 12, 4, (4, 3, 2)),
            ]),
        chainer.testing.from_pytest_parameterize(
            'in_dtypes,out_dtype', n_step_lstm_dtypes_valid)
    ])
))
class TestNStepBiLstm(op_utils.ChainerOpTest):
    """Bidirectional variant: two weight/bias groups (fwd + bwd) per layer,
    and hidden states of shape (n_layers * 2, batch, hidden_size)."""

    def setup(self):
        self.check_forward_options.update({'rtol': 1e-2, 'atol': 1e-2})
        self.check_backward_options.update({'rtol': 1e-2, 'atol': 1e-2})
        self.check_double_backward_options.update({'rtol': 5e-3, 'atol': 5e-2})
        if self.in_dtypes[0] == 'float16':
            self.check_forward_options.update({'rtol': 1e-1, 'atol': 1e-1})
            self.check_backward_options.update({'rtol': 1e-1, 'atol': 1e-1})
            self.check_double_backward_options.update(
                {'rtol': 1e-1, 'atol': 1e-1})
        device = chainerx.get_default_device()
        if device.backend.name == 'cuda':
            if self.in_dtypes[0] != 'float32':
                self.skip_backward_test = True
                self.skip_double_backward_test = True

    def generate_inputs(self):
        # Factor 2 in the layer axis for the two directions.
        h_shape = (self.n_layers * 2, self.batches[0], self.hidden_size)
        dtype = self.in_dtypes[0]
        h = array_utils.uniform(h_shape, dtype)
        c = array_utils.uniform(h_shape, dtype)
        in_size = self.input_size
        out_size = self.hidden_size
        xs = [array_utils.uniform((self.batches[b], in_size), dtype)
              for b in range(len(self.batches))]

        def w_in(i, j):
            if i == 0 and j < 4:
                return in_size
            elif i > 0 and j < 4:
                # Upper layers receive the concatenated fwd+bwd outputs.
                return out_size * 2
            else:
                return out_size

        inputs = []
        inputs.append(h)
        inputs.append(c)
        for i in range(len(self.batches)):
            inputs.append(xs[i])
        for n in range(self.n_layers):
            for direction in (0, 1):
                for i in range(8):
                    inputs.append(array_utils.uniform(
                        (out_size, w_in(n, i)), dtype))
                for i in range(8):
                    inputs.append(array_utils.uniform((out_size,), dtype))
        return tuple(inputs)

    def process_input(self, inputs):
        h = inputs[0]
        c = inputs[1]
        xs = inputs[2:2 + len(self.batches)]
        ws = []
        bs = []
        index = 2 + len(self.batches)
        # 32 entries per layer: (8 ws + 8 bs) x 2 directions.
        for n in range(self.n_layers):
            ws.append(inputs[index: index + 8])
            bs.append(inputs[index + 8: index + 16])
            ws.append(inputs[index + 16: index + 24])
            bs.append(inputs[index + 24: index + 32])
            index += 32
        return h, c, ws, bs, xs

    def forward_chainerx(self, inputs):
        h, c, ws, bs, xs = self.process_input(inputs)
        out = chainerx.n_step_bilstm(self.n_layers, h, c, ws, bs, xs)
        rets = []
        rets.append(out[0])
        rets.append(out[1])
        for i in range(len(out[2])):
            rets.append(out[2][i])
        return tuple(rets)

    def forward_chainer(self, inputs):
        h, c, ws, bs, xs = self.process_input(inputs)
        out = chainer.functions.n_step_bilstm(
            self.n_layers, 0.0, h, c, ws, bs, xs)
        rets = []
        rets.append(out[0])
        rets.append(out[1])
        for i in range(len(out[2])):
            rets.append(out[2][i])
        return tuple(rets)


@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
    chainer.testing.product([
        chainer.testing.from_pytest_parameterize(
            'n_layers,hidden_size,input_size,batches', [
                (2, 2, 1, (1, 1, 1)),
                (2, 2, 3, (3, 2, 1)),
                (3, 8, 4, (4, 2, 1)),
                (4, 6, 4, (4, 3, 2)),
            ]),
        chainer.testing.from_pytest_parameterize(
            'in_dtypes, out_dtype', n_step_lstm_dtypes_valid)
    ])
))
class TestNStepGru(op_utils.ChainerOpTest):
    """GRU variant: no cell state, and 6 weights/biases per layer
    (3 gates x input/hidden) instead of the LSTM's 8."""

    def setup(self):
        self.check_forward_options.update({'rtol': 1e-2, 'atol': 1e-2})
        self.check_backward_options.update({'rtol': 1e-2, 'atol': 1e-2})
        self.check_double_backward_options.update({'rtol': 5e-3, 'atol': 5e-2})
        if self.in_dtypes[0] == 'float16':
            self.check_forward_options.update({'rtol': 1e-1, 'atol': 1e-1})
            self.check_backward_options.update({'rtol': 1e-1, 'atol': 1e-1})
            self.check_double_backward_options.update(
                {'rtol': 1e-1, 'atol': 1e-1})
        device = chainerx.get_default_device()
        if device.backend.name == 'cuda':
            if self.in_dtypes[0] != 'float32':
                self.skip_backward_test = True
                self.skip_double_backward_test = True

    def generate_inputs(self):
        h_shape = (self.n_layers, self.batches[0], self.hidden_size)
        dtype = self.in_dtypes[0]
        h = array_utils.uniform(h_shape, dtype)
        in_size = self.input_size
        out_size = self.hidden_size
        xs = [array_utils.uniform((self.batches[b], in_size), dtype)
              for b in range(len(self.batches))]

        def w_in(i, j):
            # First 3 weights of layer 0 take the layer input.
            return in_size if i == 0 and j < 3 else out_size

        inputs = []
        inputs.append(h)
        for i in range(len(self.batches)):
            inputs.append(xs[i])
        for n in range(self.n_layers):
            for i in range(6):
                inputs.append(array_utils.uniform(
                    (out_size, w_in(n, i)), dtype))
            for i in range(6):
                inputs.append(array_utils.uniform((out_size,), dtype))
        return tuple(inputs)

    def process_input(self, inputs):
        h = inputs[0]
        xs = inputs[1:1 + len(self.batches)]
        ws = []
        bs = []
        index = 1 + len(self.batches)
        for n in range(self.n_layers):
            ws.append(inputs[index: index + 6])
            bs.append(inputs[index + 6: index + 12])
            index += 12
        return h, ws, bs, xs

    def forward_chainerx(self, inputs):
        h, ws, bs, xs = self.process_input(inputs)
        out = chainerx.n_step_gru(self.n_layers, h, ws, bs, xs)
        rets = []
        rets.append(out[0])
        for i in range(len(out[1])):
            rets.append(out[1][i])
        return tuple(rets)

    def forward_chainer(self, inputs):
        h, ws, bs, xs = self.process_input(inputs)
        out = chainer.functions.n_step_gru(
            self.n_layers, 0.0, h, ws, bs, xs)
        rets = []
        rets.append(out[0])
        for i in range(len(out[1])):
            rets.append(out[1][i])
        return tuple(rets)


@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
    chainer.testing.product([
        chainer.testing.from_pytest_parameterize(
            'n_layers,hidden_size,input_size,batches', [
                (2, 2, 1, (1, 1, 1)),
                (2, 2, 3, (3, 2, 1)),
                (3, 4, 4, (4, 2, 1)),
                (4, 5, 4, (4, 3, 2)),
            ]),
        chainer.testing.from_pytest_parameterize(
            'in_dtypes,out_dtype', n_step_lstm_dtypes_valid)
    ])
))
class TestNStepBiGRU(op_utils.ChainerOpTest):
    """Bidirectional GRU: two 6-weight groups per layer; upper layers see
    the concatenated fwd+bwd hidden outputs (out_size * 2)."""

    def setup(self):
        self.check_forward_options.update({'rtol': 1e-2, 'atol': 1e-2})
        self.check_backward_options.update({'rtol': 1e-2, 'atol': 1e-2})
        self.check_double_backward_options.update({'rtol': 5e-2, 'atol': 5e-2})
        if self.in_dtypes[0] == 'float16':
            self.check_forward_options.update({'rtol': 1e-1, 'atol': 1e-1})
            self.check_backward_options.update({'rtol': 1e-1, 'atol': 1e-1})
            self.check_double_backward_options.update(
                {'rtol': 1e-1, 'atol': 1e-1})
        device = chainerx.get_default_device()
        if device.backend.name == 'cuda':
            if self.in_dtypes[0] != 'float32':
                self.skip_backward_test = True
                self.skip_double_backward_test = True

    def generate_inputs(self):
        h_shape = (self.n_layers * 2, self.batches[0], self.hidden_size)
        dtype = self.in_dtypes[0]
        h = array_utils.uniform(h_shape, dtype)
        in_size = self.input_size
        out_size = self.hidden_size
        xs = [array_utils.uniform((self.batches[b], in_size), dtype)
              for b in range(len(self.batches))]

        def w_in(i, j):
            if i == 0 and j < 3:
                return in_size
            elif i > 0 and j < 3:
                return out_size * 2
            else:
                return out_size

        inputs = []
        inputs.append(h)
        for i in range(len(self.batches)):
            inputs.append(xs[i])
        for n in range(self.n_layers):
            for direction in (0, 1):
                for i in range(6):
                    inputs.append(array_utils.uniform(
                        (out_size, w_in(n, i)), dtype))
                for i in range(6):
                    inputs.append(array_utils.uniform((out_size,), dtype))
        return tuple(inputs)

    def process_input(self, inputs):
        h = inputs[0]
        xs = inputs[1:1 + len(self.batches)]
        ws = []
        bs = []
        index = 1 + len(self.batches)
        # 24 entries per layer: (6 ws + 6 bs) x 2 directions.
        for n in range(self.n_layers):
            ws.append(inputs[index: index + 6])
            bs.append(inputs[index + 6: index + 12])
            ws.append(inputs[index + 12: index + 18])
            bs.append(inputs[index + 18: index + 24])
            index += 24
        return h, ws, bs, xs

    def forward_chainerx(self, inputs):
        h, ws, bs, xs = self.process_input(inputs)
        out = chainerx.n_step_bigru(self.n_layers, h, ws, bs, xs)
        rets = []
        rets.append(out[0])
        for i in range(len(out[1])):
            rets.append(out[1][i])
        return tuple(rets)

    def forward_chainer(self, inputs):
        h, ws, bs, xs = self.process_input(inputs)
        out = chainer.functions.n_step_bigru(
            self.n_layers, 0.0, h, ws, bs, xs)
        rets = []
        rets.append(out[0])
        for i in range(len(out[1])):
            rets.append(out[1][i])
        return tuple(rets)


# NOTE(review): the class below is cut off at the end of this chunk; the
# remainder of its body lies outside the visible source and is left as-is.
@op_utils.op_test(['native:0', 'cuda:0'])
# ReLU activation is unstable around 0 but can seemingly not be dodged
# automatically.
@op_utils.fix_random()
@chainer.testing.parameterize(*(
    chainer.testing.product([
        chainer.testing.from_pytest_parameterize(
            'n_layers,hidden_size,input_size,batches,activation', [
                (2, 2, 1, (1, 1, 1), "tanh"),
                (2, 2, 1, (1, 1, 1), "relu"),
                (2, 2, 3, (3, 2, 1), "tanh"),
                (2, 2, 3, (3, 2, 1), "relu"),
                (3, 4, 4, (4, 2, 1), "tanh"),
                (3, 4, 4, (4, 2, 1), "relu"),
                (4, 5, 4, (4, 3, 2), "tanh"),
                (4, 5, 4, (4, 3, 2), "relu"),
            ]),
        chainer.testing.from_pytest_parameterize(
            'in_dtypes, out_dtype', n_step_lstm_dtypes_valid)
    ])
))
class TestNStepRNN(op_utils.ChainerOpTest):

    check_numpy_strides_compliance = False
    dodge_nondifferentiable = True

    def setup(self):
        self.check_forward_options.update({'rtol': 1e-2, 'atol': 1e-2})
        self.check_backward_options.update({'rtol': 1e-2, 'atol': 1e-2})
        self.check_double_backward_options.update({'rtol': 5e-2, 'atol': 5e-2})
        if self.in_dtypes[0] == 'float16':
            self.check_forward_options.update({'rtol': 1e-1, 'atol': 1e-1})
            self.check_backward_options.update({'rtol': 1e-1,
'atol': 1e-1}) self.check_double_backward_options.update({ 'rtol': 1e-1, 'atol': 1e-1}) device = chainerx.get_default_device() if device.backend.name == 'cuda': if self.in_dtypes[0] != 'float32': self.skip_forward_test = True self.skip_backward_test = True self.skip_double_backward_test = True def generate_inputs(self): h_shape = (self.n_layers, self.batches[0], self.hidden_size) dtype = self.in_dtypes[0] h = array_utils.uniform(h_shape, dtype) in_size = self.input_size out_size = self.hidden_size xs = [array_utils.uniform((self.batches[b], in_size), dtype) for b in range(len(self.batches))] def w_in(i, j): return in_size if i == 0 and j < 1 else out_size inputs = [] inputs.append(h) for i in range(len(self.batches)): inputs.append(xs[i]) for n in range(self.n_layers): for i in range(2): inputs.append(array_utils.uniform( (out_size, w_in(n, i)), dtype)) for i in range(2): inputs.append(array_utils.uniform((out_size,), dtype)) return tuple(inputs) def process_input(self, inputs): h = inputs[0] xs = inputs[1:1 + len(self.batches)] ws = [] bs = [] index = 1 + len(self.batches) for n in range(self.n_layers): ws.append(inputs[index: index + 2]) bs.append(inputs[index + 2: index + 4]) index += 4 return h, ws, bs, xs def forward_chainerx(self, inputs): h, ws, bs, xs = self.process_input(inputs) out = chainerx.n_step_rnn( self.n_layers, h, ws, bs, xs, self.activation) rets = [] rets.append(out[0]) for i in range(len(out[1])): rets.append(out[1][i]) return tuple(rets) def forward_chainer(self, inputs): h, ws, bs, xs = self.process_input(inputs) out = chainer.functions.n_step_rnn( self.n_layers, 0.0, h, ws, bs, xs, self.activation) rets = [] rets.append(out[0]) for i in range(len(out[1])): rets.append(out[1][i]) return tuple(rets) @op_utils.op_test(['native:0', 'cuda:0']) # ReLU activation is unstable around 0 but can seemingly not be dodged # automatically. 
@op_utils.fix_random() @chainer.testing.parameterize(*( chainer.testing.product([ chainer.testing.from_pytest_parameterize( 'n_layers,hidden_size,input_size,batches,activation', [ (2, 2, 1, (1, 1, 1), "tanh"), (2, 2, 1, (1, 1, 1), "relu"), (2, 2, 3, (3, 2, 1), "tanh"), (2, 2, 3, (3, 2, 1), "relu"), (3, 4, 4, (4, 2, 1), "tanh"), (3, 4, 4, (4, 2, 1), "relu"), ]), chainer.testing.from_pytest_parameterize( 'in_dtypes,out_dtype', n_step_lstm_dtypes_valid) ]) )) class TestNStepBiRNN(op_utils.ChainerOpTest): check_numpy_strides_compliance = False dodge_nondifferentiable = True def setup(self): self.check_forward_options.update({ 'rtol': 1e-2, 'atol': 1e-2}) self.check_backward_options.update({ 'rtol': 1e-2, 'atol': 1e-2}) self.check_double_backward_options.update({ 'rtol': 5e-2, 'atol': 5e-2}) if self.in_dtypes[0] == 'float16': self.check_forward_options.update({ 'rtol': 1e-1, 'atol': 1e-1}) self.check_backward_options.update({ 'rtol': 1e-1, 'atol': 1e-1}) self.check_double_backward_options.update({ 'rtol': 1e-1, 'atol': 1e-1}) device = chainerx.get_default_device() if device.backend.name == 'cuda': if self.in_dtypes[0] != 'float32': self.skip_forward_test = True self.skip_backward_test = True self.skip_double_backward_test = True def generate_inputs(self): h_shape = (self.n_layers * 2, self.batches[0], self.hidden_size) dtype = self.in_dtypes[0] low = -1.0 high = 1.0 if dtype == 'float16': low = -0.5 high = 0.5 h = array_utils.uniform(h_shape, dtype) in_size = self.input_size out_size = self.hidden_size xs = [array_utils.uniform((self.batches[b], in_size), dtype, low=low, high=high) for b in range(len(self.batches))] def w_in(i, j): if i == 0 and j < 1: return in_size elif i > 0 and j < 1: return out_size * 2 else: return out_size inputs = [] inputs.append(h) for i in range(len(self.batches)): inputs.append(xs[i]) for n in range(self.n_layers): for direction in (0, 1): for i in range(2): inputs.append(array_utils.uniform( (out_size, w_in(n, i)), dtype, low=low, 
high=high)) for i in range(2): inputs.append(array_utils.uniform( (out_size,), dtype, low=low, high=high)) return tuple(inputs) def process_input(self, inputs): h = inputs[0] xs = inputs[1:1 + len(self.batches)] ws = [] bs = [] index = 1 + len(self.batches) for n in range(self.n_layers): ws.append(inputs[index: index + 2]) bs.append(inputs[index + 2: index + 4]) ws.append(inputs[index + 4: index + 6]) bs.append(inputs[index + 6: index + 8]) index += 8 return h, ws, bs, xs def forward_chainerx(self, inputs): h, ws, bs, xs = self.process_input(inputs) out = chainerx.n_step_birnn( self.n_layers, h, ws, bs, xs, self.activation) rets = [] rets.append(out[0]) for i in range(len(out[1])): rets.append(out[1][i]) return tuple(rets) def forward_chainer(self, inputs): h, ws, bs, xs = self.process_input(inputs) out = chainer.functions.n_step_birnn( self.n_layers, 0.0, h, ws, bs, xs, self.activation) rets = [] rets.append(out[0]) for i in range(len(out[1])): rets.append(out[1][i]) return tuple(rets)
from __future__ import unicode_literals, division, absolute_import
from builtins import *  # pylint: disable=unused-import, redefined-builtin

import json
import logging
import os
import pkgutil
from collections import deque
from functools import wraps

from flask import Flask, request
from flask.ext.cors import CORS
from flask_compress import Compress
from flask_restplus import Api as RestPlusAPI
from flask_restplus.model import Model
from flask_restplus.resource import Resource
from jsonschema.exceptions import RefResolutionError

from flexget import manager
from flexget.config_schema import process_config, register_config_key
from flexget.event import event
from flexget.utils.database import with_session
from flexget.webserver import User
from flexget.webserver import register_app, get_secret

__version__ = '0.5.3-beta'

log = logging.getLogger('api')

# Populated from the manager config when the daemon starts (register_api).
api_config = {}
api_config_schema = {
    'type': 'boolean',
    'additionalProperties': False
}


@with_session
def api_key(session=None):
    """Return the token of the first user, for internal (in-process) auth."""
    log.debug('fetching token for internal lookup')
    return session.query(User).first().token


@event('config.register')
def register_config():
    # Registers the top-level 'api' boolean key in the flexget config schema.
    register_config_key('api', api_config_schema)


class ApiSchemaModel(Model):
    """A flask restplus :class:`flask_restplus.models.ApiModel` which can take a json schema directly."""

    def __init__(self, name, schema, *args, **kwargs):
        super(ApiSchemaModel, self).__init__(name, *args, **kwargs)
        self._schema = schema

    @property
    def __schema__(self):
        # When this model inherits from another, express it as a JSON-schema
        # 'allOf' of the parent reference and our own schema.
        if self.__parent__:
            return {
                'allOf': [
                    {'$ref': '#/definitions/{0}'.format(self.__parent__.name)},
                    self._schema
                ]
            }
        else:
            return self._schema

    def __nonzero__(self):
        # Python 2 truthiness: a model with an empty schema is falsy.
        return bool(self._schema)

    def __repr__(self):
        return '<ApiSchemaModel(%r)>' % self._schema


class Api(RestPlusAPI):
    """
    Extends a flask restplus :class:`flask_restplus.Api` with:
    - methods to make using json schemas easier
    - methods to auto document and handle :class:`ApiError` responses
    """

    def _rewrite_refs(self, schema):
        # Recursively rewrite root-relative '$ref' values ('/...') to point
        # into this api's '#definitions' section.
        if isinstance(schema, list):
            for value in schema:
                self._rewrite_refs(value)

        if isinstance(schema, dict):
            for key, value in schema.items():
                if isinstance(value, (list, dict)):
                    self._rewrite_refs(value)

                if key == '$ref' and value.startswith('/'):
                    schema[key] = '#definitions%s' % value

    def schema(self, name, schema, **kwargs):
        """
        Register a json schema.

        Usable like :meth:`flask_restplus.Api.model`, except takes a json schema as its argument.

        :returns: An :class:`ApiSchemaModel` instance registered to this api.
        """
        model = ApiSchemaModel(name, schema, **kwargs)
        model.__apidoc__.update(kwargs)
        self.models[name] = model
        return model

    def inherit(self, name, parent, fields):
        """
        Extends :meth:`flask_restplus.Api.inherit` to allow `fields` to be a json schema, if `parent` is a
        :class:`ApiSchemaModel`.
        """
        if isinstance(parent, ApiSchemaModel):
            model = ApiSchemaModel(name, fields)
            model.__apidoc__['name'] = name
            model.__parent__ = parent
            self.models[name] = model
            return model
        return super(Api, self).inherit(name, parent, fields)

    def validate(self, model, schema_override=None, description=None):
        """
        When a method is decorated with this, json data submitted to the endpoint will be validated with the given
        `model`. This also auto-documents the expected model, as well as the possible :class:`ValidationError` response.
        """
        # NOTE(review): the inner decorators reference the module-level `api`
        # instance rather than `self` — presumably intentional since only one
        # Api instance exists in this module; verify if reusing this class.
        def decorator(func):
            @api.expect((model, description))
            @api.response(ValidationError)
            @wraps(func)
            def wrapper(*args, **kwargs):
                payload = request.json
                try:
                    schema = schema_override if schema_override else model.__schema__
                    errors = process_config(config=payload, schema=schema, set_defaults=False)

                    if errors:
                        raise ValidationError(errors)
                except RefResolutionError as e:
                    raise ApiError(str(e))
                return func(*args, **kwargs)
            return wrapper
        return decorator

    def response(self, code_or_apierror, description=None, model=None, **kwargs):
        """
        Extends :meth:`flask_restplus.Api.response` to allow passing an :class:`ApiError` class instead of
        response code. If an `ApiError` is used, the response code, and expected response model, is automatically
        documented.
        """
        try:
            if issubclass(code_or_apierror, ApiError):
                description = description or code_or_apierror.description
                return self.doc(responses={code_or_apierror.code: (description, code_or_apierror.response_model)})
        except TypeError:
            # If first argument isn't a class this happens
            pass
        return super(Api, self).response(code_or_apierror, description, model=model)


class APIResource(Resource):
    """All api resources should subclass this class."""
    # Every handler method gets a database session injected.
    method_decorators = [with_session]

    def __init__(self, api, *args, **kwargs):
        self.manager = manager.manager
        super(APIResource, self).__init__(api, *args, **kwargs)


app = Flask(__name__, template_folder=os.path.join(os.path.dirname(__path__[0]), 'templates'))
app.config['REMEMBER_COOKIE_NAME'] = 'flexgetToken'
app.config['DEBUG'] = True

CORS(app)
Compress(app)

api = Api(
    app,
    catch_all_404s=True,
    title='API',
    version=__version__,
    description='<font color="red"><b>Warning: under development, subject to change without notice.<b/></font>'
)


class ApiError(Exception):
    """Base exception for api errors; serializable to a json response."""
    code = 500
    description = 'server error'

    # NOTE(review): 'required' lists 'error' but the declared property is
    # named 'message' — looks like a schema typo; confirm before relying on
    # validation of this model.
    response_model = api.schema('error', {
        'type': 'object',
        'properties': {
            'code': {'type': 'integer'},
            'message': {'type': 'string'}
        },
        'required': ['code', 'error']
    })

    def __init__(self, message, payload=None):
        self.message = message
        self.payload = payload

    def to_dict(self):
        # Merge the extra payload (if any) with the standard code/message.
        rv = self.payload or {}
        rv.update(code=self.code, message=self.message)
        return rv

    @classmethod
    def schema(cls):
        return cls.response_model.__schema__


class NotFoundError(ApiError):
    code = 404
    description = 'not found'


class ValidationError(ApiError):
    code = 400
    description = 'validation error'

    response_model = api.inherit('validation_error', ApiError.response_model, {
        'type': 'object',
        'properties': {
            'validation_errors': {
                'type': 'array',
                'items': {
                    'type': 'object',
                    'properties': {
                        'message': {'type': 'string', 'description': 'A human readable message explaining the error.'},
                        'validator': {'type': 'string', 'description': 'The name of the failed validator.'},
                        'validator_value': {
                            'type': 'string',
                            'description': 'The value for the failed validator in the schema.'
                        },
                        'path': {'type': 'string'},
                        'schema_path': {'type': 'string'},
                    }
                }
            }
        },
        'required': ['validation_errors']
    })

    # jsonschema ValidationError attributes copied into the response payload.
    verror_attrs = (
        'message', 'cause', 'validator', 'validator_value',
        'path', 'schema_path', 'parent'
    )

    def __init__(self, validation_errors, message='validation error'):
        payload = {'validation_errors': [self._verror_to_dict(error) for error in validation_errors]}
        super(ValidationError, self).__init__(message, payload=payload)

    def _verror_to_dict(self, error):
        # deques (jsonschema paths) are not json serializable; listify them.
        error_dict = {}
        for attr in self.verror_attrs:
            if isinstance(getattr(error, attr), deque):
                error_dict[attr] = list(getattr(error, attr))
            else:
                error_dict[attr] = getattr(error, attr)
        return error_dict


@api.errorhandler(ApiError)
@api.errorhandler(NotFoundError)
@api.errorhandler(ValidationError)
def api_errors(error):
    # Render any ApiError subclass as its json dict with its status code.
    return error.to_dict(), error.code


@event('manager.daemon.started')
def register_api(mgr):
    """Mount the api app under /api when enabled in the config."""
    global api_config
    api_config = mgr.config.get('api')

    app.secret_key = get_secret()

    if api_config:
        register_app('/api', app)


class ApiClient(object):
    """
    This is an client which can be used as a more pythonic interface to the rest api.

    It skips http, and is only usable from within the running flexget process.
""" def __init__(self): self.app = app.test_client() def __getattr__(self, item): return ApiEndopint('/api/' + item, self.get_endpoint) def get_endpoint(self, url, data=None, method=None): if method is None: method = 'POST' if data is not None else 'GET' auth_header = dict(Authorization='Token %s' % api_key()) response = self.app.open(url, data=data, follow_redirects=True, method=method, headers=auth_header) result = json.loads(response.get_data(as_text=True)) # TODO: Proper exceptions if 200 > response.status_code >= 300: raise Exception(result['error']) return result class ApiEndopint(object): def __init__(self, endpoint, caller): self.endpoint = endpoint self.caller = caller def __getattr__(self, item): return self.__class__(self.endpoint + '/' + item, self.caller) __getitem__ = __getattr__ def __call__(self, data=None, method=None): return self.caller(self.endpoint, data=data, method=method) # Import API Sub Modules for loader, module_name, is_pkg in pkgutil.walk_packages(__path__): loader.find_module(module_name).load_module(module_name)
#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Utility for creating well-formed pull request merges and pushing them to Apache. This script is a modified version # of the one created by the Spark project (https://github.com/apache/spark/blob/master/dev/merge_spark_pr.py). # # Usage: ./kafka-merge-pr.py (see config env vars below) # # This utility assumes you already have local a kafka git folder and that you # have added remotes corresponding to both: # (i) the github apache kafka mirror and # (ii) the apache kafka git repo. 
import json
import os
import re
import subprocess
import sys
import urllib2

# jira-python is optional; JIRA integration is skipped when missing.
try:
    import jira.client
    JIRA_IMPORTED = True
except ImportError:
    JIRA_IMPORTED = False

PROJECT_NAME = "kafka"

CAPITALIZED_PROJECT_NAME = "kafka".upper()

# Location of the local git repository
REPO_HOME = os.environ.get("%s_HOME" % CAPITALIZED_PROJECT_NAME, os.getcwd())

# Remote name which points to the GitHub site
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "apache-github")

# Remote name where we want to push the changes to (GitHub by default, but Apache Git would work if GitHub is down)
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "apache-github")

# ASF JIRA username
JIRA_USERNAME = os.environ.get("JIRA_USERNAME", "")

# ASF JIRA password
JIRA_PASSWORD = os.environ.get("JIRA_PASSWORD", "")

# OAuth key used for issuing requests against the GitHub API. If this is not defined, then requests
# will be unauthenticated. You should only need to configure this if you find yourself regularly
# exceeding your IP's unauthenticated request rate limit. You can create an OAuth key at
# https://github.com/settings/tokens. This script only requires the "public_repo" scope.
GITHUB_OAUTH_KEY = os.environ.get("GITHUB_OAUTH_KEY")

GITHUB_USER = os.environ.get("GITHUB_USER", "apache")
GITHUB_BASE = "https://github.com/%s/%s/pull" % (GITHUB_USER, PROJECT_NAME)
GITHUB_API_BASE = "https://api.github.com/repos/%s/%s" % (GITHUB_USER, PROJECT_NAME)
JIRA_BASE = "https://issues.apache.org/jira/browse"
JIRA_API_BASE = "https://issues.apache.org/jira"

# Prefix added to temporary branches
TEMP_BRANCH_PREFIX = "PR_TOOL"

DEV_BRANCH_NAME = "trunk"

DEFAULT_FIX_VERSION = os.environ.get("DEFAULT_FIX_VERSION", "2.0.0")


def get_json(url):
    """Fetch and decode a json document from the GitHub API; exit on failure."""
    try:
        request = urllib2.Request(url)
        if GITHUB_OAUTH_KEY:
            request.add_header('Authorization', 'token %s' % GITHUB_OAUTH_KEY)
        return json.load(urllib2.urlopen(request))
    except urllib2.HTTPError as e:
        if "X-RateLimit-Remaining" in e.headers and e.headers["X-RateLimit-Remaining"] == '0':
            print "Exceeded the GitHub API rate limit; see the instructions in " + \
                  "kafka-merge-pr.py to configure an OAuth token for making authenticated " + \
                  "GitHub requests."
        else:
            print "Unable to fetch URL, exiting: %s" % url
        # Both error branches are fatal.
        sys.exit(-1)


def fail(msg):
    """Print msg, clean up temporary branches, and exit with failure."""
    print msg
    clean_up()
    sys.exit(-1)


def run_cmd(cmd):
    """Run a shell command (list or whitespace-separated string) and return its output."""
    print cmd
    if isinstance(cmd, list):
        return subprocess.check_output(cmd)
    else:
        return subprocess.check_output(cmd.split(" "))


def continue_maybe(prompt):
    """Ask for confirmation; anything but 'y' aborts the script."""
    result = raw_input("\n%s (y/n): " % prompt)
    if result.lower() != "y":
        fail("Okay, exiting")


def clean_up():
    """Restore the original checkout and delete all PR_TOOL temp branches."""
    if original_head != get_current_branch():
        print "Restoring head pointer to %s" % original_head
        run_cmd("git checkout %s" % original_head)

    branches = run_cmd("git branch").replace(" ", "").split("\n")

    for branch in filter(lambda x: x.startswith(TEMP_BRANCH_PREFIX), branches):
        print "Deleting local branch %s" % branch
        run_cmd("git branch -D %s" % branch)


def get_current_branch():
    return run_cmd("git rev-parse --abbrev-ref HEAD").replace("\n", "")


# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref, title, body, pr_repo_desc):
    pr_branch_name = "%s_MERGE_PR_%s" % (TEMP_BRANCH_PREFIX, pr_num)
    target_branch_name = "%s_MERGE_PR_%s_%s" % (TEMP_BRANCH_PREFIX, pr_num, target_ref.upper())
    run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name))
    run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name))
    run_cmd("git checkout %s" % target_branch_name)

    # Squash-merge the PR branch; on conflict, let the operator resolve
    # manually before continuing.
    had_conflicts = False
    try:
        run_cmd(['git', 'merge', pr_branch_name, '--squash'])
    except Exception as e:
        msg = "Error merging: %s\nWould you like to manually fix-up this merge?" % e
        continue_maybe(msg)
        msg = "Okay, please fix any conflicts and 'git add' conflicting files... Finished?"
        continue_maybe(msg)
        had_conflicts = True

    # Authors ordered by number of commits in the PR (most first).
    commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
                              '--pretty=format:%an <%ae>']).split("\n")
    distinct_authors = sorted(set(commit_authors),
                              key=lambda x: commit_authors.count(x), reverse=True)
    primary_author = raw_input(
        "Enter primary author in the format of \"name <email>\" [%s]: " % distinct_authors[0])
    if primary_author == "":
        primary_author = distinct_authors[0]

    reviewers = raw_input(
        "Enter reviewers in the format of \"name1 <email1>, name2 <email2>\": ").strip()

    run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name, '--pretty=format:%h [%an] %s']).split("\n")

    # Build the squash-commit message out of repeated -m flags.
    merge_message_flags = []

    merge_message_flags += ["-m", title]
    if body is not None:
        # Remove "Committer Checklist" section
        checklist_index = body.find("### Committer Checklist")
        if checklist_index != -1:
            body = body[:checklist_index].rstrip()
        # Remove @ symbols from the body to avoid triggering e-mails to people every time someone creates a
        # public fork of the project.
        body = body.replace("@", "")
        merge_message_flags += ["-m", body]

    authors = "\n".join(["Author: %s" % a for a in distinct_authors])

    merge_message_flags += ["-m", authors]

    if reviewers != "":
        merge_message_flags += ["-m", "Reviewers: %s" % reviewers]

    if had_conflicts:
        committer_name = run_cmd("git config --get user.name").strip()
        committer_email = run_cmd("git config --get user.email").strip()
        message = "This patch had conflicts when merged, resolved by\nCommitter: %s <%s>" % (
            committer_name, committer_email)
        merge_message_flags += ["-m", message]

    # The string "Closes #%s" string is required for GitHub to correctly close the PR
    close_line = "Closes #%s from %s" % (pr_num, pr_repo_desc)
    merge_message_flags += ["-m", close_line]

    run_cmd(['git', 'commit', '--author="%s"' % primary_author] + merge_message_flags)

    continue_maybe("Merge complete (local ref %s). Push to %s?" % (
        target_branch_name, PUSH_REMOTE_NAME))

    try:
        run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name, target_ref))
    except Exception as e:
        clean_up()
        fail("Exception while pushing: %s" % e)

    merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
    clean_up()
    print("Pull request #%s merged!" % pr_num)
    print("Merge hash: %s" % merge_hash)
    return merge_hash


def cherry_pick(pr_num, merge_hash, default_branch):
    """Cherry-pick merge_hash onto a chosen branch and push it; return the branch name."""
    pick_ref = raw_input("Enter a branch name [%s]: " % default_branch)
    if pick_ref == "":
        pick_ref = default_branch

    pick_branch_name = "%s_PICK_PR_%s_%s" % (TEMP_BRANCH_PREFIX, pr_num, pick_ref.upper())

    run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, pick_ref, pick_branch_name))
    run_cmd("git checkout %s" % pick_branch_name)

    try:
        run_cmd("git cherry-pick -sx %s" % merge_hash)
    except Exception as e:
        msg = "Error cherry-picking: %s\nWould you like to manually fix-up this merge?" % e
        continue_maybe(msg)
        msg = "Okay, please fix any conflicts and finish the cherry-pick. Finished?"
        continue_maybe(msg)

    continue_maybe("Pick complete (local ref %s). Push to %s?" % (
        pick_branch_name, PUSH_REMOTE_NAME))

    try:
        run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, pick_branch_name, pick_ref))
    except Exception as e:
        clean_up()
        fail("Exception while pushing: %s" % e)

    pick_hash = run_cmd("git rev-parse %s" % pick_branch_name)[:8]
    clean_up()

    print("Pull request #%s picked into %s!" % (pr_num, pick_ref))
    print("Pick hash: %s" % pick_hash)
    return pick_ref


def fix_version_from_branch(branch, versions):
    # Note: Assumes this is a sorted (newest->oldest) list of un-released versions
    if branch == DEV_BRANCH_NAME:
        # On trunk, only the configured default fix version applies.
        versions = filter(lambda x: x == DEFAULT_FIX_VERSION, versions)
        if len(versions) > 0:
            return versions[0]
        else:
            return None
    else:
        # On a release branch, pick the oldest unreleased version matching it.
        versions = filter(lambda x: x.startswith(branch), versions)
        if len(versions) > 0:
            return versions[-1]
        else:
            return None


def resolve_jira_issue(merge_branches, comment, default_jira_id=""):
    """Transition a JIRA issue to Resolved/Fixed with appropriate fix versions."""
    asf_jira = jira.client.JIRA({'server': JIRA_API_BASE},
                                basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))

    jira_id = raw_input("Enter a JIRA id [%s]: " % default_jira_id)
    if jira_id == "":
        jira_id = default_jira_id

    try:
        issue = asf_jira.issue(jira_id)
    except Exception as e:
        fail("ASF JIRA could not find %s\n%s" % (jira_id, e))

    cur_status = issue.fields.status.name
    cur_summary = issue.fields.summary
    cur_assignee = issue.fields.assignee
    if cur_assignee is None:
        cur_assignee = "NOT ASSIGNED!!!"
    else:
        cur_assignee = cur_assignee.displayName

    if cur_status == "Resolved" or cur_status == "Closed":
        fail("JIRA issue %s already has status '%s'" % (jira_id, cur_status))
    print ("=== JIRA %s ===" % jira_id)
    print ("summary\t\t%s\nassignee\t%s\nstatus\t\t%s\nurl\t\t%s/%s\n" % (
        cur_summary, cur_assignee, cur_status, JIRA_BASE, jira_id))

    # Unreleased versions, newest first, used to suggest fix versions.
    versions = asf_jira.project_versions(CAPITALIZED_PROJECT_NAME)
    versions = sorted(versions, key=lambda x: x.name, reverse=True)
    versions = filter(lambda x: x.raw['released'] is False, versions)

    version_names = map(lambda x: x.name, versions)
    default_fix_versions = map(lambda x: fix_version_from_branch(x, version_names), merge_branches)
    default_fix_versions = filter(lambda x: x != None, default_fix_versions)
    default_fix_versions = ",".join(default_fix_versions)

    fix_versions = raw_input("Enter comma-separated fix version(s) [%s]: " % default_fix_versions)
    if fix_versions == "":
        fix_versions = default_fix_versions
    fix_versions = fix_versions.replace(" ", "").split(",")

    def get_version_json(version_str):
        return filter(lambda v: v.name == version_str, versions)[0].raw

    jira_fix_versions = map(lambda v: get_version_json(v), fix_versions)

    resolve = filter(lambda a: a['name'] == "Resolve Issue", asf_jira.transitions(jira_id))[0]
    resolution = filter(lambda r: r.raw['name'] == "Fixed", asf_jira.resolutions())[0]
    asf_jira.transition_issue(
        jira_id, resolve["id"], fixVersions = jira_fix_versions,
        comment = comment, resolution = {'id': resolution.raw['id']})

    print "Successfully resolved %s with fixVersions=%s!" % (jira_id, fix_versions)


def resolve_jira_issues(title, merge_branches, comment):
    """Resolve every JIRA id found in the commit title (or prompt for one)."""
    jira_ids = re.findall("%s-[0-9]{4,5}" % CAPITALIZED_PROJECT_NAME, title)

    if len(jira_ids) == 0:
        resolve_jira_issue(merge_branches, comment)
    for jira_id in jira_ids:
        resolve_jira_issue(merge_branches, comment, jira_id)


def standardize_jira_ref(text):
    """
    Standardize the jira reference commit message prefix to "PROJECT_NAME-XXX; Issue"

    >>> standardize_jira_ref("%s-5954; Top by key" % CAPITALIZED_PROJECT_NAME)
    'KAFKA-5954; Top by key'
    >>> standardize_jira_ref("%s-5821; ParquetRelation2 CTAS should check if delete is successful" % PROJECT_NAME)
    'KAFKA-5821; ParquetRelation2 CTAS should check if delete is successful'
    >>> standardize_jira_ref("%s-4123 [WIP] Show new dependencies added in pull requests" % PROJECT_NAME)
    'KAFKA-4123; [WIP] Show new dependencies added in pull requests'
    >>> standardize_jira_ref("%s  5954: Top by key" % PROJECT_NAME)
    'KAFKA-5954; Top by key'
    >>> standardize_jira_ref("%s-979 a LRU scheduler for load balancing in TaskSchedulerImpl" % PROJECT_NAME)
    'KAFKA-979; a LRU scheduler for load balancing in TaskSchedulerImpl'
    >>> standardize_jira_ref("%s-1094 Support MiMa for reporting binary compatibility across versions." % CAPITALIZED_PROJECT_NAME)
    'KAFKA-1094; Support MiMa for reporting binary compatibility across versions.'
    >>> standardize_jira_ref("[WIP] %s-1146; Vagrant support" % CAPITALIZED_PROJECT_NAME)
    'KAFKA-1146; [WIP] Vagrant support'
    >>> standardize_jira_ref("%s-1032. If Yarn app fails before registering, app master stays aroun..." % PROJECT_NAME)
    'KAFKA-1032; If Yarn app fails before registering, app master stays aroun...'
    >>> standardize_jira_ref("%s-6250 %s-6146 %s-5911: Types are now reserved words in DDL parser." % (PROJECT_NAME, PROJECT_NAME, CAPITALIZED_PROJECT_NAME))
    'KAFKA-6250 KAFKA-6146 KAFKA-5911; Types are now reserved words in DDL parser.'
    >>> standardize_jira_ref("Additional information for users building from source code")
    'Additional information for users building from source code'
    """
    jira_refs = []
    components = []

    # Extract JIRA ref(s):
    pattern = re.compile(r'(%s[-\s]*[0-9]{3,6})+' % CAPITALIZED_PROJECT_NAME, re.IGNORECASE)
    for ref in pattern.findall(text):
        # Add brackets, replace spaces with a dash, & convert to uppercase
        jira_refs.append(re.sub(r'\s+', '-', ref.upper()))
        text = text.replace(ref, '')

    # Extract project name component(s):
    # Look for alphanumeric chars, spaces, dashes, periods, and/or commas
    pattern = re.compile(r'(\[[\w\s,-\.]+\])', re.IGNORECASE)
    for component in pattern.findall(text):
        components.append(component.upper())
        text = text.replace(component, '')

    # Cleanup any remaining symbols:
    pattern = re.compile(r'^\W+(.*)', re.IGNORECASE)
    if (pattern.search(text) is not None):
        text = pattern.search(text).groups()[0]

    # Assemble full text (JIRA ref(s), module(s), remaining text)
    jira_prefix = ' '.join(jira_refs).strip()
    if jira_prefix:
        jira_prefix = jira_prefix + "; "
    clean_text = jira_prefix + ' '.join(components).strip() + " " + text.strip()

    # Replace multiple spaces with a single space, e.g. if no jira refs and/or components were included
    clean_text = re.sub(r'\s+', ' ', clean_text.strip())

    return clean_text


def main():
    """Interactive driver: merge a PR, optionally backport and resolve JIRA."""
    global original_head

    original_head = get_current_branch()

    branches = get_json("%s/branches" % GITHUB_API_BASE)
    branch_names = filter(lambda x: x[0].isdigit(), [x['name'] for x in branches])
    # Assumes branch names can be sorted lexicographically
    latest_branch = sorted(branch_names, reverse=True)[0]

    pr_num = raw_input("Which pull request would you like to merge? (e.g. 34): ")
    pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
    pr_events = get_json("%s/issues/%s/events" % (GITHUB_API_BASE, pr_num))

    url = pr["url"]

    pr_title = pr["title"]
    commit_title = raw_input("Commit title [%s]: " % pr_title.encode("utf-8")).decode("utf-8")
    if commit_title == "":
        commit_title = pr_title

    # Decide whether to use the modified title or not
    modified_title = standardize_jira_ref(commit_title)
    if modified_title != commit_title:
        print "I've re-written the title as follows to match the standard format:"
        print "Original: %s" % commit_title
        print "Modified: %s" % modified_title
        result = raw_input("Would you like to use the modified title? (y/n): ")
        if result.lower() == "y":
            commit_title = modified_title
            print "Using modified title:"
        else:
            print "Using original title:"
        print commit_title

    body = pr["body"]
    target_ref = pr["base"]["ref"]
    user_login = pr["user"]["login"]
    base_ref = pr["head"]["ref"]
    pr_repo_desc = "%s/%s" % (user_login, base_ref)

    # Merged pull requests don't appear as merged in the GitHub API;
    # Instead, they're closed by asfgit.
    merge_commits = \
        [e for e in pr_events if e["actor"]["login"] == "asfgit" and e["event"] == "closed"]

    if merge_commits:
        merge_hash = merge_commits[0]["commit_id"]
        message = get_json("%s/commits/%s" % (GITHUB_API_BASE, merge_hash))["commit"]["message"]

        print "Pull request %s has already been merged, assuming you want to backport" % pr_num
        commit_is_downloaded = run_cmd(['git', 'rev-parse', '--quiet', '--verify',
                                        "%s^{commit}" % merge_hash]).strip() != ""
        if not commit_is_downloaded:
            fail("Couldn't find any merge commit for #%s, you may need to update HEAD." % pr_num)

        print "Found commit %s:\n%s" % (merge_hash, message)
        cherry_pick(pr_num, merge_hash, latest_branch)
        sys.exit(0)

    if not bool(pr["mergeable"]):
        msg = "Pull request %s is not mergeable in its current form.\n" % pr_num + \
            "Continue? (experts only!)"
        continue_maybe(msg)

    print ("\n=== Pull Request #%s ===" % pr_num)
    print ("PR title\t%s\nCommit title\t%s\nSource\t\t%s\nTarget\t\t%s\nURL\t\t%s" % (
        pr_title, commit_title, pr_repo_desc, target_ref, url))
    continue_maybe("Proceed with merging pull request #%s?" % pr_num)

    merged_refs = [target_ref]

    merge_hash = merge_pr(pr_num, target_ref, commit_title, body, pr_repo_desc)

    # Offer to backport the squash commit to additional branches.
    pick_prompt = "Would you like to pick %s into another branch?" % merge_hash
    while raw_input("\n%s (y/n): " % pick_prompt).lower() == "y":
        merged_refs = merged_refs + [cherry_pick(pr_num, merge_hash, latest_branch)]

    if JIRA_IMPORTED:
        if JIRA_USERNAME and JIRA_PASSWORD:
            continue_maybe("Would you like to update an associated JIRA?")
            jira_comment = "Issue resolved by pull request %s\n[%s/%s]" % (pr_num, GITHUB_BASE, pr_num)
            resolve_jira_issues(commit_title, merged_refs, jira_comment)
        else:
            print "JIRA_USERNAME and JIRA_PASSWORD not set"
            print "Exiting without trying to close the associated JIRA."
    else:
        print "Could not find jira-python library. Run 'sudo pip install jira' to install."
        print "Exiting without trying to close the associated JIRA."

if __name__ == "__main__":
    import doctest
    # Run the standardize_jira_ref doctests before doing anything destructive.
    (failure_count, test_count) = doctest.testmod()
    if (failure_count):
        exit(-1)

    main()
"""Model classes for 3D density distribution.""" __author__ = "Robert Nikutta, Claudia Agliozzo" __version__ = "2016-10-12" # imports from rhocube import Cube import numpy as N def spiral3D(h,Rbase,nturns,rtube,envelope='dualcone'): """Function to compute a helical parametric curve along the outline of a dual cone or a cylinder. """ a = nturns * 2*N.pi/h delta = rtube/3. th = a*h # / (2*N.pi) nsteps = int(th/N.float(delta)) t = N.linspace(-th,th,2*nsteps+1) z = t/a if envelope == 'dualcone': zprogression = z*(Rbase/h) elif envelope == 'cylinder': zprogression = Rbase else: raise Exception, "Invalid value for 'envelope'. Must be either of: ['dualcone','cylinder']." x = zprogression * N.cos(N.abs(t)) y = zprogression * N.sin(N.abs(t)) return x, y, z class Helix3D(Cube): def __init__(self,npix,transform=None,smoothing=1.,snakefunc=spiral3D,envelope='dualcone'): """Helical tube winding along a dual cone, with constant density inside the tube. """ Cube.__init__(self,npix,transform=transform,smoothing=smoothing,buildkdtree=True) self.z = N.unique(self.Z.flatten()) self.snakefunc = snakefunc self.envelope = envelope def __call__(self,h,nturns,rtube,tiltx=0.,tilty=0.,tiltz=0.,xoff=0.,yoff=0.,weight=None): """Return density rho at (x,y,z) TODO: automatically determine args (their names), and produce self.ARG members, and use those in """ self.h = h self.Rbase = self.h self.nturns = nturns self.rtube = rtube self.tiltx = tiltx self.tilty = tilty self.tiltz = tiltz self.xoff = xoff self.yoff = yoff self.weight = weight self.sanity() self.get_rho() # get_rho should set self.rho (3D) self.apply_rho_ops() # shift, rotate3d, smooth, in that order return self.rho def sanity(self): """Sanity checks. """ pass # not yet implemented def get_rho(self): """Compute rho(x,y,z) in every voxel. 
""" self.x, self.y, self.z = self.snakefunc(self.h,self.Rbase,self.nturns,self.rtube,self.envelope) co = N.zeros(N.prod(self.ndshape),dtype=N.float) # get a cube full of zeros # for evey voxel quickly determine if it's close enough to the helix center line for j,pt in enumerate(zip(self.x,self.y,self.z)): idxes = self.kdtree_query(pt,self.rtube)[1] co[idxes] = 1. self.rho = co.reshape(self.ndshape) class PowerLawShell(Cube): def __init__(self,npix,transform=None,smoothing=1.,exponent=-1.): """Power-law shell. """ Cube.__init__(self,npix,transform=transform,smoothing=smoothing,buildkdtree=False,computeR=True) self.exponent = exponent def __call__(self,rin,rout,xoff=0.,yoff=0.,weight=None): """A spherical shell with inner and outer radii, and radial power-law density fall-off. Parameters: ----------- rin : float Inner radius of the shell, in fractions of unity, i.e. between 0 and 1. rout : float Outer radius of the shell, in fractions of unity, i.e. between 0 and 1. xoff, yoff : floats x and y offsets of the shell center from (0,0). Positive values are to the right and up, negative to the left and down. In units if unity (the image is within [-1,1]. Defaults: xoff = yoff = 0. weight : float or None Normalize the total (relative) mass contained in the shell to this value. The total mass is the sum of rho over all pixels (in 3D). This is useful if you e.g. want to have more than one component, and wish to distribute different amounts of mass inside each one. Default: weight=1. """ self.rin = rin self.rout = rout # helper arrays for finding the edges of the shell in get_rho() self.Rin = self.rin * N.ones(self.X.shape) self.Rout = self.rout * N.ones(self.X.shape) self.xoff = xoff self.yoff = yoff self.weight = weight self.sanity() self.get_rho() # get_rho sets self.rho (3D) self.apply_rho_ops() # shift, rotate3d, smooth, in that order return self.rho def sanity(self): """Sanity checks.""" assert (0. 
< self.rin < self.rout) # this automatically asserts that the shell thickness is finite and positive def get_rho(self): """Compute rho(x,y,z) in every voxel.""" self.rho = self.R**self.exponent co = ((self.R >= self.rin) & (self.R <= self.rout)) | N.isclose(self.R,self.Rout) | N.isclose(self.R,self.Rin) # isclose also captures pixels at the very edge of the shell self.rho[~co] = 0. class TruncatedNormalShell(Cube): def __init__(self,npix,transform=None,smoothing=1.): """Truncated Normal Shell """ Cube.__init__(self,npix,transform=transform,smoothing=smoothing,buildkdtree=False,computeR=True) def __call__(self,r,width,clipa=0.,clipb=1.,xoff=0.,yoff=0.,weight=None): """Return density rho at (x,y,z) A spherical shell with radius 'r', and Gaussian density fall-off from r. The width of the Normal is 'width'. The PDF of the Normal is truncated at 'clip' values. Parameters: ----------- r : float Radius at which the shell is centered, in fractions of unity, i.e. between 0 and 1. width : float Thickness of the Gaussian that makes up the shell, in same units as r. clipa : float Lower clip radius of the Gaussian. Default is 0. clipb : float Upper clip radius of the Gaussian. Default is 1. xoff, yoff : floats x and y offsets of the shell center from (0,0). Positive values are to the right and up, negative to the left and down. In units if unity (remember that the image is within [-1,1]. Defaults: xoff = yoff = 0. weight : float Normalize the total (relative) mass contained in the shell to this value. The total mass is the sum of rho over all pixels (in 3D). This is useful if you e.g. want to have more than one component, and wish to distribute different amounts of mass inside each one. 
""" self.r = r self.width = width self.clipa = clipa self.clipb = clipb self.xoff = xoff self.yoff = yoff self.weight = weight self.sanity() self.get_rho() self.apply_rho_ops() # shift, rotate3d, smooth, in that order return self.rho def sanity(self): # CAREFUL ASSERTIONS # lower cut clipa must be smaller than r # lower cut clipa can be as small as zero # upper cut clipb must be greater than r # upper cub clipb can be in principle larger than unity (but we'll default to 1.0) # width must be a positive number assert (0. <= self.clipa < self.r < self.clipb) # radial distance relations that must hold: 0. <= clipa < r < clipb [<= 1.] assert (self.width > 0.) def get_rho(self): """Compute rho(x,y,z) in every voxel. """ self.rho = self.get_pdf(self.R) def get_pdf(self,x): """Distribution of density according to a Gaussian with (mu,sig) = (r,width). """ from scipy.stats import truncnorm # Because of the non-standard way that Scipy defines # distributions, we compute the shape parameters for a # truncated Normal, with mean mu, standard deviation sigma, # and truncated left and right at clipa and clipb. mu, sig = self.r, self.width a, b = (self.clipa - mu) / sig, (self.clipb - mu) / sig rv = truncnorm(a, b, loc=mu, scale=sig) pdf = rv.pdf(x) return pdf class ConstantDensityTorus(Cube): def __init__(self,npix,transform=None,smoothing=1.): Cube.__init__(self,npix,transform=transform,smoothing=smoothing,buildkdtree=False,computeR=True) def __call__(self,r,rcross,xoff=0.,yoff=0.,tiltx=0.,tiltz=0,weight=1.): """Torus as a ring with circular cross-section. Parameters: ----------- r : float Radius of the torus, in fractions of unity, i.e. between 0 and 1. rcross : float The radius of the torus cross-section (tube), in same units as r. xoff, yoff : floats x and y offsets of the shell center from (0,0). Positive values are to the right and up, negative to the left and down. In units if unity (remember that the image is within [-1,1]. Defaults: xoff = yoff = 0. 
tiltx, tiltz : floats The rotation angles of the model about the x and z axes, in degrees. Defaults are both 0 (= no rotation). weight : float Normalize the total (relative) mass contained in the shell to this value. The total mass is the sum of rho over all pixels (in 3D). This is useful if you e.g. want to have more than one component, and wish to distribute different amounts of mass inside each one. """ self.r = r self.rcross = rcross self.xoff = xoff self.yoff = yoff self.tiltx = tiltx self.tiltz = tiltz self.weight = weight self.sanity() self.get_rho() self.apply_rho_ops() # shift, rotate3d, smooth, in that order return self.rho def sanity(self): assert (0. < self.rcross <= self.r) assert (0. <= self.tiltx <= 180.) assert (0. <= self.tiltz <= 180.) def get_rho(self): """ A point (x,y,z) is inside the torus when: (x^2 + y^2 + z^2 + r^2 - rcross^2)^2 - 4 * r^2 * (x^2 + z^2) < 0 """ # To speed up computation a bit (the following expression are used twice each in the formula below) r2 = self.r**2 X2 = self.X**2 Z2 = self.Z**2 co = (X2 + self.Y**2 + Z2 + r2 - self.rcross**2)**2 - 4 * r2 * (X2 + Z2) < 0 self.set_rho(val=0.) # default is 0. self.rho[co] = 1. class ConstantDensityDualCone(Cube): def __init__(self,npix,transform=None,smoothing=1.): Cube.__init__(self,npix,transform=transform,smoothing=1.,buildkdtree=False,computeR=True) def __call__(self,r,theta,tiltx=0.,tiltz=0,xoff=0.,yoff=0.,weight=None): """Dual cone (i.e. two cones touching at the central point, with constant density inside. Parameters: ----------- r : float Radius (or height) of one cone above the center point, in fractions of unity, i.e. between 0 and 1. theta : float The opening angle (the angle covered by the cone), in degrees. tiltx, tiltz : floats The rotation angles of the model about the x and z axes, in degrees. Defaults are both 0 (= no rotation). xoff, yoff : floats x and y offsets of the shell center from (0,0). Positive values are to the right and up, negative to the left and down. 
In units if unity (remember that the image is within [-1,1]. Defaults: xoff = yoff = 0. weight : float Normalize the total (relative) mass contained in the shell to this value. The total mass is the sum of rho over all pixels (in 3D). This is useful if you e.g. want to have more than one component, and wish to distribute different amounts of mass inside each one. """ self.r = r self.theta_deg = theta self.theta_rad = N.radians(self.theta_deg) self.tiltx = tiltx self.tiltz = tiltz self.xoff = xoff self.yoff = yoff self.weight = weight self.get_rho() # get_rho should set self.rho (3D) self.apply_rho_ops() # shift, rotate3d, smooth, in that order return self.rho def sanity(self): """Sanity checks for constant density-edge shell. """ pass # not yet implemented def get_rho(self): """Compute rho(x,y,z) in every voxel. """ # cone formula aux = ((self.X**2 + self.Z**2) * N.cos(self.theta_rad)**2 - (self.Y*N.sin(self.theta_rad))**2) co1 = (aux <= 0) | N.isclose(aux,0) # radial caps co2 = (N.sqrt(self.X**2 + self.Y**2 + self.Z**2) <= self.r) # overall coall = co1 & co2 #| coaux # set all occupied voxels to one self.set_rho(val=0.) # default is 0. self.rho[coall] = 1.
#!/usr/bin/env python # this script sets up the testing packages to allow the tests # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, division, print_function import argparse import os import subprocess import platform import sys pip_packages = [ "autest", "hyper", "requests", "dnslib", "httpbin", ] distro_packages = { "RHEL": [ "install epel-release", "install python35", "install rh-python35-python-virtualenv" ], "Fedora": [ "install python3", "install python3-virtualenv", "install python-virtualenv", ], "Ubuntu": [ "install python3", "install python3-virtualenv", "install virtualenv" ], "CentOS": [ "install epel-release", "install rh-python35-python-virtualenv" ] } def command_output(cmd_str): print(cmd_str) proc = subprocess.Popen( cmd_str, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) # while command runs get output while proc.poll() == None: tmp = proc.stdout.readline() sys.stdout.write(tmp) for last_output in proc.stdout.readlines(): sys.stdout.write(last_output) return proc.returncode def get_distro(): return platform.linux_distribution() def distro_version(): return int(get_distro()[1].split(".")[0]) def isFedora(): return get_distro()[0].startswith("Fedora") def 
isCentOS(): return get_distro()[0].startswith("CentOS") def distro(): if isFedora(): return "Fedora" if isCentOS(): return "CentOS" if get_distro()[0].startswith("Red Hat"): return "RHEL" if get_distro()[0].startswith("Ubuntu"): return "Ubuntu" def isRedHatBased(): return get_distro()[0].startswith("Red Hat") or get_distro()[0].startswith( "Fedora") or get_distro()[0].startswith("CentOS") def isInstalled(prog): out = subprocess.Popen( ["which", prog], stdout=subprocess.PIPE).communicate() if out[0] != '': return True return False def installManagerName(): if isRedHatBased() and distro_version() >= 22: ret = "sudo dnf -y" # Fedora 22 or newer elif isRedHatBased(): ret = "sudo yum -y" # Red Hat distro else: ret = "sudo apt-get -y" # Ubuntu/Debian return ret def installToolName(): if isRedHatBased(): ret = "rpm -ihv" # Red Hat Based else: ret = "dpkg -iv" # Ubuntu/Debian return ret def run_cmds(cmds): for cmd in cmds: # print (cmd.split[" "]) # subprocess.call(cmd.split[" "]) if command_output(cmd): print("'{0}'' - Failed".format(cmd)) def gen_package_cmds(packages): # main install tool/manager (yum, dnf, apt-get, etc) mtool = installManagerName() # core install tool (rpm, dpkg, etc) itool = installToolName() ret = [] for p in packages: if p.startswith("wget"): pth = p[5:] pack = os.path.split(pth)[1] cmd = ["wget {0}".format(pth), "{0} ./{1}".format(itool, pack)] else: cmd = ["{0} {1}".format(mtool, p)] ret.extend(cmd) return ret extra = '' if distro() == 'RHEL' or distro() == 'CentOS': extra = ". 
/opt/rh/rh-python35/enable ;" def venv_cmds(path): ''' Create virtual environment and add it to the path being used for the script ''' return [ # first command only needed for rhel and centos systems at this time extra + " virtualenv --python=python3 {0}".format(path), extra + " {0}/bin/pip install pip --upgrade".format(path) ] def main(): " main script logic" parser = argparse.ArgumentParser() parser.add_argument( "--use-pip", nargs='?', default="pip", help="Which pip to use") parser.add_argument( "venv_path", nargs='?', default="env-test", help="The directory to us to for the virtualenv") parser.add_argument( "--disable-virtualenv", default=False, action='store_true', help="Do not create virtual environment to install packages under") parser.add_argument( '-V', '--version', action='version', version='%(prog)s 1.0.0') args = parser.parse_args() # print(args) # print(get_distro()) # do we know of packages to install for the given platform dist = distro() cmds = [] if dist: cmds = gen_package_cmds(distro_packages[dist]) # test to see if we should use a certain version of pip path_to_pip = None if args.use_pip != "pip": path_to_pip = args.use_pip # install on the system, or use virtualenv for pip based stuff if not args.disable_virtualenv: # Create virtual env cmds += venv_cmds(args.venv_path) if path_to_pip is None: path_to_pip = os.path.join(args.venv_path, "bin", args.use_pip) cmds += [extra + "{0} install {1}".format(path_to_pip, " ".join(pip_packages))] run_cmds(cmds) if __name__ == '__main__': main()
"""Test to verify that Home Assistant core works.""" # pylint: disable=protected-access,too-many-public-methods # pylint: disable=too-few-public-methods import asyncio import unittest from unittest.mock import patch, MagicMock from datetime import datetime, timedelta import pytz import homeassistant.core as ha from homeassistant.exceptions import InvalidEntityFormatError import homeassistant.util.dt as dt_util from homeassistant.util.unit_system import (METRIC_SYSTEM) from homeassistant.const import ( __version__, EVENT_STATE_CHANGED, ATTR_FRIENDLY_NAME, CONF_UNIT_SYSTEM) from tests.common import get_test_home_assistant PST = pytz.timezone('America/Los_Angeles') def test_split_entity_id(): """Test split_entity_id.""" assert ha.split_entity_id('domain.object_id') == ['domain', 'object_id'] def test_async_add_job_schedule_callback(): """Test that we schedule coroutines and add jobs to the job pool.""" hass = MagicMock() job = MagicMock() ha.HomeAssistant.async_add_job(hass, ha.callback(job)) assert len(hass.loop.call_soon.mock_calls) == 1 assert len(hass.loop.create_task.mock_calls) == 0 assert len(hass.add_job.mock_calls) == 0 @patch('asyncio.iscoroutinefunction', return_value=True) def test_async_add_job_schedule_coroutinefunction(mock_iscoro): """Test that we schedule coroutines and add jobs to the job pool.""" hass = MagicMock() job = MagicMock() ha.HomeAssistant.async_add_job(hass, job) assert len(hass.loop.call_soon.mock_calls) == 0 assert len(hass.loop.create_task.mock_calls) == 1 assert len(hass.add_job.mock_calls) == 0 @patch('asyncio.iscoroutinefunction', return_value=False) def test_async_add_job_add_threaded_job_to_pool(mock_iscoro): """Test that we schedule coroutines and add jobs to the job pool.""" hass = MagicMock() job = MagicMock() ha.HomeAssistant.async_add_job(hass, job) assert len(hass.loop.call_soon.mock_calls) == 0 assert len(hass.loop.create_task.mock_calls) == 0 assert len(hass.add_job.mock_calls) == 1 def test_async_run_job_calls_callback(): 
"""Test that the callback annotation is respected.""" hass = MagicMock() calls = [] def job(): calls.append(1) ha.HomeAssistant.async_run_job(hass, ha.callback(job)) assert len(calls) == 1 assert len(hass.async_add_job.mock_calls) == 0 def test_async_run_job_delegates_non_async(): """Test that the callback annotation is respected.""" hass = MagicMock() calls = [] def job(): calls.append(1) ha.HomeAssistant.async_run_job(hass, job) assert len(calls) == 0 assert len(hass.async_add_job.mock_calls) == 1 class TestHomeAssistant(unittest.TestCase): """Test the Home Assistant core classes.""" def setUp(self): # pylint: disable=invalid-name """Setup things to be run when tests are started.""" self.hass = get_test_home_assistant(0) def tearDown(self): # pylint: disable=invalid-name """Stop everything that was started.""" self.hass.stop() # This test hangs on `loop.add_signal_handler` # def test_start_and_sigterm(self): # """Start the test.""" # calls = [] # self.hass.bus.listen_once(EVENT_HOMEASSISTANT_START, # lambda event: calls.append(1)) # self.hass.start() # self.assertEqual(1, len(calls)) # self.hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, # lambda event: calls.append(1)) # os.kill(os.getpid(), signal.SIGTERM) # self.hass.block_till_done() # self.assertEqual(1, len(calls)) class TestEvent(unittest.TestCase): """A Test Event class.""" def test_eq(self): """Test events.""" now = dt_util.utcnow() data = {'some': 'attr'} event1, event2 = [ ha.Event('some_type', data, time_fired=now) for _ in range(2) ] self.assertEqual(event1, event2) def test_repr(self): """Test that repr method works.""" self.assertEqual( "<Event TestEvent[L]>", str(ha.Event("TestEvent"))) self.assertEqual( "<Event TestEvent[R]: beer=nice>", str(ha.Event("TestEvent", {"beer": "nice"}, ha.EventOrigin.remote))) def test_as_dict(self): """Test as dictionary.""" event_type = 'some_type' now = dt_util.utcnow() data = {'some': 'attr'} event = ha.Event(event_type, data, ha.EventOrigin.local, now) expected = 
{ 'event_type': event_type, 'data': data, 'origin': 'LOCAL', 'time_fired': now, } self.assertEqual(expected, event.as_dict()) class TestEventBus(unittest.TestCase): """Test EventBus methods.""" def setUp(self): # pylint: disable=invalid-name """Setup things to be run when tests are started.""" self.hass = get_test_home_assistant() self.bus = self.hass.bus self.bus.listen('test_event', lambda x: len) def tearDown(self): # pylint: disable=invalid-name """Stop down stuff we started.""" self.hass.stop() def test_add_remove_listener(self): """Test remove_listener method.""" old_count = len(self.bus.listeners) def listener(_): pass self.bus.listen('test', listener) self.assertEqual(old_count + 1, len(self.bus.listeners)) # Try deleting a non registered listener, nothing should happen self.bus._remove_listener('test', lambda x: len) # Remove listener self.bus._remove_listener('test', listener) self.assertEqual(old_count, len(self.bus.listeners)) # Try deleting listener while category doesn't exist either self.bus._remove_listener('test', listener) def test_unsubscribe_listener(self): """Test unsubscribe listener from returned function.""" calls = [] def listener(event): """Mock listener.""" calls.append(event) unsub = self.bus.listen('test', listener) self.bus.fire('test') self.hass.block_till_done() assert len(calls) == 1 unsub() self.bus.fire('event') self.hass.block_till_done() assert len(calls) == 1 def test_listen_once_event(self): """Test listen_once_event method.""" runs = [] self.bus.listen_once('test_event', lambda x: runs.append(1)) self.bus.fire('test_event') # Second time it should not increase runs self.bus.fire('test_event') self.hass.block_till_done() self.assertEqual(1, len(runs)) def test_thread_event_listener(self): """Test a event listener listeners.""" thread_calls = [] def thread_listener(event): thread_calls.append(event) self.bus.listen('test_thread', thread_listener) self.bus.fire('test_thread') self.hass.block_till_done() assert len(thread_calls) 
== 1 def test_callback_event_listener(self): """Test a event listener listeners.""" callback_calls = [] @ha.callback def callback_listener(event): callback_calls.append(event) self.bus.listen('test_callback', callback_listener) self.bus.fire('test_callback') self.hass.block_till_done() assert len(callback_calls) == 1 def test_coroutine_event_listener(self): """Test a event listener listeners.""" coroutine_calls = [] @asyncio.coroutine def coroutine_listener(event): coroutine_calls.append(event) self.bus.listen('test_coroutine', coroutine_listener) self.bus.fire('test_coroutine') self.hass.block_till_done() assert len(coroutine_calls) == 1 class TestState(unittest.TestCase): """Test State methods.""" def test_init(self): """Test state.init.""" self.assertRaises( InvalidEntityFormatError, ha.State, 'invalid_entity_format', 'test_state') def test_domain(self): """Test domain.""" state = ha.State('some_domain.hello', 'world') self.assertEqual('some_domain', state.domain) def test_object_id(self): """Test object ID.""" state = ha.State('domain.hello', 'world') self.assertEqual('hello', state.object_id) def test_name_if_no_friendly_name_attr(self): """Test if there is no friendly name.""" state = ha.State('domain.hello_world', 'world') self.assertEqual('hello world', state.name) def test_name_if_friendly_name_attr(self): """Test if there is a friendly name.""" name = 'Some Unique Name' state = ha.State('domain.hello_world', 'world', {ATTR_FRIENDLY_NAME: name}) self.assertEqual(name, state.name) def test_dict_conversion(self): """Test conversion of dict.""" state = ha.State('domain.hello', 'world', {'some': 'attr'}) self.assertEqual(state, ha.State.from_dict(state.as_dict())) def test_dict_conversion_with_wrong_data(self): """Test conversion with wrong data.""" self.assertIsNone(ha.State.from_dict(None)) self.assertIsNone(ha.State.from_dict({'state': 'yes'})) self.assertIsNone(ha.State.from_dict({'entity_id': 'yes'})) def test_repr(self): """Test state.repr.""" 
self.assertEqual("<state happy.happy=on @ 1984-12-08T12:00:00+00:00>", str(ha.State( "happy.happy", "on", last_changed=datetime(1984, 12, 8, 12, 0, 0)))) self.assertEqual( "<state happy.happy=on; brightness=144 @ " "1984-12-08T12:00:00+00:00>", str(ha.State("happy.happy", "on", {"brightness": 144}, datetime(1984, 12, 8, 12, 0, 0)))) class TestStateMachine(unittest.TestCase): """Test State machine methods.""" def setUp(self): # pylint: disable=invalid-name """Setup things to be run when tests are started.""" self.hass = get_test_home_assistant(0) self.states = self.hass.states self.states.set("light.Bowl", "on") self.states.set("switch.AC", "off") def tearDown(self): # pylint: disable=invalid-name """Stop down stuff we started.""" self.hass.stop() def test_is_state(self): """Test is_state method.""" self.assertTrue(self.states.is_state('light.Bowl', 'on')) self.assertFalse(self.states.is_state('light.Bowl', 'off')) self.assertFalse(self.states.is_state('light.Non_existing', 'on')) def test_is_state_attr(self): """Test is_state_attr method.""" self.states.set("light.Bowl", "on", {"brightness": 100}) self.assertTrue( self.states.is_state_attr('light.Bowl', 'brightness', 100)) self.assertFalse( self.states.is_state_attr('light.Bowl', 'friendly_name', 200)) self.assertFalse( self.states.is_state_attr('light.Bowl', 'friendly_name', 'Bowl')) self.assertFalse( self.states.is_state_attr('light.Non_existing', 'brightness', 100)) def test_entity_ids(self): """Test get_entity_ids method.""" ent_ids = self.states.entity_ids() self.assertEqual(2, len(ent_ids)) self.assertTrue('light.bowl' in ent_ids) self.assertTrue('switch.ac' in ent_ids) ent_ids = self.states.entity_ids('light') self.assertEqual(1, len(ent_ids)) self.assertTrue('light.bowl' in ent_ids) def test_all(self): """Test everything.""" states = sorted(state.entity_id for state in self.states.all()) self.assertEqual(['light.bowl', 'switch.ac'], states) def test_remove(self): """Test remove method.""" events = [] 
self.hass.bus.listen(EVENT_STATE_CHANGED, lambda event: events.append(event)) self.assertIn('light.bowl', self.states.entity_ids()) self.assertTrue(self.states.remove('light.bowl')) self.hass.block_till_done() self.assertNotIn('light.bowl', self.states.entity_ids()) self.assertEqual(1, len(events)) self.assertEqual('light.bowl', events[0].data.get('entity_id')) self.assertIsNotNone(events[0].data.get('old_state')) self.assertEqual('light.bowl', events[0].data['old_state'].entity_id) self.assertIsNone(events[0].data.get('new_state')) # If it does not exist, we should get False self.assertFalse(self.states.remove('light.Bowl')) self.hass.block_till_done() self.assertEqual(1, len(events)) def test_case_insensitivty(self): """Test insensitivty.""" runs = [] self.hass.bus.listen(EVENT_STATE_CHANGED, lambda event: runs.append(event)) self.states.set('light.BOWL', 'off') self.hass.block_till_done() self.assertTrue(self.states.is_state('light.bowl', 'off')) self.assertEqual(1, len(runs)) def test_last_changed_not_updated_on_same_state(self): """Test to not update the existing, same state.""" state = self.states.get('light.Bowl') future = dt_util.utcnow() + timedelta(hours=10) with patch('homeassistant.util.dt.utcnow', return_value=future): self.states.set("light.Bowl", "on", {'attr': 'triggers_change'}) self.hass.block_till_done() state2 = self.states.get('light.Bowl') assert state2 is not None assert state.last_changed == state2.last_changed def test_force_update(self): """Test force update option.""" events = [] self.hass.bus.listen(EVENT_STATE_CHANGED, lambda ev: events.append(ev)) self.states.set('light.bowl', 'on') self.hass.block_till_done() self.assertEqual(0, len(events)) self.states.set('light.bowl', 'on', None, True) self.hass.block_till_done() self.assertEqual(1, len(events)) class TestServiceCall(unittest.TestCase): """Test ServiceCall class.""" def test_repr(self): """Test repr method.""" self.assertEqual( "<ServiceCall homeassistant.start>", 
str(ha.ServiceCall('homeassistant', 'start'))) self.assertEqual( "<ServiceCall homeassistant.start: fast=yes>", str(ha.ServiceCall('homeassistant', 'start', {"fast": "yes"}))) class TestServiceRegistry(unittest.TestCase): """Test ServicerRegistry methods.""" def setUp(self): # pylint: disable=invalid-name """Setup things to be run when tests are started.""" self.hass = get_test_home_assistant() self.services = self.hass.services self.services.register("Test_Domain", "TEST_SERVICE", lambda x: None) def tearDown(self): # pylint: disable=invalid-name """Stop down stuff we started.""" self.hass.stop() def test_has_service(self): """Test has_service method.""" self.assertTrue( self.services.has_service("tesT_domaiN", "tesT_servicE")) self.assertFalse( self.services.has_service("test_domain", "non_existing")) self.assertFalse( self.services.has_service("non_existing", "test_service")) def test_services(self): """Test services.""" expected = { 'test_domain': {'test_service': {'description': '', 'fields': {}}} } self.assertEqual(expected, self.services.services) def test_call_with_blocking_done_in_time(self): """Test call with blocking.""" calls = [] def service_handler(call): """Service handler.""" calls.append(call) self.services.register("test_domain", "register_calls", service_handler) self.assertTrue( self.services.call('test_domain', 'REGISTER_CALLS', blocking=True)) self.assertEqual(1, len(calls)) def test_call_non_existing_with_blocking(self): """Test non-existing with blocking.""" prior = ha.SERVICE_CALL_LIMIT try: ha.SERVICE_CALL_LIMIT = 0.01 assert not self.services.call('test_domain', 'i_do_not_exist', blocking=True) finally: ha.SERVICE_CALL_LIMIT = prior def test_async_service(self): """Test registering and calling an async service.""" calls = [] @asyncio.coroutine def service_handler(call): """Service handler coroutine.""" calls.append(call) self.services.register('test_domain', 'register_calls', service_handler) self.assertTrue( 
self.services.call('test_domain', 'REGISTER_CALLS', blocking=True)) self.hass.block_till_done() self.assertEqual(1, len(calls)) def test_callback_service(self): """Test registering and calling an async service.""" calls = [] @ha.callback def service_handler(call): """Service handler coroutine.""" calls.append(call) self.services.register('test_domain', 'register_calls', service_handler) self.assertTrue( self.services.call('test_domain', 'REGISTER_CALLS', blocking=True)) self.hass.block_till_done() self.assertEqual(1, len(calls)) class TestConfig(unittest.TestCase): """Test configuration methods.""" def setUp(self): # pylint: disable=invalid-name """Setup things to be run when tests are started.""" self.config = ha.Config() self.assertIsNone(self.config.config_dir) def test_path_with_file(self): """Test get_config_path method.""" self.config.config_dir = '/tmp/ha-config' self.assertEqual("/tmp/ha-config/test.conf", self.config.path("test.conf")) def test_path_with_dir_and_file(self): """Test get_config_path method.""" self.config.config_dir = '/tmp/ha-config' self.assertEqual("/tmp/ha-config/dir/test.conf", self.config.path("dir", "test.conf")) def test_as_dict(self): """Test as dict.""" self.config.config_dir = '/tmp/ha-config' expected = { 'latitude': None, 'longitude': None, CONF_UNIT_SYSTEM: METRIC_SYSTEM.as_dict(), 'location_name': None, 'time_zone': 'UTC', 'components': [], 'config_dir': '/tmp/ha-config', 'version': __version__, } self.assertEqual(expected, self.config.as_dict()) class TestWorkerPool(unittest.TestCase): """Test WorkerPool methods.""" def test_exception_during_job(self): """Test exception during a job.""" pool = ha.create_worker_pool(1) def malicious_job(_): raise Exception("Test breaking worker pool") calls = [] def register_call(_): calls.append(1) pool.add_job(ha.JobPriority.EVENT_DEFAULT, (malicious_job, None)) pool.add_job(ha.JobPriority.EVENT_DEFAULT, (register_call, None)) pool.block_till_done() self.assertEqual(1, len(calls)) class 
TestWorkerPoolMonitor(object): """Test monitor_worker_pool.""" @patch('homeassistant.core._LOGGER.warning') def test_worker_pool_monitor(self, mock_warning, event_loop): """Test we log an error and increase threshold.""" hass = MagicMock() hass.pool.worker_count = 3 schedule_handle = MagicMock() hass.loop.call_later.return_value = schedule_handle ha.async_monitor_worker_pool(hass) assert hass.loop.call_later.called assert hass.bus.async_listen_once.called assert not schedule_handle.called check_threshold = hass.loop.call_later.mock_calls[0][1][1] hass.pool.queue_size = 8 check_threshold() assert not mock_warning.called hass.pool.queue_size = 9 check_threshold() assert mock_warning.called mock_warning.reset_mock() assert not mock_warning.called check_threshold() assert not mock_warning.called hass.pool.queue_size = 17 check_threshold() assert not mock_warning.called hass.pool.queue_size = 18 check_threshold() assert mock_warning.called hass.bus.async_listen_once.mock_calls[0][1][1](None) assert schedule_handle.cancel.called class TestAsyncCreateTimer(object): """Test create timer.""" @patch('homeassistant.core.asyncio.Event') @patch('homeassistant.core.dt_util.utcnow') def test_create_timer(self, mock_utcnow, mock_event, event_loop): """Test create timer fires correctly.""" hass = MagicMock() now = mock_utcnow() event = mock_event() now.second = 1 mock_utcnow.reset_mock() ha.async_create_timer(hass) assert len(hass.bus.async_listen_once.mock_calls) == 2 start_timer = hass.bus.async_listen_once.mock_calls[1][1][1] event_loop.run_until_complete(start_timer(None)) assert hass.loop.create_task.called timer = hass.loop.create_task.mock_calls[0][1][0] event.is_set.side_effect = False, False, True event_loop.run_until_complete(timer) assert len(mock_utcnow.mock_calls) == 1 assert hass.loop.call_soon.called event_type, event_data = hass.loop.call_soon.mock_calls[0][1][1:] assert ha.EVENT_TIME_CHANGED == event_type assert {ha.ATTR_NOW: now} == event_data stop_timer = 
hass.bus.async_listen_once.mock_calls[0][1][1] stop_timer(None) assert event.set.called
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

import unittest
from unittest.mock import Mock

from pants.backend.jvm.artifact import Artifact
from pants.backend.jvm.repository import Repository
from pants.backend.jvm.scala_artifact import ScalaArtifact
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.jar_publish import JarPublish
from pants.base.exceptions import TaskError
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.target import Target
from pants.scm.scm import Scm
from pants.testutil.jvm.nailgun_task_test_base import NailgunTaskTestBase
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_mkdir, safe_mkdtemp, safe_walk
from pants.util.memo import memoized_classproperty


class JarPublishTest(NailgunTaskTestBase):
    """End-to-end tests for the JarPublish task.

    The tests build a small synthetic target graph (nail <- shoe <- horse,
    optionally with an alias target in between) and drive ``task.execute()``
    with the SCM and publish side effects mocked out, asserting on pushdb
    contents and on how often the mocked collaborators were invoked.
    """

    @classmethod
    def task_type(cls):
        # Task under test, required by the pants test base class.
        return JarPublish

    def test_smoke_publish(self):
        # Sanity check: a local publish with no target roots completes without error.
        with temporary_dir() as publish_dir:
            self.set_options(local=publish_dir)
            task = self.create_task(self.context())
            task.execute()

    @classmethod
    def alias_groups(cls):
        # BUILD-file aliases used by the inline `provides=` snippets below;
        # "internal" resolves to a Repository backed by our temp pushdb dir.
        return BuildFileAliases(
            targets={"jar_library": JarLibrary, "java_library": JavaLibrary, "target": Target,},
            objects={"artifact": Artifact, "scala_artifact": ScalaArtifact,},
            context_aware_object_factories={
                "internal": lambda _: Repository(
                    name="internal", url="http://example.com", push_db_basedir=cls.push_db_basedir
                ),
            },
        )

    @memoized_classproperty
    def push_db_basedir(cls):
        # One temp pushdb dir per test class; cleaned per-test in setUp.
        return safe_mkdtemp()

    def setUp(self):
        super().setUp()
        # Start each test with an empty pushdb so file-count assertions are exact.
        safe_mkdir(self.push_db_basedir, clean=True)

    def _prepare_for_publishing(self, with_alias=False):
        """Build the nail/shoe/horse target chain; returns all created targets.

        With ``with_alias=True`` an intermediate (non-publishable) alias target
        is inserted between shoe and horse.
        """
        targets = []
        nail_target = self._create_nail_target()
        targets.append(nail_target)
        shoe_target = self.create_library(
            path="b",
            target_type="java_library",
            name="b",
            sources=["B.java"],
            provides="artifact(org='com.example', name='shoe', repo=internal)",
            dependencies=[nail_target.address.reference()],
        )
        targets.append(shoe_target)
        shoe_address = shoe_target.address.reference()
        if with_alias:
            # add an alias target between c and b
            alias_target = self.create_library(
                path="z", target_type="target", name="z", dependencies=[shoe_address]
            )
            targets.append(alias_target)
            horse_deps = [alias_target.address.reference()]
        else:
            horse_deps = [shoe_address]
        horse_target = self.create_library(
            path="c",
            target_type="java_library",
            name="c",
            sources=["C.java"],
            provides="artifact(org='com.example', name='horse', repo=internal)",
            dependencies=horse_deps,
        )
        targets.append(horse_target)
        return targets

    def _create_nail_target(self):
        # Leaf target with no dependencies; reused by several tests.
        return self.create_library(
            path="a",
            target_type="java_library",
            name="a",
            sources=["A.java"],
            provides="artifact(org='com.example', name='nail', repo=internal)",
        )

    def _prepare_targets_with_duplicates(self):
        # The standard chain plus a second target providing the same
        # com.example#nail artifact, to trigger duplicate detection.
        targets = list(self._prepare_for_publishing())
        conflict = self.create_library(
            path="conflict",
            target_type="java_library",
            name="conflict",
            sources=["Conflict.java"],
            provides="artifact(org='com.example', name='nail', repo=internal)",
        )
        targets.append(conflict)
        return targets

    def _get_repos(self):
        # Minimal repo config matching the "internal" repository alias.
        return {"internal": {"resolver": "example.com",}}

    def _prepare_mocks(self, task):
        """Mock out all SCM and artifact side effects on a JarPublish task."""
        task.scm = Mock()
        task.scm.changed_files = Mock(return_value=[])
        task._copy_artifact = Mock()
        task.create_source_jar = Mock()
        task.create_doc_jar = Mock()
        task.changelog = Mock(return_value="Many changes")
        task.publish = Mock()
        task.confirm_push = Mock(return_value=True)
        task.context.products.get = Mock(return_value=Mock())

    def test_publish_unlisted_repo(self):
        # Note that we set a different config here, so repos:internal has no config
        repos = {"another-repo": {"resolver": "example.org",}}
        targets = self._prepare_for_publishing()
        with temporary_dir():
            self.set_options(dryrun=False, repos=repos)
            task = self.create_task(self.context(target_roots=targets))
            self._prepare_mocks(task)
            # Assert both that a TaskError is raised AND that its message
            # mentions the missing repo (hence the inner try/re-raise).
            with self.assertRaises(TaskError):
                try:
                    task.execute()
                except TaskError as e:
                    assert "Repository internal has no" in str(e)
                    raise e

    def test_publish_local_dryrun(self):
        targets = self._prepare_for_publishing()
        with temporary_dir() as publish_dir:
            self.set_options(local=publish_dir)
            task = self.create_task(self.context(target_roots=targets))
            self._prepare_mocks(task)
            task.execute()
            # Nothing is written to the pushdb during a dryrun publish
            # (maybe some directories are created, but git will ignore them)
            files = []
            for _, _, filenames in safe_walk(self.push_db_basedir):
                files.extend(filenames)
            self.assertEqual(
                0, len(files), "Nothing should be written to the pushdb during a dryrun publish"
            )
            self.assertEqual(
                0, task.confirm_push.call_count, "Expected confirm_push not to be called"
            )
            self.assertEqual(0, task.publish.call_count, "Expected publish not to be called")

    def test_publish_local(self):
        # Run twice: once with an alias target in the chain, once without.
        for with_alias in [True, False]:
            targets = self._prepare_for_publishing(with_alias=with_alias)
            with temporary_dir() as publish_dir:
                self.set_options(dryrun=False, local=publish_dir)
                task = self.create_task(self.context(target_roots=targets))
                self._prepare_mocks(task)
                task.execute()
                # Nothing is written to the pushdb during a local publish
                # (maybe some directories are created, but git will ignore them)
                files = []
                for _, _, filenames in safe_walk(self.push_db_basedir):
                    files.extend(filenames)
                self.assertEqual(
                    0, len(files), "Nothing should be written to the pushdb during a local publish"
                )
                # Alias targets are not publishable artifacts themselves.
                publishable_count = len(targets) - (1 if with_alias else 0)
                self.assertEqual(
                    publishable_count,
                    task.confirm_push.call_count,
                    "Expected one call to confirm_push per artifact",
                )
                self.assertEqual(
                    publishable_count,
                    task.publish.call_count,
                    "Expected one call to publish per artifact",
                )

    def test_publish_remote(self):
        targets = self._prepare_for_publishing()
        self.set_options(dryrun=False, repos=self._get_repos(), push_postscript="\nPS")
        task = self.create_task(self.context(target_roots=targets))
        self._prepare_mocks(task)
        task.execute()
        # One file per task is written to the pushdb during a local publish
        files = []
        for _, _, filenames in safe_walk(self.push_db_basedir):
            files.extend(filenames)
        self.assertEqual(
            len(targets),
            len(files),
            "During a remote publish, one pushdb should be written per target",
        )
        self.assertEqual(
            len(targets),
            task.confirm_push.call_count,
            "Expected one call to confirm_push per artifact",
        )
        self.assertEqual(
            len(targets), task.publish.call_count, "Expected one call to publish per artifact"
        )
        self.assertEqual(
            len(targets), task.scm.commit.call_count, "Expected one call to scm.commit per artifact"
        )
        # The push_postscript ("\nPS") must end up as the last commit-message line.
        args, kwargs = task.scm.commit.call_args
        message = args[0]
        message_lines = message.splitlines()
        self.assertTrue(
            len(message_lines) > 1,
            "Expected at least one commit message line in addition to the post script.",
        )
        self.assertEqual("PS", message_lines[-1])
        self.assertEqual(
            len(targets), task.scm.add.call_count, "Expected one call to scm.add per artifact"
        )
        self.assertEqual(
            len(targets), task.scm.tag.call_count, "Expected one call to scm.tag per artifact"
        )
        # ... and likewise as the last line of the tag message.
        args, kwargs = task.scm.tag.call_args
        tag_name, tag_message = args
        tag_message_splitlines = tag_message.splitlines()
        self.assertTrue(
            len(tag_message_splitlines) > 1,
            "Expected at least one tag message line in addition to the post script.",
        )
        self.assertEqual("PS", tag_message_splitlines[-1])

    def test_publish_retry_works(self):
        self.set_options(dryrun=False, scm_push_attempts=3, repos=self._get_repos())
        task = self.create_task(self.context(target_roots=self._create_nail_target()))
        self._prepare_mocks(task)
        task.scm.push = Mock()
        # Fail twice, then succeed — within the 3-attempt budget.
        task.scm.push.side_effect = FailNTimes(2, Scm.RemoteException)
        task.execute()
        # Two failures, one success
        self.assertEqual(2 + 1, task.scm.push.call_count)

    def test_publish_retry_eventually_fails(self):
        # confirm that we fail if we have too many failed push attempts
        self.set_options(dryrun=False, scm_push_attempts=3, repos=self._get_repos())
        task = self.create_task(self.context(target_roots=self._create_nail_target()))
        self._prepare_mocks(task)
        task.scm.push = Mock()
        task.scm.push.side_effect = FailNTimes(3, Scm.RemoteException)
        with self.assertRaises(Scm.RemoteException):
            task.execute()

    def test_publish_retry_fails_immediately_with_exception_on_refresh_failure(self):
        # A LocalException during refresh must abort immediately — no retries.
        self.set_options(dryrun=False, scm_push_attempts=3, repos=self._get_repos())
        task = self.create_task(self.context(target_roots=self._create_nail_target()))
        self._prepare_mocks(task)
        task.scm.push = Mock()
        task.scm.push.side_effect = FailNTimes(3, Scm.RemoteException)
        task.scm.refresh = Mock()
        task.scm.refresh.side_effect = FailNTimes(1, Scm.LocalException)
        with self.assertRaises(Scm.LocalException):
            task.execute()
        self.assertEqual(1, task.scm.push.call_count)

    def test_publish_local_only(self):
        # With neither --local nor repos configured, task creation itself fails.
        with self.assertRaises(TaskError):
            self.create_task(self.context())

    def test_check_targets_fails_with_duplicate_artifacts(self):
        bad_targets = self._prepare_targets_with_duplicates()
        with temporary_dir() as publishdir:
            self.set_options(dryrun=False, local=publishdir)
            task = self.create_task(self.context(target_roots=bad_targets))
            self._prepare_mocks(task)
            with self.assertRaises(JarPublish.DuplicateArtifactError):
                task.check_targets(task.exported_targets())


class FailNTimes:
    """Callable side-effect helper: raises ``exc_type`` for the first
    ``tries`` calls, then returns ``success`` on every later call."""

    def __init__(self, tries, exc_type, success=None):
        self.tries = tries
        self.exc_type = exc_type
        self.success = success

    def __call__(self, *args, **kwargs):
        self.tries -= 1
        if self.tries >= 0:
            raise self.exc_type()
        else:
            return self.success


class FailNTimesTest(unittest.TestCase):
    """Self-test for the FailNTimes helper."""

    def test_fail_n_times(self):
        # First call raises, second call succeeds; assertRaises catches the first.
        with self.assertRaises(ValueError):
            foo = Mock()
            foo.bar.side_effect = FailNTimes(1, ValueError)
            foo.bar()
            foo.bar()


class JarPublishAuthTest(NailgunTaskTestBase):
    """Tests for backend jvm JarPublish class."""

    def _default_jvm_opts(self):
        """Return a fresh copy of this list every time."""
        return ["jvm_opt_1", "jvm_opt_2"]

    @classmethod
    def task_type(cls):
        return JarPublish

    def setUp(self):
        super().setUp()
        # Repo config includes an "auth" entry so auth-handling code paths exist.
        self.set_options(
            jvm_options=["-Dfoo=bar"],
            repos={
                "some_ext_repo": {
                    "resolver": "artifactory.foobar.com",
                    "confs": ["default", "sources"],
                    "auth": "",
                    "help": "You break it, you bought it",
                }
            },
        )
        context = self.context()
        self._jar_publish = self.create_task(context)

    def test_options_with_no_auth(self):
        """When called without authentication credentials, `JarPublish._ivy_jvm_options()`
        shouldn't modify any options."""
        self._jar_publish._jvm_options = self._default_jvm_opts()
        repo = {}
        modified_opts = self._jar_publish._ivy_jvm_options(repo)
        self.assertEqual(modified_opts, self._default_jvm_opts())

    def test_options_with_auth(self):
        """`JarPublish._ivy_jvm_options()` should produce the same list, when called
        multiple times with authentication credentials."""
        self._jar_publish._jvm_options = self._default_jvm_opts()
        username = "mjk"
        password = "h."
        creds_options = [f"-Dlogin={username}", f"-Dpassword={password}"]
        repo = {
            "auth": "blah",
            "username": username,
            "password": password,
        }
        modified_opts = self._jar_publish._ivy_jvm_options(repo)
        self.assertEqual(modified_opts, self._default_jvm_opts() + creds_options)
        # Now run it again, and make sure we don't get dupes.
        modified_opts = self._jar_publish._ivy_jvm_options(repo)
        self.assertEqual(modified_opts, self._default_jvm_opts() + creds_options)
#!/usr/bin/env python ########################################################### ## ## ## acts.py ## ## ## ## Author: Tony Fischetti ## ## tony.fischetti@gmail.com ## ## ## ########################################################### # ############################################################################## # # # Copyright (c) 2013, 2014, 2015, 2016, 2017, 2018, Tony Fischetti # # # # MIT License, http://www.opensource.org/licenses/mit-license.php # # # # Permission is hereby granted, free of charge, to any person obtaining a # # copy of this software and associated documentation files (the "Software"), # # to deal in the Software without restriction, including without limitation # # the rights to use, copy, modify, merge, publish, distribute, sublicense, # # and/or sell copies of the Software, and to permit persons to whom the # # Software is furnished to do so, subject to the following conditions: # # # # The above copyright notice and this permission notice shall be included in # # all copies or substantial portions of the Software. # # # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # # DEALINGS IN THE SOFTWARE. 
# # # ############################################################################## """ Various actions that the main entry delegates to """ from __future__ import unicode_literals from __future__ import print_function from subprocess import Popen import codecs import collections import fnmatch import glob import io import itertools import networkx as nx import os import re import string import sys import yaml class PatternTemplate(string.Template): delimiter = "%" def get_print_functions(settings): """ This returns the appropriate print functions in a tuple The print function are: - sprint - for standard printing - warn - for warnings - error - for errors This will all be the same if color is False. The returned print functions will contain an optional parameter that specifies the output level (verbose or not). If not verbose, the print function will ignore the message. """ verbose = settings["verbose"] # the regular print doesn't use color by default # (even if color is True) def sprint(message, level=None, color=False): if level=="verbose" and not verbose: return # for colors prepend = "" postfix = "" if settings["color"] and color: prepend = "\033[92m" postfix = "\033[0m" print("{}{}{}".format(prepend, message, postfix)) sys.stdout.flush() def warn(message, level=None, color=True): if level=="verbose" and not verbose: return # for colors prepend = "" postfix = "" if settings["color"] and color: prepend = "\033[93m" postfix = "\033[0m" print("{}{}{}".format(prepend, message, postfix)) sys.stdout.flush() def error(message, level=None, color=True): # this condition does really make any sense but w/e if level=="verbose" and not verbose: return # for colors prepend = "" postfix = "" if settings["color"] and color: prepend = "\033[91m" postfix = "\033[0m" print("{}{}{}".format(prepend, message, postfix), file=sys.stderr) sys.stderr.flush() return sprint, warn, error def find_standard_sakefile(settings): """Returns the filename of the appropriate sakefile""" error = 
settings["error"] if settings["customsake"]: custom = settings["customsake"] if not os.path.isfile(custom): error("Specified sakefile '{}' doesn't exist", custom) sys.exit(1) return custom # no custom specified, going over defaults in order for name in ["Sakefile", "Sakefile.yaml", "Sakefile.yml"]: if os.path.isfile(name): return name error("Error: there is no Sakefile to read") sys.exit(1) def parse(file, text, includes): try: sakefile = yaml.safe_load(text) or {} except yaml.YAMLError as exc: sys.stderr.write("Error: {} failed to parse as valid YAML\n".format(file)) if hasattr(exc, 'problem_mark'): mark = exc.problem_mark sys.stderr.write("Error near line {}\n".format(str(mark.line+1))) sys.exit(1) for filename, (subdata, subincludes) in includes.items(): sakefile.update(parse(filename, subdata, subincludes)) return sakefile def clean_path(a_path, force_os=None, force_start=None): """ This function is used to normalize the path (of an output or dependency) and also provide the path in relative form. It is relative to the current working directory """ if not force_start: force_start = os.curdir if force_os == "windows": import ntpath return ntpath.relpath(ntpath.normpath(a_path), start=force_start) if force_os == "posix": import posixpath return posixpath.relpath(posixpath.normpath(a_path), start=force_start) return os.path.relpath(os.path.normpath(a_path), start=force_start) def escp(target_name): """ This function is used by sake help. Since sakefiles allow for targets with spaces in them, sake help needs to quote all targets with spaces. 
This takes a target name and quotes it if necessary """ if ' ' in target_name: return '"{}"'.format(target_name) return target_name def get_help(sakefile): """ Returns the prettily formatted help strings (for printing) Args: A dictionary that is the parsed Sakefile (from sake.py) NOTE: the list sorting in this function is required for this function to be deterministic """ full_string = "You can 'sake' one of the following...\n\n" errmes = "target '{}' is not allowed to not have help message\n" outerlines = [] for target in sakefile: if target == "all": # this doesn't have a help message continue middle_lines = [] if "formula" not in sakefile[target]: # this means it's a meta-target innerstr = "{}:\n - {}\n\n".format(escp(target), sakefile[target]["help"]) inner = [] for atom_target in sakefile[target]: if atom_target == "help": continue inner.append(" {}:\n - {}\n\n".format(escp(atom_target), sakefile[target][atom_target]["help"])) if inner: innerstr += '\n'.join(sorted(inner)) middle_lines.append(innerstr) else: middle_lines.append("{}:\n - {}\n\n".format(escp(target), sakefile[target]["help"])) if middle_lines: outerlines.append('\n'.join(sorted(middle_lines))) if outerlines: full_string += '\n'.join(sorted(outerlines)) what_clean_does = "remove all targets' outputs and start from scratch" full_string += "\nclean:\n - {}\n\n".format(what_clean_does) what_visual_does = "output visual representation of project's dependencies" full_string += "visual:\n - {}\n".format(what_visual_does) full_string = re.sub("\n{3,}", "\n\n", full_string) return full_string def parse_defines(args): """ This parses a list of define argument in the form of -DNAME=VALUE or -DNAME ( which is treated as -DNAME=1). """ macros = {} for arg in args: try: var, val = arg.split('=', 1) except ValueError: var = arg val = '1' macros[var] = val return macros def expand_macros(raw_text, macros): """ this gets called before the sakefile is parsed. 
it looks for macros defined anywhere in the sakefile (the start of the line is '#!') and then replaces all occurences of '$variable' with the value defined in the macro. it then returns the contents of the file with the macros expanded. """ includes = {} result = [] pattern = re.compile("#!\s*(\w+)\s*(?:(\??\s*)=\s*(.*$)|or\s*(.*))", re.UNICODE) ipattern = re.compile("#<\s*(\S+)\s*(optional|or\s+(.+))?$", re.UNICODE) for line in raw_text.split("\n"): line = string.Template(line).safe_substitute(macros) # note that the line is appended to result before it is checked for macros # this prevents macros expanding into themselves result.append(line) if line.startswith("#!"): match = pattern.match(line) try: var, opt, val, or_ = match.group(1, 2, 3, 4) except: raise InvalidMacroError("Failed to parse macro {}\n".format(line)) if or_: if var not in macros: raise InvalidMacroError("Macro {} is not defined: {}\n".format(var, or_)) elif not (opt and var in macros): macros[var] = val elif line.startswith("#<"): match = ipattern.match(line) try: filename = match.group(1) except: error("Failed to parse include {}\n".format(line)) sys.exit(1) try: with io.open(filename, 'r') as f: includes[filename] = expand_macros(f.read(), macros) except IOError: if match.group(2): if match.group(2).startswith('or '): sprint(match.group(3)) else: error("Nonexistent include {}\n".format(filename)) sys.exit(1) return "\n".join(result), includes def check_for_dep_in_outputs(dep, verbose, G): """ Function to help construct_graph() identify dependencies Args: A dependency A flag indication verbosity A (populated) NetworkX DiGraph Returns: A list of targets that build given dependency """ if verbose: print("checking dep {}".format(dep)) ret_list = [] for node in G.nodes(data=True): if "output" not in node[1]: continue for out in node[1]['output']: if fnmatch.fnmatch(out, dep): ret_list.append(node[0]) break return ret_list def get_patterns(dep): engine = PatternTemplate(dep) empty = True patterns = 
[] for match in engine.pattern.finditer(dep): text = match.group("named") or match.group("braced") if text: empty = False patterns.append(text) if empty: return None, None else: return engine, patterns def expand_patterns(name, target, settings): error = settings["error"] data = collections.OrderedDict() if name == "all": return {name: target} elif "formula" not in target: # a meta-target res = {} for subname, subtarget in target.items(): if subname == "help": res["help"] = subtarget else: res.update(expand_patterns(subname, subtarget, settings)) return {name: res} if "dependencies" not in target or not target["dependencies"]: return {name: target} for dep in target["dependencies"]: engine, patterns = get_patterns(dep) if not patterns: continue subs = {} for pat in patterns: subs[pat] = "(?P<%s>.+?)" % pat try: matcher = engine.substitute(dict(zip(patterns, itertools.repeat("*")))) expanded = PatternTemplate(re.sub(r"\\(%|\{|\})", r"\1", re.escape(dep))).substitute(subs) except: error("Error parsing dependency patterns for target '{}'".format(name)) regex = re.compile(expanded) files = [] for f in glob.iglob(matcher): match = regex.match(f) assert match for k, v in match.groupdict().items(): if k not in data: data[k] = [v] else: data[k].append(v) if not data: return {name: target} # check for presence of output # it is not allowed to use a pattern # and not have outputs if "output" not in target or not target['output']: sys.exit("Target using pattern must have non-empty 'output' field") # based on http://stackoverflow.com/a/5228294/2097780 product = (dict(zip(data, x)) for x in itertools.product(*data.values())) res = {} for sub in product: new_outputs = [] new_deps = [] new_name = PatternTemplate(name).safe_substitute(sub) if new_name == name: errmes = "Target {} that has pattern in dependencies must have " errmes += "pattern in name" sys.exit(errmes.format(name)) new_help = PatternTemplate(target["help"]).safe_substitute(sub) new_formula = 
PatternTemplate(target["formula"]).safe_substitute(sub) for dep in target["dependencies"]: new_deps.append(PatternTemplate(dep).safe_substitute(sub)) for out in target["output"]: new_outputs.append(PatternTemplate(out).safe_substitute(sub)) res[new_name] = {"help": new_help, "output": new_outputs, "dependencies": new_deps, "formula": new_formula} return res def get_ties(G): """ If you specify a target that shares a dependency with another target, both targets need to be updated. This is because running one will resolve the sha mismatch and sake will think that the other one doesn't have to run. This is called a "tie". This function will find such ties. """ # we are going to make a dictionary whose keys are every dependency # and whose values are a list of all targets that use that dependency. # after making the dictionary, values whose length is above one will # be called "ties" ties = [] dep_dict = {} for node in G.nodes(data=True): if 'dependencies' in node[1]: for item in node[1]['dependencies']: if item not in dep_dict: dep_dict[item] = [] dep_dict[item].append(node[0]) for item in dep_dict: if len(list(set(dep_dict[item]))) > 1: ties.append(list(set(dep_dict[item]))) return ties def get_tied_targets(original_targets, the_ties): """ This function gets called when a target is specified to ensure that all 'tied' targets also get included in the subgraph to be built """ my_ties = [] for original_target in original_targets: for item in the_ties: if original_target in item: for thing in item: my_ties.append(thing) my_ties = list(set(my_ties)) if my_ties: ties_message = "" ties_message += "The following targets share dependencies and must be run together:" for item in sorted(my_ties): ties_message += "\n - {}".format(item) return list(set(my_ties+original_targets)), ties_message return original_targets, "" def construct_graph(sakefile, settings): """ Takes the sakefile dictionary and builds a NetworkX graph Args: A dictionary that is the parsed Sakefile (from 
sake.py) The settings dictionary Returns: A NetworkX graph """ verbose = settings["verbose"] sprint = settings["sprint"] G = nx.DiGraph() sprint("Going to construct Graph", level="verbose") for target in sakefile: if target == "all": # we don't want this node continue if "formula" not in sakefile[target]: # that means this is a meta target for atomtarget in sakefile[target]: if atomtarget == "help": continue sprint("Adding '{}'".format(atomtarget), level="verbose") data_dict = sakefile[target][atomtarget] data_dict["parent"] = target G.add_node(atomtarget, **data_dict) else: sprint("Adding '{}'".format(target), level="verbose") G.add_node(target, **sakefile[target]) sprint("Nodes are built\nBuilding connections", level="verbose") for node in G.nodes(data=True): sprint("checking node {} for dependencies".format(node[0]), level="verbose") # normalize all paths in output for k, v in node[1].items(): if v is None: node[1][k] = [] if "output" in node[1]: for index, out in enumerate(node[1]['output']): node[1]['output'][index] = clean_path(node[1]['output'][index]) if "dependencies" not in node[1]: continue sprint("it has dependencies", level="verbose") connects = [] # normalize all paths in dependencies for index, dep in enumerate(node[1]['dependencies']): dep = os.path.normpath(dep) shrt = "dependencies" node[1]['dependencies'][index] = clean_path(node[1][shrt][index]) for node in G.nodes(data=True): connects = [] if "dependencies" not in node[1]: continue for dep in node[1]['dependencies']: matches = check_for_dep_in_outputs(dep, verbose, G) if not matches: continue for match in matches: sprint("Appending {} to matches".format(match), level="verbose") connects.append(match) if connects: for connect in connects: G.add_edge(connect, node[0]) return G def get_all_outputs(node_dict): """ This function takes a node dictionary and returns a list of the node's output files. 
Some of the entries in the 'output' attribute may be globs, and without this function, sake won't know how to handle that. This will unglob all globs and return the true list of *all* outputs. """ outlist = [] for item in node_dict['output']: glist = glob.glob(item) if glist: for oneglob in glist: outlist.append(oneglob) else: outlist.append(item) return outlist def get_all_dependencies(node_dict): """ ............................... """ deplist = [] for item in node_dict['dependencies']: glist = glob.glob(item) if glist: for oneglob in glist: deplist.append(oneglob) else: deplist.append(item) return deplist def clean_all(G, settings): """ Removes all the output files from all targets. Takes the graph as the only argument Args: The networkx graph object The settings dictionary Returns: 0 if successful 1 if removing even one file failed """ quiet = settings["quiet"] recon = settings["recon"] sprint = settings["sprint"] error = settings["error"] all_outputs = [] for node in G.nodes(data=True): if "output" in node[1]: for item in get_all_outputs(node[1]): all_outputs.append(item) all_outputs.append(".shastore") retcode = 0 for item in sorted(all_outputs): if os.path.isfile(item): if recon: sprint("Would remove file: {}".format(item)) continue sprint("Attempting to remove file '{}'", level="verbose") try: os.remove(item) sprint("Removed file", level="verbose") except: errmes = "Error: file '{}' failed to be removed" error(errmes.format(item)) retcode = 1 if not retcode and not recon: sprint("All clean", color=True) return retcode def write_dot_file(G, filename): """ Writes the graph G in dot file format for graphviz visualization. 
Args: a Networkx graph A filename to name the dot files """ with io.open(filename, "w") as fh: fh.write("strict digraph DependencyDiagram {\n") edge_list = G.edges() node_list = set(G.nodes()) if edge_list: for edge in sorted(edge_list): source, targ = edge node_list = node_list - set(source) node_list = node_list - set(targ) line = '"{}" -> "{}";\n' fh.write(line.format(source, targ)) # draw nodes with no links if node_list: for node in sorted(node_list): line = '"{}"\n'.format(node) fh.write(line) fh.write("}") def visualize(G, settings, filename="dependencies", no_graphviz=False): """ Uses networkX to draw a graphviz dot file either (a) calls the graphviz command "dot" to turn it into a SVG and remove the dotfile (default), or (b) if no_graphviz is True, just output the graphviz dot file Args: a NetworkX DiGraph the settings dictionary a filename (a default is provided a flag indicating whether graphviz should *not* be called Returns: 0 if everything worked will cause fatal error on failure """ error = settings["error"] if no_graphviz: write_dot_file(G, filename) return 0 write_dot_file(G, "tempdot") renderer = "svg" if re.search("\.jpg$", filename, re.IGNORECASE): renderer = "jpg" elif re.search("\.jpeg$", filename, re.IGNORECASE): renderer = "jpg" elif re.search("\.svg$", filename, re.IGNORECASE): renderer = "svg" elif re.search("\.png$", filename, re.IGNORECASE): renderer = "png" elif re.search("\.gif$", filename, re.IGNORECASE): renderer = "gif" elif re.search("\.ps$", filename, re.IGNORECASE): renderer = "ps" elif re.search("\.pdf$", filename, re.IGNORECASE): renderer = "pdf" else: renderer = "svg" filename += ".svg" command = "dot -T{} tempdot -o {}".format(renderer, filename) p = Popen(command, shell=True) p.communicate() if p.returncode: errmes = "Either graphviz is not installed, or its not on PATH" os.remove("tempdot") error(errmes) sys.exit(1) os.remove("tempdot") return 0 ##################### ## CUSTOM ERRORS ## ##################### class 
Error(Exception): """Base class for exceptions in this module.""" pass class InvalidMacroError(Error): def __init__(self, message): self.message = message
from datetime import timezone
from typing import Any, List

import numpy as np
from astropy import units as u
from astropy.coordinates import CartesianRepresentation
from astropy.time import Time, TimeDelta
from czml3.core import Document, Packet, Preamble
from czml3.enums import InterpolationAlgorithms, ReferenceFrames
from czml3.properties import (
    Billboard,
    Clock,
    Color,
    Label,
    Material,
    Path,
    Position,
    SolidColorMaterial,
)
from czml3.types import IntervalValue, TimeInterval

from poliastro.bodies import Earth
from poliastro.czml.utils import ellipsoidal_to_cartesian, project_point_on_ellipsoid
from poliastro.twobody.propagation import propagate

# Base64-encoded PNG billboard icons for satellites and ground stations.
PIC_SATELLITE = (
    "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAX"
    "NSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAADJSURBVD"
    "hPnZHRDcMgEEMZjVEYpaNklIzSEfLfD4qNnXAJSFWfhO7w2Zc0Tf9QG2rXrEzSUeZLOGm47W"
    "oH95x3Hl3jEgilvDgsOQUTqsNl68ezEwn1vae6lceSEEYvvWNT/Rxc4CXQNGadho1NXoJ+9i"
    "aqc2xi2xbt23PJCDIB6TQjOC6Bho/sDy3fBQT8PrVhibU7yBFcEPaRxOoeTwbwByCOYf9VGp"
    "1BYI1BA+EeHhmfzKbBoJEQwn1yzUZtyspIQUha85MpkNIXB7GizqDEECsAAAAASUVORK5CYI"
    "I="
)
PIC_GROUNDSTATION = (
    "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAX"
    "NSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAACvSURBVD"
    "hPrZDRDcMgDAU9GqN0lIzijw6SUbJJygUeNQgSqepJTyHG91LVVpwDdfxM3T9TSl1EXZvDwi"
    "i471fivK73cBFFQNTT/d2KoGpfGOpSIkhUpgUMxq9DFEsWv4IXhlyCnhBFnZcFEEuYqbiUlN"
    "wWgMTdrZ3JbQFoEVG53rd8ztG9aPJMnBUQf/VFraBJeWnLS0RfjbKyLJA8FkT5seDYS1Qwyv"
    "8t0B/5C2ZmH2/eTGNNBgMmAAAAAElFTkSuQmCC"
)


class CZMLExtractor:
    """A class for extracting orbitary data to Cesium"""

    def __init__(
        self, start_epoch, end_epoch, N, attractor=None, pr_map=None, scene3D=True
    ):
        """
        Orbital constructor

        Parameters
        ----------
        start_epoch: ~astropy.time.core.Time
            Starting epoch
        end_epoch: ~astropy.time.core.Time
            Ending epoch
        N: int
            Default number of sample points.
            Unless otherwise specified, the number of sampled data points
            will be N when calling add_orbit()
        attractor: poliastro.Body
            Attractor of the orbits
        pr_map: str
            URL to a UV projection map for the attractor ellipsoid
        scene3D: bool
            Determines the scene mode. If set to true, the scene is set to
            3D mode, otherwise it's the orthographic projection.
        """
        self.packets = []  # type: List[Packet]
        self.trajectories = []  # type: List[Any]
        self.attractor = attractor
        self.orbits = []  # type: List[Any]
        self.N = N
        # Running counter used as the CZML packet id for orbits/trajectories.
        self.i = 0
        # Running counter for ground-station packet ids.
        self.gs_n = 0

        if not self.attractor:
            self.attractor = Earth
        elif not (self.attractor.R and self.attractor.R_polar):
            raise ValueError(
                "Invalid ellipsoid of attractor.\n"
                + "Make sure your body has valid 'R' and 'R_polar' parameters"
            )

        # Ellipsoid radii in meters: (x, y, z) with x == y (oblate spheroid).
        ellipsoid = (
            self.attractor.R.to(u.m).value,
            self.attractor.R.to(u.m).value,
            self.attractor.R_polar.to(u.m).value,
        )
        self.cust_prop = [ellipsoid, pr_map, scene3D]

        self.start_epoch = Time(start_epoch, format="isot")
        self.end_epoch = Time(end_epoch, format="isot")

        self._init_czml_()
        self._change_custom_params(*self.cust_prop)

    def _init_orbit_packet_cords_(self, i, rtol):
        """
        Sample the i-th orbit and return a flat CZML cartesian list
        [t0, x0, y0, z0, t1, x1, y1, z1, ...].

        Parameters
        ----------
        i: int
            Index of referenced orbit
        rtol: float
            Maximum relative error permitted

        Returns
        -------
        coordinate list
        """
        cart_cords = []  # type: List[float]

        # Sample step: total time span divided by the orbit's N.
        h = (self.end_epoch - self.orbits[i][2]).to(u.second) / self.orbits[i][1]

        # Get rounding factor given the relative tolerance
        rf = 0
        while rtol < 1:
            rtol *= 10
            rf += 1

        for k in range(self.orbits[i][1] + 2):
            position = propagate(self.orbits[i][0], TimeDelta(k * h), rtol=rtol)
            cords = position.represent_as(CartesianRepresentation).xyz.to(u.meter).value
            # Prepend the sample time offset (seconds) before x, y, z.
            cords = np.insert(cords, 0, h.value * k, axis=0)
            # flatten list
            cart_cords += list(map(lambda x: round(x[0], rf), cords.tolist()))

        return cart_cords

    def _init_groundtrack_packet_cords_(self, i, rtol):
        """
        Sample the i-th orbit and project each point onto the attractor
        ellipsoid, returning a flat [t, x, y, z, ...] groundtrack list.

        Parameters
        ----------
        i: int
            Index of referenced orbit
        rtol: float
            Maximum relative error permitted

        Returns
        -------
        coordinate list
        """
        cart_cords = []  # type: List[float]

        h = (self.end_epoch - self.orbits[i][2]).to(u.second) / self.orbits[i][1]

        # Get rounding factor given the relative tolerance
        rf = 0
        while rtol < 1:
            rtol *= 10
            rf += 1

        ellipsoid = self.cust_prop[0]

        for k in range(self.orbits[i][1] + 2):
            position = propagate(self.orbits[i][0], TimeDelta(k * h), rtol=rtol)
            cords = position.represent_as(CartesianRepresentation).xyz.to(u.meter).value
            cords = np.insert(cords, 0, h.value * k, axis=0)

            # flatten list
            cords = list(map(lambda x: round(x[0], rf), cords.tolist()))
            t, p = cords[0], cords[1:]

            pr_p = project_point_on_ellipsoid(
                p[0], p[1], p[2], ellipsoid[0], ellipsoid[1], ellipsoid[2]
            )
            # Add a small number to ensure that our point lies above the surface of the
            # ellipsoid. We do this because small losses in precision may cause the point
            # to lie slightly bellow the surface. An iterative method could be used instead
            # but the error margin is too small to be worth it.
            _cords = t, pr_p[0] + 0.1, pr_p[1] + 0.1, pr_p[2] + 0.1
            cart_cords += _cords

        return cart_cords

    def _init_czml_(self):
        """
        Only called at the initialization of the extractor
        Builds the preamble (document) packet with the global clock.
        """
        pckt = Preamble(
            name="document_packet",
            clock=IntervalValue(
                start=self.start_epoch,
                end=self.end_epoch,
                value=Clock(
                    currentTime=self.start_epoch.datetime.replace(tzinfo=timezone.utc),
                    multiplier=60,
                ),
            ),
        )
        self.packets.append(pckt)

    def _change_custom_params(self, ellipsoid, pr_map, scene3D):
        """
        Change the custom properties package.

        Parameters
        ----------
        ellipsoid: list(int)
            Defines the attractor ellipsoid. The list must have three
            numbers representing the radii in the x, y and z axis
        pr_map: str
            A URL to the projection of the defined ellipsoid (UV map)
        scene3D: bool
            3D scene when True, orthographic projection otherwise
        """
        if pr_map is None:
            # BUG FIX: the original default assignment had a trailing comma,
            # which made pr_map a 1-tuple instead of a URL string.
            pr_map = (
                "https://upload.wikimedia.org/wikipedia/commons/c/c4/Earthmap1000x500compac.jpg"
            )
            self.cust_prop[1] = pr_map

        custom_props = {
            "custom_attractor": True,
            "ellipsoid": [{"array": ellipsoid}],
            "map_url": pr_map,
            "scene3D": scene3D,
        }

        pckt = Packet(id="custom_properties", properties=custom_props)
        self.packets.append(pckt)

    def add_ground_station(
        self,
        pos,
        id_description=None,
        label_fill_color=None,
        label_font=None,
        label_outline_color=None,
        label_text=None,
        label_show=True,
    ):
        """
        Adds a ground station

        Parameters
        ----------
        pos: list [~astropy.units]
            coordinates of ground station,
            [u v] ellipsoidal coordinates (0 elevation)
        id_description: str
            Set ground station description
        label_fill_color: list (int)
            Fill Color in rgba format
        label_outline_color: list (int)
            Outline Color in rgba format
        label_font: str
            Set label font style and size (CSS syntax)
        label_text: str
            Set label text
        label_show: bool
            Indicates whether the label is visible
        """
        if (
            len(pos) == 2
            and isinstance(pos[0], u.quantity.Quantity)
            # BUG FIX: the original tested pos[0] twice, so a non-Quantity
            # second coordinate slipped through the validation.
            and isinstance(pos[1], u.quantity.Quantity)
        ):
            u0, v0 = pos

            if self.cust_prop[0]:
                a, b = (
                    self.cust_prop[0][0],
                    self.cust_prop[0][2],
                )  # get semi-major and semi-minor axises
            else:
                a, b = Earth.R.to(u.m).value, Earth.R_polar.to(u.m).value

            pos = list(map(lambda x: x.value, ellipsoidal_to_cartesian(a, b, u0, v0)))
        else:
            raise TypeError(
                "Invalid coordinates. Coordinates must be of the form [u, v] where u, v are astropy units"
            )

        pckt = Packet(
            id="GS" + str(self.gs_n),
            description=id_description,
            availability=TimeInterval(start=self.start_epoch, end=self.end_epoch),
            position=Position(cartesian=pos),
            label=Label(
                show=label_show,
                text=label_text,
                font=label_font if label_font is not None else "11pt Lucida Console",
                fillColor=Color(rgba=label_fill_color)
                if label_fill_color is not None
                else None,
                outlineColor=Color(rgba=label_outline_color)
                if label_outline_color is not None
                else None,
            ),
            billboard=Billboard(image=PIC_GROUNDSTATION, show=True),
        )

        self.packets.append(pckt)
        self.gs_n += 1

    def add_orbit(
        self,
        orbit,
        rtol=1e-10,
        N=None,
        groundtrack_show=False,
        groundtrack_lead_time=None,
        groundtrack_trail_time=None,
        groundtrack_width=None,
        groundtrack_color=None,
        id_name=None,
        id_description=None,
        path_width=None,
        path_show=None,
        path_color=None,
        label_fill_color=None,
        label_outline_color=None,
        label_font=None,
        label_text=None,
        label_show=None,
    ):
        """
        Adds an orbit

        Parameters
        ----------
        orbit: poliastro.Orbit
            Orbit to be added
        rtol: float
            Maximum relative error permitted
        N: int
            Number of sample points
        groundtrack_show: bool
            If set to true, the groundtrack is displayed.
        groundtrack_lead_time: double
            The time the animation is ahead of the real-time groundtrack
        groundtrack_trail_time: double
            The time the animation is behind the real-time groundtrack
        groundtrack_width: int
            Groundtrack width
        groundtrack_color: list (int)
            Rgba groundtrack color. By default, it is set to the path color
        id_name: str
            Set orbit name
        id_description: str
            Set orbit description
        path_width: int
            Path width
        path_show: bool
            Indicates whether the path is visible
        path_color: list (int)
            Rgba path color
        label_fill_color: list (int)
            Fill Color in rgba format
        label_outline_color: list (int)
            Outline Color in rgba format
        label_font: str
            Set label font style and size (CSS syntax)
        label_text: str
            Set label text
        label_show: bool
            Indicates whether the label is visible
        """
        if N is None:
            N = self.N

        # Bring the orbit's epoch into the extractor's time window.
        if orbit.epoch < Time(self.start_epoch):
            orbit = orbit.propagate(self.start_epoch - orbit.epoch)
        elif orbit.epoch > Time(self.end_epoch):
            raise ValueError(
                "The orbit's epoch cannot exceed the constructor's ending epoch"
            )

        if rtol <= 0 or rtol >= 1:
            raise ValueError(
                "The relative tolerance must be a value in the range (0, 1)"
            )

        self.orbits.append([orbit, N, orbit.epoch])
        cartesian_cords = self._init_orbit_packet_cords_(self.i, rtol=rtol)

        start_epoch = Time(min(self.orbits[self.i][2], self.start_epoch), format="isot")

        pckt = Packet(
            id=self.i,
            name=id_name,
            description=id_description,
            availability=TimeInterval(start=self.start_epoch, end=self.end_epoch),
            position=Position(
                interpolationDegree=5,
                interpolationAlgorithm=InterpolationAlgorithms.LAGRANGE,
                referenceFrame=ReferenceFrames.INERTIAL,
                cartesian=cartesian_cords,
                # Use explicit UTC timezone, rather than the default, which is a local timezone.
                epoch=start_epoch.datetime.replace(tzinfo=timezone.utc),
            ),
            path=Path(
                show=path_show,
                width=path_width,
                material=Material(
                    solidColor=SolidColorMaterial(color=Color.from_list(path_color))
                )
                if path_color is not None
                else Material(
                    solidColor=SolidColorMaterial(color=Color.from_list([255, 255, 0]))
                ),
                resolution=120,
            ),
            label=Label(
                text=label_text,
                font=label_font if label_font is not None else "11pt Lucida Console",
                show=label_show,
                fillColor=Color(rgba=label_fill_color)
                if label_fill_color is not None
                else Color(rgba=[255, 255, 0, 255]),
                outlineColor=Color(rgba=label_outline_color)
                if label_outline_color is not None
                else Color(rgba=[255, 255, 0, 255]),
            ),
            billboard=Billboard(image=PIC_SATELLITE, show=True),
        )

        self.packets.append(pckt)

        if groundtrack_show:
            groundtrack_color = path_color

            groundtrack_cords = self._init_groundtrack_packet_cords_(self.i, rtol=rtol)
            pckt = Packet(
                id="groundtrack" + str(self.i),
                availability=TimeInterval(start=self.start_epoch, end=self.end_epoch),
                position=Position(
                    interpolationDegree=5,
                    interpolationAlgorithm=InterpolationAlgorithms.LAGRANGE,
                    referenceFrame=ReferenceFrames.INERTIAL,
                    cartesian=groundtrack_cords,
                    # Use explicit UTC timezone, rather than the default, which is a local timezone.
                    epoch=start_epoch.datetime.replace(tzinfo=timezone.utc),
                ),
                path=Path(
                    show=True,
                    material=Material(
                        solidColor=SolidColorMaterial(
                            color=Color(rgba=groundtrack_color)
                        )
                    )
                    if groundtrack_color is not None
                    else Material(
                        solidColor=SolidColorMaterial(
                            color=Color(rgba=[255, 255, 0, 255])
                        )
                    ),
                    resolution=60,
                    width=groundtrack_width,
                    leadTime=groundtrack_lead_time if groundtrack_lead_time else 100,
                    trailTime=groundtrack_trail_time if groundtrack_trail_time else 100,
                ),
            )
            self.packets.append(pckt)

        self.i += 1

    def add_trajectory(
        self,
        positions,
        epochs,
        groundtrack_show=False,
        groundtrack_lead_time=None,
        groundtrack_trail_time=None,
        groundtrack_width=None,
        groundtrack_color=None,
        id_name=None,
        id_description=None,
        path_width=None,
        path_show=None,
        path_color=None,
        label_fill_color=None,
        label_outline_color=None,
        label_font=None,
        label_text=None,
        label_show=None,
    ):
        """
        Adds trajectory

        Parameters
        ----------
        positions: ~astropy.coordinates.CartesianRepresentation
            Trajectory to plot.
        epochs: ~astropy.time.core.Time
            Epochs for positions.
        groundtrack_show: bool
            If set to true, the groundtrack is displayed.
        groundtrack_lead_time: double
            The time the animation is ahead of the real-time groundtrack
        groundtrack_trail_time: double
            The time the animation is behind the real-time groundtrack
        groundtrack_width: int
            Groundtrack width
        groundtrack_color: list (int)
            Rgba groundtrack color. By default, it is set to the path color
        id_name: str
            Set orbit name
        id_description: str
            Set orbit description
        path_width: int
            Path width
        path_show: bool
            Indicates whether the path is visible
        path_color: list (int)
            Rgba path color
        label_fill_color: list (int)
            Fill Color in rgba format
        label_outline_color: list (int)
            Outline Color in rgba format
        label_font: str
            Set label font style and size (CSS syntax)
        label_text: str
            Set label text
        label_show: bool
            Indicates whether the label is visible
        """
        if self.attractor is None:
            raise ValueError("An attractor must be set up first.")

        positions = (
            positions.represent_as(CartesianRepresentation).get_xyz(1).to(u.meter).value
        )
        epochs = Time(epochs, format="isot")

        if len(epochs) != len(positions):
            raise ValueError("Number of Points and Epochs must be equal.")

        # Convert epochs to seconds elapsed since the first sample.
        # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented replacement.
        epochs = np.fromiter(
            map(lambda epoch: (epoch - epochs[0]).to(u.second).value, epochs),
            dtype=float,
        )

        # Interleave [t, x, y, z, ...] and round to one decimal place.
        positions = np.around(
            np.concatenate([epochs[..., None], positions], axis=1).ravel(), 1
        ).tolist()

        self.trajectories.append([positions, None, label_text, path_color])

        start_epoch = Time(self.start_epoch, format="isot")

        pckt = Packet(
            id=self.i,
            name=id_name,
            description=id_description,
            availability=TimeInterval(start=self.start_epoch, end=self.end_epoch),
            position=Position(
                interpolationDegree=5,
                interpolationAlgorithm=InterpolationAlgorithms.LAGRANGE,
                referenceFrame=ReferenceFrames.INERTIAL,
                cartesian=positions,
                # Use explicit UTC timezone, rather than the default, which is a local timezone.
                epoch=start_epoch.datetime.replace(tzinfo=timezone.utc),
            ),
            path=Path(
                show=path_show,
                width=path_width,
                material=Material(
                    solidColor=SolidColorMaterial(color=Color.from_list(path_color))
                )
                if path_color is not None
                else Material(
                    solidColor=SolidColorMaterial(color=Color.from_list([255, 255, 0]))
                ),
                resolution=120,
            ),
            label=Label(
                text=label_text,
                font=label_font if label_font is not None else "11pt Lucida Console",
                show=label_show,
                fillColor=Color(rgba=label_fill_color)
                if label_fill_color is not None
                else Color(rgba=[255, 255, 0, 255]),
                outlineColor=Color(rgba=label_outline_color)
                if label_outline_color is not None
                else Color(rgba=[255, 255, 0, 255]),
            ),
            billboard=Billboard(image=PIC_SATELLITE, show=True),
        )

        self.packets.append(pckt)

        if groundtrack_show:
            raise NotImplementedError(
                "Ground tracking for trajectory not implemented yet"
            )

        self.i += 1

    def get_document(self):
        """Return the CZML Document built from all packets added so far."""
        return Document(self.packets)
#!/usr/bin/env python

from datetime import datetime
import json
import os
import time

import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote

from google.appengine.api import urlfetch
from google.appengine.ext import ndb

from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import TeeShirtSize
from models import Conference
from models import ConferenceForm
from models import Session
from models import Speaker

from settings import WEB_CLIENT_ID
from utils import getUserId

from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from models import BooleanMessage
from models import ConflictException
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from models import StringMessage
from models import SessionForm
from models import SpeakerForm
from models import SpeakerForms
from models import SessionForms
from models import QueryForm
from models import QueryForms
from models import ConferenceSessionQueryForm
from models import ConferenceSessionTypeSessionQueryForm
from models import SpeakerSessionQueryForm
from models import SessionStartTimeQueryForm
from models import ConferenceSessionTypeStartTimeQueryForm
from models import SessionStartTimeDurationQueryForm
from models import SessionMinStartTimeDurationHighlightsQueryForm

"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints,
Extended the provided code and added new methods/Endpoints

$Id: conference.py,v 1 2016/01/03 Zeeshan Ahamd

Author: Zeeshan Ahmad
Email: ahmad.zeeshaan@gmail.com
"""

__author__ = 'ahmad.zeeshan@gmail.com (Zeeshan Ahmad)'

# ResourceContainers: request bodies plus path/query parameters for the
# endpoints that address a single entity by its websafe datastore key.
CONF_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey=messages.StringField(1),
)

SESSION_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeSessionKey=messages.StringField(1),
)

SPEAKER_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeSpeakerKey=messages.StringField(1),
)

EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID

# Default values applied to a new Conference when the client omits them.
DEFAULTS = {
    "city": "Default City",
    "maxAttendees": 0,
    "seatsAvailable": 0,
    "topics": ["Default", "Topic"],
}

# Mapping from the query-form operator names to NDB filter operators.
OPERATORS = {
    'EQ': '=',
    'GT': '>',
    'GTEQ': '>=',
    'LT': '<',
    'LTEQ': '<=',
    'NE': '!='
}

# Mapping from the query-form field names to Conference property names.
FIELDS = {
    'CITY': 'city',
    'TOPIC': 'topics',
    'MONTH': 'month',
    'MAX_ATTENDEES': 'maxAttendees',
    'NAME': 'name',
    'INTERESTS': 'interests',
    'ORGANIZATION': 'organization',
}

MEMCACHE_ANNOUNCEMENTS_KEY = 'MEMCACHE_ANNOUNCEMENTS_KEY'
MEMCACHE_FEATURED_SPEAKER_KEY = 'MEMCACHE_FEATURED_SPEAKER_KEY'

# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -


@endpoints.api(name='conference',
               version='v1',
               allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID],
               scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
    """Conference API v0.1"""

# - - - Profile objects - - - - - - - - - - - - - - - - - - -

    def _copyProfileToForm(self, prof):
        """Copy relevant fields from Profile to ProfileForm."""
        # copy relevant fields from Profile to ProfileForm
        pf = ProfileForm()
        for field in pf.all_fields():
            if hasattr(prof, field.name):
                # convert t-shirt string to Enum; just copy others
                if field.name == 'teeShirtSize':
                    setattr(pf, field.name,
                            getattr(TeeShirtSize, getattr(prof, field.name)))
                else:
                    setattr(pf, field.name, getattr(prof, field.name))
        pf.check_initialized()
        return pf

    def _getProfileFromUser(self):
        """Return user Profile from datastore, creating new one if
        non-existent."""
        # TODO 2
        # step 1: make sure user is authed
        # uncomment the following lines:
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        p_key = ndb.Key(Profile, getUserId(user))
        profile = None
        # step 2: create a new Profile from logged in user data
        # you can use user.nickname() to get displayName
        # and user.email() to get mainEmail
        profile = p_key.get()
        if not profile:
            profile = Profile(
                key=p_key,
                displayName=user.nickname(),
                mainEmail=user.email(),
                teeShirtSize=str(TeeShirtSize.NOT_SPECIFIED),
            )
            profile.put()
        return profile      # return Profile

    def _doProfile(self, save_request=None):
        """Get user Profile and return to user, possibly updating it first."""
        # get user Profile
        prof = self._getProfileFromUser()

        # if saveProfile(), process user-modifyable fields
        if save_request:
            for field in ('displayName', 'teeShirtSize'):
                if hasattr(save_request, field):
                    val = getattr(save_request, field)
                    if val:
                        # stored as strings; teeShirtSize enum is stringified
                        setattr(prof, field, str(val))
            prof.put()

        # return ProfileForm
        return self._copyProfileToForm(prof)

    @endpoints.method(message_types.VoidMessage, ProfileForm,
                      path='profile', http_method='GET', name='getProfile')
    def getProfile(self, request):
        """Return user profile."""
        return self._doProfile()

    # TODO 1
    # 1. change request class
    # 2. pass request to _doProfile function
    @endpoints.method(ProfileMiniForm, ProfileForm,
                      path='profile', http_method='POST', name='saveProfile')
    def saveProfile(self, request):
        """Update & return user profile."""
        return self._doProfile(request)

# - - - Conference objects - - - - - - - - - - - - - - - - -

    def _copyConferenceToForm(self, conf, displayName):
        """Copy relevant fields from Conference to ConferenceForm."""
        cf = ConferenceForm()
        for field in cf.all_fields():
            if hasattr(conf, field.name):
                # convert Date to date string; just copy others
                if field.name.endswith('Date'):
                    setattr(cf, field.name, str(getattr(conf, field.name)))
                else:
                    setattr(cf, field.name, getattr(conf, field.name))
            elif field.name == "websafeKey":
                setattr(cf, field.name, conf.key.urlsafe())
        if displayName:
            setattr(cf, 'organizerDisplayName', displayName)
        cf.check_initialized()
        return cf

    def _createConferenceObject(self, request):
        """Create or update Conference object, returning
        ConferenceForm/request."""
        # preload necessary data items
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        user_id = getUserId(user)

        if not request.name:
            raise endpoints.BadRequestException("Conference 'name'\
                field required")

        # copy ConferenceForm/ProtoRPC Message into dict
        data = {field.name: getattr(request, field.name)
                for field in request.all_fields()}
        del data['websafeKey']
        del data['organizerDisplayName']

        # add default values for those missing (both data model &
        # outbound Message)
        for df in DEFAULTS:
            if data[df] in (None, []):
                data[df] = DEFAULTS[df]
                setattr(request, df, DEFAULTS[df])

        # convert dates from strings to Date objects; set month
        # based on start_date
        if data['startDate']:
            data['startDate'] = datetime.strptime(data['startDate'][:10],
                                                  "%Y-%m-%d").date()
            data['month'] = data['startDate'].month
        else:
            data['month'] = 0
        if data['endDate']:
            data['endDate'] = datetime.strptime(data['endDate'][:10],
                                                "%Y-%m-%d").date()

        # set seatsAvailable to be same as maxAttendees on creation
        # both for data model & outbound Message
        if data["maxAttendees"] > 0:
            data["seatsAvailable"] = data["maxAttendees"]
            setattr(request, "seatsAvailable", data["maxAttendees"])

        # make Profile Key from user ID
        p_key = ndb.Key(Profile, user_id)
        # allocate new Conference ID with Profile key as parent
        c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
        # make Conference key from ID
        c_key = ndb.Key(Conference, c_id, parent=p_key)
        data['key'] = c_key
        data['organizerUserId'] = request.organizerUserId = user_id

        # create Conference & return (modified) ConferenceForm
        Conference(**data).put()
        # confirmation email is sent asynchronously via the task queue
        taskqueue.add(params={'email': user.email(),
                              'conferenceInfo': repr(request)},
                      url='/tasks/send_confirmation_email'
                      )
        return request

    @endpoints.method(ConferenceForm, ConferenceForm, path='conference',
                      http_method='POST', name='createConference')
    def createConference(self, request):
        """Create new conference."""
        return self._createConferenceObject(request)

    @endpoints.method(ConferenceQueryForms, ConferenceForms,
                      path='queryConferences',
                      http_method='POST',
                      name='queryConferences')
    def queryConferences(self, request):
        """Query for conferences."""
        conferences = self._getQuery(request)

        # return individual ConferenceForm object per Conference
        return ConferenceForms(
            items=[self._copyConferenceToForm(conf, "")
                   for conf in conferences]
        )

    @endpoints.method(message_types.VoidMessage, ConferenceForms,
                      path='getConferencesCreated',
                      http_method='POST', name='getConferencesCreated')
    def getConferencesCreated(self, request):
        """Return conferences created by user."""
        # make sure user is authed
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')

        # make profile key
        p_key = ndb.Key(Profile, getUserId(user))
        # create ancestor query for this user
        conferences = Conference.query(ancestor=p_key)
        # get the user profile and display name
        prof = p_key.get()
        displayName = getattr(prof, 'displayName')
        # return set of ConferenceForm objects per Conference
        return ConferenceForms(
            items=[self._copyConferenceToForm(conf, displayName)
                   for conf in conferences]
        )

    # registers API
    @endpoints.method(message_types.VoidMessage, ConferenceForms,
                      path='filterPlayground',
                      http_method='POST', name='filterPlayground')
    def filterPlayground(self, request):
        # scratch endpoint demonstrating hard-coded NDB filters
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        q = Conference.query()

        # simple filter usage:
        # q = q.filter(Conference.city == "Paris")
        q = q.filter(Conference.city == "London")
        q = q.filter(Conference.topics == "Medical Innovations")
        q = q.order(Conference.name)
        q = q.filter(Conference.month == 12)

        return ConferenceForms(
            items=[self._copyConferenceToForm(conf, "") for conf in q]
        )

    def _getQuery(self, request):
        """Return formatted query from the submitted filters."""
        q = Conference.query()
        inequality_filter, filters = self._formatFilters(request.filters)

        # If exists, sort on inequality filter first
        # (NDB requires the inequality property to be the first sort order)
        if not inequality_filter:
            q = q.order(Conference.name)
        else:
            q = q.order(ndb.GenericProperty(inequality_filter))
            q = q.order(Conference.name)

        for filtr in filters:
            if filtr["field"] in ["month", "maxAttendees"]:
                filtr["value"] = int(filtr["value"])
            formatted_query = ndb.query.FilterNode(
                filtr["field"], filtr["operator"], filtr["value"])
            q = q.filter(formatted_query)
        return q

    def _formatFilters(self, filters):
        """Parse, check validity and format user supplied filters."""
        formatted_filters = []
        inequality_field = None

        for f in filters:
            filtr = {field.name: getattr(f, field.name)
                     for field in f.all_fields()}

            try:
                filtr["field"] = FIELDS[filtr["field"]]
                filtr["operator"] = OPERATORS[filtr["operator"]]
            except KeyError:
                raise endpoints.BadRequestException("Filter contains \
                    invalid field or operator.")

            # Every operation except "=" is an inequality
            if filtr["operator"] != "=":
                # check if inequality operation has been used in previous
                # filters disallow the filter if inequality was performed
                # on a different field before
                # track the field on which the inequality operation
                # is performed
                if inequality_field and inequality_field != filtr["field"]:
                    raise endpoints.BadRequestException(
                        "Inequality filter is allowed on only one field.")
                else:
                    inequality_field = filtr["field"]

            formatted_filters.append(filtr)
        return (inequality_field, formatted_filters)

    # cross-group transaction: touches both the Profile and the Conference
    @ndb.transactional(xg=True)
    def _conferenceRegistration(self, request, reg=True):
        """Register or unregister user for selected conference."""
        retval = None
        prof = self._getProfileFromUser()  # get user Profile

        # check if conf exists given websafeConfKey
        # get conference; check that it exists
        wsck = request.websafeConferenceKey
        conf = ndb.Key(urlsafe=wsck).get()
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % wsck)

        # register
        if reg:
            # check if user already registered otherwise add
            if wsck in prof.conferenceKeysToAttend:
                raise ConflictException(
                    "You have already registered for this conference")

            # check if seats avail
            if conf.seatsAvailable <= 0:
                raise ConflictException(
                    "There are no seats available.")

            # register user, take away one seat
            prof.conferenceKeysToAttend.append(wsck)
            conf.seatsAvailable -= 1
            retval = True

        # unregister
        else:
            # check if user already registered
            if wsck in prof.conferenceKeysToAttend:

                # unregister user, add back one seat
                prof.conferenceKeysToAttend.remove(wsck)
                conf.seatsAvailable += 1
                retval = True
            else:
                retval = False

        # write things back to the datastore & return
        prof.put()
        conf.put()
        return BooleanMessage(data=retval)

    @endpoints.method(CONF_GET_REQUEST, ConferenceForm,
                      path='conference/{websafeConferenceKey}',
                      http_method='GET', name='getConference')
    def getConference(self, request):
        """Return requested conference (by websafeConferenceKey)."""
        # get Conference object from request; bail if not found
        conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s'
                % request.websafeConferenceKey)
        prof = conf.key.parent().get()
        # return ConferenceForm
        return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))

    @endpoints.method(CONF_GET_REQUEST, BooleanMessage,
                      path='conference/register/{websafeConferenceKey}',
                      http_method='POST', name='registerForConference')
    def registerForConference(self, request):
        """Register user for selected conference."""
        return self._conferenceRegistration(request)

    @endpoints.method(CONF_GET_REQUEST, BooleanMessage,
                      path='conference/unregister/{websafeConferenceKey}',
                      http_method='POST', name='unregisterFromConference')
    def unregisterFromConference(self, request):
        """Unregister user for selected conference."""
        return self._conferenceRegistration(request, False)

    # endpoint for getting all the conferences for which user has registered
    @endpoints.method(message_types.VoidMessage, ConferenceForms,
                      path='conferences/attending',
                      http_method='GET', name='getConferencesToAttend')
    def getConferencesToAttend(self, request):
        """Get list of conferences that user has registered for."""
        # TODO:
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        # step 1: get user profile
        # make profile key
        prof = self._getProfileFromUser()
        # step 2: get conferenceKeysToAttend from profile.
        conf_keys = [ndb.Key(urlsafe=wsck)
                     for wsck in prof.conferenceKeysToAttend]
        # batch fetch all conferences in one datastore round-trip
        conferences = ndb.get_multi(conf_keys)
        # Do not fetch them one by one!
        # return set of ConferenceForm objects per Conference
        return ConferenceForms(items=[self._copyConferenceToForm(conf, "")
                                      for conf in conferences]
                               )

    # adds the announcement to memcache if the available seats are less
    # than or equal to 5
    @staticmethod
    def _cacheAnnouncement():
        """Create Announcement & assign to memcache; used by
        memcache cron job & putAnnouncement().
        """
        confs = Conference.query(ndb.AND(
            Conference.seatsAvailable <= 5,
            Conference.seatsAvailable > 0)
        ).fetch(projection=[Conference.name])

        if confs:
            # If there are almost sold out conferences,
            # format announcement and set it in memcache
            announcement = '%s %s' % (
                'Last chance to attend! The following conferences '
                'are nearly sold out:',
                ', '.join(conf.name for conf in confs))
            memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
        else:
            # If there are no sold out conferences,
            # delete the memcache announcements entry
            announcement = ""
            memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)

        return announcement

    # Gets announcement from memcache
    @endpoints.method(message_types.VoidMessage, StringMessage,
                      path='conference/announcement/get',
                      http_method='GET', name='getAnnouncement')
    def getAnnouncement(self, request):
        """Return Announcement from memcache."""
        # TODO 1
        # return an existing announcement from Memcache or an empty string.
        # NOTE(review): memcache.get returns None on a miss; StringMessage
        # presumably tolerates data=None -- verify against the model.
        announcement = memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY)
        return StringMessage(data=announcement)

# ---------------- Session Objects ----------------------

    def _copySessionToForm(self, session):
        """
        Input:
            session: Session object
        Returns:
            SessionForm
        Description: This method takes a session object with values
        defined from client side and returns the SessionFrom after copying
        the relevant fields in it which can be returned to client
        as ProRPC Message.
        """
        sessionform = SessionForm()
        for field in sessionform.all_fields():
            if hasattr(session, field.name):
                # convert Date to date string; just copy others
                if field.name.endswith('date'):
                    setattr(sessionform, field.name,
                            str(getattr(session, field.name)))
                else:
                    setattr(sessionform, field.name,
                            getattr(session, field.name))
            # Checks if the field is websafeSessionKey, then converts it into
            # urlsafe key
            elif field.name == "websafeSessionKey":
                setattr(sessionform, field.name, session.key.urlsafe())
        sessionform.check_initialized()
        return sessionform

    def _createSessionObject(self, request):
        """
        Input:
            request: Takes a request object to parse the data recieved
            from client
        Returns:
            Returns the SessionForm object
        Description: Retrieves information from request object and uses
        the information to add new session. uses the websafeConferenceKey
        passed from client to to add this session as child entity of that
        conference. After adding the session, it adds a message to memcache
        which states featured speaker and his/her session names if the
        speaker appears in more than one sessions
        """
        # preload necessary data items
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        user_id = getUserId(user)

        if not request.name:
            raise endpoints.BadRequestException("Session 'name' \
                field required")

        # # conferences = Conference.query(ancestor=p_key)
        # conferences = conferences.filter()
        conf_key = ndb.Key(urlsafe=request.websafeConferenceKey)
        conf = conf_key.get()
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s'
                % request.websafeConferenceKey)

        # Get logged in user's profile key
        p_key = ndb.Key(Profile, user_id)
        # Get conference's parent profile
        prof = conf.key.parent().get()

        # convert to urlsafe
        usafep1 = prof.key.urlsafe()
        usafep2 = p_key.urlsafe()

        # Check if user is the one who added the conference. Otherwise throw
        # unauthorized exception because only those users who added
        # conferences can add sessions to those conferences
        if usafep1 != usafep2:
            raise endpoints.UnauthorizedException('User is not authorized to '\
                + 'add new session to this conference as he/she is not the '\
                + 'creator of this conference.')

        speaker_key = ndb.Key(urlsafe=request.websafeSpeakerKey)
        speaker = speaker_key.get()
        if not speaker:
            raise endpoints.NotFoundException(
                'No speaker found with key: %s' % request.websafeSpeakerKey)

        data = {field.name: getattr(request, field.name)
                for field in request.all_fields()}
        del data['websafeSessionKey']
        del data['websafeConferenceKey']

        if data['date']:
            data['date'] = datetime.strptime(data['date'][:10],
                                             "%Y-%m-%d").date()

        # allocate a Session id under the conference so it becomes a child
        s_id = Session.allocate_ids(size=1, parent=conf_key)[0]
        s_key = ndb.Key(Session, s_id, parent=conf_key)
        data['key'] = s_key

        # create Conference & return (modified) ConferenceForm
        Session(**data).put()
        taskqueue.add(params={'email': user.email(),
                              'sessionInfo': repr(request)},
                      url='/tasks/send_session_confirmation_email'
                      )
        # asynchronously recompute the featured speaker for this conference
        taskqueue.add(params={'websafeConferenceKey': \
                              request.websafeConferenceKey,
                              'websafeSpeakerKey': request.websafeSpeakerKey,
                              'speaker': speaker.name},
                      url='/tasks/set_featured_speaker'
                      )

        # Return data as SessionForm. Cannot use self._copySessionToForm as
        # that method implementation looks for session object instead data
        # dict and changing the implementation of that method affects the
        # query endpoints
        sessionform = SessionForm()
        for field in sessionform.all_fields():
            if data.has_key(field.name):
                # convert Date to date string; just copy others
                if field.name.endswith('date'):
                    setattr(sessionform, field.name, str(data[field.name]))
                else:
                    setattr(sessionform, field.name, data[field.name])
            # Checks if the field is websafeSessionKey, then converts it into
            # urlsafe key
            elif field.name == "websafeSessionKey":
                setattr(sessionform, field.name, data['key'].urlsafe())
        sessionform.check_initialized()
        return sessionform

    # NOTE(review): declared @staticmethod yet takes a `self` parameter --
    # callers must therefore pass the first positional argument explicitly;
    # confirm against the task handler that invokes it.
    @staticmethod
    def _setFeaturedSpeaker(self, websafeConferenceKey, websafeSpeakerKey,
                            speaker):
        """
        Input: websafeConferenceKey, websafeSpeakerKey, speaker
        Returns: Doesn't return anything
        Description: this method checks if the speaker has more than one
        sessions, within the same conference and adds a message in memcache
        mentioning the speaker name as featured speaker and session names
        he/she is delivering.
NOTE: This method is being executed using taskqueue from SetFeaturedSpeakerHandler() in main.py """ # --------- add featured speaker to memcache ----------- conf_key = ndb.Key(urlsafe=websafeConferenceKey) # Gets all the sessions for current Conference sessions = Session.query(ancestor=conf_key) # Filters the returned sessions based on speaker sessions = sessions.filter(Session.websafeSpeakerKey == websafeSpeakerKey) sessions = sessions.fetch() # Checks if the more than one sessions were returned for current # speaker if len(sessions) > 1: featuredSpeakerMessage = speaker + " is featured speaker " + \ "and he will be delivering talk in following sessions. " sessionsCSV = "" # building a comma separated list of session names where featured # speaker is speaking for session in sessions: sessionsCSV += session.name + ", " featuredSpeakerMessage = featuredSpeakerMessage + \ sessionsCSV[:-2] + "." memcache.set(MEMCACHE_FEATURED_SPEAKER_KEY, featuredSpeakerMessage) def _getFeaturedSpeaker(self): """ It retrieves the featured speaker and the session names from memcache """ return memcache.get(MEMCACHE_FEATURED_SPEAKER_KEY) @endpoints.method(message_types.VoidMessage, StringMessage, path='getfeaturedspeaker', http_method='GET', name='getFeaturedSpeaker') def getFeaturedSpeaker(self, request): """ getFeaturedSpeaker endpoint recieves the calls from client to get the featured speaker information from memcache Input: It doesn't require any input parameters Returns: String message about featured speaker and his/her sessions """ featuredSpeaker = memcache.get(MEMCACHE_FEATURED_SPEAKER_KEY) return StringMessage(data=featuredSpeaker or "No Featured Speaker") def _getConferenceSessions(self, request): """ Description: Retrieves all the sessions in a conference and returns as SessionForms object """ conf_key = ndb.Key(urlsafe=request.websafeConferenceKey) sessions = Session.query(ancestor=conf_key) return SessionForms( sessions=[self._copySessionToForm(session) for session in 
sessions] ) def _getConferenceSessionsByType(self, request): """ Input: request contains typeOfSession: parameter for filtering based on type websafeConferenceKey: parameter for filitering conference on key Retrieves all the sessions in a conference filtered by type and Returns as SessionForms object """ conf_key = ndb.Key(urlsafe=request.websafeConferenceKey) # First filter the sessions on parent conference based # websafeConferenceKey provided by client sessions = Session.query(ancestor=conf_key) # Filter resulting sessions by typeOfSession sessions = sessions.filter(Session.typeOfSession == request.typeOfSession) return SessionForms( sessions=[self._copySessionToForm(session) for session in sessions] ) def _getConferenceSessionsBySpeaker(self, request): """ Input: websafeSpeakerKey is passed as parameter for filtering Retrieves all the sessions filtered by speaker Returns as SessionForms object """ sessions = Session.query() # Filters based on websafeSpeakerKey sessions = sessions.filter(Session.websafeSpeakerKey == request.websafeSpeakerKey) return SessionForms( sessions=[self._copySessionToForm(session) for session in sessions] ) @endpoints.method(SessionForm, SessionForm, path='session', http_method='POST', name='createSession') def createSession(self, request): """ Creates a new session using conference as parent entity and speaker's websafeKey as attribute along with other attributes like name, startTime, duration, highilghts etc. Input: name, duration, highlights, startTime, date Returns: SessionForm object NOTE: startTime takes integer value in military hours notation e.g. 
1705 """ return self._createSessionObject(request) @endpoints.method(ConferenceSessionQueryForm, SessionForms, path='session/conference', http_method='POST', name='queryConferenceSessions') def getConferenceSessions(self, request): """ getConferenceSessions endpoint: calls _getConferenceSessions method, websafeConferenceKey is passed from the client as ConferenceSessionQueryForm """ return self._getConferenceSessions(request) @endpoints.method(ConferenceSessionTypeSessionQueryForm, SessionForms, path='session/conference/sessionType', http_method='POST', name='getConferenceSessionsByType') def getConferenceSessionsByType(self, request): """ getConferenceSessionsByType endpoint: Retrieves sessions based on type from a conference websafeConferenceKey and typeOfSession is passed from the client """ return self._getConferenceSessionsByType(request) @endpoints.method(SpeakerSessionQueryForm, SessionForms, path='session/speaker', http_method='POST', name='getSessionsBySpeaker') def getSessionsBySpeaker(self, request): """ getSessionsBySpeaker endpoint: Retrieves sessions based on speaker websafeSpeakerKey is passed from the client """ return self._getConferenceSessionsBySpeaker(request) # ---------------- Additional Queries ------------ # @endpoints.method(SessionStartTimeDurationQueryForm, SessionForms, path='session/starttime/duration', http_method='POST', name='getSessionsByStartTimeAndDuration') def getSessionsByStartTimeAndDuration(self, request): """ getSessionsByStartTimeAndDuration endpoint: Gets the sessions filtered by startTime and duration. Checks if the session starts at or after the time provided by client and has required duration startTime and duration are passed from the client returns SessionForms object containing resulting sessions """ return self._getSessionsByStartTimeAndDuration(request) def _getSessionsByStartTimeAndDuration(self, request): """ Queries the sessions and filters based on the startTime and duration provided by client. 
Checks for the sessions which start at or after the time provided by client and have requested duration. Returns SessionForms object with resulting sessions """ sessions = Session.query() # Filtering with "greater than or equal to" inequality sessions = sessions.filter(Session.startTime >= request.startTime) # Filtering further based on matching duration sessions = sessions.filter(Session.duration == request.duration) return SessionForms( sessions=[self._copySessionToForm(session) for session in sessions] ) @endpoints.method(SessionMinStartTimeDurationHighlightsQueryForm, SessionForms, path='session/minstarttime/duration/highlights', http_method='POST', name='getSessionsByMinStartTimeDurationHighlights') def getSessionsByMinStartTimeDurationHighlights(self, request): """find sessions with min start time, duration and matching highlights.""" return self._getSessionsByMinStartTimeDurationHighlights(request) def _getSessionsByMinStartTimeDurationHighlights(self, request): """ Queries the sessions and filters based on the startTime, duration and highlights provided by client. Checks for the sessions which start at or after the time provided by client, have requested duration and highlights. 
Returns SessionForms object with resulting sessions """ sessions = Session.query() sessions = sessions.filter(Session.startTime >= request.startTime) sessions = sessions.filter(Session.duration == request.duration) sessions = sessions.filter(Session.highlights == request.highlights) return SessionForms( sessions=[self._copySessionToForm(session) for session in sessions] ) @endpoints.method(SessionStartTimeQueryForm, SessionForms, path='session/starttime', http_method='POST', name='getSessionsByStartTime') def getSessionsByStartTime(self, request): """ getSessionsByStartTime endpoint: Retrieves sessions based on startTime startTime is passed from the client """ return self._getSessionsByStartTime(request) def _getSessionsByStartTime(self, request): """ Queries the sessions and filters based on the startTime provided by client. Returns SessionForms object with resulting sessions """ sessions = Session.query() sessions = sessions.filter(Session.startTime == request.startTime) return SessionForms( sessions=[self._copySessionToForm(session) for session in sessions] ) @endpoints.method(message_types.VoidMessage, SpeakerForm, path='session/speakerwithmostsessions', http_method='POST', name='getSpeakerWithHighestNumberOfSessions') def getSpeakerWithHighestNumberOfSessions(self, request): """ getSpeakerWithHighestNumberOfSessions endpoint: Gets all the sessions and their speakers websafe keys. """ return self._getSpeakerWithHighestNumberOfSessions(request) def _getSpeakerWithHighestNumberOfSessions(self, request): """ _getSpeakerWithHighestNumberOfSessions: gets all the sessions and their speakers websafe keys. Uses max() method to determine the highest occuring websafeSpeakerKey in resulting speaker keys and gets that speaker's information. 
Hence, the speaker with highest number of sessions """ sessions = Session.query() websafeSpeakerKeys = [session.websafeSpeakerKey for session in sessions] # Checks for highest count of websafeSpeakerKeys and retrieves that # speaker key speaker_key = ndb.Key(urlsafe=max(set(websafeSpeakerKeys), key=websafeSpeakerKeys.count)) speaker = speaker_key.get() return self._copySpeakerToForm(speaker=speaker) @endpoints.method(ConferenceSessionTypeStartTimeQueryForm, SessionForms, path='session/bytype/bystarttime', http_method='POST', name='querySessionByTypeAndStartTime') def querySessionByTypeAndStartTime(self, request): """ querySessionByTypeAndStartTime solves the multiple inequality problem by first retrieving the sessions filtered by typeOfSession requested by client and then iterates through the results to check startTime which is lesser than the provided startTime Returns the resulting sessions in SessionForms """ sessions = Session.query() # Filters session on typeOfSession sessions = sessions.filter(Session.typeOfSession != request.typeOfSession) # Checks if each session has startTime lower than the provided # startTime sessionsBeforeTime = [session for session in sessions if session.startTime < request.startTime] # return individual SessionForm object per Session return SessionForms( sessions=[self._copySessionToForm(session) for session in sessionsBeforeTime] ) # ---------------- Speaker Objects ------------ # def _createSpeakerObject(self, request): """ Creates a Speaker entity in datastore based on the information provided by client. 
Sends a confirmation email after adding the new Speaker """ # preload necessary data items user = endpoints.get_current_user() if not user: raise endpoints.UnauthorizedException('Authorization required') user_id = getUserId(user) if not request.name: raise endpoints.BadRequestException("Speaker 'name' \ field required") data = {field.name: getattr(request, field.name) for field in request.all_fields()} del data['websafeSpeakerKey'] # create Speaker & return (modified) SpeakerForm speaker_key = Speaker(**data).put() taskqueue.add(params={'email': user.email(), 'speakerInfo': repr(request)}, url='/tasks/send_speaker_confirmation_email' ) # Return data as SpeakerForm speakerform = SpeakerForm() for field in speakerform.all_fields(): if data.has_key(field.name): setattr(speakerform, field.name, data[field.name]) # Checks if the field is websafeSpeakerKey, then converts it into # urlsafe key elif field.name == "websafeSpeakerKey": setattr(speakerform, field.name, speaker_key.urlsafe()) speakerform.check_initialized() return speakerform def _copySpeakerToForm(self, speaker): """Copy relevant fields from Speaker to SpeakerForm.""" speakerform = SpeakerForm() for field in speakerform.all_fields(): if hasattr(speaker, field.name): setattr(speakerform, field.name, getattr(speaker, field.name)) elif field.name == "websafeSpeakerKey": setattr(speakerform, field.name, speaker.key.urlsafe()) speakerform.check_initialized() return speakerform def _getSpeakers(self, request): """ Returns all the speakers if no criteria is provided. It can take generic filters based on client provided field, operator, value format and return results based on that. 
Same as conferences filters """ q = Speaker.query() inequality_filter, filters = self._formatFilters(request.filters) # If exists, sort on inequality filter first if not inequality_filter: q = q.order(Speaker.name) else: q = q.order(ndb.GenericProperty(inequality_filter)) q = q.order(Speaker.name) for filtr in filters: formatted_query = ndb.query.FilterNode( filtr["field"], filtr["operator"], filtr["value"]) q = q.filter(formatted_query) return q # endpoint for Creating Speaker @endpoints.method(SpeakerForm, SpeakerForm, path='speaker', http_method='POST', name='createSpeaker') def createSpeaker(self, request): """ Creates a new speaker Input: name, list of interests, organization Returns: name, list of interests, organization, websafeSpeakerKey """ return self._createSpeakerObject(request) # endpoint for querying speakers @endpoints.method(QueryForms, SpeakerForms, path='querySpeakers', http_method='POST', name='querySpeakers') def querySpeakers(self, request): """ Queries Speakers, takes generic filters Input: Field, Operator, Value """ speakers = self._getSpeakers(request) # return individual SpeakerForm object return SpeakerForms( speakers=[self._copySpeakerToForm(speaker) for speaker in speakers] ) @ndb.transactional(xg=True) def _updateSessionWishlist(self, request, reg=True): """ It updates the wishlist attribute of the profile entity of user. 
Stores the session keys It adds the session in wishlist if the reg parameter is true, otherwise removes the session from wishlist This method is transactional so that in case of any failure, the partial changes are reverted """ retval = None prof = self._getProfileFromUser() # get user Profile # check if session exists given websafeSessionKey # get session; check that it exists wssk = request.websafeSessionKey session_key = ndb.Key(urlsafe=wssk) # Assure the websafe key is only for Session and raise exception if # key is provided for non Session kind if session_key.kind() != "Session": raise endpoints.NotFoundException( 'wrong websafeSessionKey provided') session = session_key.get() if not session: raise endpoints.NotFoundException( 'No conference found with key: %s' % wssk) # add session to wishlist if reg: # check if user already has the session in wishlist, otherwise add if session_key in prof.sessionsWishList: raise ConflictException( "This session is already in the wishlist") # register user, take away one seat prof.sessionsWishList.append(session_key) retval = True # remove session from wishlist else: # check if session is already in wishlist if session_key in prof.sessionsWishList: # remove session from wishlist prof.sessionsWishList.remove(session_key) retval = True else: retval = False # write things back to the datastore & return prof.put() return BooleanMessage(data=retval) @endpoints.method(SESSION_GET_REQUEST, BooleanMessage, path='session/addtowishlist/{websafeSessionKey}', http_method='POST', name='addSessionToWishlist') def addSessionToWishlist(self, request): """ endpoint for adding session to wishlist Input: Takes websafeSessionKey in querystring parameters Returns: True/False based on operation completion """ return self._updateSessionWishlist(request) @endpoints.method(SESSION_GET_REQUEST, BooleanMessage, path='session/deletefromwishlist/{websafeSessionKey}', http_method='DELETE', name='deleteSessionInWishlist') def deleteSessionInWishlist(self, 
request): """ endpoint for deleting session from wishlist Input: Takes websafeSessionKey in querystring parameters Returns: True/False based on operation completion """ return self._updateSessionWishlist(request, False) @endpoints.method(message_types.VoidMessage, SessionForms, path='session/wishlist', http_method='GET', name='getSessionsInWishlist') def getSessionsInWishlist(self, request): """ Get list of sessions that user has added to wishlist """ user = endpoints.get_current_user() if not user: raise endpoints.UnauthorizedException('Authorization required') # get user profile & make profile key prof = self._getProfileFromUser() # get sessionsWishList from profile. sessions = ndb.get_multi(prof.sessionsWishList) # return set of SessionForm objects per Session return SessionForms(sessions=[self._copySessionToForm(session) for session in sessions]) api = endpoints.api_server([ConferenceApi])
"""Wavelo (SocialBicycles) usage data collector.

Polls the SocialBicycles API for hub and bike status of the Wavelo network
(network id 105) and appends YAML snapshots / summaries to the output folder.
Credentials come from the SOCIALB_USER / SOCIALB_PASSWORD env variables.
"""
import requests
import yaml
import datetime, time
import os
import argparse

parser = argparse.ArgumentParser(description='wavelo usage data collector')
parser.add_argument('-o', '--output_folder', nargs=1, default=['.'], type=str,
                    help='folder in which output data are saved',
                    dest='path_to_output_dir')
args = parser.parse_args()
path_to_output_dir = args.path_to_output_dir[0]
if not os.path.exists(path_to_output_dir):
    raise SystemExit('Path %s does not exist.' % (path_to_output_dir))
if not os.path.isdir(path_to_output_dir):
    raise SystemExit('Path %s is not a directory.' % (path_to_output_dir))

# output file names are keyed by the run date
date = curr_time = datetime.datetime.now().strftime('%Y-%m-%d')
data_file = 'wavelo_data-%s.yaml' % (date)
data_file_summary = 'wavelo_data_summary-%s.yaml' % (date)
data_file_current = 'wavelo_data_current.yaml'
all_bikes_data = 'bike_ids.yaml'
known_hubs_data = 'hubs.yaml'

network_id = 105  # Wavelo network id
server = 'https://app.socialbicycles.com/api/'
hubs_endpoint = 'networks/%d/hubs.json?per_page=300' % (network_id)
bikes_endpoint = 'networks/%d/bikes.json' % (network_id)
bike_endpoint = 'bikes/%d'  # %d for bike_id

user = os.environ['SOCIALB_USER']
password = os.environ['SOCIALB_PASSWORD']

curr_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
timestamp = time.time()

hubs_data = {}
bikes_data = {}
data_summary = {
    curr_time: {
        'timestamp': timestamp,
    }
}

# all persistent per-network files live under <output>/split_data/
split_data_dir = path_to_output_dir + '/split_data/'

# load the set of hubs seen in previous runs (static hub metadata cache)
if os.path.exists(os.path.join(split_data_dir, known_hubs_data)):
    with open(os.path.join(split_data_dir, known_hubs_data), 'r') as infile:
        # safe_load: the file only contains plain mappings; avoids the
        # arbitrary-object construction of yaml.load()
        known_hubs = yaml.safe_load(infile)['hubs']
else:
    known_hubs = {}

# ---- Hubs data ----
all_available_bikes_hubs = 0
all_current_bikes_hubs = 0
r = requests.get(server + hubs_endpoint, auth=(user, password))
hubs = r.json()['items']
for hub in hubs:
    keys = ['id', 'name', 'available_bikes', 'current_bikes', 'free_racks']
    hub_data = {key: hub[key] for key in keys}
    hub_id = hub['id']
    if hub_id not in known_hubs:
        # first time we see this hub: remember its static description
        keys = ['id', 'name', 'racks_amount', 'description', 'has_kiosk',
                'address', 'sponsored', 'polygon', 'middle_point']
        known_hubs[hub_id] = {key: hub[key] for key in keys}
    all_available_bikes_hubs += hub['available_bikes']
    all_current_bikes_hubs += hub['current_bikes']
    hubs_data[hub_data['id']] = hub_data
data_summary[curr_time]['all_available_bikes_hubs'] = all_available_bikes_hubs
data_summary[curr_time]['all_current_bikes_hubs'] = all_current_bikes_hubs

# ---- Bikes data ----
# NOTE(review): all_state_not_available is reported in the summary but never
# incremented anywhere; kept for output compatibility.
all_state_not_available = 0
all_repair_state_not_working = 0
all_not_in_hub = 0
all_outside_area = 0
all_rented_bikes = 0
all_unavailable_bikes = 0
# first request only fetches the total count, second fetches all entries
r = requests.get(server + bikes_endpoint + '?per_page=%d' % (1),
                 auth=(user, password))
all_available_bikes = r.json()['total_entries']
r = requests.get(server + bikes_endpoint +
                 '?per_page=%d' % (all_available_bikes),
                 auth=(user, password))
bikes = r.json()['items']

with open(os.path.join(split_data_dir, all_bikes_data), 'r') as infile:
    all_bike_ids = yaml.safe_load(infile)['bike_ids']
print(all_available_bikes)
print(len(bikes))

all_bikes_in_system = {}
for bike_id in all_bike_ids:
    all_bikes_in_system[bike_id] = bike_id
# bikes not listed by the API call above are queried individually later
unavailable_bikes = all_bikes_in_system.copy()

new_bikes = []
test_bikes = {}
all_new_bikes = 0
all_test_bikes = 0
for bike in bikes:
    keys = ['id', 'name', 'hub_id', 'distance', 'inside_area']
    bike_data = {key: bike[key] for key in keys}
    unavailable_bikes.pop(bike['id'], None)
    # BUG FIX: the original had the copy reversed
    # (`if 'state' in bike_data: bike['state'] = bike_data['state']`), which
    # could never trigger because bike_data is built from the fixed key list
    # above. Copy the optional fields from the API record into our snapshot,
    # mirroring the per-bike loop further down.
    if 'state' in bike:
        bike_data['state'] = bike['state']
    if 'repair_state' in bike:
        bike_data['repair_state'] = bike['repair_state']
    if bike['id'] not in all_bikes_in_system:
        if bike['name'].startswith('_'):
            # names starting with '_' mark operator test bikes
            bike_data['current_position'] = bike['current_position']
            test_bikes[bike['id']] = bike_data
            all_test_bikes += 1
            continue
        else:
            new_bikes.append(bike['id'])
            all_bike_ids.append(bike['id'])
            all_new_bikes += 1
    if bike['hub_id'] is None:
        all_not_in_hub += 1
        bike_data['current_position'] = bike['current_position']
    if bike['inside_area'] == False:
        all_outside_area += 1
    bikes_data[bike_data['id']] = bike_data

rented_bikes_data = {}
unavailable_bikes_data = {}
broken_bikes_data = {}
# State: available (?), rented, hold, booked
# Repair_state: working, soft_broken
for bike in unavailable_bikes:
    r = requests.get(server + bike_endpoint % (bike), auth=(user, password))
    bike_d = r.json()
    keys = ['id', 'name', 'hub_id', 'distance', 'inside_area',
            'current_position']
    bike_data = {key: bike_d[key] for key in keys}
    if 'state' in bike_d:
        bike_data['state'] = bike_d['state']
    if 'repair_state' in bike_d:
        bike_data['repair_state'] = bike_d['repair_state']
    # broken bikes take precedence over rented ones
    if 'repair_state' in bike_data:
        if bike_data['repair_state'] != 'working':
            all_repair_state_not_working += 1
            broken_bikes_data[bike] = bike_data
            continue
    if 'state' in bike_data:
        if bike_data['state'] != 'available':
            all_rented_bikes += 1
            rented_bikes_data[bike] = bike_data
            continue
    all_unavailable_bikes += 1
    unavailable_bikes_data[bike] = bike_data

data_summary[curr_time]['all_state_not_available'] = all_state_not_available
data_summary[curr_time]['all_repair_state_not_working'] = \
    all_repair_state_not_working
data_summary[curr_time]['all_not_in_hub'] = all_not_in_hub
data_summary[curr_time]['all_available_bikes'] = all_available_bikes
data_summary[curr_time]['all_outside_area'] = all_outside_area
data_summary[curr_time]['all_rented_bikes'] = all_rented_bikes
data_summary[curr_time]['all_new_bikes'] = all_new_bikes
data_summary[curr_time]['all_test_bikes'] = all_test_bikes
data_summary[curr_time]['all_unavailable_bikes'] = all_unavailable_bikes

# write the counts-only summary before the bulky per-entity data is attached
with open(os.path.join(split_data_dir, data_file_summary), 'a') as outfile:
    yaml.safe_dump(data_summary, outfile, encoding='utf-8',
                   default_flow_style=False, allow_unicode=True)

data_summary[curr_time]['hubs'] = hubs_data
data_summary[curr_time]['bikes'] = bikes_data
data_summary[curr_time]['rented_bikes'] = rented_bikes_data
data_summary[curr_time]['unavailable_bikes'] = unavailable_bikes_data
data_summary[curr_time]['new_bikes'] = new_bikes
data_summary[curr_time]['test_bikes'] = test_bikes
data_summary[curr_time]['broken_bikes'] = broken_bikes_data

# full snapshot: appended to the dated file and overwriting the "current" one
with open(os.path.join(split_data_dir, data_file), 'a') as outfile:
    yaml.safe_dump(data_summary, outfile, encoding='utf-8',
                   default_flow_style=False, allow_unicode=True)
with open(os.path.join(path_to_output_dir, data_file_current),
          'w') as outfile:
    yaml.safe_dump(data_summary, outfile, encoding='utf-8',
                   default_flow_style=False, allow_unicode=True)

# persist the (possibly extended) id list and hub metadata cache
with open(os.path.join(split_data_dir, all_bikes_data), 'w') as outfile:
    yaml.safe_dump({'bike_ids': all_bike_ids}, outfile, encoding='utf-8',
                   default_flow_style=False, allow_unicode=True)
with open(os.path.join(split_data_dir, known_hubs_data), 'w') as outfile:
    yaml.safe_dump({'hubs': known_hubs}, outfile, encoding='utf-8',
                   default_flow_style=False, allow_unicode=True)
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python TF-Lite interpreter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import ctypes
import platform
import sys
import os

import numpy as np

# pylint: disable=g-import-not-at-top
# The same source ships in two packages; pick the wrapper module based on
# where this file is installed.
if not os.path.splitext(__file__)[0].endswith(
    os.path.join('tflite_runtime', 'interpreter')):
  # This file is part of tensorflow package.
  from tensorflow.lite.python.interpreter_wrapper import _pywrap_tensorflow_interpreter_wrapper as _interpreter_wrapper
  from tensorflow.python.util.tf_export import tf_export as _tf_export
else:
  # This file is part of tflite_runtime package.
  from tflite_runtime import _pywrap_tensorflow_interpreter_wrapper as _interpreter_wrapper

  # Standalone tflite_runtime has no tf_export; substitute a no-op decorator.
  def _tf_export(*x, **kwargs):
    del x, kwargs
    return lambda x: x

try:
  from tensorflow.lite.python import metrics_portable as metrics
except ImportError:
  from tensorflow.lite.python import metrics_nonportable as metrics
# pylint: enable=g-import-not-at-top


class Delegate(object):
  """Python wrapper class to manage TfLiteDelegate objects.

  The shared library is expected to have two functions:
    TfLiteDelegate* tflite_plugin_create_delegate(
        char**, char**, size_t, void (*report_error)(const char *))
    void tflite_plugin_destroy_delegate(TfLiteDelegate*)

  The first one creates a delegate object. It may return NULL to indicate an
  error (with a suitable error message reported by calling report_error()).
  The second one destroys delegate object and must be called for every
  created delegate object. Passing NULL as argument value is allowed, i.e.

    tflite_plugin_destroy_delegate(tflite_plugin_create_delegate(...))

  always works.
  """

  def __init__(self, library, options=None):
    """Loads delegate from the shared library.

    Args:
      library: Shared library name.
      options: Dictionary of options that are required to load the delegate.
        All keys and values in the dictionary should be serializable. Consult
        the documentation of the specific delegate for required and legal
        options. (default None)

    Raises:
      RuntimeError: This is raised if the Python implementation is not
        CPython.
    """

    # TODO(b/136468453): Remove need for __del__ ordering needs of CPython
    # by using explicit closes(). See implementation of Interpreter __del__.
    if platform.python_implementation() != 'CPython':
      raise RuntimeError('Delegates are currently only supported into CPython'
                         'due to missing immediate reference counting.')

    self._library = ctypes.pydll.LoadLibrary(library)
    self._library.tflite_plugin_create_delegate.argtypes = [
        ctypes.POINTER(ctypes.c_char_p),
        ctypes.POINTER(ctypes.c_char_p), ctypes.c_int,
        ctypes.CFUNCTYPE(None, ctypes.c_char_p)
    ]
    self._library.tflite_plugin_create_delegate.restype = ctypes.c_void_p

    # Convert the options from a dictionary to lists of char pointers.
    options = options or {}
    options_keys = (ctypes.c_char_p * len(options))()
    options_values = (ctypes.c_char_p * len(options))()
    for idx, (key, value) in enumerate(options.items()):
      options_keys[idx] = str(key).encode('utf-8')
      options_values[idx] = str(value).encode('utf-8')

    # Collects error messages reported through the C callback so they can be
    # surfaced in the ValueError below.
    class ErrorMessageCapture(object):

      def __init__(self):
        self.message = ''

      def report(self, x):
        self.message += x if isinstance(x, str) else x.decode('utf-8')

    capture = ErrorMessageCapture()
    error_capturer_cb = ctypes.CFUNCTYPE(None, ctypes.c_char_p)(capture.report)
    # Do not make a copy of _delegate_ptr. It is freed by Delegate's
    # finalizer.
    self._delegate_ptr = self._library.tflite_plugin_create_delegate(
        options_keys, options_values, len(options), error_capturer_cb)
    if self._delegate_ptr is None:
      raise ValueError(capture.message)

  def __del__(self):
    # __del__ can not be called multiple times, so if the delegate is
    # destroyed. don't try to destroy it twice.
    # (_library is set to None after the first destruction as the guard.)
    if self._library is not None:
      self._library.tflite_plugin_destroy_delegate.argtypes = [
          ctypes.c_void_p
      ]
      self._library.tflite_plugin_destroy_delegate(self._delegate_ptr)
      self._library = None

  def _get_native_delegate_pointer(self):
    """Returns the native TfLiteDelegate pointer.

    It is not safe to copy this pointer because it needs to be freed.

    Returns:
      TfLiteDelegate *
    """
    return self._delegate_ptr


@_tf_export('lite.experimental.load_delegate')
def load_delegate(library, options=None):
  """Returns loaded Delegate object.

  Args:
    library: Name of shared library containing the
      [TfLiteDelegate](https://www.tensorflow.org/lite/performance/delegates).
    options: Dictionary of options that are required to load the delegate. All
      keys and values in the dictionary should be convertible to str. Consult
      the documentation of the specific delegate for required and legal
      options. (default None)

  Returns:
    Delegate object.

  Raises:
    ValueError: Delegate failed to load.
    RuntimeError: If delegate loading is used on unsupported platform.
  """
  try:
    delegate = Delegate(library, options)
  except ValueError as e:
    raise ValueError('Failed to load delegate from {}\n{}'.format(
        library, str(e)))
  return delegate


class SignatureRunner(object):
  """SignatureRunner class for running TFLite models using SignatureDef.

  This class should be instantiated through TFLite Interpreter only using
  get_signature_runner method on Interpreter.
  Example,
  signature = interpreter.get_signature_runner("my_signature")
  result = signature(input_1=my_input_1, input_2=my_input_2)
  print(result["my_output"])
  print(result["my_second_output"])
  All names used are this specific SignatureDef names.

  Notes:
    No other function on this object or on the interpreter provided should be
    called while this object call has not finished.
  """

  def __init__(self, interpreter=None, signature_def_name=None):
    """Constructor.

    Args:
      interpreter: Interpreter object that is already initialized with the
        requested model.
      signature_def_name: SignatureDef names to be used.
    """
    if not interpreter:
      raise ValueError('None interpreter provided.')
    if not signature_def_name:
      raise ValueError('None signature_def_name provided.')
    self._interpreter = interpreter
    self._signature_def_name = signature_def_name
    signature_defs = interpreter._get_full_signature_list()
    if signature_def_name not in signature_defs:
      raise ValueError('Invalid signature_def_name provided.')
    self._signature_def = signature_defs[signature_def_name]
    self._outputs = self._signature_def['outputs'].items()
    self._inputs = self._signature_def['inputs']

  def __call__(self, **kwargs):
    """Runs the SignatureDef given the provided inputs in arguments.

    Args:
      **kwargs: key,value for inputs to the model. Key is the SignatureDef
        input name. Value is numpy array with the value.

    Returns:
      dictionary of the results from the model invoke.
      Key in the dictionary is SignatureDef output name.
      Value is the result Tensor.
    """
    if len(kwargs) != len(self._inputs):
      raise ValueError(
          'Invalid number of inputs provided for running a SignatureDef, '
          'expected %s vs provided %s' % (len(kwargs), len(self._inputs)))
    # Resize input tensors (tensors are re-resized and re-allocated on every
    # call, so input shapes may vary between calls).
    for input_name, value in kwargs.items():
      if input_name not in self._inputs:
        raise ValueError('Invalid Input name (%s) for SignatureDef' %
                         input_name)
      self._interpreter.resize_tensor_input(self._inputs[input_name],
                                            value.shape)
    # Allocate tensors.
    self._interpreter.allocate_tensors()
    # Set the input values.
    for input_name, value in kwargs.items():
      self._interpreter._set_input_tensor(
          input_name, value=value, method_name=self._signature_def_name)
    self._interpreter.invoke()
    result = {}
    for output_name, output_index in self._outputs:
      result[output_name] = self._interpreter.get_tensor(output_index)
    return result


@_tf_export('lite.Interpreter')
class Interpreter(object):
  """Interpreter interface for TensorFlow Lite Models.

  This makes the TensorFlow Lite interpreter accessible in Python.
  It is possible to use this interpreter in a multithreaded Python
  environment, but you must be sure to call functions of a particular
  instance from only one thread at a time. So if you want to have 4 threads
  running different inferences simultaneously, create  an interpreter for
  each one as thread-local data. Similarly, if you are calling invoke() in
  one thread on a single interpreter but you want to use tensor() on another
  thread once it is done, you must use a synchronization primitive between
  the threads to ensure invoke has returned before calling tensor().
  """

  def __init__(self,
               model_path=None,
               model_content=None,
               experimental_delegates=None,
               num_threads=None):
    """Constructor.

    Args:
      model_path: Path to TF-Lite Flatbuffer file.
      model_content: Content of model.
      experimental_delegates: Experimental. Subject to change. List of
        [TfLiteDelegate](https://www.tensorflow.org/lite/performance/delegates)
        objects returned by lite.load_delegate().
      num_threads: Sets the number of threads used by the interpreter and
        available to CPU kernels. If not set, the interpreter will use an
        implementation-dependent default number of threads. Currently, only a
        subset of kernels, such as conv, support multi-threading.

    Raises:
      ValueError: If the interpreter was unable to create.
    """
    if not hasattr(self, '_custom_op_registerers'):
      self._custom_op_registerers = []
    # Exactly one of model_path / model_content must be provided; the two
    # branches below build the wrapper from a file or from an in-memory
    # buffer respectively.
    if model_path and not model_content:
      custom_op_registerers_by_name = [
          x for x in self._custom_op_registerers if isinstance(x, str)
      ]
      custom_op_registerers_by_func = [
          x for x in self._custom_op_registerers if not isinstance(x, str)
      ]
      self._interpreter = (
          _interpreter_wrapper.CreateWrapperFromFile(
              model_path, custom_op_registerers_by_name,
              custom_op_registerers_by_func))
      if not self._interpreter:
        raise ValueError('Failed to open {}'.format(model_path))
    elif model_content and not model_path:
      custom_op_registerers_by_name = [
          x for x in self._custom_op_registerers if isinstance(x, str)
      ]
      custom_op_registerers_by_func = [
          x for x in self._custom_op_registerers if not isinstance(x, str)
      ]
      # Take a reference, so the pointer remains valid.
      # Since python strings are immutable then PyString_XX functions
      # will always return the same pointer.
      self._model_content = model_content
      self._interpreter = (
          _interpreter_wrapper.CreateWrapperFromBuffer(
              model_content, custom_op_registerers_by_name,
              custom_op_registerers_by_func))
    elif not model_content and not model_path:
      raise ValueError('`model_path` or `model_content` must be specified.')
    else:
      raise ValueError('Can\'t both provide `model_path` and `model_content`')

    if num_threads is not None:
      if not isinstance(num_threads, int):
        raise ValueError('type of num_threads should be int')
      if num_threads < 1:
        raise ValueError('num_threads should >= 1')
      self._interpreter.SetNumThreads(num_threads)

    # Each delegate is a wrapper that owns the delegates that have been loaded
    # as plugins.
The interpreter wrapper will be using them, but we need to # hold them in a list so that the lifetime is preserved at least as long as # the interpreter wrapper. self._delegates = [] if experimental_delegates: self._delegates = experimental_delegates for delegate in self._delegates: self._interpreter.ModifyGraphWithDelegate( delegate._get_native_delegate_pointer()) # pylint: disable=protected-access self._signature_defs = self.get_signature_list() self._metrics = metrics.TFLiteMetrics() self._metrics.increase_counter_interpreter_creation() def __del__(self): # Must make sure the interpreter is destroyed before things that # are used by it like the delegates. NOTE this only works on CPython # probably. # TODO(b/136468453): Remove need for __del__ ordering needs of CPython # by using explicit closes(). See implementation of Interpreter __del__. self._interpreter = None self._delegates = None def allocate_tensors(self): self._ensure_safe() return self._interpreter.AllocateTensors() def _safe_to_run(self): """Returns true if there exist no numpy array buffers. This means it is safe to run tflite calls that may destroy internally allocated memory. This works, because in the wrapper.cc we have made the numpy base be the self._interpreter. """ # NOTE, our tensor() call in cpp will use _interpreter as a base pointer. # If this environment is the only _interpreter, then the ref count should be # 2 (1 in self and 1 in temporary of sys.getrefcount). return sys.getrefcount(self._interpreter) == 2 def _ensure_safe(self): """Makes sure no numpy arrays pointing to internal buffers are active. This should be called from any function that will call a function on _interpreter that may reallocate memory e.g. invoke(), ... Raises: RuntimeError: If there exist numpy objects pointing to internal memory then we throw. """ if not self._safe_to_run(): raise RuntimeError("""There is at least 1 reference to internal data in the interpreter in the form of a numpy array or slice. 
Be sure to only hold the function returned from tensor() if you are using raw data access.""") # Experimental and subject to change def _get_op_details(self, op_index): """Gets a dictionary with arrays of ids for tensors involved with an op. Args: op_index: Operation/node index of node to query. Returns: a dictionary containing the index, op name, and arrays with lists of the indices for the inputs and outputs of the op/node. """ op_index = int(op_index) op_name = self._interpreter.NodeName(op_index) op_inputs = self._interpreter.NodeInputs(op_index) op_outputs = self._interpreter.NodeOutputs(op_index) details = { 'index': op_index, 'op_name': op_name, 'inputs': op_inputs, 'outputs': op_outputs, } return details def _get_tensor_details(self, tensor_index): """Gets tensor details. Args: tensor_index: Tensor index of tensor to query. Returns: A dictionary containing the following fields of the tensor: 'name': The tensor name. 'index': The tensor index in the interpreter. 'shape': The shape of the tensor. 'quantization': Deprecated, use 'quantization_parameters'. This field only works for per-tensor quantization, whereas 'quantization_parameters' works in all cases. 'quantization_parameters': The parameters used to quantize the tensor: 'scales': List of scales (one if per-tensor quantization) 'zero_points': List of zero_points (one if per-tensor quantization) 'quantized_dimension': Specifies the dimension of per-axis quantization, in the case of multiple scales/zero_points. Raises: ValueError: If tensor_index is invalid. 
""" tensor_index = int(tensor_index) tensor_name = self._interpreter.TensorName(tensor_index) tensor_size = self._interpreter.TensorSize(tensor_index) tensor_size_signature = self._interpreter.TensorSizeSignature(tensor_index) tensor_type = self._interpreter.TensorType(tensor_index) tensor_quantization = self._interpreter.TensorQuantization(tensor_index) tensor_quantization_params = self._interpreter.TensorQuantizationParameters( tensor_index) tensor_sparsity_params = self._interpreter.TensorSparsityParameters( tensor_index) if not tensor_type: raise ValueError('Could not get tensor details') details = { 'name': tensor_name, 'index': tensor_index, 'shape': tensor_size, 'shape_signature': tensor_size_signature, 'dtype': tensor_type, 'quantization': tensor_quantization, 'quantization_parameters': { 'scales': tensor_quantization_params[0], 'zero_points': tensor_quantization_params[1], 'quantized_dimension': tensor_quantization_params[2], }, 'sparsity_parameters': tensor_sparsity_params } return details # Experimental and subject to change def _get_ops_details(self): """Gets op details for every node. Returns: A list of dictionaries containing arrays with lists of tensor ids for tensors involved in the op. """ return [ self._get_op_details(idx) for idx in range(self._interpreter.NumNodes()) ] def get_tensor_details(self): """Gets tensor details for every tensor with valid tensor details. Tensors where required information about the tensor is not found are not added to the list. This includes temporary tensors without a name. Returns: A list of dictionaries containing tensor information. """ tensor_details = [] for idx in range(self._interpreter.NumTensors()): try: tensor_details.append(self._get_tensor_details(idx)) except ValueError: pass return tensor_details def get_input_details(self): """Gets model input details. Returns: A list of input details. 
""" return [ self._get_tensor_details(i) for i in self._interpreter.InputIndices() ] def set_tensor(self, tensor_index, value): """Sets the value of the input tensor. Note this copies data in `value`. If you want to avoid copying, you can use the `tensor()` function to get a numpy buffer pointing to the input buffer in the tflite interpreter. Args: tensor_index: Tensor index of tensor to set. This value can be gotten from the 'index' field in get_input_details. value: Value of tensor to set. Raises: ValueError: If the interpreter could not set the tensor. """ self._interpreter.SetTensor(tensor_index, value) def resize_tensor_input(self, input_index, tensor_size, strict=False): """Resizes an input tensor. Args: input_index: Tensor index of input to set. This value can be gotten from the 'index' field in get_input_details. tensor_size: The tensor_shape to resize the input to. strict: Only unknown dimensions can be resized when `strict` is True. Unknown dimensions are indicated as `-1` in the `shape_signature` attribute of a given tensor. (default False) Raises: ValueError: If the interpreter could not resize the input tensor. Usage: ``` interpreter = Interpreter(model_content=tflite_model) interpreter.resize_tensor_input(0, [num_test_images, 224, 224, 3]) interpreter.allocate_tensors() interpreter.set_tensor(0, test_images) interpreter.invoke() ``` """ self._ensure_safe() # `ResizeInputTensor` now only accepts int32 numpy array as `tensor_size # parameter. tensor_size = np.array(tensor_size, dtype=np.int32) self._interpreter.ResizeInputTensor(input_index, tensor_size, strict) def get_output_details(self): """Gets model output details. Returns: A list of output details. """ return [ self._get_tensor_details(i) for i in self._interpreter.OutputIndices() ] def get_signature_list(self): """Gets list of SignatureDefs in the model. 
Example, ``` signatures = interpreter.get_signature_list() print(signatures) # { # 'add': {'inputs': ['x', 'y'], 'outputs': ['output_0']} # } Then using the names in the signature list you can get a callable from get_signature_runner(). ``` Returns: A list of SignatureDef details in a dictionary structure. It is keyed on the SignatureDef method name, and the value holds dictionary of inputs and outputs. """ full_signature_defs = self._interpreter.GetSignatureDefs() for _, signature_def in full_signature_defs.items(): signature_def['inputs'] = list(signature_def['inputs'].keys()) signature_def['outputs'] = list(signature_def['outputs'].keys()) return full_signature_defs def _get_full_signature_list(self): """Gets list of SignatureDefs in the model. Example, ``` signatures = interpreter._get_full_signature_list() print(signatures) # { # 'add': {'inputs': {'x': 1, 'y': 0}, 'outputs': {'output_0': 4}} # } Then using the names in the signature list you can get a callable from get_signature_runner(). ``` Returns: A list of SignatureDef details in a dictionary structure. It is keyed on the SignatureDef method name, and the value holds dictionary of inputs and outputs. """ return self._interpreter.GetSignatureDefs() def _set_input_tensor(self, input_name, value, method_name=None): """Sets the value of the input tensor. Input tensor is identified by `input_name` in the SignatureDef identified by `method_name`. If the model has a single SignatureDef then you can pass None as `method_name`. Note this copies data in `value`. Example, ``` input_data = np.array([1.2, 1.4], np.float32) signatures = interpreter.get_signature_list() print(signatures) # { # 'add': {'inputs': {'x': 1, 'y': 0}, 'outputs': {'output_0': 4}} # } interpreter._set_input_tensor(input_name='x', value=input_data, method_name='add_fn') ``` Args: input_name: Name of the output tensor in the SignatureDef. value: Value of tensor to set as a numpy array. 
method_name: The exported method name for the SignatureDef, it can be None if and only if the model has a single SignatureDef. Default value is None. Raises: ValueError: If the interpreter could not set the tensor. Or if `method_name` is None and model doesn't have a single Signature. """ if method_name is None: if len(self._signature_defs) != 1: raise ValueError( 'SignatureDef method_name is None and model has {0} Signatures. ' 'None is only allowed when the model has 1 SignatureDef'.format( len(self._signature_defs))) else: method_name = next(iter(self._signature_defs)) self._interpreter.SetInputTensorFromSignatureDefName( input_name, method_name, value) def get_signature_runner(self, method_name=None): """Gets callable for inference of specific SignatureDef. Example usage, ``` interpreter = tf.lite.Interpreter(model_content=tflite_model) interpreter.allocate_tensors() fn = interpreter.get_signature_runner('div_with_remainder') output = fn(x=np.array([3]), y=np.array([2])) print(output) # { # 'quotient': array([1.], dtype=float32) # 'remainder': array([1.], dtype=float32) # } ``` None can be passed for method_name if the model has a single Signature only. All names used are this specific SignatureDef names. Args: method_name: The exported method name for the SignatureDef, it can be None if and only if the model has a single SignatureDef. Default value is None. Returns: This returns a callable that can run inference for SignatureDef defined by argument 'method_name'. The callable will take key arguments corresponding to the arguments of the SignatureDef, that should have numpy values. The callable will returns dictionary that maps from output names to numpy values of the computed results. Raises: ValueError: If passed method_name is invalid. """ if method_name is None: if len(self._signature_defs) != 1: raise ValueError( 'SignatureDef method_name is None and model has {0} Signatures. 
' 'None is only allowed when the model has 1 SignatureDef'.format( len(self._signature_defs))) else: method_name = next(iter(self._signature_defs)) return SignatureRunner(interpreter=self, signature_def_name=method_name) def get_tensor(self, tensor_index): """Gets the value of the output tensor (get a copy). If you wish to avoid the copy, use `tensor()`. This function cannot be used to read intermediate results. Args: tensor_index: Tensor index of tensor to get. This value can be gotten from the 'index' field in get_output_details. Returns: a numpy array. """ return self._interpreter.GetTensor(tensor_index) def tensor(self, tensor_index): """Returns function that gives a numpy view of the current tensor buffer. This allows reading and writing to this tensors w/o copies. This more closely mirrors the C++ Interpreter class interface's tensor() member, hence the name. Be careful to not hold these output references through calls to `allocate_tensors()` and `invoke()`. This function cannot be used to read intermediate results. Usage: ``` interpreter.allocate_tensors() input = interpreter.tensor(interpreter.get_input_details()[0]["index"]) output = interpreter.tensor(interpreter.get_output_details()[0]["index"]) for i in range(10): input().fill(3.) interpreter.invoke() print("inference %s" % output()) ``` Notice how this function avoids making a numpy array directly. This is because it is important to not hold actual numpy views to the data longer than necessary. If you do, then the interpreter can no longer be invoked, because it is possible the interpreter would resize and invalidate the referenced tensors. The NumPy API doesn't allow any mutability of the the underlying buffers. WRONG: ``` input = interpreter.tensor(interpreter.get_input_details()[0]["index"])() output = interpreter.tensor(interpreter.get_output_details()[0]["index"])() interpreter.allocate_tensors() # This will throw RuntimeError for i in range(10): input.fill(3.) 
    interpreter.invoke()  # this will throw RuntimeError since input,output
    ```

    Args:
      tensor_index: Tensor index of tensor to get. This value can be gotten from
        the 'index' field in get_output_details.

    Returns:
      A function that can return a new numpy array pointing to the internal
      TFLite tensor state at any point. It is safe to hold the function forever,
      but it is not safe to hold the numpy array forever.
    """
    # Return a closure rather than the array itself so callers re-fetch the
    # (possibly reallocated) buffer on every access.
    return lambda: self._interpreter.tensor(self._interpreter, tensor_index)

  def invoke(self):
    """Invoke the interpreter.

    Be sure to set the input sizes, allocate tensors and fill values before
    calling this. Also, note that this function releases the GIL so heavy
    computation can be done in the background while the Python interpreter
    continues. No other function on this object should be called while the
    invoke() call has not finished.

    Raises:
      ValueError: When the underlying interpreter fails raise ValueError.
    """
    # Refuse to run while numpy views into internal buffers are alive,
    # since invoke may reallocate them.
    self._ensure_safe()
    self._interpreter.Invoke()

  def reset_all_variables(self):
    # Resets all variable tensors to their initial values.
    return self._interpreter.ResetVariableTensors()

  # Experimental and subject to change.
  def _native_handle(self):
    """Returns a pointer to the underlying tflite::Interpreter instance.

    This allows extending tflite.Interpreter's functionality in a custom C++
    function. Consider how that may work in a custom pybind wrapper:

      m.def("SomeNewFeature", ([](py::object handle) {
        auto* interpreter =
          reinterpret_cast<tflite::Interpreter*>(handle.cast<intptr_t>());
        ...
      }))

    and corresponding Python call:

      SomeNewFeature(interpreter.native_handle())

    Note: This approach is fragile. Users must guarantee the C++ extension build
    is consistent with the tflite.Interpreter's underlying C++ build.
    """
    return self._interpreter.interpreter()


class InterpreterWithCustomOps(Interpreter):
  """Interpreter interface for TensorFlow Lite Models that accepts custom ops.

  The interface provided by this class is experimental and therefore not exposed
  as part of the public API.

  Wraps the tf.lite.Interpreter class and adds the ability to load custom ops
  by providing the names of functions that take a pointer to a BuiltinOpResolver
  and add a custom op.
  """

  def __init__(self,
               model_path=None,
               model_content=None,
               experimental_delegates=None,
               custom_op_registerers=None):
    """Constructor.

    Args:
      model_path: Path to TF-Lite Flatbuffer file.
      model_content: Content of model.
      experimental_delegates: Experimental. Subject to change. List of
        [TfLiteDelegate](https://www.tensorflow.org/lite/performance/delegates)
        objects returned by lite.load_delegate().
      custom_op_registerers: List of str (symbol names) or functions that take a
        pointer to a MutableOpResolver and register a custom op. When passing
        functions, use a pybind function that takes a uintptr_t that can be
        recast as a pointer to a MutableOpResolver.

    Raises:
      ValueError: If the interpreter was unable to create.
    """
    # Must be set before the base __init__ runs: Interpreter.__init__ reads
    # self._custom_op_registerers when creating the native wrapper.
    self._custom_op_registerers = custom_op_registerers or []
    super(InterpreterWithCustomOps, self).__init__(
        model_path=model_path,
        model_content=model_content,
        experimental_delegates=experimental_delegates)
"""Generates API documentation by introspection.""" from django.http import HttpRequest from rest_framework import viewsets from rest_framework.serializers import BaseSerializer from .introspectors import APIViewIntrospector, \ ViewSetIntrospector, BaseMethodIntrospector, IntrospectorHelper, \ get_resolved_value, YAMLDocstringParser from . import SWAGGER_SETTINGS class DocumentationGenerator(object): # Serializers defined in docstrings explicit_serializers = set() # Serializers defined in fields fields_serializers = set() # Response classes defined in docstrings explicit_response_types = dict() def generate(self, apis): """ Returns documentation for a list of APIs """ api_docs = [] for api in apis: api_docs.append({ 'description': IntrospectorHelper.get_view_description(api['callback']), 'path': api['path'], 'operations': self.get_operations(api), }) return api_docs def get_operations(self, api): """ Returns docs for the allowed methods of an API endpoint """ operations = [] path = api['path'] pattern = api['pattern'] callback = api['callback'] callback.request = HttpRequest() if issubclass(callback, viewsets.ViewSetMixin): introspector = ViewSetIntrospector(callback, path, pattern) else: introspector = APIViewIntrospector(callback, path, pattern) for method_introspector in introspector: if not isinstance(method_introspector, BaseMethodIntrospector) or \ method_introspector.get_http_method() == "OPTIONS" or \ method_introspector.get_http_method().lower() not in SWAGGER_SETTINGS['enabled_methods']: continue # No one cares. I impose JSON. 
doc_parser = YAMLDocstringParser( docstring=method_introspector.get_docs()) serializer = self._get_method_serializer( doc_parser, method_introspector) response_type = self._get_method_response_type( doc_parser, serializer, introspector, method_introspector) operation = { 'method': method_introspector.get_http_method(), 'summary': method_introspector.get_summary(), 'nickname': method_introspector.get_nickname(), 'notes': method_introspector.get_notes(), 'type': response_type, } response_messages = doc_parser.get_response_messages() parameters = doc_parser.discover_parameters( inspector=method_introspector) if parameters: operation['parameters'] = parameters if response_messages: operation['responseMessages'] = response_messages # override operation from callback's docstring callback_doc_parser = YAMLDocstringParser(docstring=callback.__doc__) method = method_introspector.method.lower() if method in callback_doc_parser.object: overrides = callback_doc_parser.object[method] allowed_keys = {'method', 'summary', 'nickname', 'notes', 'type', 'parameters', 'responseMessages'} if isinstance(overrides, dict): for key, value in overrides.items(): if key in allowed_keys: operation[key] = value for parser in [doc_parser, callback_doc_parser]: if parser.yaml_error is not None: operation['notes'] += "<pre>YAMLError:\n {err}</pre>".format( err=parser.yaml_error) operations.append(operation) return operations def get_models(self, apis): """ Builds a list of Swagger 'models'. These represent DRF serializers and their fields """ serializers = self._get_serializer_set(apis) serializers.update(self.explicit_serializers) serializers.update( self._find_field_serializers(serializers) ) models = {} for serializer in serializers: data = self._get_serializer_fields(serializer) # Register 2 models with different subset of properties suitable # for data reading and writing. # i.e. rest framework does not output write_only fields in response # or require read_only fields in complex input. 
serializer_name = IntrospectorHelper.get_serializer_name(serializer) # Writing # no readonly fields w_name = "Write{serializer}".format(serializer=serializer_name) w_properties = dict((k, v) for k, v in data['fields'].items() if k not in data['read_only']) models[w_name] = { 'id': w_name, 'required': [i for i in data['required'] if i in w_properties.keys()], 'properties': w_properties, } # Reading # no write_only fields r_name = serializer_name r_properties = dict((k, v) for k, v in data['fields'].items() if k not in data['write_only']) models[r_name] = { 'id': r_name, 'required': [i for i in r_properties.keys()], 'properties': r_properties, } # Enable original model for testing purposes # models[serializer_name] = { # 'id': serializer_name, # 'required': data['required'], # 'properties': data['fields'], # } models.update(self.explicit_response_types) models.update(self.fields_serializers) return models def _get_method_serializer(self, doc_parser, method_inspector): """ Returns serializer used in method. Registers custom serializer from docstring in scope. Serializer might be ignored if explicitly told in docstring """ serializer = method_inspector.get_serializer_class() docstring_serializer = doc_parser.get_serializer_class( callback=method_inspector.callback ) if doc_parser.get_response_type() is not None: # Custom response class detected return None if docstring_serializer is not None: self.explicit_serializers.add(docstring_serializer) serializer = docstring_serializer if doc_parser.should_omit_serializer(): serializer = None return serializer def _get_method_response_type(self, doc_parser, serializer, view_inspector, method_inspector): """ Returns response type for method. This might be custom `type` from docstring or discovered serializer class name. 
Once custom `type` found in docstring - it'd be registered in a scope """ response_type = doc_parser.get_response_type() if response_type is not None: # Register class in scope view_name = view_inspector.callback.__name__ view_name = view_name.replace('ViewSet', '') view_name = view_name.replace('APIView', '') view_name = view_name.replace('View', '') response_type_name = "{view}{method}Response".format( view=view_name, method=method_inspector.method.title().replace('_', '') ) self.explicit_response_types.update({ response_type_name: { "id": response_type_name, "properties": response_type } }) return response_type_name else: serializer_name = IntrospectorHelper.get_serializer_name(serializer) if serializer_name is not None: return serializer_name return None def _get_serializer_set(self, apis): """ Returns a set of serializer classes for a provided list of APIs """ serializers = set() for api in apis: serializer = self._get_serializer_class(api['callback']) if serializer is not None: serializers.add(serializer) return serializers def _find_field_serializers(self, serializers): """ Returns set of serializers discovered from fields """ serializers_set = set() for serializer in serializers: fields = serializer().get_fields() for name, field in fields.items(): if isinstance(field, BaseSerializer): serializers_set.add(field) return serializers_set def _get_serializer_fields(self, serializer): """ Returns serializer fields in the Swagger MODEL format """ if serializer is None: return if hasattr(serializer, '__call__'): fields = serializer().get_fields() else: fields = serializer.get_fields() data = { 'fields': {}, 'required': [], 'write_only': [], 'read_only': [], } for name, field in fields.items(): if getattr(field, 'write_only', False): data['write_only'].append(name) if getattr(field, 'read_only', False): data['read_only'].append(name) if getattr(field, 'required', False): data['required'].append(name) data_type = field.type_label # guess format data_format = 
'string' if data_type in BaseMethodIntrospector.PRIMITIVES: data_format = BaseMethodIntrospector.PRIMITIVES.get(data_type)[0] f = { 'description': getattr(field, 'help_text', ''), 'type': data_type, 'format': data_format, 'required': getattr(field, 'required', False), 'defaultValue': get_resolved_value(field, 'default'), 'readOnly': getattr(field, 'read_only', None), } # Min/Max values max_val = getattr(field, 'max_val', None) min_val = getattr(field, 'min_val', None) if max_val is not None and data_type == 'integer': f['minimum'] = min_val if max_val is not None and data_type == 'integer': f['maximum'] = max_val # ENUM options if field.type_label == 'multiple choice' \ and isinstance(field.choices, list): f['enum'] = [k for k, v in field.choices] # Support for complex types if isinstance(field, BaseSerializer): field_serializer = IntrospectorHelper.get_serializer_name(field) if getattr(field, 'write_only', False): field_serializer = "Write{}".format(field_serializer) f['type'] = field_serializer if field.many: f['type'] = 'array' if data_type in BaseMethodIntrospector.PRIMITIVES: f['items'] = {'type': data_type} else: f['items'] = {'$ref': field_serializer} # memorize discovered field data['fields'][name] = f return data def _get_serializer_class(self, callback): if hasattr(callback, 'get_serializer_class'): return callback().get_serializer_class()
import urllib import urllib2 import logging import base64 import zlib import time import os import socks import json import cookielib try: import bz2 bzip2_enabled = True except Exception: print "Failed to import bz2 library (Did you compile Python from sources?). Bzip2 compression will not be a available." bzip2_enabled = False from urllib2 import HTTPError from utils import DictDiffer from socksipyhandler import SocksiPyHandler from client_base import * USER_AGENT = 'curl/7.51.0' class ByteportHTTPRedirectHandler(urllib2.HTTPRedirectHandler): def http_error_302(self, req, fp, code, msg, headers): print "Cookie Manip Right Here" return urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers) http_error_301 = http_error_303 = http_error_307 = http_error_302 class ByteportHttpClient(AbstractByteportClient): DEFAULT_BYTEPORT_API_PROTOCOL = 'http' DEFAULT_BYTEPORT_API_HOSTNAME = 'api.byteport.se' # DATETIME FORMAT ISO8601 = '%Y-%m-%dT%H:%M:%S.%f' # APIV1 URLS LOGIN_PATH = '/api/v1/login/' LOGOUT_PATH = '/api/v1/logout/' SESSION_PATH = '/api/v1/session/' ECHO_PATH = '/api/v1/echo/' LIST_NAMESPACES = '/api/v1/namespace/' QUERY_DEVICES = '/api/v1/search_devices/' GET_DEVICE = '/api/v1/namespace/%s/device/' GET_DEVICE_TYPE = '/api/v1/namespace/%s/device_type/' GET_FIRMWARE = '/api/v1/namespace/%s/device_type/%s/firmware/' GET_FIELD_DEFINITION = '/api/v1/namespace/%s/device_type/%s/field_definition/' REGISTER_DEVICES = '/api/v1/namespace/register_device/%s/' LOAD_TIMESERIES_DATA = '/api/v1/timeseries/%s/%s/%s/' DEFAULT_BYTEPORT_STORE_PATH = '/api/v1/timeseries/' PACKETS_STORE_PATH = '/api/legacy/packets/timeseries/' SEND_MESSAGE = '/api/v1/message/%s/%s/' SET_FIELDS = '/api/v1/device_control/set_fields/%s/%s/' # DEFAULT_BYTEPORT_API_STORE_URL = '%s://%s%s' % (DEFAULT_BYTEPORT_API_PROTOCOL, DEFAULT_BYTEPORT_API_HOSTNAME, DEFAULT_BYTEPORT_STORE_PATH) def __init__(self, namespace_name=None, api_key=None, default_device_uid=None, 
                 byteport_api_hostname=DEFAULT_BYTEPORT_API_HOSTNAME,
                 proxy_type=socks.PROXY_TYPE_SOCKS5,
                 proxy_addr="127.0.0.1",
                 proxy_port=None,
                 proxy_username=None,
                 proxy_password=None,
                 initial_heartbeat=True
                 ):

        # If any of the following are left as default (None), no store methods can be used
        self.namespace_name = namespace_name
        self.api_key = api_key

        if None in [namespace_name, api_key]:
            logging.warn("Store functions using API-key methods are disabled as no Namespace or API-key was supplied.")
            self.store_enabled = False
        else:
            self.store_enabled = True

        self.device_uid = default_device_uid
        self.byteport_api_hostname = byteport_api_hostname

        # Ie. for tunneling HTTP via SSH, first do:
        # ssh -D 5000 -N username@sshserver.org
        if proxy_port is not None:
            self.opener = urllib2.build_opener(SocksiPyHandler(proxy_type, proxy_addr, proxy_port))
            logging.info("Connecting through type %s proxy at %s:%s" % (proxy_type, proxy_addr, proxy_port))
        else:
            self.opener = None

        self.cookiejar = cookielib.CookieJar()

        if self.store_enabled:
            self.store_base_url = '%s://%s%s%s' % (self.DEFAULT_BYTEPORT_API_PROTOCOL, byteport_api_hostname, self.DEFAULT_BYTEPORT_STORE_PATH, namespace_name)
            logging.info('Storing data to Byteport using %s/%s/' % (self.store_base_url, default_device_uid))

            # Make empty test call to verify the credentials
            if initial_heartbeat:
                # This can also act as heart beat, no need to send data to signal "online" in Byteport
                self.store()

    def login(self, username, password, login_path=LOGIN_PATH):
        # Logs in against the Byteport session API; raises
        # ByteportLoginFailedException unless a sessionid cookie is obtained.
        url = '%s://%s%s' % (self.DEFAULT_BYTEPORT_API_PROTOCOL, self.byteport_api_hostname, login_path)

        # This will induce a GET-call to obtain the csrftoken needed for the actual login
        self.make_request(url)

        # Now, also extract the value of the csrftoken since we need it as a post data also
        csrftoken = self.__get_value_of_cookie('csrftoken')
        if csrftoken is None:
            raise ByteportClientException("Failed to extract csrftoken.")

        # And make the POST-call to login
        try:
            self.make_request(url=url,
                              post_data={'username': username,
                                         'password': password,
                                         'csrfmiddlewaretoken': csrftoken}
                              )
        except ByteportClientForbiddenException as e:
            raise ByteportLoginFailedException("Failed to login user with name %s" % username)

        # Make sure the sessionid cookie is present in the cookie jar now
        for cookie in self.cookiejar:
            if cookie.name == 'sessionid':
                return

        raise ByteportLoginFailedException("Failed to login user with name %s" % username)

    def __get_value_of_cookie(self, cookie_name):
        # Returns the value of the named cookie from the jar, or None.
        for cookie in self.cookiejar:
            if cookie.name == cookie_name:
                return cookie.value
        return None

    def logout(self):
        # Terminates the current Byteport session.
        url = '%s://%s%s' % (self.DEFAULT_BYTEPORT_API_PROTOCOL, self.byteport_api_hostname, self.LOGOUT_PATH)
        return self.make_request(url).read()

    def list_namespaces(self):
        # Returns the namespaces visible to the logged-in user (JSON-decoded).
        url = '%s://%s%s' % (self.DEFAULT_BYTEPORT_API_PROTOCOL, self.byteport_api_hostname, self.LIST_NAMESPACES)
        rq = self.make_request(url)
        return json.loads(rq.read())

    def query_devices(self, term, full=False, limit=20):
        # Searches devices matching `term`; `full` controls result detail.
        request_parameters = {'term': term, 'full': u'%s' % full, 'limit': limit}
        encoded_data = urllib.urlencode(request_parameters)
        url = '%s://%s%s?%s' % (self.DEFAULT_BYTEPORT_API_PROTOCOL, self.byteport_api_hostname, self.QUERY_DEVICES, encoded_data)
        return json.loads(self.make_request(url).read())

    def search_devices(self, term, full, limit):
        # Alias for query_devices, kept for API compatibility.
        return self.query_devices(term, full, limit)

    def send_message(self, namespace, device_uid, message, format='json'):
        # POSTs a message to a specific device in a namespace.
        base_url = '%s://%s%s' % (self.DEFAULT_BYTEPORT_API_PROTOCOL, self.byteport_api_hostname, self.SEND_MESSAGE)
        url = base_url % (namespace, device_uid)

        csrftoken = self.__get_value_of_cookie('csrftoken')

        post_data = {'message': message, 'format': format, 'csrfmiddlewaretoken': csrftoken}

        # Encode data to UTF-8 before storing
        utf8_encoded_data = self.convert_data_to_utf8(post_data)

        return json.loads(self.make_request(url, utf8_encoded_data).read())

    #TODO: Deprecated. Remove at some point.
def get_device(self, namespace, uid): base_url = '%s://%s%s' % (self.DEFAULT_BYTEPORT_API_PROTOCOL, self.byteport_api_hostname, self.GET_DEVICE) encoded_data = urllib.urlencode( {'uid':u'%s' % uid, 'depth': 1 } ) url = base_url % (namespace) + "?%s" % encoded_data return json.loads(self.make_request(url).read()) #TODO: Deprecated. Remove at some point. def list_devices(self, namespace, depth=0): base_url = '%s://%s%s' % (self.DEFAULT_BYTEPORT_API_PROTOCOL, self.byteport_api_hostname, self.GET_DEVICE) request_parameters = {'depth': u'%s' % depth} encoded_data = urllib.urlencode(request_parameters) url = base_url % namespace + '?%s' % encoded_data return json.loads(self.make_request(url).read()) def get_devices(self, namespace, key=None): base_url = '%s://%s%s' % (self.DEFAULT_BYTEPORT_API_PROTOCOL, self.byteport_api_hostname, self.GET_DEVICE) request_parameters = {} if key: request_parameters['_key'] = key url = base_url % namespace + '?%s' % urllib.urlencode(request_parameters) return json.loads(self.make_request(url).read()) def get_device_types(self, namespace, key=None): base_url = '%s://%s%s' % (self.DEFAULT_BYTEPORT_API_PROTOCOL, self.byteport_api_hostname, self.GET_DEVICE_TYPE) request_parameters = {} if key: request_parameters['_key'] = key url = base_url % namespace + '?%s' % urllib.urlencode(request_parameters) return json.loads(self.make_request(url).read()) def get_firmwares(self, namespace, device_type_id, key=None): base_url = '%s://%s%s' % (self.DEFAULT_BYTEPORT_API_PROTOCOL, self.byteport_api_hostname, self.GET_FIRMWARE) request_parameters = {} if key: request_parameters['_key'] = key url = base_url % (namespace, device_type_id) + '?%s' % urllib.urlencode(request_parameters) return json.loads(self.make_request(url).read()) def get_field_definitions(self, namespace, device_type_id, key=None): base_url = '%s://%s%s' % (self.DEFAULT_BYTEPORT_API_PROTOCOL, self.byteport_api_hostname, self.GET_FIELD_DEFINITION) request_parameters = {} if key: 
request_parameters['_key'] = key url = base_url % (namespace, device_type_id) + '?%s' % urllib.urlencode(request_parameters) return json.loads(self.make_request(url).read()) def batch_register_devices(self, namespace, uids, device_type_id, force=False, batch_register=False, reg_code_length=None, active= False, serial=None): base_url = '%s://%s%s' % (self.DEFAULT_BYTEPORT_API_PROTOCOL, self.byteport_api_hostname, self.REGISTER_DEVICES) url = base_url % namespace post_data = dict() post_data['device_uid'] = uids post_data['batch_register'] = batch_register post_data['active'] = active post_data['device_type_id'] = device_type_id post_data['force'] = force if reg_code_length: post_data['reg_code_length'] = reg_code_length if serial: post_data['serial'] = serial post_data['csrfmiddlewaretoken'] = self.__get_value_of_cookie('csrftoken') # Encode data to UTF-8 before storing utf8_encoded_data = self.convert_data_to_utf8(post_data) return json.loads(self.make_request(url, utf8_encoded_data).read()) def load_timeseries_data_range(self, namespace, uid, field_name, from_time, to_time): """ Load data using a datetime objects to define an exact range to fetch :param namespace: :param uid: :param field_name: :param from_time: :param to_time: :return: """ base_url = '%s://%s%s' % (self.DEFAULT_BYTEPORT_API_PROTOCOL, self.byteport_api_hostname, self.LOAD_TIMESERIES_DATA) request_parameters = {'from': from_time.strftime(self.ISO8601), 'to': to_time.strftime(self.ISO8601)} encoded_data = urllib.urlencode(request_parameters) url = base_url % (namespace, uid, field_name) + '?%s' % encoded_data return json.loads(self.make_request(url).read()) def load_timeseries_data(self, namespace, uid, field_name, **kwargs): """ Load data from byteport using various arguments supplied as request parameters to Byteport :param namespace: :param uid: :param field_name: :param kwargs: :return: """ base_url = '%s://%s%s' % (self.DEFAULT_BYTEPORT_API_PROTOCOL, self.byteport_api_hostname, 
self.LOAD_TIMESERIES_DATA) encoded_data = urllib.urlencode(kwargs) url = base_url % (namespace, uid, field_name) + '?%s' % encoded_data return json.loads(self.make_request(url).read()) def set_fields(self, namespace, uid, set_fields): base_url = '%s://%s%s' % (self.DEFAULT_BYTEPORT_API_PROTOCOL, self.byteport_api_hostname, self.SET_FIELDS) url = base_url % (namespace, uid) post_data = set_fields post_data['csrfmiddlewaretoken'] = self.__get_value_of_cookie('csrftoken') # Encode data to UTF-8 before storing utf8_encoded_data = self.convert_data_to_utf8(post_data) return json.loads(self.make_request(url, utf8_encoded_data).read()) def make_request(self, url, post_data=None, body=None): ''' :param url: URL to make the request to :param post_data: A dictionary that will be url-encoded if set :param body: If set, this will override any post_data and be directly set as the request body :return: ''' try: logging.debug(url) # Set a valid User agent tag since api.byteport.se is CloudFlared # TODO: add a proper user-agent and make sure CloudFlare can handle it headers = {'User-Agent': USER_AGENT} # NOTE: If post_data != None, the request will be a POST request instead if body is not None: post_data = body headers['Content-Type'] = 'application/json' elif post_data is not None: post_data = urllib.urlencode(post_data) req = urllib2.Request(url, headers=headers, data=post_data) if self.opener: opener = self.opener else: opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookiejar)) return opener.open(req) except HTTPError as http_error: logging.error(u'HTTPError accessing %s, Error was: %s' % (url, http_error)) if http_error.code == 403: message = u'403, You were not allowed to access the requested resource.' logging.info(message) raise ByteportClientForbiddenException(message) if http_error.code == 404: message = u'404, Make sure the device(s) is registered under ' \ u'namespace %s.' 
% self.namespace_name logging.info(message) raise ByteportClientDeviceNotFoundException(message) if http_error.code == 406: message = u'Not Allowed. Could be returned during device registration if you supply an invalid UID.' logging.info(message) raise ByteportNotAllowedException(message) if http_error.code == 500: message = u'500, Server error!' raise ByteportServerException(message) except urllib2.URLError as e: logging.error(u'URLError accessing %s, Error was: %s' % (url, e)) logging.info(u'Got URLError, make sure you have the correct network connections (ie. to the internet)!') if self.opener is not None: logging.info(u'Make sure your proxy settings are correct and you can connect to the proxy host you specified.') raise ByteportConnectException(u'Failed to connect to byteport, check your network and proxy settings and setup.') # Simple wrapper for logging with ease def log(self, message, level='info', device_uid=None): self.store({level: message}, device_uid) # # Store a single file vs a field name to Byteport via HTTP POST with optional compresstion # def base64_encode_and_store_file(self, field_name, path_to_file, device_uid=None, timestamp=None, compression=None): if timestamp is not None: timestamp = self.auto_timestamp(timestamp) with open(path_to_file, 'r') as content_file: file_data = content_file.read() self.base64_encode_and_store(field_name, file_data, device_uid, timestamp, compression) # # Store a single file vs a field name with no encoding or compression # def store_file(self, field_name, path_to_file, device_uid=None, timestamp=None): if timestamp is not None: timestamp = self.auto_timestamp(timestamp) with open(path_to_file, 'r') as content_file: data = {field_name: content_file.read()} if timestamp is not None: timestamp = self.auto_timestamp(timestamp) data['_ts'] = timestamp self.store(data, device_uid) def sorted_ls(self, path): mtime = lambda f: os.stat(os.path.join(path, f)).st_mtime return list(sorted(os.listdir(path), key=mtime)) def 
store_directory(self, directory_path, device_uid, timestamp=None): directory_data = dict() # Get a list of files sorted by time list_of_files_in_directory = self.sorted_ls(directory_path) # Dump files with content to dictionary for file_name in list_of_files_in_directory: path_to_file = directory_path + '/' + file_name with open(path_to_file, 'r') as content_file: directory_data[file_name] = content_file.read() self.store(directory_data, device_uid=device_uid, timestamp=timestamp) ''' NOTE: Move to some kind of "layer-2" helper module instead. let implementor handle the loop? ''' def poll_directory_and_store_upon_content_change(self, directory_path, device_uid, timestamp=None, poll_interval=5): # initial empty data last_data = dict() while True: current_data = dict() # Get a list of files sorted by time list_of_files_in_directory = self.sorted_ls(directory_path) # Dump files with content to dictionary for file_name in list_of_files_in_directory: path_to_file = directory_path + '/' + file_name with open(path_to_file, 'r') as content_file: current_data[file_name] = content_file.read() # This will obtain the keys that has changed value changed_data = DictDiffer(current_data, last_data).changed() added_data = DictDiffer(current_data, last_data).added() data_to_send = dict() for key in changed_data: data_to_send[key] = current_data[key] for key in added_data: data_to_send[key] = current_data[key] if len(data_to_send) > 0: try: self.store(data_to_send, device_uid=device_uid, timestamp=timestamp) last_data = current_data except Exception as e: logging.warn("Failed to store data, reason was: %s" % e) time.sleep(poll_interval) # # Store a single data block vs a field name to Byteport via HTTP POST # def base64_encode_and_store(self, field_name, fileobj, device_uid=None, timestamp=None, compression=None): if compression is None: data_block = fileobj elif compression == 'gzip': data_block = zlib.compress(fileobj) elif compression == 'bzip2' and bzip2_enabled: data_block = 
bz2.compress(fileobj) else: raise ByteportClientUnsupportedCompressionException("Unsupported compression method '%s'" % compression) data = {field_name: base64.b64encode(data_block)} if timestamp is not None: timestamp = self.auto_timestamp(timestamp) data['_ts'] = timestamp self.store(data, device_uid) def store(self, data=None, device_uid=None, timestamp=None): if data is None: data = dict() if device_uid is None: device_uid = self.device_uid data['_key'] = self.api_key url = '%s/%s/' % (self.store_base_url, device_uid) # Encode data to UTF-8 before storing utf8_encoded_data = self.convert_data_to_utf8(data) self.make_request(url, utf8_encoded_data) def store_packets(self, packets, legacy_key, json_encode=True): url = '%s://%s%s' % (self.DEFAULT_BYTEPORT_API_PROTOCOL, self.byteport_api_hostname, self.PACKETS_STORE_PATH) if json_encode: packets_as_json = json.dumps(packets) else: packets_as_json = packets data = dict() data['packets'] = packets_as_json data['legacy_key'] = legacy_key self.make_request(url, self.convert_data_to_utf8(data)) ''' Simple client for sending data using HTTP GET request (ie. data goes as request parameters) Use the ByteportHttpPostClient for most cases unless you have very good reason for using this method. Since URLs are limited to 2Kb, the maximum allowed data to send is limited for each request. WARNING: May become deprecated! ''' class ByteportHttpGetClient(ByteportHttpClient): # Can use another device_uid to override the one used in the constructor # Useful for Clients that acts as proxies for other devices, ie. 
over a sensor-network def store(self, data=None, device_uid=None, timestamp=None): if data is None: data = dict() if device_uid is None: device_uid = self.device_uid data['_key'] = self.api_key if timestamp is not None: float_timestamp = self.auto_timestamp(timestamp) data['_ts'] = float_timestamp # Encode data to UTF-8 before storing utf8_encoded_data = self.convert_data_to_utf8(data) # By URL-encoding, the make_request call will be made using GET-request encoded_data = urllib.urlencode(utf8_encoded_data) url = '%s/%s/?%s' % (self.store_base_url, device_uid, encoded_data) self.make_request(url)
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import

from pylab import *
from numpy import fft
from numpy import linalg
from scipy import integrate
from scipy import interpolate
from numpy.polynomial import chebyshev
import os

from matplotlib import rc
rc("text", usetex=True)
from mpltools import style
style.use('ggplot')

figure(figsize=(5, 4))
ax = subplot()

# -----------------------------------------------------------------------------
# Plotting
# --------
SHOW_PLOT = True
SAVE_PLOT = False
PLOT_NAME = ""
PLOT_LINE = True
PLOT_POINTS = True

filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                        "paper/figures", PLOT_NAME) + ".pdf"

# -----------------------------------------------------------------------------
# Simulations
# -----------
T = 0.1
# BUGFIX: STEPS must be an integer -- it is used as linspace()'s `num`
# argument and as an array-index bound in run_simulation().
STEPS = int(T / 1E-2)
N = 20
METHOD = ["remove_points", "remove_lowest"][1]
SHAPE = ["circle", "ellipse", "blob"][2]

# -----------------------------------------------------------------------------
# Methods
# -------
def remove_points(shape):
    """Normal-speed field driven by (curvature - 1), squashed through arctan."""
    return arctan((shape.curvature() - 1) * 5) + pi / 2

def remove_lowest(shape):
    """Normal-speed field concentrated near the lowest point of the curve."""
    n_fine = int(1E2)  # integer: used as a mode count
    y_min = amin(real(fft.ifft(change_n(shape.x_hat, n_fine), axis=0))[:, 1])
    g = 1 / (5 * absolute(shape.x[:, 1] - y_min) + 0.5) - 0.5
    g[g < 0] = 0
    return g

method = {"remove_points": remove_points, "remove_lowest": remove_lowest}[METHOD]

def circle(s):
    x = cos(s)
    y = sin(s)
    return SpectralShape(vstack([x, y]).T)

def ellipse(s):
    x = 2 * cos(s)
    y = sin(s)
    return SpectralShape(vstack([x, y]).T)

def blob(s):
    x = cos(s) * chebyshev.chebval((s - pi) / pi, [2, 0, 1])
    y = sin(s) * chebyshev.chebval((s - pi) / pi, [2, 0, 0, 0, 1])
    return SpectralShape(vstack([x, y]).T)

shape_func = {"circle": circle, "ellipse": ellipse, "blob": blob}[SHAPE]

# -----------------------------------------------------------------------------
DIGITS = 53
TOL = sqrt(0.5 ** DIGITS)

def ellipse_circumference(a, b):
    """
    Compute the circumference of an ellipse with semi-axes a and b.
    Require a >= 0 and b >= 0.  Relative accuracy is about 0.5^53.
    """
    x = max(a, b)
    y = min(a, b)
    if DIGITS * y < TOL * x:  # essentially degenerate (flat) ellipse
        return 4 * x
    s = 0
    m = 1
    # BUGFIX: the loop bound referenced an undefined name `tol` (NameError);
    # the module-level constant is TOL.
    while x - y > TOL * y:
        x = 0.5 * (x + y)
        y = sqrt(x * y)
        m *= 2
        s += m * (x - y) ** 2
    return pi * ((a + b) ** 2 - s) / (x + y)

def vnorm(x):
    """Euclidean norm along the last axis."""
    return sqrt(sum(x ** 2, axis=-1))

def vdot(a, b):
    """Dot product along the last axis."""
    return sum(a * b, axis=-1)

def vcross(a, b):
    """Scalar (z-component) cross product of 2-D vectors along the last axis.

    BUGFIX: the original body was `return vcross(a, b)` -- unconditional
    infinite recursion.
    """
    return a[..., 0] * b[..., 1] - a[..., 1] * b[..., 0]

def change_n(x_hat, n):
    """Resample a 2-column spectrum to n modes (zero-pad up, truncate down)."""
    n = int(n)  # callers sometimes pass floats such as 1E2
    n_old = len(x_hat)
    if n > n_old:
        x_hat = insert(x_hat, n_old // 2, zeros([n - n_old, 2]), axis=0)
    else:
        x_hat = take(x_hat, indices=fft_k(n), axis=0)
    return (n / n_old) * x_hat

def fft_k(n):
    """Signed FFT wavenumbers in numpy's FFT ordering: 0..n/2-1, -n/2..-1.

    BUGFIX: the original returned hstack(arange(n)) -- the unsigned indices
    0..n-1 -- which makes spectral_derivative() wrong for the entire
    negative-frequency half of the spectrum and makes change_n() keep the
    wrong coefficients when truncating.
    """
    return hstack([arange(0, (n + 1) // 2), arange(-(n // 2), 0)])

def fft_theta(n):
    """Uniform parameterisation points on [0, 2*pi)."""
    return linspace(0, 2 * pi, n, endpoint=False)

def spectral_derivative(x_hat, p=1):
    """p-th spectral derivative; the Nyquist mode is zeroed for odd p."""
    n = len(x_hat)
    k = fft_k(n)[:, newaxis]
    w_hat = x_hat * (1j * k) ** p
    if p % 2 == 1:
        # BUGFIX: n/2 is a float under `from __future__ import division`;
        # an integer index is required.
        w_hat[n // 2] = 0
    return w_hat

PLOT_N = int(1E2)  # integer: used as a mode count in change_n()

def plot_spectral(x_hat):
    """Plot both coordinate components of a spectrum on a fine grid."""
    s_fine = fft_theta(len(x_hat))
    x_fine = real(fft.ifft(change_n(x_hat, PLOT_N), axis=0))
    plot(s_fine, x_fine)

class SpectralShape(object):
    """A closed 2-D curve stored as the FFT of its sample points (x_hat)."""

    def __init__(self, x):
        self.x = x  # goes through the setter below, which stores x_hat

    def __len__(self):
        return len(self.x_hat)

    @property
    def x(self):
        return real(fft.ifft(self.x_hat, axis=0))

    @x.setter
    def x(self, value):
        self.x_hat = fft.fft(value, axis=0)

    def surface_normal(self):
        """Unit outward normals (tangent rotated by 90 degrees)."""
        x_dot = real(fft.ifft(spectral_derivative(self.x_hat), axis=0))
        x_dot_n = x_dot[:, (1, 0)] * [-1, 1]
        x_dot_n /= vnorm(x_dot_n)[:, newaxis]
        return x_dot_n

    def surface_tangent(self):
        """Unit tangents of the curve."""
        x_dot = real(fft.ifft(spectral_derivative(self.x_hat), axis=0))
        x_dot /= vnorm(x_dot)[:, newaxis]
        return x_dot

    def curvature(self):
        """Signed curvature kappa = (x' x x'') / |x'|^3."""
        x_dot = real(fft.ifft(spectral_derivative(self.x_hat), axis=0))
        x_ddot = real(fft.ifft(spectral_derivative(self.x_hat, p=2), axis=0))
        kappa = vcross(x_dot, x_ddot) / vnorm(x_dot) ** 3
        return kappa

    def dxdt(self, method):
        """Velocity field: normal motion from `method` plus tangential redistribution."""
        g = method(self)
        dx_hatdt = g[:, newaxis] * self.surface_normal()
        x_ddot = real(fft.ifft(spectral_derivative(self.x_hat, p=2), axis=0))
        a_t = vdot(x_ddot, self.surface_tangent())
        a_t *= norm(g) / norm(a_t)
        dx_hatdt += a_t[:, newaxis] * self.surface_tangent()
        return dx_hatdt

    def plot(self, label=None):
        x_fine = real(fft.ifft(change_n(self.x_hat, PLOT_N), axis=0))
        # next() builtin works for both Python 2 and Python 3 iterators
        color = next(ax._get_lines.color_cycle)
        if PLOT_LINE:
            ax.plot(x_fine[:, 0], x_fine[:, 1], color, label=label)
        if PLOT_POINTS:
            ax.plot(self.x[:, 0], self.x[:, 1], "x", color="{}".format(color))
        axis('equal')

# -----------------------------------------------------------------------------
def run_simulation(shape, t_steps, method):
    """Integrate the shape evolution with odeint and plot snapshots."""
    def func(x, t):
        shape.x = x.reshape(-1, 2)
        return shape.dxdt(method).flatten()

    x_simulation = integrate.odeint(func, shape.x.flatten(), t_steps)
    x_simulation = x_simulation.reshape(len(t_steps), -1, 2)

    for i in arange(STEPS, step=int(STEPS / 2)):
        shape.x = x_simulation[i]
        shape.plot(label="t = {:.2f}".format(t_steps[i]))
    legend()
    # BUGFIX: the SAVE_PLOT / SHOW_PLOT configuration switches were defined
    # at the top of the file but ignored here.
    if SAVE_PLOT:
        savefig(filename)
    if SHOW_PLOT:
        show()

s = fft_theta(N)
shape = shape_func(s)
t = linspace(0, T, STEPS)
run_simulation(shape, t, method)
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 10 21:26:00 2015

@author: J.Hillairet
"""
import skrf as rf
import numpy as np
import scipy.optimize


class ConjugateT(object):
    """
    ConjugateT class.

    This class describes a conjugate-T with 2 matching capacitors.
    It consists of:
    - two capacitors (defined by their capacitance values)
    and eventually:
    - one bridge (an ideal T-junction if not provided)
    - one impedance transformer (an ideal piece of transmission line if not provided)
    - one window (an ideal piece of transmission line if not provided)
    """

    def __init__(self, bridge, imp_tr=None, window=None, C=None,
                 capacitor_model='equivalent', name='CT'):
        """
        Resonant Loop Constructor.

        Arguments
        ----------
        bridge: :class: 'skrf.network'
            Bridge network
        imp_tr: :class: 'skrf.network'
            Impedance Transformer network
        window: :class: 'skrf.network'
            Window (feed-through) network
        C=[CH,CB]: float array (default: 60pF)
            Capacitor (Upper and Lower) values in Farad
        capacitor_model: string {'ideal', 'equivalent', 'advanced'} (default:'equivalent')
            Capacitor electrical model
        name: string (default:'CT')
            Name of the network created
        """
        # BUGFIX: the default C used to be the mutable literal
        # [60e-12, 60e-12]; replaced by the None-sentinel idiom so the
        # default list cannot be shared/mutated across instances.
        if C is None:
            C = [60e-12, 60e-12]
        assert len(C) == 2, 'C=[CH, CB] should be of length 2.'

        if isinstance(window, rf.Network) and isinstance(imp_tr, rf.Network):
            # creates the circuit = window + impedance transformer + bridge
            # impedance_transformer : port0 40 Ohm ; port1 5 Ohm
            # window : port0 30 ohm ; port1 40 Ohm
            # 1-imp_tr-0 -- 1-window-0  ==>  1:5 Ohm -- 0:30 Ohm
            window_imptrans = rf.connect(window, 1, imp_tr, 0)
            # bridge port0: input
            window_imptrans_bridge = rf.connect(window_imptrans, 1, bridge, 0)
        else:
            # no impedance transformer nor window
            window_imptrans_bridge = bridge

        self.circuit = window_imptrans_bridge

        # set various properties
        self.capacitor_model = capacitor_model
        self.frequency = window_imptrans_bridge.frequency
        self.z0 = window_imptrans_bridge.z0[0][:]
        self.C = C  # triggers get_network() through the property setter

        self.name = name

    def __repr__(self):
        return 'Conjugate-T network with CH={} pF and CB={} pF. Network: {}'\
            .format(self._C[0]*1e12, self._C[1]*1e12, self.circuit)

    @property
    def C(self):
        return self._C

    @C.setter
    def C(self, C):
        """
        Set the two capacitor values.

        Arguments
        ---------
        C=[CH,CB] in F
        """
        assert len(C) == 2, 'C=[CH, CB] should be of length 2.'
        self._C = C
        # update the network
        self.network = self.get_network()

    def get_network(self):
        """
        Creates a (skrf) Network of the conjugate-T with its capacitors.

        The returned Network is a 3-ports Network, where port#0 is the
        input port and port#1 and #2 are the load ports.

        Returns
        --------
        - skrf.Network
        """
        capa_H = self._capacitor_network(self.C[0], z0=self.z0[1])
        capa_B = self._capacitor_network(self.C[1], z0=self.z0[2])
        # 1-CH-0 ** 1 - 0
        # 1-CB-0 ** 2 |
        return(rf.connect(rf.connect(self.circuit, 1, capa_H, 0), 2, capa_B, 0))

    def load(self, Z_plasma):
        """
        Load the conjugate-T with plasma impedance(s) and return the loaded
        conjugate-T as a 1-port network.

        The plasma complex impedance can be either one or two scalars
        (ie no poloidal coupling) or a 2x2 array.

        Parameters
        ----------
        Z_plasma : scalar, 2-element array [Z_plasma_upper, Z_plasma_lower] or 2x2 array
            Complex impedances to be connected at the bridge output ports

        Returns
        ----------
        network: :class: 'skrf.network'
            Resulting network (1 port)
        """
        freq = self.frequency  # Frequency object
        z0_RDL_H = self.z0[1]
        z0_RDL_B = self.z0[2]

        # A scalar impedance applies to both bridge outputs
        if np.isscalar(Z_plasma):
            Z_plasma = np.full(2, Z_plasma)

        if np.shape(Z_plasma) == (2,):
            # one independent impedance per output port
            load_H = rf.Network.from_z(np.full((len(freq), 1, 1), Z_plasma[0]),
                                       z0=z0_RDL_H, frequency=freq)
            load_B = rf.Network.from_z(np.full((len(freq), 1, 1), Z_plasma[1]),
                                       z0=z0_RDL_B, frequency=freq)
            return(rf.connect(rf.connect(self.get_network(), 1, load_H, 0), 1,
                              load_B, 0))
        elif np.shape(Z_plasma) == (2, 2):
            # full 2x2 impedance matrix (poloidal coupling): convert into
            # S-parameter matrices (f x n x n) under the correct char. impedance
            S_L = rf.z2s(np.tile(Z_plasma, (len(freq), 1, 1)),
                         z0=[z0_RDL_H, z0_RDL_B])

            # creates Network object from S-matrix
            load = rf.Network(s=S_L, z0=[z0_RDL_H, z0_RDL_B])
            load.frequency = freq

            # Connect the loads to the bridge ports 1 & 2
            _loaded_bridge = rf.connect(self.get_network(), 1, load, 0)
            loaded_bridge = rf.innerconnect(_loaded_bridge, 1, 2)

            return(loaded_bridge)

    def _capacitor_network(self, C, z0):
        """
        Return a 2-ports skrf.Network of a capacitor.

        Parameters
        ----------
        C : float
            capacitance [F]
        z0 : float
            line characteristic impedance [Ohm]

        Returns
        -------
        capacitor : :class: 'skrf.network'
            Resulting network (2 ports)
        """
        line = rf.media.DefinedGammaZ0(frequency=self.frequency, z0=z0)
        # BUGFIX: string identity tests (`self.capacitor_model is 'ideal'`)
        # replaced by equality tests -- `is` on str literals is
        # implementation-dependent and raises SyntaxWarning on Python 3.8+.
        if self.capacitor_model == 'ideal':
            capacitor = line.capacitor(C)
        elif self.capacitor_model == 'equivalent':
            R_serie = 0.01   # Ohm
            L_serie = 24e-9  # H
            capacitor = line.resistor(R_serie) ** line.inductor(L_serie) ** line.capacitor(C)
        elif self.capacitor_model == 'advanced':
            R = 1e-2    # Ohm
            L = 29.9    # nH
            R1 = 1e-2   # Ohm
            C1 = 25.7   # pF
            L1 = 2.4    # nH
            pre = line.resistor(R1) ** line.inductor(L1*1e-9) ** line.shunt_capacitor(C1*1e-12)
            post = line.shunt_capacitor(C1*1e-12) ** line.resistor(R1) ** line.inductor(L1*1e-9)
            cap = line.resistor(R) ** line.inductor(L*1e-9) ** line.capacitor(C)
            capacitor = pre ** cap ** post
        return(capacitor)

    def match(self, f_match=50e6, z_load=1.0+30*1j, z_match=30+0*1j):
        """
        Match the resonant loop for a prescribed load impedance at a
        specified frequency.

        Parameters
        ----------
        f_match: (default: 50 MHz)
            matching frequency in Hz
        z_load: scalar, 2-element array or 2x2 array (default: 1+30j)
            complex impedance for both bridge outputs
        z_match: scalar, matching impedance (default: 30 ohm)

        Returns
        ----------
        sol: :class: 'scipy.optimize.solution'
        """
        success = False
        while not success:
            # generate a random capacitor set, centered on 70pF +/-40pF
            # values expressed in pF
            # BUGFIX: scipy.random (an old alias of numpy.random) has been
            # removed from SciPy; use numpy.random directly.
            C0_pF = 70 + (-1 + 2*np.random.rand(2))*40
            # use root since _optim_fun returns a vector (bounds unsupported)
            sol = scipy.optimize.root(self._optim_fun_single_RL, C0_pF,
                                      args=(f_match, z_load, z_match))
            success = sol.success
            print(success, sol.x)

            for idm, Cm in enumerate(sol.x):
                if (Cm < 12) or (Cm > 200):
                    success = False
                    print('Bad solution found (out of range capacitor) ! Re-doing...')

        self.C = sol.x*1e-12
        return(sol)

    def _optim_fun_single_RL(self, C_pF, f_match, z_load, z_match):
        """
        Return the residuals of the match conditions at C=[C1,C2] (in pF).

        Parameters
        ----------
        C_pF : capacitor values in pF
        f_match : matching frequency in Hz
        z_load : load impedance
        z_match : target input impedance
        """
        self.C = C_pF * 1e-12

        loaded_RL = self.load(z_load)

        index_f_match = np.argmin(np.abs(loaded_RL.frequency.f - f_match))

        Z11_re = loaded_RL.z_re[index_f_match].squeeze()  # ~ 50 MHz (mid-band point)
        Z11_im = loaded_RL.z_im[index_f_match].squeeze()

        # residuals (vector return, for root)
        y = [Z11_re - np.real(z_match),
             Z11_im - np.imag(z_match)]
        return(y)

    def _plasma_power_waves(self, Z_plasma, a_in):
        '''
        Returns the power waves a, b at the capacitors (plasma side).

        Arguments
        ---------
        - a_in: power wave input of CT
        - Z_plasma: complex impedance of the plasma [2x1]

        Return
        --------
        - a_plasma: power wave from CT to plasma
        - b_plasma: power wave from plasma to CT
        '''
        # get unloaded network with the current set of capacitors
        CT = self.get_network()

        S_plasma_H = rf.z2s(Z_plasma[0]*np.ones((len(CT.f), 1, 1)), z0=self.z0[1])
        S_plasma_B = rf.z2s(Z_plasma[1]*np.ones((len(CT.f), 1, 1)), z0=self.z0[2])

        a_plasma = []
        b_plasma = []
        for idf, f in enumerate(self.frequency.f):
            S_CT = CT.s[idf]
            S_plasma = np.eye(2)*[np.squeeze(S_plasma_H[idf]), np.squeeze(S_plasma_B[idf])]

            _a = np.linalg.inv(np.eye(2) - S_CT[1:, 1:].dot(S_plasma)).dot(S_CT[1:, 0])*a_in
            _b = S_plasma.dot(_a)

            a_plasma.append(_a)
            b_plasma.append(_b)

        a_plasma = np.column_stack(a_plasma)
        b_plasma = np.column_stack(b_plasma)

        return a_plasma, b_plasma

    def get_capacitor_currents_voltages(self, Z_plasma, Pin):
        '''
        Return the currents and voltages at the capacitors (plasma side).

        Arguments
        ---------
        - Pin: input power in the CT [W]
        - Z_plasma: complex impedance of the plasma [2x1]

        Return
        --------
        - I_capa : current in A
        - V_capa : voltage in V
        '''
        # Watch out the factor 2 in the power wave definition:
        # the power is defined by P = 1/2 V.I --> P = 1/2 a^2
        a_in = np.sqrt(2*Pin)
        a, b = self._plasma_power_waves(Z_plasma, a_in)
        z0 = self.get_network().z0[:, 1:]
        I_capa = (a - b).T/np.sqrt(np.real(z0))
        V_capa = (np.conjugate(z0)*a.T + z0*b.T)/np.sqrt(np.real(z0))
        return I_capa, V_capa
#!/usr/bin/python # coding=utf-8 """ audfprint.py Implementation of acoustic-landmark-based robust fingerprinting. Port of the Matlab implementation. 2014-05-25 Dan Ellis dpwe@ee.columbia.edu """ from __future__ import division, print_function # For reporting progress time import time # For command line interface import docopt import os # For __main__ import sys # For multiprocessing options import multiprocessing import joblib # The actual analyzer class/code import audfprint_analyze # Access to match functions, used in command line interface import audfprint_match # My hash_table implementation import hash_table if sys.version_info[0] >= 3: # Python 3 specific definitions time_clock = time.process_time else: # Python 2 specific definitions time_clock = time.clock def filename_list_iterator(filelist, wavdir, wavext, listflag): """ Iterator to yeild all the filenames, possibly interpreting them as list files, prepending wavdir """ if not listflag: for filename in filelist: yield os.path.join(wavdir, filename + wavext) else: for listfilename in filelist: with open(listfilename, 'r') as f: for filename in f: yield os.path.join(wavdir, filename.rstrip('\n') + wavext) # for saving precomputed fprints def ensure_dir(dirname): """ ensure that the named directory exists """ if len(dirname): if not os.path.exists(dirname): # There's a race condition for multiprocessor; don't worry if the # directory gets created before we get to it. try: os.makedirs(dirname) except: pass # Command line interface # basic operations, each in a separate function def file_precompute_peaks_or_hashes(analyzer, filename, precompdir, precompext=None, hashes_not_peaks=True, skip_existing=False, strip_prefix=None): """ Perform precompute action for one file, return list of message strings """ # If strip_prefix is specified and matches the start of filename, # remove it from filename. 
    if strip_prefix and filename[:len(strip_prefix)] == strip_prefix:
        # Remove the leading --wavdir prefix so the output tree mirrors
        # the input tree relative to that directory.
        tail_filename = filename[len(strip_prefix):]
    else:
        tail_filename = filename
    # Form the output filename to check if it exists.
    # strip relative directory components from file name
    # Also remove leading absolute path (comp == '')
    relname = '/'.join([comp for comp in tail_filename.split('/')
                        if comp != '.' and comp != '..' and comp != ''])
    root = os.path.splitext(relname)[0]
    if precompext is None:
        # Choose the default extension for the kind of data being written.
        if hashes_not_peaks:
            precompext = audfprint_analyze.PRECOMPEXT
        else:
            precompext = audfprint_analyze.PRECOMPPKEXT
    opfname = os.path.join(precompdir, root + precompext)
    if skip_existing and os.path.isfile(opfname):
        return ["file " + opfname + " exists (and --skip-existing); skipping"]
    else:
        # Do the analysis
        # NOTE(review): 'type' below shadows the builtin; kept for
        # compatibility with the existing message formatting.
        if hashes_not_peaks:
            type = "hashes"
            saver = audfprint_analyze.hashes_save
            output = analyzer.wavfile2hashes(filename)
        else:
            type = "peaks"
            saver = audfprint_analyze.peaks_save
            output = analyzer.wavfile2peaks(filename)
        # save the hashes or peaks file
        if len(output) == 0:
            message = "Zero length analysis for " + filename + " -- not saving."
        else:
            # Make sure the output directory exists
            ensure_dir(os.path.split(opfname)[0])
            # Write the file
            saver(opfname, output)
            message = ("wrote " + opfname + " ( %d %s, %.3f sec)"
                       % (len(output), type, analyzer.soundfiledur))
        return [message]


def file_precompute(analyzer, filename, precompdir, type='peaks',
                    skip_existing=False, strip_prefix=None):
    """ Perform precompute action for one file, return list
        of message strings.

        type is 'hashes' or 'peaks' and selects what gets written;
        the heavy lifting is delegated to
        file_precompute_peaks_or_hashes().
    """
    print(time.ctime(), "precomputing", type, "for", filename, "...")
    hashes_not_peaks = (type == 'hashes')
    return file_precompute_peaks_or_hashes(analyzer, filename, precompdir,
                                           hashes_not_peaks=hashes_not_peaks,
                                           skip_existing=skip_existing,
                                           strip_prefix=strip_prefix)


def make_ht_from_list(analyzer, filelist, hashbits, depth, maxtime, pipe=None):
    """ Populate a hash table from a list, used as target for
        multiprocess division.  pipe is a pipe over which to push back
        the result, else return it """
    # Create new ht instance
    ht = hash_table.HashTable(hashbits=hashbits, depth=depth, maxtime=maxtime)
    # Add in the files
    for filename in filelist:
        hashes = analyzer.wavfile2hashes(filename)
        ht.store(filename, hashes)
    # Pass back to caller
    # NOTE(review): when used as a multiprocessing target the table is
    # sent (pickled) over the pipe; the direct return path is for
    # single-process callers.
    if pipe:
        pipe.send(ht)
    else:
        return ht


def do_cmd(cmd, analyzer, hash_tab, filename_iter, matcher,
           outdir, type, report, skip_existing=False, strip_prefix=None):
    """ Breaks out the core part of running the command.
        This is just the single-core versions.

        cmd is one of new/add/match/precompute/merge/newmerge/list/remove;
        report is a callable taking a list of message strings.
    """
    if cmd == 'merge' or cmd == 'newmerge':
        # files are other hash tables, merge them in
        for filename in filename_iter:
            hash_tab2 = hash_table.HashTable(filename)
            if "samplerate" in hash_tab.params:
                assert hash_tab.params["samplerate"] == hash_tab2.params["samplerate"]
            else:
                # "newmerge" fails to setup the samplerate param
                hash_tab.params["samplerate"] = hash_tab2.params["samplerate"]
            hash_tab.merge(hash_tab2)

    elif cmd == 'precompute':
        # just precompute fingerprints, single core
        for filename in filename_iter:
            report(file_precompute(analyzer, filename, outdir, type,
                                   skip_existing=skip_existing,
                                   strip_prefix=strip_prefix))

    elif cmd == 'match':
        # Running query, single-core mode
        for num, filename in enumerate(filename_iter):
            msgs = matcher.file_match_to_msgs(analyzer, hash_tab, filename, num)
            report(msgs)

    elif cmd == 'new' or cmd == 'add':
        # Adding files
        tothashes = 0
        ix = 0
        for filename in filename_iter:
            report([time.ctime() + " ingesting #" + str(ix) + ": "
                    + filename + " ..."])
            dur, nhash = analyzer.ingest(hash_tab, filename)
            tothashes += nhash
            ix += 1

        report(["Added " + str(tothashes) + " hashes "
                + "(%.1f" % (tothashes / float(analyzer.soundfiletotaldur))
                + " hashes/sec)"])
    elif cmd == 'remove':
        # Removing files from hash table.
        for filename in filename_iter:
            hash_tab.remove(filename)

    elif cmd == 'list':
        hash_tab.list(lambda x: report([x]))

    else:
        raise ValueError("unrecognized command: " + cmd)


def multiproc_add(analyzer, hash_tab, filename_iter, report, ncores):
    """Run multiple threads adding new files to hash table"""
    # run ncores in parallel to add new files to existing HASH_TABLE
    # lists store per-process parameters
    # Pipes to transfer results
    rx = [[] for _ in range(ncores)]
    tx = [[] for _ in range(ncores)]
    # Process objects
    pr = [[] for _ in range(ncores)]
    # Lists of the distinct files
    filelists = [[] for _ in range(ncores)]
    # unpack all the files into ncores lists (round-robin assignment)
    ix = 0
    for filename in filename_iter:
        filelists[ix % ncores].append(filename)
        ix += 1
    # Launch each of the individual processes
    for ix in range(ncores):
        # Pipe(False) gives a one-way pipe: rx is the receive end here,
        # tx is handed to the child for pushing back its hash table.
        rx[ix], tx[ix] = multiprocessing.Pipe(False)
        pr[ix] = multiprocessing.Process(target=make_ht_from_list,
                                         args=(analyzer, filelists[ix],
                                               hash_tab.hashbits,
                                               hash_tab.depth,
                                               (1 << hash_tab.maxtimebits),
                                               tx[ix]))
        pr[ix].start()
    # gather results when they all finish
    for core in range(ncores):
        # thread passes back serialized hash table structure
        hash_tabx = rx[core].recv()
        report(["hash_table " + str(core) + " has "
                + str(len(hash_tabx.names)) + " files "
                + str(sum(hash_tabx.counts)) + " hashes"])
        # merge in all the new items, hash entries
        hash_tab.merge(hash_tabx)
        # finish that thread...
        pr[core].join()


def matcher_file_match_to_msgs(matcher, analyzer, hash_tab, filename):
    """Cover for matcher.file_match_to_msgs so it can be passed to joblib"""
    # joblib cannot serialize a bound instance method as the delayed
    # callable, so this module-level wrapper takes the instance explicitly.
    return matcher.file_match_to_msgs(analyzer, hash_tab, filename)


def do_cmd_multiproc(cmd, analyzer, hash_tab, filename_iter, matcher,
                     outdir, type, report, skip_existing=False,
                     strip_prefix=None, ncores=1):
    """ Run the actual command, using multiple processors

        Only precompute/match (via joblib) and new/add (via
        multiproc_add) are supported; other commands raise ValueError.
    """
    if cmd == 'precompute':
        # precompute fingerprints with joblib
        msgslist = joblib.Parallel(n_jobs=ncores)(
            joblib.delayed(file_precompute)(analyzer, file, outdir, type,
                                            skip_existing,
                                            strip_prefix=strip_prefix)
            for file in filename_iter
        )
        # Collapse into a single list of messages
        for msgs in msgslist:
            report(msgs)

    elif cmd == 'match':
        # Running queries in parallel
        msgslist = joblib.Parallel(n_jobs=ncores)(
            # Would use matcher.file_match_to_msgs(), but you
            # can't use joblib on an instance method
            joblib.delayed(matcher_file_match_to_msgs)(matcher, analyzer,
                                                       hash_tab, filename)
            for filename in filename_iter
        )
        for msgs in msgslist:
            report(msgs)

    elif cmd == 'new' or cmd == 'add':
        # We add by forking multiple parallel threads each running
        # analyzers over different subsets of the file list
        multiproc_add(analyzer, hash_tab, filename_iter, report, ncores)

    else:
        # This is not a multiproc command
        raise ValueError("unrecognized multiproc command: " + cmd)


# Command to separate out setting of analyzer parameters
def setup_analyzer(args):
    """Create a new analyzer object, taking values from docopts args"""
    # Create analyzer object; parameters will get set below
    analyzer = audfprint_analyze.Analyzer()
    # Read parameters from command line/docopts
    analyzer.density = float(args['--density'])
    analyzer.maxpksperframe = int(args['--pks-per-frame'])
    analyzer.maxpairsperpeak = int(args['--fanout'])
    analyzer.f_sd = float(args['--freq-sd'])
    analyzer.shifts = int(args['--shifts'])
    # fixed - 512 pt FFT with 256 pt hop at 11025 Hz
    analyzer.target_sr = int(args['--samplerate'])
    analyzer.n_fft = 512
    analyzer.n_hop = analyzer.n_fft // 2
    # set default value for shifts depending on mode
    if analyzer.shifts == 0:
        # Default shift is 4 for match, otherwise 1
        analyzer.shifts = 4 if args['match'] else 1
    analyzer.fail_on_error = not args['--continue-on-error']
    return analyzer


# Command to separate out setting of matcher parameters
def setup_matcher(args):
    """Create a new matcher objects, set parameters from docopt structure"""
    matcher = audfprint_match.Matcher()
    matcher.window = int(args['--match-win'])
    matcher.threshcount = int(args['--min-count'])
    matcher.max_returns = int(args['--max-matches'])
    matcher.search_depth = int(args['--search-depth'])
    matcher.sort_by_time = args['--sortbytime']
    # docopt flags are plain bools; bitwise-or of bools acts as logical or
    matcher.exact_count = args['--exact-count'] | args['--illustrate'] | args['--illustrate-hpf']
    matcher.illustrate = args['--illustrate'] | args['--illustrate-hpf']
    matcher.illustrate_hpf = args['--illustrate-hpf']
    matcher.verbose = args['--verbose']
    matcher.find_time_range = args['--find-time-range']
    matcher.time_quantile = float(args['--time-quantile'])
    return matcher


# Command to construct the reporter object
def setup_reporter(args):
    """ Creates a logging function, either to stderr or file"""
    opfile = args['--opfile']
    if opfile and len(opfile):
        # NOTE(review): the output file handle is held open for the life
        # of the process and never explicitly closed.
        f = open(opfile, "w")

        def report(msglist):
            """Log messages to a particular output file"""
            for msg in msglist:
                f.write(msg + "\n")
    else:
        def report(msglist):
            """Log messages by printing to stdout"""
            for msg in msglist:
                print(msg)
    return report


# CLI specified via usage message thanks to docopt
USAGE = """
Landmark-based audio fingerprinting.
Create a new fingerprint dbase with "new",
append new files to an existing database with "add",
or identify noisy query excerpts with "match".
"precompute" writes a *.fpt file under precompdir
with precomputed fingerprint for each input wav file.
"merge" combines previously-created databases into
an existing database; "newmerge" combines existing
databases to create a new one.
Usage: audfprint (new | add | match | precompute | merge | newmerge | list | remove) [options] [<file>]...

Options:
  -d <dbase>, --dbase <dbase>     Fingerprint database file
  -n <dens>, --density <dens>     Target hashes per second [default: 20.0]
  -h <bits>, --hashbits <bits>    How many bits in each hash [default: 20]
  -b <val>, --bucketsize <val>    Number of entries per bucket [default: 100]
  -t <val>, --maxtime <val>       Largest time value stored [default: 16384]
  -u <val>, --maxtimebits <val>   maxtime as a number of bits (16384 == 14 bits)
  -r <val>, --samplerate <val>    Resample input files to this [default: 11025]
  -p <dir>, --precompdir <dir>    Save precomputed files under this dir [default: .]
  -i <val>, --shifts <val>        Use this many subframe shifts building fp [default: 0]
  -w <val>, --match-win <val>     Maximum tolerable frame skew to count as a match [default: 2]
  -N <val>, --min-count <val>     Minimum number of matching landmarks to count as a match [default: 5]
  -x <val>, --max-matches <val>   Maximum number of matches to report for each query [default: 1]
  -X, --exact-count               Flag to use more precise (but slower) match counting
  -R, --find-time-range           Report the time support of each match
  -Q <val>, --time-quantile <val>  Quantile at extremes of time support [default: 0.05]
  -S <val>, --freq-sd <val>       Frequency peak spreading SD in bins [default: 30.0]
  -F <val>, --fanout <val>        Max number of hash pairs per peak [default: 3]
  -P <val>, --pks-per-frame <val>  Maximum number of peaks per frame [default: 5]
  -D <val>, --search-depth <val>  How far down to search raw matching track list [default: 100]
  -H <val>, --ncores <val>        Number of processes to use [default: 1]
  -o <name>, --opfile <name>      Write output (matches) to this file, not stdout [default: ]
  -K, --precompute-peaks          Precompute just landmarks (else full hashes)
  -k, --skip-existing             On precompute, skip items if output file already exists
  -C, --continue-on-error         Keep processing despite errors reading input
  -l, --list                      Input files are lists, not audio
  -T, --sortbytime                Sort multiple hits per file by time (instead of score)
  -v <val>, --verbose <val>       Verbosity level [default: 1]
  -I, --illustrate                Make a plot showing the match
  -J, --illustrate-hpf            Plot the match, using onset enhancement
  -W <dir>, --wavdir <dir>        Find sound files under this dir [default: ]
  -V <ext>, --wavext <ext>        Extension to add to wav file names [default: ]
  --version                       Report version number
  --help                          Print this message
"""

__version__ = 20150406


def main(argv):
    """ Main routine for the command-line interface to audfprint """
    # Other globals set from command line
    args = docopt.docopt(USAGE, version=__version__, argv=argv[1:])

    # Figure which command was chosen
    poss_cmds = ['new', 'add', 'precompute', 'merge', 'newmerge', 'match',
                 'list', 'remove']
    cmdlist = [cmdname for cmdname in poss_cmds if args[cmdname]]
    if len(cmdlist) != 1:
        raise ValueError("must specify exactly one command")
    # The actual command as a str
    cmd = cmdlist[0]

    # Setup output function
    report = setup_reporter(args)

    # Keep track of wall time
    initticks = time_clock()

    # Command line sanity.
    if args["--maxtimebits"]:
        args["--maxtimebits"] = int(args["--maxtimebits"])
    else:
        # Derive the bit width from --maxtime when not given explicitly.
        args["--maxtimebits"] = hash_table._bitsfor(int(args["--maxtime"]))

    # Setup the analyzer if we're using one (i.e., unless "merge")
    analyzer = setup_analyzer(args) if not (
            cmd == "merge" or cmd == "newmerge"
            or cmd == "list" or cmd == "remove") else None

    precomp_type = 'hashes'

    # Set up the hash table, if we're using one (i.e., unless "precompute")
    if cmd != "precompute":
        # For everything other than precompute, we need a database name
        # Check we have one
        dbasename = args['--dbase']
        if not dbasename:
            raise ValueError("dbase name must be provided if not precompute")
        if cmd == "new" or cmd == "newmerge":
            # Check that the output directory can be created before we start
            ensure_dir(os.path.split(dbasename)[0])
            # Create a new hash table
            hash_tab = hash_table.HashTable(
                hashbits=int(args['--hashbits']),
                depth=int(args['--bucketsize']),
                maxtime=(1 << int(args['--maxtimebits'])))
            # Set its samplerate param
            if analyzer:
                hash_tab.params['samplerate'] = analyzer.target_sr
        else:
            # Load existing hash table file (add, match, merge)
            if args['--verbose']:
                report([time.ctime() + " Reading hash table " + dbasename])
            hash_tab = hash_table.HashTable(dbasename)
            if analyzer and 'samplerate' in hash_tab.params \
                    and hash_tab.params['samplerate'] != analyzer.target_sr:
                # NOTE(review): the actual override below is commented out,
                # so this message is printed without changing target_sr --
                # confirm whether the override should be re-enabled.
                # analyzer.target_sr = hash_tab.params['samplerate']
                print("db samplerate overridden to ", analyzer.target_sr)
    else:
        # The command IS precompute
        # dummy empty hash table
        hash_tab = None
        if args['--precompute-peaks']:
            precomp_type = 'peaks'

    # Create a matcher
    matcher = setup_matcher(args) if cmd == 'match' else None

    filename_iter = filename_list_iterator(
        args['<file>'], args['--wavdir'], args['--wavext'], args['--list'])

    #######################
    # Run the main commmand
    #######################

    # How many processors to use (multiprocessing)
    ncores = int(args['--ncores'])

    if ncores > 1 and not (cmd == "merge" or cmd == "newmerge"
                           or cmd == "list" or cmd == "remove"):
        # merge/newmerge/list/remove are always single-thread processes
        do_cmd_multiproc(cmd, analyzer, hash_tab, filename_iter,
                         matcher, args['--precompdir'],
                         precomp_type, report,
                         skip_existing=args['--skip-existing'],
                         strip_prefix=args['--wavdir'],
                         ncores=ncores)
    else:
        do_cmd(cmd, analyzer, hash_tab, filename_iter,
               matcher, args['--precompdir'], precomp_type, report,
               skip_existing=args['--skip-existing'],
               strip_prefix=args['--wavdir'])

    elapsedtime = time_clock() - initticks
    if analyzer and analyzer.soundfiletotaldur > 0.:
        print("Processed "
              + "%d files (%.1f s total dur) in %.1f s sec = %.3f x RT" \
              % (analyzer.soundfilecount, analyzer.soundfiletotaldur,
                 elapsedtime, (elapsedtime / analyzer.soundfiletotaldur)))

    # Save the hash table file if it has been modified
    # (hash_tab is None for precompute, so dbasename is always bound here)
    if hash_tab and hash_tab.dirty:
        # We already created the directory, if "new".
        hash_tab.save(dbasename)


# Run the main function if called from the command line
if __name__ == "__main__":
    main(sys.argv)
import os
import yaml

from charmhelpers.core import hookenv
from charmhelpers.core import templating

from charmhelpers.core.services.base import ManagerCallback


__all__ = ['RelationContext', 'TemplateCallback',
           'render_template', 'template']


class RelationContext(dict):
    """
    Base class for a context generator that gets relation data from juju.

    Subclasses must provide the attributes `name`, which is the name of the
    interface of interest, `interface`, which is the type of the interface of
    interest, and `required_keys`, which is the set of keys required for the
    relation to be considered complete.  The data for all interfaces matching
    the `name` attribute that are complete will used to populate the dictionary
    values (see `get_data`, below).

    The generated context will be namespaced under the relation :attr:`name`,
    to prevent potential naming conflicts.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = None
    interface = None
    required_keys = []

    def __init__(self, name=None, additional_required_keys=None):
        if name is not None:
            self.name = name
        if additional_required_keys is not None:
            # Rebind to a new list instead of extending in place: extend()
            # mutated the class-level `required_keys` list shared by every
            # instance (and subclass), so extra keys from one instance
            # leaked into all later ones.
            self.required_keys = self.required_keys + additional_required_keys
        self.get_data()

    def __bool__(self):
        """
        Returns True if all of the required_keys are available.
        """
        return self.is_ready()

    __nonzero__ = __bool__

    def __repr__(self):
        return super(RelationContext, self).__repr__()

    def is_ready(self):
        """
        Returns True if all of the `required_keys` are available from any units.
        """
        ready = len(self.get(self.name, [])) > 0
        if not ready:
            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
        return ready

    def _is_ready(self, unit_data):
        """
        Helper method that tests a set of relation data and returns True if
        all of the `required_keys` are present.
        """
        return set(unit_data.keys()).issuperset(set(self.required_keys))

    def get_data(self):
        """
        Retrieve the relation data for each unit involved in a relation and,
        if complete, store it in a list under `self[self.name]`.  This
        is automatically called when the RelationContext is instantiated.

        The units are sorted lexographically first by the service ID, then by
        the unit ID.  Thus, if an interface has two other services, 'db:1'
        and 'db:2', with 'db:1' having two units, 'wordpress/0' and
        'wordpress/1', and 'db:2' having one unit, 'mediawiki/0', all of
        which have a complete set of data, the relation data for the units
        will be stored in the order: 'wordpress/0', 'wordpress/1',
        'mediawiki/0'.

        If you only care about a single unit on the relation, you can just
        access it as `{{ interface[0]['key'] }}`.  However, if you can at all
        support multiple units on a relation, you should iterate over the list,
        like::

            {% for unit in interface -%}
                {{ unit['key'] }}{% if not loop.last %},{% endif %}
            {%- endfor %}

        Note that since all sets of relation data from all related services and
        units are in a single list, if you need to know which service or unit a
        set of data came from, you'll need to extend this class to preserve
        that information.
        """
        if not hookenv.relation_ids(self.name):
            return

        ns = self.setdefault(self.name, [])
        for rid in sorted(hookenv.relation_ids(self.name)):
            for unit in sorted(hookenv.related_units(rid)):
                reldata = hookenv.relation_get(rid=rid, unit=unit)
                if self._is_ready(reldata):
                    ns.append(reldata)

    def provide_data(self):
        """
        Return data to be relation_set for this interface.
        """
        return {}


class MysqlRelation(RelationContext):
    """
    Relation context for the `mysql` interface.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = 'db'
    interface = 'mysql'
    required_keys = ['host', 'user', 'password', 'database']


class HttpRelation(RelationContext):
    """
    Relation context for the `http` interface.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = 'website'
    interface = 'http'
    required_keys = ['host', 'port']

    def provide_data(self):
        return {
            'host': hookenv.unit_get('private-address'),
            'port': 80,
        }


class RequiredConfig(dict):
    """
    Data context that loads config options with one or more mandatory options.

    Once the required options have been changed from their default values, all
    config options will be available, namespaced under `config` to prevent
    potential naming conflicts (for example, between a config option and a
    relation property).

    :param list *args: List of options that must be changed from their default values.
    """

    def __init__(self, *args):
        self.required_options = args
        self['config'] = hookenv.config()
        with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
            # NOTE: yaml.load without an explicit Loader is unsafe on
            # untrusted input; config.yaml ships inside the charm itself,
            # so it is trusted here.
            self.config = yaml.load(fp).get('options', {})

    def __bool__(self):
        # Ready only once every required option differs from its default.
        for option in self.required_options:
            if option not in self['config']:
                return False
            current_value = self['config'][option]
            default_value = self.config[option].get('default')
            if current_value == default_value:
                return False
            if current_value in (None, '') and default_value in (None, ''):
                return False
        return True

    def __nonzero__(self):
        return self.__bool__()


class StoredContext(dict):
    """
    A data context that always returns the data that it was first created with.

    This is useful to do a one-time generation of things like passwords, that
    will thereafter use the same value that was originally generated, instead
    of generating a new value each time it is run.
    """

    def __init__(self, file_name, config_data):
        """
        If the file exists, populate `self` with the data from the file.
        Otherwise, populate with the given data and persist it to the file.
        """
        if os.path.exists(file_name):
            self.update(self.read_context(file_name))
        else:
            self.store_context(file_name, config_data)
            self.update(config_data)

    def store_context(self, file_name, config_data):
        """Persist config_data to file_name as YAML, owner-read/write only."""
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'w') as file_stream:
            # 0o600: restrict to owner since this may hold secrets
            # (0o form works on both Python 2.6+ and Python 3).
            os.fchmod(file_stream.fileno(), 0o600)
            yaml.dump(config_data, file_stream)

    def read_context(self, file_name):
        """Load and return the persisted context; raise OSError if empty."""
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'r') as file_stream:
            # NOTE: trusted input -- the file was written by store_context.
            data = yaml.load(file_stream)
            if not data:
                raise OSError("%s is empty" % file_name)
            return data


class TemplateCallback(ManagerCallback):
    """
    Callback class that will render a Jinja2 template, for use as a ready
    action.

    :param str source: The template source file, relative to
        `$CHARM_DIR/templates`
    :param str target: The target to write the rendered template to
    :param str owner: The owner of the rendered file
    :param str group: The group of the rendered file
    :param int perms: The permissions of the rendered file
    """

    def __init__(self, source, target,
                 owner='root', group='root', perms=0o444):
        self.source = source
        self.target = target
        self.owner = owner
        self.group = group
        self.perms = perms

    def __call__(self, manager, service_name, event_name):
        service = manager.get_service(service_name)
        context = {}
        for ctx in service.get('required_data', []):
            context.update(ctx)
        templating.render(self.source, self.target, context,
                          self.owner, self.group, self.perms)


# Convenience aliases for templates
render_template = template = TemplateCallback
from __future__ import print_function
from __future__ import absolute_import

import string
import os
import weakref
import re
import glob

import numpy as np
import pandas as pds

from pysat import data_dir as data_dir


class Files(object):
    """Maintains collection of files for instrument object.

    Uses the list_files functions for each specific instrument
    to create an ordered collection of files in time. Used by
    instrument object to load the correct files. Files also
    contains helper methods for determining the presence of
    new files and creating an ordered list of files.

    Attributes
    ----------
    base_path : string
        path to .pysat directory in user home
    start_date : datetime
        date of first file, used as default start bound for instrument
        object
    stop_date : datetime
        date of last file, used as default stop bound for instrument
        object
    data_path : string
        path to the directory containing instrument files,
        top_dir/platform/name/tag/
    manual_org : bool
        if True, then Files will look directly in pysat data directory
        for data files and will not use /platform/name/tag
    update_files : bool
        updates files on instantiation if True

    Note
    ----
    User should generally use the interface provided by a pysat.Instrument
    instance. Exceptions are the classmethod from_os, provided to assist
    in generating the appropriate output for an instrument routine.

    Examples
    --------
    ::

        # convenient file access
        inst = pysat.Instrument(platform=platform, name=name, tag=tag,
                                sat_id=sat_id)
        # first file
        inst.files[0]

        # files from start up to stop (exclusive on stop)
        start = pysat.datetime(2009,1,1)
        stop = pysat.datetime(2009,1,3)
        print(vefi.files[start:stop])

        # files for date
        print(vefi.files[start])

        # files by slicing
        print(vefi.files[0:4])

        # get a list of new files
        # new files are those that weren't present the last time
        # a given instrument's file list was stored
        new_files = vefi.files.get_new()

        # search pysat appropriate directory for instrument files and
        # update Files instance.
        vefi.files.refresh()

    """

    def __init__(self, sat, manual_org=False, directory_format=None,
                 update_files=False, file_format=None, write_to_disk=True):
        # pysat.Instrument object (weak proxy avoids a reference cycle
        # between the Instrument and its Files attribute)
        self._sat = weakref.proxy(sat)
        # location of .pysat file
        self.home_path = os.path.join(os.path.expanduser('~'), '.pysat')
        self.start_date = None
        self.stop_date = None
        self.files = pds.Series(None)
        # location of stored files
        self.stored_file_name = ''.join((self._sat.platform, '_',
                                         self._sat.name, '_', self._sat.tag,
                                         '_', self._sat.sat_id,
                                         '_stored_file_info.txt'))
        # flag for setting simple organization of files, only
        # look under pysat_data_dir
        self.manual_org = manual_org
        # path for sub-directories under pysat data path
        if directory_format is None:
            directory_format = os.path.join('{platform}', '{name}', '{tag}')
        self.directory_format = directory_format

        # user-specified file format
        self.file_format = file_format

        if manual_org:
            self.sub_dir_path = ''
        else:
            # construct subdirectory path
            self.sub_dir_path = \
                self.directory_format.format(name=self._sat.name,
                                             platform=self._sat.platform,
                                             tag=self._sat.tag,
                                             sat_id=self._sat.sat_id)

        # make sure path always ends with directory seperator
        self.data_path = os.path.join(data_dir, self.sub_dir_path)
        if self.data_path[-2] == os.path.sep:
            # NOTE(review): checks the second-to-last character --
            # presumably trimming a doubled trailing separator; confirm.
            self.data_path = self.data_path[:-1]
        elif self.data_path[-1] != os.path.sep:
            self.data_path = os.path.join(self.data_path, '')

        self.write_to_disk = write_to_disk
        if write_to_disk is False:
            # in-memory stand-ins for the on-disk previous/current lists
            self._previous_file_list = pds.Series([], dtype='a')
            self._current_file_list = pds.Series([], dtype='a')

        if self._sat.platform != '':
            # load stored file info
            info = self._load()
            if not info.empty:
                self._attach_files(info)
                if update_files:
                    self.refresh()
            else:
                # couldn't find stored info, load file list and then store
                self.refresh()

    def _attach_files(self, files_info):
        """Attaches info returned by instrument list_files routine to
        Instrument object.

        Sets self.files (sorted by time) and start_date/stop_date.
        Duplicate datetimes are reduced to a single entry with a warning.
        """
        if not files_info.empty:
            if (len(files_info.index.unique()) != len(files_info)):
                estr = 'WARNING! Duplicate datetimes in provided file '
                estr = '{:s}information.\nKeeping one of each '.format(estr)
                estr = '{:s}of the duplicates, dropping the rest.'.format(estr)
                print(estr)
                print(files_info.index.get_duplicates())
                idx = np.unique(files_info.index, return_index=True)
                # NOTE(review): .ix is deprecated pandas indexing -- this
                # module appears pinned to an old pandas release.
                files_info = files_info.ix[idx[1]]
                # raise ValueError('List of files must have unique datetimes.')

            self.files = files_info.sort_index()
            # truncate bounds to day resolution
            date = files_info.index[0]
            self.start_date = pds.datetime(date.year, date.month, date.day)
            date = files_info.index[-1]
            self.stop_date = pds.datetime(date.year, date.month, date.day)
        else:
            self.start_date = None
            self.stop_date = None
            # convert to object type
            # necessary if Series is empty, enables == checks with strings
            self.files = files_info.astype(np.dtype('O'))

    def _store(self):
        """Store currently loaded filelist for instrument onto filesystem"""
        name = self.stored_file_name
        # check if current file data is different than stored file list
        # if so, move file list to previous file list, store current to file
        # if not, do nothing
        stored_files = self._load()
        if len(stored_files) != len(self.files):
            # # of items is different, things are new
            new_flag = True
        elif len(stored_files) == len(self.files):
            # # of items equal, check specifically for equality
            if stored_files.eq(self.files).all():
                new_flag = False
            else:
                # not equal, there are new files
                new_flag = True

        if new_flag:
            # print('New files')
            if self.write_to_disk:
                stored_files.to_csv(os.path.join(self.home_path,
                                                 'previous_' + name),
                                    date_format='%Y-%m-%d %H:%M:%S.%f')
                self.files.to_csv(os.path.join(self.home_path, name),
                                  date_format='%Y-%m-%d %H:%M:%S.%f')
            else:
                # keep both lists in memory when disk writes are disabled
                self._previous_file_list = stored_files
                self._current_file_list = self.files.copy()
        return

    def _load(self, prev_version=False):
        """Load stored filelist and return as Pandas Series

        Parameters
        ----------
        prev_version : boolean
            if True, will load previous version of file list

        Returns
        -------
        pandas.Series
            Full path file names are indexed by datetime
            Series is empty if there is no file list to load
        """
        fname = self.stored_file_name
        if prev_version:
            fname = os.path.join(self.home_path, 'previous_' + fname)
        else:
            fname = os.path.join(self.home_path, fname)

        if os.path.isfile(fname) and (os.path.getsize(fname) > 0):
            if self.write_to_disk:
                # NOTE(review): Series.from_csv was removed in modern
                # pandas -- another sign of the old-pandas pin.
                return pds.Series.from_csv(fname, index_col=0)
            else:
                # grab files from memory
                if prev_version:
                    return self._previous_file_list
                else:
                    return self._current_file_list
        else:
            return pds.Series([], dtype='a')

    def refresh(self):
        """Update list of files, if there are changes.

        Calls underlying list_rtn for the particular science instrument.
        Typically, these routines search in the pysat provided path,
        pysat_data_dir/platform/name/tag/,
        where pysat_data_dir is set by pysat.utils.set_data_dir(path=path).
        """
        output_str = '{platform} {name} {tag} {sat_id}'
        output_str = output_str.format(platform=self._sat.platform,
                                       name=self._sat.name,
                                       tag=self._sat.tag,
                                       sat_id=self._sat.sat_id)
        output_str = " ".join(("pysat is searching for", output_str, "files."))
        # collapse repeated spaces left by empty platform/name/tag fields
        output_str = " ".join(output_str.split())
        print(output_str)

        info = self._sat._list_rtn(tag=self._sat.tag, sat_id=self._sat.sat_id,
                                   data_path=self.data_path,
                                   format_str=self.file_format)
        if not info.empty:
            print('Found {ll:d} of them.'.format(ll=len(info)))
        else:
            estr = "Unable to find any files that match the supplied template. If you have the necessary files "
            estr = "{:s}please check pysat settings and file ".format(estr)
            print("{:s}locations.".format(estr))
        info = self._remove_data_dir_path(info)
        self._attach_files(info)
        self._store()

    def get_new(self):
        """List new files since last recorded file state.

        pysat stores filenames in the user_home/.pysat directory. Returns
        a list of all new fileanmes since the last known change to files.
        Filenames are stored if there is a change and either update_files
        is True at instrument object level or files.refresh() is called.

        Returns
        -------
        pandas.Series
            files are indexed by datetime
        """
        # refresh files
        self.refresh()
        # current files
        new_info = self._load()
        # previous set of files
        old_info = self._load(prev_version=True)
        # NOTE(review): unary '-' on a boolean Series is deprecated in
        # later pandas ('~' is the modern operator); kept for the pinned
        # pandas version.
        new_files = new_info[-new_info.isin(old_info)]
        return new_files

    # def mark_as_new(self, files):
    #     """Set list of files as new.
    #
    #     """
    #     pass
    #     stored_info = self._load()
    #     if not stored_info.empty:  # is not False:
    #         new_info = self._sat._list_rtn(tag = self._sat.tag,
    #                                        data_path=self.data_path,
    #                                        format_str=self.file_format)
    #         new_info = self._remove_data_dir_path(new_info)
    #         new_files = new_info[~new_info.isin(stored_info) ]
    #         return new_files
    #     else:
    #         print('No previously stored files that we may compare to.')
    #         return pds.Series([], dtype='a')  # False

    def get_index(self, fname):
        """Return index for a given filename.

        Parameters
        ----------
        fname : string
            filename

        Note
        ----
        If fname not found in the file information already attached
        to the instrument.files instance, then a files.refresh() call
        is made.
        """
        idx, = np.where(fname == self.files)
        if len(idx) == 0:
            # filename not in index, try reloading files from disk
            self.refresh()
            # print("DEBUG get_index:", fname, self.files)
            idx, = np.where(fname == np.array(self.files))

            if len(idx) == 0:
                raise ValueError('Could not find "' + fname +
                                 '" in available file list. Valid Example: ' +
                                 self.files.iloc[0])
        # return a scalar rather than array - otherwise introduces
        # array to index warnings.
        return idx[0]

    # convert this to a normal get so files[in:in2] gives the same as requested
    # here support slicing via date and index filename is inclusive slicing,
    # date and index are normal non-inclusive end point
    def __getitem__(self, key):
        if isinstance(key, slice):
            try:
                out = self.files.ix[key]
            except IndexError:
                raise IndexError('Date requested outside file bounds.')
            if isinstance(key.start, pds.datetime):
                # enforce exclusive slicing on datetime
                if len(out) > 1:
                    if out.index[-1] >= key.stop:
                        return out[:-1]
                    else:
                        return out
                elif len(out) == 1:
                    if out.index[0] >= key.stop:
                        return pds.Series([], dtype='a')
                    else:
                        return out
                else:
                    return out
            else:
                # not a datetime
                return out
        else:
            return self.files.ix[key]
        # raise ValueError('Not implemented yet.')
        # if isinstance(key, tuple):
        #     if len(key) == 2:
        #         start = key[0]
        #         end = key[1]
        #     else:
        #         raise ValueError('Must input 2 and only 2 items/iterables')

    def get_file_array(self, start, end):
        """Return a list of filenames between and including start and end.

        Parameters
        ----------
        start: array_like or single string
            filenames for start of returned filelist
        stop: array_like or single string
            filenames inclusive end of list

        Returns
        -------
        list of filenames between and including start and end over all
        intervals.
        """
        if hasattr(start, '__iter__') & hasattr(end, '__iter__'):
            # multiple (start, end) pairs: accumulate each interval
            files = []
            for (sta, stp) in zip(start, end):
                id1 = self.get_index(sta)
                id2 = self.get_index(stp)
                files.extend(self.files.iloc[id1:id2 + 1])
        elif hasattr(start, '__iter__') | hasattr(end, '__iter__'):
            estr = 'Either both or none of the inputs need to be iterable'
            raise ValueError(estr)
        else:
            id1 = self.get_index(start)
            id2 = self.get_index(end)
            # NOTE(review): Series.to_list only exists in pandas >= 0.24;
            # inconsistent with the deprecated APIs used elsewhere --
            # confirm against the pinned pandas version.
            files = self.files[id1:id2 + 1].to_list()
        return files

    def _remove_data_dir_path(self, inp=None):
        # import string
        """Remove the data directory path from filenames"""
        # need to add a check in here to make sure data_dir path is actually in
        # the filename
        if inp is not None:
            split_str = os.path.join(self.data_path, '')
            return inp.apply(lambda x: x.split(split_str)[-1])

        # elif inp is not None:
        #     # return inp.split(split_str)[-1]
        #     match = os.path.join(self.data_path,'')
        #     num = len(match)
        #     return inp.apply(lambda x: x[num:])

    @classmethod
    def from_os(cls, data_path=None, format_str=None,
                two_digit_year_break=None):
        """
        Produces a list of files and and formats it for Files class.

        Parameters
        ----------
        data_path : string
            Top level directory to search files for. This directory
            is provided by pysat to the instrument_module.list_files
            functions as data_path.
        format_str : string with python format codes
            Provides the naming pattern of the instrument files and the
            locations of date information so an ordered list may be
            produced.
        two_digit_year_break : int
            If filenames only store two digits for the year, then
            '1900' will be added for years >= two_digit_year_break
            and '2000' will be added for years < two_digit_year_break.

        Note
        ----
        Does not produce a Files instance, but the proper output
        from instrument_module.list_files method.
        """
        import collections
        from pysat.utils import create_datetime_index

        if format_str is None:
            raise ValueError("Must supply a filename template (format_str).")
        if data_path is None:
            raise ValueError("Must supply instrument directory path (dir_path)")

        # parse format string to figure out the search string to use
        # to identify files in the filesystem
        search_str = ''
        form = string.Formatter()
        keys = []
        snips = []
        length = []
        stored = collections.OrderedDict()
        stored['year'] = []; stored['month'] = []; stored['day'] = [];
        stored['hour'] = []; stored['min'] = []; stored['sec'] = [];
        for snip in form.parse(format_str):
            # snip is (literal_text, field_name, format_spec, conversion)
            search_str += snip[0]
            snips.append(snip[0])
            if snip[1] is not None:
                keys.append(snip[1])
                search_str += '*'
                # try and determine formatting width
                temp = re.findall(r'\d+', snip[2])
                if temp:
                    # there are items, try and grab width
                    for i in temp:
                        if i != 0:
                            length.append(int(i))
                            break
                else:
                    raise ValueError("Couldn't determine formatting width")

        abs_search_str = os.path.join(data_path, search_str)
        files = glob.glob(abs_search_str)

        # we have a list of files, now we need to extract the date information
        # code below works, but only if the size of file string
        # remains unchanged
        # determine the locations the date information in a filename is stored
        # use these indices to slice out date from loaded filenames
        # test_str = format_str.format(**periods)
        if len(files) > 0:
            idx = 0
            begin_key = []
            end_key = []
            for i, snip in enumerate(snips):
                idx += len(snip)
                if i < (len(length)):
                    begin_key.append(idx)
                    idx += length[i]
                    end_key.append(idx)
            max_len = idx
            # setting up negative indexing to pick out filenames
            key_str_idx = [np.array(begin_key, dtype=int) - max_len,
                           np.array(end_key, dtype=int) - max_len]
            # need to parse out dates for datetime index
            for i, temp in enumerate(files):
                for j, key in enumerate(keys):
                    val = temp[key_str_idx[0][j]:key_str_idx[1][j]]
                    stored[key].append(val)
            # convert to numpy arrays
            for key in stored.keys():
                stored[key] = np.array(stored[key]).astype(int)
                if len(stored[key]) == 0:
                    stored[key] = None
            # deal with the possibility of two digit years
            # years above or equal to break are considered to be 1900+
            # years below break are considered to be 2000+
            if two_digit_year_break is not None:
                idx, = np.where(np.array(stored['year']) >=
                                two_digit_year_break)
                stored['year'][idx] = stored['year'][idx] + 1900
                idx, = np.where(np.array(stored['year']) <
                                two_digit_year_break)
                stored['year'][idx] = stored['year'][idx] + 2000
            # need to sort the information for things to work
            rec_arr = [stored[key] for key in keys]
            rec_arr.append(files)
            # sort all arrays
            val_keys = keys + ['files']
            rec_arr = np.rec.fromarrays(rec_arr, names=val_keys)
            rec_arr.sort(order=val_keys, axis=0)
            # pull out sorted info
            for key in keys:
                stored[key] = rec_arr[key]
            files = rec_arr['files']
            # add hour and minute information to 'sec'
            if stored['sec'] is None:
                stored['sec'] = np.zeros(len(files))
            if stored['hour'] is not None:
                stored['sec'] += 3600 * stored['hour']
            if stored['min'] is not None:
                stored['sec'] += 60 * stored['min']
            index = create_datetime_index(year=stored['year'],
                                          month=stored['month'],
                                          day=stored['day'],
                                          uts=stored['sec'])

            return pds.Series(files, index=index)
        else:
            return pds.Series(None)
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""TraceEventImporter imports TraceEvent-formatted data into the provided
model.
This is a port of the trace event importer from
https://code.google.com/p/trace-viewer/
"""

import collections
import copy

import telemetry.timeline.async_slice as tracing_async_slice
import telemetry.timeline.flow_event as tracing_flow_event
from telemetry.timeline import importer
from telemetry.timeline import memory_dump_event
from telemetry.timeline import trace_data as trace_data_module


class TraceEventTimelineImporter(importer.TimelineImporter):
  """Imports Chrome trace events into a timeline model.

  Events are dispatched by their 'ph' (phase) field in ImportEvents();
  async, object, flow and memory-dump events are buffered and resolved
  in FinalizeImport() once all importers have run.

  All 'ts'/'tts'/'dur'/'tdur' values are divided by 1000.0 before being
  handed to the model (presumably microseconds -> milliseconds — TODO
  confirm against the model's documented time unit).
  """

  def __init__(self, model, trace_data):
    super(TraceEventTimelineImporter, self).__init__(
        model, trace_data, import_order=1)
    assert isinstance(trace_data, trace_data_module.TraceData)
    self._trace_data = trace_data

    # Buffers for events that cannot be resolved until FinalizeImport().
    self._all_async_events = []
    self._all_object_events = []
    self._all_flow_events = []
    self._all_memory_dumps_by_dump_id = collections.defaultdict(list)

    self._events = trace_data.GetEventsFor(trace_data_module.CHROME_TRACE_PART)

  @staticmethod
  def GetSupportedPart():
    """Returns the trace-data part this importer knows how to consume."""
    return trace_data_module.CHROME_TRACE_PART

  def _GetOrCreateProcess(self, pid):
    # Thin convenience wrapper around the model's process registry.
    return self._model.GetOrCreateProcess(pid)

  def _DeepCopyIfNeeded(self, obj):
    # Avoid the deep-copy cost when the trace data owner says events may
    # be mutated in place.
    if self._trace_data.events_are_safely_mutable:
      return obj
    return copy.deepcopy(obj)

  def _ProcessAsyncEvent(self, event):
    """Helper to process an 'async finish' event, which will close an
    open slice.
    """
    # Only buffered here; matching of start/step/finish happens in
    # _CreateAsyncSlices() after all events are sorted by timestamp.
    thread = (self._GetOrCreateProcess(event['pid'])
        .GetOrCreateThread(event['tid']))
    self._all_async_events.append({
        'event': event,
        'thread': thread})

  def _ProcessCounterEvent(self, event):
    """Helper that creates and adds samples to a Counter object based on
    'C' phase events.
    """
    # An explicit id distinguishes multiple counters with the same name.
    if 'id' in event:
      ctr_name = event['name'] + '[' + str(event['id']) + ']'
    else:
      ctr_name = event['name']

    ctr = (self._GetOrCreateProcess(event['pid'])
        .GetOrCreateCounter(event['cat'], ctr_name))
    # Initialize the counter's series fields if needed.
    if len(ctr.series_names) == 0:
      #TODO: implement counter object
      for series_name in event['args']:
        ctr.series_names.append(series_name)
      if len(ctr.series_names) == 0:
        self._model.import_errors.append('Expected counter ' + event['name'] +
            ' to have at least one argument to use as a value.')
        # Drop the counter.
        del ctr.parent.counters[ctr.full_name]
        return

    # Add the sample values. Series missing from this event's args are
    # padded with 0 so every series stays the same length.
    ctr.timestamps.append(event['ts'] / 1000.0)
    for series_name in ctr.series_names:
      if series_name not in event['args']:
        ctr.samples.append(0)
        continue
      ctr.samples.append(event['args'][series_name])

  def _ProcessObjectEvent(self, event):
    # Buffered only; object parsing is not implemented yet (see
    # _CreateExplicitObjects/_CreateImplicitObjects).
    thread = (self._GetOrCreateProcess(event['pid'])
        .GetOrCreateThread(event['tid']))
    self._all_object_events.append({
        'event': event,
        'thread': thread})

  def _ProcessDurationEvent(self, event):
    """Opens ('B') or closes ('E') a synchronous slice on the event's
    thread, merging E-phase args into the slice."""
    thread = (self._GetOrCreateProcess(event['pid'])
        .GetOrCreateThread(event['tid']))
    if not thread.IsTimestampValidForBeginOrEnd(event['ts'] / 1000.0):
      self._model.import_errors.append(
          'Timestamps are moving backward.')
      return

    if event['ph'] == 'B':
      thread.BeginSlice(event['cat'],
                        event['name'],
                        event['ts'] / 1000.0,
                        event['tts'] / 1000.0 if 'tts' in event else None,
                        event['args'])
    elif event['ph'] == 'E':
      thread = (self._GetOrCreateProcess(event['pid'])
          .GetOrCreateThread(event['tid']))
      if not thread.IsTimestampValidForBeginOrEnd(event['ts'] / 1000.0):
        self._model.import_errors.append(
            'Timestamps are moving backward.')
        return
      if not thread.open_slice_count:
        self._model.import_errors.append(
            'E phase event without a matching B phase event.')
        return

      new_slice = thread.EndSlice(
          event['ts'] / 1000.0,
          event['tts'] / 1000.0 if 'tts' in event else None)
      # E-phase args win over B-phase args on conflict (warned below).
      for arg_name, arg_value in event.get('args', {}).iteritems():
        if arg_name in new_slice.args:
          self._model.import_errors.append(
              'Both the B and E phases of ' + new_slice.name +
              ' provided values for argument ' + arg_name + '. ' +
              'The value of the E phase event will be used.')
        new_slice.args[arg_name] = arg_value

  def _ProcessCompleteEvent(self, event):
    # 'X' phase: a slice with an explicit duration instead of a B/E pair.
    thread = (self._GetOrCreateProcess(event['pid'])
              .GetOrCreateThread(event['tid']))
    thread.PushCompleteSlice(
        event['cat'],
        event['name'],
        event['ts'] / 1000.0,
        event['dur'] / 1000.0 if 'dur' in event else None,
        event['tts'] / 1000.0 if 'tts' in event else None,
        event['tdur'] / 1000.0 if 'tdur' in event else None,
        event['args'])

  def _ProcessMarkEvent(self, event):
    # 'R' phase: a mark placed on the event's thread.
    thread = (self._GetOrCreateProcess(event['pid'])
              .GetOrCreateThread(event['tid']))
    thread.PushMarkSlice(
        event['cat'],
        event['name'],
        event['ts'] / 1000.0,
        event['tts'] / 1000.0 if 'tts' in event else None,
        event['args'] if 'args' in event else None)

  def _ProcessMetadataEvent(self, event):
    """Applies 'M' phase metadata (thread/process names, labels,
    buffer-overflow timestamp) to the model."""
    if event['name'] == 'thread_name':
      thread = (self._GetOrCreateProcess(event['pid'])
          .GetOrCreateThread(event['tid']))
      thread.name = event['args']['name']
    elif event['name'] == 'process_name':
      process = self._GetOrCreateProcess(event['pid'])
      process.name = event['args']['name']
    elif event['name'] == 'process_labels':
      process = self._GetOrCreateProcess(event['pid'])
      process.labels = event['args']['labels']
    elif event['name'] == 'trace_buffer_overflowed':
      process = self._GetOrCreateProcess(event['pid'])
      process.SetTraceBufferOverflowTimestamp(event['args']['overflowed_at_ts'])
    else:
      self._model.import_errors.append(
          'Unrecognized metadata name: ' + event['name'])

  def _ProcessInstantEvent(self, event):
    # Treat an Instant event as a duration 0 slice.
    # SliceTrack's redraw() knows how to handle this.
    thread = (self._GetOrCreateProcess(event['pid'])
      .GetOrCreateThread(event['tid']))
    thread.BeginSlice(event['cat'],
                      event['name'],
                      event['ts'] / 1000.0,
                      args=event.get('args'))
    thread.EndSlice(event['ts'] / 1000.0)

  def _ProcessSampleEvent(self, event):
    # 'P' phase: a sampling-profiler sample on the event's thread.
    thread = (self._GetOrCreateProcess(event['pid'])
        .GetOrCreateThread(event['tid']))
    thread.AddSample(event['cat'],
                     event['name'],
                     event['ts'] / 1000.0,
                     event.get('args'))

  def _ProcessFlowEvent(self, event):
    # Buffered only; flow start/step/finish matching happens in
    # _CreateFlowSlices().
    thread = (self._GetOrCreateProcess(event['pid'])
        .GetOrCreateThread(event['tid']))
    self._all_flow_events.append({
        'event': event,
        'thread': thread})

  def _ProcessMemoryDumpEvent(self, event):
    # 'v' phase: one process's slice of a global memory dump; grouped by
    # dump_id so _CreateMemoryDumps() can build global dumps.
    process = self._GetOrCreateProcess(event['pid'])
    memory_dump = memory_dump_event.ProcessMemoryDumpEvent(process, event)
    process.AddMemoryDumpEvent(memory_dump)
    self._all_memory_dumps_by_dump_id[memory_dump.dump_id].append(memory_dump)

  def ImportEvents(self):
    """Walks through the events_ list and outputs the structures discovered to
    model_.
    """
    for event in self._events:
      phase = event.get('ph', None)
      if phase == 'B' or phase == 'E':
        self._ProcessDurationEvent(event)
      elif phase == 'X':
        self._ProcessCompleteEvent(event)
      # Note, S, F, T are deprecated and replaced by 'b' and 'e'. For
      # backwards compatibility continue to support them here.
      elif phase == 'S' or phase == 'F' or phase == 'T':
        self._ProcessAsyncEvent(event)
      elif phase == 'b' or phase == 'e':
        self._ProcessAsyncEvent(event)
      # Note, I is historic. The instant event marker got changed, but we
      # want to support loading old trace files so we have both I and i.
      elif phase == 'I' or phase == 'i':
        self._ProcessInstantEvent(event)
      elif phase == 'P':
        self._ProcessSampleEvent(event)
      elif phase == 'C':
        self._ProcessCounterEvent(event)
      elif phase == 'M':
        self._ProcessMetadataEvent(event)
      elif phase == 'N' or phase == 'D' or phase == 'O':
        self._ProcessObjectEvent(event)
      elif phase == 's' or phase == 't' or phase == 'f':
        self._ProcessFlowEvent(event)
      elif phase == 'v':
        self._ProcessMemoryDumpEvent(event)
      elif phase == 'R':
        self._ProcessMarkEvent(event)
      else:
        self._model.import_errors.append('Unrecognized event phase: ' +
            phase + '(' + event['name'] + ')')
    return self._model

  def FinalizeImport(self):
    """Called by the Model after all other importers have imported their
    events."""
    self._model.UpdateBounds()

    # We need to reupdate the bounds in case the minimum start time changes
    self._model.UpdateBounds()
    self._CreateAsyncSlices()
    self._CreateFlowSlices()
    self._SetBrowserProcess()
    self._SetGpuProcess()
    self._CreateExplicitObjects()
    self._CreateImplicitObjects()
    self._CreateMemoryDumps()

  def _CreateAsyncSlices(self):
    """Matches buffered async start/step/finish events (keyed by name
    then id) into AsyncSlice objects with per-step sub-slices."""
    if len(self._all_async_events) == 0:
      return

    # Sort so starts are always seen before their matching finishes.
    self._all_async_events.sort(key=lambda x: x['event']['ts'])

    async_event_states_by_name_then_id = {}

    all_async_events = self._all_async_events
    for async_event_state in all_async_events:
      event = async_event_state['event']
      name = event.get('name', None)
      if name is None:
        self._model.import_errors.append(
            'Async events (ph: b, e, S, T or F) require an name parameter.')
        continue

      event_id = event.get('id')
      if event_id is None:
        self._model.import_errors.append(
            'Async events (ph: b, e, S, T or F) require an id parameter.')
        continue
      # TODO(simonjam): Add a synchronous tick on the appropriate thread.

      if event['ph'] == 'S' or event['ph'] == 'b':
        if not name in async_event_states_by_name_then_id:
          async_event_states_by_name_then_id[name] = {}
        if event_id in async_event_states_by_name_then_id[name]:
          self._model.import_errors.append(
              'At %d, a slice of the same id %s was already open.' % (
                  event['ts'], event_id))
          continue

        async_event_states_by_name_then_id[name][event_id] = []
        async_event_states_by_name_then_id[name][event_id].append(
            async_event_state)
      else:
        # Step ('T'/'t'-style) or finish event: must match an open start.
        if name not in async_event_states_by_name_then_id:
          self._model.import_errors.append(
              'At %d, no slice named %s was open.' % (event['ts'], name,))
          continue
        if event_id not in async_event_states_by_name_then_id[name]:
          self._model.import_errors.append(
              'At %d, no slice named %s with id=%s was open.' % (
                  event['ts'], name, event_id))
          continue
        events = async_event_states_by_name_then_id[name][event_id]
        events.append(async_event_state)

        if event['ph'] == 'F' or event['ph'] == 'e':
          # Create a slice from start to end.
          async_slice = tracing_async_slice.AsyncSlice(
              events[0]['event']['cat'],
              name,
              events[0]['event']['ts'] / 1000.0)

          async_slice.duration = ((event['ts'] / 1000.0)
              - (events[0]['event']['ts'] / 1000.0))

          async_slice.start_thread = events[0]['thread']
          async_slice.end_thread = async_event_state['thread']
          # Thread timestamps are only meaningful when the slice starts
          # and ends on the same thread.
          if async_slice.start_thread == async_slice.end_thread:
            if 'tts' in event and 'tts' in events[0]['event']:
              async_slice.thread_start = events[0]['event']['tts'] / 1000.0
              async_slice.thread_duration = ((event['tts'] / 1000.0)
                  - (events[0]['event']['tts'] / 1000.0))
          async_slice.id = event_id
          async_slice.args = events[0]['event']['args']

          # Create sub_slices for each step.
          for j in xrange(1, len(events)):
            sub_name = name
            if events[j - 1]['event']['ph'] == 'T':
              sub_name = name + ':' + events[j - 1]['event']['args']['step']
            sub_slice = tracing_async_slice.AsyncSlice(
                events[0]['event']['cat'],
                sub_name,
                events[j - 1]['event']['ts'] / 1000.0)
            sub_slice.parent_slice = async_slice

            sub_slice.duration = ((events[j]['event']['ts'] / 1000.0)
                - (events[j - 1]['event']['ts'] / 1000.0))

            sub_slice.start_thread = events[j - 1]['thread']
            sub_slice.end_thread = events[j]['thread']
            if sub_slice.start_thread == sub_slice.end_thread:
              if 'tts' in events[j]['event'] and \
                  'tts' in events[j - 1]['event']:
                sub_slice.thread_duration = \
                    ((events[j]['event']['tts'] / 1000.0)
                        - (events[j - 1]['event']['tts'] / 1000.0))

            sub_slice.id = event_id
            sub_slice.args = events[j - 1]['event']['args']

            async_slice.AddSubSlice(sub_slice)

          # The args for the finish event go in the last sub_slice.
          last_slice = async_slice.sub_slices[-1]
          for arg_name, arg_value in event['args'].iteritems():
            last_slice.args[arg_name] = arg_value

          # Add |async_slice| to the start-thread's async_slices.
          async_slice.start_thread.AddAsyncSlice(async_slice)
          del async_event_states_by_name_then_id[name][event_id]

  def _CreateExplicitObjects(self):
    # TODO(tengs): Implement object instance parsing
    pass

  def _CreateImplicitObjects(self):
    # TODO(tengs): Implement object instance parsing
    pass

  def _CreateFlowSlices(self):
    """Matches buffered flow events ('s' start, 't' step, 'f' finish,
    keyed by id) into FlowEvent pairs on the model."""
    if len(self._all_flow_events) == 0:
      return

    self._all_flow_events.sort(key=lambda x: x['event']['ts'])

    flow_id_to_event = {}
    for data in self._all_flow_events:
      event = data['event']
      thread = data['thread']
      if 'name' not in event:
        self._model.import_errors.append(
            'Flow events (ph: s, t or f) require a name parameter.')
        continue
      if 'id' not in event:
        self._model.import_errors.append(
            'Flow events (ph: s, t or f) require an id parameter.')
        continue

      flow_event = tracing_flow_event.FlowEvent(
          event['cat'],
          event['id'],
          event['name'],
          event['ts'] / 1000.0,
          event['args'])
      thread.AddFlowEvent(flow_event)

      if event['ph'] == 's':
        if event['id'] in flow_id_to_event:
          self._model.import_errors.append(
              'event id %s already seen when encountering start of'
              'flow event.' % event['id'])
          continue
        flow_id_to_event[event['id']] = flow_event

      elif event['ph'] == 't' or event['ph'] == 'f':
        if not event['id'] in flow_id_to_event:
          self._model.import_errors.append(
              'Found flow phase %s for id: %s but no flow start found.' % (
                  event['ph'], event['id']))
          continue

        flow_position = flow_id_to_event[event['id']]
        self._model.flow_events.append([flow_position, flow_event])

        if event['ph'] == 'f':
          del flow_id_to_event[event['id']]
        else:
          # Make this event the next start event in this flow.
          flow_id_to_event[event['id']] = flow_event

  def _CreateMemoryDumps(self):
    # Each group of per-process dumps sharing a dump_id becomes one
    # global dump.
    self._model.SetGlobalMemoryDumps(
        memory_dump_event.GlobalMemoryDump(events)
        for events in self._all_memory_dumps_by_dump_id.itervalues())

  def _SetBrowserProcess(self):
    # The browser process is identified by its well-known main thread name.
    for thread in self._model.GetAllThreads():
      if thread.name == 'CrBrowserMain':
        self._model.browser_process = thread.parent

  def _SetGpuProcess(self):
    # The GPU process is identified by its well-known main thread name.
    for thread in self._model.GetAllThreads():
      if thread.name == 'CrGpuMain':
        self._model.gpu_process = thread.parent
# repo.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from git.exc import (
    InvalidGitRepositoryError,
    NoSuchPathError,
    GitCommandError
)
from git.cmd import (
    Git,
    handle_process_output
)
from git.refs import (
    HEAD,
    Head,
    Reference,
    TagReference,
)
from git.objects import (
    Submodule,
    RootModule,
    Commit
)
from git.util import (
    Actor,
    finalize_process
)
from git.index import IndexFile
from git.config import GitConfigParser
from git.remote import (
    Remote,
    add_progress
)
from git.db import (
    GitCmdObjectDB,
    GitDB
)
from gitdb.util import (
    join,
    isfile,
    hex_to_bin
)
from .fun import (
    rev_parse,
    is_git_dir,
    find_git_dir,
    touch,
)
from git.compat import (
    text_type,
    defenc
)

import os
import sys
import re

DefaultDBType = GitDB
if sys.version_info[:2] < (2, 5):     # python 2.4 compatiblity
    DefaultDBType = GitCmdObjectDB
# END handle python 2.4

__all__ = ('Repo', )


def _expand_path(p):
    """Return *p* made absolute, with environment variables and '~' expanded."""
    return os.path.abspath(os.path.expandvars(os.path.expanduser(p)))


class Repo(object):
    """Represents a git repository and allows you to query references,
    gather commit information, generate diffs, create and clone repositories query
    the log.

    The following attributes are worth using:

    'working_dir' is the working directory of the git command, which is the working tree
    directory if available or the .git directory in case of bare repositories

    'working_tree_dir' is the working tree directory, but will raise AssertionError
    if we are a bare repository.

    'git_dir' is the .git repository directory, which is always set."""
    DAEMON_EXPORT_FILE = 'git-daemon-export-ok'

    __slots__ = ("working_dir", "_working_tree_dir", "git_dir", "_bare", "git", "odb")

    # precompiled regex
    re_whitespace = re.compile(r'\s+')
    re_hexsha_only = re.compile('^[0-9A-Fa-f]{40}$')
    re_hexsha_shortened = re.compile('^[0-9A-Fa-f]{4,40}$')
    re_author_committer_start = re.compile(r'^(author|committer)')
    re_tab_full_line = re.compile(r'^\t(.*)$')

    # invariants
    # represents the configuration level of a configuration file
    config_level = ("system", "user", "global", "repository")

    # Subclass configuration
    # Subclasses may easily bring in their own custom types by placing a constructor or type here
    GitCommandWrapperType = Git

    def __init__(self, path=None, odbt=DefaultDBType, search_parent_directories=False):
        """Create a new Repo instance

        :param path: the path to either the root git directory or the bare git repo::

            repo = Repo("/Users/mtrier/Development/git-python")
            repo = Repo("/Users/mtrier/Development/git-python.git")
            repo = Repo("~/Development/git-python.git")
            repo = Repo("$REPOSITORIES/Development/git-python.git")

        :param odbt: Object DataBase type - a type which is constructed by providing
            the directory containing the database objects, i.e. .git/objects. It will
            be used to access all object data
        :param search_parent_directories: if True, all parent directories will be searched for a valid repo as well.
            Please note that this was the default behaviour in older versions of GitPython,
            which is considered a bug though.
        :raise InvalidGitRepositoryError:
        :raise NoSuchPathError:
        :return: git.Repo """
        epath = _expand_path(path or os.getcwd())
        self.git = None  # should be set for __del__ not to fail in case we raise
        if not os.path.exists(epath):
            raise NoSuchPathError(epath)

        self.working_dir = None
        self._working_tree_dir = None
        self.git_dir = None
        curpath = epath

        # walk up the path to find the .git dir
        while curpath:
            # ABOUT os.path.NORMPATH
            # It's important to normalize the paths, as submodules will otherwise initialize their
            # repo instances with paths that depend on path-portions that will not exist after being
            # removed. It's just cleaner.
            if is_git_dir(curpath):
                self.git_dir = os.path.normpath(curpath)
                self._working_tree_dir = os.path.dirname(self.git_dir)
                break

            # handle a '.git' file pointing elsewhere (e.g. submodules)
            gitpath = find_git_dir(join(curpath, '.git'))
            if gitpath is not None:
                self.git_dir = os.path.normpath(gitpath)
                self._working_tree_dir = curpath
                break

            if not search_parent_directories:
                break
            curpath, dummy = os.path.split(curpath)
            if not dummy:
                break
        # END while curpath

        if self.git_dir is None:
            raise InvalidGitRepositoryError(epath)

        self._bare = False
        try:
            self._bare = self.config_reader("repository").getboolean('core', 'bare')
        except Exception:
            # lets not assume the option exists, although it should
            pass

        # adjust the wd in case we are actually bare - we didn't know that
        # in the first place
        if self._bare:
            self._working_tree_dir = None
        # END working dir handling

        self.working_dir = self._working_tree_dir or self.git_dir
        self.git = self.GitCommandWrapperType(self.working_dir)

        # special handling, in special times
        # GitCmdObjectDB needs the git command wrapper to talk to git.
        args = [join(self.git_dir, 'objects')]
        if issubclass(odbt, GitCmdObjectDB):
            args.append(self.git)
        self.odb = odbt(*args)

    def __del__(self):
        # self.git may be None if __init__ raised before setting it
        if self.git:
            self.git.clear_cache()

    def __eq__(self, rhs):
        if isinstance(rhs, Repo):
            return self.git_dir == rhs.git_dir
        return False

    def __ne__(self, rhs):
        return not self.__eq__(rhs)

    def __hash__(self):
        return hash(self.git_dir)

    # Description property
    def _get_description(self):
        filename = join(self.git_dir, 'description')
        return open(filename, 'rb').read().rstrip().decode(defenc)

    def _set_description(self, descr):
        filename = join(self.git_dir, 'description')
        open(filename, 'wb').write((descr + '\n').encode(defenc))

    description = property(_get_description, _set_description,
                           doc="the project's description")
    del _get_description
    del _set_description

    @property
    def working_tree_dir(self):
        """:return: The working tree directory of our git repository. If this is a bare repository, None
            is returned.
        """
        return self._working_tree_dir

    @property
    def bare(self):
        """:return: True if the repository is bare"""
        return self._bare

    @property
    def heads(self):
        """A list of ``Head`` objects representing the branch heads in
        this repo

        :return: ``git.IterableList(Head, ...)``"""
        return Head.list_items(self)

    @property
    def references(self):
        """A list of Reference objects representing tags, heads and remote references.

        :return: IterableList(Reference, ...)"""
        return Reference.list_items(self)

    # alias for references
    refs = references

    # alias for heads
    branches = heads

    @property
    def index(self):
        """:return: IndexFile representing this repository's index."""
        return IndexFile(self)

    @property
    def head(self):
        """:return: HEAD Object pointing to the current head reference"""
        return HEAD(self, 'HEAD')

    @property
    def remotes(self):
        """A list of Remote objects allowing to access and manipulate remotes

        :return: ``git.IterableList(Remote, ...)``"""
        return Remote.list_items(self)

    def remote(self, name='origin'):
        """:return: Remote with the specified name

        :raise ValueError:  if no remote with such a name exists"""
        r = Remote(self, name)
        if not r.exists():
            raise ValueError("Remote named '%s' didn't exist" % name)
        return r

    #{ Submodules

    @property
    def submodules(self):
        """
        :return: git.IterableList(Submodule, ...) of direct submodules
            available from the current head"""
        return Submodule.list_items(self)

    def submodule(self, name):
        """ :return: Submodule with the given name

        :raise ValueError: If no such submodule exists"""
        try:
            return self.submodules[name]
        except IndexError:
            raise ValueError("Didn't find submodule named %r" % name)
        # END exception handling

    def create_submodule(self, *args, **kwargs):
        """Create a new submodule

        :note: See the documentation of Submodule.add for a description of the
            applicable parameters

        :return: created submodules"""
        return Submodule.add(self, *args, **kwargs)

    def iter_submodules(self, *args, **kwargs):
        """An iterator yielding Submodule instances, see Traversable interface
        for a description of args and kwargs

        :return: Iterator"""
        return RootModule(self).traverse(*args, **kwargs)

    def submodule_update(self, *args, **kwargs):
        """Update the submodules, keeping the repository consistent as it will
        take the previous state into consideration. For more information, please
        see the documentation of RootModule.update"""
        return RootModule(self).update(*args, **kwargs)

    #}END submodules

    @property
    def tags(self):
        """A list of ``Tag`` objects that are available in this repo

        :return: ``git.IterableList(TagReference, ...)`` """
        return TagReference.list_items(self)

    def tag(self, path):
        """:return: TagReference Object, reference pointing to a Commit or Tag

        :param path: path to the tag reference, i.e. 0.1.5 or tags/0.1.5 """
        return TagReference(self, path)

    def create_head(self, path, commit='HEAD', force=False, logmsg=None):
        """Create a new head within the repository.
        For more documentation, please see the Head.create method.

        :return: newly created Head Reference"""
        return Head.create(self, path, commit, force, logmsg)

    def delete_head(self, *heads, **kwargs):
        """Delete the given heads

        :param kwargs: Additional keyword arguments to be passed to git-branch"""
        return Head.delete(self, *heads, **kwargs)

    def create_tag(self, path, ref='HEAD', message=None, force=False, **kwargs):
        """Create a new tag reference.
        For more documentation, please see the TagReference.create method.

        :return: TagReference object """
        return TagReference.create(self, path, ref, message, force, **kwargs)

    def delete_tag(self, *tags):
        """Delete the given tag references"""
        return TagReference.delete(self, *tags)

    def create_remote(self, name, url, **kwargs):
        """Create a new remote.

        For more information, please see the documentation of the Remote.create
        methods

        :return: Remote reference"""
        return Remote.create(self, name, url, **kwargs)

    def delete_remote(self, remote):
        """Delete the given remote."""
        return Remote.remove(self, remote)

    def _get_config_path(self, config_level):
        """Map a config level name to the path of its configuration file."""
        # we do not support an absolute path of the gitconfig on windows ,
        # use the global config instead
        if sys.platform == "win32" and config_level == "system":
            config_level = "global"

        if config_level == "system":
            return "/etc/gitconfig"
        elif config_level == "user":
            config_home = os.environ.get("XDG_CONFIG_HOME") or os.path.join(os.environ.get("HOME", '~'), ".config")
            return os.path.normpath(os.path.expanduser(join(config_home, "git", "config")))
        elif config_level == "global":
            return os.path.normpath(os.path.expanduser("~/.gitconfig"))
        elif config_level == "repository":
            return os.path.normpath(join(self.git_dir, "config"))

        raise ValueError("Invalid configuration level: %r" % config_level)

    def config_reader(self, config_level=None):
        """
        :return:
            GitConfigParser allowing to read the full git configuration, but not to write it

            The configuration will include values from the system, user and repository
            configuration files.

        :param config_level:
            For possible values, see config_writer method
            If None, all applicable levels will be used. Specify a level in case
            you know which exact file you wish to read to prevent reading multiple files for
            instance
        :note: On windows, system configuration cannot currently be read as the path is
            unknown, instead the global path will be used."""
        files = None
        if config_level is None:
            files = [self._get_config_path(f) for f in self.config_level]
        else:
            files = [self._get_config_path(config_level)]
        return GitConfigParser(files, read_only=True)

    def config_writer(self, config_level="repository"):
        """
        :return:
            GitConfigParser allowing to write values of the specified configuration file level.
            Config writers should be retrieved, used to change the configuration, and written
            right away as they will lock the configuration file in question and prevent other's
            to write it.

        :param config_level:
            One of the following values
            system = system wide configuration file
            global = user level configuration file
            repository = configuration file for this repository only"""
        return GitConfigParser(self._get_config_path(config_level), read_only=False)

    def commit(self, rev=None):
        """The Commit object for the specified revision

        :param rev: revision specifier, see git-rev-parse for viable options.
        :return: ``git.Commit``"""
        if rev is None:
            return self.head.commit
        else:
            # "^0" dereferences tags to the underlying commit
            return self.rev_parse(text_type(rev) + "^0")

    def iter_trees(self, *args, **kwargs):
        """:return: Iterator yielding Tree objects

        :note: Takes all arguments known to iter_commits method"""
        return (c.tree for c in self.iter_commits(*args, **kwargs))

    def tree(self, rev=None):
        """The Tree object for the given treeish revision
        Examples::

              repo.tree(repo.heads[0])

        :param rev: is a revision pointing to a Treeish ( being a commit or tree )
        :return: ``git.Tree``

        :note:
            If you need a non-root level tree, find it by iterating the root tree. Otherwise
            it cannot know about its path relative to the repository root and subsequent
            operations might have unexpected results."""
        if rev is None:
            return self.head.commit.tree
        else:
            # "^{tree}" peels the rev down to its tree object
            return self.rev_parse(text_type(rev) + "^{tree}")

    def iter_commits(self, rev=None, paths='', **kwargs):
        """A list of Commit objects representing the history of a given ref/commit

        :parm rev:
            revision specifier, see git-rev-parse for viable options.
            If None, the active branch will be used.

        :parm paths:
            is an optional path or a list of paths to limit the returned commits to
            Commits that do not contain that path or the paths will not be returned.

        :parm kwargs:
            Arguments to be passed to git-rev-list - common ones are
            max_count and skip

        :note: to receive only commits between two named revisions, use the
            "revA...revB" revision specifier

        :return ``git.Commit[]``"""
        if rev is None:
            rev = self.head.commit

        return Commit.iter_items(self, rev, paths, **kwargs)

    def merge_base(self, *rev, **kwargs):
        """Find the closest common ancestor for the given revision (e.g. Commits, Tags, References, etc)

        :param rev: At least two revs to find the common ancestor for.
        :param kwargs: Additional arguments to be passed to the repo.git.merge_base() command which does all the work.
        :return: A list of Commit objects. If --all was not specified as kwarg, the list will have at max one Commit,
            or is empty if no common merge base exists.
        :raises ValueError: If not at least two revs are provided
        """
        if len(rev) < 2:
            raise ValueError("Please specify at least two revs, got only %i" % len(rev))
        # end handle input

        res = list()
        try:
            lines = self.git.merge_base(*rev, **kwargs).splitlines()
        except GitCommandError as err:
            if err.status == 128:
                raise
            # end handle invalid rev
            # Status code 1 is returned if there is no merge-base
            # (see https://github.com/git/git/blob/master/builtin/merge-base.c#L16)
            return res
        # end exception handling

        for line in lines:
            res.append(self.commit(line))
        # end for each merge-base

        return res

    def _get_daemon_export(self):
        # The export flag is simply the presence of a marker file.
        filename = join(self.git_dir, self.DAEMON_EXPORT_FILE)
        return os.path.exists(filename)

    def _set_daemon_export(self, value):
        filename = join(self.git_dir, self.DAEMON_EXPORT_FILE)
        fileexists = os.path.exists(filename)
        if value and not fileexists:
            touch(filename)
        elif not value and fileexists:
            os.unlink(filename)

    daemon_export = property(_get_daemon_export, _set_daemon_export,
                             doc="If True, git-daemon may export this repository")
    del _get_daemon_export
    del _set_daemon_export

    def _get_alternates(self):
        """The list of alternates for this repo from which objects can be retrieved

        :return: list of strings being pathnames of alternates"""
        alternates_path = join(self.git_dir, 'objects', 'info', 'alternates')

        if os.path.exists(alternates_path):
            # NOTE(review): if open() itself raises, 'f' is unbound and the
            # finally clause would raise NameError — worth hardening.
            try:
                f = open(alternates_path, 'rb')
                alts = f.read().decode(defenc)
            finally:
                f.close()
            return alts.strip().splitlines()
        else:
            return list()

    def _set_alternates(self, alts):
        """Sets the alternates

        :parm alts:
            is the array of string paths representing the alternates at which
            git should look for objects, i.e. /home/user/repo/.git/objects

        :raise NoSuchPathError:
        :note:
            The method does not check for the existance of the paths in alts
            as the caller is responsible."""
        alternates_path = join(self.git_dir, 'objects', 'info', 'alternates')
        if not alts:
            # an empty list removes the alternates file entirely
            if isfile(alternates_path):
                os.remove(alternates_path)
        else:
            try:
                f = open(alternates_path, 'wb')
                f.write("\n".join(alts).encode(defenc))
            finally:
                f.close()
            # END file handling
        # END alts handling

    alternates = property(_get_alternates, _set_alternates,
                          doc="Retrieve a list of alternates paths or set a list paths to be used as alternates")

    def is_dirty(self, index=True, working_tree=True, untracked_files=False,
                 submodules=True):
        """
        :return:
            ``True``, the repository is considered dirty. By default it will react
            like a git-status without untracked files, hence it is dirty if the
            index or the working copy have changes."""
        if self._bare:
            # Bare repositories with no associated working directory are
            # always consired to be clean.
            return False

        # start from the one which is fastest to evaluate
        default_args = ['--abbrev=40', '--full-index', '--raw']
        if not submodules:
            default_args.append('--ignore-submodules')
        if index:
            # diff index against HEAD
            if isfile(self.index.path) and \
                    len(self.git.diff('--cached', *default_args)):
                return True
        # END index handling
        if working_tree:
            # diff index against working tree
            if len(self.git.diff(*default_args)):
                return True
        # END working tree handling
        if untracked_files:
            if len(self._get_untracked_files(ignore_submodules=not submodules)):
                return True
        # END untracked files
        return False

    @property
    def untracked_files(self):
        """
        :return:
            list(str,...)

            Files currently untracked as they have not been staged yet. Paths
            are relative to the current working directory of the git command.

        :note:
            ignored files will not appear here, i.e. files mentioned in .gitignore"""
        return self._get_untracked_files()

    def _get_untracked_files(self, **kwargs):
        # make sure we get all files, no only untracked directores
        proc = self.git.status(porcelain=True,
                               untracked_files=True,
                               as_process=True,
                               **kwargs)
        # Untracked files preffix in porcelain mode
        prefix = "?? "
        untracked_files = list()
        for line in proc.stdout:
            line = line.decode(defenc)
            if not line.startswith(prefix):
                continue
            filename = line[len(prefix):].rstrip('\n')
            # Special characters are escaped
            if filename[0] == filename[-1] == '"':
                # NOTE(review): str.decode('string_escape') is Python-2-only;
                # this branch would fail on Python 3 — confirm target version.
                filename = filename[1:-1].decode('string_escape')
            untracked_files.append(filename)
        finalize_process(proc)
        return untracked_files

    @property
    def active_branch(self):
        """The name of the currently active branch.

        :return: Head to the active branch"""
        return self.head.reference

    def blame(self, rev, file):
        """The blame information for the given file at the given revision.

        :parm rev: revision specifier, see git-rev-parse for viable options.
        :return:
            list: [git.Commit, list: [<line>]]
            A list of tuples associating a Commit object with a list of lines that
            changed within the given commit. The Commit objects will be given in order
            of appearance."""
        data = self.git.blame(rev, '--', file, p=True, stdout_as_string=False)
        commits = dict()
        blames = list()
        info = None

        keepends = True
        for line in data.splitlines(keepends):
            try:
                line = line.rstrip().decode(defenc)
            except UnicodeDecodeError:
                firstpart = ''
                is_binary = True
            else:
                # As we don't have an idea when the binary data ends, as it could contain multiple newlines
                # in the process. So we rely on being able to decode to tell us what is is.
# This can absolutely fail even on text files, but even if it does, we should be fine treating it # as binary instead parts = self.re_whitespace.split(line, 1) firstpart = parts[0] is_binary = False # end handle decode of line if self.re_hexsha_only.search(firstpart): # handles # 634396b2f541a9f2d58b00be1a07f0c358b999b3 1 1 7 - indicates blame-data start # 634396b2f541a9f2d58b00be1a07f0c358b999b3 2 2 - indicates # another line of blame with the same data digits = parts[-1].split(" ") if len(digits) == 3: info = {'id': firstpart} blames.append([None, []]) elif info['id'] != firstpart: info = {'id': firstpart} blames.append([commits.get(firstpart), []]) # END blame data initialization else: m = self.re_author_committer_start.search(firstpart) if m: # handles: # author Tom Preston-Werner # author-mail <tom@mojombo.com> # author-time 1192271832 # author-tz -0700 # committer Tom Preston-Werner # committer-mail <tom@mojombo.com> # committer-time 1192271832 # committer-tz -0700 - IGNORED BY US role = m.group(0) if firstpart.endswith('-mail'): info["%s_email" % role] = parts[-1] elif firstpart.endswith('-time'): info["%s_date" % role] = int(parts[-1]) elif role == firstpart: info[role] = parts[-1] # END distinguish mail,time,name else: # handle # filename lib/grit.rb # summary add Blob # <and rest> if firstpart.startswith('filename'): info['filename'] = parts[-1] elif firstpart.startswith('summary'): info['summary'] = parts[-1] elif firstpart == '': if info: sha = info['id'] c = commits.get(sha) if c is None: c = Commit(self, hex_to_bin(sha), author=Actor._from_string(info['author'] + ' ' + info['author_email']), authored_date=info['author_date'], committer=Actor._from_string( info['committer'] + ' ' + info['committer_email']), committed_date=info['committer_date'], message=info['summary']) commits[sha] = c # END if commit objects needs initial creation if not is_binary: if line and line[0] == '\t': line = line[1:] else: # NOTE: We are actually parsing lines out of binary 
data, which can lead to the # binary being split up along the newline separator. We will append this to the blame # we are currently looking at, even though it should be concatenated with the last line # we have seen. pass # end handle line contents blames[-1][0] = c blames[-1][1].append(line) info = {'id': sha} # END if we collected commit info # END distinguish filename,summary,rest # END distinguish author|committer vs filename,summary,rest # END distinguish hexsha vs other information return blames @classmethod def init(cls, path=None, mkdir=True, odbt=DefaultDBType, **kwargs): """Initialize a git repository at the given path if specified :param path: is the full path to the repo (traditionally ends with /<name>.git) or None in which case the repository will be created in the current working directory :parm mkdir: if specified will create the repository directory if it doesn't already exists. Creates the directory with a mode=0755. Only effective if a path is explicitly given :param odbt: Object DataBase type - a type which is constructed by providing the directory containing the database objects, i.e. .git/objects. It will be used to access all object data :parm kwargs: keyword arguments serving as additional options to the git-init command :return: ``git.Repo`` (the newly created repo)""" if path: path = _expand_path(path) if mkdir and path and not os.path.exists(path): os.makedirs(path, 0o755) # git command automatically chdir into the directory git = Git(path) git.init(**kwargs) return cls(path, odbt=odbt) @classmethod def _clone(cls, git, url, path, odb_default_type, progress, **kwargs): # special handling for windows for path at which the clone should be # created. # tilde '~' will be expanded to the HOME no matter where the ~ occours. 
Hence # we at least give a proper error instead of letting git fail prev_cwd = None prev_path = None odbt = kwargs.pop('odbt', odb_default_type) if os.name == 'nt': if '~' in path: raise OSError("Git cannot handle the ~ character in path %r correctly" % path) # on windows, git will think paths like c: are relative and prepend the # current working dir ( before it fails ). We temporarily adjust the working # dir to make this actually work match = re.match("(\w:[/\\\])(.*)", path) if match: prev_cwd = os.getcwd() prev_path = path drive, rest_of_path = match.groups() os.chdir(drive) path = rest_of_path kwargs['with_keep_cwd'] = True # END cwd preparation # END windows handling try: proc = git.clone(url, path, with_extended_output=True, as_process=True, v=True, **add_progress(kwargs, git, progress)) if progress: handle_process_output(proc, None, progress.new_message_handler(), finalize_process) else: finalize_process(proc) # end handle progress finally: if prev_cwd is not None: os.chdir(prev_cwd) path = prev_path # END reset previous working dir # END bad windows handling # our git command could have a different working dir than our actual # environment, hence we prepend its working dir if required if not os.path.isabs(path) and git.working_dir: path = join(git._working_dir, path) # adjust remotes - there may be operating systems which use backslashes, # These might be given as initial paths, but when handling the config file # that contains the remote from which we were clones, git stops liking it # as it will escape the backslashes. Hence we undo the escaping just to be # sure repo = cls(os.path.abspath(path), odbt=odbt) if repo.remotes: writer = repo.remotes[0].config_writer writer.set_value('url', repo.remotes[0].url.replace("\\\\", "\\").replace("\\", "/")) # PY3: be sure cleanup is performed and lock is released writer.release() # END handle remote repo return repo def clone(self, path, progress=None, **kwargs): """Create a clone from this repository. 
:param path: is the full path of the new repo (traditionally ends with ./<name>.git). :param progress: See 'git.remote.Remote.push'. :param kwargs: * odbt = ObjectDatabase Type, allowing to determine the object database implementation used by the returned Repo instance * All remaining keyword arguments are given to the git-clone command :return: ``git.Repo`` (the newly cloned repo)""" return self._clone(self.git, self.git_dir, path, type(self.odb), progress, **kwargs) @classmethod def clone_from(cls, url, to_path, progress=None, env=None, **kwargs): """Create a clone from the given URL :param url: valid git url, see http://www.kernel.org/pub/software/scm/git/docs/git-clone.html#URLS :param to_path: Path to which the repository should be cloned to :param progress: See 'git.remote.Remote.push'. :param env: Optional dictionary containing the desired environment variables. :param kwargs: see the ``clone`` method :return: Repo instance pointing to the cloned directory""" git = Git(os.getcwd()) if env is not None: git.update_environment(**env) return cls._clone(git, url, to_path, GitCmdObjectDB, progress, **kwargs) def archive(self, ostream, treeish=None, prefix=None, **kwargs): """Archive the tree at the given revision. :parm ostream: file compatible stream object to which the archive will be written as bytes :parm treeish: is the treeish name/id, defaults to active branch :parm prefix: is the optional prefix to prepend to each filename in the archive :parm kwargs: Additional arguments passed to git-archive * Use the 'format' argument to define the kind of format. Use specialized ostreams to write any format supported by python. * You may specify the special **path** keyword, which may either be a repository-relative path to a directory or file to place into the archive, or a list or tuple of multipe paths. 
:raise GitCommandError: in case something went wrong :return: self""" if treeish is None: treeish = self.head.commit if prefix and 'prefix' not in kwargs: kwargs['prefix'] = prefix kwargs['output_stream'] = ostream path = kwargs.pop('path', list()) if not isinstance(path, (tuple, list)): path = [path] # end assure paths is list self.git.archive(treeish, *path, **kwargs) return self def has_separate_working_tree(self): """ :return: True if our git_dir is not at the root of our working_tree_dir, but a .git file with a platform agnositic symbolic link. Our git_dir will be whereever the .git file points to :note: bare repositories will always return False here """ if self.bare: return False return os.path.isfile(os.path.join(self.working_tree_dir, '.git')) rev_parse = rev_parse def __repr__(self): return '<git.Repo "%s">' % self.git_dir
# Copyright 2013 OpenStack Foundation
#
#   Licensed under the Apache License, Version 2.0 (the "License"); you may
#   not use this file except in compliance with the License. You may obtain
#   a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#   License for the specific language governing permissions and limitations
#   under the License.
#

"""Unit tests for the object-store v1 container commands.

Each test class patches one ``lib_container`` function at class level, so
every test method receives the mock as its extra ``c_mock`` argument.
"""

import copy

import mock

from openstackclient.common import clientmanager
from openstackclient.object.v1 import container
from openstackclient.tests.object import fakes as object_fakes
from openstackclient.tests import utils


# Canned credentials shared by every test in this module.
AUTH_TOKEN = "foobar"
AUTH_URL = "http://0.0.0.0"


class FakeClient(object):
    """Minimal stand-in for an object-store client.

    NOTE(review): the ``endpoint`` argument is accepted but ignored — the
    module-level ``AUTH_URL`` is always assigned instead.  Presumably
    intentional for these tests; confirm before reusing elsewhere.
    """

    def __init__(self, endpoint=None, **kwargs):
        self.endpoint = AUTH_URL
        self.token = AUTH_TOKEN


class TestObject(utils.TestCommand):
    """Common base: installs a ClientManager built from the canned creds."""

    def setUp(self):
        super(TestObject, self).setUp()
        api_version = {"object-store": "1"}
        self.app.client_manager = clientmanager.ClientManager(
            token=AUTH_TOKEN,
            url=AUTH_URL,
            auth_url=AUTH_URL,
            api_version=api_version,
        )


class TestObjectClient(TestObject):
    """Sanity-check that the ClientManager exposes the canned endpoint/token."""

    def test_make_client(self):
        self.assertEqual(self.app.client_manager.object.endpoint, AUTH_URL)
        self.assertEqual(self.app.client_manager.object.token, AUTH_TOKEN)


@mock.patch(
    'openstackclient.object.v1.container.lib_container.list_containers'
)
class TestContainerList(TestObject):
    """Tests for ``container list``: one test per supported option."""

    def setUp(self):
        super(TestContainerList, self).setUp()

        # Get the command object to test
        self.cmd = container.ListContainer(self.app, None)

    def test_object_list_containers_no_options(self, c_mock):
        """No options: list_containers called with no extra kwargs."""
        c_mock.return_value = [
            copy.deepcopy(object_fakes.CONTAINER),
            copy.deepcopy(object_fakes.CONTAINER_3),
            copy.deepcopy(object_fakes.CONTAINER_2),
        ]

        arglist = []
        verifylist = []
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # DisplayCommandBase.take_action() returns two tuples
        columns, data = self.cmd.take_action(parsed_args)

        # Set expected values
        kwargs = {
        }
        c_mock.assert_called_with(
            self.app.restapi,
            AUTH_URL,
            **kwargs
        )

        collist = ('Name',)
        self.assertEqual(columns, collist)
        # Rows come back in the mock's order, one (name,) tuple per container.
        datalist = (
            (object_fakes.container_name, ),
            (object_fakes.container_name_3, ),
            (object_fakes.container_name_2, ),
        )
        self.assertEqual(tuple(data), datalist)

    def test_object_list_containers_prefix(self, c_mock):
        """--prefix is forwarded as the ``prefix`` keyword."""
        c_mock.return_value = [
            copy.deepcopy(object_fakes.CONTAINER),
            copy.deepcopy(object_fakes.CONTAINER_3),
        ]

        arglist = [
            '--prefix', 'bit',
        ]
        verifylist = [
            ('prefix', 'bit'),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # DisplayCommandBase.take_action() returns two tuples
        columns, data = self.cmd.take_action(parsed_args)

        # Set expected values
        kwargs = {
            'prefix': 'bit',
        }
        c_mock.assert_called_with(
            self.app.restapi,
            AUTH_URL,
            **kwargs
        )

        collist = ('Name',)
        self.assertEqual(columns, collist)
        datalist = (
            (object_fakes.container_name, ),
            (object_fakes.container_name_3, ),
        )
        self.assertEqual(tuple(data), datalist)

    def test_object_list_containers_marker(self, c_mock):
        """--marker is forwarded as the ``marker`` keyword."""
        c_mock.return_value = [
            copy.deepcopy(object_fakes.CONTAINER),
            copy.deepcopy(object_fakes.CONTAINER_3),
        ]

        arglist = [
            '--marker', object_fakes.container_name,
        ]
        verifylist = [
            ('marker', object_fakes.container_name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # DisplayCommandBase.take_action() returns two tuples
        columns, data = self.cmd.take_action(parsed_args)

        # Set expected values
        kwargs = {
            'marker': object_fakes.container_name,
        }
        c_mock.assert_called_with(
            self.app.restapi,
            AUTH_URL,
            **kwargs
        )

        collist = ('Name',)
        self.assertEqual(columns, collist)
        datalist = (
            (object_fakes.container_name, ),
            (object_fakes.container_name_3, ),
        )
        self.assertEqual(tuple(data), datalist)

    def test_object_list_containers_end_marker(self, c_mock):
        """--end-marker is forwarded as the ``end_marker`` keyword."""
        c_mock.return_value = [
            copy.deepcopy(object_fakes.CONTAINER),
            copy.deepcopy(object_fakes.CONTAINER_3),
        ]

        arglist = [
            '--end-marker', object_fakes.container_name_3,
        ]
        verifylist = [
            ('end_marker', object_fakes.container_name_3),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # DisplayCommandBase.take_action() returns two tuples
        columns, data = self.cmd.take_action(parsed_args)

        # Set expected values
        kwargs = {
            'end_marker': object_fakes.container_name_3,
        }
        c_mock.assert_called_with(
            self.app.restapi,
            AUTH_URL,
            **kwargs
        )

        collist = ('Name',)
        self.assertEqual(columns, collist)
        datalist = (
            (object_fakes.container_name, ),
            (object_fakes.container_name_3, ),
        )
        self.assertEqual(tuple(data), datalist)

    def test_object_list_containers_limit(self, c_mock):
        """--limit is parsed to int and forwarded as ``limit``."""
        c_mock.return_value = [
            copy.deepcopy(object_fakes.CONTAINER),
            copy.deepcopy(object_fakes.CONTAINER_3),
        ]

        arglist = [
            '--limit', '2',
        ]
        verifylist = [
            ('limit', 2),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # DisplayCommandBase.take_action() returns two tuples
        columns, data = self.cmd.take_action(parsed_args)

        # Set expected values
        kwargs = {
            'limit': 2,
        }
        c_mock.assert_called_with(
            self.app.restapi,
            AUTH_URL,
            **kwargs
        )

        collist = ('Name',)
        self.assertEqual(columns, collist)
        datalist = (
            (object_fakes.container_name, ),
            (object_fakes.container_name_3, ),
        )
        self.assertEqual(tuple(data), datalist)

    def test_object_list_containers_long(self, c_mock):
        """--long adds Bytes/Count columns; no extra kwargs are sent."""
        c_mock.return_value = [
            copy.deepcopy(object_fakes.CONTAINER),
            copy.deepcopy(object_fakes.CONTAINER_3),
        ]

        arglist = [
            '--long',
        ]
        verifylist = [
            ('long', True),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # DisplayCommandBase.take_action() returns two tuples
        columns, data = self.cmd.take_action(parsed_args)

        # Set expected values
        kwargs = {
        }
        c_mock.assert_called_with(
            self.app.restapi,
            AUTH_URL,
            **kwargs
        )

        collist = ('Name', 'Bytes', 'Count')
        self.assertEqual(columns, collist)
        datalist = (
            (
                object_fakes.container_name,
                object_fakes.container_bytes,
                object_fakes.container_count,
            ),
            (
                object_fakes.container_name_3,
                object_fakes.container_bytes * 3,
                object_fakes.container_count * 3,
            ),
        )
        self.assertEqual(tuple(data), datalist)

    def test_object_list_containers_all(self, c_mock):
        """--all maps to the ``full_listing=True`` keyword."""
        c_mock.return_value = [
            copy.deepcopy(object_fakes.CONTAINER),
            copy.deepcopy(object_fakes.CONTAINER_2),
            copy.deepcopy(object_fakes.CONTAINER_3),
        ]

        arglist = [
            '--all',
        ]
        verifylist = [
            ('all', True),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # DisplayCommandBase.take_action() returns two tuples
        columns, data = self.cmd.take_action(parsed_args)

        # Set expected values
        kwargs = {
            'full_listing': True,
        }
        c_mock.assert_called_with(
            self.app.restapi,
            AUTH_URL,
            **kwargs
        )

        collist = ('Name',)
        self.assertEqual(columns, collist)
        datalist = (
            (object_fakes.container_name, ),
            (object_fakes.container_name_2, ),
            (object_fakes.container_name_3, ),
        )
        self.assertEqual(tuple(data), datalist)


@mock.patch(
    'openstackclient.object.v1.container.lib_container.show_container'
)
class TestContainerShow(TestObject):
    """Tests for ``container show``."""

    def setUp(self):
        super(TestContainerShow, self).setUp()

        # Get the command object to test
        self.cmd = container.ShowContainer(self.app, None)

    def test_container_show(self, c_mock):
        """show_container is called positionally with the container name."""
        c_mock.return_value = copy.deepcopy(object_fakes.CONTAINER)

        arglist = [
            object_fakes.container_name,
        ]
        verifylist = [
            ('container', object_fakes.container_name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # DisplayCommandBase.take_action() returns two tuples
        columns, data = self.cmd.take_action(parsed_args)

        # Set expected values
        kwargs = {
        }
        # lib.container.show_container(api, url, container)
        c_mock.assert_called_with(
            self.app.restapi,
            AUTH_URL,
            object_fakes.container_name,
            **kwargs
        )

        collist = ('bytes', 'count', 'name')
        self.assertEqual(columns, collist)
        datalist = (
            object_fakes.container_bytes,
            object_fakes.container_count,
            object_fakes.container_name,
        )
        self.assertEqual(data, datalist)
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2014, Yung-Yu Chen <yyc@solvcon.net>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
#   may be used to endorse or promote products derived from this software
#   without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.


"""
Input and output facilities.
"""


import os
import time
import math
import warnings

import numpy as np

import solvcon as sc
from solvcon.io import vtkxml


class MeshInfoHook(sc.MeshHook):
    """
    Print mesh information.
    """

    def __init__(self, cse, show_bclist=False, perffn=None, **kw):
        """
        If keyword psteps is None, postmarch method will not output performance
        information.
        """
        #: Flag to show the list of boundary conditions. Default is ``False``.
        self.show_bclist = show_bclist
        #: Performance file name.
        self.perffn = perffn
        super(MeshInfoHook, self).__init__(cse, **kw)

    def preloop(self):
        # Print the block summary (and optionally its BC list) once, before
        # the march loop starts.
        blk = self.blk
        self.info("Block information:\n %s\n" % str(blk))
        if self.show_bclist:
            for bc in blk.bclist:
                self.info(" %s\n" % bc)
                # explicit info to prompt users to watch out issues like issue #177
                if any(bc.__class__ is sc.BC for bc in blk.bclist):
                    self.info(" One of the boundary conditions is generic boundary "
                              "type.\n")

    def _show_performance(self):
        """
        Show and store performance information.
        """
        ncell = self.blk.ncell
        # NOTE(review): this local deliberately shadows the imported ``time``
        # module inside this method; it holds elapsed seconds of the solver.
        time = self.cse.log.time['solver_march']
        step_init = self.cse.execution.step_init
        step_current = self.cse.execution.step_current
        neq = self.cse.execution.neq
        npart = self.cse.execution.npart
        # determine filename.
        perffn = '%s_perf.txt' % self.cse.io.basefn
        perffn = self.perffn if self.perffn is not None else perffn
        perffn = os.path.join(self.cse.io.basedir, perffn)
        pf = open(perffn, 'w')
        # calculate and output performance.
        def out(msg):
            # Write to both the logger and the performance file.
            self.info(msg)
            pf.write(msg)
        # Mcells marched per second (1.e-6 converts cells/s to Mcells/s).
        perf = (step_current-step_init)*ncell / time * 1.e-6
        out('Performance of %s:\n' % self.cse.io.basefn)
        out(' %g seconds in marching solver.\n' % time)
        out(' %g seconds/step.\n' % (time/(step_current-step_init)))
        out(' %g microseconds/cell.\n' % (1./perf))
        out(' %g Mcells/seconds.\n' % perf)
        out(' %g Mvariables/seconds.\n' % (perf*neq))
        if isinstance(self.cse.execution.npart, int):
            out(' %g Mcells/seconds/computer.\n' % (perf/npart))
            out(' %g Mvariables/seconds/computer.\n' % (perf*neq/npart))
        pf.close()

    def postmarch(self):
        # Report performance every ``psteps`` steps, but not at step 0 and
        # not at the final step (postloop reports that one).
        istep = self.cse.execution.step_current
        nsteps = self.cse.execution.steps_run
        psteps = self.psteps
        if istep > 0 and psteps and istep%psteps == 0 and istep != nsteps:
            self._show_performance()

    def postloop(self):
        # Final performance report after the march loop completes.
        self._show_performance()


class ProgressHook(sc.MeshHook):
    """
    Print simulation progess.
    """

    def __init__(self, cse, linewidth=50, **kw):
        #: The maximum width for progress mark.
        self.linewidth = linewidth
        super(ProgressHook, self).__init__(cse, **kw)

    def preloop(self):
        # Announce total step count before marching.
        istep = self.cse.execution.step_current
        nsteps = self.cse.execution.steps_run
        info = self.info
        info("Steps %d/%d\n" % (istep, nsteps))

    def postmarch(self):
        # Print one '#' per ``psteps`` steps and a progress/ETA line every
        # ``psteps*linewidth`` steps.
        istep = self.cse.execution.step_current
        nsteps = self.cse.execution.steps_run
        tstart = self.cse.log.time['run_march'][0]
        psteps = self.psteps
        linewidth = self.linewidth
        info = self.info
        # calculate estimated remaining time.
        tcurr = time.time()
        # NOTE(review): divides by istep — assumes postmarch is never invoked
        # at step 0; confirm against the hook-calling framework.
        tleft = (tcurr-tstart) * ((float(nsteps)-float(istep))/float(istep))
        # output information.
        if istep%psteps == 0:
            info("#")
            if istep > 0 and istep%(psteps*linewidth) == 0:
                info("\nStep %d/%d, %.1fs elapsed, %.1fs left\n" % (
                    istep, nsteps, tcurr-tstart, tleft,
                ))
            elif istep == nsteps:
                info("\nStep %d/%d done\n" % (istep, nsteps))


class FillAnchor(sc.MeshAnchor):
    """
    Fill the specified arrays of a :py:class:`~.solver.GasSolver` with
    corresponding value.
    """

    def __init__(self, svr, mappers=None, **kw):
        assert None is not mappers
        #: A :py:class:`dict` maps the names of attributes of the
        #: :py:attr:`MeshAnchor.svr <solvcon.MeshAnchor.svr>` to the
        #: filling value.
        self.mappers = mappers if mappers else {}
        super(FillAnchor, self).__init__(svr, **kw)

    def provide(self):
        # Fill each named solver array in place with its mapped value.
        for key, value in self.mappers.items():
            getattr(self.svr, key).fill(value)


################################################################################
# Begin CFL evaluation.

class CflAnchor(sc.MeshAnchor):
    """
    Counting CFL numbers.  Use :py:attr:`MeshSolver.marchret
    <solvcon.solver.MeshSolver.marchret>` to return results.  Overrides
    :py:meth:`~solvcon.anchor.MeshAnchor.postmarch` method.  Pair with
    :py:class:`CflHook`.
    """

    def __init__(self, svr, rsteps=None, **kw):
        """
        >>> from solvcon.testing import create_trivial_2d_blk
        >>> from solvcon.solver import MeshSolver
        >>> svr = MeshSolver(create_trivial_2d_blk())
        >>> ank = CflAnchor(svr) # doctest: +ELLIPSIS
        Traceback (most recent call last):
            ...
        TypeError: int() argument must be a string...
        >>> ank = CflAnchor(svr, 1)
        >>> ank.rsteps
        1
        """
        #: Steps to run (:py:class:`int`).
        self.rsteps = int(rsteps)
        super(CflAnchor, self).__init__(svr, **kw)

    def postmarch(self):
        # Every ``rsteps`` steps, publish min/max CFL and adjustment counts
        # into svr.marchret['cfl'] as [min, max, nadj, accumulated nadj].
        svr = self.svr
        istep = svr.step_global
        rsteps = self.rsteps
        if istep > 0 and istep%rsteps == 0:
            # download data.
            ocfl = svr.ocfl[svr.ngstcell:]
            cfl = svr.cfl[svr.ngstcell:]
            # determine extremum.
            mincfl = ocfl.min()
            maxcfl = ocfl.max()
            nadj = (cfl==1).sum()
            # store.
            lst = svr.marchret.setdefault('cfl', [0.0, 0.0, 0, 0])
            lst[0] = mincfl
            lst[1] = maxcfl
            lst[2] = nadj
            lst[3] += nadj


class CflHook(sc.MeshHook):
    """
    Makes sure CFL number is bounded and print averaged CFL number over time.
    Reports CFL information per time step and on finishing.  Overrides (i)
    :py:meth:`~solvcon.hook.MeshHook.postmarch` and (ii)
    :py:meth:`~solvocn.hook.MeshHook.postloop` methods.  Pair with
    :py:class:`CflAnchor`.
    """

    def __init__(self, cse, name='cfl', cflmin=0.0, cflmax=1.0, fullstop=True,
                 rsteps=None, **kw):
        #: Name of the CFL tool.
        self.name = name
        #: Miminum CFL value.
        self.cflmin = cflmin
        #: Maximum CFL value.
        self.cflmax = cflmax
        #: Flag to stop when CFL is out of bound.  Default is ``True``.
        self.fullstop = fullstop
        #: Accumulated CFL.
        self.aCFL = 0.0
        #: Mean CFL.
        self.mCFL = 0.0
        #: Hereditary minimum CFL.
        self.hnCFL = 1.0
        #: Hereditary maximum CFL.
        self.hxCFL = 0.0
        #: Number of adjusted CFL accumulated since last report.
        self.aadj = 0
        #: Total number of adjusted CFL since simulation started.
        self.haadj = 0
        # NOTE(review): no-op self-assignment; presumably a leftover from an
        # older keyword-handling scheme — confirm and remove upstream.
        rsteps = rsteps
        super(CflHook, self).__init__(cse, **kw)
        #: Steps to run.
        self.rsteps = rsteps if rsteps else self.psteps
        # Keep the remaining keywords for the anchor delivered later.
        self.ankkw = kw

    def drop_anchor(self, svr):
        # Deliver a paired CflAnchor carrying this hook's name and rsteps.
        ankkw = self.ankkw.copy()
        ankkw['name'] = self.name
        ankkw['rsteps'] = self.rsteps
        self._deliver_anchor(svr, CflAnchor, ankkw)

    def _notify(self, msg):
        # Out-of-bound CFL: raise when fullstop, otherwise just warn.
        if self.fullstop:
            raise RuntimeError(msg)
        else:
            warnings.warn(msg)

    def postmarch(self):
        info = self.info
        istep = self.cse.execution.step_current
        mr = self.cse.execution.marchret
        isp = self.cse.is_parallel
        rsteps = self.rsteps
        psteps = self.psteps
        # collect CFL.
        if istep > 0 and istep%rsteps == 0:
            # In parallel runs marchret is a list of per-solver dicts;
            # reduce across them.  Serial runs read the single dict.
            nCFL = max([m['cfl'][0] for m in mr]) if isp else mr['cfl'][0]
            xCFL = max([m['cfl'][1] for m in mr]) if isp else mr['cfl'][1]
            nadj = sum([m['cfl'][2] for m in mr]) if isp else mr['cfl'][2]
            aadj = sum([m['cfl'][3] for m in mr]) if isp else mr['cfl'][3]
            # Track hereditary extrema, ignoring NaN samples.
            hnCFL = min([nCFL, self.hnCFL])
            self.hnCFL = hnCFL if not np.isnan(hnCFL) else self.hnCFL
            hxCFL = max([xCFL, self.hxCFL])
            self.hxCFL = hxCFL if not np.isnan(hxCFL) else self.hxCFL
            self.aCFL += xCFL*rsteps
            self.mCFL = self.aCFL/istep
            self.aadj += aadj
            self.haadj += aadj
            # check.
            # NOTE(review): ``is not None`` would be the idiomatic test here.
            if self.cflmin != None and nCFL < self.cflmin:
                self._notify("CFL = %g < %g after step: %d" % (
                    nCFL, self.cflmin, istep))
            if self.cflmax != None and xCFL >= self.cflmax:
                self._notify("CFL = %g >= %g after step: %d" % (
                    xCFL, self.cflmax, istep))
            # output information.
            if istep > 0 and istep%psteps == 0:
                info("CFL = %.2f/%.2f - %.2f/%.2f adjusted: %d/%d/%d\n" % (
                    nCFL, xCFL, self.hnCFL, self.hxCFL, nadj,
                    self.aadj, self.haadj))
                self.aadj = 0

    def postloop(self):
        self.info("Averaged maximum CFL = %g.\n" % self.mCFL)

# End CFL evaluation.
################################################################################


################################################################################
# Begin solution output.

class MarchSaveAnchor(sc.MeshAnchor):
    """
    Save solution data into VTK XML format for a solver.
    """

    def __init__(self, svr, anames=None, compressor=None, fpdtype=None,
                 psteps=None, vtkfn_tmpl=None, **kw):
        assert None is not compressor
        assert None is not fpdtype
        assert None is not psteps
        assert None is not vtkfn_tmpl
        #: The arrays in :py:class:`GasSolver <.solver.GasSolver>` or
        #: :py:attr:`MeshSolver.der <solvcon.solver.MeshSolver.der>` to be
        #: saved.
        self.anames = anames if anames else dict()
        #: Compressor for binary data.  Can be either ``'gz'`` or ``''``.
        self.compressor = compressor
        #: String for floating point data type (NumPy convention).
        self.fpdtype = fpdtype
        #: The interval in step to save data.
        self.psteps = psteps
        #: The template string for the VTK file.
        self.vtkfn_tmpl = vtkfn_tmpl
        super(MarchSaveAnchor, self).__init__(svr, **kw)

    def _write(self, istep):
        """Collect the requested arrays and write one VTK file for istep."""
        ngstcell = self.svr.ngstcell
        sarrs = dict()
        varrs = dict()
        # collect data.
        for key in self.anames:
            # get the array (truthy value selects the derived-data dict).
            if self.anames[key]:
                arr = self.svr.der[key][ngstcell:]
            else:
                arr = getattr(self.svr, key)[ngstcell:]
            # put array in dict: 1-D -> scalar, ndim-wide -> vector,
            # anything else -> one scalar per column.
            if len(arr.shape) == 1:
                sarrs[key] = arr
            elif arr.shape[1] == self.svr.ndim:
                varrs[key] = arr
            else:
                for it in range(arr.shape[1]):
                    sarrs['%s[%d]' % (key, it)] = arr[:,it]
        # write.
        wtr = vtkxml.VtkXmlUstGridWriter(self.svr.blk, fpdtype=self.fpdtype,
            compressor=self.compressor, scalars=sarrs, vectors=varrs)
        svrn = self.svr.svrn
        # The template takes (istep,) in serial and (istep, svrn) in parallel.
        wtr.write(self.vtkfn_tmpl % (istep if svrn is None else (istep, svrn)))

    def preloop(self):
        # Snapshot of the initial condition.
        self._write(0)

    def postmarch(self):
        psteps = self.psteps
        istep = self.svr.step_global
        if istep%psteps == 0:
            self._write(istep)

    def postloop(self):
        # Write the final step if postmarch did not already cover it.
        psteps = self.psteps
        istep = self.svr.step_global
        if istep%psteps != 0:
            self._write(istep)


class PMarchSave(sc.MeshHook):
    """
    Save the geometry and variables in a case when time marching in parallel
    VTK XML format.
    """

    def __init__(self, cse, anames=None, compressor='gz', fpdtype=None,
                 altdir='', altsym='', vtkfn_tmpl=None, **kw):
        #: The arrays in :py:class:`GasSolver <.solver.GasSolver>` or
        #: :py:attr:`MeshSolver.der <solvcon.solver.MeshSolver.der>` to be
        #: saved.  Format is (name, inder, ndim), (name, inder, ndim) ...  For
        #: ndim > 0 the array is a spatial vector, for ndim == 0 a simple
        #: scalar, and ndim < 0 a list of scalar.
        self.anames = anames if anames else list()
        #: Compressor for binary data.  Can be either ``'gz'`` or ``''``.
        self.compressor = compressor
        #: String for floating point data type (NumPy convention).
        self.fpdtype = fpdtype if fpdtype else str(cse.execution.fpdtype)
        #: The alternate directory to save the VTK files.
        self.altdir = altdir
        #: The symbolic link in basedir pointing to the alternate directory to
        #: save the VTK files.
        self.altsym = altsym
        super(PMarchSave, self).__init__(cse, **kw)
        # override vtkfn_tmpl.
        nsteps = cse.execution.steps_run
        basefn = cse.io.basefn
        if self.altdir:
            vdir = self.altdir
            if self.altsym:
                altsym = os.path.join(cse.io.basedir, self.altsym)
                if not os.path.exists(altsym):
                    os.symlink(vdir, altsym)
        else:
            vdir = cse.io.basedir
        if not os.path.exists(vdir):
            os.makedirs(vdir)
        if None is vtkfn_tmpl:
            # Zero-pad the step number wide enough for the total step count.
            vtkfn_tmpl = basefn + "_%%0%dd"%int(math.ceil(math.log10(nsteps))+1)
            vtkfn_tmpl += '.pvtu'
        #: The template string for the VTK file.
        self.vtkfn_tmpl = os.path.join(vdir, vtkfn_tmpl)
        # craft ext name template.
        npart = cse.execution.npart
        if npart:
            self.pextmpl = '.p%%0%dd'%int(math.ceil(math.log10(npart))+1)
        else:
            self.pextmpl = ''
        #: Template for the extension of split VTK file name.
        self.pextmpl += '.vtu'

    def drop_anchor(self, svr):
        # Deliver a MarchSaveAnchor that writes the per-solver .vtu pieces.
        basefn = os.path.splitext(self.vtkfn_tmpl)[0]
        anames = dict([(ent[0], ent[1]) for ent in self.anames])
        ankkw = dict(anames=anames, compressor=self.compressor,
            fpdtype=self.fpdtype, psteps=self.psteps,
            vtkfn_tmpl=basefn+self.pextmpl)
        self._deliver_anchor(svr, MarchSaveAnchor, ankkw)

    def _write(self, istep):
        """Write the parallel .pvtu index file that references the pieces."""
        if not self.cse.execution.npart:
            return
        # collect data: the index file only needs names and dtypes.
        sarrs = dict()
        varrs = dict()
        for key, inder, ndim in self.anames:
            if ndim > 0:
                varrs[key] = self.fpdtype
            elif ndim < 0:
                for it in range(abs(ndim)):
                    sarrs['%s[%d]' % (key, it)] = self.fpdtype
            else:
                sarrs[key] = self.fpdtype
        # write.
        wtr = vtkxml.PVtkXmlUstGridWriter(self.blk, fpdtype=self.fpdtype,
            scalars=sarrs, vectors=varrs,
            npiece=self.cse.execution.npart, pextmpl=self.pextmpl)
        vtkfn = self.vtkfn_tmpl % istep
        self.info('Writing \n %s\n... ' % vtkfn)
        wtr.write(vtkfn)
        self.info('done.\n')

    def preloop(self):
        # Index file for the initial snapshot.
        self._write(0)

    def postmarch(self):
        psteps = self.psteps
        istep = self.cse.execution.step_current
        if istep%psteps == 0:
            self._write(istep)

    def postloop(self):
        # Write the final index file if postmarch did not already cover it.
        psteps = self.psteps
        istep = self.cse.execution.step_current
        if istep%psteps != 0:
            self._write(istep)

# End solution output.
################################################################################

# vim: set ff=unix fenc=utf8 ft=python ai et sw=4 ts=4 tw=79:
# Package version, kept as a tuple for programmatic comparison.
__version_info__ = (0, 0, 3)
# Dotted version string with the beta-1 suffix, e.g. '0.0.3b1'.
__version__ = '{}.{}.{}'.format(*__version_info__) + 'b1'

# AppConfig Django loads by default for this package.
default_app_config = 'ajaxviews.apps.AjaxViewsConfig'

# PyCharm suppress inspections list
# usage: # noinspection <name> ...
#
# PyAbstractClass
# PyArgumentEqualDefault
# PyArgumentList
# PyAssignmentToLoopOrWithParameter
# PyAttributeOutsideInit
# PyAugmentAssignment
# PyBroadException
# PyByteLiteral
# PyCallByClass
# PyCallingNonCallable
# PyChainedComparisons
# PyClassHasNoInit
# PyClassicStyleClass
# PyComparisonWithNone
# PyCompatibility
# PyDecorator
# PyDefaultArgument
# PyDeprecatedModules
# PyDeprecation
# PyDictCreation
# PyDictDuplicateKeys
# PyDocstring
# PyDocstringTypes
# PyExceptClausesOrder
# PyExceptionInherit
# PyFromFutureImport
# PyGlobalUndefined
# PyInconsistentIndentation
# PyInitNewSignature
# PyInterpreter
# PyListCreation
# PyMandatoryEncoding
# PyMethodFirstArgAssignment
# PyMethodMayBeStatic
# PyMethodOverriding
# PyMethodParameters
# PyMissingConstructor
# PyNestedDecorators
# PyNonAsciiChar
# PyNoneFunctionAssignment
# PyOldStyleClasses
# PyPackageRequirements
# PyPep8
# PyPep8Naming
# PyPropertyAccess
# PyPropertyDefinition
# PyProtectedMember
# PyRaisingNewStyleClass
# PyRedeclaration
# PyRedundantParentheses
# PyReturnFromInit
# PySetFunctionToLiteral
# PyShadowingBuiltins
# PyShadowingNames
# PySimplifyBooleanCheck
# PySingleQuotedDocstring
# PyStatementEffect
# PyStringException
# PyStringFormat
# PySuperArguments
# PyTrailingSemicolon
# PyTupleAssignmentBalance
# PyTupleItemAssignment
# PyTypeChecker
# PyUnboundLocalVariable
# PyUnnecessaryBackslash
# PyUnreachableCode
# PyUnresolvedReferences
# PyUnsupportedFeatures
# PyUnusedLocal

# """
# This is a reST style.
#
# :param param1: this is a first param
# :param param2: this is a second param
# :returns: this is a description of what is returned
# :raises keyError: raises an exception
# """

# This is Google style.
#
# an_example_pypi_project.__init__
# """A pypi demonstration vehicle.
#
# ..
moduleauthor:: Andrew Carter <andrew@invalid.com> # # """ # # import useful_1 # import useful_2 # # # def start(): # "This starts this module running ..." # an_example_pypi_project.useful_1 # """ # .. module:: useful_1 # :platform: Unix, Windows # :synopsis: A useful module indeed. # # .. moduleauthor:: Andrew Carter <andrew@invalid.com> # # # """ # # def public_fn_with_googley_docstring(name, state=None): # """This function does something. # # Args: # name (str): The name to use. # # Kwargs: # state (bool): Current state to be in. # # Returns: # int. The return code:: # # 0 -- Success! # 1 -- No good. # 2 -- Try again. # # Raises: # AttributeError, KeyError # # A really great idea. A way you might use me is # # >>> print public_fn_with_googley_docstring(name='foo', state=None) # 0 # # BTW, this always returns 0. **NEVER** use with :class:`MyPublicClass`. # # """ # return 0 # # def public_fn_with_sphinxy_docstring(name, state=None): # """This function does something. # # :param name: The name to use. # :type name: str. # :param state: Current state to be in. # :type state: bool. # :returns: int -- the return code. # :raises: AttributeError, KeyError # # """ # return 0 # # def public_fn_without_docstring(): # return True # # def _private_fn_with_docstring(foo, bar='baz', foobarbas=None): # """I have a docstring, but won't be imported if you just use ``:members:``. # """ # return None # # # class MyPublicClass(object): # """We use this as a public class example class. # # You never call this class before calling :func:`public_fn_with_sphinxy_docstring`. # # .. note:: # # An example of intersphinx is this: you **cannot** use :mod:`pickle` on this class. # # """ # # def __init__(self, foo, bar='baz'): # """A really simple class. # # Args: # foo (str): We all know what foo does. # # Kwargs: # bar (str): Really, same as foo. 
# # """ # self._foo = foo # self._bar = bar # # def get_foobar(self, foo, bar=True): # """This gets the foobar # # This really should have a full function definition, but I am too lazy. # # >>> print get_foobar(10, 20) # 30 # >>> print get_foobar('a', 'b') # ab # # Isn't that what you want? # # """ # return foo + bar # # def _get_baz(self, baz=None): # """A private function to get baz. # # This really should have a full function definition, but I am too lazy. # # """ # return baz # code.rst # Documentation for the Code # ************************** # # .. automodule:: an_example_pypi_project # # # useful #1 -- auto members # ========================= # # This is something I want to say that is not in the docstring. # # .. automodule:: an_example_pypi_project.useful_1 # :members: # # useful #2 -- explicit members # ============================= # # This is something I want to say that is not in the docstring. # # .. automodule:: an_example_pypi_project.useful_2 # :members: public_fn_with_sphinxy_docstring, _private_fn_with_docstring # # .. autoclass:: MyPublicClass # :members: get_foobar, _get_baz # -------------------------------------------------------------------------------------------------- # """Example Google style docstrings. # # This module demonstrates documentation as specified by the `Google Python # Style Guide`_. Docstrings may extend over multiple lines. Sections are created # with a section header and a colon followed by a block of indented text. # # Example: # Examples can be given using either the ``Example`` or ``Examples`` # sections. Sections support any reStructuredText formatting, including # literal blocks:: # # $ python example_google.py # # Section breaks are created by resuming unindented text. Section breaks # are also implicitly created anytime a new section starts. 
# # Attributes: # module_level_variable1 (int): Module level variables may be documented in # either the ``Attributes`` section of the module docstring, or in an # inline docstring immediately following the variable. # # Either form is acceptable, but the two should not be mixed. Choose # one convention to document module level variables and be consistent # with it. # # Todo: # * For module TODOs # * You have to also use ``sphinx.ext.todo`` extension # # .. _Google Python Style Guide: # http://google.github.io/styleguide/pyguide.html # # """ # # module_level_variable1 = 12345 # # module_level_variable2 = 98765 # """int: Module level variable documented inline. # # The docstring may span multiple lines. The type may optionally be specified # on the first line, separated by a colon. # """ # # # def function_with_types_in_docstring(param1, param2): # """Example function with types documented in the docstring. # # `PEP 484`_ type annotations are supported. If attribute, parameter, and # return types are annotated according to `PEP 484`_, they do not need to be # included in the docstring: # # Args: # param1 (int): The first parameter. # param2 (str): The second parameter. # # Returns: # bool: The return value. True for success, False otherwise. # # .. _PEP 484: # https://www.python.org/dev/peps/pep-0484/ # # """ # # # def function_with_pep484_type_annotations(param1: int, param2: str) -> bool: # """Example function with PEP 484 type annotations. # # Args: # param1: The first parameter. # param2: The second parameter. # # Returns: # The return value. True for success, False otherwise. # # """ # # # def module_level_function(param1, param2=None, *args, **kwargs): # """This is an example of a module level function. # # Function parameters should be documented in the ``Args`` section. The name # of each parameter is required. The type and description of each parameter # is optional, but should be included if not obvious. 
# # If \*args or \*\*kwargs are accepted, # they should be listed as ``*args`` and ``**kwargs``. # # The format for a parameter is:: # # name (type): description # The description may span multiple lines. Following # lines should be indented. The "(type)" is optional. # # Multiple paragraphs are supported in parameter # descriptions. # # Args: # param1 (int): The first parameter. # param2 (:obj:`str`, optional): The second parameter. Defaults to None. # Second line of description should be indented. # *args: Variable length argument list. # **kwargs: Arbitrary keyword arguments. # # Returns: # bool: True if successful, False otherwise. # # The return type is optional and may be specified at the beginning of # the ``Returns`` section followed by a colon. # # The ``Returns`` section may span multiple lines and paragraphs. # Following lines should be indented to match the first line. # # The ``Returns`` section supports any reStructuredText formatting, # including literal blocks:: # # { # 'param1': param1, # 'param2': param2 # } # # Raises: # AttributeError: The ``Raises`` section is a list of all exceptions # that are relevant to the interface. # ValueError: If `param2` is equal to `param1`. # # """ # if param1 == param2: # raise ValueError('param1 may not be equal to param2') # return True # # # def example_generator(n): # """Generators have a ``Yields`` section instead of a ``Returns`` section. # # Args: # n (int): The upper limit of the range to generate, from 0 to `n` - 1. # # Yields: # int: The next number in the range of 0 to `n` - 1. # # Examples: # Examples should be written in doctest format, and should illustrate how # to use the function. # # >>> print([i for i in example_generator(4)]) # [0, 1, 2, 3] # # """ # for i in range(n): # yield i # # # class ExampleError(Exception): # """Exceptions are documented in the same way as classes. 
# # The __init__ method may be documented in either the class level # docstring, or as a docstring on the __init__ method itself. # # Either form is acceptable, but the two should not be mixed. Choose one # convention to document the __init__ method and be consistent with it. # # Note: # Do not include the `self` parameter in the ``Args`` section. # # Args: # msg (str): Human readable string describing the exception. # code (:obj:`int`, optional): Error code. # # Attributes: # msg (str): Human readable string describing the exception. # code (int): Exception error code. # # """ # # def __init__(self, msg, code): # self.msg = msg # self.code = code # # # class ExampleClass(object): # """The summary line for a class docstring should fit on one line. # # If the class has public attributes, they may be documented here # in an ``Attributes`` section and follow the same formatting as a # function's ``Args`` section. Alternatively, attributes may be documented # inline with the attribute's declaration (see __init__ method below). # # Properties created with the ``@property`` decorator should be documented # in the property's getter method. # # Attributes: # attr1 (str): Description of `attr1`. # attr2 (:obj:`int`, optional): Description of `attr2`. # # """ # # def __init__(self, param1, param2, param3): # """Example of docstring on the __init__ method. # # The __init__ method may be documented in either the class level # docstring, or as a docstring on the __init__ method itself. # # Either form is acceptable, but the two should not be mixed. Choose one # convention to document the __init__ method and be consistent with it. # # Note: # Do not include the `self` parameter in the ``Args`` section. # # Args: # param1 (str): Description of `param1`. # param2 (:obj:`int`, optional): Description of `param2`. Multiple # lines are supported. # param3 (:obj:`list` of :obj:`str`): Description of `param3`. 
# # """ # self.attr1 = param1 # self.attr2 = param2 # self.attr3 = param3 #: Doc comment *inline* with attribute # # #: list of str: Doc comment *before* attribute, with type specified # self.attr4 = ['attr4'] # # self.attr5 = None # """str: Docstring *after* attribute, with type specified.""" # # @property # def readonly_property(self): # """str: Properties should be documented in their getter method.""" # return 'readonly_property' # # @property # def readwrite_property(self): # """:obj:`list` of :obj:`str`: Properties with both a getter and setter # should only be documented in their getter method. # # If the setter method contains notable behavior, it should be # mentioned here. # """ # return ['readwrite_property'] # # @readwrite_property.setter # def readwrite_property(self, value): # value # # def example_method(self, param1, param2): # """Class methods are similar to regular functions. # # Note: # Do not include the `self` parameter in the ``Args`` section. # # Args: # param1: The first parameter. # param2: The second parameter. # # Returns: # True if successful, False otherwise. # # """ # return True # # def __special__(self): # """By default special members with docstrings are not included. # # Special members are any methods or attributes that start with and # end with a double underscore. Any special member with a docstring # will be included in the output, if # ``napoleon_include_special_with_doc`` is set to True. # # This behavior can be enabled by changing the following setting in # Sphinx's conf.py:: # # napoleon_include_special_with_doc = True # # """ # pass # # def __special_without_docstring__(self): # pass # # def _private(self): # """By default private members are not included. # # Private members are any methods or attributes that start with an # underscore and are *not* special. By default they are not included # in the output. 
# # This behavior can be changed such that private members *are* included # by changing the following setting in Sphinx's conf.py:: # # napoleon_include_private_with_doc = True # # """ # pass # # def _private_without_docstring(self): # pass
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MCMC diagnostic utilities."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.contrib.bayesflow.python.ops import mcmc_diagnostics_impl as mcmc_diagnostics
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test

# Fixed seed keeps the sampled chains (and hence the diagnostics) reproducible.
rng = np.random.RandomState(42)


class _PotentialScaleReductionTest(object):
  """Mixin test body for potential_scale_reduction (Gelman-Rubin r-hat).

  Subclasses supply `use_static_shape` to run against static or dynamic
  tensor shapes.
  """

  @property
  def use_static_shape(self):
    # NOTE(review): "impliment" typo preserved -- it is a runtime string.
    raise NotImplementedError(
        "Subclass failed to impliment `use_static_shape`.")

  def testListOfStatesWhereFirstPassesSecondFails(self):
    """Simple test showing API with two states. Read first!."""
    n_samples = 1000

    # state_0 is two scalar chains taken from iid Normal(0, 1). Will pass.
    state_0 = rng.randn(n_samples, 2)

    # state_1 is three 4-variate chains taken from Normal(0, 1) that have been
    # shifted. Since every chain is shifted, they are not the same, and the
    # test should fail.
    offset = np.array([1., -1., 2.]).reshape(3, 1)
    state_1 = rng.randn(n_samples, 3, 4) + offset

    rhat = mcmc_diagnostics.potential_scale_reduction(
        state=[state_0, state_1], independent_chain_ndims=1)

    self.assertIsInstance(rhat, list)
    with self.test_session() as sess:
      rhat_0_, rhat_1_ = sess.run(rhat)

    # r_hat_0 should be close to 1, meaning test is passed.
    self.assertAllEqual((), rhat_0_.shape)
    self.assertAllClose(1., rhat_0_, rtol=0.02)

    # r_hat_1 should be greater than 1.2, meaning test has failed.
    self.assertAllEqual((4,), rhat_1_.shape)
    self.assertAllEqual(np.ones_like(rhat_1_).astype(bool), rhat_1_ > 1.2)

  def check_results(self, state_, independent_chain_shape, should_pass):
    """Run r-hat on `state_` and assert pass (r-hat ~ 1) or fail (> 1.2)."""
    sample_ndims = 1
    independent_chain_ndims = len(independent_chain_shape)
    with self.test_session():
      state = array_ops.placeholder_with_default(
          input=state_, shape=state_.shape if self.use_static_shape else None)

      rhat = mcmc_diagnostics.potential_scale_reduction(
          state, independent_chain_ndims=independent_chain_ndims)
      if self.use_static_shape:
        # r-hat drops the sample and chain dims, keeping the trailing shape.
        self.assertAllEqual(
            state_.shape[sample_ndims + independent_chain_ndims:], rhat.shape)

      rhat_ = rhat.eval()
      if should_pass:
        self.assertAllClose(np.ones_like(rhat_), rhat_, atol=0, rtol=0.02)
      else:
        self.assertAllEqual(np.ones_like(rhat_).astype(bool), rhat_ > 1.2)

  def iid_normal_chains_should_pass_wrapper(self,
                                            sample_shape,
                                            independent_chain_shape,
                                            other_shape,
                                            dtype=np.float32):
    """Check results with iid normal chains."""
    state_shape = sample_shape + independent_chain_shape + other_shape
    state_ = rng.randn(*state_shape).astype(dtype)
    # The "other" dimensions do not have to be identical, just independent, so
    # force them to not be identical.
    if other_shape:
      state_ *= rng.rand(*other_shape).astype(dtype)
    self.check_results(state_, independent_chain_shape, should_pass=True)

  def testPassingIIDNdimsAreIndependentOneOtherZero(self):
    self.iid_normal_chains_should_pass_wrapper(
        sample_shape=[10000], independent_chain_shape=[4], other_shape=[])

  def testPassingIIDNdimsAreIndependentOneOtherOne(self):
    self.iid_normal_chains_should_pass_wrapper(
        sample_shape=[10000], independent_chain_shape=[3], other_shape=[7])

  def testPassingIIDNdimsAreIndependentOneOtherTwo(self):
    self.iid_normal_chains_should_pass_wrapper(
        sample_shape=[10000], independent_chain_shape=[2], other_shape=[5, 7])

  def testPassingIIDNdimsAreIndependentTwoOtherTwo64Bit(self):
    self.iid_normal_chains_should_pass_wrapper(
        sample_shape=[10000],
        independent_chain_shape=[2, 3],
        other_shape=[5, 7],
        dtype=np.float64)

  def offset_normal_chains_should_fail_wrapper(
      self, sample_shape, independent_chain_shape, other_shape):
    """Check results with normal chains that are offset from each other."""
    state_shape = sample_shape + independent_chain_shape + other_shape
    state_ = rng.randn(*state_shape)
    # Add a significant offset to the different (formerly iid) chains.
    offset = np.linspace(
        0, 2, num=np.prod(independent_chain_shape)).reshape([1] * len(
            sample_shape) + independent_chain_shape + [1] * len(other_shape))
    state_ += offset
    self.check_results(state_, independent_chain_shape, should_pass=False)

  def testFailingOffsetNdimsAreSampleOneIndependentOneOtherOne(self):
    self.offset_normal_chains_should_fail_wrapper(
        sample_shape=[10000], independent_chain_shape=[2], other_shape=[5])


class PotentialScaleReductionStaticTest(test.TestCase,
                                        _PotentialScaleReductionTest):
  """Runs the mixin with statically-known tensor shapes."""

  @property
  def use_static_shape(self):
    return True

  def testIndependentNdimsLessThanOneRaises(self):
    # independent_chain_ndims must be >= 1; zero must raise.
    with self.assertRaisesRegexp(ValueError, "independent_chain_ndims"):
      mcmc_diagnostics.potential_scale_reduction(
          rng.rand(2, 3, 4), independent_chain_ndims=0)


class PotentialScaleReductionDynamicTest(test.TestCase,
                                         _PotentialScaleReductionTest):
  """Runs the mixin with dynamic (placeholder) tensor shapes."""

  @property
  def use_static_shape(self):
    return False


class _ReduceVarianceTest(object):
  """Mixin test body comparing _reduce_variance against np.var."""

  @property
  def use_static_shape(self):
    # NOTE(review): "impliment" typo preserved -- it is a runtime string.
    raise NotImplementedError(
        "Subclass failed to impliment `use_static_shape`.")

  def check_versus_numpy(self, x_, axis, biased, keepdims):
    """Assert _reduce_variance matches np.var (ddof 0 if biased else 1)."""
    with self.test_session():
      x_ = np.asarray(x_)
      x = array_ops.placeholder_with_default(
          input=x_, shape=x_.shape if self.use_static_shape else None)
      var = mcmc_diagnostics._reduce_variance(
          x, axis=axis, biased=biased, keepdims=keepdims)
      np_var = np.var(x_, axis=axis, ddof=0 if biased else 1,
                      keepdims=keepdims)

      if self.use_static_shape:
        self.assertAllEqual(np_var.shape, var.shape)

      var_ = var.eval()
      # We will mask below, which changes shape, so check shape explicitly here.
      self.assertAllEqual(np_var.shape, var_.shape)

      # We get NaN when we divide by zero due to the size being the same as ddof
      nan_mask = np.isnan(np_var)
      if nan_mask.any():
        self.assertTrue(np.isnan(var_[nan_mask]).all())
      self.assertAllClose(np_var[~nan_mask], var_[~nan_mask], atol=0,
                          rtol=0.02)

  def testScalarBiasedTrue(self):
    self.check_versus_numpy(x_=-1.234, axis=None, biased=True, keepdims=False)

  def testScalarBiasedFalse(self):
    # This should result in NaN.
    self.check_versus_numpy(x_=-1.234, axis=None, biased=False, keepdims=False)

  # NOTE(review): the four test names below say "BiasedFalse" but three of
  # them pass biased=True -- likely misnomers; confirm intent before renaming.
  def testShape2x3x4AxisNoneBiasedFalseKeepdimsFalse(self):
    self.check_versus_numpy(
        x_=rng.randn(2, 3, 4), axis=None, biased=True, keepdims=False)

  def testShape2x3x4Axis1BiasedFalseKeepdimsTrue(self):
    self.check_versus_numpy(
        x_=rng.randn(2, 3, 4), axis=1, biased=True, keepdims=True)

  def testShape2x3x4x5Axis13BiasedFalseKeepdimsTrue(self):
    self.check_versus_numpy(
        x_=rng.randn(2, 3, 4, 5), axis=1, biased=True, keepdims=True)

  def testShape2x3x4x5Axis13BiasedFalseKeepdimsFalse(self):
    self.check_versus_numpy(
        x_=rng.randn(2, 3, 4, 5), axis=1, biased=False, keepdims=False)


class ReduceVarianceTestStaticShape(test.TestCase, _ReduceVarianceTest):
  """Runs the mixin with statically-known tensor shapes."""

  @property
  def use_static_shape(self):
    return True


class ReduceVarianceTestDynamicShape(test.TestCase, _ReduceVarianceTest):
  """Runs the mixin with dynamic (placeholder) tensor shapes."""

  @property
  def use_static_shape(self):
    return False


if __name__ == "__main__":
  test.main()
# 2D Fluid Simulation using FHP LGCA (Lattice Gas Cellular Automata)
# Simulates fluid flow in a circular channel.
# Particles go out from right side and enter back from left.
# Reference:
# Lattice Gas Cellular Automata and Lattice Boltzmann Models by Wolf-Gladrow
# FB - 20140818
#
# NOTE(review): this is Python 2 source (print statements, integer '/'
# division used for indexing). Documented only; not ported.
import math
import random
from PIL import Image
imgx = 512; imgy = 512 # image size
image = Image.new("RGB", (imgx, imgy))
pixels = image.load()

# simulation parameters:
tilesX = 32
tilesY = 32
n = 8 # coarse graining tile size is n by n
timeSteps = 300

nodesX = tilesX * n
nodesY = tilesY * n
# nodes[z][y][x] is the 0/1 occupation of lattice direction z at node (x, y);
# FHP has 6 directions on a hexagonal lattice (z = 0..5, opposite of z is z+3
# mod 6).
nodes = [[[0 for x in range(nodesX)] for y in range(nodesY)] for z in range(6)]
# obstacle[y][x] == 1 marks a solid (no-slip) node.
obstacle = [[0 for x in range(nodesX)] for y in range(nodesY)]

# insert a square obstacle in the middle
for y in range(nodesY / 4):
    for x in range(nodesX / 4):
        obstacle[y + nodesY / 2 - nodesY / 8][x + nodesX / 2 - nodesX / 8] = 1

# fill-up with fluid flowing towards right
# (direction 0 points in +x, so one rightward particle per fluid node)
for y in range(1, nodesY - 1): # do not include top/bottom walls
    for x in range(nodesX):
        if obstacle[y][x] != 1:
            nodes[0][y][x] = 1

for t in range(timeSteps): # run the simulation

    # HANDLE COLLISIONS

    # collisions at non-boundary nodes
    for y in range(1, nodesY - 1): # do not include top/bottom walls
        for x in range(nodesX):
            if obstacle[y][x] != 1:
                cell = [nodes[z][y][x] for z in range(6)]
                numParticles = sum(cell)
                # only 2 or 3 symmetric particle collisions implemented here
                if numParticles == 3:
                    # 3-particle collision: only the symmetric head-on case
                    # (alternating directions 0,2,4 or 1,3,5) scatters, by
                    # inverting the cell.
                    if cell[0] == cell[2] and cell[2] == cell[4]:
                        # invert the cell contents
                        for z in range(6):
                            nodes[z][y][x] = 1 - cell[z]
                elif numParticles == 2:
                    # find the cell of one of the particles
                    p = cell.index(1)
                    # its diametric opposite must occupied as well
                    # (p is the lowest occupied index, so an opposite pair
                    # always has p <= 2)
                    if p > 2:
                        pass
                    elif cell[p + 3] == 0:
                        pass
                    else:
                        # randomly rotate the particle pair clockwise or
                        # counterclockwise
                        if random.randint(0, 1) == 0:
                            # counterclockwise
                            nodes[0][y][x] = cell[5]
                            nodes[1][y][x] = cell[0]
                            nodes[2][y][x] = cell[1]
                            nodes[3][y][x] = cell[2]
                            nodes[4][y][x] = cell[3]
                            nodes[5][y][x] = cell[4]
                        else:
                            # clockwise
                            nodes[0][y][x] = cell[1]
                            nodes[1][y][x] = cell[2]
                            nodes[2][y][x] = cell[3]
                            nodes[3][y][x] = cell[4]
                            nodes[4][y][x] = cell[5]
                            nodes[5][y][x] = cell[0]

    # collisions along top/bottom walls (no-slip): reflect every particle
    # back the way it came (direction z -> z+3 mod 6).
    for x in range(nodesX):
        cell = [nodes[z][0][x] for z in range(6)]
        nodes[0][0][x] = cell[3]
        nodes[1][0][x] = cell[4]
        nodes[2][0][x] = cell[5]
        nodes[3][0][x] = cell[0]
        nodes[4][0][x] = cell[1]
        nodes[5][0][x] = cell[2]
        cell = [nodes[z][nodesY - 1][x] for z in range(6)]
        nodes[0][nodesY - 1][x] = cell[3]
        nodes[1][nodesY - 1][x] = cell[4]
        nodes[2][nodesY - 1][x] = cell[5]
        nodes[3][nodesY - 1][x] = cell[0]
        nodes[4][nodesY - 1][x] = cell[1]
        nodes[5][nodesY - 1][x] = cell[2]

    # collisions at obstacle points (no-slip): same full reflection.
    for y in range(nodesY):
        for x in range(nodesX):
            if obstacle[y][x] == 1:
                cell = [nodes[z][y][x] for z in range(6)]
                nodes[0][y][x] = cell[3]
                nodes[1][y][x] = cell[4]
                nodes[2][y][x] = cell[5]
                nodes[3][y][x] = cell[0]
                nodes[4][y][x] = cell[1]
                nodes[5][y][x] = cell[2]

    # HANDLE MOVEMENTS
    # Stream every particle one node along its direction into a fresh grid;
    # x wraps around (periodic channel), and odd/even rows of the hexagonal
    # lattice shift the diagonal neighbors differently.

    nodesNew = [[[0 for x in range(nodesX)] for y in range(nodesY)]
                for z in range(6)]
    for y in range(nodesY):
        for x in range(nodesX):
            cell = [nodes[z][y][x] for z in range(6)]

            # propagation in the 0-direction (+x, wraps right edge to left)
            neighbor_y = y
            if x == nodesX - 1:
                neighbor_x = 0
            else:
                neighbor_x = x + 1
            nodesNew[0][neighbor_y][neighbor_x] = cell[0]

            # propagation in the 1-direction (down-right diagonal)
            if y != nodesY - 1:
                neighbor_y = y + 1
                if y % 2 == 1:
                    if x == nodesX - 1:
                        # NOTE(review): wraps to column 1, not 0, unlike the
                        # other directions -- looks suspicious; confirm intent.
                        neighbor_x = 1
                    else:
                        neighbor_x = x + 1
                else:
                    neighbor_x = x
                nodesNew[1][neighbor_y][neighbor_x] = cell[1]

            # propagation in the 2-direction (down-left diagonal)
            if y != nodesY - 1:
                neighbor_y = y + 1
                if y % 2 == 0:
                    if x == 0:
                        neighbor_x = nodesX - 1
                    else:
                        neighbor_x = x - 1
                else:
                    neighbor_x = x
                nodesNew[2][neighbor_y][neighbor_x] = cell[2]

            # propagation in the 3-direction (-x, wraps left edge to right)
            neighbor_y = y
            if x == 0:
                neighbor_x = nodesX - 1
            else:
                neighbor_x = x - 1
            nodesNew[3][neighbor_y][neighbor_x] = cell[3]

            # propagation in the 4-direction (up-left diagonal)
            if y != 0:
                neighbor_y = y - 1
                if y % 2 == 0:
                    if x == 0:
                        neighbor_x = nodesX - 1
                    else:
                        neighbor_x = x - 1
                else:
                    neighbor_x = x
                nodesNew[4][neighbor_y][neighbor_x] = cell[4]

            # propagation in the 5-direction (up-right diagonal)
            if y != 0:
                neighbor_y = y - 1
                if y % 2 == 1:
                    if x == nodesX - 1:
                        neighbor_x = 0
                    else:
                        neighbor_x = x + 1
                else:
                    neighbor_x = x
                nodesNew[5][neighbor_y][neighbor_x] = cell[5]

    nodes = nodesNew
    print '%' + str(100 * t / timeSteps) # show progress

# Create an image from the final state
# Calculate average velocity vectors for tiles
aveVelocityVectorMag = [[0.0 for x in range(tilesX)] for y in range(tilesY)]
aveVelocityVectorAng = [[0.0 for x in range(tilesX)] for y in range(tilesY)]
pi2 = math.pi * 2.0
# unit velocity components of the 6 lattice directions
dx = [math.cos(i * pi2 / 6.0) for i in range(6)]
dy = [math.sin(i * pi2 / 6.0) for i in range(6)]
for ty in range(tilesY):
    for tx in range(tilesX):
        # sum the particle velocities over the n-by-n tile (coarse graining)
        vx = 0.0
        vy = 0.0
        for cy in range(n):
            for cx in range(n):
                for z in range(6):
                    if nodes[z][ty * n + cy][tx * n + cx] == 1 \
                        and obstacle[ty * n + cy][tx * n + cx] == 0:
                        vx += dx[z]
                        vy += dy[z]
        aveVelocityVectorMag[ty][tx] = math.hypot(vx, vy) / n ** 2.0
        # angle normalized into [0, 2*pi)
        aveVelocityVectorAng[ty][tx] = (math.atan2(vy, vx) + pi2) % pi2

for ky in range(imgy):
    iy = nodesY * ky / imgy
    jy = tilesY * ky / imgy
    for kx in range(imgx):
        ix = nodesX * kx / imgx
        jx = tilesX * kx / imgx
        if obstacle[iy][ix] == 1:
            # paint the obstacle(s)
            # NOTE(review): comment-free original assigns blue (blu = 255)
            # for obstacles despite "red = 0" reading like intent -- confirm.
            red = 0
            grn = 0
            blu = 255
        else:
            # use vector magnitude and angle for coloring
            aveVelVecMag = aveVelocityVectorMag[jy][jx]
            aveVelVecAng = aveVelocityVectorAng[jy][jx]
            red = int(aveVelVecMag * 255)
            grn = int(aveVelVecAng / pi2 * 255)
            blu = 0
        pixels[kx, ky] = (red, grn, blu)

image.save("FHP_LGCA_2DFluidSim.png", "PNG")
########################################################################## # # Copyright (c) 2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# ##########################################################################

# Test suite for Gaffer.Reference: loading exported Box contents (.grf files),
# serialisation round-trips, reloading, promoted-plug values and metadata,
# error tolerance, and DependencyNode behaviour.
# NOTE(review): tests write fixture files to hard-coded /tmp paths; tearDown()
# removes them. Fine for the era of this code, but not parallel-safe — verify
# against the project's test-fixture conventions.

import os
import unittest

import IECore

import Gaffer
import GafferTest


class ReferenceTest( GafferTest.TestCase ) :

    def setUp( self ) :

        # stash the SphereNode so we can restore it in
        # tearDown() - we're going to mischievously delete
        # it from the GafferTest module to induce errors
        # during testing.
        self.__SphereNode = GafferTest.SphereNode

    def testLoad( self ) :

        # A Box exported for reference should load back with its nodes
        # and internal connections intact.
        s = Gaffer.ScriptNode()
        s["n1"] = GafferTest.AddNode()
        s["n2"] = GafferTest.AddNode()
        s["n2"]["op1"].setInput( s["n1"]["sum"] )

        b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["n1"] ] ) )
        b.exportForReference( "/tmp/test.grf" )

        s["r"] = Gaffer.Reference()
        s["r"].load( "/tmp/test.grf" )

        self.assertTrue( "n1" in s["r"] )
        self.assertTrue( s["r"]["out"].getInput().isSame( s["r"]["n1"]["sum"] ) )

    def testSerialisation( self ) :

        # Serialising a script containing a Reference should store only the
        # reference (plus user-plug overrides), never the referenced nodes.
        s = Gaffer.ScriptNode()

        s["n1"] = GafferTest.AddNode()
        s["n2"] = GafferTest.AddNode()
        s["n2"]["op1"].setInput( s["n1"]["sum"] )

        b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["n1"] ] ) )
        b.promotePlug( b["n1"]["op1"] )
        b.exportForReference( "/tmp/test.grf" )

        s = Gaffer.ScriptNode()
        s["r"] = Gaffer.Reference()
        s["r"].load( "/tmp/test.grf" )

        self.assertTrue( "n1" in s["r"] )
        self.assertTrue( s["r"]["n1"]["op1"].getInput().isSame( s["r"]["user"]["n1_op1"] ) )
        self.assertTrue( s["r"]["out"].getInput().isSame( s["r"]["n1"]["sum"] ) )

        s["r"]["user"]["n1_op1"].setValue( 25 )
        self.assertEqual( s["r"]["out"].getValue(), 25 )

        ss = s.serialise()

        # referenced nodes should be referenced only, and not
        # explicitly mentioned in the serialisation at all.
        self.assertTrue( "AddNode" not in ss )
        # but the values of user plugs should be stored, so
        # they can override the values from the reference.
        self.assertTrue( "\"n1_op1\"" in ss )

        s2 = Gaffer.ScriptNode()
        s2.execute( ss )

        self.assertTrue( "n1" in s2["r"] )
        self.assertTrue( s2["r"]["out"].getInput().isSame( s2["r"]["n1"]["sum"] ) )
        self.assertEqual( s2["r"]["out"].getValue(), 25 )

    def testReload( self ) :

        # Reloading a changed .grf should pick up new content while keeping
        # existing external connections and user-plug values.
        s = Gaffer.ScriptNode()

        s["n1"] = GafferTest.AddNode()
        s["n2"] = GafferTest.AddNode()
        s["n3"] = GafferTest.AddNode()
        s["n2"]["op1"].setInput( s["n1"]["sum"] )
        s["n3"]["op1"].setInput( s["n2"]["sum"] )

        b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["n2"] ] ) )
        b.promotePlug( b["n2"]["op2"] )
        b.exportForReference( "/tmp/test.grf" )

        s2 = Gaffer.ScriptNode()
        s2["n1"] = GafferTest.AddNode()
        s2["n3"] = GafferTest.AddNode()
        s2["r"] = Gaffer.Reference()
        s2["r"].load( "/tmp/test.grf" )
        s2["r"]["in"].setInput( s2["n1"]["sum"] )
        s2["r"]["user"]["n2_op2"].setValue( 1001 )
        s2["n3"]["op1"].setInput( s2["r"]["out"] )

        self.assertTrue( "n2" in s2["r"] )
        self.assertTrue( s2["r"]["n2"]["op1"].getInput().isSame( s2["r"]["in"] ) )
        self.assertTrue( s2["r"]["n2"]["op2"].getInput().isSame( s2["r"]["user"]["n2_op2"] ) )
        self.assertEqual( s2["r"]["user"]["n2_op2"].getValue(), 1001 )
        self.assertTrue( s2["r"]["out"].getInput().isSame( s2["r"]["n2"]["sum"] ) )
        self.assertTrue( s2["r"]["in"].getInput().isSame( s2["n1"]["sum"] ) )
        self.assertTrue( s2["n3"]["op1"].getInput().isSame( s2["r"]["out"] ) )

        originalReferencedNames = s2["r"].keys()

        # grow the box and re-export, then reload over the live reference.
        b["anotherNode"] = GafferTest.AddNode()
        b.promotePlug( b["anotherNode"]["op2"] )
        s.serialiseToFile( "/tmp/test.grf", b )

        s2["r"].load( "/tmp/test.grf" )

        self.assertTrue( "n2" in s2["r"] )
        self.assertEqual( set( s2["r"].keys() ), set( originalReferencedNames + [ "anotherNode" ] ) )
        self.assertTrue( s2["r"]["n2"]["op1"].getInput().isSame( s2["r"]["in"] ) )
        self.assertTrue( s2["r"]["n2"]["op2"].getInput().isSame( s2["r"]["user"]["n2_op2"] ) )
        self.assertEqual( s2["r"]["user"]["n2_op2"].getValue(), 1001 )
        self.assertTrue( s2["r"]["anotherNode"]["op2"].getInput().isSame( s2["r"]["user"]["anotherNode_op2"] ) )
        self.assertTrue( s2["r"]["out"].getInput().isSame( s2["r"]["n2"]["sum"] ) )
        self.assertTrue( s2["r"]["in"].getInput().isSame( s2["n1"]["sum"] ) )
        self.assertTrue( s2["n3"]["op1"].getInput().isSame( s2["r"]["out"] ) )

    def testReloadDoesntRemoveCustomPlugs( self ) :

        # plugs unrelated to referencing shouldn't disappear when a reference is
        # reloaded. various parts of the ui might be using them for other purposes.

        s = Gaffer.ScriptNode()
        s["n1"] = GafferTest.AddNode()
        s["n2"] = GafferTest.AddNode()
        s["n2"]["op1"].setInput( s["n1"]["sum"] )

        b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["n1"] ] ) )
        b.exportForReference( "/tmp/test.grf" )

        s2 = Gaffer.ScriptNode()
        s2["r"] = Gaffer.Reference()
        s2["r"].load( "/tmp/test.grf" )

        s2["r"]["__mySpecialPlug"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )

        s2["r"].load( "/tmp/test.grf" )

        self.assertTrue( "__mySpecialPlug" in s2["r"] )

    def testLoadScriptWithReference( self ) :

        # A saved script containing a Reference should restore the reference,
        # its user-plug values, and its external connections on load.
        s = Gaffer.ScriptNode()
        s["n1"] = GafferTest.AddNode()
        s["n2"] = GafferTest.AddNode()
        s["n3"] = GafferTest.AddNode()
        s["n2"]["op1"].setInput( s["n1"]["sum"] )
        s["n3"]["op1"].setInput( s["n2"]["sum"] )

        b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["n2"] ] ) )
        b.promotePlug( b["n2"]["op2"] )
        b.exportForReference( "/tmp/test.grf" )

        s2 = Gaffer.ScriptNode()
        s2["r"] = Gaffer.Reference()
        s2["r"].load( "/tmp/test.grf" )
        s2["a"] = GafferTest.AddNode()

        s2["r"]["user"]["n2_op2"].setValue( 123 )
        s2["r"]["in"].setInput( s2["a"]["sum"] )

        self.assertTrue( "n2_op2" in s2["r"]["user"] )
        self.assertTrue( "n2" in s2["r"] )
        self.assertTrue( "out" in s2["r"] )
        self.assertEqual( s2["r"]["user"]["n2_op2"].getValue(), 123 )
        self.assertTrue( s2["r"]["in"].getInput().isSame( s2["a"]["sum"] ) )

        s2["fileName"].setValue( "/tmp/test.gfr" )
        s2.save()

        s3 = Gaffer.ScriptNode()
        s3["fileName"].setValue( "/tmp/test.gfr" )
        s3.load()

        self.assertEqual( s3["r"].keys(), s2["r"].keys() )
        self.assertEqual( s3["r"]["user"].keys(), s2["r"]["user"].keys() )
        self.assertEqual( s3["r"]["user"]["n2_op2"].getValue(), 123 )
        self.assertTrue( s3["r"]["in"].getInput().isSame( s3["a"]["sum"] ) )

    def testReferenceExportCustomPlugsFromBoxes( self ) :

        # Custom plugs on the exported Box come through to the Reference,
        # except double-underscore-prefixed (private) plugs.
        s = Gaffer.ScriptNode()
        s["n1"] = GafferTest.AddNode()

        b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["n1"] ] ) )
        b["myCustomPlug"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
        b["__invisiblePlugThatShouldntGetExported"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
        b.exportForReference( "/tmp/test.grf" )

        s2 = Gaffer.ScriptNode()
        s2["r"] = Gaffer.Reference()
        s2["r"].load( "/tmp/test.grf" )

        self.assertTrue( "myCustomPlug" in s2["r"] )
        self.assertTrue( "__invisiblePlugThatShouldntGetExported" not in s2["r"] )

    def testPlugMetadata( self ) :

        # Metadata registered on a promoted plug survives export/load.
        s = Gaffer.ScriptNode()
        s["n1"] = GafferTest.AddNode()

        b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["n1"] ] ) )
        p = b.promotePlug( b["n1"]["op1"] )
        Gaffer.Metadata.registerPlugValue( p, "description", "ppp" )

        b.exportForReference( "/tmp/test.grf" )

        s2 = Gaffer.ScriptNode()
        s2["r"] = Gaffer.Reference()
        s2["r"].load( "/tmp/test.grf" )

        self.assertEqual( Gaffer.Metadata.plugValue( s2["r"].descendant( p.relativeName( b ) ), "description" ), "ppp" )

    def testMetadataIsntResaved( self ) :

        # Metadata that came from the .grf belongs to the reference and must
        # not be duplicated into the host script's serialisation.
        s = Gaffer.ScriptNode()
        s["n1"] = GafferTest.AddNode()

        b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["n1"] ] ) )
        p = b.promotePlug( b["n1"]["op1"] )
        Gaffer.Metadata.registerPlugValue( p, "description", "ppp" )

        b.exportForReference( "/tmp/test.grf" )

        s2 = Gaffer.ScriptNode()
        s2["r"] = Gaffer.Reference()
        s2["r"].load( "/tmp/test.grf" )

        self.assertTrue( "Metadata" not in s2.serialise() )

    def testSinglePlugWithMetadata( self ) :

        # Degenerate case: a box exporting only a user plug plus metadata.
        s = Gaffer.ScriptNode()
        s["b"] = Gaffer.Box()
        s["b"]["user"]["p"] = Gaffer.Plug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
        Gaffer.Metadata.registerPlugValue( s["b"]["user"]["p"], "description", "ddd" )

        s["b"].exportForReference( "/tmp/test.grf" )

        s["r"] = Gaffer.Reference()
        s["r"].load( "/tmp/test.grf" )

        self.assertEqual( Gaffer.Metadata.plugValue( s["r"]["user"]["p"], "description" ), "ddd" )

    def testReloadWithUnconnectedPlugs( self ) :

        # A user plug with no internal connection must survive reference
        # load and script serialisation.
        s = Gaffer.ScriptNode()
        s["b"] = Gaffer.Box()
        s["b"]["user"]["p"] = Gaffer.Plug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )

        s["b"].exportForReference( "/tmp/test.grf" )

        s["r"] = Gaffer.Reference()
        s["r"].load( "/tmp/test.grf" )

        self.assertEqual( s["r"]["user"].keys(), [ "p" ] )

        s2 = Gaffer.ScriptNode()
        s2.execute( s.serialise() )

        self.assertEqual( s2["r"]["user"].keys(), [ "p" ] )

    def testReloadRefreshesMetadata( self ) :

        # Reloading a re-exported .grf must pick up metadata added since
        # the previous load.
        s = Gaffer.ScriptNode()
        s["b"] = Gaffer.Box()
        s["b"]["user"]["p"] = Gaffer.Plug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )

        s["b"].exportForReference( "/tmp/test.grf" )

        s["r"] = Gaffer.Reference()
        s["r"].load( "/tmp/test.grf" )

        self.assertEqual( Gaffer.Metadata.plugValue( s["r"]["user"]["p"], "test" ), None )

        Gaffer.Metadata.registerPlugValue( s["b"]["user"]["p"], "test", 10 )
        s["b"].exportForReference( "/tmp/test.grf" )
        s["r"].load( "/tmp/test.grf" )

        self.assertEqual( Gaffer.Metadata.plugValue( s["r"]["user"]["p"], "test" ), 10 )

    def testDefaultValueClashes( self ) :

        # export a reference where a promoted plug is not at
        # its default value.

        s = Gaffer.ScriptNode()
        s["b"] = Gaffer.Box()
        s["b"]["n"] = GafferTest.AddNode()
        p = s["b"].promotePlug( s["b"]["n"]["op1"] )
        p.setValue( p.defaultValue() + 10 )

        s["b"].exportForReference( "/tmp/test.grf" )

        # reference it in to a new script, set the value back to
        # its default, and save the script.

        s2 = Gaffer.ScriptNode()
        s2["r"] = Gaffer.Reference()
        s2["r"].load( "/tmp/test.grf" )

        p2 = s2["r"].descendant( p.relativeName( s["b"] ) )
        self.assertEqual( p2.getValue(), p2.defaultValue() + 10 )
        p2.setToDefault()
        self.assertEqual( p2.getValue(), p2.defaultValue() )

        s2["fileName"].setValue( "/tmp/test.gfr" )
        s2.save()

        # load the script, and check that the value is at the default.

        s3 = Gaffer.ScriptNode()
        s3["fileName"].setValue( "/tmp/test.gfr" )
        s3.load()

        p3 = s3["r"].descendant( p.relativeName( s["b"] ) )
        self.assertEqual( p3.getValue(), p3.defaultValue() )

    def testLoadThrowsExceptionsOnError( self ) :

        # A .grf referencing an unavailable node type must raise on load.
        s = Gaffer.ScriptNode()
        s["b"] = Gaffer.Box()
        s["b"]["n"] = GafferTest.SphereNode()

        s["b"].exportForReference( "/tmp/test.grf" )

        del GafferTest.SphereNode # induce a failure during loading

        s2 = Gaffer.ScriptNode()
        s2["r"] = Gaffer.Reference()
        self.assertRaises( Exception, s2["r"].load, "/tmp/test.grf" )

    def testErrorTolerantLoading( self ) :

        # make a box containing 2 nodes, and export it.

        s = Gaffer.ScriptNode()
        s["b"] = Gaffer.Box()
        s["b"]["s"] = GafferTest.SphereNode()
        s["b"]["a"] = GafferTest.AddNode()

        s["b"].exportForReference( "/tmp/test.grf" )

        # import it into a script.

        s2 = Gaffer.ScriptNode()
        s2["r"] = Gaffer.Reference()
        s2["r"].load( "/tmp/test.grf" )

        self.assertTrue( "a" in s2["r"] )
        self.assertTrue( isinstance( s2["r"]["a"], GafferTest.AddNode ) )

        # save that script, and then mysteriously
        # disable GafferTest.SphereNode.

        s2["fileName"].setValue( "/tmp/test.gfr" )
        s2.save()

        del GafferTest.SphereNode

        # load the script, and check that we could at least
        # load in the other referenced node.

        s3 = Gaffer.ScriptNode()
        s3["fileName"].setValue( "/tmp/test.gfr" )
        with IECore.CapturingMessageHandler() as mh :
            s3.load( continueOnError=True )
        self.assertTrue( len( mh.messages ) )

        self.assertTrue( "a" in s3["r"] )
        self.assertTrue( isinstance( s3["r"]["a"], GafferTest.AddNode ) )

    def testDependencyNode( self ) :

        s = Gaffer.ScriptNode()

        # Make a reference, and check it's a DependencyNode
        s["r"] = Gaffer.Reference()
        self.assertTrue( isinstance( s["r"], Gaffer.DependencyNode ) )
        self.assertTrue( s["r"].isInstanceOf( Gaffer.DependencyNode.staticTypeId() ) )
        self.assertTrue( isinstance( s["r"], Gaffer.SubGraph ) )
        self.assertTrue( s["r"].isInstanceOf( Gaffer.SubGraph.staticTypeId() ) )

        # create a box with a promoted output:
        s["b"] = Gaffer.Box()
        s["b"]["n"] = GafferTest.AddNode()
        s["b"].promotePlug( s["b"]["n"]["sum"], asUserPlug = False )
        s["b"].exportForReference( "/tmp/test.grf" )

        # load onto reference:
        s["r"].load( "/tmp/test.grf" )

        self.assertEqual( s["r"].correspondingInput( s["r"]["n_sum"] ), None )
        self.assertEqual( s["r"].enabledPlug(), None )

        # Wire it up to support enabledPlug() and correspondingInput()
        s["b"].promotePlug( s["b"]["n"]["op1"], asUserPlug = False )
        s["b"]["n"]["op2"].setValue( 10 )
        s["b"].exportForReference( "/tmp/test.grf" )

        # reload reference and test:
        s["r"].load( "/tmp/test.grf" )

        self.assertEqual( s["r"].correspondingInput( s["r"]["n_sum"] ), None )
        self.assertEqual( s["r"].enabledPlug(), None )

        # add an enabled plug:
        s["b"]["enabled"] = Gaffer.BoolPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
        s["b"].exportForReference( "/tmp/test.grf" )

        # reload reference and test that's now visible via enabledPlug():
        s["r"].load( "/tmp/test.grf" )

        self.assertEqual( s["r"].correspondingInput( s["r"]["n_sum"] ), None )
        self.assertTrue( s["r"].enabledPlug().isSame( s["r"]["enabled"] ) )

        # hook up the enabled plug inside the box:
        s["b"]["n"]["enabled"].setInput( s["b"]["enabled"] )
        s["b"].exportForReference( "/tmp/test.grf" )

        # reload reference and test that's now visible via enabledPlug():
        s["r"].load( "/tmp/test.grf" )

        self.assertTrue( s["r"].enabledPlug().isSame( s["r"]["enabled"] ) )
        self.assertTrue( s["r"].correspondingInput( s["r"]["n_sum"] ).isSame( s["r"]["n_op1"] ) )

        # Connect it into a network, delete it, and check that we get nice auto-reconnect behaviour
        s["a"] = GafferTest.AddNode()
        s["r"]["n_op1"].setInput( s["a"]["sum"] )
        s["c"] = GafferTest.AddNode()
        s["c"]["op1"].setInput( s["r"]["n_sum"] )
        s.deleteNodes( filter = Gaffer.StandardSet( [ s["r"] ] ) )

        self.assertTrue( s["c"]["op1"].getInput().isSame( s["a"]["sum"] ) )

    def testPlugFlagsOnReload( self ):

        # Dynamic plugs added by the user keep their flags across a reload.
        s = Gaffer.ScriptNode()
        s["b"] = Gaffer.Box()
        s["b"]["s"] = GafferTest.SphereNode()
        s["b"]["a"] = GafferTest.AddNode()

        s["b"].exportForReference( "/tmp/test.grf" )

        # import it into a script.

        s2 = Gaffer.ScriptNode()
        s2["r"] = Gaffer.Reference()
        s2["r"].load( "/tmp/test.grf" )

        s2["r"]["__pluggy"] = Gaffer.CompoundPlug( flags = Gaffer.Plug.Flags.Dynamic | Gaffer.Plug.Flags.Default )
        s2["r"]["__pluggy"]["int"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Dynamic | Gaffer.Plug.Flags.Default )
        s2["r"]["__pluggy"]["compound"] = Gaffer.CompoundPlug( flags = Gaffer.Plug.Flags.Dynamic | Gaffer.Plug.Flags.Default )
        s2["r"]["__pluggy"]["compound"]["int"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Dynamic | Gaffer.Plug.Flags.Default )

        self.assertEqual( s2["r"]["__pluggy"].getFlags(), Gaffer.Plug.Flags.Dynamic | Gaffer.Plug.Flags.Default )
        self.assertEqual( s2["r"]["__pluggy"]["int"].getFlags(), Gaffer.Plug.Flags.Dynamic | Gaffer.Plug.Flags.Default )
        self.assertEqual( s2["r"]["__pluggy"]["compound"].getFlags(), Gaffer.Plug.Flags.Dynamic | Gaffer.Plug.Flags.Default )
        self.assertEqual( s2["r"]["__pluggy"]["compound"]["int"].getFlags(), Gaffer.Plug.Flags.Dynamic | Gaffer.Plug.Flags.Default )

        s2["r"].load( "/tmp/test.grf" )

        self.assertEqual( s2["r"]["__pluggy"].getFlags(), Gaffer.Plug.Flags.Dynamic | Gaffer.Plug.Flags.Default )
        self.assertEqual( s2["r"]["__pluggy"]["int"].getFlags(), Gaffer.Plug.Flags.Dynamic | Gaffer.Plug.Flags.Default )
        self.assertEqual( s2["r"]["__pluggy"]["compound"].getFlags(), Gaffer.Plug.Flags.Dynamic | Gaffer.Plug.Flags.Default )
        self.assertEqual( s2["r"]["__pluggy"]["compound"]["int"].getFlags(), Gaffer.Plug.Flags.Dynamic | Gaffer.Plug.Flags.Default )

    def tearDown( self ) :

        # restore the class we deleted in the error-handling tests, and
        # remove the fixture files written to /tmp.
        GafferTest.SphereNode = self.__SphereNode

        for f in (
            "/tmp/test.grf",
            "/tmp/test.gfr",
        ) :
            if os.path.exists( f ) :
                os.remove( f )


if __name__ == "__main__":
    unittest.main()
# Flibber: a Python 2 Instagram bot that likes pictures and follows/unfollows
# users through the (legacy) Instagram REST API via pycurl. The whole script
# runs inside one try block so KeyboardInterrupt and the finally-block stats
# report work regardless of where execution stops.
#
# NOTE(review): this is Python 2 only (print statements, urllib/urllib2,
# dict.iteritems); porting to Python 3 would be a separate change.
# NOTE(review): the original file's newlines were lost; the indentation below
# is a best-effort reconstruction of the statement nesting — verify against
# the upstream project before relying on subtle control-flow details.

start = 0  # flipped to 1 once printMsg is defined; gates the finally-block report
try:
    import time
    import random
    import re
    import pycurl
    import hmac
    import urllib
    import urllib2
    import simplejson
    import sys
    import calendar
    import options  # user-supplied credentials/settings module (options.py)
    from hashlib import sha256
    try:
        from io import BytesIO
    except ImportError:
        from StringIO import StringIO as BytesIO

    # If you change these delays, you will exceed the Instagram API rate-limit
    # Or, the bot will be running slower than necessary
    LIKE_DELAY = 36   # seconds between likes
    REL_DELAY = 60    # seconds between relationship (follow/unfollow) changes
    API_DELAY = 2     # seconds between any two API calls

    # DO NOT CHANGE ANYTHING BELOW THIS POINT
    NO_FOLLOW = 0     # getRelationship() return codes
    FOLLOWS = 1
    likedDict = {}    # NOTE(review): never used anywhere below
    headers = {}      # response headers of the most recent request (filled by headerFunction)
    dataDict = ""     # parsed JSON body of the most recent request
    count = 0         # total requests attempted
    response = "500"  # HTTP status of the most recent request, as a string
    totalFollows = 0
    totalUnfollows = 0
    totalAPICalls = 0
    totalLikes = 0
    totalErrors = 0
    globErrorMessage = ""
    errorLevel = 0    # consecutive-failure counter; > 8 aborts the run

    # ANSI terminal colour codes used by printMsg().
    class tCol:
        HEADER = '\033[95m'
        OKBLUE = '\033[94m'
        OKGREEN = '\033[92m'
        WARNING = '\033[93m'
        FAIL = '\033[91m'
        ENDC = '\033[0m'
        BOLD = '\033[1m'
        UNDERLINE = '\033[4m'

    def currentTime():
        # Current UTC time as a unix timestamp (seconds).
        theTime = calendar.timegm(time.gmtime())
        return theTime

    # Pretend the last action of each kind happened exactly one delay ago,
    # so the first real action is not throttled.
    lastLike = currentTime() - LIKE_DELAY
    lastRel = currentTime() - REL_DELAY
    lastAPI = currentTime() - API_DELAY
    userArray = []      # user ids we must not interact with (already followed)
    likeArray = []      # timestamps of likes in the rolling hour window
    APIArray = []       # timestamps of successful API calls in the rolling hour
    followArray = []    # ids the account follows
    followedArray = []  # ids that follow the account
    relArray = []       # timestamps of relationship changes in the rolling hour
    picArray = []       # picture ids already liked (interaction blacklist)

    def printMsg(message, prefix="FLIB", level="OKGREEN"):
        # Print "[PREFIX] message" coloured with the named tCol attribute.
        print ("[" + getattr(tCol, level) + prefix + tCol.ENDC + "] " +
               getattr(tCol, level) + message + tCol.ENDC)
        return

    start = 1  # printMsg now exists, so the finally block may use it

    def execPause(length):
        # Sleep for `length` seconds, printing a per-minute countdown for
        # long pauses. Pauses of 3 seconds or less are silent.
        # NOTE(review): the loop prints the countdown but the single
        # time.sleep() below only sleeps the remaining `length` — confirm the
        # intended total pause against the upstream source.
        if length > 3:
            if length > 60:
                multiplier = int(float(length)/60)
                i = 0
                while i < multiplier:
                    printMsg('Paused for ' + tCol.FAIL + str(length) +
                             tCol.WARNING + ' seconds...', "TIME", "WARNING")
                    i = i + 1
                    length = length - 60
            printMsg('Paused for ' + tCol.FAIL + str(length) +
                     tCol.WARNING + ' seconds...', "TIME", "WARNING")
        time.sleep(length)

    # Refuse to run until the placeholder credentials in options.py are replaced.
    if options.ACCESS_TOKEN == "changeme" or options.CLIENT_ID == "changeme":
        print
        printMsg("You must change all variables which equal 'changeme'", "FAIL", "FAIL")
        sys.exit(1)
    elif options.CLIENT_SECRET == "changeme" or options.IP == "changeme":
        print
        printMsg("You must change all variables which equal 'changeme'", "FAIL", "FAIL")
        sys.exit(1)

    def headerFunction(header_line):
        # pycurl HEADERFUNCTION callback: collect "Name: value" response
        # headers into the global `headers` dict with lowercased names.
        if ':' not in header_line:
            return
        name, value = header_line.split(':', 1)
        name = name.strip()
        value = value.strip()
        name = name.lower()
        headers[name] = value

    def reqURL(url, post="", proto="GET", reqType="API"):
        # Perform one signed Instagram API request and return the parsed JSON
        # body (also stored in the global `dataDict`; HTTP status in
        # `response`). Handles global rate-limit pacing, error counting, and
        # recursive retry on HTTP 429.
        global count, dataDict, response, globErrorMessage
        global API_DELAY, LIKE_DELAY, REL_DELAY
        global totalAPICalls, totalErrors, errorLevel, lastAPI
        bytesIO = BytesIO()
        pc = pycurl.Curl()
        # Signed-requests header: HMAC-SHA256 of the client IP keyed by the
        # client secret, as required for X-Insta-Forwarded-For.
        signature = hmac.new(
            options.CLIENT_SECRET,
            options.IP,
            sha256).hexdigest()
        header = '|'.join([options.IP, signature])
        header = ["X-Insta-Forwarded-For: " + header]
        post_data = {'access_token': options.ACCESS_TOKEN,
                     'client_id': options.CLIENT_ID}
        post_data.update(post)
        postfields = urllib.urlencode(post_data)
        if proto == "POST":
            pc.setopt(pc.CUSTOMREQUEST, 'POST')
            pc.setopt(pc.POSTFIELDS, postfields)
        else:
            # GET: credentials go in the query string; keep the bare URL for logging.
            getURL = url
            url = url + "?" + postfields
            pc.setopt(pc.CUSTOMREQUEST, 'GET')
        pc.setopt(pc.URL, str(url))
        pc.setopt(pc.WRITEFUNCTION, bytesIO.write)
        pc.setopt(pc.HEADERFUNCTION, headerFunction)
        pc.setopt(pycurl.HTTPHEADER, header)
        count = count + 1
        # Global pacing: wait out the remainder of API_DELAY since the last call.
        timeDifference = currentTime() - lastAPI
        if timeDifference < API_DELAY:
            execPause(API_DELAY - timeDifference)
        # Drop timestamps older than an hour from the rolling API window.
        if len(APIArray) > 0:
            while APIArray[0] <= currentTime() - 3600:
                del APIArray[0]
            # NOTE(review): this checks relArray but pauses based on APIArray —
            # looks like it should test len(APIArray) >= 4999; verify upstream.
            if len(relArray) >= 4999:
                waitTime = currentTime() - APIArray[0] - 3600
                execPause(waitTime)
        try:
            totalAPICalls = totalAPICalls + 1
            pc.perform()
            response = str(pc.getinfo(pc.HTTP_CODE))
            pc.close()
            # Detect the charset from Content-Type.
            # NOTE(review): `encoding` is computed but never used to decode.
            encoding = None
            if 'content-type' in headers:
                content_type = headers['content-type'].lower()
                match = re.search('charset=(\S+)', content_type)
                if match:
                    encoding = match.group(1)
            if encoding is None:
                encoding = 'iso-8859-1'
            body = bytesIO.getvalue()
            dataDict = simplejson.loads(body)
            printMsg(tCol.BOLD + 'Request #' + str(count), "NUM#", "HEADER")
            try:
                printMsg('Remaining API calls: ' + tCol.FAIL +
                         headers['x-ratelimit-remaining'] + '/' +
                         headers['x-ratelimit-limit'] + tCol.ENDC,
                         "RATE", "OKBLUE")
            except Exception:
                # rate-limit headers absent; brief pause instead
                execPause(1)
        except Exception as e:
            # Network/parse failure: record it and treat as HTTP 500 below.
            dataDict = ""
            response = "500"
            error_message = e
            errorLevel = errorLevel + 1
            if errorLevel > 8:
                printMsg("Error level exceeded, check options.", "ERRO", "FAIL")
                sys.exit(1)
        # Log what was requested.
        if proto == "POST":
            printMsg(url, "RURL", "OKBLUE")
        else:
            printMsg(getURL, "RURL", "OKBLUE")
        printMsg(postfields, "FLDS", "OKBLUE")
        printMsg(proto, "HTTP", "OKBLUE")
        try:
            if response == "200":
                lastAPI = currentTime()
                errorLevel = 0
                printMsg(response, "CODE")
                APIArray.append(currentTime())
            elif response == "500":
                totalErrors = totalErrors + 1
                globErrorMessage = str(error_message)
                # pycurl reports a write-callback abort (error 23) when the
                # user hits Ctrl-C during a transfer.
                if globErrorMessage == "(23, 'Failed writing header')":
                    print ""
                    printMsg(tCol.BOLD + "Keyboard Interrupt!", "INPT", "FAIL")
                    sys.exit(1)
                printMsg(str(error_message), "ERRO", "FAIL")
            elif response != "200":
                # API-level error: report it, bail on bad tokens, and back
                # off plus retry on 429 rate-limiting.
                totalErrors = totalErrors + 1
                error_message = dataDict["meta"]["error_message"]
                error_type = dataDict["meta"]["error_type"]
                printMsg(response, "CODE", "FAIL")
                printMsg(error_type, "TYPE", "FAIL")
                printMsg(error_message, "FAIL", "FAIL")
                if response == "400" and \
                        error_type == "OAuthAccessTokenException":
                    sys.exit(1)
                if response == "429":
                    # "x/y" counts embedded in the error message text
                    rates = [int(s) for s in error_message.split() if s.isdigit()]
                    printMsg("Rate exceeded: " + tCol.FAIL + str(rates[0]) +
                             "/" + str(rates[1]) + tCol.WARNING +
                             " in the last hour.", "RATE", "WARNING")
                    # Slow down the offending category and pad its window so
                    # the computed wait spans the full hour.
                    if reqType == "Like":
                        LIKE_DELAY = LIKE_DELAY + 1
                        rateArray = likeArray
                        rateLen = 99
                    elif reqType == "Relation":
                        REL_DELAY = REL_DELAY + 1
                        rateArray = relArray
                        rateLen = 99
                    else:
                        API_DELAY = API_DELAY + 1
                        rateArray = APIArray
                        rateLen = 4999
                    rateDiff = rateLen - len(rateArray)
                    if rateDiff > 0:
                        while len(rateArray) < rateLen:
                            rateArray.append(currentTime())
                        rateArray[0] = currentTime() - 3900
                    waitTime = 0
                    waitTime = currentTime() - rateArray[0] - 3600
                    execPause(waitTime)
                    # retry the same request after the back-off
                    reqURL(url, post, proto, reqType)
        except Exception:
            return
        return dataDict

    def getAccessToken():
        # Exchange options.CODE for an OAuth access token when none is
        # configured yet, and tell the user to persist it in options.py.
        if options.ACCESS_TOKEN == "":
            tokenURL = "https://api.instagram.com/oauth/access_token"
            post = {'client_secret': options.CLIENT_SECRET,
                    'redirect_uri': options.REDIRECT_URI,
                    'code': options.CODE,
                    'grant_type': 'authorization_code'}
            data = reqURL(tokenURL, post, "POST")
            options.ACCESS_TOKEN = str(data["access_token"])
            printMsg("Here is your ACCESS_TOKEN: " + tCol.OKGREEN +
                     options.ACCESS_TOKEN, "ACST", "WARNING")
            printMsg("Replace the value in options.py", "REPL", "WARNING")
            execPause(60)
        return

    def getUsers(next_cursor=None, num_users=0, stage=0):
        # Recursively page through follows (stage 0) then followed-by
        # (stage 1); stage 2 sets userArray to follows-minus-followers
        # (the ids we should not interact with again).
        global userArray
        if stage == 0:
            userURL = options.INSTAGRAM_API + "users/self/follows"
            arrayName = followArray
        elif stage == 1:
            userURL = options.INSTAGRAM_API + "users/self/followed-by"
            arrayName = followedArray
        else:
            userArray = list(set(followArray) - set(followedArray))
            printMsg(tCol.FAIL + tCol.BOLD + str(num_users) + tCol.ENDC +
                     tCol.WARNING + " users added to interaction blacklist",
                     "USER", "WARNING")
            return
        if next_cursor is not None:
            post = {'cursor': next_cursor}
        else:
            post = {}
        data = reqURL(userURL, post)
        if response != "200":
            if globErrorMessage == "(23, 'Failed writing header')":
                print globErrorMessage
                sys.exit(1)
            printMsg("Retrying request...", "RTRY", "WARNING")
            getUsers(next_cursor, num_users, stage)
            return
        dataPage = data["pagination"]
        next_cursor = None
        if dataPage:
            next_cursor = data["pagination"]["next_cursor"]
        for user in data["data"]:
            for k, v in user.iteritems():
                if k == "id":
                    userID = v
                    arrayName.append(userID)
                    num_users = num_users + 1
        if next_cursor is None:
            # this stage is exhausted; move to the next one
            stage = stage + 1
        getUsers(next_cursor, num_users, stage)

    def getFollowing(next_cursor=None, num_users=0):
        # Recursively page through every account we follow and blacklist
        # them all (used by UNFOLLOW_ALL).
        global userArray
        userURL = options.INSTAGRAM_API + "users/self/follows"
        if next_cursor is not None:
            post = {'cursor': next_cursor}
        else:
            post = {}
        data = reqURL(userURL, post)
        if response != "200":
            if globErrorMessage == "(23, 'Failed writing header')":
                print globErrorMessage
                sys.exit(1)
            printMsg("Retrying request...", "RTRY", "WARNING")
            getFollowing(next_cursor, num_users)
            return
        dataPage = data["pagination"]
        next_cursor = None
        if dataPage:
            next_cursor = data["pagination"]["next_cursor"]
        for user in data["data"]:
            for k, v in user.iteritems():
                if k == "id":
                    userID = v
                    followArray.append(userID)
                    num_users = num_users + 1
        if next_cursor is None:
            userArray = list(set(followArray))
            printMsg(tCol.FAIL + tCol.BOLD + str(num_users) + tCol.ENDC +
                     tCol.WARNING + " users added to interaction blacklist",
                     "USER", "WARNING")
            return
        getFollowing(next_cursor, num_users)

    def getPics(next_max_like_id=None, num_likes=0):
        # Recursively page through our previously-liked media and record the
        # picture ids so we never like the same picture twice.
        likeURL = options.INSTAGRAM_API + "users/self/media/liked"
        if next_max_like_id is not None:
            post = {'max_like_id': next_max_like_id}
        else:
            post = {}
        data = reqURL(likeURL, post)
        if response != "200":
            if globErrorMessage == "(23, 'Failed writing header')":
                sys.exit(1)
            printMsg("Retrying request...", "RTRY", "WARNING")
            getPics(next_max_like_id, num_likes)
            return
        dataPage = data["pagination"]
        next_max_like_id = None
        if dataPage:
            next_max_like_id = data["pagination"]["next_max_like_id"]
        for image in data["data"]:
            for k, v in image.iteritems():
                if k == "id":
                    imageID = v
                    picArray.append(imageID)
                    num_likes = num_likes + 1
        if next_max_like_id is not None:
            getPics(next_max_like_id, num_likes)
        else:
            printMsg(tCol.FAIL + tCol.BOLD + str(num_likes) + tCol.ENDC +
                     tCol.WARNING + " pictures added to interaction blacklist",
                     "LIKE", "WARNING")

    def followCheck():
        # Check our own follow count and flip the global ACTION mode when we
        # hit the 7500 follow cap (switch to unfollowing) or reach zero
        # follows while unfollowing (switch back to like/follow).
        userURL = options.INSTAGRAM_API + "users/self"
        data = reqURL(userURL)
        if response != "200":
            return
        try:
            followsCount = int(data['data']['counts']['follows'])
        except Exception:
            printMsg("Failed to get follow count. Skipping...", "FLLW", "FAIL")
            return
        if followsCount >= 7499 and options.ACTION != "UNFOLLOW_ALL":
            execPause(86400)
            printMsg("Following cap exceeded. Unfollowing all users", "UFLW")
            options.ACTION = "UNFOLLOW_ALL"
            begin()
            sys.exit(1)
        elif followsCount <= 1 and options.ACTION == "UNFOLLOW_ALL":
            printMsg("All users unfollowed. Following users.", "UFLW")
            options.ACTION = "LIKE_FOLLOW"
            begin()
            sys.exit(1)
        printMsg("Following count: " + str(followsCount), "FLLW")
        return

    # Like `pictureID`
    def likePicture(pictureID):
        # Skips blacklisted pictures, enforces the like rate window
        # (max 99 likes / rolling hour, LIKE_DELAY between likes).
        if pictureID in picArray:
            printMsg("You already like picture " + tCol.WARNING + pictureID,
                     "LIKE", "FAIL")
            return
        global totalLikes
        global lastLike
        likeURL = options.INSTAGRAM_API + "media/%s/likes" % (pictureID)
        printMsg("Liking picture " + pictureID, "LIKE")
        timeDifference = currentTime() - lastLike
        if timeDifference < LIKE_DELAY:
            execPause(LIKE_DELAY - timeDifference)
        if len(likeArray) > 0:
            while likeArray[0] <= currentTime() - 3600:
                del likeArray[0]
            if len(likeArray) >= 99:
                waitTime = currentTime() - likeArray[0] - 3600
                if waitTime > 0:
                    execPause(waitTime)
        reqURL(likeURL, "", "POST", "Like")
        if response != "200":
            return
        lastLike = currentTime()
        likeArray.append(currentTime())
        totalLikes = totalLikes + 1

    # Follow or unfollow `userID`
    def modUser(userID, action):
        # Change our relationship to `userID` ("follow"/"unfollow"/"block"),
        # respecting the blacklist, a follower-ratio heuristic, and the
        # relationship rate window (max 99 changes / rolling hour).
        global lastRel
        userURL = options.INSTAGRAM_API + "users/%s" % (userID)
        modURL = userURL + "/relationship"
        data = reqURL(userURL)
        if response != "200":
            return
        try:
            followsCount = int(data['data']['counts']['follows'])
            followedByCount = int(data['data']['counts']['followed_by'])
        except Exception:
            printMsg("Failed to get follow counts. Skipping...", "FLLW", "FAIL")
            return
        post = {'action': action}
        if action == "follow":
            if userID in userArray:
                printMsg("You are already following user " + tCol.WARNING +
                         userID, "FLLW", "FAIL")
                return
            # skip accounts unlikely to follow back
            if followsCount < (followedByCount / 2):
                printMsg("User " + tCol.WARNING + userID + tCol.FAIL +
                         " is following < half of their follower count.",
                         "FLLW", "FAIL")
                return
            verbAct = "Following"
            swap = 0
        elif action == "unfollow":
            if userID not in userArray:
                printMsg("You are not following user " + tCol.WARNING +
                         userID, "FLLW", "FAIL")
                return
            verbAct = "Unfollowing"
            swap = 1
        elif action == "block":
            verbAct = "Blocking"
            swap = 1
        timeDifference = currentTime() - lastRel
        if timeDifference < REL_DELAY:
            execPause(REL_DELAY - timeDifference)
        if len(relArray) > 0:
            while relArray[0] <= currentTime() - 3600:
                del relArray[0]
            if len(relArray) >= 99:
                waitTime = currentTime() - relArray[0] - 3600
                if waitTime > 0:
                    execPause(waitTime)
        followCheck()
        printMsg(verbAct + " user " + userID, "RLAT")
        reqURL(modURL, post, "POST", "Relation")
        if response != "200":
            return
        # keep the local blacklist in sync with the change we just made
        if action == "follow":
            if userID not in userArray:
                userArray.append(userID)
        else:
            if userID in userArray:
                userArray.remove(userID)
        lastRel = currentTime()
        relArray.append(currentTime())
        if action != "block":
            getRelationship(userID, "outgoing", swap)

    # Return relationship to `userID`
    def getRelationship(userID, direction="incoming", swap=0):
        # Report and return FOLLOWS / NO_FOLLOW for the given direction.
        # `swap` inverts the success/failure colouring (and which global
        # counter is bumped) for the unfollow flow.
        global totalFollows, totalUnfollows
        followURL = options.INSTAGRAM_API + "users/%s/relationship" % (userID)
        data = reqURL(followURL)
        if response != "200":
            return
        status = data["data"]
        incoming = status["incoming_status"]
        outgoing = status["outgoing_status"]
        if swap == 1:
            followLevel = "FAIL"
            noFollowLevel = "OKGREEN"
        else:
            followLevel = "OKGREEN"
            noFollowLevel = "FAIL"
        if direction == "outgoing":
            if outgoing == "follows":
                if swap == 0:
                    totalFollows = totalFollows + 1
                printMsg("You are following user " + userID, "GREL", followLevel)
                return FOLLOWS
            else:
                if swap == 1:
                    totalUnfollows = totalUnfollows + 1
                printMsg("You are not following user " + userID, "GREL", noFollowLevel)
                return NO_FOLLOW
        else:
            if incoming != "followed_by":
                printMsg("User " + userID + " does not follow you", "GREL", noFollowLevel)
                return NO_FOLLOW
            else:
                # NOTE(review): followLevel is passed as the *prefix* argument
                # here (printMsg(message, prefix, level)) — the other calls
                # pass "GREL" as prefix; looks like a latent bug.
                printMsg("User " + userID + " follows you", followLevel)
                return FOLLOWS

    # Unfollow users who are not following back
    def unfollowUsers(allUsers=False):
        # allUsers=True unfollows everyone in userArray; otherwise only those
        # who do not follow us back. Restarts the main loop when done.
        num_unfollows = 0
        for userID in userArray:
            if allUsers is True:
                followCheck()
                modUser(userID, "unfollow")
                num_unfollows = num_unfollows + 1
            elif allUsers is False:
                relationship = getRelationship(userID)
                if relationship == NO_FOLLOW:
                    modUser(userID, "unfollow")
                    num_unfollows = num_unfollows + 1
            # randomized jitter between users to look less bot-like
            secs = random.randint(1, options.MAX_SECS)
            time.sleep(secs)
            print num_unfollows
            if num_unfollows % 10 == 0:
                print "Unfollowed %s users " % num_unfollows
        printMsg("Number of users unfollowed is " + str(num_unfollows), "UNFL")
        options.ACTION = "LIKE_FOLLOW"
        # NOTE(review): begin() recurses rather than returning to a loop, so
        # the stack grows on every mode switch; the return below is dead code.
        begin()
        return num_unfollows

    def likeUsers(max_results, max_id, tag, likeCount, followCount):
        # Like (and optionally follow) users posting under `tag`, up to
        # max_results, with randomized jitter between users.
        # NOTE(review): the likeCount/followCount parameters are immediately
        # zeroed below, so the passed-in values are ignored.
        urlFindLike = options.INSTAGRAM_API + "tags/%s/media/recent" % (tag)
        post = {'max_id': max_id}
        data = reqURL(urlFindLike, post)
        if response != "200":
            return
        likeCount = 0
        followCount = 0
        for likeObj in data['data']:
            user = likeObj['user']
            userID = user['id']
            if userID not in userArray:
                try:
                    likeFollowCount = likeAndFollowUser(userID)
                    likeCount = likeCount + likeFollowCount
                except Exception:
                    return
                if (options.ACTION == "LIKE_FOLLOW"):
                    followCount = followCount + 1
                secs = random.randint(1, options.MAX_SECS)
                time.sleep(secs)
                if (likeCount % 10 == 0 and likeCount != 0):
                    printMsg('Liked ' + str(likeCount) + ' pictures from #' + tag, 'LIKE')
                if (options.ACTION == "LIKE_FOLLOW"):
                    if (followCount % 10 == 0 and followCount != 0):
                        printMsg('Followed ' + str(followCount) + ' users from #' + tag, 'FLLW')
                    if (followCount == max_results):
                        break
                elif (options.ACTION == "LIKE"):
                    if (likeCount == max_results):
                        break
        # if(likeCount != max_results):
        #     likeUsers(max_results, max_id, tag, likeCount, followCount)
        printMsg('Liked ' + str(likeCount) + ' pictures and followed ' +
                 str(followCount) + ' users from tag #' + tag, 'TAGS')
        return

    # Like and follow users
    def likeAndFollowUser(userID, follow=True):
        # Like 1-4 random recent pictures from `userID` and then follow them
        # (unless follow=False). Returns the number of likes performed, or
        # None on early exit.
        numLikesFollows = 0
        userURL = options.INSTAGRAM_API + "users/%s" % (userID)
        urlUserMedia = userURL + "/media/recent"
        data = reqURL(userURL)
        if response != "200":
            return
        followsCount = data['data']['counts']['follows']
        followedByCount = data['data']['counts']['followed_by']
        # same follow-back heuristic as modUser()
        if followsCount < (followedByCount / 2):
            printMsg("User " + tCol.WARNING + userID + tCol.FAIL +
                     " is following less than half of their follower count.",
                     "FLLW", "FAIL")
            return
        data = reqURL(urlUserMedia)
        if response != "200":
            return
        picsToLike = random.randint(1, 4)
        printMsg("Liking " + str(picsToLike) + " pictures for user " + str(userID))
        countPicViews = 0
        for picture in data['data']:
            if picture['id'] not in likeArray:
                likePicture(picture['id'])
                countPicViews = countPicViews + 1
                numLikesFollows = numLikesFollows + 1
                if(countPicViews == picsToLike):
                    break
        if follow:
            modUser(userID, "follow")
        return numLikesFollows

    def popFunction():
        # "POPULAR" mode: walk the likers of the current popular media and
        # like/follow each of them, up to options.MAX_COUNT follows.
        urlPopular = options.INSTAGRAM_API + "media/popular"
        data = reqURL(urlPopular)
        if response != "200":
            return
        followCount = 0
        likeCount = 0
        for obj in data['data']:
            for comment in obj['likes']['data']:
                myid = comment['id']
                result = likeAndFollowUser(myid)
                if(result > 0):
                    followCount = followCount + 1
                    likeCount = likeCount + 1
                    if(followCount % 10 == 0):
                        printMsg("Followed " + str(followCount) + " users", "followCount")
                seconds = random.randint(1, options.MAX_SECS)
                time.sleep(seconds)
                if (followCount == options.MAX_COUNT):
                    break
            if (followCount == options.MAX_COUNT):
                break
        printMsg("Followed " + str(followCount) + " users", "followCount")
        printMsg("Liked " + str(likeCount) + " pictures", "LIKE")

    def decider():
        # Dispatch on options.ACTION to the appropriate workflow.
        if(options.ACTION == "LIKE" or options.ACTION == "LIKE_FOLLOW"):
            getUsers()
            getPics()
            for tag in options.TAGS:
                likeUsers(options.MAX_COUNT, 0, tag, 0, 0)
        elif(options.ACTION == "POPULAR"):
            getUsers()
            getPics()
            popFunction()
        elif(options.ACTION == "UNFOLLOW"):
            getUsers()
            unfollowUsers(False)
        elif(options.ACTION == "UNFOLLOW_ALL"):
            getFollowing()
            unfollowUsers(True)
        else:
            printMsg("Invalid ACTION specified", "ACTO", "FAIL")

    def begin():
        # Main loop: ensure a token, run the selected workflow, and repeat.
        # NOTE(review): repeats by direct recursion — the stack grows forever;
        # a while-loop would be the conventional fix.
        getAccessToken()
        decider()
        printMsg("Repeating script", "REPT", "WARNING")
        begin()

    # startup banner, then enter the main loop
    print ""
    printMsg("----------------------", "FLIB", "HEADER")
    printMsg(" Welcome to " + tCol.WARNING + "Flibber ", "FLIB", "HEADER")
    printMsg(" Chip (itschip.com) ", "FLIB", "HEADER")
    printMsg(tCol.OKGREEN + " @ChipIsTheName ", "FLIB", "HEADER")
    printMsg("----------------------", "FLIB", "HEADER")
    print ""
    time.sleep(5)
    begin()
except KeyboardInterrupt:
    print ""
    if start == 1:
        printMsg(tCol.BOLD + "Keyboard Interrupt!", "INPT", "FAIL")
    else:
        # printMsg was never defined (interrupt during imports)
        print "Keyboard Interrupt"
finally:
    # summary report; only possible once printMsg and the counters exist
    if start == 1:
        print ""
        printMsg(tCol.UNDERLINE + "Statistics from run:", "STAT", "WARNING")
        printMsg("Unfollows: " + tCol.BOLD + str(totalUnfollows), "STAT", "FAIL")
        printMsg("Follows: " + tCol.BOLD + str(totalFollows), "STAT", "OKGREEN")
        printMsg("Likes: " + tCol.BOLD + str(totalLikes), "STAT", "OKBLUE")
        printMsg("API Calls: " + tCol.BOLD + str(totalAPICalls), "STAT", "HEADER")
        print ""
from datetime import datetime
from typing import Dict, Any, Optional, Union, List

from blinker import signal
from slack_sdk.models.attachments import Attachment
from slack_sdk.models.blocks import Block

from machine.clients.slack import SlackClient
from machine.models import Channel
from machine.models import User
from machine.storage import PluginStorage
from machine.utils.collections import CaseInsensitiveDict


class MachineBasePlugin:
    """Base class for all Slack Machine plugins

    The purpose of this class is two-fold:

    1. It acts as a marker-class so Slack Machine can recognize plugins as such
    2. It provides a lot of common functionality and convenience methods for plugins to
       interact with channels and users

    :var settings: Slack Machine settings object that contains all settings that
        were defined through ``local_settings.py`` Plugin developers can use any
        settings that are defined by the user, and ask users to add new settings
        specifically for their plugin.
    """

    def __init__(self, client: SlackClient, settings: CaseInsensitiveDict, storage: PluginStorage):
        self._client = client
        self.storage = storage
        self.settings = settings
        # Fully-qualified plugin name, e.g. "pkg.module.ClassName" — used as a
        # namespace key for this plugin.
        self._fq_name = "{}.{}".format(self.__module__, self.__class__.__name__)

    def init(self):
        """Initialize plugin

        This method can be implemented by concrete plugin classes. It will be called **once**
        for each plugin, when that plugin is first loaded. You can refer to settings via
        ``self.settings``, and access storage through ``self.storage``, but the Slack client
        has not been initialized yet, so you cannot send or process messages during
        initialization.

        :return: None
        """
        pass

    @property
    def users(self) -> Dict[str, User]:
        """Dictionary of all users in the Slack workspace

        :return: a dictionary of all users in the Slack workspace, where the key is the
            user id and the value is a :py:class:`~machine.models.user.User` object
        """
        return self._client.users

    @property
    def channels(self) -> Dict[str, Channel]:
        """Dictionary of all channels in the Slack workspace

        This contains all channels in the Slack workspace that the bot is aware of. This
        includes all public channels, all private channels the bot is a member of and all
        DM channels the bot is a member of.

        :return: a dictionary of all channels in the Slack workspace, where the key is the
            channel id and the value is a :py:class:`~machine.models.channel.Channel` object
        """
        return self._client.channels

    def find_channel_by_name(self, channel_name: str) -> Optional[Channel]:
        """Find a channel by its name, irrespective of a preceding pound symbol.
        This does not include DMs.

        :param channel_name: The name of the channel to retrieve.
        :return: The channel if found, None otherwise.
        """
        if channel_name.startswith('#'):
            channel_name = channel_name[1:]
        for c in self.channels.values():
            if c.name_normalized and channel_name.lower() == c.name_normalized.lower():
                return c
        # Falls through and implicitly returns None when no channel matches.

    @property
    def bot_info(self) -> Dict[str, str]:
        """Information about the bot user in Slack

        This will return a dictionary with information about the bot user in Slack that
        represents Slack Machine

        :return: Bot user
        """
        return self._client.bot_info

    def at(self, user: User) -> str:
        """Create a mention of the provided user

        Create a mention of the provided user in the form of ``<@[user_id]>``. This method
        is convenient when you want to include mentions in your message. This method does
        not send a message, but should be used together with methods like
        :py:meth:`~machine.plugins.base.MachineBasePlugin.say`

        :param user: user you want to mention
        :return: user mention
        """
        return user.fmt_mention()

    def say(self, channel: Union[Channel, str], text: str,
            attachments: Union[List[Attachment], List[Dict[str, Any]], None] = None,
            blocks: Union[List[Block], List[Dict[str, Any]], None] = None,
            thread_ts: Optional[str] = None,
            ephemeral_user: Union[User, str, None] = None,
            **kwargs):
        """Send a message to a channel

        Send a message to a channel using the WebAPI. Allows for rich formatting using
        `blocks`_ and/or `attachments`_. You can provide blocks and attachments as Python
        dicts or you can use the `convenient classes`_ that the underlying slack client
        provides. Can also reply in-thread and send ephemeral messages, visible to only one
        user. Ephemeral messages and threaded messages are mutually exclusive, and
        ``ephemeral_user`` takes precedence over ``thread_ts``
        Any extra kwargs you provide, will be passed on directly to the `chat.postMessage`_
        or `chat.postEphemeral`_ request.

        .. _attachments: https://api.slack.com/docs/message-attachments
        .. _blocks: https://api.slack.com/reference/block-kit/blocks
        .. _convenient classes:
            https://github.com/slackapi/python-slackclient/tree/master/slack/web/classes

        :param channel: :py:class:`~machine.models.channel.Channel` object or id of channel
            to send message to. Can be public or private (group) channel, or DM channel.
        :param text: message text
        :param attachments: optional attachments (see `attachments`_)
        :param blocks: optional blocks (see `blocks`_)
        :param thread_ts: optional timestamp of thread, to send a message in that thread
        :param ephemeral_user: optional user name or id if the message needs to be visible
            to a specific user only
        :return: Dictionary deserialized from `chat.postMessage`_ request, or
            `chat.postEphemeral`_ if `ephemeral_user` is True.

        .. _chat.postMessage: https://api.slack.com/methods/chat.postMessage
        .. _chat.postEphemeral: https://api.slack.com/methods/chat.postEphemeral
        """
        return self._client.send(channel, text=text, attachments=attachments, blocks=blocks,
                                 thread_ts=thread_ts, ephemeral_user=ephemeral_user, **kwargs)

    def say_scheduled(self, when: datetime, channel: Union[Channel, str], text: str,
                      attachments: Union[List[Attachment], List[Dict[str, Any]], None] = None,
                      blocks: Union[List[Block], List[Dict[str, Any]], None] = None,
                      thread_ts: Optional[str] = None,
                      ephemeral_user: Union[User, str, None] = None,
                      **kwargs):
        """Schedule a message to a channel

        This is the scheduled version of
        :py:meth:`~machine.plugins.base.MachineBasePlugin.say`. It behaves the same, but
        will send the message at the scheduled time.

        :param when: when you want the message to be sent, as :py:class:`datetime.datetime`
            instance
        :param channel: :py:class:`~machine.models.channel.Channel` object or id of channel
            to send message to. Can be public or private (group) channel, or DM channel.
        :param text: message text
        :param attachments: optional attachments (see `attachments`_)
        :param blocks: optional blocks (see `blocks`_)
        :param thread_ts: optional timestamp of thread, to send a message in that thread
        :param ephemeral_user: optional :py:class:`~machine.models.user.User` object or id
            of user if the message needs to be visible to that specific user only
        :return: None

        .. _attachments: https://api.slack.com/docs/message-attachments
        .. _blocks: https://api.slack.com/reference/block-kit/blocks
        """
        self._client.send_scheduled(when, channel, text=text, attachments=attachments,
                                    blocks=blocks, thread_ts=thread_ts,
                                    ephemeral_user=ephemeral_user, **kwargs)

    def react(self, channel: Union[Channel, str], ts: str, emoji: str):
        """React to a message in a channel

        Add a reaction to a message in a channel. What message to react to, is determined
        by the combination of the channel and the timestamp of the message.

        :param channel: :py:class:`~machine.models.channel.Channel` object or id of channel
            to send message to. Can be public or private (group) channel, or DM channel.
        :param ts: timestamp of the message to react to
        :param emoji: what emoji to react with (should be a string, like 'angel',
            'thumbsup', etc.)
        :return: Dictionary deserialized from `reactions.add`_ request.

        .. _reactions.add: https://api.slack.com/methods/reactions.add
        """
        return self._client.react(channel, ts, emoji)

    def send_dm(self, user: Union[User, str], text: str,
                attachments: Union[List[Attachment], List[Dict[str, Any]], None] = None,
                blocks: Union[List[Block], List[Dict[str, Any]], None] = None,
                **kwargs):
        """Send a Direct Message

        Send a Direct Message to a user by opening a DM channel and sending a message to
        it. Allows for rich formatting using `blocks`_ and/or `attachments`_. You can
        provide blocks and attachments as Python dicts or you can use the
        `convenient classes`_ that the underlying slack client provides.
        Any extra kwargs you provide, will be passed on directly to the
        `chat.postMessage`_ request.

        .. _attachments: https://api.slack.com/docs/message-attachments
        .. _blocks: https://api.slack.com/reference/block-kit/blocks
        .. _convenient classes:
            https://github.com/slackapi/python-slackclient/tree/master/slack/web/classes

        :param user: :py:class:`~machine.models.user.User` object or id of user to send
            DM to.
        :param text: message text
        :param attachments: optional attachments (see `attachments`_)
        :param blocks: optional blocks (see `blocks`_)
        :return: Dictionary deserialized from `chat.postMessage`_ request.

        .. _chat.postMessage: https://api.slack.com/methods/chat.postMessage
        """
        return self._client.send_dm(user, text, attachments=attachments, blocks=blocks,
                                    **kwargs)

    def send_dm_scheduled(self, when: datetime, user: Union[User, str], text: str,
                          attachments: Union[List[Attachment], List[Dict[str, Any]], None] = None,
                          blocks: Union[List[Block], List[Dict[str, Any]], None] = None,
                          **kwargs):
        """Schedule a Direct Message

        This is the scheduled version of
        :py:meth:`~machine.plugins.base.MachineBasePlugin.send_dm`. It behaves the same,
        but will send the DM at the scheduled time.

        :param when: when you want the message to be sent, as :py:class:`datetime.datetime`
            instance
        :param user: :py:class:`~machine.models.user.User` object or id of user to send
            DM to.
        :param text: message text
        :param attachments: optional attachments (see `attachments`_)
        :param blocks: optional blocks (see `blocks`_)
        :return: None

        .. _attachments: https://api.slack.com/docs/message-attachments
        .. _blocks: https://api.slack.com/reference/block-kit/blocks
        """
        self._client.send_dm_scheduled(when, user, text=text, attachments=attachments,
                                       blocks=blocks, **kwargs)

    def emit(self, event: str, **kwargs):
        """Emit an event

        Emit an event that plugins can listen for. You can include arbitrary data as
        keyword arguments.

        :param event: name of the event
        :param kwargs: any data you want to emit with the event
        :return: None
        """
        # blinker named signal: all subscribers to `event` receive `self` as
        # sender plus the kwargs payload.
        e = signal(event)
        e.send(self, **kwargs)


class Message:
    """A message that was received by the bot

    This class represents a message that was received by the bot and passed to one or more
    plugins. It contains the message (text) itself, and metadata about the message, such as
    the sender of the message, the channel the message was sent to.

    The ``Message`` class also contains convenience methods for replying to the message in
    the right channel, replying to the sender, etc.
    """

    def __init__(self, client: SlackClient, msg_event: Dict[str, Any], plugin_class_name: str):
        self._client = client
        # Raw Slack message event payload; properties below read from it.
        self._msg_event = msg_event
        self._fq_plugin_name = plugin_class_name

    @property
    def sender(self) -> User:
        """The sender of the message

        :return: the User the message was sent by
        """
        return self._client.users[self._msg_event['user']]

    @property
    def channel(self) -> Channel:
        """The channel the message was sent to

        :return: the Channel the message was sent to
        """
        return self._client.channels[self._msg_event['channel']]

    @property
    def is_dm(self) -> bool:
        # Slack channel-id prefix heuristic: 'C' = public channel, 'G' =
        # group/private channel; anything else (typically 'D') is treated as
        # a DM. NOTE(review): presumably matches Slack's id scheme — confirm
        # multi-party IMs are covered.
        channel_id = self._msg_event['channel']
        return not (channel_id.startswith('C') or channel_id.startswith('G'))

    @property
    def text(self) -> str:
        """The body of the actual message

        :return: the body (text) of the actual message
        """
        return self._msg_event['text']

    @property
    def at_sender(self) -> str:
        """The sender of the message formatted as mention

        :return: a string representation of the sender of the message, formatted as
            `mention`_, to be used in messages

        .. _mention: https://api.slack.com/docs/message-formatting#linking_to_channels_and_users
        """
        return self.sender.fmt_mention()

    def say(self, text: str,
            attachments: Union[List[Attachment], List[Dict[str, Any]], None] = None,
            blocks: Union[List[Block], List[Dict[str, Any]], None] = None,
            thread_ts: Optional[str] = None, ephemeral: bool = False, **kwargs):
        """Send a new message to the channel the original message was received in

        Send a new message to the channel the original message was received in, using the
        WebAPI. Allows for rich formatting using `blocks`_ and/or `attachments`_. You can
        provide blocks and attachments as Python dicts or you can use the
        `convenient classes`_ that the underlying slack client provides.
        Can also reply to a thread and send an ephemeral message only visible to the sender
        of the original message. Ephemeral messages and threaded messages are mutually
        exclusive, and ``ephemeral`` takes precedence over ``thread_ts``
        Any extra kwargs you provide, will be passed on directly to the `chat.postMessage`_
        or `chat.postEphemeral`_ request.

        .. _attachments: https://api.slack.com/docs/message-attachments
        .. _blocks: https://api.slack.com/reference/block-kit/blocks
        .. _convenient classes:
            https://github.com/slackapi/python-slackclient/tree/master/slack/web/classes

        :param text: message text
        :param attachments: optional attachments (see `attachments`_)
        :param blocks: optional blocks (see `blocks`_)
        :param thread_ts: optional timestamp of thread, to send a message in that thread
        :param ephemeral: ``True/False`` whether to send the message as an ephemeral
            message, only visible to the sender of the original message
        :return: Dictionary deserialized from `chat.postMessage`_ request, or
            `chat.postEphemeral`_ if `ephemeral` is True.

        .. _chat.postMessage: https://api.slack.com/methods/chat.postMessage
        .. _chat.postEphemeral: https://api.slack.com/methods/chat.postEphemeral
        """
        if ephemeral:
            ephemeral_user = self.sender.id
        else:
            ephemeral_user = None

        return self._client.send(
            self.channel.id,
            text=text,
            attachments=attachments,
            blocks=blocks,
            thread_ts=thread_ts,
            ephemeral_user=ephemeral_user,
            **kwargs
        )

    def say_scheduled(self, when: datetime, text: str,
                      attachments: Union[List[Attachment], List[Dict[str, Any]], None] = None,
                      blocks: Union[List[Block], List[Dict[str, Any]], None] = None,
                      thread_ts: Optional[str] = None, ephemeral: bool = False, **kwargs):
        """Schedule a message

        This is the scheduled version of :py:meth:`~machine.plugins.base.Message.say`.
        It behaves the same, but will send the message at the scheduled time.

        :param when: when you want the message to be sent, as :py:class:`datetime.datetime`
            instance
        :param text: message text
        :param attachments: optional attachments (see `attachments`_)
        :param blocks: optional blocks (see `blocks`_)
        :param thread_ts: optional timestamp of thread, to send a message in that thread
        :param ephemeral: ``True/False`` whether to send the message as an ephemeral
            message, only visible to the sender of the original message
        :return: None

        .. _attachments: https://api.slack.com/docs/message-attachments
        .. _blocks: https://api.slack.com/reference/block-kit/blocks
        """
        if ephemeral:
            ephemeral_user = self.sender.id
        else:
            ephemeral_user = None

        self._client.send_scheduled(when, self.channel.id, text=text,
                                    attachments=attachments, blocks=blocks,
                                    thread_ts=thread_ts, ephemeral_user=ephemeral_user,
                                    **kwargs)

    def reply(self, text,
              attachments: Union[List[Attachment], List[Dict[str, Any]], None] = None,
              blocks: Union[List[Block], List[Dict[str, Any]], None] = None,
              in_thread: bool = False, ephemeral: bool = False, **kwargs):
        """Reply to the sender of the original message

        Reply to the sender of the original message with a new message, mentioning that
        user. Rich formatting using `blocks`_ and/or `attachments`_ is possible. You can
        provide blocks and attachments as Python dicts or you can use the
        `convenient classes`_ that the underlying slack client provides.
        Can also reply to a thread and send an ephemeral message only visible to the sender
        of the original message. In the case of in-thread response, the sender of the
        original message will not be mentioned. Ephemeral messages and threaded messages
        are mutually exclusive, and ``ephemeral`` takes precedence over ``in_thread``
        Any extra kwargs you provide, will be passed on directly to the `chat.postMessage`_
        or `chat.postEphemeral`_ request.

        .. _attachments: https://api.slack.com/docs/message-attachments
        .. _blocks: https://api.slack.com/reference/block-kit/blocks
        .. _convenient classes:
            https://github.com/slackapi/python-slackclient/tree/master/slack/web/classes

        :param text: message text
        :param attachments: optional attachments (see `attachments`_)
        :param blocks: optional blocks (see `blocks`_)
        :param in_thread: ``True/False`` whether to reply to the original message in-thread
        :param ephemeral: ``True/False`` whether to send the message as an ephemeral
            message, only visible to the sender of the original message
        :return: Dictionary deserialized from `chat.postMessage`_ request, or
            `chat.postEphemeral`_ if `ephemeral` is True.

        .. _chat.postMessage: https://api.slack.com/methods/chat.postMessage
        .. _chat.postEphemeral: https://api.slack.com/methods/chat.postEphemeral
        """
        if in_thread and not ephemeral:
            return self.say(text, attachments=attachments, blocks=blocks, thread_ts=self.ts,
                            **kwargs)
        else:
            # Prepend an @-mention of the sender (skipped for DMs, see
            # _create_reply).
            text = self._create_reply(text)
            return self.say(text, attachments=attachments, blocks=blocks,
                            ephemeral=ephemeral, **kwargs)

    def reply_scheduled(self, when: datetime, text: str,
                        attachments: Union[List[Attachment], List[Dict[str, Any]], None] = None,
                        blocks: Union[List[Block], List[Dict[str, Any]], None] = None,
                        in_thread: bool = False, ephemeral: bool = False, **kwargs):
        """Schedule a reply and send it

        This is the scheduled version of :py:meth:`~machine.plugins.base.Message.reply`.
        It behaves the same, but will send the reply at the scheduled time.

        :param when: when you want the message to be sent, as :py:class:`datetime.datetime`
            instance
        :param text: message text
        :param attachments: optional attachments (see `attachments`_)
        :param blocks: optional blocks (see `blocks`_)
        :param in_thread: ``True/False`` whether to reply to the original message in-thread
        :param ephemeral: ``True/False`` whether to send the message as an ephemeral
            message, only visible to the sender of the original message
        :return: None

        .. _attachments: https://api.slack.com/docs/message-attachments
        .. _blocks: https://api.slack.com/reference/block-kit/blocks
        """
        if in_thread and not ephemeral:
            return self.say_scheduled(when, text, attachments=attachments, blocks=blocks,
                                      thread_ts=self.ts, **kwargs)
        else:
            text = self._create_reply(text)
            return self.say_scheduled(when, text, attachments=attachments, blocks=blocks,
                                      ephemeral=ephemeral, **kwargs)

    def reply_dm(self, text: str,
                 attachments: Union[List[Attachment], List[Dict[str, Any]], None] = None,
                 blocks: Union[List[Block], List[Dict[str, Any]], None] = None,
                 **kwargs):
        """Reply to the sender of the original message with a DM

        Reply in a Direct Message to the sender of the original message by opening a DM
        channel and sending a message to it. Allows for rich formatting using `blocks`_
        and/or `attachments`_. You can provide blocks and attachments as Python dicts or
        you can use the `convenient classes`_ that the underlying slack client provides.
        Any extra kwargs you provide, will be passed on directly to the
        `chat.postMessage`_ request.

        .. _attachments: https://api.slack.com/docs/message-attachments
        .. _blocks: https://api.slack.com/reference/block-kit/blocks
        .. _convenient classes:
            https://github.com/slackapi/python-slackclient/tree/master/slack/web/classes

        :param text: message text
        :param attachments: optional attachments (see `attachments`_)
        :param blocks: optional blocks (see `blocks`_)
        :return: Dictionary deserialized from `chat.postMessage`_ request.

        .. _chat.postMessage: https://api.slack.com/methods/chat.postMessage
        """
        return self._client.send_dm(self.sender.id, text, attachments=attachments,
                                    blocks=blocks, **kwargs)

    def reply_dm_scheduled(self, when: datetime, text: str,
                           attachments: Union[List[Attachment], List[Dict[str, Any]], None] = None,
                           blocks: Union[List[Block], List[Dict[str, Any]], None] = None,
                           **kwargs):
        """Schedule a DM reply and send it

        This is the scheduled version of
        :py:meth:`~machine.plugins.base.Message.reply_dm`. It behaves the same, but will
        send the DM at the scheduled time.

        :param when: when you want the message to be sent, as :py:class:`datetime.datetime`
            instance
        :param text: message text
        :param attachments: optional attachments (see `attachments`_)
        :param blocks: optional blocks (see `blocks`_)
        :return: None

        .. _attachments: https://api.slack.com/docs/message-attachments
        .. _blocks: https://api.slack.com/reference/block-kit/blocks
        """
        self._client.send_dm_scheduled(when, self.sender.id, text=text,
                                       attachments=attachments, blocks=blocks, **kwargs)

    def react(self, emoji: str):
        """React to the original message

        Add a reaction to the original message

        :param emoji: what emoji to react with (should be a string, like 'angel',
            'thumbsup', etc.)
        :return: Dictionary deserialized from `reactions.add`_ request.

        .. _reactions.add: https://api.slack.com/methods/reactions.add
        """
        return self._client.react(self.channel.id, self._msg_event['ts'], emoji)

    def _create_reply(self, text):
        # Mention the sender in channels; in a DM the mention would be
        # redundant, so send the text as-is.
        if not self.is_dm:
            return f"{self.at_sender}: {text}"
        else:
            return text

    @property
    def ts(self) -> str:
        """The timestamp of the message

        :return: the timestamp of the message
        """
        return self._msg_event['ts']

    @property
    def in_thread(self):
        """Is message in a thread

        :return: bool
        """
        return 'thread_ts' in self._msg_event

    def __str__(self):
        if self.channel.is_im:
            message = "Message '{}', sent by user @{} in DM".format(
                self.text, self.sender.profile.real_name
            )
        else:
            message = "Message '{}', sent by user @{} in channel #{}".format(
                self.text, self.sender.profile.real_name, self.channel.name
            )
        return message

    def __repr__(self):
        return "Message(text={}, sender={}, channel={})".format(
            repr(self.text),
            repr(self.sender.profile.real_name),
            repr(self.channel.name)
        )
"""Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved."""
# NOTE(review): this module is Python 2 only — it relies on py2 integer
# division, dict.keys() indexing and xrange (flagged inline below).
from __future__ import absolute_import

import json
import os

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import PIL.Image
import skfmm

import digits
from digits.utils import subclass, override
from digits.utils.constants import COLOR_PALETTE_ATTRIBUTE

from .forms import ConfigForm
from ..interface import VisualizationInterface

CONFIG_TEMPLATE = "config_template.html"
HEADER_TEMPLATE = "header_template.html"
APP_BEGIN_TEMPLATE = "app_begin_template.html"
APP_END_TEMPLATE = "app_end_template.html"
VIEW_TEMPLATE = "view_template.html"


@subclass
class Visualization(VisualizationInterface):
    """A visualization extension to display the network output as an image."""

    def __init__(self, dataset, **kwargs):
        """Constructor for Visualization class.

        :param dataset: dataset job; its extension_userdata may carry a color
            palette and class labels — TODO confirm exact type.
        :type dataset:
        :param kwargs: view options; must contain 'colormap' with value
            'dataset', 'paired' or 'none'.
        :type kwargs:
        """
        # memorize view template for later use
        extension_dir = os.path.dirname(os.path.abspath(__file__))
        self.view_template = open(
            os.path.join(extension_dir, VIEW_TEMPLATE), "r").read()
        # view options
        if kwargs['colormap'] == 'dataset':
            if COLOR_PALETTE_ATTRIBUTE not in dataset.extension_userdata or \
                    not dataset.extension_userdata[COLOR_PALETTE_ATTRIBUTE]:
                raise ValueError("No palette found in dataset - choose other colormap")
            palette = dataset.extension_userdata[COLOR_PALETTE_ATTRIBUTE]
            # assume 8-bit RGB palette and convert to N*3 numpy array
            # NOTE(review): "len(palette) / 3" is py2 integer division; on
            # py3 it yields a float and reshape() would fail.
            palette = np.array(palette).reshape((len(palette) / 3, 3)) / 255.
            # normalize input pixels to [0,1]
            norm = mpl.colors.Normalize(vmin=0, vmax=255)
            # create map
            cmap = mpl.colors.ListedColormap(palette)
            self.map = plt.cm.ScalarMappable(norm=norm, cmap=cmap)
        elif kwargs['colormap'] == 'paired':
            cmap = plt.cm.get_cmap('Paired')
            self.map = plt.cm.ScalarMappable(norm=None, cmap=cmap)
        elif kwargs['colormap'] == 'none':
            # No colormap: class ids are rendered as grey levels instead.
            self.map = None
        else:
            raise ValueError("Unknown color map option: %s" % kwargs['colormap'])
        # memorize class labels
        if 'class_labels' in dataset.extension_userdata:
            self.class_labels = dataset.extension_userdata['class_labels']
        else:
            self.class_labels = None

    @staticmethod
    def get_config_form():
        """Utility function.

        returns: ConfigForm().
        """
        return ConfigForm()

    @staticmethod
    def get_config_template(form):
        """Get the template and context.

        parameters:
        - form: form returned by get_config_form(). This may be populated
          with values if the job was cloned
        returns:
        - (template, context) tuple
          - template is a Jinja template to use for rendering config options
          - context is a dictionary of context variables to use for rendering
            the form
        """
        extension_dir = os.path.dirname(os.path.abspath(__file__))
        template = open(
            os.path.join(extension_dir, CONFIG_TEMPLATE), "r").read()
        return (template, {'form': form})

    def get_legend_for(self, found_classes, skip_classes=[]):
        """Return the legend color image squares and text for each class.

        :param found_classes: list of class indices
        :param skip_classes: list of class indices to skip
        :return: list of dicts of text hex_color for each class
        """
        # NOTE(review): mutable default argument for skip_classes — harmless
        # here because it is never mutated, but a tuple would be safer.
        legend = []
        for c in (x for x in found_classes if x not in skip_classes):
            # create hex color associated with the category ID
            if self.map:
                rgb_color = self.map.to_rgba([c])[0, :3]
                hex_color = mpl.colors.rgb2hex(rgb_color)
            else:
                # make a grey scale hex color
                h = hex(int(c)).split('x')[1].zfill(2)
                hex_color = '#%s%s%s' % (h, h, h)
            if self.class_labels:
                text = self.class_labels[int(c)]
            else:
                text = "Class #%d" % c
            legend.append({'index': c, 'text': text, 'hex_color': hex_color})
        return legend

    @override
    def get_header_template(self):
        """Implement get_header_template method from view extension interface."""
        extension_dir = os.path.dirname(os.path.abspath(__file__))
        template = open(
            os.path.join(extension_dir, HEADER_TEMPLATE), "r").read()
        return template, {}

    @override
    def get_ng_templates(self):
        """Implement get_ng_templates method from view extension interface."""
        extension_dir = os.path.dirname(os.path.abspath(__file__))
        header = open(os.path.join(extension_dir, APP_BEGIN_TEMPLATE), "r").read()
        footer = open(os.path.join(extension_dir, APP_END_TEMPLATE), "r").read()
        return header, footer

    @staticmethod
    def get_id():
        """returns: id string that identifies the extension."""
        return 'image-segmentation'

    @staticmethod
    def get_title():
        """returns: name string to display in html."""
        return 'Image Segmentation'

    @staticmethod
    def get_dirname():
        """returns: extension dir name to locate static dir."""
        return 'imageSegmentation'

    @override
    def get_view_template(self, data):
        """Get the view template.

        returns:
        - (template, context) tuple
          - template is a Jinja template to use for rendering config options
          - context is a dictionary of context variables to use for rendering
            the form
        """
        return self.view_template, {
            'input_id': data['input_id'],
            'input_image': digits.utils.image.embed_image_html(data['input_image']),
            'fill_image': digits.utils.image.embed_image_html(data['fill_image']),
            'line_image': digits.utils.image.embed_image_html(data['line_image']),
            'seg_image': digits.utils.image.embed_image_html(data['seg_image']),
            'mask_image': digits.utils.image.embed_image_html(data['mask_image']),
            'legend': data['legend'],
            'is_binary': data['is_binary'],
            'class_data': json.dumps(data['class_data'].tolist()),
        }

    @override
    def process_data(self, input_id, input_data, output_data):
        """Process one inference and return data to visualize."""
        # assume the only output is a CHW image where C is the number
        # of classes, H and W are the height and width of the image
        # NOTE(review): keys()[0] indexes a py2 list; on py3 dict.keys()
        # is a view and this raises TypeError.
        class_data = output_data[output_data.keys()[0]].astype('float32')
        # Is this binary segmentation?
        is_binary = class_data.shape[0] == 2
        # retain only the top class for each pixel
        class_data = np.argmax(class_data, axis=0).astype('uint8')
        # remember the classes we found
        found_classes = np.unique(class_data)

        # convert using color map (assume 8-bit output)
        if self.map:
            fill_data = (self.map.to_rgba(class_data) * 255).astype('uint8')
        else:
            # no colormap: replicate the class id into R, G and B (grey).
            fill_data = np.ndarray((class_data.shape[0], class_data.shape[1], 4),
                                   dtype='uint8')
            for x in xrange(3):
                fill_data[:, :, x] = class_data.copy()

        # Assuming that class 0 is the background
        mask = np.greater(class_data, 0)
        fill_data[:, :, 3] = mask * 255

        line_data = fill_data.copy()
        seg_data = fill_data.copy()

        # Black mask of non-segmented pixels
        mask_data = np.zeros(fill_data.shape, dtype='uint8')
        mask_data[:, :, 3] = (1 - mask) * 255

        def normalize(array):
            # Rescale an arbitrary-range array into [0, 255]; constant arrays
            # map to 0.
            mn = array.min()
            mx = array.max()
            return (array - mn) * 255 / (mx - mn) if (mx - mn) > 0 else array * 255

        try:
            PIL.Image.fromarray(input_data)
        except TypeError:
            # If input_data can not be converted to an image,
            # normalize and convert to uint8
            input_data = normalize(input_data).astype('uint8')

        # Generate outlines around segmented classes
        if len(found_classes) > 1:
            # Assuming that class 0 is the background.
            line_mask = np.zeros(class_data.shape, dtype=bool)
            max_distance = np.zeros(class_data.shape, dtype=float) + 1
            for c in (x for x in found_classes if x != 0):
                c_mask = np.equal(class_data, c)
                # Find the signed distance from the zero contour
                distance = skfmm.distance(c_mask.astype('float32') - 0.5)
                # Accumulate the mask for all classes
                line_width = 3
                line_mask |= c_mask & np.less(distance, line_width)
                max_distance = np.maximum(max_distance, distance + 128)

            line_data[:, :, 3] = line_mask * 255
            # clamp the accumulated distance field into [0, 255] so it can be
            # used as an 8-bit alpha channel
            max_distance = np.maximum(max_distance,
                                      np.zeros(max_distance.shape, dtype=float))
            max_distance = np.minimum(max_distance,
                                      np.zeros(max_distance.shape, dtype=float) + 255)
            seg_data[:, :, 3] = max_distance

        # Input image with outlines
        input_max = input_data.max()
        input_min = input_data.min()
        input_range = input_max - input_min
        if input_range > 255:
            input_data = (input_data - input_min) * 255.0 / input_range
        elif input_min < 0:
            input_data -= input_min
        input_image = PIL.Image.fromarray(input_data.astype('uint8'))
        input_image.format = 'png'

        # Fill image
        fill_image = PIL.Image.fromarray(fill_data)
        fill_image.format = 'png'

        # Fill image
        line_image = PIL.Image.fromarray(line_data)
        line_image.format = 'png'

        # Seg image
        seg_image = PIL.Image.fromarray(seg_data)
        seg_image.format = 'png'
        # NOTE(review): writes into the process CWD on every inference —
        # looks like leftover debug code; confirm before removing.
        seg_image.save('seg.png')

        # Mask image
        mask_image = PIL.Image.fromarray(mask_data)
        mask_image.format = 'png'

        # legend for this instance
        legend = self.get_legend_for(found_classes, skip_classes=[0])

        return {
            'input_id': input_id,
            'input_image': input_image,
            'fill_image': fill_image,
            'line_image': line_image,
            'seg_image': seg_image,
            'mask_image': mask_image,
            'legend': legend,
            'is_binary': is_binary,
            'class_data': class_data,
        }
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import base64
import fixtures
import sys
import traceback

import mock
import netaddr
import six

from nova.compute import manager
from nova import exception
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import test
from nova.tests.image import fake as fake_image
from nova.tests import utils as test_utils
from nova.tests.virt.libvirt import fake_libvirt_utils
from nova.virt import event as virtevent
from nova.virt import fake

LOG = logging.getLogger(__name__)


def catch_notimplementederror(f):
    """Decorator to simplify catching drivers raising NotImplementedError

    If a particular call makes a driver raise NotImplementedError, we
    log it so that we can extract this information afterwards to
    automatically generate a hypervisor/feature support matrix."""
    def wrapped_func(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except NotImplementedError:
            # Deliberately swallow the exception (returning None) after
            # logging which driver/method combination is unimplemented.
            frame = traceback.extract_tb(sys.exc_info()[2])[-1]
            LOG.error('%(driver)s does not implement %(method)s' % {
                'driver': type(self.connection),
                'method': frame[2]})

    # Manual name/doc copy (py2-era equivalent of functools.wraps) so the
    # test runner reports the wrapped test's own name.
    wrapped_func.__name__ = f.__name__
    wrapped_func.__doc__ = f.__doc__
    return wrapped_func


class _FakeDriverBackendTestCase(object):
    def _setup_fakelibvirt(self):
        # So that the _supports_direct_io does the test based
        # on the current working directory, instead of the
        # default instances_path which doesn't exist
        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)

        # Put fakelibvirt in place
        if 'libvirt' in sys.modules:
            self.saved_libvirt = sys.modules['libvirt']
        else:
            self.saved_libvirt = None

        import nova.tests.virt.libvirt.fake_imagebackend as fake_imagebackend
        import nova.tests.virt.libvirt.fake_libvirt_utils as fake_libvirt_utils
        import nova.tests.virt.libvirt.fakelibvirt as fakelibvirt

        sys.modules['libvirt'] = fakelibvirt
        import nova.virt.libvirt.driver
        import nova.virt.libvirt.firewall

        # Replace every libvirt touchpoint in the driver stack with fakes;
        # MonkeyPatch fixtures undo themselves on test cleanup.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.imagebackend',
            fake_imagebackend))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt',
            fakelibvirt))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.firewall.libvirt',
            fakelibvirt))

        self.flags(rescue_image_id="2",
                   rescue_kernel_id="3",
                   rescue_ramdisk_id=None,
                   snapshots_directory='./',
                   group='libvirt')

        def fake_extend(image, size):
            pass

        def fake_migrateToURI(*a):
            pass

        def fake_make_drive(_self, _path):
            pass

        def fake_get_instance_disk_info(_self, instance, xml=None,
                                        block_device_info=None):
            return '[]'

        def fake_delete_instance_files(_self, _instance):
            pass

        self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
                       'get_instance_disk_info',
                       fake_get_instance_disk_info)

        self.stubs.Set(nova.virt.libvirt.driver.disk, 'extend', fake_extend)

        self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
                       '_delete_instance_files',
                       fake_delete_instance_files)

        # Like the existing fakelibvirt.migrateToURI, do nothing,
        # but don't fail for these tests.
        self.stubs.Set(nova.virt.libvirt.driver.libvirt.Domain,
                       'migrateToURI', fake_migrateToURI)

        # We can't actually make a config drive v2 because ensure_tree has
        # been faked out
        self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
                       'make_drive', fake_make_drive)

    def _teardown_fakelibvirt(self):
        # Restore libvirt
        if self.saved_libvirt:
            sys.modules['libvirt'] = self.saved_libvirt

    def setUp(self):
        super(_FakeDriverBackendTestCase, self).setUp()
        # TODO(sdague): it would be nice to do this in a way that only
        # the relevant backends where replaced for tests, though this
        # should not harm anything by doing it for all backends
        fake_image.stub_out_image_service(self.stubs)
        self._setup_fakelibvirt()

    def tearDown(self):
        fake_image.FakeImageService_reset()
        self._teardown_fakelibvirt()
        super(_FakeDriverBackendTestCase, self).tearDown()


class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase):
    """Test that ComputeManager can successfully load both
    old style and new style drivers and end up with the correct
    final class.
    """

    # if your driver supports being tested in a fake way, it can go here
    #
    # both long form and short form drivers are supported
    new_drivers = {
        'nova.virt.fake.FakeDriver': 'FakeDriver',
        'nova.virt.libvirt.LibvirtDriver': 'LibvirtDriver',
        'fake.FakeDriver': 'FakeDriver',
        'libvirt.LibvirtDriver': 'LibvirtDriver'
    }

    def test_load_new_drivers(self):
        # py2-only: dict.iteritems() does not exist on py3.
        for cls, driver in self.new_drivers.iteritems():
            self.flags(compute_driver=cls)
            # NOTE(sdague) the try block is to make it easier to debug a
            # failure by knowing which driver broke
            try:
                cm = manager.ComputeManager()
            except Exception as e:
                self.fail("Couldn't load driver %s - %s" % (cls, e))

            self.assertEqual(cm.driver.__class__.__name__, driver,
                             "Could't load driver %s" % cls)

    def test_fail_to_load_new_drivers(self):
        self.flags(compute_driver='nova.virt.amiga')

        def _fake_exit(error):
            raise test.TestingException()

        # ComputeManager calls sys.exit() on a bad driver; stub it to raise
        # so the failure is observable as an exception instead.
        self.stubs.Set(sys, 'exit', _fake_exit)
        self.assertRaises(test.TestingException, manager.ComputeManager)


class _VirtDriverTestCase(_FakeDriverBackendTestCase):
    def setUp(self):
        super(_VirtDriverTestCase, self).setUp()

        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
        self.connection = importutils.import_object(self.driver_module,
                                                    fake.FakeVirtAPI())
        self.ctxt = test_utils.get_test_admin_context()
        self.image_service = fake_image.FakeImageService()

    def _get_running_instance(self, obj=False):
        # Spawn a test instance on the fake driver and return it together
        # with its network info.
        instance_ref = test_utils.get_test_instance(obj=obj)
        network_info = test_utils.get_test_network_info()
        network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
            '1.1.1.1'
        image_info = test_utils.get_test_image_info(None, instance_ref)
        self.connection.spawn(self.ctxt, instance_ref, image_info,
                              [], 'herp', network_info=network_info)
        return instance_ref, network_info

    @catch_notimplementederror
    def test_init_host(self):
        self.connection.init_host('myhostname')

    @catch_notimplementederror
    def test_list_instances(self):
        self.connection.list_instances()

    # (chunk ends here: the next test's "def" is cut off mid-definition)
    @catch_notimplementederror
    def
test_list_instance_uuids(self): self.connection.list_instance_uuids() @catch_notimplementederror def test_spawn(self): instance_ref, network_info = self._get_running_instance() domains = self.connection.list_instances() self.assertIn(instance_ref['name'], domains) num_instances = self.connection.get_num_instances() self.assertEqual(1, num_instances) @catch_notimplementederror def test_snapshot_not_running(self): instance_ref = test_utils.get_test_instance() img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'}) self.assertRaises(exception.InstanceNotRunning, self.connection.snapshot, self.ctxt, instance_ref, img_ref['id'], lambda *args, **kwargs: None) @catch_notimplementederror def test_snapshot_running(self): img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'}) instance_ref, network_info = self._get_running_instance() self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'], lambda *args, **kwargs: None) @catch_notimplementederror def test_reboot(self): reboot_type = "SOFT" instance_ref, network_info = self._get_running_instance() self.connection.reboot(self.ctxt, instance_ref, network_info, reboot_type) @catch_notimplementederror def test_get_host_ip_addr(self): host_ip = self.connection.get_host_ip_addr() # Will raise an exception if it's not a valid IP at all ip = netaddr.IPAddress(host_ip) # For now, assume IPv4. 
self.assertEqual(ip.version, 4) @catch_notimplementederror def test_set_admin_password(self): instance, network_info = self._get_running_instance(obj=True) self.connection.set_admin_password(instance, 'p4ssw0rd') @catch_notimplementederror def test_inject_file(self): instance_ref, network_info = self._get_running_instance() self.connection.inject_file(instance_ref, base64.b64encode('/testfile'), base64.b64encode('testcontents')) @catch_notimplementederror def test_resume_state_on_host_boot(self): instance_ref, network_info = self._get_running_instance() self.connection.resume_state_on_host_boot(self.ctxt, instance_ref, network_info) @catch_notimplementederror def test_rescue(self): instance_ref, network_info = self._get_running_instance() self.connection.rescue(self.ctxt, instance_ref, network_info, None, '') @catch_notimplementederror def test_unrescue_unrescued_instance(self): instance_ref, network_info = self._get_running_instance() self.connection.unrescue(instance_ref, network_info) @catch_notimplementederror def test_unrescue_rescued_instance(self): instance_ref, network_info = self._get_running_instance() self.connection.rescue(self.ctxt, instance_ref, network_info, None, '') self.connection.unrescue(instance_ref, network_info) @catch_notimplementederror def test_poll_rebooting_instances(self): instances = [self._get_running_instance()] self.connection.poll_rebooting_instances(10, instances) @catch_notimplementederror def test_migrate_disk_and_power_off(self): instance_ref, network_info = self._get_running_instance() flavor_ref = test_utils.get_test_flavor() self.connection.migrate_disk_and_power_off( self.ctxt, instance_ref, 'dest_host', flavor_ref, network_info) @catch_notimplementederror def test_power_off(self): instance_ref, network_info = self._get_running_instance() self.connection.power_off(instance_ref) @catch_notimplementederror def test_power_on_running(self): instance_ref, network_info = self._get_running_instance() 
self.connection.power_on(self.ctxt, instance_ref, network_info, None) @catch_notimplementederror def test_power_on_powered_off(self): instance_ref, network_info = self._get_running_instance() self.connection.power_off(instance_ref) self.connection.power_on(self.ctxt, instance_ref, network_info, None) @catch_notimplementederror def test_soft_delete(self): instance_ref, network_info = self._get_running_instance() self.connection.soft_delete(instance_ref) @catch_notimplementederror def test_restore_running(self): instance_ref, network_info = self._get_running_instance() self.connection.restore(instance_ref) @catch_notimplementederror def test_restore_soft_deleted(self): instance_ref, network_info = self._get_running_instance() self.connection.soft_delete(instance_ref) self.connection.restore(instance_ref) @catch_notimplementederror def test_pause(self): instance_ref, network_info = self._get_running_instance() self.connection.pause(instance_ref) @catch_notimplementederror def test_unpause_unpaused_instance(self): instance_ref, network_info = self._get_running_instance() self.connection.unpause(instance_ref) @catch_notimplementederror def test_unpause_paused_instance(self): instance_ref, network_info = self._get_running_instance() self.connection.pause(instance_ref) self.connection.unpause(instance_ref) @catch_notimplementederror def test_suspend(self): instance_ref, network_info = self._get_running_instance() self.connection.suspend(instance_ref) @catch_notimplementederror def test_resume_unsuspended_instance(self): instance_ref, network_info = self._get_running_instance() self.connection.resume(self.ctxt, instance_ref, network_info) @catch_notimplementederror def test_resume_suspended_instance(self): instance_ref, network_info = self._get_running_instance() self.connection.suspend(instance_ref) self.connection.resume(self.ctxt, instance_ref, network_info) @catch_notimplementederror def test_destroy_instance_nonexistent(self): fake_instance = {'id': 42, 'name': 'I 
just made this up!', 'uuid': 'bda5fb9e-b347-40e8-8256-42397848cb00'} network_info = test_utils.get_test_network_info() self.connection.destroy(self.ctxt, fake_instance, network_info) @catch_notimplementederror def test_destroy_instance(self): instance_ref, network_info = self._get_running_instance() self.assertIn(instance_ref['name'], self.connection.list_instances()) self.connection.destroy(self.ctxt, instance_ref, network_info) self.assertNotIn(instance_ref['name'], self.connection.list_instances()) @catch_notimplementederror def test_get_volume_connector(self): result = self.connection.get_volume_connector({'id': 'fake'}) self.assertIn('ip', result) self.assertIn('initiator', result) self.assertIn('host', result) @catch_notimplementederror def test_attach_detach_volume(self): instance_ref, network_info = self._get_running_instance() connection_info = { "driver_volume_type": "fake", "serial": "fake_serial", } self.connection.attach_volume(None, connection_info, instance_ref, '/dev/sda') self.connection.detach_volume(connection_info, instance_ref, '/dev/sda') @catch_notimplementederror def test_swap_volume(self): instance_ref, network_info = self._get_running_instance() self.connection.attach_volume(None, {'driver_volume_type': 'fake'}, instance_ref, '/dev/sda') self.connection.swap_volume({'driver_volume_type': 'fake'}, {'driver_volume_type': 'fake'}, instance_ref, '/dev/sda') @catch_notimplementederror def test_attach_detach_different_power_states(self): instance_ref, network_info = self._get_running_instance() connection_info = { "driver_volume_type": "fake", "serial": "fake_serial", } self.connection.power_off(instance_ref) self.connection.attach_volume(None, connection_info, instance_ref, '/dev/sda') bdm = { 'root_device_name': None, 'swap': None, 'ephemerals': [], 'block_device_mapping': [{ 'instance_uuid': instance_ref['uuid'], 'connection_info': {'driver_volume_type': 'fake'}, 'mount_device': '/dev/sda', 'delete_on_termination': False, 'virtual_name': 
None, 'snapshot_id': None, 'volume_id': 'abcdedf', 'volume_size': None, 'no_device': None }] } self.connection.power_on(self.ctxt, instance_ref, network_info, bdm) self.connection.detach_volume(connection_info, instance_ref, '/dev/sda') @catch_notimplementederror def test_get_info(self): instance_ref, network_info = self._get_running_instance() info = self.connection.get_info(instance_ref) self.assertIn('state', info) self.assertIn('max_mem', info) self.assertIn('mem', info) self.assertIn('num_cpu', info) self.assertIn('cpu_time', info) @catch_notimplementederror def test_get_info_for_unknown_instance(self): self.assertRaises(exception.NotFound, self.connection.get_info, {'name': 'I just made this name up'}) @catch_notimplementederror def test_get_diagnostics(self): instance_ref, network_info = self._get_running_instance() self.connection.get_diagnostics(instance_ref) @catch_notimplementederror def test_block_stats(self): instance_ref, network_info = self._get_running_instance() stats = self.connection.block_stats(instance_ref['name'], 'someid') self.assertEqual(len(stats), 5) @catch_notimplementederror def test_interface_stats(self): instance_ref, network_info = self._get_running_instance() stats = self.connection.interface_stats(instance_ref['name'], 'someid') self.assertEqual(len(stats), 8) @catch_notimplementederror def test_get_console_output(self): fake_libvirt_utils.files['dummy.log'] = '' instance_ref, network_info = self._get_running_instance() console_output = self.connection.get_console_output(self.ctxt, instance_ref) self.assertIsInstance(console_output, six.string_types) @catch_notimplementederror def test_get_vnc_console(self): instance, network_info = self._get_running_instance(obj=True) vnc_console = self.connection.get_vnc_console(self.ctxt, instance) self.assertIn('internal_access_path', vnc_console) self.assertIn('host', vnc_console) self.assertIn('port', vnc_console) @catch_notimplementederror def test_get_spice_console(self): instance_ref, 
network_info = self._get_running_instance() spice_console = self.connection.get_spice_console(self.ctxt, instance_ref) self.assertIn('internal_access_path', spice_console) self.assertIn('host', spice_console) self.assertIn('port', spice_console) self.assertIn('tlsPort', spice_console) @catch_notimplementederror def test_get_rdp_console(self): instance_ref, network_info = self._get_running_instance() rdp_console = self.connection.get_rdp_console(self.ctxt, instance_ref) self.assertIn('internal_access_path', rdp_console) self.assertIn('host', rdp_console) self.assertIn('port', rdp_console) @catch_notimplementederror def test_get_console_pool_info(self): instance_ref, network_info = self._get_running_instance() console_pool = self.connection.get_console_pool_info(instance_ref) self.assertIn('address', console_pool) self.assertIn('username', console_pool) self.assertIn('password', console_pool) @catch_notimplementederror def test_refresh_security_group_rules(self): # FIXME: Create security group and add the instance to it instance_ref, network_info = self._get_running_instance() self.connection.refresh_security_group_rules(1) @catch_notimplementederror def test_refresh_security_group_members(self): # FIXME: Create security group and add the instance to it instance_ref, network_info = self._get_running_instance() self.connection.refresh_security_group_members(1) @catch_notimplementederror def test_refresh_provider_fw_rules(self): instance_ref, network_info = self._get_running_instance() self.connection.refresh_provider_fw_rules() @catch_notimplementederror def test_ensure_filtering_for_instance(self): instance_ref = test_utils.get_test_instance() network_info = test_utils.get_test_network_info() self.connection.ensure_filtering_rules_for_instance(instance_ref, network_info) @catch_notimplementederror def test_unfilter_instance(self): instance_ref = test_utils.get_test_instance() network_info = test_utils.get_test_network_info() 
self.connection.unfilter_instance(instance_ref, network_info) @catch_notimplementederror def test_live_migration(self): instance_ref, network_info = self._get_running_instance() self.connection.live_migration(self.ctxt, instance_ref, 'otherhost', lambda *a: None, lambda *a: None) @catch_notimplementederror def _check_available_resource_fields(self, host_status): keys = ['vcpus', 'memory_mb', 'local_gb', 'vcpus_used', 'memory_mb_used', 'hypervisor_type', 'hypervisor_version', 'hypervisor_hostname', 'cpu_info', 'disk_available_least', 'supported_instances'] for key in keys: self.assertIn(key, host_status) @catch_notimplementederror def test_get_host_stats(self): host_status = self.connection.get_host_stats() self._check_available_resource_fields(host_status) self.assertTrue(isinstance(host_status['hypervisor_version'], int)) @catch_notimplementederror def test_get_available_resource(self): available_resource = self.connection.get_available_resource( 'myhostname') self._check_available_resource_fields(available_resource) @catch_notimplementederror def _check_host_cpu_status_fields(self, host_cpu_status): self.assertIn('kernel', host_cpu_status) self.assertIn('idle', host_cpu_status) self.assertIn('user', host_cpu_status) self.assertIn('iowait', host_cpu_status) self.assertIn('frequency', host_cpu_status) @catch_notimplementederror def test_get_host_cpu_stats(self): host_cpu_status = self.connection.get_host_cpu_stats() self._check_host_cpu_status_fields(host_cpu_status) @catch_notimplementederror def test_set_host_enabled(self): self.connection.set_host_enabled('a useless argument?', True) @catch_notimplementederror def test_get_host_uptime(self): self.connection.get_host_uptime('a useless argument?') @catch_notimplementederror def test_host_power_action_reboot(self): self.connection.host_power_action('a useless argument?', 'reboot') @catch_notimplementederror def test_host_power_action_shutdown(self): self.connection.host_power_action('a useless argument?', 
'shutdown') @catch_notimplementederror def test_host_power_action_startup(self): self.connection.host_power_action('a useless argument?', 'startup') @catch_notimplementederror def test_add_to_aggregate(self): self.connection.add_to_aggregate(self.ctxt, 'aggregate', 'host') @catch_notimplementederror def test_remove_from_aggregate(self): self.connection.remove_from_aggregate(self.ctxt, 'aggregate', 'host') def test_events(self): got_events = [] def handler(event): got_events.append(event) self.connection.register_event_listener(handler) event1 = virtevent.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", virtevent.EVENT_LIFECYCLE_STARTED) event2 = virtevent.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", virtevent.EVENT_LIFECYCLE_PAUSED) self.connection.emit_event(event1) self.connection.emit_event(event2) want_events = [event1, event2] self.assertEqual(want_events, got_events) event3 = virtevent.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", virtevent.EVENT_LIFECYCLE_RESUMED) event4 = virtevent.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", virtevent.EVENT_LIFECYCLE_STOPPED) self.connection.emit_event(event3) self.connection.emit_event(event4) want_events = [event1, event2, event3, event4] self.assertEqual(want_events, got_events) def test_event_bad_object(self): # Passing in something which does not inherit # from virtevent.Event def handler(event): pass self.connection.register_event_listener(handler) badevent = { "foo": "bar" } self.assertRaises(ValueError, self.connection.emit_event, badevent) def test_event_bad_callback(self): # Check that if a callback raises an exception, # it does not propagate back out of the # 'emit_event' call def handler(event): raise Exception("Hit Me!") self.connection.register_event_listener(handler) event1 = virtevent.LifecycleEvent( "cef19ce0-0ca2-11df-855d-b19fbce37686", virtevent.EVENT_LIFECYCLE_STARTED) self.connection.emit_event(event1) def test_set_bootable(self): 
        self.assertRaises(NotImplementedError, self.connection.set_bootable,
                          'instance', True)


class AbstractDriverTestCase(_VirtDriverTestCase, test.TestCase):
    """Run the common driver tests against the abstract base driver."""

    def setUp(self):
        self.driver_module = "nova.virt.driver.ComputeDriver"

        super(AbstractDriverTestCase, self).setUp()


class FakeConnectionTestCase(_VirtDriverTestCase, test.TestCase):
    """Run the common driver tests against the fake driver."""

    def setUp(self):
        self.driver_module = 'nova.virt.fake.FakeDriver'
        fake.set_nodes(['myhostname'])
        super(FakeConnectionTestCase, self).setUp()

    def _check_available_resource_fields(self, host_status):
        super(FakeConnectionTestCase, self)._check_available_resource_fields(
            host_status)

        hypervisor_type = host_status['hypervisor_type']
        supported_instances = host_status['supported_instances']
        try:
            # supported_instances could be JSON wrapped
            supported_instances = jsonutils.loads(supported_instances)
        except TypeError:
            pass
        self.assertTrue(any(hypervisor_type in x for x in
                            supported_instances))


class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
    """Run the common driver tests against the (fake-backed) libvirt driver."""

    def setUp(self):
        # Point _VirtDriverTestCase at the right module
        self.driver_module = 'nova.virt.libvirt.LibvirtDriver'
        super(LibvirtConnTestCase, self).setUp()
        self.stubs.Set(self.connection, 'set_host_enabled', mock.MagicMock())
        self.useFixture(fixtures.MonkeyPatch(
            'nova.context.get_admin_context',
            self._fake_admin_context))

    def _fake_admin_context(self, *args, **kwargs):
        # Hand the test's admin context to any code asking for one.
        return self.ctxt

    def test_force_hard_reboot(self):
        # A zero soft-reboot timeout forces the hard-reboot path.
        self.flags(wait_soft_reboot_seconds=0, group='libvirt')
        self.test_reboot()

    def test_migrate_disk_and_power_off(self):
        # there is lack of fake stuff to execute this method. so pass.
        self.skipTest("Test nothing, but this method"
                      " needed to override superclass.")

    def test_set_host_enabled(self):
        """A service error should auto-disable an enabled service."""
        self.mox.UnsetStubs()
        service_mock = mock.MagicMock()

        # Previous status of the service: disabled: False
        # service_mock.__getitem__.return_value = False
        service_mock.configure_mock(disabled_reason='None',
                                    disabled=False)
        from nova.objects import service as service_obj
        self.mox.StubOutWithMock(service_obj.Service,
                                 'get_by_compute_host')
        service_obj.Service.get_by_compute_host(self.ctxt,
                                    'fake-mini').AndReturn(service_mock)
        self.mox.ReplayAll()

        self.connection.set_host_enabled('my_test_host', 'ERROR!')
        self.assertTrue(service_mock.disabled)
        self.assertEqual(service_mock.disabled_reason, 'AUTO: ERROR!')

    def test_set_host_enabled_when_auto_disabled(self):
        """Re-enabling should clear an AUTO-set disabled reason."""
        self.mox.UnsetStubs()
        service_mock = mock.MagicMock()

        # Previous status of the service: disabled: True, 'AUTO: ERROR'
        service_mock.configure_mock(disabled_reason='AUTO: ERROR',
                                    disabled=True)
        from nova.objects import service as service_obj
        self.mox.StubOutWithMock(service_obj.Service,
                                 'get_by_compute_host')
        service_obj.Service.get_by_compute_host(self.ctxt,
                                    'fake-mini').AndReturn(service_mock)
        self.mox.ReplayAll()

        self.connection.set_host_enabled('my_test_host', True)
        self.assertFalse(service_mock.disabled)
        self.assertEqual(service_mock.disabled_reason, 'None')

    def test_set_host_enabled_when_manually_disabled(self):
        """Enabling must not touch a manually disabled service."""
        self.mox.UnsetStubs()
        service_mock = mock.MagicMock()

        # Previous status of the service: disabled: True, 'Manually disabled'
        service_mock.configure_mock(disabled_reason='Manually disabled',
                                    disabled=True)
        from nova.objects import service as service_obj
        self.mox.StubOutWithMock(service_obj.Service,
                                 'get_by_compute_host')
        service_obj.Service.get_by_compute_host(self.ctxt,
                                    'fake-mini').AndReturn(service_mock)
        self.mox.ReplayAll()

        self.connection.set_host_enabled('my_test_host', True)
        self.assertTrue(service_mock.disabled)
        self.assertEqual(service_mock.disabled_reason, 'Manually disabled')

    def test_set_host_enabled_dont_override_manually_disabled(self):
        """An error must not overwrite a manual disabled reason."""
        self.mox.UnsetStubs()
        service_mock = mock.MagicMock()

        # Previous status of the service: disabled: True, 'Manually disabled'
        service_mock.configure_mock(disabled_reason='Manually disabled',
                                    disabled=True)
        from nova.objects import service as service_obj
        self.mox.StubOutWithMock(service_obj.Service,
                                 'get_by_compute_host')
        service_obj.Service.get_by_compute_host(self.ctxt,
                                    'fake-mini').AndReturn(service_mock)
        self.mox.ReplayAll()

        self.connection.set_host_enabled('my_test_host', 'ERROR!')
        self.assertTrue(service_mock.disabled)
        self.assertEqual(service_mock.disabled_reason, 'Manually disabled')
# -*- encoding: utf-8 -*- # # Copyright Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import argparse import os import sys from argparse import ArgumentParser from datetime import datetime from dciclient.version import __version__ _default_dci_cs_url = "http://127.0.0.1:5000" _default_sso_url = "http://127.0.0.1:8180" def _create_boolean_flags(parser, flags, default, dest=None): flags = flags.split("/") dest = dest if dest else flags[0].strip("--") group = parser.add_mutually_exclusive_group() group.add_argument(flags[0], action="store_true", default=default, dest=dest) group.add_argument(flags[1], action="store_false", dest=dest) def _create_array_argument(parser, argument_name, help): parser.add_argument( argument_name, type=lambda x: [v.strip() for v in x.split(",")], help=help, default=[], ) def _date_isoformat(v): try: datetime.strptime(v, "%Y-%m-%dT%H:%M:%S.%f") except ValueError: raise argparse.ArgumentTypeError("'%s' is not an iso format date" % v) return v def parse_arguments(args, environment={}): base_parser = ArgumentParser(add_help=False) base_parser.add_argument("--verbose", "--long", default=False, action="store_true") parser = ArgumentParser(prog="dcictl") parser.add_argument( "--version", action="version", version="%(prog)s " + __version__ ) parser.add_argument( "--dci-login", default=environment.get("DCI_LOGIN", None), help="DCI login or 'DCI_LOGIN' environment variable.", ) parser.add_argument( "--dci-password", 
default=environment.get("DCI_PASSWORD", None), help="DCI password or 'DCI_PASSWORD' environment variable.", ) parser.add_argument( "--dci-client-id", default=environment.get("DCI_CLIENT_ID", None), help="DCI CLIENt ID or 'DCI_CLIENT_ID' environment variable.", ) parser.add_argument( "--dci-api-secret", default=environment.get("DCI_API_SECRET", None), help="DCI API secret or 'DCI_API_SECRET' environment variable.", ) parser.add_argument( "--dci-cs-url", default=environment.get("DCI_CS_URL", _default_dci_cs_url), help="DCI control server url, default to '%s'." % _default_dci_cs_url, ) parser.add_argument( "--sso-url", default=environment.get("SSO_URL", _default_sso_url), help="SSO url, default to '%s'." % _default_sso_url, ) parser.add_argument( "--sso-username", default=environment.get("SSO_USERNAME"), help="SSO username or 'SSO_USERNAME' environment variable.", ) parser.add_argument( "--sso-password", default=environment.get("SSO_PASSWORD"), help="SSO password or 'SSO_PASSWORD' environment variable.", ) parser.add_argument( "--sso-token", default=environment.get("SSO_TOKEN"), help="SSO token or 'SSO_TOKEN' environment variable.", ) parser.add_argument( "--refresh-sso-token", default=False, action="store_true", help="Refresh the token", ) parser.add_argument( "--format", default=os.environ.get("DCI_FORMAT", "table"), choices=["table", "json", "csv", "tsv"], help="Output format", ) subparsers = parser.add_subparsers() # user commands p = subparsers.add_parser( "user-list", help="List all users.", parents=[base_parser] ) p.add_argument("--sort", default="-created_at") p.add_argument("--limit", default=50) p.add_argument("--offset", default=0) p.add_argument("--where", help="Optional filter criteria", required=False) p.set_defaults(command="user-list") p = subparsers.add_parser( "user-create", help="Create a user.", parents=[base_parser] ) p.add_argument("--name", required=True) p.add_argument("--password", required=True) p.add_argument("--email", required=True) 
p.add_argument("--fullname") _create_boolean_flags(p, "--active/--no-active", default=True, dest="state") p.add_argument("--team-id") p.set_defaults(command="user-create") p = subparsers.add_parser( "user-update", help="Update a user.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--etag", required=True) p.add_argument("--name") p.add_argument("--fullname", default="") p.add_argument("--email") p.add_argument("--password") p.add_argument("--team-id") _create_boolean_flags(p, "--active/--no-active", default=None, dest="state") p.set_defaults(command="user-update") p = subparsers.add_parser( "user-delete", help="Update a user.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--etag", required=True) p.set_defaults(command="user-delete") p = subparsers.add_parser("user-show", help="Show a user.", parents=[base_parser]) p.add_argument("id") p.set_defaults(command="user-show") # team commands p = subparsers.add_parser( "team-list", help="List all teams.", parents=[base_parser] ) p.add_argument("--sort", default="-created_at") p.add_argument("--limit", default=50) p.add_argument("--offset", default=0) p.add_argument("--where", help="Optional filter criteria", required=False) p.set_defaults(command="team-list") p = subparsers.add_parser( "team-create", help="Create a team.", parents=[base_parser] ) p.add_argument("--name", required=True) p.add_argument("--country") _create_boolean_flags(p, "--active/--no-active", default=True, dest="state") p.set_defaults(command="team-create") p = subparsers.add_parser( "team-update", help="Update a team.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--etag", required=True) p.add_argument("--name") p.add_argument("--country") _create_boolean_flags(p, "--active/--no-active", default=None, dest="state") _create_boolean_flags(p, "--external/--no-external", default=None, dest="external") p.set_defaults(command="team-update") p = subparsers.add_parser( "team-delete", help="Update a team.", 
parents=[base_parser] ) p.add_argument("id") p.add_argument("--etag", required=True) p.set_defaults(command="team-delete") p = subparsers.add_parser("team-show", help="Show a team.", parents=[base_parser]) p.add_argument("id") p.set_defaults(command="team-show") # product commands p = subparsers.add_parser( "product-list", help="List all products.", parents=[base_parser] ) p.add_argument("--sort", default="-created_at") p.add_argument("--limit", default=50) p.add_argument("--offset", default=0) p.add_argument("--where", help="Optional filter criteria", required=False) p.set_defaults(command="product-list") p = subparsers.add_parser( "product-create", help="Create a product.", parents=[base_parser] ) p.add_argument("--name", required=True) p.add_argument("--label") p.add_argument("--description") _create_boolean_flags(p, "--active/--no-active", default=True, dest="state") p.set_defaults(command="product-create") p = subparsers.add_parser( "product-update", help="Update a product.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--etag", required=True) p.add_argument("--name") p.add_argument("--label") p.add_argument("--description") _create_boolean_flags(p, "--active/--no-active", default=None, dest="state") _create_boolean_flags(p, "--external/--no-external", default=None, dest="external") p.set_defaults(command="product-update") p = subparsers.add_parser( "product-delete", help="Update a product.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--etag", required=True) p.set_defaults(command="product-delete") p = subparsers.add_parser( "product-show", help="Show a product.", parents=[base_parser] ) p.add_argument("id") p.set_defaults(command="product-show") p = subparsers.add_parser( "product-attach-team", help="Attach team to a product.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--team-id") p.set_defaults(command="product-attach-team") p = subparsers.add_parser( "product-detach-team", help="Detach team to a 
product.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--team-id") p.set_defaults(command="product-detach-team") p = subparsers.add_parser( "product-list-teams", help="List all teams attached to a product.", parents=[base_parser], ) p.add_argument("id") p.add_argument("--sort", default="-created_at") p.add_argument("--limit", default=50) p.add_argument("--offset", default=0) p.add_argument("--where", help="Optional filter criteria", required=False) p.set_defaults(command="product-list-teams") # feeder commands p = subparsers.add_parser( "feeder-list", help="List all feeders.", parents=[base_parser] ) p.add_argument("--sort", default="-created_at") p.add_argument("--limit", default=50) p.add_argument("--offset", default=0) p.add_argument("--where", help="Optional filter criteria", required=False) p.set_defaults(command="feeder-list") p = subparsers.add_parser( "feeder-create", help="Create a feeder.", parents=[base_parser] ) p.add_argument("--name", required=True) p.add_argument("--data") _create_boolean_flags(p, "--active/--no-active", default=True, dest="state") p.add_argument("--team-id") p.set_defaults(command="feeder-create") p = subparsers.add_parser( "feeder-update", help="Update a feeder.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--etag", required=True) p.add_argument("--name") p.add_argument("--data") p.add_argument("--team-id") _create_boolean_flags(p, "--active/--no-active", default=None, dest="state") p.set_defaults(command="feeder-update") p = subparsers.add_parser( "feeder-delete", help="Update a feeder.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--etag", required=True) p.set_defaults(command="feeder-delete") p = subparsers.add_parser( "feeder-show", help="Show a feeder.", parents=[base_parser] ) p.add_argument("id") p.set_defaults(command="feeder-show") p = subparsers.add_parser( "feeder-reset-api-secret", help="reset api secret for a feeder.", parents=[base_parser], ) p.add_argument("id") 
p.add_argument("--etag", required=True) p.set_defaults(command="feeder-reset-api-secret") # topic commands p = subparsers.add_parser( "topic-list", help="List all topics.", parents=[base_parser] ) p.add_argument("--sort", default="-created_at") p.add_argument("--limit", default=50) p.add_argument("--offset", default=0) p.add_argument("--where", help="Optional filter criteria", required=False) p.set_defaults(command="topic-list") p = subparsers.add_parser( "topic-create", help="Create a topic.", parents=[base_parser] ) p.add_argument("--name", required=True) p.add_argument("--product-id") p.add_argument( "--component_types", default=None, help="Component types separated by commas." ) _create_boolean_flags(p, "--active/--no-active", default=True, dest="state") _create_boolean_flags( p, "--export-control/--no-export-control", default=False, dest="export_control" ) p.add_argument("--data") p.set_defaults(command="topic-create") p = subparsers.add_parser( "topic-update", help="Update a topic.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--etag", required=True) p.add_argument("--name") p.add_argument( "--component_types", default=None, help="Component types separated by commas." 
) p.add_argument("--next-topic-id") _create_boolean_flags(p, "--active/--no-active", default=False, dest="state") _create_boolean_flags( p, "--export-control/--no-export-control", default=None, dest="export_control" ) p.add_argument("--product-id") p.add_argument("--data") p.set_defaults(command="topic-update") p = subparsers.add_parser( "topic-delete", help="Delete a topic.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--etag", required=True) p.set_defaults(command="topic-delete") p = subparsers.add_parser("topic-show", help="Show a topic.", parents=[base_parser]) p.add_argument("id") p.set_defaults(command="topic-show") p = subparsers.add_parser( "topic-attach-team", help="Attach a team to a topic.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--team-id") p.set_defaults(command="topic-attach-team") p = subparsers.add_parser( "topic-unattach-team", help="Unattach a team to a topic.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--team-id") p.set_defaults(command="topic-unattach-team") p = subparsers.add_parser( "topic-list-team", help="List teams attached to a topic.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--sort", default="-created_at") p.add_argument("--limit", default=50) p.add_argument("--offset", default=0) p.add_argument("--where", help="Optional filter criteria", required=False) p.set_defaults(command="topic-list-team") # jobstate commands p = subparsers.add_parser( "jobstate-show", help="Show a jobstate.", parents=[base_parser] ) p.add_argument("id") p.set_defaults(command="jobstate-show") # component commands p = subparsers.add_parser( "component-list", help="List all components.", parents=[base_parser] ) p.add_argument("--topic-id", required=True, dest="id") p.add_argument("--sort", default="-created_at") p.add_argument("--limit", default=50) p.add_argument("--offset", default=0) p.add_argument("--where", help="Optional filter criteria", required=False) 
p.set_defaults(command="component-list") p = subparsers.add_parser( "component-create", help="Create a component.", parents=[base_parser] ) p.add_argument("--name", required=True, help="Name of component") p.add_argument("--type", required=True, help="Type of component") p.add_argument("--topic-id", required=True, help="Topic ID") p.add_argument("--team-id") _create_array_argument(p, "--tags", help="Comma separated list of tags") p.add_argument( "--canonical_project_name", default=None, help="Canonical project name." ) p.add_argument("--title", help="Title of component") p.add_argument("--message", help="Component message") p.add_argument("--url", help="URL to look for the component") _create_boolean_flags(p, "--active/--no-active", default=True, dest="state") p.add_argument("--data", default="{}", help="Data to pass (JSON)") p.add_argument('--released-at', default=None, type=_date_isoformat, help="The release date") p.set_defaults(command="component-create") p = subparsers.add_parser( "component-update", help="Update a component.", parents=[base_parser] ) p.add_argument("id") _create_boolean_flags(p, "--active/--no-active", default=None, dest="state") p.add_argument("--name", required=False, help="Name of component") p.add_argument("--type", required=False, help="Type of component") _create_array_argument(p, "--tags", help="Comma separated list of tags") p.add_argument( "--canonical_project_name", default=None, help="Canonical project name." 
) p.add_argument("--title", help="Title of component") p.add_argument("--message", help="Component message") p.add_argument("--url", help="URL to look for the component") p.add_argument("--data", default="{}", help="Data to pass (JSON)") p.set_defaults(command="component-update") p = subparsers.add_parser( "component-delete", help="Delete a component.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--etag", required=True) p.set_defaults(command="component-delete") p = subparsers.add_parser( "component-show", help="Show a component.", parents=[base_parser] ) p.add_argument("id") p.set_defaults(command="component-show") p = subparsers.add_parser( "component-attach-issue", help="Attach an issue to a component.", parents=[base_parser], ) p.add_argument("id") p.add_argument("--url", required=True) p.set_defaults(command="component-attach-issue") p = subparsers.add_parser( "component-unattach-issue", help="Unattach an issue to a component.", parents=[base_parser], ) p.add_argument("id") p.add_argument("--issue-id", required=True) p.set_defaults(command="component-unattach-issue") p = subparsers.add_parser( "component-list-issue", help="List all component attached issues.", parents=[base_parser], ) p.add_argument("id") p.add_argument("--sort", default="-created_at") p.add_argument("--limit", default=50) p.add_argument("--offset", default=0) p.set_defaults(command="component-list-issue") p = subparsers.add_parser( "component-file-upload", help="Attach a file to a component.", parents=[base_parser], ) p.add_argument("id") p.add_argument("--path", required=True) p.set_defaults(command="component-file-upload") p = subparsers.add_parser( "component-file-show", help="Show a component file.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--file-id", required=True) p.set_defaults(command="component-file-show") p = subparsers.add_parser( "component-file-download", help="Retrieve a component file.", parents=[base_parser], ) p.add_argument("id") 
p.add_argument("--file-id", required=True) p.add_argument("--target", required=True) p.set_defaults(command="component-file-download") p = subparsers.add_parser( "component-file-list", help="List files attached to a component.", parents=[base_parser], ) p.add_argument("id") p.add_argument("--sort", default="-created_at") p.add_argument("--limit", default=50) p.add_argument("--offset", default=0) p.add_argument("--where", help="Optional filter criteria", required=False) p.set_defaults(command="component-file-list") p = subparsers.add_parser( "component-file-delete", help="Delete a component file.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--file-id", required=True) p.add_argument("--etag", required=True) p.set_defaults(command="component-file-delete") # file commands p = subparsers.add_parser( "file-list", help="List all files.", parents=[base_parser] ) p.add_argument("job_id") p.add_argument("--sort", default="-created_at") p.add_argument("--limit", default=50) p.add_argument("--offset", default=0) p.add_argument("--where", help="Optional filter criteria", required=False) p.set_defaults(command="file-list") p = subparsers.add_parser("file-show", help="Show a file.", parents=[base_parser]) p.add_argument("id") p.set_defaults(command="file-show") p = subparsers.add_parser( "file-delete", help="Delete a file.", parents=[base_parser] ) p.add_argument("id") p.set_defaults(command="file-delete") # job commands p = subparsers.add_parser("job-list", help="List all jobs.", parents=[base_parser]) p.add_argument("--sort", default="-created_at") p.add_argument("--limit", default=10) p.add_argument("--offset", default=0) p.add_argument("--where", help="Optional filter criteria", required=False) p.set_defaults(command="job-list") p = subparsers.add_parser("job-show", help="Show a job.", parents=[base_parser]) p.add_argument("id") p.set_defaults(command="job-show") p = subparsers.add_parser("job-delete", help="Delete a job.", parents=[base_parser]) 
p.add_argument("id") p.add_argument("--etag", required=True) p.set_defaults(command="job-delete") p = subparsers.add_parser( "job-results", help="List all job results.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--sort", default="-created_at") p.add_argument("--limit", default=50) p.add_argument("--offset", default=0) p.set_defaults(command="job-results") p = subparsers.add_parser( "job-output", help="Show the job output.", parents=[base_parser] ) p.add_argument("id") p.set_defaults(command="job-output") p = subparsers.add_parser( "job-list-test", help="List all job tests.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--sort", default="-created_at") p.add_argument("--limit", default=50) p.add_argument("--offset", default=0) p.add_argument("--where", help="Optional filter criteria", required=False) p.set_defaults(command="job-list-test") p = subparsers.add_parser( "job-upload-file", help="Attach a file to a job.", parents=[base_parser] ) p.add_argument("job_id") p.add_argument("--name", required=True) p.add_argument("--path", required=True, dest="file_path") p.add_argument("--jobstate-id") p.add_argument("--test-id") p.add_argument("--mime") p.set_defaults(command="job-upload-file") p = subparsers.add_parser( "job-download-file", help="Retrieve a job file.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--file-id", required=True) p.add_argument("--target", required=True) p.set_defaults(command="job-download-file") p = subparsers.add_parser( "job-show-file", help="Show a job file.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--file-id", required=True) p.set_defaults(command="job-show-file") p = subparsers.add_parser( "job-list-file", help="List files attached to a job.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--sort", default="-created_at") p.add_argument("--limit", default=50) p.add_argument("--offset", default=0) p.add_argument("--where", help="Optional filter criteria", 
required=False) p.set_defaults(command="job-list-file") p = subparsers.add_parser( "job-delete-file", help="Delete a job file.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--file-id", required=True) p.set_defaults(command="job-delete-file") # test commands p = subparsers.add_parser( "test-list", help="List all tests.", parents=[base_parser] ) p.add_argument("--sort", default="-created_at") p.add_argument("--limit", default=50) p.add_argument("--offset", default=0) p.add_argument("--where", help="Optional filter criteria", required=False) p.set_defaults(command="test-list") p = subparsers.add_parser( "test-create", help="Create a test.", parents=[base_parser] ) p.add_argument("--name") p.add_argument("--data", default="{}") _create_boolean_flags(p, "--active/--no-active", default=True, dest="state") p.set_defaults(command="test-create") p = subparsers.add_parser( "test-update", help="Update a test.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--etag", required=True) p.add_argument("--name") p.add_argument("--data", default="{}") _create_boolean_flags(p, "--active/--no-active", default=None, dest="state") p.set_defaults(command="test-update") p = subparsers.add_parser( "test-delete", help="Delete a test.", parents=[base_parser] ) p.add_argument("id") p.set_defaults(command="test-delete") p = subparsers.add_parser("test-show", help="Show a test.", parents=[base_parser]) p.add_argument("id") p.set_defaults(command="test-show") # remoteci commands p = subparsers.add_parser( "remoteci-list", help="List all remotecis.", parents=[base_parser] ) p.add_argument("--sort", default="-created_at") p.add_argument("--limit", default=50) p.add_argument("--offset", default=0) p.add_argument("--where", help="Optional filter criteria", required=False) p.set_defaults(command="remoteci-list") p = subparsers.add_parser( "remoteci-create", help="Create a remoteci.", parents=[base_parser] ) p.add_argument("--name", required=True) 
p.add_argument("--team-id", required=False) p.add_argument("--data", default="{}") _create_boolean_flags(p, "--active/--no-active", default=None, dest="state") p.set_defaults(command="remoteci-create") p = subparsers.add_parser( "remoteci-update", help="Update a remoteci.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--etag", required=True) p.add_argument("--name") p.add_argument("--team-id") p.add_argument("--data", default="{}") _create_boolean_flags(p, "--active/--no-active", default=None, dest="state") p.set_defaults(command="remoteci-update") p = subparsers.add_parser( "remoteci-delete", help="Delete a remoteci.", parents=[base_parser] ) p.add_argument("id") p.add_argument("--etag", required=True) p.set_defaults(command="remoteci-delete") p = subparsers.add_parser( "remoteci-show", help="Show a remoteci.", parents=[base_parser] ) p.add_argument("id") p.set_defaults(command="remoteci-show") p = subparsers.add_parser( "remoteci-get-data", help="Retrieve data field from a remoteci.", parents=[base_parser], ) p.add_argument("id") p.add_argument("--keys", default=None) p.set_defaults(command="remoteci-get-data") p = subparsers.add_parser( "remoteci-attach-test", help="Attach a test to a remoteci.", parents=[base_parser], ) p.add_argument("id") p.add_argument("--test-id", required=True) p.set_defaults(command="remoteci-attach-test") p = subparsers.add_parser( "remoteci-unattach-test", help="Unattach a test to a remoteci.", parents=[base_parser], ) p.add_argument("id") p.add_argument("--test-id", required=True) p.set_defaults(command="remoteci-unattach-test") p = subparsers.add_parser( "remoteci-list-test", help="List tests attached to a remoteci.", parents=[base_parser], ) p.add_argument("id") p.add_argument("--sort", default="-created_at") p.add_argument("--limit", default=50) p.add_argument("--offset", default=0) p.add_argument("--where", help="Optional filter criteria", required=False) p.set_defaults(command="remoteci-list-test") p = 
subparsers.add_parser( "remoteci-reset-api-secret", help="Reset a remoteci api secret.", parents=[base_parser], ) p.add_argument("id") p.add_argument("--etag", required=True) p.set_defaults(command="remoteci-reset-api-secret") p = subparsers.add_parser( "remoteci-refresh-keys", help="Refresh a remoteci key pair.", parents=[base_parser], ) p.add_argument("id") p.add_argument("--etag", required=True) p.set_defaults(command="remoteci-refresh-keys") p = subparsers.add_parser( "remoteci-attach-user", help="Attach a user to a remoteci.", parents=[base_parser], ) p.add_argument("id") p.add_argument("--user-id", required=True) p.set_defaults(command="remoteci-attach-user") p = subparsers.add_parser( "remoteci-unattach-user", help="Unattach a user to a remoteci.", parents=[base_parser], ) p.add_argument("id") p.add_argument("--user-id", required=True) p.set_defaults(command="remoteci-unattach-user") p = subparsers.add_parser( "remoteci-list-user", help="List users attached to a remoteci.", parents=[base_parser], ) p.add_argument("id") p.add_argument("--sort", default="-created_at") p.add_argument("--limit", default=50) p.add_argument("--offset", default=0) p.add_argument("--where", help="Optional filter criteria", required=False) p.set_defaults(command="remoteci-list-user") # purge commands p = subparsers.add_parser( "purge", help="Purge soft-deleted resources.", parents=[base_parser] ) p.add_argument( "--resource", default=None, help="Comma separated list of resource to purge." ) p.add_argument( "--force", default=False, action="store_true", help="Purge resources." ) p.set_defaults(command="purge") args = parser.parse_args(args) if "command" not in args: parser.print_help() sys.exit() return args
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING

import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat

from .. import models as _models

if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Optional, TypeVar

    T = TypeVar('T')
    # Optional callback that post-processes the raw pipeline response.
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]


class PricingsOperations(object):
    """PricingsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance
    that instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.security.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # The pipeline client, (de)serializers and config are supplied by the
        # generated service client that owns this operation group.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PricingList"
        """Lists Security Center pricing configurations in the subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PricingList, or the result of cls(response)
        :rtype: ~azure.mgmt.security.models.PricingList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PricingList"]
        # Map well-known HTTP statuses to azure-core exceptions; caller-supplied
        # 'error_map' entries override these defaults.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        accept = "application/json"

        # Construct URL (subscription id is validated against the GUID pattern)
        url = self.list.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Remaining kwargs flow into the pipeline run (transport options etc.).
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PricingList', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/pricings'}  # type: ignore

    def get(
        self,
        pricing_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.Pricing"
        """Gets a provided Security Center pricing configuration in the subscription.

        :param pricing_name: name of the pricing configuration.
        :type pricing_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Pricing, or the result of cls(response)
        :rtype: ~azure.mgmt.security.models.Pricing
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Pricing"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
            'pricingName': self._serialize.url("pricing_name", pricing_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('Pricing', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/pricings/{pricingName}'}  # type: ignore

    def update(
        self,
        pricing_name,  # type: str
        pricing,  # type: "_models.Pricing"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.Pricing"
        """Updates a provided Security Center pricing configuration in the subscription.

        :param pricing_name: name of the pricing configuration.
        :type pricing_name: str
        :param pricing: Pricing object.
        :type pricing: ~azure.mgmt.security.models.Pricing
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Pricing, or the result of cls(response)
        :rtype: ~azure.mgmt.security.models.Pricing
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Pricing"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.update.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
            'pricingName': self._serialize.url("pricing_name", pricing_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the Pricing model as the PUT request body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(pricing, 'Pricing')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('Pricing', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/pricings/{pricingName}'}  # type: ignore
#!/usr/bin/env python
"""
Generates the include/bohrium/bh_opcode.h and core/bh_opcode based on the
definition in /core/codegen/opcodes.json.
"""
import json
import time
import argparse
from collections import Counter


def _assert_unique(values, what):
    """Raise ValueError if *values* contains duplicates.

    :param values: iterable of opcode ids or opcode names.
    :param what: label used in the error message ('id' or 'opcode').
    """
    # Counter gives O(n) duplicate detection and reports each value once
    # (the previous list.count() scan was O(n^2) and repeated duplicates).
    dupes = [value for value, count in Counter(values).items() if count > 1]
    if dupes:
        raise ValueError("opcodes.json contains %s duplicates: %s" % (what, str(dupes)))


def gen_headerfile(opcodes):
    """Return the contents of bh_opcode.h generated from *opcodes*.

    :param opcodes: list of dicts with at least 'opcode', 'id' and 'doc' keys.
    :returns: the header file as a string.
    :raises ValueError: if two opcodes share an id or a name.
    """
    # Validate before formatting anything.
    ids = [int(o['id']) for o in opcodes]
    _assert_unique(ids, "id")
    _assert_unique([o['opcode'] for o in opcodes], "opcode")

    enums = ("    %s = %s,\t\t// %s" % (o['opcode'], o['id'], o['doc'])
             for o in opcodes)
    stamp = time.strftime("%d/%m/%Y")
    max_ops = max(ids)  # reuse the id list instead of rebuilding it
    return """
/*
 * Do not edit this file. It has been auto generated by
 * ../core/codegen/gen_opcodes.py at __TIMESTAMP__.
 */

#pragma once

#include <bohrium/bh_type.hpp>

#ifdef __cplusplus
extern "C" {
#endif

/* Codes for known oparations */
enum { /* bh_opcode */
__OPCODES__

    BH_NO_OPCODES = __NO_OPCODES__, // The amount of opcodes
    BH_MAX_OPCODE_ID = __MAX_OP__ // The extension method offset
};

/* Text string for operation
 *
 * @opcode Opcode for operation
 * @return Text string.
 */
const char* bh_opcode_text(bh_opcode opcode);

/* Determines if the operation is a system operation
 *
 * @opcode The operation opcode
 * @return The boolean answer
 */
bool bh_opcode_is_system(bh_opcode opcode);

/* Determines if the operation is a reduction operation
 *
 * @opcode The operation opcode
 * @return The boolean answer
 */
bool bh_opcode_is_reduction(bh_opcode opcode);

/* Determines if the operation is an accumulate operation
 *
 * @opcode The operation opcode
 * @return The boolean answer
 */
bool bh_opcode_is_accumulate(bh_opcode opcode);

/* Determines if the operation is performed elementwise
 *
 * @opcode Opcode for operation
 * @return TRUE if the operation is performed elementwise, FALSE otherwise
 */
bool bh_opcode_is_elementwise(bh_opcode opcode);

/* Determines whether the opcode is a sweep opcode
 * i.e. either a reduction or an accumulate
 *
 * @opcode
 * @return The boolean answer
 */
bool bh_opcode_is_sweep(bh_opcode opcode);

#ifdef __cplusplus
}
#endif
""".replace('__TIMESTAMP__', stamp)\
   .replace('__OPCODES__', '\n'.join(enums))\
   .replace('__NO_OPCODES__', str(len(opcodes)))\
   .replace('__MAX_OP__', str(max_ops))


def gen_cfile(opcodes):
    """Return the contents of bh_opcode.cpp generated from *opcodes*.

    :param opcodes: list of dicts with 'opcode', 'system_opcode',
        'elementwise', 'reduction' and 'accumulate' keys.
    :returns: the C++ source file as a string.
    """
    # NOTE: the previous version also built a 'nops' case table, but the
    # template has no placeholder for it; that dead code has been removed.
    text = ['        case %s: return "%s";' % (o['opcode'], o['opcode']) for o in opcodes]
    sys_op = ['        case %s: ' % o['opcode'] for o in opcodes if o['system_opcode']]
    elem_op = ['        case %s: ' % o['opcode'] for o in opcodes if o['elementwise']]
    reduce_op = ['        case %s: ' % o['opcode'] for o in opcodes if o['reduction']]
    accum_op = ['        case %s: ' % o['opcode'] for o in opcodes if o['accumulate']]
    stamp = time.strftime("%d/%m/%Y")
    return """
/*
 * Do not edit this file. It has been auto generated by
 * ../core/codegen/gen_opcodes.py at __TIMESTAMP__.
 */

#include <stdlib.h>
#include <stdio.h>
#include <bohrium/bh_opcode.h>
#include <bohrium/bh_instruction.hpp>
#include <stdbool.h>

/* Text descriptions for a given operation */
const char* _opcode_text[BH_NONE+1];
bool _opcode_text_initialized = false;

/* Text string for operation
 *
 * @opcode Opcode for operation
 * @return Text string.
 */
const char* bh_opcode_text(bh_opcode opcode)
{
    switch(opcode)
    {
__TEXT__
        default: return "Unknown opcode";
    }
}

/* Determines if the operation is a system operation
 *
 * @opcode The operation opcode
 * @return The boolean answer
 */
bool bh_opcode_is_system(bh_opcode opcode)
{
    switch(opcode)
    {
__SYS_OP__
            return true;
        default:
            return false;
    }
}

/* Determines if the operation is an elementwise operation
 *
 * @opcode The operation opcode
 * @return The boolean answer
 */
bool bh_opcode_is_elementwise(bh_opcode opcode)
{
    switch(opcode)
    {
__ELEM_OP__
            return true;
        default:
            return false;
    }
}

/* Determines if the operation is a reduction operation
 *
 * @opcode The operation opcode
 * @return The boolean answer
 */
bool bh_opcode_is_reduction(bh_opcode opcode)
{
    switch(opcode)
    {
__REDUCE_OP__
            return true;
        default:
            return false;
    }
}

/* Determines if the operation is an accumulate operation
 *
 * @opcode The operation opcode
 * @return The boolean answer
 */
bool bh_opcode_is_accumulate(bh_opcode opcode)
{
    switch(opcode)
    {
__ACCUM_OP__
            return true;
        default:
            return false;
    }
}

/* Determines whether the opcode is a sweep opcode
 * i.e. either a reduction or an accumulate
 *
 * @opcode
 * @return The boolean answer
 */
bool bh_opcode_is_sweep(bh_opcode opcode)
{
    return (bh_opcode_is_reduction(opcode) || bh_opcode_is_accumulate(opcode));
}
""".replace('__TIMESTAMP__', stamp)\
   .replace('__TEXT__', '\n'.join(text))\
   .replace('__SYS_OP__', '\n'.join(sys_op))\
   .replace('__ELEM_OP__', '\n'.join(elem_op))\
   .replace('__REDUCE_OP__', '\n'.join(reduce_op))\
   .replace('__ACCUM_OP__', '\n'.join(accum_op))


def main(args):
    """Read opcodes.json and write the generated .h and .cpp files."""
    opcodes = json.load(args.opcode_json)

    # Write the header file
    args.opcode_h.write(gen_headerfile(opcodes))

    # Write the c file
    args.opcode_cpp.write(gen_cfile(opcodes))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Generates bh_opcode.cpp and bh_opcode.h')
    parser.add_argument(
        'opcode_json',
        type=argparse.FileType('r'),
        help="The opcode.json file that defines all Bohrium opcodes."
    )
    parser.add_argument(
        'opcode_h',
        type=argparse.FileType('w'),
        help="The bh_opcode.h to write."
    )
    parser.add_argument(
        'opcode_cpp',
        type=argparse.FileType('w'),
        help="The bh_opcode.cpp to write."
    )
    main(parser.parse_args())
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Objectives: Extract content from log files.

Parses Android GSM/GPS logger ``.log`` files (comma-separated records) into
nested "sample blocks" and converts those blocks into SQL INSERT statements
for PostGIS-style tables.

A sample block has the structure:
    [Record, ACTIVE GSM, 0..n GSM, Network Location, Best Location]

NOTE: this module uses ``string.maketrans`` and the two-argument
``str.translate(table, deletechars)`` form, so it targets Python 2.
"""
from details import *
import os, re, time, shutil, string

os.chdir(os.getcwd())  # NOTE(review): no-op (changes to the current dir); confirm intent


# Open one by one file from given directory and returns the multilist
def ProcessMyAllFiles(InDirName, OutDirName):
    """Parse every ``.log`` file in `InDirName`, then move it to `OutDirName`.

    Returns a list with one entry per processed file; each entry is a list of
    sample blocks (see module docstring for the block structure).
    """
    AllFilesData = []
    for fils in os.listdir(InDirName):
        print(fils)
        if fils.endswith(".log"):
            currentPath = ("%s/%s") % (InDirName, fils)
            movePath = OutDirName
            sampleBlock = 0
            aa = -1          # index of the sample block currently being filled
            DataAll = []
            DataTemp = []
            for Uline in open(currentPath):
                if Uline != '\n':  # skip blank lines
                    Uline2 = re.split(",", Uline)
                    GSMrecord = GetGSMrecord(Uline2)
                    ActiveGSM = GetActiveGSM(Uline2)
                    GSMlines = GetGSMlines(Uline2)
                    NetLocation = GetNetLocation(Uline2)
                    BestLocation = GetBestLocation(Uline2)
                    # A 'Record' line opens a new sample block.
                    # BUGFIX: the Get* helpers used to return [] (not None) for
                    # non-matching lines, which made every check below succeed
                    # on every line and corrupted the block structure. They now
                    # return None when the line does not match.
                    if GSMrecord != None:
                        DataTemp.append(sampleBlock)
                        aa = aa + 1
                        DataAll = []
                        DataAll.append(GSMrecord)
                    if ActiveGSM != None:
                        DataAll.append(ActiveGSM)
                    if GSMlines != None:
                        DataAll.append(GSMlines)
                    if NetLocation != None:
                        DataAll.append(NetLocation)
                    if BestLocation != None:
                        DataAll.append(BestLocation)
                    DataTemp[aa] = DataAll
            AllFilesData.append(DataTemp)
            shutil.move(currentPath, movePath)
    return AllFilesData


# Colllect IMP part one by one
def GetGSMrecord(LineData):
    """Return the five 'Record' fields, or None if this is not a Record line."""
    if LineData[0] == 'Record':
        myRecord = []
        myRecord.append(LineData[1])
        myRecord.append(LineData[2])
        myRecord.append(LineData[3])
        myRecord.append(LineData[4])
        myRecord.append(LineData[5].rstrip('\n'))
        return myRecord
    return None


# Get active GSM
def GetActiveGSM(activeLine):
    """Return the three 'ACTIVE GSM' fields, or None if the line is not one."""
    if activeLine[0] == 'ACTIVE GSM':
        ActiveGSMdetails = []
        ActiveGSMdetails.append(activeLine[1])
        ActiveGSMdetails.append(activeLine[2])
        ActiveGSMdetails.append(activeLine[3].rstrip('\n'))
        return ActiveGSMdetails
    return None


# Get GSM lines if any
def GetGSMlines(gsmLine):
    """Return the five neighbouring-'GSM' fields, or None if no match."""
    if gsmLine[0] == 'GSM':
        myGSMline = []
        myGSMline.append(gsmLine[1])
        myGSMline.append(gsmLine[2])
        myGSMline.append(gsmLine[3])
        myGSMline.append(gsmLine[4])
        myGSMline.append(gsmLine[5].rstrip('\n'))
        return myGSMline
    return None


# Get network location
def GetNetLocation(NetLoc):
    """Return [lat, lon, provider] from a 'Network Location' line, else None."""
    if NetLoc[0] == 'Network Location':
        myNetLocation = []
        myNetLocation.append(NetLoc[2])               # Lat
        myNetLocation.append(NetLoc[3].rstrip('\n'))  # Lon
        myNetLocation.append(NetLoc[1])               # Provider
        return myNetLocation
    return None


# Get best location
def GetBestLocation(BestLoc):
    """Return [lat, lon, provider] from a 'Best Location' line, else None."""
    if BestLoc[0] == 'Best Location':
        myBestLocation = []
        myBestLocation.append(BestLoc[2])               # Lat
        myBestLocation.append(BestLoc[3].rstrip('\n'))  # Lon
        myBestLocation.append(BestLoc[1])               # Provider
        return myBestLocation
    return None


# Generate SQL with best location best signal
# structure (1 Record, 1 ACTIVE GSM, 0 to n GSM, 1 Network Location, 1 Best Location)
# seperate only Active GSM and Best Location
def GetBsBl(GSMSampleBlock):
    """Return [lat, lon, signal] from one sample block.

    Signal comes from the ACTIVE GSM entry; coordinates from Best Location.
    """
    dataInSQL = []
    SignalProcess = ProcessMe(GSMSampleBlock[1][2])
    SignalProcess[1] = removeGtLt(SignalProcess[1])
    # Values look like ' -103dBm'; keep only the numeric magnitude of the dBm
    # figure (3 digits for long values, 2 otherwise).
    if len(SignalProcess[1]) > 7:
        SignalProcess[1] = SignalProcess[1][2:5]
    else:
        SignalProcess[1] = SignalProcess[1][2:4]
    LatProcess = ProcessMe(GSMSampleBlock[-1][0])
    LonProcess = ProcessMe(GSMSampleBlock[-1][1])
    dataInSQL.append(LatProcess[1])
    dataInSQL.append(LonProcess[1])
    dataInSQL.append(SignalProcess[1])
    return dataInSQL


# Process for equal to sign
def ProcessMe(dataToProcess):
    """Split 'key=value' text on '='; index [1] is the value part."""
    ProcessedData = re.split("=", dataToProcess)
    return ProcessedData


# Remove greater than less than sign
def removeGtLt(dataIn):
    """Strip '<' and '>' characters (Python 2 translate/deletechars form)."""
    table = string.maketrans('', '')
    DataIn = dataIn.translate(table, "<>")
    return DataIn


# Generate SQL from list for given table
def GenerateSQL(ListData, tableName):
    """Build an INSERT statement from a [lat, lon, signal] triple.

    NOTE(review): the example below wraps the point in double parentheses and
    most SRIDs are positive; confirm 'MULTIPOINT (%s %s)' and -4326 against
    the target schema.
    """
    # e.g.
    # INSERT INTO field_obs_antena_3 VALUES (0, '2014-01-31 16:34:38.718' , NULL, NULL, 'N','75', GeometryFromText('MULTIPOINT ((78.210785 21.454251))',-4326));
    SQLstatement = ("INSERT INTO %s VALUES(0, '%s',GeometryFromText('MULTIPOINT (%s %s)',-4326));\n") % (
        tableName, ListData[2], ListData[1], ListData[0])
    return SQLstatement


# Generate all network for best location
# structure (1 Record, 1 ACTIVE GSM, 0 to n GSM, 1 Network Location, 1 Best Location)
def GetAllNetBestLoc(MyGSMsmBl):
    """Return [lat, lon, signal] rows for the active cell and every neighbour.

    Blocks shorter than 5 entries contain no neighbouring GSM lines and yield
    a single row.
    """
    DataAllGood = []
    dataAllNetBest = []
    # to seperate samples with no other GSM signals
    if len(MyGSMsmBl) < 5:
        mYSignal = ProcessMe(MyGSMsmBl[1][2])
        mYSignal[1] = removeGtLt(mYSignal[1])
        if len(mYSignal[1]) > 7:
            mYSignal[1] = mYSignal[1][2:5]
        else:
            mYSignal[1] = mYSignal[1][2:4]
        LatProcess = ProcessMe(MyGSMsmBl[-1][0])
        LonProcess = ProcessMe(MyGSMsmBl[-1][1])
        dataAllNetBest.append(LatProcess[1])
        dataAllNetBest.append(LonProcess[1])
        dataAllNetBest.append(mYSignal[1])
        DataAllGood.append(dataAllNetBest)
    else:
        mYSignal = ProcessMe(MyGSMsmBl[1][2])
        mYSignal[1] = removeGtLt(mYSignal[1])
        if len(mYSignal[1]) > 7:
            mYSignal[1] = mYSignal[1][2:5]
        else:
            mYSignal[1] = mYSignal[1][2:4]
        LatProcess = ProcessMe(MyGSMsmBl[-1][0])
        LonProcess = ProcessMe(MyGSMsmBl[-1][1])
        dataAllNetBest.append(LatProcess[1])
        dataAllNetBest.append(LonProcess[1])
        dataAllNetBest.append(mYSignal[1])
        DataAllGood.append(dataAllNetBest)
        # tricky part to extract the GSM data: entries 2..len-3 are the
        # neighbouring GSM lines (last two are Network/Best Location).
        GSMDataLength = len(MyGSMsmBl) - 2
        GSMPureData = MyGSMsmBl[2:GSMDataLength]
        for aa in range(0, len(GSMPureData)):
            DataAllNewGSM = []
            GSMPureSig = ProcessMe(GSMPureData[aa][-1])
            GSMPureSig[1] = removeGtLt(GSMPureSig[1])
            if len(GSMPureSig[1]) > 7:
                GSMPureSig[1] = GSMPureSig[1][2:5]
            else:
                GSMPureSig[1] = GSMPureSig[1][2:4]
            DataAllNewGSM.append(LatProcess[1])
            DataAllNewGSM.append(LonProcess[1])
            DataAllNewGSM.append(GSMPureSig[1])
            DataAllGood.append(DataAllNewGSM)
    return DataAllGood


# Generate all network for best location with all other details
# structure (1 Record, 1 ACTIVE GSM, 0 to n GSM, 1 Network Location, 1 Best Location)
def GetAllLogDetails(AllDetailData):
    """Return detail rows [lat, lon, signal, datetime, phone, net, operator,
    antenna, gps_source] for the active cell and every neighbouring GSM cell."""
    DataAllGood = []
    dataAllNetBest = []
    # process datetime
    DateTime = AllDetailData[0][0]
    DateTime = ProcessMe(DateTime)
    DateTime = removeGMT(DateTime[1])
    # phone Type
    PhoneType = AllDetailData[0][1]
    PhoneType = ProcessMe(PhoneType)
    PhoneType = PhoneType[1]
    # network Type
    NetworkType = AllDetailData[0][2]
    NetworkType = ProcessMe(NetworkType)
    NetworkType = NetworkType[1]
    # operator identification
    OperatorID = AllDetailData[0][4]
    OperatorID = ProcessMe(OperatorID)
    OperatorID = OperatorID[1]
    # to seperate samples with no other GSM signals
    if len(AllDetailData) < 5:
        # network Antena id (e.g. ACTIVE GSM, LAC=1377, CID=3533, Signal= >-52dBm)
        NetworkAntena = AllDetailData[1][1]
        NetworkAntena = ProcessMe(NetworkAntena)
        NetworkAntena = NetworkAntena[1]
        mYSignal = ProcessMe(AllDetailData[1][2])
        mYSignal[1] = removeGtLt(mYSignal[1])
        if len(mYSignal[1]) > 7:
            mYSignal[1] = mYSignal[1][2:5]
        else:
            mYSignal[1] = mYSignal[1][2:4]
        # Get latitude
        LatProcess = ProcessMe(AllDetailData[-1][0])
        # Get longitude
        LonProcess = ProcessMe(AllDetailData[-1][1])
        # Get gps_source
        myGPSsource = ProcessMe(AllDetailData[-1][2])
        dataAllNetBest.append(LatProcess[1])
        dataAllNetBest.append(LonProcess[1])
        dataAllNetBest.append(mYSignal[1])
        dataAllNetBest.append(DateTime)
        dataAllNetBest.append(PhoneType)
        dataAllNetBest.append(NetworkType)
        dataAllNetBest.append(OperatorID)
        dataAllNetBest.append(NetworkAntena)
        dataAllNetBest.append(myGPSsource[1])
        DataAllGood.append(dataAllNetBest)
    else:
        # network Antena id (e.g. ACTIVE GSM, LAC=1377, CID=3533, Signal= >-52dBm)
        NetworkAntena = AllDetailData[1][1]
        NetworkAntena = ProcessMe(NetworkAntena)
        NetworkAntena = NetworkAntena[1]
        mYSignal = ProcessMe(AllDetailData[1][2])
        mYSignal[1] = removeGtLt(mYSignal[1])
        if len(mYSignal[1]) > 7:
            mYSignal[1] = mYSignal[1][2:5]
        else:
            mYSignal[1] = mYSignal[1][2:4]
        # Get latitude
        LatProcess = ProcessMe(AllDetailData[-1][0])
        # Get longitude
        LonProcess = ProcessMe(AllDetailData[-1][1])
        # Get gps_source
        myGPSsource = ProcessMe(AllDetailData[-1][2])
        dataAllNetBest.append(LatProcess[1])
        dataAllNetBest.append(LonProcess[1])
        dataAllNetBest.append(mYSignal[1])
        dataAllNetBest.append(DateTime)
        dataAllNetBest.append(PhoneType)
        dataAllNetBest.append(NetworkType)
        dataAllNetBest.append(OperatorID)
        dataAllNetBest.append(NetworkAntena)
        dataAllNetBest.append(myGPSsource[1])
        DataAllGood.append(dataAllNetBest)
        # tricky part to extract the GSM data
        GSMDataLength = len(AllDetailData) - 2
        GSMPureData = AllDetailData[2:GSMDataLength]
        for aa in range(0, len(GSMPureData)):
            DataAllNewGSM = []
            # GSM network Antena id (e.g. GSM, LAC=1377, CID=36392, PSC=-1, TYP=1, Signal= -103dBm)
            GSMNetworkAntena = GSMPureData[aa][1]
            GSMNetworkAntena = ProcessMe(GSMNetworkAntena)
            GSMNetworkAntena = GSMNetworkAntena[1]
            # Signal Strength
            GSMPureSig = ProcessMe(GSMPureData[aa][-1])
            GSMPureSig[1] = removeGtLt(GSMPureSig[1])
            if len(GSMPureSig[1]) > 7:
                GSMPureSig[1] = GSMPureSig[1][2:5]
            else:
                GSMPureSig[1] = GSMPureSig[1][2:4]
            DataAllNewGSM.append(LatProcess[1])
            DataAllNewGSM.append(LonProcess[1])
            DataAllNewGSM.append(GSMPureSig[1])
            DataAllNewGSM.append(DateTime)
            DataAllNewGSM.append(PhoneType)
            DataAllNewGSM.append(NetworkType)
            DataAllNewGSM.append(OperatorID)
            DataAllNewGSM.append(GSMNetworkAntena)
            DataAllNewGSM.append(myGPSsource[1])
            DataAllGood.append(DataAllNewGSM)
    return DataAllGood


# Remove GMT timestamp
def removeGMT(DateTime):
    """Strip quotes and the trailing ' GMT...' suffix; reformat the last
    colon-separated field as fractional seconds (HH:MM:SS.mmm)."""
    MyTable = string.maketrans('', '')
    DateTime = DateTime.translate(MyTable, '"')
    MyDateTime = re.split(" GMT", DateTime)
    MyDateTime = MyDateTime[0]
    MyDateTime = re.split(":", MyDateTime)
    MyDateTime = ('%s:%s:%s.%s') % (MyDateTime[0], MyDateTime[1], MyDateTime[2], MyDateTime[3])
    return MyDateTime


# Generate Detail SQL from list for given table
def GenerateDetailSQL(ListData, tableName):
    """Build a detail INSERT statement from a row produced by GetAllLogDetails."""
    # e.g.
    # INSERT INTO field_obs_antena_3 VALUES (0, TimeStamp, network_antena, gps_source, phone_type, network_type, operator_id, signal_strength, GeometryFromText('MULTIPOINT ((78.210785 21.454251))',-4326));
    SQLstatement = ("INSERT INTO %s VALUES(0, '%s', '%s', '%s', '%s', '%s', '%s', '%s',GeometryFromText('MULTIPOINT (%s %s)',-4326));\n") % (
        tableName, ListData[3], ListData[7], ListData[8], ListData[4],
        ListData[5], ListData[6], ListData[2], ListData[1], ListData[0])
    return SQLstatement
#************************************************
#!/usr/bin/env python3
#
# Copyright (c) 2013-2016 Molly White
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

from configure import Configurator
from executor import Executor
import json
import logging
from logging import handlers
from message import *
import os
import pickle
import queue
import re
import socket
import threading
from time import sleep, strftime, time


class Bot(object):
    """The core of the IRC bot. It maintains the IRC connection, and delegates other tasks."""

    def __init__(self):
        self.base_path = os.path.dirname(os.path.abspath(__file__))
        self.config_path = os.path.abspath(os.path.join(self.base_path, "..", "config.json"))
        self.log_path = os.path.abspath(os.path.join(self.base_path, 'logs'))
        self.configuration = None
        self.configuration_name = None
        self.last_message_sent = time()
        self.last_ping_sent = time()
        self.last_received = None
        self.logger = None
        self.shutdown = threading.Event()
        self.response_lock = threading.Lock()
        self.socket = None
        self.message_q = queue.Queue()
        self.executor = Executor(self, self.message_q, self.shutdown)
        self.header = {"User-Agent": "GorillaBot (https://github.com/molly/GorillaBot)"}
        self.initialize()

    def action(self, target, message):
        """Perform an action to target on the server."""
        # \x01ACTION ...\x01 is the CTCP ACTION framing (i.e. "/me").
        self.private_message(target, "\x01ACTION " + message + "\x01")

    def caffeinate(self):
        """Make sure the connection stays open."""
        now = time()
        if now - self.last_received > 150:
            if self.last_ping_sent < self.last_received:
                # Nothing heard for 150s and no ping outstanding: probe once.
                self.ping()
            elif now - self.last_received > 60:
                # NOTE(review): with the outer branch already requiring >150s of
                # silence this condition is always true — confirm whether it was
                # meant to compare against last_ping_sent instead.
                self.logger.warning('No ping response in 60 seconds. Shutting down.')
                self.shutdown.set()

    def connect(self):
        """Connect to the IRC server."""
        self.logger.debug('Thread created.')
        self.socket = socket.socket()
        self.socket.settimeout(5)
        try:
            self.logger.info('Initiating connection.')
            self.socket.connect(("chat.freenode.net", 6667))
        except OSError:
            self.logger.error("Unable to connect to IRC server. Check your Internet connection.")
            self.shutdown.set()
        else:
            if self.configuration["password"]:
                # hide=True keeps the server password out of the debug log.
                self.send("PASS {0}".format(self.configuration["password"]), hide=True)
            self.send("NICK {0}".format(self.configuration["nick"]))
            self.send("USER {0} 0 * :{1}".format(self.configuration["ident"],
                                                 self.configuration["realname"]))
            self.private_message("NickServ", "ACC")
            self.loop()

    def dispatch(self, line):
        """Inspect this line and determine if further processing is necessary."""
        length = len(line)
        message = None
        if 2 >= length >= 1:
            if line[0] == "PING":
                message = Ping(self, *line)
        if length >= 2:
            if line[1] == "PONG":
                message = Ping(self, *line)
            elif line[1].isdigit():
                message = Numeric(self, *line)
            elif line[1] == "NOTICE":
                message = Notice(self, *line)
            elif line[1] == "PRIVMSG":
                nick = self.configuration["nick"]
                # Treat it as a command when addressed directly or prefixed "!".
                if (length >= 3 and line[2] == nick) or (length >= 4 and (
                        line[3].startswith(":!") or line[3].startswith(":" + nick))):
                    message = Command(self, *line)
                else:
                    message = Privmsg(self, *line)
        if message:
            self.message_q.put(message)
        else:
            print(line)

    def get_admin(self, nick=None):
        """Get the hostnames for the bot admins. If nick is supplied, add that user as an
        admin."""
        botops = self.configuration["botops"]
        if nick:
            ops = [nick]
        else:
            ops = botops.keys()
        # Hold the lock so WHOIS replies are not consumed by another consumer.
        self.response_lock.acquire()
        ignored_messages = []
        for op in ops:
            self.send("WHOIS " + op)
            while True:
                try:
                    msg = self.message_q.get(True, 120)
                except queue.Empty:
                    self.logger.error("No response while getting admins. Shutting down.")
                    self.shutdown.set()
                    break
                else:
                    if type(msg) is Numeric:
                        if msg.number == '311':  # User info
                            line = msg.body.split()
                            botops.update({op: {"user": line[1], "host": line[2]}})
                            self.logger.info(
                                "Adding {0} {1} to bot ops".format(line[1], line[2],))
                            break
                        elif msg.number == '318':  # End of WHOIS
                            break
                        elif msg.number == '401':  # No such user
                            self.logger.info("No user {0} logged in.".format(op))
                            break
                    # Not ours — re-queue after the lock is released.
                    ignored_messages.append(msg)
        self.response_lock.release()
        for msg in ignored_messages:
            self.message_q.put(msg)
        self.configuration["botops"] = botops
        self.update_configuration(self.configuration)

    def get_configuration(self):
        """Get the configuration dict for the active configuration."""
        with open(self.config_path, 'r') as f:
            blob = json.load(f)
        return blob[self.configuration_name]

    def get_setting(self, setting, chan):
        """Get the value of the given setting for the given channel."""
        if chan not in self.configuration["chans"]:
            self.logger.warning("Tried to get settings for nonexistant channel {}.".format(chan))
            return None
        if setting not in self.configuration["chans"][chan]["settings"]:
            return None
        return self.configuration["chans"][chan]["settings"][setting]

    def initialize(self):
        """Initialize the bot. Parse command-line options, configure, and set up logging."""
        self.admin_commands, self.commands = self.load_commands()
        self.setup_logging()
        print('\n ."`".'
              '\n / _=_ \\ \x1b[32m __ __ __ . . . __ __ __ '
              '___\x1b[0m\n(,(oYo),) \x1b[32m / _` / \ |__) | | | |__| '
              '|__) / \ | \x1b[0m\n| " | \x1b[32m \__| \__/ | \ | |__ '
              '|__ | | |__) \__/ | \x1b[0m \n \(\_/)/\n')
        try:
            self.configuration_name = Configurator().configure()
            self.configuration = self.get_configuration()
        except KeyboardInterrupt:
            self.logger.info("Caught KeyboardInterrupt. Shutting down.")
        # NOTE(review): start() also runs after a KeyboardInterrupt during
        # configuration — confirm whether it should be in an `else:` clause.
        self.start()

    def is_admin(self, user):
        """Check if user is a bot admin."""
        botops = self.configuration["botops"].keys()
        mask = self.parse_hostmask(user)
        for op in botops:
            op_info = self.configuration["botops"][op]
            if op_info["host"] == mask["host"]:
                return True
            elif op == mask["nick"]:
                # User is on the list of ops, but wasn't joined when the bot entered
                self.get_admin(op)
                return True
        return False

    def join(self, chans=None):
        """Join the given channel, list of channels, or if no channel is specified, join any
        channels that exist in the config but are not already joined."""
        if chans is None:
            chans = self.configuration["chans"]
            if chans:
                for chan in chans.keys():
                    if not chans[chan]["joined"]:
                        self.logger.info("Joining {0}.".format(chan))
                        self.send('JOIN ' + chan)
                        self.configuration["chans"][chan]["joined"] = True
        else:
            for chan in chans:
                self.logger.info("Joining {0}.".format(chan))
                self.send('JOIN ' + chan)
                self.configuration["chans"].update({chan: {"joined": True, "settings": {}}})
        self.update_configuration(self.configuration)

    def load_commands(self):
        """Load commands from the pickle files if they exist."""
        # NOTE(review): admin_commands is read from commands.pkl and commands
        # from admincommands.pkl — the pairing looks swapped; confirm against
        # whatever writes these pickles before changing it.
        try:
            with open(self.base_path + '/plugins/commands.pkl', 'rb') as admin_file:
                admin_commands = pickle.load(admin_file)
        except (OSError, IOError):
            admin_commands = None
        try:
            with open(self.base_path + '/plugins/admincommands.pkl', 'rb') as command_file:
                commands = pickle.load(command_file)
        except (OSError, IOError):
            commands = None
        return admin_commands, commands

    def loop(self):
        """Main connection loop."""
        while not self.shutdown.is_set():
            try:
                # NOTE(review): buffer is reset every iteration, so a message
                # split across recv() calls is lost; str(bytes) + prefix
                # stripping stands in for a proper .decode().
                buffer = ''
                buffer += str(self.socket.recv(4096))
            except socket.timeout:
                # No messages to deal with, move along
                pass
            except IOError:
                # Something actually went wrong
                # TODO: Reconnect
                self.logger.exception("Unexpected socket error")
                break
            else:
                self.last_received = time()
                if buffer.startswith("b'"):
                    buffer = buffer[2:]
                if buffer.endswith("'"):
                    buffer = buffer[:-1]
                list_of_lines = buffer.split('\\r\\n')
                list_of_lines = filter(None, list_of_lines)
                for line in list_of_lines:
                    line = line.strip().split()
                    # NOTE(review): line is a list here, so `line != ""` is
                    # always true; an empty check would be `if line:`.
                    if line != "":
                        self.dispatch(line)
            self.caffeinate()
        self.send("QUIT :Shut down from command line.")
        self.socket.close()

    def parse_hostmask(self, nick):
        """Parse out the parts of the hostmask."""
        m = re.match(':?(?P<nick>.*?)!~?(?P<user>.*?)@(?P<host>.*)', nick)
        if m:
            return {"nick": m.group("nick"), "user": m.group("user"), "host": m.group("host")}
        else:
            return None

    def ping(self):
        """Send a ping to the server."""
        self.logger.debug("Pinging chat.freenode.net.")
        self.send("PING chat.freenode.net")
        self.last_ping_sent = time()

    def pong(self, server):
        """Respond to a ping from the server."""
        self.logger.debug("Ponging {}.".format(server))
        self.send('PONG {}'.format(server))

    def private_message(self, target, message, hide=False):
        """Send a private message to a target on the server."""
        self.private_message_raw = None  # no-op placeholder removed; see send()
        self.send('PRIVMSG {0} :{1}'.format(target, message), hide)

    def send(self, message, hide=False):
        """Send message to the server."""
        # Simple rate limit: at most ~one message per second.
        if (time() - self.last_message_sent) < 1:
            sleep(1)
        try:
            message = re.sub(r'(\n|\r)', "", message)
            # 510 chars + CRLF is the IRC line-length limit.
            self.socket.sendall(bytes((message[:510] + "\r\n"), "utf-8"))
        except socket.error:
            self.shutdown.set()
            self.logger.error("Message '" + message + "' failed to send. Shutting down.")
        else:
            if not hide:
                self.logger.debug("Sent: " + message)
            self.last_message_sent = time()

    def setup_logging(self):
        """Set up logging to a logfile and the console."""
        self.logger = logging.getLogger('GorillaBot')
        # Set logging level
        self.logger.setLevel(logging.DEBUG)
        # Create the file logger
        file_formatter = logging.Formatter(
            "%(asctime)s - %(filename)s - %(threadName)s - %(levelname)s : %(message)s")
        if not os.path.isdir(self.log_path):
            os.mkdir(self.log_path)
        logname = (self.log_path + "/{0}.log").format(strftime("%H%M_%m%d%y"))
        # Files are saved in the logs sub-directory as HHMM_mmddyy.log
        # This log file rolls over every seven days.
        filehandler = logging.handlers.TimedRotatingFileHandler(logname, 'd', 7)
        filehandler.setFormatter(file_formatter)
        filehandler.setLevel(logging.INFO)
        self.logger.addHandler(filehandler)
        self.logger.info("File logger created; saving logs to {}.".format(logname))
        # Create the console logger
        console_formatter = logging.Formatter(
            "%(asctime)s - %(threadName)s - %(levelname)s: %(message)s", datefmt="%I:%M:%S %p")
        consolehandler = logging.StreamHandler()
        consolehandler.setFormatter(console_formatter)
        self.logger.addHandler(consolehandler)
        self.logger.info("Console logger created.")

    def start(self):
        """Begin the threads. The "IO" thread is the loop that receives commands from the IRC
        channels, and responds. The "Executor" thread is the thread used for simple commands
        that do not require threads of their own. More complex commands will create new threads
        as needed from this thread.
        """
        try:
            io_thread = threading.Thread(name='IO', target=self.connect)
            io_thread.start()
            threading.Thread(name='Executor', target=self.executor.loop).start()
            # BUGFIX: isAlive() was removed in Python 3.9; is_alive() is the
            # long-standing equivalent.
            while io_thread.is_alive():
                io_thread.join(1)
        except KeyboardInterrupt:
            self.logger.info("Caught KeyboardInterrupt. Shutting down.")
            self.shutdown.set()

    def update_configuration(self, updated_configuration):
        """Update the full configuration blob with the new settings, then write it to the file.
        Also updates the stored self.configuration dict."""
        with open(self.config_path, 'r') as f:
            blob = json.load(f)
        updated_configuration = {self.configuration_name: updated_configuration}
        if blob:
            blob.update(updated_configuration)
        else:
            blob = updated_configuration
        with open(self.config_path, "w") as f:
            json.dump(blob, f, indent=4)
        self.configuration = blob[self.configuration_name]

if __name__ == "__main__":
    bot = Bot()
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
from djforms.processors.models import Contact


def _first_order_attr(contact, attr, default=None):
    """Return attribute `attr` of the contact's first related order.

    Every ``order_*`` accessor on the models below shares the same shape:
    fetch the first related order and read one attribute, falling back to
    ``default`` when there is no order (or any other lookup failure occurs).
    Factoring it out removes ~20 copies of the same try/except block.
    """
    try:
        return getattr(contact.order.all().first(), attr)
    except Exception:
        return default


class DonationContact(Contact):
    """Donation contact details for an order."""

    # Column index -> field/accessor name, used for tabular exports.
    COLUMNS = {
        0: 'last_name',
        1: 'first_name',
        2: 'order_cc_name',
        3: 'created_at',
        4: 'email',
        5: 'twitter',
        6: 'phone',
        7: 'address',
        8: 'city',
        9: 'state',
        10: 'postal_code',
        11: 'spouse',
        12: 'relation',
        13: 'honouring',
        14: 'class_of',
        15: 'order_promo',
        16: 'order_transid',
        17: 'order_status',
        18: 'order_total',
        19: 'order_comments',
        20: 'anonymous',
        21: 'hidden',
    }
    honouring = models.CharField(
        'In Honor Of',
        max_length=254,
        null=True,
        blank=True,
    )
    endowment = models.CharField(
        'Specific endowed scholarship and athletic team designations',
        max_length=254,
        null=True,
        blank=True,
    )
    spouse = models.CharField(
        'Spouse full name',
        max_length=100,
        null=True,
        blank=True,
    )
    spouse_class = models.CharField(
        "Spouse's Class",
        max_length=4,
        null=True,
        blank=True,
    )
    relation = models.CharField(
        'Relation to Carthage',
        max_length=100,
        null=True,
        blank=True,
    )
    class_of = models.CharField(max_length=4, null=True, blank=True)
    matching_company = models.BooleanField(
        verbose_name="I/we are employed by a matching gift company.",
    )
    opt_in = models.BooleanField(
        verbose_name='''
            I would like more information about planned gifts such as
            charitable trusts, charitable gifts annuities, life insurance,
            or will inclusions.
        ''',
    )
    anonymous = models.BooleanField(
        verbose_name='''
            I would like my gift to remain anonymous,
            and not be published on any donor list or in the annual report.
        ''',
    )
    hidden = models.BooleanField(default=False)
    twitter = models.CharField(
        'Twitter Handle',
        max_length=128,
        null=True,
        blank=True,
    )

    def order_cc_name(self):
        """Return the name on the credit card."""
        return _first_order_attr(self, 'cc_name')

    def order_promo(self):
        """Return the promotion with which this transaction was associated."""
        return _first_order_attr(self, 'promotion')

    def order_status(self):
        """Return the status of the order, lower-cased."""
        stat = _first_order_attr(self, 'status')
        return stat.lower() if stat is not None else None

    def order_oid(self):
        """Return the ID of the order (0 when there is no order)."""
        return _first_order_attr(self, 'id', default=0)

    def order_transid(self):
        """Return the transaction ID from the credit card processor."""
        return _first_order_attr(self, 'transid')

    def order_total(self):
        """Return the order total."""
        return _first_order_attr(self, 'total')

    def order_cycle(self):
        """Return the recurring payment cycle."""
        return _first_order_attr(self, 'cycle')

    def order_payments(self):
        """Return the payments type."""
        return _first_order_attr(self, 'payments')

    def order_start_date(self):
        """Return the start date for recurring payments."""
        return _first_order_attr(self, 'start_date')

    def order_comments(self):
        """Return the comments on a transaction."""
        return _first_order_attr(self, 'comments')

    def order_statement(self):
        """Return the statment."""
        return _first_order_attr(self, 'statement')

    def order_binary(self):
        """Return the binary value."""
        return _first_order_attr(self, 'binary')


class PaverContact(Contact):
    """Paver contact details for an order."""

    class_of = models.CharField(
        max_length=4,
        null=True,
        blank=True,
    )
    inscription_1 = models.CharField(max_length=24)
    inscription_2 = models.CharField(max_length=24)
    inscription_3 = models.CharField(max_length=24)
    inscription_4 = models.CharField(max_length=24, null=True, blank=True)
    inscription_5 = models.CharField(max_length=24, null=True, blank=True)
    inscription_6 = models.CharField(max_length=24, null=True, blank=True)
    inscription_7 = models.CharField(max_length=24, null=True, blank=True)

    def order_cc_name(self):
        """Return the name on the credit card."""
        return _first_order_attr(self, 'cc_name')

    def order_promo(self):
        """Return the promotion with which this transaction was associated."""
        return _first_order_attr(self, 'promotion')

    def order_status(self):
        """Return the status of the order (verbatim, unlike DonationContact)."""
        return _first_order_attr(self, 'status')

    def order_transid(self):
        """Return the transaction ID from the credit card processor."""
        return _first_order_attr(self, 'transid')

    def order_total(self):
        """Return the order total."""
        return _first_order_attr(self, 'total')

    def order_cycle(self):
        """Return the recurring payment cycle."""
        return _first_order_attr(self, 'cycle')

    def order_payments(self):
        """Return the payments type."""
        return _first_order_attr(self, 'payments')

    def order_start_date(self):
        """Return the start date for recurring payments."""
        return _first_order_attr(self, 'start_date')

    def order_comments(self):
        """Return the comments on a transaction."""
        return _first_order_attr(self, 'comments')
"""Test the init file for the Insteon component.""" import asyncio from pyinsteon.address import Address from homeassistant.components import insteon from homeassistant.components.insteon.const import ( CONF_CAT, CONF_OVERRIDE, CONF_SUBCAT, CONF_X10, DOMAIN, PORT_HUB_V1, PORT_HUB_V2, ) from homeassistant.const import ( CONF_ADDRESS, CONF_DEVICE, CONF_HOST, CONF_PASSWORD, CONF_PORT, CONF_USERNAME, EVENT_HOMEASSISTANT_STOP, ) from homeassistant.helpers.typing import HomeAssistantType from homeassistant.setup import async_setup_component from .const import ( MOCK_ADDRESS, MOCK_CAT, MOCK_IMPORT_CONFIG_PLM, MOCK_IMPORT_FULL_CONFIG_HUB_V1, MOCK_IMPORT_FULL_CONFIG_HUB_V2, MOCK_IMPORT_FULL_CONFIG_PLM, MOCK_IMPORT_MINIMUM_HUB_V1, MOCK_IMPORT_MINIMUM_HUB_V2, MOCK_SUBCAT, MOCK_USER_INPUT_PLM, PATCH_CONNECTION, ) from .mock_devices import MockDevices from tests.async_mock import patch from tests.common import MockConfigEntry async def mock_successful_connection(*args, **kwargs): """Return a successful connection.""" return True async def mock_failed_connection(*args, **kwargs): """Return a failed connection.""" raise ConnectionError("Connection failed") async def test_setup_entry(hass: HomeAssistantType): """Test setting up the entry.""" config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_INPUT_PLM) config_entry.add_to_hass(hass) with patch.object( insteon, "async_connect", new=mock_successful_connection ), patch.object(insteon, "async_close") as mock_close, patch.object( insteon, "devices", new=MockDevices() ): assert await async_setup_component( hass, insteon.DOMAIN, {}, ) await hass.async_block_till_done() hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP) await hass.async_block_till_done() # pylint: disable=no-member assert insteon.devices.async_save.call_count == 1 assert mock_close.called async def test_import_plm(hass: HomeAssistantType): """Test setting up the entry from YAML to a PLM.""" config = {} config[DOMAIN] = MOCK_IMPORT_CONFIG_PLM with patch.object( 
insteon, "async_connect", new=mock_successful_connection ), patch.object(insteon, "close_insteon_connection"), patch.object( insteon, "devices", new=MockDevices() ), patch( PATCH_CONNECTION, new=mock_successful_connection ): assert await async_setup_component( hass, insteon.DOMAIN, config, ) await hass.async_block_till_done() await asyncio.sleep(0.01) assert hass.config_entries.async_entries(DOMAIN) data = hass.config_entries.async_entries(DOMAIN)[0].data assert data[CONF_DEVICE] == MOCK_IMPORT_CONFIG_PLM[CONF_PORT] assert CONF_PORT not in data async def test_import_hub1(hass: HomeAssistantType): """Test setting up the entry from YAML to a hub v1.""" config = {} config[DOMAIN] = MOCK_IMPORT_MINIMUM_HUB_V1 with patch.object( insteon, "async_connect", new=mock_successful_connection ), patch.object(insteon, "close_insteon_connection"), patch.object( insteon, "devices", new=MockDevices() ), patch( PATCH_CONNECTION, new=mock_successful_connection ): assert await async_setup_component( hass, insteon.DOMAIN, config, ) await hass.async_block_till_done() await asyncio.sleep(0.01) assert hass.config_entries.async_entries(DOMAIN) data = hass.config_entries.async_entries(DOMAIN)[0].data assert data[CONF_HOST] == MOCK_IMPORT_FULL_CONFIG_HUB_V1[CONF_HOST] assert data[CONF_PORT] == PORT_HUB_V1 assert CONF_USERNAME not in data assert CONF_PASSWORD not in data async def test_import_hub2(hass: HomeAssistantType): """Test setting up the entry from YAML to a hub v2.""" config = {} config[DOMAIN] = MOCK_IMPORT_MINIMUM_HUB_V2 with patch.object( insteon, "async_connect", new=mock_successful_connection ), patch.object(insteon, "close_insteon_connection"), patch.object( insteon, "devices", new=MockDevices() ), patch( PATCH_CONNECTION, new=mock_successful_connection ): assert await async_setup_component( hass, insteon.DOMAIN, config, ) await hass.async_block_till_done() await asyncio.sleep(0.01) assert hass.config_entries.async_entries(DOMAIN) data = 
hass.config_entries.async_entries(DOMAIN)[0].data assert data[CONF_HOST] == MOCK_IMPORT_FULL_CONFIG_HUB_V2[CONF_HOST] assert data[CONF_PORT] == PORT_HUB_V2 assert data[CONF_USERNAME] == MOCK_IMPORT_MINIMUM_HUB_V2[CONF_USERNAME] assert data[CONF_PASSWORD] == MOCK_IMPORT_MINIMUM_HUB_V2[CONF_PASSWORD] async def test_import_options(hass: HomeAssistantType): """Test setting up the entry from YAML including options.""" config = {} config[DOMAIN] = MOCK_IMPORT_FULL_CONFIG_PLM with patch.object( insteon, "async_connect", new=mock_successful_connection ), patch.object(insteon, "close_insteon_connection"), patch.object( insteon, "devices", new=MockDevices() ), patch( PATCH_CONNECTION, new=mock_successful_connection ): assert await async_setup_component( hass, insteon.DOMAIN, config, ) await hass.async_block_till_done() await asyncio.sleep(0.01) # Need to yield to async processes # pylint: disable=no-member assert insteon.devices.add_x10_device.call_count == 2 assert insteon.devices.set_id.call_count == 1 options = hass.config_entries.async_entries(DOMAIN)[0].options assert len(options[CONF_OVERRIDE]) == 1 assert options[CONF_OVERRIDE][0][CONF_ADDRESS] == str(Address(MOCK_ADDRESS)) assert options[CONF_OVERRIDE][0][CONF_CAT] == MOCK_CAT assert options[CONF_OVERRIDE][0][CONF_SUBCAT] == MOCK_SUBCAT assert len(options[CONF_X10]) == 2 assert options[CONF_X10][0] == MOCK_IMPORT_FULL_CONFIG_PLM[CONF_X10][0] assert options[CONF_X10][1] == MOCK_IMPORT_FULL_CONFIG_PLM[CONF_X10][1] async def test_import_failed_connection(hass: HomeAssistantType): """Test a failed connection in import does not create a config entry.""" config = {} config[DOMAIN] = MOCK_IMPORT_CONFIG_PLM with patch.object( insteon, "async_connect", new=mock_failed_connection ), patch.object(insteon, "async_close"), patch.object( insteon, "devices", new=MockDevices(connected=False) ): assert await async_setup_component( hass, insteon.DOMAIN, config, ) await hass.async_block_till_done() assert not 
hass.config_entries.async_entries(DOMAIN) async def test_setup_entry_failed_connection(hass: HomeAssistantType, caplog): """Test setting up the entry with a failed connection.""" config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_INPUT_PLM) config_entry.add_to_hass(hass) with patch.object( insteon, "async_connect", new=mock_failed_connection ), patch.object(insteon, "devices", new=MockDevices(connected=False)): assert await async_setup_component( hass, insteon.DOMAIN, {}, ) assert "Could not connect to Insteon modem" in caplog.text
# -*- coding: utf-8 -*-
# South schema migration (initial) for the djangocms_blog app.
#
# NOTE(review): `user_orm_label` is referenced below (orm[user_orm_label] and
# inside frozen-ORM strings) but is never defined in this module as shown.
# South migrations that support a swappable AUTH_USER_MODEL normally compute
# it at module level from get_user_model() — confirm that header exists, or
# this migration raises NameError at run time.
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    """Initial schema migration: creates the blog category, post and
    CMS-plugin tables (plus their M2M join tables and unique constraints)."""

    def forwards(self, orm):
        """Apply the migration: create all tables and constraints."""
        # Adding model 'BlogCategoryTranslation'
        db.create_table(u'djangocms_blog_blogcategory_translation', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('language_code', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, blank=True, db_index=True)),
            ('master', self.gf('django.db.models.fields.related.ForeignKey')(related_name='translations', null=True, to=orm['djangocms_blog.BlogCategory'])),
        ))
        db.send_create_signal(u'djangocms_blog', ['BlogCategoryTranslation'])

        # Adding unique constraint on 'BlogCategoryTranslation', fields ['language_code', 'slug']
        db.create_unique(u'djangocms_blog_blogcategory_translation', ['language_code', 'slug'])

        # Adding unique constraint on 'BlogCategoryTranslation', fields ['language_code', 'master']
        db.create_unique(u'djangocms_blog_blogcategory_translation', ['language_code', 'master_id'])

        # Adding model 'BlogCategory'
        db.create_table(u'djangocms_blog_blogcategory', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['djangocms_blog.BlogCategory'], null=True, blank=True)),
            ('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('date_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal(u'djangocms_blog', ['BlogCategory'])

        # Adding model 'PostTranslation'
        db.create_table(u'djangocms_blog_post_translation', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('language_code', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, max_length=50, blank=True)),
            ('abstract', self.gf('djangocms_text_ckeditor.fields.HTMLField')()),
            ('master', self.gf('django.db.models.fields.related.ForeignKey')(related_name='translations', null=True, to=orm['djangocms_blog.Post'])),
        ))
        db.send_create_signal(u'djangocms_blog', ['PostTranslation'])

        # Adding unique constraint on 'PostTranslation', fields ['language_code', 'slug']
        db.create_unique(u'djangocms_blog_post_translation', ['language_code', 'slug'])

        # Adding unique constraint on 'PostTranslation', fields ['language_code', 'master']
        db.create_unique(u'djangocms_blog_post_translation', ['language_code', 'master_id'])

        # Adding model 'Post'
        db.create_table(u'djangocms_blog_post', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            # NOTE(review): user_orm_label must resolve to the (possibly custom)
            # user model's 'app_label.ObjectName' — see module-level note.
            ('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label])),
            ('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('date_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('date_published', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('date_published_end', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('publish', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('main_image', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['filer.Image'], null=True, blank=True)),
            ('main_image_thumbnail', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='blog_post_thumbnail', null=True, to=orm['cmsplugin_filer_image.ThumbnailOption'])),
            ('main_image_full', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='blog_post_full', null=True, to=orm['cmsplugin_filer_image.ThumbnailOption'])),
            ('content', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Placeholder'], null=True)),
        ))
        db.send_create_signal(u'djangocms_blog', ['Post'])

        # Adding M2M table for field categories on 'Post'
        m2m_table_name = db.shorten_name(u'djangocms_blog_post_categories')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('post', models.ForeignKey(orm[u'djangocms_blog.post'], null=False)),
            ('blogcategory', models.ForeignKey(orm[u'djangocms_blog.blogcategory'], null=False))
        ))
        db.create_unique(m2m_table_name, ['post_id', 'blogcategory_id'])

        # Adding model 'LatestPostsPlugin'
        db.create_table(u'cmsplugin_latestpostsplugin', (
            (u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
            ('latest_posts', self.gf('django.db.models.fields.IntegerField')(default=5)),
        ))
        db.send_create_signal(u'djangocms_blog', ['LatestPostsPlugin'])

        # Adding M2M table for field tags on 'LatestPostsPlugin'
        m2m_table_name = db.shorten_name(u'djangocms_blog_latestpostsplugin_tags')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('latestpostsplugin', models.ForeignKey(orm[u'djangocms_blog.latestpostsplugin'], null=False)),
            ('tag', models.ForeignKey(orm[u'taggit.tag'], null=False))
        ))
        db.create_unique(m2m_table_name, ['latestpostsplugin_id', 'tag_id'])

        # Adding M2M table for field categories on 'LatestPostsPlugin'
        m2m_table_name = db.shorten_name(u'djangocms_blog_latestpostsplugin_categories')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('latestpostsplugin', models.ForeignKey(orm[u'djangocms_blog.latestpostsplugin'], null=False)),
            ('blogcategory', models.ForeignKey(orm[u'djangocms_blog.blogcategory'], null=False))
        ))
        db.create_unique(m2m_table_name, ['latestpostsplugin_id', 'blogcategory_id'])

        # Adding model 'AuthorEntriesPlugin'
        db.create_table(u'cmsplugin_authorentriesplugin', (
            (u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
            ('latest_posts', self.gf('django.db.models.fields.IntegerField')(default=5)),
        ))
        db.send_create_signal(u'djangocms_blog', ['AuthorEntriesPlugin'])

        # Adding M2M table for field authors on 'AuthorEntriesPlugin'
        m2m_table_name = db.shorten_name(u'djangocms_blog_authorentriesplugin_authors')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('authorentriesplugin', models.ForeignKey(orm[u'djangocms_blog.authorentriesplugin'], null=False)),
            ('user', models.ForeignKey(orm[u'auth.user'], null=False))
        ))
        db.create_unique(m2m_table_name, ['authorentriesplugin_id', 'user_id'])

    def backwards(self, orm):
        """Reverse the migration: drop constraints first, then tables
        (mirror order of forwards())."""
        # Removing unique constraint on 'PostTranslation', fields ['language_code', 'master']
        db.delete_unique(u'djangocms_blog_post_translation', ['language_code', 'master_id'])

        # Removing unique constraint on 'PostTranslation', fields ['language_code', 'slug']
        db.delete_unique(u'djangocms_blog_post_translation', ['language_code', 'slug'])

        # Removing unique constraint on 'BlogCategoryTranslation', fields ['language_code', 'master']
        db.delete_unique(u'djangocms_blog_blogcategory_translation', ['language_code', 'master_id'])

        # Removing unique constraint on 'BlogCategoryTranslation', fields ['language_code', 'slug']
        db.delete_unique(u'djangocms_blog_blogcategory_translation', ['language_code', 'slug'])

        # Deleting model 'BlogCategoryTranslation'
        db.delete_table(u'djangocms_blog_blogcategory_translation')

        # Deleting model 'BlogCategory'
        db.delete_table(u'djangocms_blog_blogcategory')

        # Deleting model 'PostTranslation'
        db.delete_table(u'djangocms_blog_post_translation')

        # Deleting model 'Post'
        db.delete_table(u'djangocms_blog_post')

        # Removing M2M table for field categories on 'Post'
        db.delete_table(db.shorten_name(u'djangocms_blog_post_categories'))

        # Deleting model 'LatestPostsPlugin'
        db.delete_table(u'cmsplugin_latestpostsplugin')

        # Removing M2M table for field tags on 'LatestPostsPlugin'
        db.delete_table(db.shorten_name(u'djangocms_blog_latestpostsplugin_tags'))

        # Removing M2M table for field categories on 'LatestPostsPlugin'
        db.delete_table(db.shorten_name(u'djangocms_blog_latestpostsplugin_categories'))

        # Deleting model 'AuthorEntriesPlugin'
        db.delete_table(u'cmsplugin_authorentriesplugin')

        # Removing M2M table for field authors on 'AuthorEntriesPlugin'
        db.delete_table(db.shorten_name(u'djangocms_blog_authorentriesplugin_authors'))

    # South's frozen ORM state: a snapshot of every model this migration can
    # see, as generated by `schemamigration`. Do not edit by hand — South
    # evaluates these strings to rebuild the ORM at each migration step.
    # NOTE(review): the frozen strings u"orm[user_orm_label]" below look
    # unresolved — South freezes normally interpolate the label into the
    # string (e.g. u"orm['auth.User']"); verify against the generator output.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        u'cmsplugin_filer_image.thumbnailoption': {
            'Meta': {'ordering': "('width', 'height')", 'object_name': 'ThumbnailOption'},
            'crop': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'height': ('django.db.models.fields.IntegerField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'upscale': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'width': ('django.db.models.fields.IntegerField', [], {})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'djangocms_blog.authorentriesplugin': {
            'Meta': {'object_name': 'AuthorEntriesPlugin', 'db_table': "u'cmsplugin_authorentriesplugin'", '_ormbases': ['cms.CMSPlugin']},
            'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm[user_orm_label]", 'symmetrical': 'False'}),
            u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'latest_posts': ('django.db.models.fields.IntegerField', [], {'default': '5'})
        },
        u'djangocms_blog.blogcategory': {
            'Meta': {'object_name': 'BlogCategory'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['djangocms_blog.BlogCategory']", 'null': 'True', 'blank': 'True'})
        },
        u'djangocms_blog.blogcategorytranslation': {
            'Meta': {'unique_together': "[('language_code', 'slug'), ('language_code', 'master')]", 'object_name': 'BlogCategoryTranslation', 'db_table': "u'djangocms_blog_blogcategory_translation'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['djangocms_blog.BlogCategory']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'})
        },
        u'djangocms_blog.latestpostsplugin': {
            'Meta': {'object_name': 'LatestPostsPlugin', 'db_table': "u'cmsplugin_latestpostsplugin'", '_ormbases': ['cms.CMSPlugin']},
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['djangocms_blog.BlogCategory']", 'symmetrical': 'False', 'blank': 'True'}),
            u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'latest_posts': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
            'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['taggit.Tag']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'djangocms_blog.post': {
            'Meta': {'ordering': "('-date_published', '-date_created')", 'object_name': 'Post'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm[user_orm_label]"}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'blog_posts'", 'symmetrical': 'False', 'to': u"orm['djangocms_blog.BlogCategory']"}),
            'content': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'date_published': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'date_published_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'main_image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
            'main_image_full': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'blog_post_full'", 'null': 'True', 'to': u"orm['cmsplugin_filer_image.ThumbnailOption']"}),
            'main_image_thumbnail': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'blog_post_thumbnail'", 'null': 'True', 'to': u"orm['cmsplugin_filer_image.ThumbnailOption']"}),
            'publish': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'djangocms_blog.posttranslation': {
            'Meta': {'unique_together': "[('language_code', 'slug'), ('language_code', 'master')]", 'object_name': 'PostTranslation', 'db_table': "u'djangocms_blog_post_translation'"},
            'abstract': ('djangocms_text_ckeditor.fields.HTMLField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['djangocms_blog.Post']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'filer.file': {
            'Meta': {'object_name': 'File'},
            '_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
            'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
            'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': u"orm[user_orm_label]"}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        },
        'filer.folder': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': u"orm[user_orm_label]"}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
            u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        },
        'filer.image': {
            'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
            '_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            '_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            u'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
            'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'related_url': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
        },
        u'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        }
    }

    complete_apps = ['djangocms_blog']
"""Defines the API class. Copyright 2013 by Rackspace Hosting, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import re from falcon import api_helpers as helpers from falcon.request import Request from falcon.response import Response import falcon.responders from falcon.status_codes import HTTP_416 from falcon import util from falcon.http_error import HTTPError from falcon import DEFAULT_MEDIA_TYPE class API(object): """Provides routing and such for building a web service application This class is the main entry point into a Falcon-based app. It provides a callable WSGI interface and a simple routing engine based on URI templates. """ __slots__ = ('_after', '_before', '_error_handlers', '_media_type', '_routes', '_default_route', '_sinks') def __init__(self, media_type=DEFAULT_MEDIA_TYPE, before=None, after=None): """Initialize a new Falcon API instances Args: media_type: Default media type to use as the value for the Content-Type header on responses. (default 'application/json') before: A global action hook (or list of hooks) to call before each on_* responder, for all resources. Similar to the 'falcon.before' decorator, but applies to the entire API. When more than one action function is given, they will be executed in natural order (starting with the first in the list). after: A global action hook (or list of hooks) to call after each on_* responder, for all resources. Similar to the 'after' decorator, but applies to the entire API. 
""" self._routes = [] self._sinks = [] self._default_route = None self._media_type = media_type self._before = helpers.prepare_global_hooks(before) self._after = helpers.prepare_global_hooks(after) self._error_handlers = [] def __call__(self, env, start_response): """WSGI "app" method Makes instances of API callable by any WSGI server. See also PEP 333. Args: env: A WSGI environment dictionary start_response: A WSGI helper method for setting status and headers on a response. """ req = Request(env) resp = Response() responder, params = self._get_responder( req.path, req.method) try: # NOTE(kgriffs): Using an inner try..except in order to # address the case when err_handler raises HTTPError. # # NOTE(kgriffs): Coverage is giving false negatives, # so disabled on relevant lines. All paths are tested # afaict. try: responder(req, resp, **params) # pragma: no cover except Exception as ex: for err_type, err_handler in self._error_handlers: if isinstance(ex, err_type): err_handler(ex, req, resp, params) break # pragma: no cover else: # PERF(kgriffs): This will propagate HTTPError to # the handler below. It makes handling HTTPError # less efficient, but that is OK since error cases # don't need to be as fast as the happy path, and # indeed, should perhaps be slower to create # backpressure on clients that are issuing bad # requests. 
raise except HTTPError as ex: resp.status = ex.status if ex.headers is not None: resp.set_headers(ex.headers) if req.client_accepts('application/json'): resp.body = ex.json() # # Set status and headers # use_body = not helpers.should_ignore_body(resp.status, req.method) if use_body: helpers.set_content_length(resp) body = helpers.get_body(resp) else: # Default: return an empty body body = [] # Set content type if needed use_content_type = (body or req.method == 'HEAD' or resp.status == HTTP_416) if use_content_type: media_type = self._media_type else: media_type = None headers = resp._wsgi_headers(media_type) # Return the response per the WSGI spec start_response(resp.status, headers) return body def add_route(self, uri_template, resource): """Associate a URI path with a resource A resource is an instance of a class that defines various on_* "responder" methods, one for each HTTP method the resource allows. For example, to support GET, simply define an `on_get` responder. If a client requests an unsupported method, Falcon will respond with "405 Method not allowed". Responders must always define at least two arguments to receive request and response objects, respectively. For example: def on_post(self, req, resp): pass In addition, if the route's uri template contains field expressions, any responder that desires to receive requests for that route must accept arguments named after the respective field names defined in the template. For example, given the following uri template: /das/{thing} A PUT request to "/das/code" would be routed to: def on_put(self, req, resp, thing): pass If, on the other hand, the responder had been defined thus: def on_put(self, req, resp): pass Args: uri_template: Relative URI template. Currently only Level 1 templates are supported. See also RFC 6570. Care must be taken to ensure the template does not mask any sink patterns (see also add_sink). resource: Object which represents an HTTP/REST "resource". 
Falcon will pass "GET" requests to on_get, "PUT" requests to on_put, etc. If any HTTP methods are not supported by your resource, simply don't define the corresponding request handlers, and Falcon will do the right thing. """ uri_fields, path_template = helpers.compile_uri_template(uri_template) method_map = helpers.create_http_method_map( resource, uri_fields, self._before, self._after) # Insert at the head of the list in case we get duplicate # adds (will cause the last one to win). self._routes.insert(0, (path_template, method_map)) def add_sink(self, sink, prefix=r'/'): """Add a "sink" responder to the API. If no route matches a request, but the path in the requested URI matches the specified prefix, Falcon will pass control to the given sink, regardless of the HTTP method requested. Args: sink: A callable of the form: func(req, resp) prefix: A regex string, typically starting with '/', which will trigger the sink if it matches the path portion of the request's URI. Both strings and precompiled regex objects may be specified. Characters are matched starting at the beginning of the URI path. Named groups are converted to kwargs and passed to the sink as such. If the route collides with a route's URI template, the route will mask the sink (see also add_route). """ if not hasattr(prefix, 'match'): # Assume it is a string prefix = re.compile(prefix) # NOTE(kgriffs): Insert at the head of the list such that # in the case of a duplicate prefix, the last one added # is preferred. self._sinks.insert(0, (prefix, sink)) # TODO(kgriffs): Remove this functionality in Falcon version 0.2.0 @util.deprecated('Please migrate to add_sink(...) ASAP.') def set_default_route(self, default_resource): """DEPRECATED: Route all the unrouted requests to a default resource NOTE: If a default route is defined, all sinks are ignored. Args: default_resource: Object which works like an HTTP/REST resource. Falcon will pass "GET" requests to on_get, "PUT" requests to on_put, etc. 
If you want to exclude some HTTP method from the default routing, just simply don't define the corresponding request handlers. """ self._default_route = helpers.create_http_method_map( default_resource, set(), self._before, self._after) def add_error_handler(self, exception, handler=None): """Adds a handler for a given exception type Args: exception: Whenever an exception occurs when handling a request that is an instance of this exception class, the given handler callable will be used to handle the exception. handler: Callable that gets called with (ex, req, resp, params) when there is a matching exception when handling a request. If not specified, the handler will default to exception.handle, in which case the method is expected to be static (i.e., decorated with @staticmethod) and take the same params described above. Note: A handler can either raise an instance of HTTPError or modify resp manually in order to communicate information about the issue to the client. """ if handler is None: try: handler = exception.handle except AttributeError: raise AttributeError('handler must either be specified ' 'explicitly or defined as a static' 'method named "handle" that is a ' 'member of the given exception class.') # Insert at the head of the list in case we get duplicate # adds (will cause the last one to win). self._error_handlers.insert(0, (exception, handler)) #---------------------------------------------------------------------------- # Helpers #---------------------------------------------------------------------------- def _get_responder(self, path, method): """Searches routes for a matching responder Args: path: URI path to search (without query string) method: HTTP method (uppercase) requested Returns: A 3-member tuple, containing a responder callable, a dict containing parsed path fields (if any were specified in the matching route's URI template), and a reference to the "method not allowed" responder for the resource. 
""" for route in self._routes: path_template, method_map = route m = path_template.match(path) if m: params = m.groupdict() try: responder = method_map[method] except KeyError: responder = falcon.responders.bad_request break else: params = {} if self._default_route is None: for pattern, sink in self._sinks: m = pattern.match(path) if m: params = m.groupdict() responder = sink break else: responder = falcon.responders.path_not_found else: method_map = self._default_route try: responder = method_map[method] except KeyError: responder = falcon.responders.bad_request return (responder, params)
# coding=utf-8 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) from collections import defaultdict from pants.option.arg_splitter import GLOBAL_SCOPE from pants.option.global_options import GlobalOptionsRegistrar from pants.option.option_util import is_list_option from pants.option.parser import Parser from pants.option.parser_hierarchy import enclosing_scope from pants.option.ranked_value import RankedValue class _FakeOptionValues(object): def __init__(self, option_values): self._option_values = option_values def __getitem__(self, key): return getattr(self, key) def get(self, key, default=None): if hasattr(self, key): return getattr(self, key, default) return default def __getattr__(self, key): try: value = self._option_values[key] except KeyError: # Instead of letting KeyError raise here, re-raise an AttributeError to not break getattr(). 
raise AttributeError(key) return value.value if isinstance(value, RankedValue) else value def get_rank(self, key): value = self._option_values[key] return value.rank if isinstance(value, RankedValue) else RankedValue.FLAG def is_flagged(self, key): return self.get_rank(key) == RankedValue.FLAG def is_default(self, key): return self.get_rank(key) in (RankedValue.NONE, RankedValue.HARDCODED) def _options_registration_function(defaults, fingerprintables): def register(*args, **kwargs): option_name = Parser.parse_dest(*args, **kwargs) default = kwargs.get('default') if default is None: if kwargs.get('type') == bool: default = False if kwargs.get('type') == list: default = [] defaults[option_name] = RankedValue(RankedValue.HARDCODED, default) fingerprint = kwargs.get('fingerprint', False) if fingerprint: if is_list_option(kwargs): val_type = kwargs.get('member_type', str) else: val_type = kwargs.get('type', str) fingerprintables[option_name] = val_type return register def create_options(options, passthru_args=None, fingerprintable_options=None): """Create a fake Options object for testing. Note that the returned object only provides access to the provided options values. There is no registration mechanism on this object. Code under test shouldn't care about resolving cmd-line flags vs. config vs. env vars etc. etc. :param dict options: A dict of scope -> (dict of option name -> value). :param list passthru_args: A list of passthrough command line argument values. :param dict fingerprintable_options: A dict of scope -> (dict of option name -> option type). This registry should contain entries for any of the `options` that are expected to contribute to fingerprinting. :returns: An fake `Options` object encapsulating the given scoped options. 
""" fingerprintable = fingerprintable_options or defaultdict(dict) class FakeOptions(object): def for_scope(self, scope): scoped_options = options[scope] # TODO(John Sirois): Some users pass in A dict of scope -> _FakeOptionValues instead of a # dict of scope -> (dict of option name -> value). Clean up these usages and kill this # accomodation. if isinstance(scoped_options, _FakeOptionValues): return scoped_options else: return _FakeOptionValues(scoped_options) def for_global_scope(self): return self.for_scope('') def passthru_args_for_scope(self, scope): return passthru_args or [] def items(self): return options.items() @property def scope_to_flags(self): return {} def get_fingerprintable_for_scope(self, scope, include_passthru=False): pairs = [] if include_passthru and passthru_args: pairs.extend((str, passthru_arg) for passthru_arg in passthru_args) option_values = self.for_scope(scope) pairs.extend((option_type, option_values[option_name]) for option_name, option_type in fingerprintable[scope].items()) return pairs def __getitem__(self, key): return self.for_scope(key) return FakeOptions() def create_options_for_optionables(optionables, extra_scopes=None, options=None, passthru_args=None): """Create a fake Options object for testing with appropriate defaults for the given optionables. Any scoped `options` provided will override defaults, behaving as-if set on the command line. :param iterable optionables: A series of `Optionable` types to register default options for. :param iterable extra_scopes: An optional series of extra known scopes in play. :param dict options: A dict of scope -> (dict of option name -> value) representing option values explicitly set via the command line. :param list passthru_args: A list of passthrough args (specified after `--` on the command line). :returns: A fake `Options` object with defaults populated for the given `optionables` and any explicitly set `options` overlayed. 
""" all_options = defaultdict(dict) fingerprintable_options = defaultdict(dict) bootstrap_option_values = None def complete_scopes(scopes): """Return all enclosing scopes. This is similar to what `complete_scopes` does in `pants.option.options.Options` without creating `ScopeInfo`s. """ completed_scopes = set(scopes) for scope in scopes: while scope != '': if scope not in completed_scopes: completed_scopes.add(scope) scope = enclosing_scope(scope) return completed_scopes def register_func(on_scope): scoped_options = all_options[on_scope] scoped_fingerprintables = fingerprintable_options[on_scope] register = _options_registration_function(scoped_options, scoped_fingerprintables) register.bootstrap = bootstrap_option_values register.scope = on_scope return register # TODO: This sequence is a bit repetitive of the real registration sequence. # Register bootstrap options and grab their default values for use in subsequent registration. GlobalOptionsRegistrar.register_bootstrap_options(register_func(GLOBAL_SCOPE)) bootstrap_option_values = _FakeOptionValues(all_options[GLOBAL_SCOPE].copy()) # Now register the full global scope options. GlobalOptionsRegistrar.register_options(register_func(GLOBAL_SCOPE)) for optionable in optionables: optionable.register_options(register_func(optionable.options_scope)) # Make inner scopes inherit option values from their enclosing scopes. all_scopes = set(all_options.keys()) # TODO(John Sirois): Kill extra scopes one this goes in: # https://github.com/pantsbuild/pants/issues/1957 # For now we need a way for users of this utility to provide extra derived scopes out of band. # With #1957 resolved, the extra scopes will be embedded in the Optionable's option_scope # directly. if extra_scopes: all_scopes.update(extra_scopes) all_scopes = complete_scopes(all_scopes) # We need to update options before completing them based on inner/outer relation. 
if options: for scope, opts in options.items(): all_options[scope].update(opts) # Iterating in sorted order guarantees that we see outer scopes before inner scopes, # and therefore only have to inherit from our immediately enclosing scope. for s in sorted(all_scopes): if s != GLOBAL_SCOPE: scope = enclosing_scope(s) opts = all_options[s] for key, val in all_options.get(scope, {}).items(): if key not in opts: # Inner scope values override the inherited ones. opts[key] = val return create_options(all_options, passthru_args=passthru_args, fingerprintable_options=fingerprintable_options)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Options specific to resources managed by Keystone (Domain, User, etc)."""

import six

from keystone.common import validation
from keystone.i18n import _


def _validator(value):
    """Default no-op validator: accepts any value."""
    return


def boolean_validator(value):
    """Raise TypeError unless ``value`` is exactly True or False."""
    if value not in (True, False):
        raise TypeError(_('Expected boolean value, got %r') % type(value))


def ref_mapper_to_dict_options(ref):
    """Convert the values in _resource_option_mapper to options dict.

    NOTE: this is to be called from the relevant `to_dict` methods or
    similar and must be called from within the active session context.

    :param ref: the DB model ref to extract options from
    :returns: Dict of options as expected to be returned out of to_dict in
              the `options` key.
    """
    options = {}
    for opt in ref._resource_option_mapper.values():
        # Only emit options that are still registered; stale/unregistered
        # rows are silently skipped.
        if opt.option_id in ref.resource_options_registry.option_ids:
            r_opt = ref.resource_options_registry.get_option_by_id(
                opt.option_id)
            if r_opt is not None:
                options[r_opt.option_name] = opt.option_value
    return options


def resource_options_ref_to_mapper(ref, option_class):
    """Convert the _resource_options property-dict to options attr map.

    The model must have the resource option mapper located in the
    ``_resource_option_mapper`` attribute.

    The model must have the resource option registry located in the
    ``resource_options_registry`` attribute.

    The option dict with key(opt_id), value(opt_value) will be pulled from
    ``ref._resource_options``.

    NOTE: This function MUST be called within the active writer session
    context!

    :param ref: The DB model reference that is actually stored to the
                backend.
    :param option_class: Class that is used to store the resource option in
                         the DB.
    """
    options = getattr(ref, '_resource_options', None)
    if options is not None:
        # To ensure everything is clean, no lingering refs.
        delattr(ref, '_resource_options')
    else:
        # _resource_options didn't exist. Work from an empty set.
        options = {}

    # NOTE(notmorgan): explicitly use .keys() here as the attribute mapper
    # has some oddities at times. This guarantees we are working with keys.
    set_options = set(ref._resource_option_mapper.keys())
    # Get any options that are not registered and slate them for removal from
    # the DB. This will delete unregistered options.
    clear_options = set_options.difference(
        ref.resource_options_registry.option_ids)
    options.update({x: None for x in clear_options})

    # Set the resource options for user in the Attribute Mapping.
    for r_opt_id, r_opt_value in options.items():
        if r_opt_value is None:
            # Delete any option set explicitly to None, ignore unset
            # options.
            ref._resource_option_mapper.pop(r_opt_id, None)
        else:
            # Set any options on the user_ref itself.
            opt_obj = option_class(
                option_id=r_opt_id,
                option_value=r_opt_value)
            ref._resource_option_mapper[r_opt_id] = opt_obj


class ResourceOptionRegistry(object):
    """Registry of ResourceOption objects, keyed by (unique) option id."""

    def __init__(self, registry_name):
        self._registered_options = {}
        self._registry_type = registry_name

    @property
    def option_names(self):
        return set([opt.option_name for opt in self.options])

    @property
    def options_by_name(self):
        return {opt.option_name: opt
                for opt in self._registered_options.values()}

    @property
    def options(self):
        return self._registered_options.values()

    @property
    def option_ids(self):
        return set(self._registered_options.keys())

    def get_option_by_id(self, opt_id):
        return self._registered_options.get(opt_id, None)

    def get_option_by_name(self, name):
        for option in self._registered_options.values():
            if name == option.option_name:
                return option
        return None

    @property
    def json_schema(self):
        """JSON schema validating all registered options (all nullable)."""
        schema = {'type': 'object',
                  'properties': {},
                  'additionalProperties': False}

        for opt in self.options:
            if opt.json_schema is not None:
                # NOTE(notmorgan): All options are nullable. Null indicates
                # the option should be reset and removed from the DB store.
                schema['properties'][opt.option_name] = validation.nullable(
                    opt.json_schema)
            else:
                # NOTE(notmorgan): without 'type' being specified, this
                # can be of any-type. We are simply specifying no interesting
                # values beyond that the property may exist here.
                schema['properties'][opt.option_name] = {}

        return schema

    def register_option(self, option):
        """Register ``option``, rejecting duplicate ids or names.

        Re-registering the exact same option object is a no-op.

        :raises ValueError: if a different option with the same id or the
                            same name is already registered.
        """
        if option in self.options:
            # Re-registering the exact same option does nothing.
            return

        if option.option_id in self._registered_options:
            raise ValueError(_('Option %(option_id)s already defined in '
                               '%(registry)s.') %
                             {'option_id': option.option_id,
                              'registry': self._registry_type})
        if option.option_name in self.option_names:
            raise ValueError(_('Option %(option_name)s already defined in '
                               '%(registry)s') %
                             {'option_name': option.option_name,
                              'registry': self._registry_type})
        self._registered_options[option.option_id] = option


class ResourceOption(object):

    def __init__(self, option_id, option_name, validator=_validator,
                 json_schema_validation=None):
        """The base object to define the option(s) to be stored in the DB.

        :param option_id: The ID of the option. This will be used to lookup
                          the option value from the DB and should not be
                          changed once defined as the values will no longer
                          be correctly mapped to the keys in the user_ref
                          when retrieving the data from the DB.
        :type option_id: str
        :param option_name: The name of the option. This value will be used
                            to map the value from the user request on a
                            resource update to the correct option id to be
                            stored in the database. This value should not be
                            changed once defined as it will change the
                            resulting keys in the user_ref.
        :type option_name: str
        :param validator: A callable that raises TypeError if the value to be
                          persisted is incorrect. A single argument of the
                          value to be persisted will be passed to it. No
                          return value is expected.
        :type validator: callable
        :param json_schema_validation: Dictionary defining the JSON schema
                                       validation for the option itself. This
                                       is used to generate the JSON Schema
                                       validator(s) used at the API layer.
        :type json_schema_validation: dict
        """
        # BUG FIX: the original guard was
        #   `if not isinstance(option_id, six.string_types)
        #        and len(option_id) == 4:`
        # which made the TypeError unreachable for non-strings of the wrong
        # length (they fell through to the ValueError branch) and could raise
        # an accidental TypeError from len() on values without a length.
        # Check the type first, then the length.
        if not isinstance(option_id, six.string_types):
            raise TypeError(_('`option_id` must be a string, got %r')
                            % option_id)
        elif len(option_id) != 4:
            raise ValueError(_('`option_id` must be 4 characters in '
                               'length. Got %r') % option_id)
        if not isinstance(option_name, six.string_types):
            raise TypeError(_('`option_name` must be a string. '
                              'Got %r') % option_name)

        self._option_id = option_id
        self._option_name = option_name
        self.validator = validator
        self._json_schema_validation = json_schema_validation

    @property
    def json_schema(self):
        return self._json_schema_validation or None

    @property
    def option_name(self):
        # NOTE(notmorgan) Option names should never be set outside of
        # definition time.
        return self._option_name

    @property
    def option_id(self):
        # NOTE(notmorgan) Option IDs should never be set outside of definition
        # time.
        return self._option_id
from common import * import cStringIO as StringIO import datetime import struct import simplejson from xml.dom import minidom # get the real OrderedDict if we're on 2.7 try: from collections import OrderedDict except ImportError, e: from ordered_dict import OrderedDict TYPES = { "int8": (int, long), "int16": (int, long), "int32": (int, long), "int64": (int, long), "float32": (float,), "float64": (float,), "boolean": (bool,), "string": (str, unicode), "bytes": (str,), "date": (datetime.datetime,), } MAXES = { "int8": (1 << 7) - 1, "int16": (1 << 15) - 1, "int32": (1 << 31) - 1, "int64": (1 << 63) - 1, "float32": struct.unpack(">f", "\x7f\x7f\xff\xff")[0], "float64": struct.unpack(">d", "\x7f\xef\xff\xff\xff\xff\xff\xff")[0], } MINS = { "int8": -(1 << 7), "int16": -(1 << 15), "int32": -(1 << 31), "int64": -(1 << 63), "float32": struct.unpack(">f", "\x00\x00\x00\x01")[0], "float64": struct.unpack(">d", "\x00\x00\x00\x00\x00\x00\x00\x01")[0], } def _is_valid_float(f, typedef): return abs(f) <= MAXES[typedef] and (f == 0.0 or abs(f) > MINS[typedef]) def _is_valid_int(i, typedef): return i > MINS[typedef] and i <= MAXES[typedef] RANGE_FNS = { "int8": _is_valid_int, "int16": _is_valid_int, "int32": _is_valid_int, "int64": _is_valid_int, "float32": _is_valid_float, "float64": _is_valid_float, } FORMATS = { "int8": ">b", "int16": ">h", "int32": ">i", "int64": ">q", "float32": ">f", "float64": ">d", } MAX_SEQ_LENGTH = 0x3FFFFFFF EPOCH = datetime.datetime(1970, 1, 1, 0, 0, 0, 0) def _to_java_date(date): """ Converts a python datetime into an int64 representing milliseconds since 00:00:00 1/1/1970 GMT. This is the expected input to the Java Date class. Microseconds in the Python datetime representation are truncated. 
>>> d = datetime.datetime(2010, 11, 24, 11, 50, 34, 237861) >>> _to_java_date(d) 1290599434237 >>> _from_java_date(_to_java_date(d)) datetime.datetime(2010, 11, 24, 11, 50, 34, 237000) """ td = date - EPOCH ms = td.days * 24 * 3600 * 1000 + td.seconds * 1000 + td.microseconds / 1000 return ms def _from_java_date(javaDate): """ Converts an int64 representing a Java Date as milliseconds since 00:00:00 1/1/1970 GMT into a Python datetime. >>> _from_java_date(1000000000000) datetime.datetime(2001, 9, 9, 1, 46, 40) >>> _to_java_date(_from_java_date(1000000000000)) 1000000000000 """ td = datetime.timedelta(microseconds = javaDate * 1000) return EPOCH + td class JsonTypeSerializer(object): """ Python implementation of the Voldemort JsonTypeSerializer class, which converts structured types consisting of lists, dicts, and primitive types (strings, ints, floats, dates) into the proprietary binary representation used by Voldemort. """ def __init__(self, typedef, has_version=False): r""" Constructor. The typedef is either a json string containing the schema definition or a map from integer version number to schema json. 
A simple json schema results in a non-versioned serializer (note that the output is only the 4-byte binary value WITHOUT a version number prefix: >>> s = JsonTypeSerializer('"int32"') >>> s.writes(42) '\x00\x00\x00*' A dict typedef will always yield a versioned serializer (note that now there is a version number byte appended to the output: >>> s = JsonTypeSerializer({1: '"int32"'}) >>> s.writes(42) '\x01\x00\x00\x00*' Setting has_version=True will also return a versioned serializer, with an implied version 0: >>> s = JsonTypeSerializer('"int32"', has_version=True) >>> s.writes(42) '\x00\x00\x00\x00*' """ self._has_version = has_version or isinstance(typedef, dict) if not isinstance(typedef, dict): typeobj = simplejson.loads(typedef, object_pairs_hook=OrderedDict) if self._has_version: self._typedef = dict([(0, typeobj)]) else: self._typedef = typeobj else: self._typedef = dict((k, simplejson.loads(v, object_pairs_hook=OrderedDict)) for k, v in typedef.iteritems()) @staticmethod def create_from_xml(node): r""" Static factory method that creates a serializer from then XML description in a stores.xml file. >>> from xml.dom import minidom >>> xml = minidom.parseString( ... '<serializer><type>json</type><schema-info version="0">"int32"</schema-info></serializer>') >>> s = JsonTypeSerializer.create_from_xml(xml) >>> s.writes(42) '\x00\x00\x00\x00*' Output always uses the latest version: >>> xml = minidom.parseString( ... '<serializer><type>json</type>' + ... '<schema-info version="0">"int32"</schema-info>' + ... '<schema-info version="1">"int64"</schema-info></serializer>') >>> s = JsonTypeSerializer.create_from_xml(xml) >>> s.writes(42) '\x01\x00\x00\x00\x00\x00\x00\x00*' Duplicate version numbers are an error: >>> xml = minidom.parseString( ... '<serializer><type>json</type>' + ... '<schema-info version="0">"int32"</schema-info>' + ... 
'<schema-info version="0">"int64"</schema-info></serializer>') >>> s = JsonTypeSerializer.create_from_xml(xml) Traceback (most recent call last): ... SerializationException: Schema info has duplicates of version: 0 The version "none" means no versioning, and no version will be output in the byte stream: >>> xml = minidom.parseString( ... '<serializer><type>json</type><schema-info version="none">"int32"</schema-info></serializer>') >>> s = JsonTypeSerializer.create_from_xml(xml) >>> s.writes(42) '\x00\x00\x00*' You can't mix a "none" version with a regular version number: >>> xml = minidom.parseString( ... '<serializer><type>json</type>' + ... '<schema-info version="none">"int32"</schema-info>' + ... '<schema-info version="0">"int64"</schema-info></serializer>') >>> s = JsonTypeSerializer.create_from_xml(xml) Traceback (most recent call last): ... SerializationException: Schema info has duplicates of version: 0 JSON with single quotes is NOT valid JSON, even though voldemort doesn't necessarily check for this: >>> xml = minidom.parseString("<serializer><type>json</type>" + ... "<schema-info version=\"0\">{ 'foo':'int32' }</schema-info></serializer>") >>> s = JsonTypeSerializer.create_from_xml(xml) Traceback (most recent call last): ... 
SerializationException: Error decoding schema JSON """ typedef = dict() has_version = True for schema_info in node.getElementsByTagName('schema-info'): version = schema_info.getAttribute('version') if not version: version = 0 elif version == 'none': version = 0 has_version = False else: version = int(version) if version in typedef: raise SerializationException('Schema info has duplicates of version: %d' % version) typedef[version] = ''.join(elem.data for elem in schema_info.childNodes if elem.nodeType == minidom.Node.TEXT_NODE) if not typedef: raise SerializationException('No schemas specified') if not has_version and len(typedef) > 1: raise SerializationException('Schema info has version="none" and multiple versions') try: if not has_version: return JsonTypeSerializer(typedef[0]) else: return JsonTypeSerializer(typedef) except simplejson.JSONDecodeError: raise SerializationException('Error decoding schema JSON') def read(self, input): r""" Reads a serialized object from the file-like object input: >>> f = StringIO.StringIO('\x00\x00\x00*') >>> s = JsonTypeSerializer('"int32"') >>> s.read(f) 42 >>> versioned = '\00\x01>\x80\x00\x00\x00\x03\x00\x01\x00\x02\x00\x03\x00\x03foo' >>> non_versioned = '\x01>\x80\x00\x00\x00\x03\x00\x01\x00\x02\x00\x03\x00\x03foo' More complex types are also supported: >>> s = JsonTypeSerializer('{ "a":"float32", "b":["int16"], "c":"string" }') >>> f = StringIO.StringIO(non_versioned) >>> s.read(f) == {'a': 0.25, 'b': [1, 2, 3], 'c':u'foo'} True Non-versioned serializers can't read versioned binary representations: >>> f = StringIO.StringIO(versioned) >>> s.read(f) Traceback (most recent call last): ... SerializationException: Unexpected end of input. 
And vice-version, versioned serializers will only read versioned binary representations: >>> s = JsonTypeSerializer('{ "a":"float32", "b":["int16"], "c":"string" }', has_version=True) >>> f = StringIO.StringIO(versioned) >>> s.read(f) == {'a': 0.25, 'b': [1, 2, 3], 'c': u'foo'} True >>> f = StringIO.StringIO(non_versioned) >>> s.read(f) Traceback (most recent call last): ... KeyError: 1 The error messages from reading improperly versioned representations aren't super helpful... """ if self._has_version: version = self._read_int8(input) typedef = self._typedef[version] else: typedef = self._typedef return self._read(input, typedef) def reads(self, s): r""" Reads a serialized object from given string: >>> s = JsonTypeSerializer('"int32"') >>> s.reads('\x00\x00\x00*') 42 Same rules as read()/write() with regard to versioned representations apply to reads()/writes(): >>> versioned = '\00\x01>\x80\x00\x00\x00\x03\x00\x01\x00\x02\x00\x03\x00\x03foo' >>> non_versioned = '\x01>\x80\x00\x00\x00\x03\x00\x01\x00\x02\x00\x03\x00\x03foo' >>> s = JsonTypeSerializer('{ "a":"float32", "b":["int16"], "c":"string" }') >>> s.reads(non_versioned) == {'a': 0.25, 'c': u'foo', 'b': [1, 2, 3]} True >>> s.reads(versioned) Traceback (most recent call last): ... SerializationException: Unexpected end of input. >>> s = JsonTypeSerializer('{ "a":"float32", "b":["int16"], "c":"string" }', has_version=True) >>> s.reads(versioned) == {'a': 0.25, 'b': [1, 2, 3], 'c': u'foo'} True >>> s.reads(non_versioned) Traceback (most recent call last): ... 
KeyError: 1 """ return self.read(StringIO.StringIO(s)) def write(self, output, obj): r""" Writes the serialized binary representation of an object to the file-like object output: >>> s = JsonTypeSerializer('{ "a":"float32", "b":["int16"], "c":"string" }') >>> f = StringIO.StringIO() >>> s.write(f, {'a': 0.25, 'b':[1,2,3], 'c':'foo'}) >>> f.getvalue() '\x01>\x80\x00\x00\x00\x03\x00\x01\x00\x02\x00\x03\x00\x03foo' The representation of a versioned serializer will have the version number byte prefix: >>> s = JsonTypeSerializer('{ "a":"float32", "b":["int16"], "c":"string" }', has_version=True) >>> f = StringIO.StringIO() >>> s.write(f, {'a': 0.25, 'b':[1,2,3], 'c':'foo'}) >>> f.getvalue() '\x00\x01>\x80\x00\x00\x00\x03\x00\x01\x00\x02\x00\x03\x00\x03foo' """ if self._has_version: latest = max(self._typedef.keys()) typedef = self._typedef[latest] self._write_int8(output, latest) else: typedef = self._typedef self._write(output, obj, typedef) def writes(self, obj): r""" Returns a string representing the serialized binary representation of obj: >>> s = JsonTypeSerializer('{ "a":"float32", "b":["int16"], "c":"string" }') >>> s.writes({'a': 0.25, 'b':[1,2,3], 'c':'foo'}) '\x01>\x80\x00\x00\x00\x03\x00\x01\x00\x02\x00\x03\x00\x03foo' The representation of a versioned serializer will have the version number byte prefix: >>> s = JsonTypeSerializer('{ "a":"float32", "b":["int16"], "c":"string" }', has_version=True) >>> s.writes({'a': 0.25, 'b':[1,2,3], 'c':'foo'}) '\x00\x01>\x80\x00\x00\x00\x03\x00\x01\x00\x02\x00\x03\x00\x03foo' reads() and writes() are more or less inverses of each other: >>> s = JsonTypeSerializer('"int32"') >>> s.reads(s.writes(42)) 42 Strings may be converted to unicodes: >>> s = JsonTypeSerializer('"string"') >>> s.reads(s.writes('foo')) u'foo' Dates may lose some precision: >>> s = JsonTypeSerializer('"date"') >>> s.reads(s.writes(datetime.datetime(2010, 11, 24, 11, 50, 34, 237861))) datetime.datetime(2010, 11, 24, 11, 50, 34, 237000) Nested types also 
work: >>> s = JsonTypeSerializer('{ "a":"float32", "b":["int16"], "c":"string" }') >>> s.reads(s.writes({'a': 0.25, 'b':[1,2,3], 'c':'foo'})) == {'a': 0.25, 'b':[1,2,3], 'c':u'foo'} True """ sfile = StringIO.StringIO() self.write(sfile, obj) return sfile.getvalue() def _read(self, input, typedef): r""" Internal routine for reading a complex type: >>> s = JsonTypeSerializer('"int32"') >>> f = StringIO.StringIO('\x01>\x80\x00\x00\x00\x03\x00\x01\x00\x02\x00\x03\x00\x03foo') >>> obj = s._read(f, OrderedDict((('a','float32'), ('b',['int16']), ('c','string')))) >>> obj == {'a': 0.25, 'c': u'foo', 'b': [1, 2, 3]} True """ if isinstance(typedef, dict): return self._read_dict(input, typedef) elif isinstance(typedef, list): return self._read_list(input, typedef) elif isinstance(typedef, str) or isinstance(typedef, unicode): if typedef not in TYPES.keys(): raise SerializationException("Unknown type string: %s" % typedef) return getattr(self, "_read_%s" % typedef)(input) else: raise SerializationException("Unexpected type: %s" % type(typedef)) def _read_boolean(self, input): r""" Internal routine for reading booleans: >>> s = JsonTypeSerializer('"int32"') >>> s._read_boolean(StringIO.StringIO('\x00')) False >>> s._read_boolean(StringIO.StringIO('\x01')) True Negative int8s indicate "None": >>> s._read_boolean(StringIO.StringIO('\xff')) is None True Positive int8s are treated as True, even though _write_boolean() will always write \x01: >>> s._read_boolean(StringIO.StringIO('\x05')) True """ b = self._read_int8(input) if b < 0: return None elif b == 0: return False else: return True def _read_numeric(self, input, typedef): r""" Internal routine for reading numeric types: >>> s = JsonTypeSerializer('"string"') >>> s._read_numeric(StringIO.StringIO('*'), 'int8') 42 >>> s._read_numeric(StringIO.StringIO('\x00*'), 'int16') 42 >>> s._read_numeric(StringIO.StringIO('\x00\x00\x00*'), 'int32') 42 >>> s._read_numeric(StringIO.StringIO('\x00\x00\x00\x00\x00\x00\x00*'), 'int64') 42 >>> 
s._read_numeric(StringIO.StringIO('>\x80\x00\x00'), 'float32') 0.25 >>> s._read_numeric(StringIO.StringIO('?\xd0\x00\x00\x00\x00\x00\x00'), 'float64') 0.25 Inputs corresponding to the smallest of each respective type are read as None: >>> s._read_numeric(StringIO.StringIO('\x80'), 'int8') is None True >>> s._read_numeric(StringIO.StringIO('\x80\x00'), 'int16') is None True >>> s._read_numeric(StringIO.StringIO('\x80\x00\x00\x00'), 'int32') is None True >>> s._read_numeric(StringIO.StringIO('\x80\x00\x00\x00\x00\x00\x00\x00'), 'int64') is None True >>> s._read_numeric(StringIO.StringIO('\x00\x00\x00\x01'), 'float32') is None True >>> s._read_numeric(StringIO.StringIO('\x00\x00\x00\x00\x00\x00\x00\x01'), 'float64') is None True An insufficiently large input will cause an error: >>> s._read_numeric(StringIO.StringIO('\x00*'), 'int32') Traceback (most recent call last): ... SerializationException: Unexpected end of input. An excessively large one will leave dangling input, which may cause problems down the line as well as returning the wrong value: >>> s._read_numeric(StringIO.StringIO('\x00\x00\x00*'), 'int16') 0 >>> s._read_numeric(StringIO.StringIO('?\xd0\x00\x00\x00\x00\x00\x00'), 'float32') == 0.25 False """ size = struct.calcsize(FORMATS[typedef]) bytes = input.read(size) if len(bytes) < size: raise SerializationException("Unexpected end of input.") val = struct.unpack(FORMATS[typedef], bytes)[0] if val == MINS[typedef]: return None return val # This is totally broken for length values 0x3fff0000-0x3fffffff, the true max length is 0x3ffeffff, # but as written, this is a pure port of the java deserialization stuff def _read_length(self, input): r""" Internal routines that read Voldemort's screwy variable length encoding: >>> s = JsonTypeSerializer('"string"') An int16 -1 value is treated as -1: >>> s._read_length(StringIO.StringIO('\xff\xff')) -1 A positive int16 is treated as a short length: >>> s._read_length(StringIO.StringIO('\x00\x05')) 5 A negative int16 in 
the first two bytes is a signal that it's really an int32 length: >>> s._read_length(StringIO.StringIO('\xff\x00\x00\x00')) 1056964608 But any 32-bit length between 0x3fff0000-0x3fffffff will be misinterpreted as -1 (this is a bug, but it's identical behavior to the java version): >>> s._read_length(StringIO.StringIO('\xff\xff\x00\x05')) -1 """ firstWord = self._read_int16(input) if firstWord == -1: return -1 if firstWord < -1: secondWord = self._read_int16(input) return ((firstWord & 0x3FFF) << 16) + (secondWord & 0xFFFF) return firstWord def _read_bytes(self, input): r""" Internal routine for reading raw bytes strings. The length is encoded at the start of the string: >>> s = JsonTypeSerializer('"string"') >>> s._read_bytes(StringIO.StringIO('\x00\x03foo')) 'foo' A length of 0 is the empty string: >>> s._read_bytes(StringIO.StringIO('\x00\x00')) '' A negative length is None: >>> s._read_bytes(StringIO.StringIO('\xff\xff')) We get an error if the data is too short: >>> s._read_bytes(StringIO.StringIO('\x00\x0afoo')) Traceback (most recent call last): ... SerializationException: Unexpected end of input. """ size = self._read_length(input) if size < 0: return None if size == 0: return '' bytes = input.read(size) if len(bytes) != size: raise SerializationException("Unexpected end of input.") return bytes def _read_string(self, input): r""" Internal routine for reading UTF-8 encoded strings. >>> s = JsonTypeSerializer('"string"') >>> s._read_string(StringIO.StringIO('\x00\x03foo')) u'foo' A length of 0 is the empty string: >>> s._read_string(StringIO.StringIO('\x00\x00')) u'' A negative length is None: >>> s._read_string(StringIO.StringIO('\xff\xff')) We get an error if the data is too short: >>> s._read_string(StringIO.StringIO('\x00\x0afoo')) Traceback (most recent call last): ... SerializationException: Unexpected end of input. 
""" bytes = self._read_bytes(input) if bytes is None: return None if not bytes: return u'' return str.decode(bytes, "utf_8") def _read_date(self, input): r""" Internal routine that reads a date: >>> s = JsonTypeSerializer('"string"') >>> s._read_date(StringIO.StringIO('\x00\x00\x01,~\x90\x84\x82')) datetime.datetime(2010, 11, 24, 15, 46, 29, 122000) The byte string corresponding to the smallest int64 deserializes to None: >>> s._read_date(StringIO.StringIO('\x80\x00\x00\x00\x00\x00\x00\x00')) """ javaDate = self._read_int64(input) if javaDate is None: return None return _from_java_date(javaDate) def _read_list(self, input, typedef): r""" Internal routine for reading lists: >>> s = JsonTypeSerializer('"string"') >>> s._read_list(StringIO.StringIO('\x00\x03\x00\x01\x00\x02\x00\x03'), ['int16']) [1, 2, 3] List typedefs must be singleton lists: >>> s._read_list(StringIO.StringIO('\x00\x03\x00\x01\x00\x02\x00\x03'), []) Traceback (most recent call last): ... SerializationException: Expected single element typedef, but got: 0 >>> s._read_list(StringIO.StringIO('\x00\x03\x00\x01\x00\x02\x00\x03'), ['int16', 'int32']) Traceback (most recent call last): ... SerializationException: Expected single element typedef, but got: 2 >>> s._read_list(StringIO.StringIO('\x00\x03\x00\x01\x00\x02\x00\x03'), 'int16') Traceback (most recent call last): ... 
SerializationException: Wrong type: expected list but got: <type 'str'> A length of -1 return None: >>> s._read_list(StringIO.StringIO('\xff\xff'), ['int16']) is None True A length of 0 is an empty list: >>> s._read_list(StringIO.StringIO('\x00\x00'), ['int16']) [] """ if not isinstance(typedef, list): raise SerializationException("Wrong type: expected list but got: %s" % type(typedef)) if len(typedef) != 1: raise SerializationException("Expected single element typedef, but got: %d" % len(typedef)) size = self._read_length(input) if size < 0: return None entryType = typedef[0] return [self._read(input, entryType) for i in xrange(0, size)] def _read_dict(self, input, typedef): r""" Internal routine for reading dicts: >>> s = JsonTypeSerializer('"string"') >>> obj = s._read_dict(StringIO.StringIO('\x01\x00\x01\x00\x02'), OrderedDict((('a','int16'), ('b','int16')))) >>> obj == {'a': 1, 'b': 2} True Typedef has to be a map: >>> s._read_dict(StringIO.StringIO('\x01\x00\x01\x00\x02'), ['int16']) Traceback (most recent call last): ... SerializationException: Wrong typedef type: expected dict but got: <type 'list'> If the serialized blob starts with (int8) -1, then the map is read as None: >>> s._read_dict(StringIO.StringIO('\xff'), OrderedDict((('a','int16'), ('b','int16')))) is None True """ if self._read_int8(input) == -1: return None if not isinstance(typedef, dict): raise SerializationException("Wrong typedef type: expected dict but got: %s" % type(typedef)) m = {} for key, entrytype in typedef.iteritems(): m[key] = self._read(input, entrytype) return m def _write(self, output, obj, typedef): r""" Internal routine that serializes objects according to the passed in typedef. 
>>> s = JsonTypeSerializer('"string"') >>> f = StringIO.StringIO() >>> s._write(f, True, 'boolean') >>> f.getvalue() '\x01' >>> f = StringIO.StringIO() >>> s._write(f, 42, 'int8') >>> f.getvalue() '*' >>> f = StringIO.StringIO() >>> s._write(f, 42, 'int16') >>> f.getvalue() '\x00*' >>> f = StringIO.StringIO() >>> s._write(f, 42, 'int32') >>> f.getvalue() '\x00\x00\x00*' >>> f = StringIO.StringIO() >>> s._write(f, 42, 'int64') >>> f.getvalue() '\x00\x00\x00\x00\x00\x00\x00*' >>> f = StringIO.StringIO() >>> s._write(f, 0.25, 'float32') >>> f.getvalue() '>\x80\x00\x00' >>> f = StringIO.StringIO() >>> s._write(f, 0.25, 'float64') >>> f.getvalue() '?\xd0\x00\x00\x00\x00\x00\x00' >>> f = StringIO.StringIO() >>> s._write(f, [1,2,3], ['int16']) >>> f.getvalue() '\x00\x03\x00\x01\x00\x02\x00\x03' >>> f = StringIO.StringIO() >>> s._write(f, {'a':1, 'b':2}, OrderedDict((('a', 'int16'), ('b', 'int16')))) >>> f.getvalue() '\x01\x00\x01\x00\x02' It does some typechecking on the typedef parameter: >>> f = StringIO.StringIO() >>> s._write(f, 42, 0) Traceback (most recent call last): ... 
SerializationException: Unknown type: 0 """ if isinstance(typedef, dict): if obj is not None and not isinstance(obj, dict): raise SerializationException("Expected dict but got: %s" % type(obj)) self._write_dict(output, obj, typedef) elif isinstance(typedef, list): if obj is not None and not isinstance(obj, list): raise SerializationException("Expected list but got: %s" % type(obj)) self._write_list(output, obj, typedef) elif isinstance(typedef, str) or isinstance(typedef, unicode): getattr(self, "_write_" + typedef)(output, obj) else: raise SerializationException("Unknown type: %s" % typedef) def _write_boolean(self, output, b): r""" Internal routine that writes booleans: >>> s = JsonTypeSerializer('"string"') True is 1: >>> f = StringIO.StringIO() >>> s._write_boolean(f, True) >>> f.getvalue() '\x01' False is 0: >>> f = StringIO.StringIO() >>> s._write_boolean(f, False) >>> f.getvalue() '\x00' None is serialized to (int8)-1: >>> f = StringIO.StringIO() >>> s._write_boolean(f, None) >>> f.getvalue() '\xff' Only booleans are accepted: >>> f = StringIO.StringIO() >>> s._write_boolean(f, 42) Traceback (most recent call last): ... SerializationException: Expected bool but got: <type 'int'> """ if b is None: output.write("\xFF") else: if not isinstance(b, bool): raise SerializationException("Expected bool but got: %s" % type(b)) if b: output.write("\x01") else: output.write("\x00") def _write_numeric(self, output, n, typedef): r""" Internal routine that writes numeric data. The individual _write_* methods are synthesized as calls to this method. 
>>> s = JsonTypeSerializer('"string"') >>> f = StringIO.StringIO() >>> s._write_numeric(f, 42, 'int8') >>> f.getvalue() '*' >>> f = StringIO.StringIO() >>> s._write_numeric(f, 42, 'int16') >>> f.getvalue() '\x00*' >>> f = StringIO.StringIO() >>> s._write_numeric(f, 42, 'int32') >>> f.getvalue() '\x00\x00\x00*' >>> f = StringIO.StringIO() >>> s._write_numeric(f, 42, 'int64') >>> f.getvalue() '\x00\x00\x00\x00\x00\x00\x00*' >>> f = StringIO.StringIO() >>> s._write_numeric(f, 0.25, 'float32') >>> f.getvalue() '>\x80\x00\x00' >>> f = StringIO.StringIO() >>> s._write_numeric(f, 0.25, 'float64') >>> f.getvalue() '?\xd0\x00\x00\x00\x00\x00\x00' None is serialized to the minimum value for each type: >>> f = StringIO.StringIO() >>> s._write_numeric(f, None, 'int8') >>> f.getvalue() '\x80' >>> f = StringIO.StringIO() >>> s._write_numeric(f, None, 'int16') >>> f.getvalue() '\x80\x00' >>> f = StringIO.StringIO() >>> s._write_numeric(f, None, 'int32') >>> f.getvalue() '\x80\x00\x00\x00' >>> f = StringIO.StringIO() >>> s._write_numeric(f, None, 'int64') >>> f.getvalue() '\x80\x00\x00\x00\x00\x00\x00\x00' >>> f = StringIO.StringIO() >>> s._write_numeric(f, None, 'float32') >>> f.getvalue() '\x00\x00\x00\x01' >>> f = StringIO.StringIO() >>> s._write_numeric(f, None, 'float64') >>> f.getvalue() '\x00\x00\x00\x00\x00\x00\x00\x01' Basic typechecking is done: >>> f = StringIO.StringIO() >>> s._write_numeric(f, 0.25, 'int16') Traceback (most recent call last): ... SerializationException: Invalid type: <type 'float'> for typedef: int16 >>> f = StringIO.StringIO() >>> s._write_numeric(f, 42, 'float32') Traceback (most recent call last): ... SerializationException: Invalid type: <type 'int'> for typedef: float32 Range checking is also done: >>> f = StringIO.StringIO() >>> s._write_numeric(f, 500, 'int8') Traceback (most recent call last): ... 
SerializationException: Value 500 out of range for typedef: int8 >>> f = StringIO.StringIO() >>> s._write_numeric(f, 1.0e-45, 'float32') Traceback (most recent call last): ... SerializationException: Value 1e-45 out of range for typedef: float32 """ if not isinstance(typedef, str) and not isinstance(typedef, unicode): raise SerializationException("Typedef must be a str, got: %s" % type(typedef)) if typedef not in set(["int8", "int16", "int32", "int64", "float32", "float64"]): raise SerializationException("Invalid typedef: %s" % typedef) if n is None: n = MINS[typedef] else: if not any(isinstance(n, t) for t in TYPES[typedef]): raise SerializationException("Invalid type: %s for typedef: %s" % (type(n), typedef)) if not RANGE_FNS[typedef](n, typedef): raise SerializationException("Value %s out of range for typedef: %s" % (n, typedef)) output.write(struct.pack(FORMATS[typedef], n)) def _write_date(self, output, d): r""" Internal routine that serializes dates. >>> s = JsonTypeSerializer('"string"') >>> d = datetime.datetime(2010, 11, 24, 17, 36, 11, 410413) >>> f = StringIO.StringIO() >>> s._write_date(f, d) >>> f.getvalue() '\x00\x00\x01,~\xf4\xf4\x92' None serializers to the smallest representable int64: >>> f = StringIO.StringIO() >>> s._write_date(f, None) >>> f.getvalue() '\x80\x00\x00\x00\x00\x00\x00\x00' Object passed in must be a date >>> f = StringIO.StringIO() >>> s._write_date(f, 42) Traceback (most recent call last): ... SerializationException: Expected datetime but got: <type 'int'> """ if d is None: self._write_int64(output, None) return if not isinstance(d, datetime.datetime): raise SerializationException("Expected datetime but got: %s" % type(d)) self._write_int64(output, _to_java_date(d)) def _write_bytes(self, output, bs): r""" Internal routine that writes raw bytes. 
>>> s = JsonTypeSerializer('"string"') >>> f = StringIO.StringIO() >>> s._write_bytes(f, 'foo') >>> f.getvalue() '\x00\x03foo' None is serialized as -1 length byte string: >>> f = StringIO.StringIO() >>> s._write_bytes(f, None) >>> f.getvalue() '\xff\xff' The empty string is a 0 length byte string: >>> f = StringIO.StringIO() >>> s._write_bytes(f, '') >>> f.getvalue() '\x00\x00' """ if bs is None: self._write_length(output, -1) return self._write_length(output, len(bs)) output.write(bs) def _write_length(self, output, size): r""" Internal routine that writes sequence lengths in Voldemort's variable length format. >>> s = JsonTypeSerializer('"string"') Small lengths are encoded as int16s: >>> f = StringIO.StringIO() >>> s._write_length(f, 10) >>> f.getvalue() '\x00\n' Length of -1 (magic length of null sequences) is encoded as (int16)-1: >>> f = StringIO.StringIO() >>> s._write_length(f, -1) >>> f.getvalue() '\xff\xff' Longer lengths are encoded as int32s with the high two bits set: >>> f = StringIO.StringIO() >>> s._write_length(f, 1000000) >>> f.getvalue() '\xc0\x0fB@' Lengths between 0x3fff0000 and 0x3fffffff are supposed to be supported but will serialize to a byte string that won't get decoded properly: >>> f = StringIO.StringIO() >>> s._write_length(f, 0x3fff0001) >>> bytes = f.getvalue() >>> bytes '\xff\xff\x00\x01' >>> s._read_length(StringIO.StringIO(bytes)) -1 """ if size < MAXES["int16"]: self._write_int16(output, size) elif size <= MAX_SEQ_LENGTH: self._write_int32(output, size | -0x40000000) # equivalent to 0xc0000000 else: raise SerializationException("Length of %d exceeds maximum allowed: %d" % (size, MAX_SEQ_LENGTH)) def _write_string(self, output, s): r""" Internal routine for serializing UTF-8 strings. 
>>> s = JsonTypeSerializer('"string"') >>> f = StringIO.StringIO() >>> s._write_string(f, 'foo') >>> f.getvalue() '\x00\x03foo' Unicode works, too: >>> f = StringIO.StringIO() >>> s._write_string(f, u'foo') >>> f.getvalue() '\x00\x03foo' Other types will cause an error: >>> f = StringIO.StringIO() >>> s._write_string(f, 42) Traceback (most recent call last): ... SerializationException: Expected string or unicode and got: <type 'int'> The empty string turns into a zero-length string: >>> f = StringIO.StringIO() >>> s._write_string(f, u'') >>> f.getvalue() '\x00\x00' None turns into a -1 length string: >>> f = StringIO.StringIO() >>> s._write_string(f, None) >>> f.getvalue() '\xff\xff' """ if s is None: self._write_bytes(output, None) else: if not isinstance(s, str) and not isinstance(s, unicode): raise SerializationException("Expected string or unicode and got: %s" % type(s)) self._write_bytes(output, s.encode("utf_8")) def _write_list(self, output, items, typedef): r""" Internal method for serializing lists. >>> s = JsonTypeSerializer('"string"') >>> f = StringIO.StringIO() >>> s._write_list(f, [1,2,3], ['int16']) >>> f.getvalue() '\x00\x03\x00\x01\x00\x02\x00\x03' Empty lists are encoded as length 0: >>> f = StringIO.StringIO() >>> s._write_list(f, [], ['int16']) >>> f.getvalue() '\x00\x00' None is encoded as length -1: >>> f = StringIO.StringIO() >>> s._write_list(f, None, ['int16']) >>> f.getvalue() '\xff\xff' Input must be a list: >>> f = StringIO.StringIO() >>> s._write_list(f, 1, ['int16']) Traceback (most recent call last): ... TypeError: object of type 'int' has no len() Typedef must be a singleton list: >>> f = StringIO.StringIO() >>> s._write_list(f, [1,2,3], []) Traceback (most recent call last): ... SerializationException: Type declaration of a list must be a singleton list. >>> f = StringIO.StringIO() >>> s._write_list(f, [1,2,3], ['int16', 'int16']) Traceback (most recent call last): ... 
SerializationException: Type declaration of a list must be a singleton list. >>> f = StringIO.StringIO() >>> s._write_list(f, [1,2,3], 'int16') Traceback (most recent call last): ... SerializationException: Type declaration of a list must be a singleton list. """ if not isinstance(typedef, list) or len(typedef) != 1: raise SerializationException("Type declaration of a list must be a singleton list.") objtype = typedef[0] if items is None: self._write_length(output, -1) else: self._write_length(output, len(items)) for item in items: self._write(output, item, objtype) def _write_dict(self, output, items, typedef): r""" Internal routine that serializes dicts. >>> s = JsonTypeSerializer('"string"') >>> f = StringIO.StringIO() >>> s._write_dict(f, {'a':1, 'b':2}, OrderedDict((('a','int16'), ('b','int32')))) >>> f.getvalue() '\x01\x00\x01\x00\x00\x00\x02' None will serialize as (int8)-1: >>> f = StringIO.StringIO() >>> s._write_dict(f, None, OrderedDict((('a','int16'), ('b','int32')))) >>> f.getvalue() '\xff' The passed in dict must have the same fields as the typedef: >>> f = StringIO.StringIO() >>> s._write_dict(f, {'a':1}, OrderedDict((('a','int16'), ('b','int32')))) Traceback (most recent call last): ... SerializationException: Size mismatch for dict: expected 2 but got 1. >>> f = StringIO.StringIO() >>> s._write_dict(f, {'a':1, 'b':2, 'c':3}, OrderedDict((('a','int16'), ('b','int32')))) Traceback (most recent call last): ... SerializationException: Size mismatch for dict: expected 2 but got 3. >>> f = StringIO.StringIO() >>> s._write_dict(f, {'b':2, 'c':3}, OrderedDict((('a','int16'), ('b','int32')))) Traceback (most recent call last): ... SerializationException: Missing key: 'a' required by type: {'a':'int16', 'b':'int32'} The object being serialized must be a dict: >>> f = StringIO.StringIO() >>> s._write_dict(f, [1,2], OrderedDict((('a','int16'), ('b','int32')))) Traceback (most recent call last): ... 
SerializationException: Object must be a dict but got: <type 'list'> The typedef must be an OrderedDict: >>> f = StringIO.StringIO() >>> s._write_dict(f, {'b':2, 'c':3}, ['int16']) Traceback (most recent call last): ... SerializationException: Typedef must be an OrderedDict but got: <type 'list'> We can't use plain dicts as the typedef, because ordering is significant and plain old dicts don't guarantee order: >>> f = StringIO.StringIO() >>> s._write_dict(f, {'b':2, 'c':3}, {'a':'int16', 'b':'int16'}) Traceback (most recent call last): ... SerializationException: Typedef must be an OrderedDict but got: <type 'dict'> """ if not isinstance(typedef, OrderedDict): raise SerializationException("Typedef must be an OrderedDict but got: %s" % type(typedef)) if items is None: self._write_int8(output, -1) return if not isinstance(items, dict): raise SerializationException("Object must be a dict but got: %s" % type(items)) if len(items) != len(typedef): raise SerializationException("Size mismatch for dict: expected %d but got %d." % (len(typedef), len(items))) self._write_int8(output, 1) for key, entrytype in typedef.iteritems(): if key not in items: raise SerializationException("Missing key: '%s' required by type: %s" % (key, typedef)) self._write(output, items[key], entrytype) # Generate the various primitive read/write methods: def _make_methods(typedef): """ Creates specialized _read_* and _write_* methods for the numeric types. """ def _writer(self, output, obj): self._write_numeric(output, obj, str(typedef)) def _reader(self, input): return self._read_numeric(input, str(typedef)) setattr(JsonTypeSerializer, "_write_%s" % typedef, _writer) setattr(JsonTypeSerializer, "_read_%s" % typedef, _reader) for typedef in MAXES.iterkeys(): _make_methods(typedef)
#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import getpass import os import re import platform import subprocess import sys import time import errno from re import split from time import sleep BIN = "bin" LIB = "lib" CONF = "conf" LOG = "logs" WEBAPP = "server" + os.sep + "webapp" CONFIG_SETS_CONF = "server" + os.sep + "solr" + os.sep + "configsets" + os.sep + "basic_configs" + os.sep + "conf" DATA = "data" ATLAS_CONF = "ATLAS_CONF" ATLAS_LOG = "ATLAS_LOG_DIR" ATLAS_PID = "ATLAS_PID_DIR" ATLAS_WEBAPP = "ATLAS_EXPANDED_WEBAPP_DIR" ATLAS_SERVER_OPTS = "ATLAS_SERVER_OPTS" ATLAS_OPTS = "ATLAS_OPTS" ATLAS_SERVER_HEAP = "ATLAS_SERVER_HEAP" ATLAS_DATA = "ATLAS_DATA_DIR" ATLAS_HOME = "ATLAS_HOME_DIR" HBASE_CONF_DIR = "HBASE_CONF_DIR" MANAGE_LOCAL_HBASE = "MANAGE_LOCAL_HBASE" MANAGE_LOCAL_SOLR = "MANAGE_LOCAL_SOLR" SOLR_BIN = "SOLR_BIN" SOLR_CONF = "SOLR_CONF" SOLR_PORT = "SOLR_PORT" DEFAULT_SOLR_PORT = "9838" SOLR_SHARDS = "SOLR_SHARDS" DEFAULT_SOLR_SHARDS = "1" SOLR_REPLICATION_FACTOR = "SOLR_REPLICATION_FACTOR" DEFAULT_SOLR_REPLICATION_FACTOR = "1" ENV_KEYS = ["ATLAS_JAVA_HOME", "JAVA_HOME", ATLAS_OPTS, ATLAS_SERVER_OPTS, ATLAS_SERVER_HEAP, ATLAS_LOG, ATLAS_PID, ATLAS_CONF, "ATLASCPPATH", ATLAS_DATA, ATLAS_HOME, ATLAS_WEBAPP, 
HBASE_CONF_DIR, SOLR_PORT] IS_WINDOWS = platform.system() == "Windows" ON_POSIX = 'posix' in sys.builtin_module_names CONF_FILE="atlas-application.properties" HBASE_STORAGE_CONF_ENTRY="atlas.graph.storage.backend\s*=\s*hbase" HBASE_STORAGE_LOCAL_CONF_ENTRY="atlas.graph.storage.hostname\s*=\s*localhost" SOLR_INDEX_CONF_ENTRY="atlas.graph.index.search.backend\s*=\s*solr5" SOLR_INDEX_LOCAL_CONF_ENTRY="atlas.graph.index.search.solr.zookeeper-url\s*=\s*localhost" SOLR_INDEX_ZK_URL="atlas.graph.index.search.solr.zookeeper-url" TOPICS_TO_CREATE="atlas.notification.topics" DEBUG = False def scriptDir(): """ get the script path """ return os.path.dirname(os.path.realpath(__file__)) def atlasDir(): home = os.path.dirname(scriptDir()) return os.environ.get(ATLAS_HOME, home) def libDir(dir) : return os.path.join(dir, LIB) def confDir(dir): localconf = os.path.join(dir, CONF) return os.environ.get(ATLAS_CONF, localconf) def hbaseBinDir(dir): return os.path.join(dir, "hbase", BIN) def hbaseConfDir(dir): return os.environ.get(HBASE_CONF_DIR, os.path.join(dir, "hbase", CONF)) def solrBinDir(dir): return os.environ.get(SOLR_BIN, os.path.join(dir, "solr", BIN)) def solrConfDir(dir): return os.environ.get(SOLR_CONF, os.path.join(dir, "solr", CONFIG_SETS_CONF)) def solrPort(): return os.environ.get(SOLR_PORT, DEFAULT_SOLR_PORT) def solrShards(): return os.environ.get(SOLR_SHARDS, DEFAULT_SOLR_SHARDS) def solrReplicationFactor(): return os.environ.get(SOLR_REPLICATION_FACTOR, DEFAULT_SOLR_REPLICATION_FACTOR) def logDir(dir): localLog = os.path.join(dir, LOG) return os.environ.get(ATLAS_LOG, localLog) def pidFile(dir): localPid = os.path.join(dir, LOG) return os.path.join(os.environ.get(ATLAS_PID, localPid), 'atlas.pid') def dataDir(dir): data = os.path.join(dir, DATA) return os.environ.get(ATLAS_DATA, data) def webAppDir(dir): webapp = os.path.join(dir, WEBAPP) return os.environ.get(ATLAS_WEBAPP, webapp) def kafkaTopicSetupDir(homeDir): return os.path.join(homeDir, "hook", 
"kafka-topic-setup") def expandWebApp(dir): webappDir = webAppDir(dir) webAppMetadataDir = os.path.join(webappDir, "atlas") d = os.sep if not os.path.exists(os.path.join(webAppMetadataDir, "WEB-INF")): try: os.makedirs(webAppMetadataDir) except OSError, e: if e.errno != errno.EEXIST: raise e pass atlasWarPath = os.path.join(atlasDir(), "server", "webapp", "atlas.war") if (isCygwin()): atlasWarPath = convertCygwinPath(atlasWarPath) os.chdir(webAppMetadataDir) jar(atlasWarPath) def dirMustExist(dirname): if not os.path.exists(dirname): os.mkdir(dirname) return dirname def executeEnvSh(confDir): envscript = '%s/atlas-env.sh' % confDir if not IS_WINDOWS and os.path.exists(envscript): envCmd = 'source %s && env' % envscript command = ['bash', '-c', envCmd] proc = subprocess.Popen(command, stdout = subprocess.PIPE) for line in proc.stdout: (key, _, value) = line.strip().partition("=") if key in ENV_KEYS: os.environ[key] = value proc.communicate() def java(classname, args, classpath, jvm_opts_list, logdir=None): java_home = os.environ.get("ATLAS_JAVA_HOME", None) if not java_home: java_home = os.environ.get("JAVA_HOME", None) if java_home: prg = os.path.join(java_home, "bin", "java") else: prg = which("java") if prg is None: raise EnvironmentError('The java binary could not be found in your path or JAVA_HOME') commandline = [prg] commandline.extend(jvm_opts_list) commandline.append("-classpath") commandline.append(classpath) commandline.append(classname) commandline.extend(args) return runProcess(commandline, logdir) def jar(path): java_home = os.environ.get("ATLAS_JAVA_HOME", None) if not java_home: java_home = os.environ.get("JAVA_HOME", None) if java_home: prg = os.path.join(java_home, "bin", "jar") else: prg = which("jar") if prg is None: raise EnvironmentError('The jar binary could not be found in your path or JAVA_HOME') commandline = [prg] commandline.append("-xf") commandline.append(path) process = runProcess(commandline) process.wait() def is_exe(fpath): return 
os.path.isfile(fpath) and os.access(fpath, os.X_OK) def which(program): fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program else: for path in os.environ["PATH"].split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None def runProcess(commandline, logdir=None, shell=False, wait=False): """ Run a process :param commandline: command line :return:the return code """ global finished debug ("Executing : %s" % str(commandline)) timestr = time.strftime("atlas.%Y%m%d-%H%M%S") stdoutFile = None stderrFile = None if logdir: stdoutFile = open(os.path.join(logdir, timestr + ".out"), "w") stderrFile = open(os.path.join(logdir,timestr + ".err"), "w") p = subprocess.Popen(commandline, stdout=stdoutFile, stderr=stderrFile, shell=shell) if wait: p.communicate() return p def print_output(name, src, toStdErr): """ Relay the output stream to stdout line by line :param name: :param src: source stream :param toStdErr: flag set if stderr is to be the dest :return: """ global needPassword debug ("starting printer for %s" % name ) line = "" while not finished: (line, done) = read(src, line) if done: out(toStdErr, line + "\n") flush(toStdErr) if line.find("Enter password for") >= 0: needPassword = True line = "" out(toStdErr, line) # closedown: read remainder of stream c = src.read(1) while c!="" : c = c.decode('utf-8') out(toStdErr, c) if c == "\n": flush(toStdErr) c = src.read(1) flush(toStdErr) src.close() def read_input(name, exe): """ Read input from stdin and send to process :param name: :param process: process to send input to :return: """ global needPassword debug ("starting reader for %s" % name ) while not finished: if needPassword: needPassword = False if sys.stdin.isatty(): cred = getpass.getpass() else: cred = sys.stdin.readline().rstrip() exe.stdin.write(cred + "\n") def debug(text): if DEBUG: print '[DEBUG] ' + text def error(text): print '[ERROR] ' + text sys.stdout.flush() 
def info(text): print text sys.stdout.flush() def out(toStdErr, text) : """ Write to one of the system output channels. This action does not add newlines. If you want that: write them yourself :param toStdErr: flag set if stderr is to be the dest :param text: text to write. :return: """ if toStdErr: sys.stderr.write(text) else: sys.stdout.write(text) def flush(toStdErr) : """ Flush the output stream :param toStdErr: flag set if stderr is to be the dest :return: """ if toStdErr: sys.stderr.flush() else: sys.stdout.flush() def read(pipe, line): """ read a char, append to the listing if there is a char that is not \n :param pipe: pipe to read from :param line: line being built up :return: (the potentially updated line, flag indicating newline reached) """ c = pipe.read(1) if c != "": o = c.decode('utf-8') if o != '\n': line += o return line, False else: return line, True else: return line, False def writePid(atlas_pid_file, process): f = open(atlas_pid_file, 'w') f.write(str(process.pid)) f.close() def exist_pid(pid): if ON_POSIX: #check if process id exist in the current process table #See man 2 kill - Linux man page for info about the kill(pid,0) system function try: os.kill(pid, 0) except OSError as e : return e.errno == errno.EPERM else: return True elif IS_WINDOWS: #The os.kill approach does not work on Windows with python 2.7 #the output from tasklist command is searched for the process id pidStr = str(pid) command='tasklist /fi "pid eq %s"' % pidStr sub_process=subprocess.Popen(command, stdout = subprocess.PIPE, shell=False) sub_process.communicate() output = subprocess.check_output(command) output=split(" *",output) for line in output: if pidStr in line: return True return False #os other than nt or posix - not supported - need to delete the file to restart server if pid no longer exist return True def wait_for_shutdown(pid, msg, wait): count = 0 sys.stdout.write(msg) while exist_pid(pid): sys.stdout.write('.') sys.stdout.flush() sleep(1) if count > wait: 
break count = count + 1 sys.stdout.write('\n') def is_hbase(confdir): confdir = os.path.join(confdir, CONF_FILE) return grep(confdir, HBASE_STORAGE_CONF_ENTRY) is not None def is_hbase_local(confdir): if os.environ.get(MANAGE_LOCAL_HBASE, "False").lower() == 'false': return False confdir = os.path.join(confdir, CONF_FILE) return grep(confdir, HBASE_STORAGE_CONF_ENTRY) is not None and grep(confdir, HBASE_STORAGE_LOCAL_CONF_ENTRY) is not None def run_hbase_action(dir, action, hbase_conf_dir = None, logdir = None, wait=True): if IS_WINDOWS: if action == 'start': hbaseScript = 'start-hbase.cmd' else: hbaseScript = 'stop-hbase.cmd' if hbase_conf_dir is not None: cmd = [os.path.join(dir, hbaseScript), '--config', hbase_conf_dir] else: cmd = [os.path.join(dir, hbaseScript)] else: hbaseScript = 'hbase-daemon.sh' if hbase_conf_dir is not None: cmd = [os.path.join(dir, hbaseScript), '--config', hbase_conf_dir, action, 'master'] else: cmd = [os.path.join(dir, hbaseScript), action, 'master'] return runProcess(cmd, logdir, False, wait) def is_solr(confdir): confdir = os.path.join(confdir, CONF_FILE) return grep(confdir, SOLR_INDEX_CONF_ENTRY) is not None def is_solr_local(confdir): if os.environ.get(MANAGE_LOCAL_SOLR, "False").lower() == 'false': return False confdir = os.path.join(confdir, CONF_FILE) return grep(confdir, SOLR_INDEX_CONF_ENTRY) is not None and grep(confdir, SOLR_INDEX_LOCAL_CONF_ENTRY) is not None def get_solr_zk_url(confdir): confdir = os.path.join(confdir, CONF_FILE) return getConfig(confdir, SOLR_INDEX_ZK_URL) def get_topics_to_create(confdir): confdir = os.path.join(confdir, CONF_FILE) topic_list = getConfig(confdir, TOPICS_TO_CREATE) if topic_list is not None: topics = topic_list.split(",") else: topics = ["ATLAS_HOOK", "ATLAS_ENTITIES"] return topics def run_solr(dir, action, zk_url = None, port = None, logdir = None, wait=True): solrScript = "solr" if IS_WINDOWS: solrScript = "solr.cmd" if zk_url is None: if port is None: cmd = [os.path.join(dir, 
solrScript), action] else: cmd = [os.path.join(dir, solrScript), action, '-p', str(port)] else: if port is None: cmd = [os.path.join(dir, solrScript), action, '-z', zk_url] else: cmd = [os.path.join(dir, solrScript), action, '-z', zk_url, '-p', port] return runProcess(cmd, logdir, False, wait) def create_solr_collection(dir, confdir, index, logdir = None, wait=True): solrScript = "solr" if IS_WINDOWS: solrScript = "solr.cmd" cmd = [os.path.join(dir, solrScript), 'create', '-c', index, '-d', confdir, '-shards', solrShards(), '-replicationFactor', solrReplicationFactor()] return runProcess(cmd, logdir, False, wait) def configure_hbase(dir): env_conf_dir = os.environ.get(HBASE_CONF_DIR) conf_dir = os.path.join(dir, "hbase", CONF) tmpl_dir = os.path.join(dir, CONF, "hbase") data_dir = dataDir(atlasDir()) if env_conf_dir is None or env_conf_dir == conf_dir: hbase_conf_file = "hbase-site.xml" tmpl_file = os.path.join(tmpl_dir, hbase_conf_file + ".template") if IS_WINDOWS: url_prefix="file:///" else: url_prefix="file://" conf_file = os.path.join(conf_dir, hbase_conf_file) if os.path.exists(tmpl_file): debug ("Configuring " + tmpl_file + " to " + conf_file) f = open(tmpl_file,'r') template = f.read() f.close() config = template.replace("${hbase_home}", dir) config = config.replace("${atlas_data}", data_dir) config = config.replace("${url_prefix}", url_prefix) f = open(conf_file,'w') f.write(config) f.close() os.remove(tmpl_file) def server_already_running(pid): print "Atlas server is already running under process %s" % pid sys.exit() def server_pid_not_running(pid): print "The Server is no longer running with pid %s" %pid def grep(file, value): for line in open(file).readlines(): if re.match(value, line): return line return None def getConfig(file, key): key = key + "\s*=" for line in open(file).readlines(): if re.match(key, line): return line.split('=')[1].strip() return None def isCygwin(): return platform.system().startswith("CYGWIN") # Convert the specified 
cygwin-style pathname to Windows format, # using the cygpath utility. By default, path is assumed # to be a file system pathname. If isClasspath is True, # then path is treated as a Java classpath string. def convertCygwinPath(path, isClasspath=False): if (isClasspath): cygpathArgs = ["cygpath", "-w", "-p", path] else: cygpathArgs = ["cygpath", "-w", path] windowsPath = subprocess.Popen(cygpathArgs, stdout=subprocess.PIPE).communicate()[0] windowsPath = windowsPath.strip() return windowsPath
# MIT License
#
# Copyright (c) 2016 Morgan McDermott & John Carlyle
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Unit tests for pipetree artifact providers (parameter, file, directory)."""
import os
import os.path
import unittest

from tests import isolated_filesystem
from pipetree.config import PipelineStageConfig
from pipetree.providers import LocalDirectoryArtifactProvider,\
    LocalFileArtifactProvider,\
    ParameterArtifactProvider
from pipetree.exceptions import ArtifactSourceDoesNotExistError,\
    InvalidConfigurationFileError,\
    ArtifactProviderMissingParameterError,\
    ArtifactProviderFailedError


class TestParameterArtifactProvider(unittest.TestCase):
    def setUp(self):
        self.stage_config = PipelineStageConfig("test_stage_name", {
            "type": "ParameterPipelineStage"
        })
        self.test_parameters = {"int_param": 200, "str_param": "str"}

    def test_missing_config(self):
        # assertRaises is the idiomatic replacement for try/except-pass.
        with self.assertRaises(ArtifactProviderMissingParameterError):
            ParameterArtifactProvider(stage_config=None, parameters={})

    def test_missing_parameters(self):
        with self.assertRaises(ArtifactProviderMissingParameterError):
            ParameterArtifactProvider(stage_config=self.stage_config,
                                      parameters={})

    def test_yield_artifacts(self):
        provider = ParameterArtifactProvider(
            stage_config=self.stage_config,
            parameters=self.test_parameters)
        artifacts = list(provider.yield_artifacts())
        self.assertEqual(1, len(artifacts))
        yielded_params = artifacts[0].payload
        # Every configured parameter must appear in the yielded payload.
        for k in self.test_parameters:
            self.assertIn(k, yielded_params, "Missing parameter " + k)


class TestLocalFileArtifactProvider(unittest.TestCase):
    def setUp(self):
        self.dirname = 'foo'
        self.filename = ['foo.bar', 'foo.baz']
        self.filedatas = ['foo bar baz', 'helloworld']
        self.fs = isolated_filesystem()
        self.fs.__enter__()
        self.stage_config = PipelineStageConfig("test_stage_name", {
            "type": "LocalFilePipelineStage"
        })
        # Build directory structure
        os.makedirs(self.dirname)
        for name, data in zip(self.filename, self.filedatas):
            with open(os.path.join(os.getcwd(), self.dirname, name), 'w') as f:
                f.write(data)

    def tearDown(self):
        self.fs.__exit__(None, None, None)

    def test_missing_config(self):
        with self.assertRaises(ArtifactProviderMissingParameterError):
            LocalFileArtifactProvider(path='folder/shim.sham',
                                      stage_config=None)

    def test_load_nonexistant_file(self):
        with self.assertRaises(ArtifactSourceDoesNotExistError):
            LocalFileArtifactProvider(path='folder/shim.sham',
                                      stage_config=self.stage_config)

    def test_yield_artifacts(self):
        provider = LocalFileArtifactProvider(
            path=os.path.join(self.dirname, self.filename[0]),
            stage_config=self.stage_config,
            read_content=True)
        artifacts = list(provider.yield_artifacts())
        self.assertEqual(len(artifacts), 1)

    def test_load_file_data(self):
        provider = LocalFileArtifactProvider(
            path=os.path.join(self.dirname, self.filename[0]),
            stage_config=self.stage_config,
            read_content=True)
        art = provider._yield_artifact()
        self.assertEqual(art.item.payload, self.filedatas[0])


class TestLocalDirectoryArtifactProvider(unittest.TestCase):
    def setUp(self):
        self.dirname = 'foo'
        self.filename = ['foo.bar', 'foo.baz']
        self.filedatas = ['foo bar baz', 'helloworld']
        self.fs = isolated_filesystem()
        self.fs.__enter__()
        self.stage_config = PipelineStageConfig("test_stage_name", {
            "type": "LocalDirectoryPipelineStage"
        })
        # Build directory structure
        os.makedirs(self.dirname)
        for name, data in zip(self.filename, self.filedatas):
            with open(os.path.join(os.getcwd(), self.dirname, name), 'w') as f:
                f.write(data)

    def tearDown(self):
        self.fs.__exit__(None, None, None)

    def test_missing_config(self):
        with self.assertRaises(ArtifactProviderMissingParameterError):
            LocalDirectoryArtifactProvider(path='folder/', stage_config=None)

    def test_load_nonexistant_dir(self):
        with self.assertRaises(ArtifactSourceDoesNotExistError):
            LocalDirectoryArtifactProvider(path='folder/',
                                           stage_config=self.stage_config)

    def test_load_file_data(self):
        provider = LocalDirectoryArtifactProvider(path=self.dirname,
                                                  stage_config=self.stage_config,
                                                  read_content=True)
        art = provider._yield_artifact(self.filename[0])
        self.assertEqual(art.item.payload.decode('utf-8'),
                         self.filedatas[0])

    def test_load_file_names(self):
        provider = LocalDirectoryArtifactProvider(path=self.dirname,
                                                  stage_config=self.stage_config)
        for loaded_name, name in zip(provider.yield_artifacts(),
                                     self.filename):
            self.assertEqual(loaded_name,
                             os.path.join(os.getcwd(), self.dirname, name))

    def test_load_multiple_file_contents(self):
        provider = LocalDirectoryArtifactProvider(path=self.dirname,
                                                  stage_config=self.stage_config,
                                                  read_content=True)
        for art, data in zip(provider.yield_artifacts(), self.filedatas):
            art_data = art.item.payload
            self.assertEqual(art_data.decode('utf-8'), data)
#!/usr/bin/env python
"""Train several classifiers on CSV feature data, select the best by CV score,
optionally compare against a benchmark field, pickle the winning pipeline,
and/or apply it to segment objects detected in an image."""
import numpy as np
import matplotlib.pyplot as plt
import csv
import argparse
import pickle
import os
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import f1_score, precision_recall_fscore_support, roc_curve
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import pandas as pd
import skimage
import scipy.misc, scipy.interpolate, scipy.ndimage
from skimage import exposure, filters, morphology, feature, segmentation

model_names = ["knn", "gsvm", "rf", "gnb", "lda", "voting"]


def get_raw_data(fname):
    """Read a CSV file; return (header row, list of rows as string lists)."""
    titles = None
    data = []
    with open(fname) as f:
        c = csv.reader(f)
        titles = next(c)
        for line in c:
            data.append(line)
    return titles, data


def get_fields(titles, data, fields):
    """Select columns *fields* (by index); return (titles, ndarray of values)."""
    titles = [titles[x] for x in fields]
    data = [[line[x] for x in fields] for line in data]
    data = np.array(data)
    return titles, data


def get_args():
    """Parse command-line arguments for training / applying the classifier."""
    parser = argparse.ArgumentParser()
    parser.add_argument("input", help="input csv data file")
    parser.add_argument("-x", "--features", help="indices of feature fields",
                        nargs="+", type=int)
    parser.add_argument("-y", "--target", help="index of ground truth field",
                        type=int)
    parser.add_argument("--benchmark", help="benchmark target field", type=int)
    parser.add_argument("--score",
                        help="scoring metric for determining best model",
                        default="f1")
    parser.add_argument("-o", "--output", help="output model file, pickled")
    parser.add_argument("-i", "--image",
                        help="apply model to classify an image")
    parser.add_argument("--select-model",
                        help="The model classes to train, defaults to all.",
                        nargs="*", choices=model_names, default=model_names)
    parser.add_argument("--unknown-as", help="Treat unknown label as",
                        choices=["0", "1", "-1", "remove"], default="remove")
    parser.add_argument("--roc", help="show roc curve", action="store_true")
    parser.add_argument("--njobs",
                        help="number of processors to parallelize",
                        default=1, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    rand_state = 42
    args = get_args()
    raw_titles, raw_data = get_raw_data(args.input)
    feature_title, X = get_fields(raw_titles, raw_data, args.features)
    target_title, y = get_fields(raw_titles, raw_data, [args.target])
    X = X.astype(float)
    y = y.astype(float)

    y_prime = np.zeros_like(y)
    # NOTE(review): a benchmark field at column index 0 is falsy and skipped
    # here -- confirm index 0 is never a valid benchmark column.
    if args.benchmark:
        benchmark_title, y_prime = get_fields(
            raw_titles, raw_data, [args.benchmark])
        le = LabelEncoder()
        le.fit(["False", "True"])
        y_prime = le.transform(y_prime[:, 0])
    else:
        y_prime = y_prime[:, 0]  # added by PS to make shapes match
    y = y[:, 0]

    if args.unknown_as == "remove":
        X = X[y > -1, :]
        y_prime = y_prime[y > -1]
        y = y[y > -1]
    elif args.unknown_as in ["0", "1", "-1"]:
        y[y == -1] = int(args.unknown_as)

    # test split both target and benchmark
    y_tmp = np.vstack([y, y_prime]).T
    X_train, X_test, y_train, y_test = train_test_split(
        X, y_tmp, test_size=1 / 4.0, random_state=rand_state)
    y_train, y_prime_train = (y_train[:, 0], y_train[:, 1])
    y_test, y_prime_test = (y_test[:, 0], y_test[:, 1])

    normalizer = StandardScaler()
    normalizer.fit(X_train)
    X_train_norm = normalizer.transform(X_train)
    X_test_norm = normalizer.transform(X_test)

    classifiers = [
        KNeighborsClassifier(),
        SVC(),
        RandomForestClassifier(),
        GaussianNB(),
        LDA(),
    ]
    names = model_names
    # BUG FIX: n_neighbors must be integral -- 3. ** np.arange(5) yielded
    # floats; 3 ** np.arange(5) gives the same values as ints.
    params = [{"n_neighbors": 3 ** np.arange(5)},
              {"kernel": ["rbf"], "gamma": 3. ** np.arange(-5, 5),
               "C": 3. ** np.arange(-5, 5)},
              {"n_estimators": 3 ** np.arange(6),
               "max_features": np.arange(1, X.shape[1])},
              {},
              {},
              ]

    best_estimators = []
    best_scores = []
    best_params = []
    selected_names = []
    for name, param, classifier in zip(names, params, classifiers):
        if args.select_model and name in args.select_model and name != "voting":
            selected_names.append(name)
            # lda tend to hang if run parallel
            clf = GridSearchCV(classifier, param, cv=3, scoring=args.score,
                               verbose=3, n_jobs=args.njobs)
            clf.fit(X_train_norm, y_train)
            best_estimators.append(clf.best_estimator_)
            best_scores.append(clf.best_score_)
            best_params.append(clf.best_params_)

    if "voting" in args.select_model:
        # vote from the best.
        # BUG FIX: pair estimators with the names actually selected; zipping
        # against the full `names` list mislabels estimators whenever
        # --select-model picks a subset of models.
        voting_clf = VotingClassifier(
            list(zip(selected_names, best_estimators)), voting="hard")
        selected_names.append("voting")
        score = cross_val_score(voting_clf, X_train_norm, y_train, cv=3,
                                scoring=args.score)
        best_estimators.append(voting_clf)
        best_scores.append(score.mean())
        best_params.append({})

    # BUG FIX: wrap zips in list() so Python 3 prints values, not zip objects.
    print(list(zip(selected_names, best_scores)))
    print("LDA coef:",
          list(zip(feature_title, LDA().fit(X_train_norm, y_train).coef_[0])))

    best_model_idx = np.argmax(np.array(best_scores))
    best_model = Pipeline([("normalize", normalizer),
                           ("estimator", best_estimators[best_model_idx])])
    print("Best model is: {}, with parameters {}".format(
        selected_names[best_model_idx], repr(best_params[best_model_idx])))
    print("Validation F1 Score: {}".format(best_scores[best_model_idx]))

    # retrain with all of the training sample
    best_model.fit(X_train, y_train)
    pred = best_model.predict(X_test)
    print("Testing precision: {}, recall: {}, F1 Score: {}, support: {}".format(
        *precision_recall_fscore_support(y_test, pred, average="binary")))
    if args.benchmark:
        print("Benchmark precision: {}, recall: {}, F1 score: {}, support: {}".format(
            *precision_recall_fscore_support(y_test, y_prime_test,
                                             average="binary")))

    if args.output:
        with open(args.output, "wb") as fo:
            pickle.dump(best_model, fo)
        print("Pickled the best model in {}".format(args.output))
    # BUG FIX: print("...").format(x) called .format on print's return value
    # (None) and raised AttributeError; format the string, then print it.
    print("target: {}".format(target_title))
    print("features tried {}".format(feature_title))

    if args.roc:
        # NOTE(review): Pipeline.transform is unlikely to yield decision
        # scores here; decision_function/predict_proba is probably intended
        # -- confirm before relying on this curve.
        pred_test = best_model.transform(X_test)
        fpr, tpr, _ = roc_curve(y_test, pred_test)
        plt.plot(fpr, tpr)
        plt.xlabel("False positive rate")
        plt.ylabel("True positive rate")
        plt.show()

    if args.image:
        disksize = 5
        corrthres = 98
        print("Using settings which may no longer be used. ")
        print("e.g. disk size {}, correlation threshold {} percentile".format(
            disksize, corrthres))
        imgin = skimage.img_as_uint(scipy.misc.imread(args.image))
        minp = np.percentile(imgin, 50)
        img_iadjust = imgin * (imgin >= minp)
        tmplt = morphology.disk(disksize)
        tmplt_matched = skimage.feature.match_template(img_iadjust, tmplt,
                                                       pad_input=True)
        tmplt_thresholdbinary = tmplt_matched >= np.percentile(tmplt_matched,
                                                               corrthres)
        img_lbl, ncell = scipy.ndimage.measurements.label(
            tmplt_thresholdbinary, np.ones((3, 3), bool))
        print("Detected {} objects".format(ncell))

        columns = ['otherindex', 'indexcolumnidislike', 'imgname', 'imgnumber',
                   'roi', 'id', 'label', 'coords', 'bbox_rmin', 'bbox_rmax',
                   'bbox_cmin', 'bbox_cmax', 'centroidr', 'centroidc', 'meani',
                   'equivdiameter', 'circularity', 'eccentricity', 'area',
                   'minor_axis_length', 'major_axis_length', 'min_intensity',
                   'max_intensity']
        lbldetails = pd.DataFrame(columns=columns)
        lblprops = skimage.measure.regionprops(img_lbl, img_iadjust)
        z = 0
        for ilbl in lblprops:
            circularity = (ilbl.perimeter * ilbl.perimeter) / (np.pi * 4.0 * ilbl.area)
            lbldetails.loc[z] = ["NA", "NA", "NA", "NA", "NA", z, ilbl.label,
                                 ilbl.coords, ilbl.bbox[0], ilbl.bbox[2],
                                 ilbl.bbox[1], ilbl.bbox[3], ilbl.centroid[0],
                                 ilbl.centroid[1], ilbl.mean_intensity,
                                 ilbl.equivalent_diameter, circularity,
                                 ilbl.eccentricity, ilbl.area,
                                 ilbl.minor_axis_length,
                                 ilbl.major_axis_length, ilbl.min_intensity,
                                 ilbl.max_intensity]
            z += 1

        raw_titles = lbldetails.columns.tolist()
        raw_data = lbldetails.values.tolist()
        print(raw_titles)
        print(args.features)
        feature_title, X = get_fields(raw_titles, raw_data, args.features)
        X = X.astype(float)
        lbldetails['classified'] = best_model.predict(X)

        # add an object to the front (represents label number 0)
        lbl_classified = pd.Series([0])
        lbl_classified = lbl_classified.append(lbldetails['classified'])
        # we want objects removed to be labeled 1 (aka True). They are 0 right now
        lbl_classified = 1 - lbl_classified
        lbl_classified = np.abs(lbl_classified)
        lbl_classified = np.array(lbl_classified.astype(bool))
        remove_pixel = lbl_classified[img_lbl]
        img_lbl[remove_pixel] = 0
        filename = os.path.splitext(os.path.basename(args.image))[0]
        print(filename + "_cells.tif")
        scipy.misc.imsave(filename + "_cells.tif", img_lbl > 0.5)
from .estimator_base import H2OEstimator


class H2OGeneralizedLowRankEstimator(H2OEstimator):
    """Builds a generalized low rank model of a H2O dataset.

    Parameters
    ----------
    k : int
      The rank of the resulting decomposition. This must be between 1 and the number of
      columns in the training frame inclusive.
    max_iterations : int
      The maximum number of iterations to run the optimization loop. Each iteration
      consists of an update of the X matrix, followed by an update of the Y matrix.
    transform : str
      A character string that indicates how the training data should be transformed
      before running GLRM. Possible values are "NONE" for no transformation, "DEMEAN"
      for subtracting the mean of each column, "DESCALE" for dividing by the standard
      deviation of each column, "STANDARDIZE" for demeaning and descaling, and
      "NORMALIZE" for demeaning and dividing each column by its range (max - min).
    seed : int, optional
      Random seed used to initialize the X and Y matrices.
    ignore_const_cols : bool, optional
      A logical value indicating whether to ignore constant columns in the training
      frame. A column is constant if all of its non-missing values are the same value.
    loss : str
      A character string indicating the default loss function for numeric columns.
      Possible values are "Quadratic" (default), "Absolute", "Huber", "Poisson",
      "Hinge", and "Logistic".
    multi_loss : str
      A character string indicating the default loss function for enum columns.
      Possible values are "Categorical" and "Ordinal".
    loss_by_col : str, optional
      A list of strings indicating the loss function for specific columns by
      corresponding index in loss_by_col_idx. Will override loss for numeric columns
      and multi_loss for enum columns.
    loss_by_col_idx : str, optional
      A list of column indices to which the corresponding loss functions in
      loss_by_col are assigned. Must be zero indexed.
    regularization_x : str
      A character string indicating the regularization function for the X matrix.
      Possible values are "None" (default), "Quadratic", "L2", "L1", "NonNegative",
      "OneSparse", "UnitOneSparse", and "Simplex".
    regularization_y : str
      A character string indicating the regularization function for the Y matrix.
      Possible values are "None" (default), "Quadratic", "L2", "L1", "NonNegative",
      "OneSparse", "UnitOneSparse", and "Simplex".
    gamma_x : float
      The weight on the X matrix regularization term.
    gamma_y : float
      The weight on the Y matrix regularization term.
    init_step_size : float
      Initial step size. Divided by number of columns in the training frame when
      calculating the proximal gradient update. The algorithm begins at
      init_step_size and decreases the step size at each iteration until a
      termination condition is reached.
    min_step_size : float
      Minimum step size upon which the algorithm is terminated.
    init : str
      A character string indicating how to select the initial X and Y matrices.
      Possible values are "Random" for initialization to a random array from the
      standard normal distribution, "PlusPlus" for initialization using the clusters
      from k-means++ initialization, "SVD" for initialization using the first k
      (approximate) right singular vectors, "User" for user-specified initial X and Y
      frames (must set user_y and user_x arguments).
    svd_method : str
      A character string that indicates how SVD should be calculated during
      initialization. Possible values are "GramSVD" for distributed computation of
      the Gram matrix followed by a local SVD using the JAMA package, "Power" for
      computation of the SVD using the power iteration method, "Randomized" for
      approximate SVD by projecting onto a random subspace.
    user_x : H2OFrame, optional
      An H2OFrame object specifying the initial X matrix. Only used when init = "User".
    user_y : H2OFrame, optional
      An H2OFrame object specifying the initial Y matrix. Only used when init = "User".
    recover_svd : bool
      A logical value indicating whether the singular values and eigenvectors should
      be recovered during post-processing of the generalized low rank decomposition.

    Returns
    -------
      A new H2OGeneralizedLowRankEstimator instance.
    """

    def __init__(self, k=None, max_iterations=None, transform=None, seed=None,
                 ignore_const_cols=None, loss=None, multi_loss=None,
                 loss_by_col=None, loss_by_col_idx=None, regularization_x=None,
                 regularization_y=None, gamma_x=None, gamma_y=None,
                 init_step_size=None, min_step_size=None, init=None,
                 svd_method=None, user_x=None, user_y=None, recover_svd=None):
        super(H2OGeneralizedLowRankEstimator, self).__init__()
        # Capture every constructor argument as a model parameter; drop `self`.
        self._parms = locals()
        self._parms = {k: v for k, v in self._parms.items() if k != "self"}
        self._parms['_rest_version'] = 3

    # CONSISTENCY FIX: `k` is accepted by __init__ and documented above but,
    # unlike every other parameter, previously had no property accessor.
    @property
    def k(self):
        return self._parms["k"]

    @k.setter
    def k(self, value):
        self._parms["k"] = value

    @property
    def max_iterations(self):
        return self._parms["max_iterations"]

    @max_iterations.setter
    def max_iterations(self, value):
        self._parms["max_iterations"] = value

    @property
    def transform(self):
        return self._parms["transform"]

    @transform.setter
    def transform(self, value):
        self._parms["transform"] = value

    @property
    def seed(self):
        return self._parms["seed"]

    @seed.setter
    def seed(self, value):
        self._parms["seed"] = value

    @property
    def ignore_const_cols(self):
        return self._parms["ignore_const_cols"]

    @ignore_const_cols.setter
    def ignore_const_cols(self, value):
        self._parms["ignore_const_cols"] = value

    @property
    def loss(self):
        return self._parms["loss"]

    @loss.setter
    def loss(self, value):
        self._parms["loss"] = value

    @property
    def multi_loss(self):
        return self._parms["multi_loss"]

    @multi_loss.setter
    def multi_loss(self, value):
        self._parms["multi_loss"] = value

    @property
    def loss_by_col(self):
        return self._parms["loss_by_col"]

    @loss_by_col.setter
    def loss_by_col(self, value):
        self._parms["loss_by_col"] = value

    @property
    def loss_by_col_idx(self):
        return self._parms["loss_by_col_idx"]

    @loss_by_col_idx.setter
    def loss_by_col_idx(self, value):
        self._parms["loss_by_col_idx"] = value

    @property
    def regularization_x(self):
        return self._parms["regularization_x"]

    @regularization_x.setter
    def regularization_x(self, value):
        self._parms["regularization_x"] = value

    @property
    def regularization_y(self):
        return self._parms["regularization_y"]

    @regularization_y.setter
    def regularization_y(self, value):
        self._parms["regularization_y"] = value

    @property
    def gamma_x(self):
        return self._parms["gamma_x"]

    @gamma_x.setter
    def gamma_x(self, value):
        self._parms["gamma_x"] = value

    @property
    def gamma_y(self):
        return self._parms["gamma_y"]

    @gamma_y.setter
    def gamma_y(self, value):
        self._parms["gamma_y"] = value

    @property
    def init_step_size(self):
        return self._parms["init_step_size"]

    @init_step_size.setter
    def init_step_size(self, value):
        self._parms["init_step_size"] = value

    @property
    def min_step_size(self):
        return self._parms["min_step_size"]

    @min_step_size.setter
    def min_step_size(self, value):
        self._parms["min_step_size"] = value

    @property
    def init(self):
        return self._parms["init"]

    @init.setter
    def init(self, value):
        self._parms["init"] = value

    @property
    def svd_method(self):
        return self._parms["svd_method"]

    @svd_method.setter
    def svd_method(self, value):
        self._parms["svd_method"] = value

    @property
    def user_x(self):
        return self._parms["user_x"]

    @user_x.setter
    def user_x(self, value):
        self._parms["user_x"] = value

    @property
    def user_y(self):
        return self._parms["user_y"]

    @user_y.setter
    def user_y(self, value):
        self._parms["user_y"] = value

    @property
    def recover_svd(self):
        return self._parms["recover_svd"]

    @recover_svd.setter
    def recover_svd(self, value):
        self._parms["recover_svd"] = value
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from oslo_config import cfg

quota_group = cfg.OptGroup(
    name='quota',
    title='Quota Options',
    help="""
Quota options allow to manage quotas in openstack deployment.
""")

quota_opts = [
    cfg.IntOpt('instances',
        min=-1,
        default=10,
        deprecated_group='DEFAULT',
        deprecated_name='quota_instances',
        help="""
The number of instances allowed per project.

Possible Values

* A positive integer or 0.
* -1 to disable the quota.
"""),
    cfg.IntOpt('cores',
        min=-1,
        default=20,
        deprecated_group='DEFAULT',
        deprecated_name='quota_cores',
        help="""
The number of instance cores or vCPUs allowed per project.

Possible values:

* A positive integer or 0.
* -1 to disable the quota.
"""),
    cfg.IntOpt('ram',
        min=-1,
        default=50 * 1024,
        deprecated_group='DEFAULT',
        deprecated_name='quota_ram',
        help="""
The number of megabytes of instance RAM allowed per project.

Possible values:

* A positive integer or 0.
* -1 to disable the quota.
"""),
    cfg.IntOpt('floating_ips',
        min=-1,
        default=10,
        deprecated_group='DEFAULT',
        deprecated_name='quota_floating_ips',
        deprecated_for_removal=True,
        deprecated_since='15.0.0',
        deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
        help="""
The number of floating IPs allowed per project.

Floating IPs are not allocated to instances by default. Users need to select
them from the pool configured by the OpenStack administrator to attach to their
instances.

Possible values:

* A positive integer or 0.
* -1 to disable the quota.
"""),
    cfg.IntOpt('fixed_ips',
        min=-1,
        default=-1,
        deprecated_group='DEFAULT',
        deprecated_name='quota_fixed_ips',
        deprecated_for_removal=True,
        deprecated_since='15.0.0',
        deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
        help="""
The number of fixed IPs allowed per project.

Unlike floating IPs, fixed IPs are allocated dynamically by the network
component when instances boot up. This quota value should be at least the
number of instances allowed

Possible values:

* A positive integer or 0.
* -1 to disable the quota.
"""),
    cfg.IntOpt('metadata_items',
        min=-1,
        default=128,
        deprecated_group='DEFAULT',
        deprecated_name='quota_metadata_items',
        help="""
The number of metadata items allowed per instance.

Users can associate metadata with an instance during instance creation. This
metadata takes the form of key-value pairs.

Possible values:

* A positive integer or 0.
* -1 to disable the quota.
"""),
    cfg.IntOpt('injected_files',
        min=-1,
        default=5,
        deprecated_group='DEFAULT',
        deprecated_name='quota_injected_files',
        help="""
The number of injected files allowed.

File injection allows users to customize the personality of an instance by
injecting data into it upon boot. Only text file injection is permitted: binary
or ZIP files are not accepted. During file injection, any existing files that
match specified files are renamed to include ``.bak`` extension appended with a
timestamp.

Possible values:

* A positive integer or 0.
* -1 to disable the quota.
"""),
    cfg.IntOpt('injected_file_content_bytes',
        min=-1,
        default=10 * 1024,
        deprecated_group='DEFAULT',
        deprecated_name='quota_injected_file_content_bytes',
        help="""
The number of bytes allowed per injected file.

Possible values:

* A positive integer or 0.
* -1 to disable the quota.
"""),
    cfg.IntOpt('injected_file_path_length',
        min=-1,
        default=255,
        deprecated_group='DEFAULT',
        deprecated_name='quota_injected_file_path_length',
        help="""
The maximum allowed injected file path length.

Possible values:

* A positive integer or 0.
* -1 to disable the quota.
"""),
    cfg.IntOpt('security_groups',
        min=-1,
        default=10,
        deprecated_group='DEFAULT',
        deprecated_name='quota_security_groups',
        deprecated_for_removal=True,
        deprecated_since='15.0.0',
        deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
        help="""
The number of security groups per project.

Possible values:

* A positive integer or 0.
* -1 to disable the quota.
"""),
    cfg.IntOpt('security_group_rules',
        min=-1,
        default=20,
        deprecated_group='DEFAULT',
        deprecated_name='quota_security_group_rules',
        deprecated_for_removal=True,
        deprecated_since='15.0.0',
        deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
        help="""
The number of security rules per security group.

The associated rules in each security group control the traffic to instances
in the group.

Possible values:

* A positive integer or 0.
* -1 to disable the quota.
"""),
    cfg.IntOpt('key_pairs',
        min=-1,
        default=100,
        deprecated_group='DEFAULT',
        deprecated_name='quota_key_pairs',
        help="""
The maximum number of key pairs allowed per user.

Users can create at least one key pair for each project and use the key pair
for multiple instances that belong to that project.

Possible values:

* A positive integer or 0.
* -1 to disable the quota.
"""),
    cfg.IntOpt('server_groups',
        min=-1,
        default=10,
        deprecated_group='DEFAULT',
        deprecated_name='quota_server_groups',
        help="""
The maximum number of server groups per project.

Server groups are used to control the affinity and anti-affinity scheduling
policy for a group of servers or instances. Reducing the quota will not affect
any existing group, but new servers will not be allowed into groups that have
become over quota.

Possible values:

* A positive integer or 0.
* -1 to disable the quota.
"""),
    cfg.IntOpt('server_group_members',
        min=-1,
        default=10,
        deprecated_group='DEFAULT',
        deprecated_name='quota_server_group_members',
        help="""
The maximum number of servers per server group.

Possible values:

* A positive integer or 0.
* -1 to disable the quota.
"""),
    # TODO(stephenfin): This should have a min parameter
    cfg.IntOpt('reservation_expire',
        default=86400,
        deprecated_group='DEFAULT',
        help="""
The number of seconds until a reservation expires.

This quota represents the time period for invalidating quota reservations.
"""),
    cfg.IntOpt('until_refresh',
        min=0,
        default=0,
        deprecated_group='DEFAULT',
        help="""
The count of reservations until usage is refreshed.

This defaults to 0 (off) to avoid additional load but it is useful to turn on
to help keep quota usage up-to-date and reduce the impact of out of sync usage
issues.
"""),
    cfg.IntOpt('max_age',
        min=0,
        default=0,
        deprecated_group='DEFAULT',
        help="""
The number of seconds between subsequent usage refreshes.

This defaults to 0 (off) to avoid additional load but it is useful to turn on
to help keep quota usage up-to-date and reduce the impact of out of sync usage
issues. Note that quotas are not updated on a periodic task, they will update
on a new reservation if max_age has passed since the last reservation.
"""),
    # TODO(pumaranikar): Add a new config to select between the db_driver and
    # the no_op driver using stevedore.
    cfg.StrOpt('driver',
        default='nova.quota.DbQuotaDriver',
        deprecated_for_removal=True,
        deprecated_since='14.0.0',
        deprecated_group='DEFAULT',
        deprecated_name='quota_driver',
        help="""
The quota enforcer driver.

Provides abstraction for quota checks. Users can configure a specific driver
to use for quota checks.

Possible values:

* nova.quota.DbQuotaDriver (default) or any string representing fully
  qualified class name.
"""),
]


def register_opts(conf):
    """Register the quota option group and its options on *conf*."""
    conf.register_group(quota_group)
    conf.register_opts(quota_opts, group=quota_group)


def list_opts():
    """Return the option groups exposed by this module for oslo sample-config."""
    return {quota_group: quota_opts}
# Code generated by font-to-py.py. # Font: Courier Prime.ttf version = '0.1' def height(): return 25 def max_width(): return 17 def hmap(): return True def reverse(): return True def monospaced(): return False _font =\ b'\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x0e\x00\x00\x0e\x00'\ b'\x00\x0e\x00\x00\x0e\x00\x00\x06\x00\x00\x06\x00\x00\x06\x00\x00'\ b'\x04\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x0f'\ b'\x00\x00\x0f\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\xe7\x01\x00\xe7\x01\x00\xc7\x00\x00\xc7\x00'\ b'\x00\xc6\x00\x00\xc6\x00\x00\xc6\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x80'\ b'\x10\x00\xc0\x18\x00\xc0\x18\x00\xc0\x08\x00\x60\x0c\x00\xfe\x7f'\ b'\x00\xfe\x7f\x00\x30\x06\x00\x30\x06\x00\x30\x02\x00\xff\x3f\x00'\ b'\xff\x3f\x00\x18\x01\x00\x8c\x01\x00\x8c\x01\x00\x8c\x01\x00\x84'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x11\x00\x00\x00\x00\x30\x00\x00\x30\x00\x00\x30'\ b'\x00\x00\x7c\x06\x00\xfe\x07\x00\x33\x07\x00\x33\x06\x00\x33\x00'\ b'\x00\x3f\x00\x00\xfe\x01\x00\xfc\x03\x00\xb0\x07\x00\x30\x06\x00'\ b'\x33\x06\x00\x33\x07\x00\xff\x03\x00\xfb\x00\x00\x30\x00\x00\x30'\ b'\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1c\x00\x00\x3e'\ 
b'\x20\x00\x63\x30\x00\x63\x18\x00\x63\x0c\x00\x3e\x06\x00\x9c\x03'\ b'\x00\xc0\x01\x00\xe0\x00\x00\x70\x1c\x00\x38\x3e\x00\x1c\x63\x00'\ b'\x0e\x63\x00\x07\x63\x00\x02\x3e\x00\x00\x1c\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\xfc\x03\x00\x0e'\ b'\x03\x00\x06\x03\x00\x06\x00\x00\x06\x00\x00\x0c\x00\x00\x1c\x00'\ b'\x00\x3e\x1e\x00\x67\x1e\x00\xe3\x07\x00\xc3\x03\x00\x83\x03\x00'\ b'\xc7\x07\x00\xfe\x1e\x00\x7c\x1c\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x07\x00\x00\x07\x00\x00\x07\x00\x00\x07'\ b'\x00\x00\x07\x00\x00\x03\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\xc0\x00\x00'\ b'\xf0\x00\x00\x38\x00\x00\x18\x00\x00\x0c\x00\x00\x0e\x00\x00\x06'\ b'\x00\x00\x06\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00'\ b'\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x06\x00\x00\x06\x00\x00'\ b'\x06\x00\x00\x0c\x00\x00\x1c\x00\x00\x38\x00\x00\x70\x00\x00\xe0'\ b'\x00\x00\x40\x00\x00\x11\x00\x00\x00\x00\x03\x00\x00\x07\x00\x00'\ b'\x0c\x00\x00\x18\x00\x00\x30\x00\x00\x70\x00\x00\x60\x00\x00\x60'\ b'\x00\x00\xc0\x00\x00\xc0\x00\x00\xc0\x00\x00\xc0\x00\x00\xc0\x00'\ b'\x00\xc0\x00\x00\xc0\x00\x00\xe0\x00\x00\x60\x00\x00\x60\x00\x00'\ b'\x30\x00\x00\x38\x00\x00\x1c\x00\x00\x0e\x00\x00\x07\x00\x00\x01'\ b'\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x30\x00\x00'\ b'\x70\x00\x00\x30\x00\x00\x21\x06\x00\xaf\x07\x00\xff\x07\x00\x70'\ b'\x00\x00\xd8\x00\x00\xdc\x00\x00\x8e\x01\x00\x8e\x01\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11'\ 
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\xff'\ b'\x0f\x00\xff\x0f\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00'\ b'\x00\x60\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1c\x00\x00\x1e\x00'\ b'\x00\x0e\x00\x00\x0e\x00\x00\x06\x00\x00\x07\x00\x00\x03\x00\x00'\ b'\x03\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x0f\x00\xff\x0f\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x06\x00\x00\x0f\x00\x00\x0f\x00\x00\x06\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x11\x00\x00\x00\x00\x00\x06\x00\x00\x06\x00\x00\x03'\ b'\x00\x00\x03\x00\x80\x01\x00\x80\x01\x00\xc0\x01\x00\xc0\x00\x00'\ b'\xe0\x00\x00\x60\x00\x00\x60\x00\x00\x30\x00\x00\x30\x00\x00\x18'\ b'\x00\x00\x18\x00\x00\x0c\x00\x00\x0c\x00\x00\x0e\x00\x00\x06\x00'\ b'\x00\x07\x00\x00\x03\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\xfc\x01'\ b'\x00\x8e\x03\x00\x06\x03\x00\x07\x07\x00\x03\x06\x00\x03\x06\x00'\ b'\x03\x06\x00\x03\x06\x00\x03\x06\x00\x03\x06\x00\x07\x07\x00\x06'\ b'\x03\x00\x8e\x03\x00\xfc\x01\x00\xf8\x00\x00\x00\x00\x00\x00\x00'\ 
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x70\x00\x00\x7e\x00\x00\x6f\x00'\ b'\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00'\ b'\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60'\ b'\x00\x00\xfe\x07\x00\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x7c\x00\x00\xff\x01\x00\x87\x03\x00\x03\x03'\ b'\x00\x03\x03\x00\x00\x03\x00\x80\x03\x00\x80\x01\x00\xc0\x01\x00'\ b'\xe0\x00\x00\x70\x00\x00\x38\x00\x00\x1c\x06\x00\x0e\x06\x00\xff'\ b'\x07\x00\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x7c\x00\x00\xff\x01\x00\x83\x03\x00\x03\x03\x00\x00\x03'\ b'\x00\x80\x03\x00\xf8\x01\x00\xf8\x01\x00\x80\x03\x00\x00\x06\x00'\ b'\x00\x06\x00\x00\x06\x00\x01\x06\x00\x07\x03\x00\xfe\x03\x00\xf8'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80'\ b'\x01\x00\xc0\x01\x00\xe0\x01\x00\xb0\x01\x00\xb0\x01\x00\x98\x01'\ b'\x00\x8c\x01\x00\x86\x01\x00\x87\x01\x00\xff\x0f\x00\xff\x0f\x00'\ b'\x80\x01\x00\x80\x01\x00\x80\x01\x00\xf0\x0f\x00\xf0\x0f\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x03\x00\xfe'\ b'\x03\x00\x06\x00\x00\x06\x00\x00\x06\x00\x00\xfe\x00\x00\xfe\x03'\ b'\x00\x06\x03\x00\x00\x07\x00\x00\x06\x00\x00\x06\x00\x00\x06\x00'\ b'\x01\x07\x00\x87\x03\x00\xfe\x01\x00\xf8\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x03\x00\xf0\x03\x00\x78'\ b'\x00\x00\x1c\x00\x00\x0e\x00\x00\x06\x00\x00\xf3\x00\x00\xff\x03'\ b'\x00\x07\x03\x00\x03\x06\x00\x03\x06\x00\x03\x06\x00\x02\x06\x00'\ 
b'\x06\x03\x00\xfc\x03\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\xff\x07\x00\xff\x07\x00\x03\x03\x00\x03'\ b'\x03\x00\x80\x03\x00\x80\x01\x00\x80\x01\x00\xc0\x00\x00\xc0\x00'\ b'\x00\x60\x00\x00\x60\x00\x00\x70\x00\x00\x30\x00\x00\x30\x00\x00'\ b'\x18\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x78\x00\x00\xfe\x01\x00\x87\x03\x00\x03\x03\x00\x03'\ b'\x03\x00\x86\x03\x00\xfc\x01\x00\xfc\x01\x00\x06\x03\x00\x03\x06'\ b'\x00\x03\x06\x00\x03\x06\x00\x03\x06\x00\x06\x03\x00\xfe\x03\x00'\ b'\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\xf8\x00\x00\xfe\x01\x00\x86\x03\x00\x03\x02\x00\x03\x06\x00\x03'\ b'\x06\x00\x03\x06\x00\x06\x07\x00\xfe\x07\x00\x78\x06\x00\x00\x03'\ b'\x00\x80\x03\x00\xc0\x01\x00\xf0\x00\x00\x7e\x00\x00\x1e\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x06\x00\x00\x0f\x00\x00\x0f\x00\x00\x06'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x06\x00\x00\x0f\x00\x00\x0f\x00\x00\x06\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x0c\x00\x00\x1e\x00\x00\x1e\x00\x00\x0c\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1c\x00'\ b'\x00\x1e\x00\x00\x0e\x00\x00\x0e\x00\x00\x06\x00\x00\x07\x00\x00'\ b'\x03\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x00'\ b'\x00\x0f\x00\xc0\x03\x00\xf0\x00\x00\x3c\x00\x00\x0f\x00\x00\x0f'\ 
b'\x00\x00\x3e\x00\x00\xf8\x00\x00\xc0\x03\x00\x00\x0f\x00\x00\x0c'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\xff\x0f\x00\xff\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\xff\x0f\x00\xff\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x1e\x00\x00\x78\x00\x00'\ b'\xe0\x01\x00\x80\x07\x00\x00\x1e\x00\x00\x1e\x00\x80\x07\x00\xe0'\ b'\x01\x00\x78\x00\x00\x1e\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x00'\ b'\x00\xff\x01\x00\x83\x03\x00\x03\x03\x00\x00\x03\x00\x00\x03\x00'\ b'\x80\x01\x00\xf0\x01\x00\x70\x00\x00\x30\x00\x00\x30\x00\x00\x10'\ b'\x00\x00\x30\x00\x00\x78\x00\x00\x78\x00\x00\x30\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x07'\ b'\x00\xf0\x0f\x00\x38\x1c\x00\x0c\x30\x00\xc6\x75\x00\xe6\x67\x00'\ b'\x73\x66\x00\x3b\x62\x00\x1b\x62\x00\x1b\x63\x00\x1b\x33\x00\xfb'\ b'\x3e\x00\x76\x1e\x00\x06\x00\x00\x1c\x04\x00\xf8\x0f\x00\xf0\x03'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x03\x00\xf8\x03\x00\xc0\x06'\ b'\x00\xc0\x06\x00\x40\x0e\x00\x60\x0c\x00\x60\x0c\x00\x30\x1c\x00'\ b'\x30\x18\x00\xf0\x1f\x00\xf8\x3f\x00\x18\x30\x00\x18\x30\x00\x0c'\ b'\x60\x00\x3f\xfc\x01\x7f\xfc\x01\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\xff\x03\x00\xff\x0f\x00\x0c\x1c\x00\x0c\x18'\ 
b'\x00\x0c\x18\x00\x0c\x18\x00\x0c\x0c\x00\xfc\x0f\x00\xfc\x1f\x00'\ b'\x0c\x38\x00\x0c\x30\x00\x0c\x30\x00\x0c\x30\x00\x0c\x38\x00\xff'\ b'\x1f\x00\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\xf0\x19\x00\xf8\x1f\x00\x1c\x1e\x00\x0e\x1c\x00\x06\x18'\ b'\x00\x03\x18\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00'\ b'\x03\x00\x00\x06\x00\x00\x06\x10\x00\x1c\x1c\x00\xf8\x0f\x00\xf0'\ b'\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff'\ b'\x03\x00\xff\x07\x00\x0c\x0e\x00\x0c\x1c\x00\x0c\x18\x00\x0c\x30'\ b'\x00\x0c\x30\x00\x0c\x30\x00\x0c\x30\x00\x0c\x30\x00\x0c\x30\x00'\ b'\x0c\x18\x00\x0c\x1c\x00\x0c\x0e\x00\xff\x07\x00\xff\x03\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x3f\x00\xff'\ b'\x3f\x00\x0c\x30\x00\x0c\x30\x00\x0c\x30\x00\x0c\x03\x00\x0c\x03'\ b'\x00\xfc\x03\x00\xfc\x03\x00\x0c\x03\x00\x0c\x33\x00\x0c\x30\x00'\ b'\x0c\x30\x00\x0c\x30\x00\xff\x3f\x00\xff\x3f\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x3f\x00\xff\x3f\x00\x18'\ b'\x30\x00\x18\x30\x00\x18\x30\x00\x18\x03\x00\x18\x03\x00\xf8\x03'\ b'\x00\xf8\x03\x00\x18\x03\x00\x18\x03\x00\x18\x00\x00\x18\x00\x00'\ b'\x18\x00\x00\xff\x01\x00\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\xf0\x1b\x00\xf8\x1f\x00\x1c\x1c\x00\x0e'\ b'\x18\x00\x06\x18\x00\x03\x18\x00\x03\x00\x00\x03\x00\x00\x03\x00'\ b'\x00\x83\x7f\x00\x83\x7f\x00\x06\x18\x00\x06\x18\x00\x1c\x18\x00'\ b'\xf8\x1f\x00\xf0\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00'\ 
b'\x00\x00\x00\x3f\x7e\x00\x3f\x7e\x00\x0c\x18\x00\x0c\x18\x00\x0c'\ b'\x18\x00\x0c\x18\x00\x0c\x18\x00\xfc\x1f\x00\xfc\x1f\x00\x0c\x18'\ b'\x00\x0c\x18\x00\x0c\x18\x00\x0c\x18\x00\x0c\x18\x00\x3f\x7e\x00'\ b'\x3f\x7e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\xfe\x07\x00\xfe\x07\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60'\ b'\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00'\ b'\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\xff\x1f\x00\xff\x1f\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x3f\x00'\ b'\xfc\x3f\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00'\ b'\x03\x00\x00\x03\x00\x01\x03\x00\x03\x03\x00\x03\x03\x00\x03\x03'\ b'\x00\x03\x03\x00\x87\x03\x00\xfe\x01\x00\xfc\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x7e\x00\x3f\x7e\x00'\ b'\x0c\x1c\x00\x0c\x06\x00\x0c\x03\x00\xcc\x01\x00\xec\x00\x00\xfc'\ b'\x00\x00\xfc\x03\x00\x0c\x07\x00\x0c\x06\x00\x0c\x06\x00\x0c\x0c'\ b'\x00\x0c\x0c\x00\x7f\x78\x00\x7f\x78\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\xff\x01\x00\xff\x01\x00\x18\x00\x00'\ b'\x18\x00\x00\x18\x00\x00\x18\x00\x00\x18\x00\x00\x18\x00\x00\x18'\ b'\x00\x00\x18\x30\x00\x18\x30\x00\x18\x30\x00\x18\x30\x00\x18\x30'\ b'\x00\xff\x3f\x00\xff\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x1e\xf0\x00\x3e\xf8\x00\x3c\x78\x00\x7c\x7c\x00'\ b'\x6c\x6c\x00\x6c\x6c\x00\xcc\x66\x00\xcc\x66\x00\xcc\x66\x00\x8c'\ b'\x63\x00\x8c\x63\x00\x0c\x61\x00\x0c\x60\x00\x0c\x60\x00\x3f\xf8'\ b'\x01\x3f\xf8\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ 
b'\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x1f\x7e\x00\x1f\x7e\x00\x3c\x18\x00\x3c\x18\x00\x6c\x18\x00'\ b'\x4c\x18\x00\xcc\x18\x00\x8c\x19\x00\x8c\x19\x00\x0c\x1b\x00\x0c'\ b'\x1b\x00\x0c\x1e\x00\x0c\x1e\x00\x0c\x1c\x00\x7f\x1c\x00\x7f\x18'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x03'\ b'\x00\xf8\x0f\x00\x1c\x1c\x00\x0e\x38\x00\x06\x30\x00\x03\x60\x00'\ b'\x03\x60\x00\x03\x60\x00\x03\x60\x00\x03\x60\x00\x03\x60\x00\x06'\ b'\x30\x00\x0e\x38\x00\x1c\x1c\x00\xf8\x0f\x00\xe0\x03\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x07\x00\xff\x1f'\ b'\x00\x18\x1c\x00\x18\x30\x00\x18\x30\x00\x18\x30\x00\x18\x30\x00'\ b'\x18\x18\x00\xf8\x1f\x00\xf8\x07\x00\x18\x00\x00\x18\x00\x00\x18'\ b'\x00\x00\x18\x00\x00\xff\x01\x00\xff\x01\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x03\x00\xf8\x0f\x00\x1c\x1c'\ b'\x00\x0e\x38\x00\x06\x30\x00\x03\x60\x00\x03\x60\x00\x03\x60\x00'\ b'\x03\x60\x00\x03\x60\x00\x03\x60\x00\x06\x30\x00\x0e\x38\x00\x1c'\ b'\x1c\x00\xf8\x0f\x00\xe0\x03\x00\x30\x10\x00\xf8\x3f\x00\xfc\x1f'\ b'\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\xff\x03\x00\xff\x0f\x00\x0c\x1c\x00\x0c\x18'\ b'\x00\x0c\x18\x00\x0c\x18\x00\x0c\x0c\x00\xfc\x0f\x00\xfc\x01\x00'\ b'\x8c\x07\x00\x0c\x06\x00\x0c\x0c\x00\x0c\x1c\x00\x0c\x18\x00\x3f'\ b'\x78\x00\x3f\x70\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\xf8\x0c\x00\xfe\x0f\x00\x07\x0f\x00\x03\x0c\x00\x03\x00'\ b'\x00\x03\x00\x00\x1e\x00\x00\xfc\x03\x00\xc0\x0f\x00\x00\x1c\x00'\ b'\x01\x18\x00\x03\x18\x00\x07\x18\x00\x0f\x1c\x00\xff\x0f\x00\xf3'\ 
b'\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff'\ b'\x1f\x00\xff\x1f\x00\x63\x18\x00\x63\x18\x00\x63\x18\x00\x63\x18'\ b'\x00\x63\x18\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00'\ b'\x60\x00\x00\x60\x00\x00\x60\x00\x00\xfe\x07\x00\xfe\x07\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x7e\x00\x3f'\ b'\x7e\x00\x0c\x18\x00\x0c\x18\x00\x0c\x18\x00\x0c\x18\x00\x0c\x18'\ b'\x00\x0c\x18\x00\x0c\x18\x00\x0c\x18\x00\x0c\x18\x00\x0c\x18\x00'\ b'\x0c\x18\x00\x18\x0c\x00\xf8\x0f\x00\xe0\x03\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7f\xf8\x01\x7f\xf8\x01\x0c'\ b'\x60\x00\x18\x30\x00\x18\x30\x00\x18\x38\x00\x30\x18\x00\x30\x18'\ b'\x00\x70\x0c\x00\x60\x0c\x00\x60\x0c\x00\xc0\x06\x00\xc0\x06\x00'\ b'\xc0\x07\x00\x80\x03\x00\x80\x03\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x3f\xf8\x01\x3f\xf8\x01\x0c\x60\x00\x0c'\ b'\x60\x00\x8c\x63\x00\x8c\x63\x00\x8c\x63\x00\x8c\x67\x00\xcc\x66'\ b'\x00\xd8\x26\x00\xd8\x3c\x00\x78\x3c\x00\x78\x3c\x00\x78\x38\x00'\ b'\x38\x38\x00\x38\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x7e\x7c\x00\x7e\x7c\x00\x18\x18\x00\x30\x0c\x00\x70'\ b'\x0e\x00\x60\x07\x00\xc0\x03\x00\x80\x01\x00\xc0\x03\x00\xe0\x07'\ b'\x00\x60\x06\x00\x30\x0c\x00\x18\x18\x00\x0c\x30\x00\x3f\xfc\x00'\ b'\x3f\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x3f\x7e\x00\x3f\x7e\x00\x0c\x1c\x00\x18\x0c\x00\x18\x06\x00\x30'\ b'\x07\x00\x70\x03\x00\xe0\x01\x00\xc0\x01\x00\xc0\x00\x00\xc0\x00'\ 
b'\x00\xc0\x00\x00\xc0\x00\x00\xc0\x00\x00\xfc\x0f\x00\xfc\x0f\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x0f\x00'\ b'\xff\x0f\x00\x03\x07\x00\x03\x03\x00\x83\x03\x00\xc3\x01\x00\xc0'\ b'\x00\x00\x60\x00\x00\x70\x00\x00\x30\x00\x00\x18\x0c\x00\x1c\x0c'\ b'\x00\x0e\x0c\x00\x06\x0c\x00\xff\x0f\x00\xff\x0f\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11'\ b'\x00\x00\x00\x00\x7f\x00\x00\x7f\x00\x00\x03\x00\x00\x03\x00\x00'\ b'\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03'\ b'\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00'\ b'\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00'\ b'\x03\x00\x00\x7f\x00\x00\x7f\x00\x00\x00\x00\x00\x11\x00\x00\x00'\ b'\x00\x03\x00\x00\x03\x00\x00\x07\x00\x00\x06\x00\x00\x0e\x00\x00'\ b'\x0c\x00\x00\x0c\x00\x00\x18\x00\x00\x18\x00\x00\x30\x00\x00\x30'\ b'\x00\x00\x60\x00\x00\x60\x00\x00\xe0\x00\x00\xc0\x00\x00\xc0\x01'\ b'\x00\x80\x01\x00\x80\x01\x00\x00\x03\x00\x00\x03\x00\x00\x06\x00'\ b'\x00\x06\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x7f\x00'\ b'\x00\x7f\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00'\ b'\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60'\ b'\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00'\ b'\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x7f\x00\x00'\ b'\x7f\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x30\x00\x00\x70\x00\x00\x78\x00\x00\xd8\x00\x00\xdc\x00\x00'\ b'\x8c\x01\x00\x8e\x01\x00\x06\x03\x00\x07\x03\x00\x02\x06\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ 
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff'\ b'\x01\xff\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x11\x00\x03\x00\x00\x07\x00\x00\x1f\x00\x00\x78\x00\x00\x40\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\xf0\x03\x00\xfc\x07\x00\x0e\x0e\x00\x00\x0c\x00\x00\x0c\x00'\ b'\xf8\x0d\x00\xfe\x0f\x00\x07\x0c\x00\x03\x0c\x00\x03\x0e\x00\x87'\ b'\x0f\x00\xfe\x3f\x00\xfc\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x0f'\ b'\x00\x00\x0f\x00\x00\x0c\x00\x00\x0c\x00\x00\x0c\x00\x00\xcc\x07'\ b'\x00\xec\x0f\x00\x3c\x1c\x00\x1c\x18\x00\x0c\x30\x00\x0c\x30\x00'\ b'\x0c\x30\x00\x0c\x30\x00\x0c\x30\x00\x1c\x18\x00\x3c\x1c\x00\xef'\ b'\x0f\x00\xcf\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\x19\x00\xfc\x1f'\ b'\x00\x1e\x1e\x00\x06\x18\x00\x03\x18\x00\x03\x18\x00\x03\x00\x00'\ b'\x03\x00\x00\x03\x00\x00\x06\x10\x00\x0e\x1c\x00\xfc\x0f\x00\xf0'\ b'\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x11\x00\x00\x00\x00\xc0\x0f\x00\xc0\x0f\x00\x00'\ b'\x0c\x00\x00\x0c\x00\x00\x0c\x00\x00\x0c\x00\xf8\x0c\x00\xfc\x0d'\ b'\x00\x0e\x0f\x00\x07\x0e\x00\x03\x0c\x00\x03\x0c\x00\x03\x0c\x00'\ b'\x03\x0c\x00\x07\x0e\x00\x0e\x0f\x00\xfc\x3d\x00\xf8\x3c\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ 
b'\x00\x00\x00\x00\x00\xf0\x01\x00\xfc\x07\x00\x0e\x0e\x00\x06\x0c'\ b'\x00\x03\x18\x00\xff\x1f\x00\xff\x1f\x00\x03\x00\x00\x03\x00\x00'\ b'\x07\x00\x00\x0e\x0e\x00\xfc\x0f\x00\xf8\x03\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00'\ b'\x00\x00\x00\xc0\x0f\x00\xe0\x1f\x00\x70\x18\x00\x30\x00\x00\x30'\ b'\x00\x00\x30\x00\x00\xff\x0f\x00\xff\x0f\x00\x30\x00\x00\x30\x00'\ b'\x00\x30\x00\x00\x30\x00\x00\x30\x00\x00\x30\x00\x00\x30\x00\x00'\ b'\x30\x00\x00\xff\x0f\x00\xff\x0f\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8'\ b'\x3c\x00\xfc\x3d\x00\x0e\x0f\x00\x06\x0e\x00\x03\x0c\x00\x03\x0c'\ b'\x00\x03\x0c\x00\x03\x0c\x00\x03\x0c\x00\x06\x0e\x00\x0e\x0f\x00'\ b'\xfc\x0d\x00\xf8\x0c\x00\x00\x0c\x00\x00\x0c\x00\x06\x06\x00\xfe'\ b'\x07\x00\xf8\x01\x00\x00\x00\x00\x11\x00\x00\x00\x00\x0f\x00\x00'\ b'\x0f\x00\x00\x0c\x00\x00\x0c\x00\x00\x0c\x00\x00\x8c\x07\x00\xec'\ b'\x0f\x00\x7c\x1c\x00\x1c\x18\x00\x1c\x18\x00\x0c\x18\x00\x0c\x18'\ b'\x00\x0c\x18\x00\x0c\x18\x00\x0c\x18\x00\x0c\x18\x00\x3f\x7e\x00'\ b'\x3f\x7e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x11\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00'\ b'\x60\x00\x00\x00\x00\x00\x00\x00\x00\x7e\x00\x00\x7e\x00\x00\x60'\ b'\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00'\ b'\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\xff\x0f\x00\xff\x0f\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x11\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00'\ b'\x00\x00\x00\x00\x00\x00\xfe\x03\x00\xfe\x03\x00\x00\x03\x00\x00'\ b'\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03'\ b'\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00'\ b'\x00\x03\x00\x87\x03\x00\xfe\x01\x00\xf8\x00\x00\x00\x00\x00\x11'\ 
b'\x00\x00\x00\x00\x0f\x00\x00\x0f\x00\x00\x0c\x00\x00\x0c\x00\x00'\ b'\x0c\x00\x00\x0c\x3f\x00\x0c\x3f\x00\x0c\x0e\x00\x0c\x07\x00\x8c'\ b'\x03\x00\xec\x00\x00\xfc\x01\x00\xbc\x03\x00\x1c\x07\x00\x0c\x0e'\ b'\x00\x0c\x0c\x00\x3f\x7e\x00\x3f\x7e\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00'\ b'\x00\x7f\x00\x00\x7f\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00'\ b'\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60'\ b'\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00\x00\x60\x00'\ b'\x00\xff\x0f\x00\xff\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xef\x39\x00'\ b'\xff\x7d\x00\x1c\x67\x00\x1c\x67\x00\x0c\x63\x00\x0c\x63\x00\x0c'\ b'\x63\x00\x0c\x63\x00\x0c\x63\x00\x0c\x63\x00\x0c\x63\x00\x1f\xef'\ b'\x01\x3f\xef\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x8f\x07\x00\xef\x0f\x00'\ b'\x3c\x1c\x00\x1c\x18\x00\x1c\x18\x00\x0c\x18\x00\x0c\x18\x00\x0c'\ b'\x18\x00\x0c\x18\x00\x0c\x18\x00\x0c\x18\x00\x3f\x7e\x00\x3f\x7e'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\xf0\x01\x00\xfc\x07\x00\x0e\x0e\x00'\ b'\x06\x0c\x00\x03\x18\x00\x03\x18\x00\x03\x18\x00\x03\x18\x00\x03'\ b'\x18\x00\x06\x0c\x00\x0e\x0e\x00\xfc\x07\x00\xf0\x01\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\xcf\x07\x00\xef\x0f\x00\x3c\x1c\x00\x1c\x18\x00'\ b'\x0c\x30\x00\x0c\x30\x00\x0c\x30\x00\x0c\x30\x00\x0c\x30\x00\x1c'\ b'\x18\x00\x3c\x1c\x00\xec\x0f\x00\xcc\x07\x00\x0c\x00\x00\x0c\x00'\ 
b'\x00\x0c\x00\x00\xff\x00\x00\xff\x00\x00\x00\x00\x00\x11\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\xf8\x3c\x00\xfc\x3d\x00\x0e\x0f\x00\x06\x0e\x00\x03\x0c\x00'\ b'\x03\x0c\x00\x03\x0c\x00\x03\x0c\x00\x03\x0c\x00\x06\x0e\x00\x0e'\ b'\x0f\x00\xfc\x0d\x00\xf8\x0c\x00\x00\x0c\x00\x00\x0c\x00\x00\x0c'\ b'\x00\xc0\x3f\x00\xc0\x3f\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x8f\x07'\ b'\x00\xcf\x0f\x00\xec\x04\x00\x3c\x00\x00\x1c\x00\x00\x1c\x00\x00'\ b'\x0c\x00\x00\x0c\x00\x00\x0c\x00\x00\x0c\x00\x00\x0c\x00\x00\xff'\ b'\x01\x00\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7c\x06\x00\xfe\x07'\ b'\x00\x07\x07\x00\x03\x06\x00\x03\x00\x00\x7e\x00\x00\xfc\x03\x00'\ b'\x80\x07\x00\x00\x06\x00\x03\x06\x00\x07\x07\x00\xff\x03\x00\xfb'\ b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x30\x00\x00\x30'\ b'\x00\x00\x30\x00\x00\x30\x00\x00\x30\x00\x00\xff\x0f\x00\xff\x0f'\ b'\x00\x30\x00\x00\x30\x00\x00\x30\x00\x00\x30\x00\x00\x30\x00\x00'\ b'\x30\x00\x00\x30\x00\x00\x70\x38\x00\xe0\x1f\x00\xc0\x07\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x0f\x1f\x00\x0f\x1f\x00\x0c\x18\x00\x0c\x18'\ b'\x00\x0c\x18\x00\x0c\x18\x00\x0c\x18\x00\x0c\x18\x00\x0c\x1c\x00'\ b'\x0c\x1c\x00\x1c\x1e\x00\xf8\x7b\x00\xf0\x78\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x3f\x7e\x00\x7f\x7e\x00\x0c\x18\x00\x1c\x18\x00\x18\x1c'\ b'\x00\x18\x0c\x00\x30\x0e\x00\x30\x06\x00\x70\x06\x00\x60\x03\x00'\ 
b'\x60\x03\x00\xc0\x03\x00\xc0\x01\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f'\ b'\xf0\x01\x3f\xf0\x01\x06\xc0\x00\x8c\xc3\x00\x8c\x63\x00\x8c\x63'\ b'\x00\xcc\x66\x00\xd8\x36\x00\xd8\x36\x00\x78\x3c\x00\x78\x3c\x00'\ b'\x70\x1c\x00\x30\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3e\x3e\x00\x3e'\ b'\x3e\x00\x18\x0c\x00\x30\x06\x00\x60\x03\x00\xc0\x01\x00\xc0\x01'\ b'\x00\x60\x03\x00\x30\x06\x00\x38\x0c\x00\x1c\x1c\x00\x3e\x3e\x00'\ b'\x3f\x7e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x7e\x00\x7f\x7e\x00\x0c'\ b'\x18\x00\x1c\x18\x00\x18\x0c\x00\x30\x0c\x00\x30\x06\x00\x60\x06'\ b'\x00\x60\x03\x00\xc0\x03\x00\xc0\x01\x00\x80\x01\x00\xc0\x00\x00'\ b'\xc0\x00\x00\x60\x00\x00\x70\x00\x00\x3e\x00\x00\x0e\x00\x00\x00'\ b'\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\xff\x0f\x00\xff\x0f\x00\x03\x07\x00\x83'\ b'\x03\x00\xc0\x01\x00\xe0\x00\x00\x70\x00\x00\x78\x00\x00\x38\x0c'\ b'\x00\x1c\x0c\x00\x0e\x0c\x00\xff\x0f\x00\xff\x0f\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11'\ b'\x00\x00\x00\x00\xc0\x03\x00\xe0\x03\x00\x70\x00\x00\x30\x00\x00'\ b'\x30\x00\x00\x30\x00\x00\x30\x00\x00\x30\x00\x00\x30\x00\x00\x38'\ b'\x00\x00\x1f\x00\x00\x1f\x00\x00\x18\x00\x00\x30\x00\x00\x30\x00'\ b'\x00\x30\x00\x00\x30\x00\x00\x30\x00\x00\x30\x00\x00\x30\x00\x00'\ b'\x70\x00\x00\xe0\x03\x00\xc0\x03\x00\x00\x00\x00\x11\x00\x00\x00'\ b'\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00'\ b'\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03'\ 
b'\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00'\ b'\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00\x03\x00\x00'\ b'\x03\x00\x00\x03\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x0f\x00'\ b'\x00\x1f\x00\x00\x38\x00\x00\x30\x00\x00\x30\x00\x00\x30\x00\x00'\ b'\x30\x00\x00\x30\x00\x00\x30\x00\x00\x70\x00\x00\xe0\x03\x00\xe0'\ b'\x03\x00\x70\x00\x00\x30\x00\x00\x30\x00\x00\x30\x00\x00\x30\x00'\ b'\x00\x30\x00\x00\x30\x00\x00\x30\x00\x00\x38\x00\x00\x1f\x00\x00'\ b'\x0f\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x3c\x08\x00\xff\x1f\x00\xc3\x07\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\ b'\x00\x00\x00' _index =\ b'\x00\x00\x4d\x00\x9a\x00\xe7\x00\x34\x01\x81\x01\xce\x01\x1b\x02'\ b'\x68\x02\xb5\x02\x02\x03\x4f\x03\x9c\x03\xe9\x03\x36\x04\x83\x04'\ b'\xd0\x04\x1d\x05\x6a\x05\xb7\x05\x04\x06\x51\x06\x9e\x06\xeb\x06'\ b'\x38\x07\x85\x07\xd2\x07\x1f\x08\x6c\x08\xb9\x08\x06\x09\x53\x09'\ b'\xa0\x09\xed\x09\x3a\x0a\x87\x0a\xd4\x0a\x21\x0b\x6e\x0b\xbb\x0b'\ b'\x08\x0c\x55\x0c\xa2\x0c\xef\x0c\x3c\x0d\x89\x0d\xd6\x0d\x23\x0e'\ b'\x70\x0e\xbd\x0e\x0a\x0f\x57\x0f\xa4\x0f\xf1\x0f\x3e\x10\x8b\x10'\ b'\xd8\x10\x25\x11\x72\x11\xbf\x11\x0c\x12\x59\x12\xa6\x12\xf3\x12'\ b'\x40\x13\x8d\x13\xda\x13\x27\x14\x74\x14\xc1\x14\x0e\x15\x5b\x15'\ b'\xa8\x15\xf5\x15\x42\x16\x8f\x16\xdc\x16\x29\x17\x76\x17\xc3\x17'\ b'\x10\x18\x5d\x18\xaa\x18\xf7\x18\x44\x19\x91\x19\xde\x19\x2b\x1a'\ b'\x78\x1a\xc5\x1a\x12\x1b\x5f\x1b\xac\x1b\xf9\x1b\x46\x1c\x93\x1c'\ def _chr_addr(ordch): offset = 2 * (ordch - 32) return int.from_bytes(_index[offset:offset + 2], 'little') def get_ch(ch): ordch = ord(ch) ordch = ordch if ordch >= 32 and ordch <= 126 else ord('?') offset = _chr_addr(ordch) width = int.from_bytes(_font[offset:offset + 2], 'little') next_offs = _chr_addr(ordch +1) 
return memoryview(_font[offset + 2:next_offs]), 25, width
""" Helper classes for SSPI authentication via the win32security module. SSPI authentication involves a token-exchange "dance", the exact details of which depends on the authentication provider used. There are also a number of complex flags and constants that need to be used - in most cases, there are reasonable defaults. These classes attempt to hide these details from you until you really need to know. They are not designed to handle all cases, just the common ones. If you need finer control than offered here, just use the win32security functions directly. """ # Based on Roger Upole's sspi demos. # $Id$ import win32security, sspicon error = win32security.error class _BaseAuth(object): def __init__(self): self.reset() def reset(self): """Reset everything to an unauthorized state""" self.ctxt = None self.authenticated = False # The next seq_num for an encrypt/sign operation self.next_seq_num = 0 def _get_next_seq_num(self): """Get the next sequence number for a transmission. Default implementation is to increment a counter """ ret = self.next_seq_num self.next_seq_num = self.next_seq_num + 1 return ret def encrypt(self, data): """Encrypt a string, returning a tuple of (encrypted_data, encryption_data). These can be passed to decrypt to get back the original string. 
""" pkg_size_info=self.ctxt.QueryContextAttributes(sspicon.SECPKG_ATTR_SIZES) trailersize=pkg_size_info['SecurityTrailer'] encbuf=win32security.PySecBufferDescType() encbuf.append(win32security.PySecBufferType(len(data), sspicon.SECBUFFER_DATA)) encbuf.append(win32security.PySecBufferType(trailersize, sspicon.SECBUFFER_TOKEN)) encbuf[0].Buffer=data self.ctxt.EncryptMessage(0,encbuf,self._get_next_seq_num()) return encbuf[0].Buffer, encbuf[1].Buffer def decrypt(self, data, trailer): """Decrypt a previously encrypted string, returning the orignal data""" encbuf=win32security.PySecBufferDescType() encbuf.append(win32security.PySecBufferType(len(data), sspicon.SECBUFFER_DATA)) encbuf.append(win32security.PySecBufferType(len(trailer), sspicon.SECBUFFER_TOKEN)) encbuf[0].Buffer=data encbuf[1].Buffer=trailer self.ctxt.DecryptMessage(encbuf,self._get_next_seq_num()) return encbuf[0].Buffer def sign(self, data): """sign a string suitable for transmission, returning the signature. Passing the data and signature to verify will determine if the data is unchanged. """ pkg_size_info=self.ctxt.QueryContextAttributes(sspicon.SECPKG_ATTR_SIZES) sigsize=pkg_size_info['MaxSignature'] sigbuf=win32security.PySecBufferDescType() sigbuf.append(win32security.PySecBufferType(len(data), sspicon.SECBUFFER_DATA)) sigbuf.append(win32security.PySecBufferType(sigsize, sspicon.SECBUFFER_TOKEN)) sigbuf[0].Buffer=data self.ctxt.MakeSignature(0,sigbuf,self._get_next_seq_num()) return sigbuf[1].Buffer def verify(self, data, sig): """Verifies data and its signature. If verification fails, an sspi.error will be raised. 
""" sigbuf=win32security.PySecBufferDescType() sigbuf.append(win32security.PySecBufferType(len(data), sspicon.SECBUFFER_DATA)) sigbuf.append(win32security.PySecBufferType(len(sig), sspicon.SECBUFFER_TOKEN)) sigbuf[0].Buffer=data sigbuf[1].Buffer=sig self.ctxt.VerifySignature(sigbuf,self._get_next_seq_num()) class ClientAuth(_BaseAuth): """Manages the client side of an SSPI authentication handshake """ def __init__(self, pkg_name, # Name of the package to used. client_name = None, # User for whom credentials are used. auth_info = None, # or a tuple of (username, domain, password) targetspn = None, # Target security context provider name. scflags=None, # security context flags datarep=sspicon.SECURITY_NETWORK_DREP): if scflags is None: scflags = sspicon.ISC_REQ_INTEGRITY|sspicon.ISC_REQ_SEQUENCE_DETECT|\ sspicon.ISC_REQ_REPLAY_DETECT|sspicon.ISC_REQ_CONFIDENTIALITY self.scflags=scflags self.datarep=datarep self.targetspn=targetspn self.pkg_info=win32security.QuerySecurityPackageInfo(pkg_name) self.credentials, \ self.credentials_expiry=win32security.AcquireCredentialsHandle( client_name, self.pkg_info['Name'], sspicon.SECPKG_CRED_OUTBOUND, None, auth_info) _BaseAuth.__init__(self) # Perform *one* step of the client authentication process. 
def authorize(self, sec_buffer_in): if sec_buffer_in is not None and type(sec_buffer_in) != win32security.PySecBufferDescType: # User passed us the raw data - wrap it into a SecBufferDesc sec_buffer_new=win32security.PySecBufferDescType() tokenbuf=win32security.PySecBufferType(self.pkg_info['MaxToken'], sspicon.SECBUFFER_TOKEN) tokenbuf.Buffer=sec_buffer_in sec_buffer_new.append(tokenbuf) sec_buffer_in = sec_buffer_new sec_buffer_out=win32security.PySecBufferDescType() tokenbuf=win32security.PySecBufferType(self.pkg_info['MaxToken'], sspicon.SECBUFFER_TOKEN) sec_buffer_out.append(tokenbuf) ## input context handle should be NULL on first call ctxtin=self.ctxt if self.ctxt is None: self.ctxt=win32security.PyCtxtHandleType() err, attr, exp=win32security.InitializeSecurityContext( self.credentials, ctxtin, self.targetspn, self.scflags, self.datarep, sec_buffer_in, self.ctxt, sec_buffer_out) # Stash these away incase someone needs to know the state from the # final call. self.ctxt_attr = attr self.ctxt_expiry = exp if err in (sspicon.SEC_I_COMPLETE_NEEDED,sspicon.SEC_I_COMPLETE_AND_CONTINUE): self.ctxt.CompleteAuthToken(sec_buffer_out) self.authenticated = err == 0 return err, sec_buffer_out class ServerAuth(_BaseAuth): """Manages the server side of an SSPI authentication handshake """ def __init__(self, pkg_name, spn = None, scflags=None, datarep=sspicon.SECURITY_NETWORK_DREP): self.spn=spn self.datarep=datarep if scflags is None: scflags = sspicon.ASC_REQ_INTEGRITY|sspicon.ASC_REQ_SEQUENCE_DETECT|\ sspicon.ASC_REQ_REPLAY_DETECT|sspicon.ASC_REQ_CONFIDENTIALITY # Should we default to sspicon.KerbAddExtraCredentialsMessage # if pkg_name=='Kerberos'? self.scflags=scflags self.pkg_info=win32security.QuerySecurityPackageInfo(pkg_name) self.credentials, \ self.credentials_expiry=win32security.AcquireCredentialsHandle(spn, self.pkg_info['Name'], sspicon.SECPKG_CRED_INBOUND, None, None) _BaseAuth.__init__(self) # Perform *one* step of the server authentication process. 
def authorize(self, sec_buffer_in): if sec_buffer_in is not None and type(sec_buffer_in) != win32security.PySecBufferDescType: # User passed us the raw data - wrap it into a SecBufferDesc sec_buffer_new=win32security.PySecBufferDescType() tokenbuf=win32security.PySecBufferType(self.pkg_info['MaxToken'], sspicon.SECBUFFER_TOKEN) tokenbuf.Buffer=sec_buffer_in sec_buffer_new.append(tokenbuf) sec_buffer_in = sec_buffer_new sec_buffer_out=win32security.PySecBufferDescType() tokenbuf=win32security.PySecBufferType(self.pkg_info['MaxToken'], sspicon.SECBUFFER_TOKEN) sec_buffer_out.append(tokenbuf) ## input context handle is None initially, then handle returned from last call thereafter ctxtin=self.ctxt if self.ctxt is None: self.ctxt=win32security.PyCtxtHandleType() err, attr, exp = win32security.AcceptSecurityContext(self.credentials, ctxtin, sec_buffer_in, self.scflags, self.datarep, self.ctxt, sec_buffer_out) # Stash these away incase someone needs to know the state from the # final call. self.ctxt_attr = attr self.ctxt_expiry = exp if err in (sspicon.SEC_I_COMPLETE_NEEDED,sspicon.SEC_I_COMPLETE_AND_CONTINUE): self.ctxt.CompleteAuthToken(sec_buffer_out) self.authenticated = err == 0 return err, sec_buffer_out if __name__=='__main__': # Setup the 2 contexts. sspiclient=ClientAuth("NTLM") sspiserver=ServerAuth("NTLM") # Perform the authentication dance, each loop exchanging more information # on the way to completing authentication. sec_buffer=None while 1: err, sec_buffer = sspiclient.authorize(sec_buffer) err, sec_buffer = sspiserver.authorize(sec_buffer) if err==0: break data = "hello".encode("ascii") # py3k-friendly sig = sspiclient.sign(data) sspiserver.verify(data, sig) data, key = sspiclient.encrypt(data) assert sspiserver.decrypt(data, key) == data print("cool!")
""" Generic XML read and write utility Usage: Either extend xml_reader or add as a class variable. """ ############################################################################ #This software was developed by the University of Tennessee as part of the #Distributed Data Analysis of Neutron Scattering Experiments (DANSE) #project funded by the US National Science Foundation. #If you use DANSE applications to do scientific research that leads to #publication, we ask that you acknowledge the use of the software with the #following sentence: #This work benefited from DANSE software developed under NSF award DMR-0520547. #copyright 2008,2009 University of Tennessee ############################################################################# import logging from lxml import etree from lxml.builder import E from ..file_reader_base_class import FileReader, decode logger = logging.getLogger(__name__) PARSER = etree.ETCompatXMLParser(remove_comments=True, remove_pis=False) class XMLreader(FileReader): """ Generic XML read and write class. Mostly helper functions. Makes reading/writing XML a bit easier than calling lxml libraries directly. :Dependencies: This class requires lxml 2.3 or higher. 
""" xml = None xmldoc = None xmlroot = None schema = None schemadoc = None encoding = None processing_instructions = None def __init__(self, xml=None, schema=None): super(XMLreader, self).__init__() self.xml = xml self.schema = schema self.processing_instructions = {} if xml is not None: self.set_xml_file(xml) else: self.xmldoc = None self.xmlroot = None if schema is not None: self.set_schema(schema) else: self.schemadoc = None def reader(self): """ Read in an XML file into memory and return an lxml dictionary """ if self.validate_xml(): self.xmldoc = etree.parse(self.xml, parser=PARSER) else: raise etree.XMLSchemaValidateError(self, self.find_invalid_xml()) return self.xmldoc def set_xml_file(self, xml): """ Set the XML file and parse """ try: self.xml = xml self.xmldoc = etree.parse(self.xml, parser=PARSER) self.xmlroot = self.xmldoc.getroot() except etree.XMLSyntaxError as xml_error: logger.info(xml_error) raise xml_error except Exception: self.xml = None self.xmldoc = None self.xmlroot = None def set_xml_string(self, tag_soup): """ Set an XML string as the working XML. 
:param tag_soup: XML formatted string """ try: self.xml = tag_soup self.xmldoc = tag_soup self.xmlroot = etree.fromstring(tag_soup) except etree.XMLSyntaxError as xml_error: logger.info(xml_error) raise xml_error except Exception as exc: self.xml = None self.xmldoc = None self.xmlroot = None raise exc def set_schema(self, schema): """ Set the schema file and parse """ try: self.schema = schema self.schemadoc = etree.parse(self.schema, parser=PARSER) except etree.XMLSyntaxError as xml_error: logger.info(xml_error) except Exception: self.schema = None self.schemadoc = None def validate_xml(self): """ Checks to see if the XML file meets the schema """ valid = True if self.schema is not None: self.parse_schema_and_doc() schema_check = etree.XMLSchema(self.schemadoc) valid = schema_check.validate(self.xmldoc) return valid def find_invalid_xml(self): """ Finds the first offending element that should not be present in XML file """ first_error = "" self.parse_schema_and_doc() schema = etree.XMLSchema(self.schemadoc) try: first_error = schema.assertValid(self.xmldoc) except etree.DocumentInvalid as err: # Suppress errors for <'any'> elements if "##other" in str(err): return first_error first_error = str(err) return first_error def parse_schema_and_doc(self): """ Creates a dictionary of the parsed schema and xml files. 
""" self.set_xml_file(self.xml) self.set_schema(self.schema) def to_string(self, elem, pretty_print=False, encoding=None): """ Converts an etree element into a string """ return decode(etree.tostring(elem, pretty_print=pretty_print, encoding=encoding)) def break_processing_instructions(self, string, dic): """ Method to break a processing instruction string apart and add to a dict :param string: A processing instruction as a string :param dic: The dictionary to save the PIs to """ pi_string = string.replace("<?", "").replace("?>", "") split = pi_string.split(" ", 1) pi_name = split[0] attr = split[1] new_pi_name = self._create_unique_key(dic, pi_name) dic[new_pi_name] = attr return dic def set_processing_instructions(self): """ Take out all processing instructions and create a dictionary from them If there is a default encoding, the value is also saved """ dic = {} proc_instr = self.xmlroot.getprevious() while proc_instr is not None: pi_string = self.to_string(proc_instr) if "?>\n<?" in pi_string: pi_string = pi_string.split("?>\n<?") if isinstance(pi_string, str): dic = self.break_processing_instructions(pi_string, dic) elif isinstance(pi_string, list): for item in pi_string: dic = self.break_processing_instructions(item, dic) proc_instr = proc_instr.getprevious() if 'xml' in dic: self.set_encoding(dic['xml']) del dic['xml'] self.processing_instructions = dic def set_encoding(self, attr_str): """ Find the encoding in the xml declaration and save it as a string :param attr_str: All attributes as a string e.g. "foo1="bar1" foo2="bar2" foo3="bar3" ... 
foo_n="bar_n"" """ attr_str = attr_str.replace(" = ", "=") attr_list = attr_str.split() for item in attr_list: name_value = item.split("\"=") name = name_value[0].lower() value = name_value[1] if name == "encoding": self.encoding = value return self.encoding = None def _create_unique_key(self, dictionary, name, numb=0): """ Create a unique key value for any dictionary to prevent overwriting Recurses until a unique key value is found. :param dictionary: A dictionary with any number of entries :param name: The index of the item to be added to dictionary :param numb: The number to be appended to the name, starts at 0 """ if dictionary.get(name) is not None: numb += 1 name = name.split("_")[0] name += "_{0}".format(numb) name = self._create_unique_key(dictionary, name, numb) return name def create_tree(self, root): """ Create an element tree for processing from an etree element :param root: etree Element(s) """ return etree.ElementTree(root) def create_element_from_string(self, xml_string): """ Create an element from an XML string :param xml_string: A string of xml """ return etree.fromstring(xml_string) def create_element(self, name, attrib=None, nsmap=None): """ Create an XML element for writing to file :param name: The name of the element to be created """ if attrib is None: attrib = {} return etree.Element(name, attrib, nsmap) def write_text(self, elem, text): """ Write text to an etree Element :param elem: etree.Element object :param text: text to write to the element """ elem.text = text return elem def write_attribute(self, elem, attr_name, attr_value): """ Write attributes to an Element :param elem: etree.Element object :param attr_name: attribute name to write :param attr_value: attribute value to set """ attr = elem.attrib attr[attr_name] = attr_value def return_processing_instructions(self): """ Get all processing instructions saved when loading the document :param tree: etree.ElementTree object to write PIs to """ pi_list = [] if 
self.processing_instructions is not None: for key in self.processing_instructions: value = self.processing_instructions.get(key) pi_item = etree.ProcessingInstruction(key, value) pi_list.append(pi_item) return pi_list def append(self, element, tree): """ Append an etree Element to an ElementTree. :param element: etree Element to append :param tree: ElementTree object to append to """ tree = tree.append(element) return tree def ebuilder(self, parent, elementname, text=None, attrib=None): """ Use lxml E builder class with arbitrary inputs. :param parnet: The parent element to append a child to :param elementname: The name of the child in string form :param text: The element text :param attrib: A dictionary of attribute names to attribute values """ text = str(text) if attrib is None: attrib = {} elem = E(elementname, attrib, text) parent = parent.append(elem) return parent
import functools

from veros import runtime_settings as rs, runtime_state as rst
from veros.routines import CURRENT_CONTEXT

# Dimension names that are split across processes: index 0 -> x-axis grids,
# index 1 -> y-axis grids.
SCATTERED_DIMENSIONS = (("xt", "xu"), ("yt", "yu"))


def dist_context_only(function=None, *, noop_return_arg=None):
    """Decorator: run the wrapped function only in a distributed context.

    With a single process (or an unsafe distribution context) the call is a
    no-op; `noop_return_arg` selects which positional argument to pass back
    unchanged in that case (None -> return None).  Usable both bare and with
    keyword arguments.
    """
    def decorator(function):
        @functools.wraps(function)
        def dist_context_only_wrapper(*args, **kwargs):
            if rst.proc_num == 1 or not CURRENT_CONTEXT.is_dist_safe:
                # no-op for sequential execution
                if noop_return_arg is None:
                    return None

                # return input array unchanged
                return args[noop_return_arg]

            return function(*args, **kwargs)

        return dist_context_only_wrapper

    if function is not None:
        # used as a bare decorator (no arguments)
        return decorator(function)

    return decorator


def send(buf, dest, comm, tag=None):
    """Point-to-point send; threads the mpi4jax token on the JAX backend."""
    kwargs = {}
    if tag is not None:
        kwargs.update(tag=tag)

    if rs.backend == "jax":
        from mpi4jax import send

        token = CURRENT_CONTEXT.mpi4jax_token
        new_token = send(buf, dest=dest, comm=comm, token=token, **kwargs)
        CURRENT_CONTEXT.mpi4jax_token = new_token
    else:
        # mpi4py needs a contiguous buffer
        comm.Send(ascontiguousarray(buf), dest=dest, **kwargs)


def recv(buf, source, comm, tag=None):
    """Point-to-point receive into (a copy of) `buf`; returns the result."""
    kwargs = {}
    if tag is not None:
        kwargs.update(tag=tag)

    if rs.backend == "jax":
        from mpi4jax import recv

        token = CURRENT_CONTEXT.mpi4jax_token
        buf, new_token = recv(buf, source=source, comm=comm, token=token, **kwargs)
        CURRENT_CONTEXT.mpi4jax_token = new_token
        return buf

    # copy so the caller's array is not mutated in place
    buf = buf.copy()
    comm.Recv(buf, source=source, **kwargs)
    return buf


def sendrecv(sendbuf, recvbuf, source, dest, comm, sendtag=None, recvtag=None):
    """Combined send+receive (deadlock-free pairwise exchange)."""
    kwargs = {}
    if sendtag is not None:
        kwargs.update(sendtag=sendtag)
    if recvtag is not None:
        kwargs.update(recvtag=recvtag)

    if rs.backend == "jax":
        from mpi4jax import sendrecv

        token = CURRENT_CONTEXT.mpi4jax_token
        recvbuf, new_token = sendrecv(sendbuf, recvbuf, source=source, dest=dest,
                                      comm=comm, token=token, **kwargs)
        CURRENT_CONTEXT.mpi4jax_token = new_token
        return recvbuf

    recvbuf = recvbuf.copy()
    comm.Sendrecv(sendbuf=ascontiguousarray(sendbuf), recvbuf=recvbuf,
                  source=source, dest=dest, **kwargs)
    return recvbuf


def bcast(buf, comm, root=0):
    """Broadcast `buf` from `root` to all ranks."""
    if rs.backend == "jax":
        from mpi4jax import bcast

        token = CURRENT_CONTEXT.mpi4jax_token
        buf, new_token = bcast(buf, root=root, comm=comm, token=token)
        CURRENT_CONTEXT.mpi4jax_token = new_token
        return buf

    return comm.bcast(buf, root=root)


def allreduce(buf, op, comm):
    """All-reduce `buf` with MPI op `op` across `comm`."""
    if rs.backend == "jax":
        from mpi4jax import allreduce

        token = CURRENT_CONTEXT.mpi4jax_token
        buf, new_token = allreduce(buf, op=op, comm=comm, token=token)
        CURRENT_CONTEXT.mpi4jax_token = new_token
        return buf

    from veros.core.operators import numpy as npx

    recvbuf = npx.empty_like(buf)
    comm.Allreduce(ascontiguousarray(buf), recvbuf, op=op)
    return recvbuf


def ascontiguousarray(arr):
    """C-contiguous view/copy of `arr`; only valid on the numpy backend."""
    assert rs.backend == "numpy"
    import numpy

    return numpy.ascontiguousarray(arr)


def validate_decomposition(dimensions):
    """Check that num_proc matches the communicator and divides the domain."""
    nx, ny = dimensions["xt"], dimensions["yt"]

    if rs.mpi_comm is None:
        if rs.num_proc[0] > 1 or rs.num_proc[1] > 1:
            raise RuntimeError("mpi4py is required for distributed execution")
        return

    comm_size = rs.mpi_comm.Get_size()
    proc_num = rs.num_proc[0] * rs.num_proc[1]
    if proc_num != comm_size:
        raise RuntimeError(f"number of processes ({proc_num}) does not match size of communicator ({comm_size})")

    if nx % rs.num_proc[0]:
        raise ValueError("processes do not divide domain evenly in x-direction")

    if ny % rs.num_proc[1]:
        raise ValueError("processes do not divide domain evenly in y-direction")


def get_chunk_size(nx, ny):
    """Per-process chunk size (without ghost cells)."""
    return (nx // rs.num_proc[0], ny // rs.num_proc[1])


def proc_rank_to_index(rank):
    """Map a flat MPI rank to a (x, y) process-grid index (x-major)."""
    return (rank % rs.num_proc[0], rank // rs.num_proc[0])


def proc_index_to_rank(ix, iy):
    """Inverse of proc_rank_to_index."""
    return ix + iy * rs.num_proc[0]


def get_chunk_slices(nx, ny, dim_grid, proc_idx=None, include_overlap=False):
    """Return (global_slice, local_slice) tuples for one process's chunk.

    dim_grid lists the grid identifier per array dimension (None for
    non-scattered dimensions).  With include_overlap=True the slices extend
    into the 2-cell ghost region (4 cells at the outer domain boundary).
    """
    if not dim_grid:
        return Ellipsis, Ellipsis

    if proc_idx is None:
        proc_idx = proc_rank_to_index(rst.proc_rank)

    px, py = proc_idx
    nxl, nyl = get_chunk_size(nx, ny)

    if include_overlap:
        # interior chunks start 2 cells in; boundary chunks include the full
        # 2-cell halo on the domain edge
        sxl = 0 if px == 0 else 2
        sxu = nxl + 4 if (px + 1) == rs.num_proc[0] else nxl + 2
        syl = 0 if py == 0 else 2
        syu = nyl + 4 if (py + 1) == rs.num_proc[1] else nyl + 2
    else:
        sxl = syl = 0
        sxu = nxl
        syu = nyl

    global_slice, local_slice = [], []

    for dim in dim_grid:
        if dim in SCATTERED_DIMENSIONS[0]:
            global_slice.append(slice(sxl + px * nxl, sxu + px * nxl))
            local_slice.append(slice(sxl, sxu))
        elif dim in SCATTERED_DIMENSIONS[1]:
            global_slice.append(slice(syl + py * nyl, syu + py * nyl))
            local_slice.append(slice(syl, syu))
        else:
            # non-scattered dimension: take everything
            global_slice.append(slice(None))
            local_slice.append(slice(None))

    return tuple(global_slice), tuple(local_slice)


def get_process_neighbors(cyclic=False):
    """Map of direction name -> neighbor rank (or None at a domain edge).

    Only the x-direction wraps around when cyclic=True.
    """
    this_x, this_y = proc_rank_to_index(rst.proc_rank)

    if this_x == 0:
        if cyclic:
            west = rs.num_proc[0] - 1
        else:
            west = None
    else:
        west = this_x - 1

    if this_x == rs.num_proc[0] - 1:
        if cyclic:
            east = 0
        else:
            east = None
    else:
        east = this_x + 1

    south = this_y - 1 if this_y != 0 else None
    north = this_y + 1 if this_y != (rs.num_proc[1] - 1) else None

    neighbors = dict(
        # direct neighbors
        west=(west, this_y),
        south=(this_x, south),
        east=(east, this_y),
        north=(this_x, north),
        # corners
        southwest=(west, south),
        southeast=(east, south),
        northeast=(east, north),
        northwest=(west, north),
    )

    global_neighbors = {k: proc_index_to_rank(*i) if None not in i else None for k, i in neighbors.items()}
    return global_neighbors


@dist_context_only(noop_return_arg=0)
def exchange_overlap(arr, var_grid, cyclic):
    """Exchange the 2-cell ghost regions of `arr` with neighboring processes.

    var_grid gives the grid identifiers of arr's leading dimensions; cyclic
    enables wrap-around in x.  Send/recv orders are rotated against each
    other so that every pairwise exchange matches up without deadlock.
    """
    from veros.core.operators import numpy as npx, update, at

    # start west, go clockwise
    send_order = (
        "west",
        "northwest",
        "north",
        "northeast",
        "east",
        "southeast",
        "south",
        "southwest",
    )

    # start east, go clockwise
    recv_order = (
        "east",
        "southeast",
        "south",
        "southwest",
        "west",
        "northwest",
        "north",
        "northeast",
    )

    if len(var_grid) < 2:
        d1, d2 = var_grid[0], None
    else:
        d1, d2 = var_grid[:2]

    if d1 not in SCATTERED_DIMENSIONS[0] and d1 not in SCATTERED_DIMENSIONS[1] and d2 not in SCATTERED_DIMENSIONS[1]:
        # neither x nor y dependent, nothing to do
        return arr

    proc_neighbors = get_process_neighbors(cyclic)

    if d1 in SCATTERED_DIMENSIONS[0] and d2 in SCATTERED_DIMENSIONS[1]:
        # 2D decomposition: exchange edges and corners
        overlap_slices_from = dict(
            west=(slice(2, 4), slice(0, None), Ellipsis),
            south=(slice(0, None), slice(2, 4), Ellipsis),
            east=(slice(-4, -2), slice(0, None), Ellipsis),
            north=(slice(0, None), slice(-4, -2), Ellipsis),
            southwest=(slice(2, 4), slice(2, 4), Ellipsis),
            southeast=(slice(-4, -2), slice(2, 4), Ellipsis),
            northeast=(slice(-4, -2), slice(-4, -2), Ellipsis),
            northwest=(slice(2, 4), slice(-4, -2), Ellipsis),
        )

        overlap_slices_to = dict(
            west=(slice(0, 2), slice(0, None), Ellipsis),
            south=(slice(0, None), slice(0, 2), Ellipsis),
            east=(slice(-2, None), slice(0, None), Ellipsis),
            north=(slice(0, None), slice(-2, None), Ellipsis),
            southwest=(slice(0, 2), slice(0, 2), Ellipsis),
            southeast=(slice(-2, None), slice(0, 2), Ellipsis),
            northeast=(slice(-2, None), slice(-2, None), Ellipsis),
            northwest=(slice(0, 2), slice(-2, None), Ellipsis),
        )
    else:
        # 1D decomposition: only two directions are exchanged
        if d1 in SCATTERED_DIMENSIONS[0]:
            send_order = ("west", "east")
            recv_order = ("east", "west")
        elif d1 in SCATTERED_DIMENSIONS[1]:
            send_order = ("north", "south")
            recv_order = ("south", "north")
        else:
            raise NotImplementedError()

        overlap_slices_from = dict(
            west=(slice(2, 4), Ellipsis),
            south=(slice(2, 4), Ellipsis),
            east=(slice(-4, -2), Ellipsis),
            north=(slice(-4, -2), Ellipsis),
        )

        overlap_slices_to = dict(
            west=(slice(0, 2), Ellipsis),
            south=(slice(0, 2), Ellipsis),
            east=(slice(-2, None), Ellipsis),
            north=(slice(-2, None), Ellipsis),
        )

    for send_dir, recv_dir in zip(send_order, recv_order):
        send_proc = proc_neighbors[send_dir]
        recv_proc = proc_neighbors[recv_dir]

        if send_proc is None and recv_proc is None:
            continue

        recv_idx = overlap_slices_to[recv_dir]
        recv_arr = npx.empty_like(arr[recv_idx])

        send_idx = overlap_slices_from[send_dir]
        send_arr = arr[send_idx]

        if send_proc is None:
            # only receiving on this side
            recv_arr = recv(recv_arr, recv_proc, rs.mpi_comm)
            arr = update(arr, at[recv_idx], recv_arr)
        elif recv_proc is None:
            # only sending on this side
            send(send_arr, send_proc, rs.mpi_comm)
        else:
            recv_arr = sendrecv(send_arr, recv_arr, source=recv_proc, dest=send_proc, comm=rs.mpi_comm)
            arr = update(arr, at[recv_idx], recv_arr)

    return arr


def _memoize(function):
    """Memoize by argument tuple; MPI Comms are keyed by their raw handle."""
    cached = {}

    @functools.wraps(function)
    def memoized(*args):
        from mpi4py import MPI

        # MPI Comms are not hashable, so we use the underlying handle instead
        cache_args = tuple(MPI._handleof(arg) if isinstance(arg, MPI.Comm) else arg for arg in args)

        if cache_args not in cached:
            cached[cache_args] = function(*args)

        return cached[cache_args]

    return memoized


@_memoize
def _mpi_comm_along_axis(comm, procs, rank):
    """Split `comm` into sub-communicators (cached — Comm.Split is collective)."""
    return comm.Split(procs, rank)


@dist_context_only(noop_return_arg=0)
def _reduce(arr, op, axis=None):
    """Reduce `arr` over all processes, or along one process-grid axis."""
    from veros.core.operators import numpy as npx

    if axis is None:
        comm = rs.mpi_comm
    else:
        assert axis in (0, 1)
        pi = proc_rank_to_index(rst.proc_rank)
        other_axis = 1 - axis
        # group processes that share the same index along the other axis
        comm = _mpi_comm_along_axis(rs.mpi_comm, pi[other_axis], rst.proc_rank)

    if npx.isscalar(arr):
        # promote scalars to 1-element arrays for the MPI call
        squeeze = True
        arr = npx.array([arr])
    else:
        squeeze = False

    res = allreduce(arr, op=op, comm=comm)

    if squeeze:
        res = res[0]

    return res


@dist_context_only(noop_return_arg=0)
def global_and(arr, axis=None):
    """Logical AND across processes."""
    from mpi4py import MPI

    return _reduce(arr, MPI.LAND, axis=axis)


@dist_context_only(noop_return_arg=0)
def global_or(arr, axis=None):
    """Logical OR across processes."""
    from mpi4py import MPI

    return _reduce(arr, MPI.LOR, axis=axis)


@dist_context_only(noop_return_arg=0)
def global_max(arr, axis=None):
    """Maximum across processes."""
    from mpi4py import MPI

    return _reduce(arr, MPI.MAX, axis=axis)


@dist_context_only(noop_return_arg=0)
def global_min(arr, axis=None):
    """Minimum across processes."""
    from mpi4py import MPI

    return _reduce(arr, MPI.MIN, axis=axis)


@dist_context_only(noop_return_arg=0)
def global_sum(arr, axis=None):
    """Sum across processes."""
    from mpi4py import MPI

    return _reduce(arr, MPI.SUM, axis=axis)


@dist_context_only(noop_return_arg=2)
def _gather_1d(nx, ny, arr, dim):
    """Gather chunks scattered along one axis onto rank 0.

    Only processes in the first row/column of the process grid participate;
    non-root participants return their input unchanged.
    """
    from veros.core.operators import numpy as npx, update, at

    assert dim in (0, 1)

    otherdim = 1 - dim
    pi = proc_rank_to_index(rst.proc_rank)
    if pi[otherdim] != 0:
        return arr

    dim_grid = ["xt" if dim == 0 else "yt"] + [None] * (arr.ndim - 1)
    gidx, idx = get_chunk_slices(nx, ny, dim_grid, include_overlap=True)
    sendbuf = arr[idx]

    if rst.proc_rank == 0:
        buffer_list = []
        for proc in range(1, rst.proc_num):
            pi = proc_rank_to_index(proc)
            if pi[otherdim] != 0:
                continue
            idx_g, idx_l = get_chunk_slices(nx, ny, dim_grid, include_overlap=True, proc_idx=pi)
            recvbuf = npx.empty_like(arr[idx_l])
            recvbuf = recv(recvbuf, source=proc, tag=20, comm=rs.mpi_comm)
            buffer_list.append((idx_g, recvbuf))

        # global extent includes the 4 ghost cells along the gathered axis
        out_shape = ((nx + 4, ny + 4)[dim],) + arr.shape[1:]
        out = npx.empty(out_shape, dtype=arr.dtype)
        out = update(out, at[gidx], sendbuf)

        for idx, val in buffer_list:
            out = update(out, at[idx], val)

        return out
    else:
        send(sendbuf, dest=0, tag=20, comm=rs.mpi_comm)
        return arr


@dist_context_only(noop_return_arg=2)
def _gather_xy(nx, ny, arr):
    """Gather a 2D-decomposed array onto rank 0 (others return input)."""
    from veros.core.operators import numpy as npx, update, at

    nxi, nyi = get_chunk_size(nx, ny)
    assert arr.shape[:2] == (nxi + 4, nyi + 4), arr.shape

    dim_grid = ["xt", "yt"] + [None] * (arr.ndim - 2)
    gidx, idx = get_chunk_slices(nx, ny, dim_grid, include_overlap=True)
    sendbuf = arr[idx]

    if rst.proc_rank == 0:
        buffer_list = []
        for proc in range(1, rst.proc_num):
            idx_g, idx_l = get_chunk_slices(nx, ny, dim_grid, include_overlap=True, proc_idx=proc_rank_to_index(proc))
            recvbuf = npx.empty_like(arr[idx_l])
            recvbuf = recv(recvbuf, source=proc, tag=30, comm=rs.mpi_comm)
            buffer_list.append((idx_g, recvbuf))

        out_shape = (nx + 4, ny + 4) + arr.shape[2:]
        out = npx.empty(out_shape, dtype=arr.dtype)
        out = update(out, at[gidx], sendbuf)

        for idx, val in buffer_list:
            out = update(out, at[idx], val)

        return out

    send(sendbuf, dest=0, tag=30, comm=rs.mpi_comm)
    return arr


@dist_context_only(noop_return_arg=0)
def gather(arr, dimensions, var_grid):
    """Gather a distributed array onto rank 0, dispatching on var_grid."""
    nx, ny = dimensions["xt"], dimensions["yt"]

    if var_grid is None:
        return arr

    if len(var_grid) < 2:
        d1, d2 = var_grid[0], None
    else:
        d1, d2 = var_grid[:2]

    if d1 not in SCATTERED_DIMENSIONS[0] and d1 not in SCATTERED_DIMENSIONS[1] and d2 not in SCATTERED_DIMENSIONS[1]:
        # neither x nor y dependent, nothing to do
        return arr

    if d1 in SCATTERED_DIMENSIONS[0] and d2 not in SCATTERED_DIMENSIONS[1]:
        # only x dependent
        return _gather_1d(nx, ny, arr, 0)

    elif d1 in SCATTERED_DIMENSIONS[1]:
        # only y dependent
        return _gather_1d(nx, ny, arr, 1)

    elif d1 in SCATTERED_DIMENSIONS[0] and d2 in SCATTERED_DIMENSIONS[1]:
        # x and y dependent
        return _gather_xy(nx, ny, arr)

    else:
        raise NotImplementedError()


@dist_context_only(noop_return_arg=0)
def _scatter_constant(arr):
    """Scatter a non-decomposed value by broadcasting it from rank 0."""
    return bcast(arr, rs.mpi_comm, root=0)


@dist_context_only(noop_return_arg=2)
def _scatter_1d(nx, ny, arr, dim):
    """Scatter a global array from rank 0 along one decomposed axis."""
    from veros.core.operators import numpy as npx, update, at

    assert dim in (0, 1)

    out_nx = get_chunk_size(nx, ny)[dim]
    dim_grid = ["xt" if dim == 0 else "yt"] + [None] * (arr.ndim - 1)
    _, local_slice = get_chunk_slices(nx, ny, dim_grid, include_overlap=True)

    if rst.proc_rank == 0:
        recvbuf = arr[local_slice]

        for proc in range(1, rst.proc_num):
            global_slice, _ = get_chunk_slices(
                nx, ny, dim_grid, include_overlap=True, proc_idx=proc_rank_to_index(proc)
            )
            sendbuf = arr[global_slice]
            send(sendbuf, dest=proc, tag=40, comm=rs.mpi_comm)

        # arr changes shape in main process
        arr = npx.zeros((out_nx + 4,) + arr.shape[1:], dtype=arr.dtype)
    else:
        recvbuf = recv(arr[local_slice], source=0, tag=40, comm=rs.mpi_comm)

    arr = update(arr, at[local_slice], recvbuf)
    # fill ghost cells with data from the new neighbors
    arr = exchange_overlap(arr, ["xt" if dim == 0 else "yt"], cyclic=False)
    return arr


@dist_context_only(noop_return_arg=2)
def _scatter_xy(nx, ny, arr):
    """Scatter a global array from rank 0 across the 2D process grid."""
    from veros.core.operators import numpy as npx, update, at

    nxi, nyi = get_chunk_size(nx, ny)

    dim_grid = ["xt", "yt"] + [None] * (arr.ndim - 2)
    _, local_slice = get_chunk_slices(nx, ny, dim_grid, include_overlap=True)

    if rst.proc_rank == 0:
        recvbuf = arr[local_slice]

        for proc in range(1, rst.proc_num):
            global_slice, _ = get_chunk_slices(
                nx, ny, dim_grid, include_overlap=True, proc_idx=proc_rank_to_index(proc)
            )
            sendbuf = arr[global_slice]
            send(sendbuf, dest=proc, tag=50, comm=rs.mpi_comm)

        # arr changes shape in main process
        arr = npx.empty((nxi + 4, nyi + 4) + arr.shape[2:], dtype=arr.dtype)
    else:
        recvbuf = npx.empty_like(arr[local_slice])
        recvbuf = recv(recvbuf, source=0, tag=50, comm=rs.mpi_comm)

    arr = update(arr, at[local_slice], recvbuf)
    # fill ghost cells with data from the new neighbors
    arr = exchange_overlap(arr, ["xt", "yt"], cyclic=False)
    return arr


@dist_context_only(noop_return_arg=0)
def scatter(arr, dimensions, var_grid):
    """Scatter a global array from rank 0, dispatching on var_grid."""
    from veros.core.operators import numpy as npx

    if var_grid is None:
        return _scatter_constant(arr)

    nx, ny = dimensions["xt"], dimensions["yt"]

    if len(var_grid) < 2:
        d1, d2 = var_grid[0], None
    else:
        d1, d2 = var_grid[:2]

    arr = npx.asarray(arr)

    if d1 not in SCATTERED_DIMENSIONS[0] and d1 not in SCATTERED_DIMENSIONS[1] and d2 not in SCATTERED_DIMENSIONS[1]:
        # neither x nor y dependent
        return _scatter_constant(arr)

    if d1 in SCATTERED_DIMENSIONS[0] and d2 not in SCATTERED_DIMENSIONS[1]:
        # only x dependent
        return _scatter_1d(nx, ny, arr, 0)

    elif d1 in SCATTERED_DIMENSIONS[1]:
        # only y dependent
        return _scatter_1d(nx, ny, arr, 1)

    elif d1 in SCATTERED_DIMENSIONS[0] and d2 in SCATTERED_DIMENSIONS[1]:
        # x and y dependent
        return _scatter_xy(nx, ny, arr)

    else:
        raise NotImplementedError("unreachable")


@dist_context_only
def barrier():
    """Block until all processes reach this point (no-op when sequential)."""
    rs.mpi_comm.barrier()


@dist_context_only
def abort():
    """Abort all MPI processes (no-op when sequential)."""
    rs.mpi_comm.Abort()