code
stringlengths
2
1.05M
repo_name
stringlengths
5
104
path
stringlengths
4
251
language
stringclasses
1 value
license
stringclasses
15 values
size
int32
2
1.05M
"""ThreatConnect TI Campaign""" # standard library from typing import TYPE_CHECKING # first-party from tcex.api.tc.v2.threat_intelligence.mappings.group.group import Group if TYPE_CHECKING: # first-party from tcex.api.tc.v2.threat_intelligence.threat_intelligence import ThreatIntelligence class Campaign(Group): """Unique API calls for Campaign API Endpoints Args: ti (ThreatIntelligence): An instance of the ThreatIntelligence Class. name (str, kwargs): [Required for Create] The name for this Group. owner (str, kwargs): The name for this Group. Default to default Org when not provided first_seen (str, kwargs): The first seen datetime expression for this Group. """ def __init__(self, ti: 'ThreatIntelligence', **kwargs): """Initialize Class Properties.""" super().__init__( ti, sub_type='Campaign', api_entity='campaign', api_branch='campaigns', **kwargs ) def first_seen(self, first_seen): """Update the campaign with the new first_seen date. Args: first_seen (str): The first_seen date. Converted to %Y-%m-%dT%H:%M:%SZ date format Returns: requests.Response: The response from the API call. """ if not self.can_update(): self._handle_error(910, [self.type]) first_seen = self._utils.any_to_datetime(first_seen).strftime('%Y-%m-%dT%H:%M:%SZ') self._data['firstSeen'] = first_seen request = {'firstSeen': first_seen} return self.tc_requests.update(self.api_type, self.api_branch, self.unique_id, request)
ThreatConnect-Inc/tcex
tcex/api/tc/v2/threat_intelligence/mappings/group/group_types/campaign.py
Python
apache-2.0
1,625
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 Nebula, Inc.
# Copyright 2013 Alessio Ababilov
# Copyright 2013 Grid Dynamics
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from marconiclient.common.apiclient import exceptions
from marconiclient.openstack.common import timeutils


_logger = logging.getLogger(__name__)


class AuthResponse(dict):
    """An object for encapsulating a raw authentication response from keystone.

    The class provides methods for extracting useful values from that token.
    """

    @property
    def expires(self):
        """Returns the token expiration (as datetime object)

        :returns: datetime
        """
        try:
            return timeutils.parse_isotime(self['access']['token']['expires'])
        except KeyError:
            return None

    @property
    def token(self):
        """Returns the token_id associated with the auth request.

        :returns: str
        """
        try:
            return self['access']['token']['id']
        except KeyError:
            return None

    @property
    def username(self):
        """Returns the username associated with the authentication request.

        Follows the pattern defined in the V2 API of first looking for 'name',
        returning that if available, and falling back to 'username' if name
        is unavailable.

        :returns: str
        """
        try:
            return self['access']['user']['name']
        except KeyError:
            pass
        try:
            return self['access']['user']['username']
        except KeyError:
            return None

    @property
    def user_id(self):
        """Returns the user id associated with the authentication request.

        :returns: str
        """
        try:
            return self['access']['user']['id']
        except KeyError:
            return None

    @property
    def tenant_name(self):
        """Returns the tenant name associated with the authentication request.

        :returns: str
        """
        try:
            return self['access']['token']['tenant']['name']
        except KeyError:
            return None

    @property
    def project_name(self):
        """Synonym for tenant_name."""
        return self.tenant_name

    @property
    def tenant_id(self):
        """Returns the tenant id associated with the authentication request.

        :returns: str
        """
        try:
            return self['access']['token']['tenant']['id']
        except KeyError:
            return None

    @property
    def project_id(self):
        """Synonym for tenant_id."""
        return self.tenant_id

    @property
    def scoped(self):
        """Checks if the authorization token is scoped to a tenant.

        Additionally verifies that there is a populated service catalog.

        :returns: bool
        """
        try:
            if (self['access']['serviceCatalog'] and
                    self['access']['token']['tenant']):
                return True
        except KeyError:
            pass
        return False

    def filter_endpoints(self, endpoint_type=None, service_type=None,
                         service_name=None, filter_attrs=None):
        """Returns a list of endpoints which match provided criteria.

        :param endpoint_type: only keep endpoints exposing this key
            (e.g. 'publicURL').
        :param service_type: only consider services of this type.
        :param service_name: only consider services with this name.
        :param filter_attrs: dict of attribute name -> value; an endpoint
            must match all of them (case-insensitively) to be kept.
        :returns: list of endpoint dicts
        """
        filter_attrs = filter_attrs or {}
        matching_endpoints = []

        def add_if_appropriate(endpoint):
            # Ignore 1.0 compute endpoints
            if (endpoint.get("serviceType") == 'compute' and
                    endpoint.get('versionId', '2') not in ('1.1', '2')):
                return
            if endpoint_type and endpoint_type not in endpoint:
                return
            # BUG FIX: use items() (iteritems() is Python-2-only) and treat a
            # missing attribute as a non-match instead of raising
            # AttributeError on None.lower().
            for k, v in filter_attrs.items():
                attr = endpoint.get(k)
                if attr is None or attr.lower() != v.lower():
                    return
            matching_endpoints.append(endpoint)

        if 'endpoints' in self:
            # We have a bastardized service catalog. Treat it special. :/
            for endpoint in self['endpoints']:
                add_if_appropriate(endpoint)
        elif 'access' in self and 'serviceCatalog' in self['access']:
            # Full catalog ...
            for service in self['access']['serviceCatalog']:
                if service_type and service.get("type") != service_type:
                    continue
                if service_name and service.get('name') != service_name:
                    continue
                for endpoint in service['endpoints']:
                    # Propagate the service identity down onto the endpoint so
                    # callers (and filter_attrs) can match on it.
                    endpoint["serviceName"] = service.get("name")
                    endpoint["serviceType"] = service.get("type")
                    add_if_appropriate(endpoint)

        return matching_endpoints

    def url_for(self, endpoint_type, service_type,
                service_name=None, filter_attrs=None):
        """Returns a unique endpoint which matches the provided criteria.

        :raises exceptions.EndpointNotFound: no endpoint matched.
        :raises exceptions.AmbiguousEndpoints: more than one endpoint matched.
        :returns: str -- the URL stored under `endpoint_type`.
        """
        filter_attrs = filter_attrs or {}
        matching_endpoints = self.filter_endpoints(
            endpoint_type, service_type, service_name, filter_attrs)
        if not matching_endpoints:
            raise exceptions.EndpointNotFound(
                "Cannot find requested %s endpoint" % service_type)
        elif len(matching_endpoints) > 1:
            raise exceptions.AmbiguousEndpoints(
                endpoints=matching_endpoints)
        else:
            return matching_endpoints[0][endpoint_type]
aababilov/python-marconiclient
marconiclient/common/apiclient/auth/response.py
Python
apache-2.0
6,051
# Generates alternating frames of a checkerboard pattern. Q_STARTING_INDEX = 150 UNIVERSE_LIGHTS = 144 #144 for side 1, #116 for side 2 flip = 0 for i in range(1,200): # 5 seconds * 40 / second (frame) print "Record Cue " + str(Q_STARTING_INDEX + i) for j in range (1, UNIVERSE_LIGHTS * 3, 1): # 3 channels / light (channel) value = 255 if flip else 0 flip = not flip print "C"+ str(j)+ " @ #"+str(value)+";" flip = not flip # switch the checkerboard for the next frame print "Record Stop"
ScienceWorldCA/domelights
backend/scripts/checkerboard.py
Python
apache-2.0
510
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq map service`."""

from aquilon.worker.broker import BrokerCommand
from aquilon.aqdb.model import (Personality, HostEnvironment, ServiceMap,
                                ServiceInstance, NetworkEnvironment)
from aquilon.aqdb.model.host_environment import Production
from aquilon.worker.dbwrappers.change_management import (validate_prod_personality,
                                                         enforce_justification)
from aquilon.worker.dbwrappers.location import get_location
from aquilon.worker.dbwrappers.network import get_network_byip


class CommandMapService(BrokerCommand):
    """Broker command backing `aq map service`.

    Maps a service instance to a location (and optionally a network), scoped
    either to a personality or to a host environment.
    """

    required_parameters = ["service", "instance"]

    def doit(self, session, dbmap, dbinstance, dblocation, dbnetwork,
             dbpersona, dbenv):
        """Create the ServiceMap row unless an identical mapping already exists.

        `dbmap` is the result of the duplicate lookup in render(); a truthy
        value means the mapping is already present, making this a no-op
        (the command is idempotent).
        """
        if not dbmap:
            dbmap = ServiceMap(service_instance=dbinstance,
                               location=dblocation, network=dbnetwork,
                               personality=dbpersona, host_environment=dbenv)
            session.add(dbmap)

    def render(self, session, logger, service, instance, archetype,
               personality, host_environment, networkip, justification,
               reason, user, **kwargs):
        """Resolve command arguments to DB objects and create the mapping.

        :param service/instance: identify the ServiceInstance (must exist).
        :param networkip: optional IP selecting a network scope.
        :param personality/archetype: optional personality scope; all of its
            stages go through production change-management validation.
        :param host_environment: optional environment scope; justification is
            enforced for Production.
        :raises: whatever the get_unique/validation helpers raise on bad input.
        """
        dbinstance = ServiceInstance.get_unique(session, service=service,
                                                name=instance, compel=True)
        dblocation = get_location(session, **kwargs)
        if networkip:
            # Resolve the network within the (default) network environment.
            dbnet_env = NetworkEnvironment.get_unique_or_default(session)
            dbnetwork = get_network_byip(session, networkip, dbnet_env)
        else:
            dbnetwork = None
        dbpersona = None
        dbenv = None
        if personality:
            dbpersona = Personality.get_unique(session, name=personality,
                                               archetype=archetype,
                                               compel=True)
            # Every stage of the personality must pass prod change management.
            for dbstage in dbpersona.stages.values():
                validate_prod_personality(dbstage, user, justification,
                                          reason, logger)
        elif host_environment:
            dbenv = HostEnvironment.get_instance(session, host_environment)
            # Only the Production environment requires justification.
            if isinstance(dbenv, Production):
                enforce_justification(user, justification, reason, logger)
        else:
            # No explicit scope: always require justification.
            enforce_justification(user, justification, reason, logger)

        # Look for an identical mapping so doit() can stay idempotent.
        q = session.query(ServiceMap)
        q = q.filter_by(service_instance=dbinstance, location=dblocation,
                        network=dbnetwork, personality=dbpersona,
                        host_environment=dbenv)
        dbmap = q.first()

        self.doit(session, dbmap, dbinstance, dblocation, dbnetwork,
                  dbpersona, dbenv)

        session.flush()
        return
guillaume-philippon/aquilon
lib/aquilon/worker/commands/map_service.py
Python
apache-2.0
3,474
# coding=utf-8 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import os from pants.base.build_environment import get_buildroot from pants.base.workunit import WorkUnit from pants.util.dirutil import safe_mkdir from pants.contrib.cpp.tasks.cpp_task import CppTask class CppCompile(CppTask): """Compiles object files from C++ sources.""" @classmethod def register_options(cls, register): super(CppCompile, cls).register_options(register) register('--cc-options', help='Append these options to the compiler command line.') register('--cc-extensions', default=['cc', 'cxx', 'cpp'], help=('The list of extensions (without the .) to consider when ' 'determining if a file is a C++ source file.')) @classmethod def product_types(cls): return ['objs'] @property def cache_target_dirs(self): return True def execute(self): """Compile all sources in a given target to object files.""" def is_cc(source): _, ext = os.path.splitext(source) return ext[1:] in self.get_options().cc_extensions targets = self.context.targets(self.is_cpp) # Compile source files to objects. with self.invalidated(targets, invalidate_dependents=True) as invalidation_check: obj_mapping = self.context.products.get('objs') for vt in invalidation_check.all_vts: for source in vt.target.sources_relative_to_buildroot(): if is_cc(source): if not vt.valid: with self.context.new_workunit(name='cpp-compile', labels=[WorkUnit.MULTITOOL]): # TODO: Parallelise the compilation. # TODO: Only recompile source files that have changed since the # object file was last written. Also use the output from # gcc -M to track dependencies on headers. 
self._compile(vt.target, vt.results_dir, source) objpath = self._objpath(vt.target, vt.results_dir, source) obj_mapping.add(vt.target, vt.results_dir).append(objpath) def _objpath(self, target, results_dir, source): abs_source_root = os.path.join(get_buildroot(), target.target_base) abs_source = os.path.join(get_buildroot(), source) rel_source = os.path.relpath(abs_source, abs_source_root) root, _ = os.path.splitext(rel_source) obj_name = root + '.o' return os.path.join(results_dir, obj_name) def _compile(self, target, results_dir, source): """Compile given source to an object file.""" obj = self._objpath(target, results_dir, source) abs_source = os.path.join(get_buildroot(), source) # TODO: include dir should include dependent work dir when headers are copied there. include_dirs = [] for dep in target.dependencies: if self.is_library(dep): include_dirs.extend([os.path.join(get_buildroot(), dep.target_base)]) cmd = [self.cpp_toolchain.compiler] cmd.extend(['-c']) cmd.extend(('-I{0}'.format(i) for i in include_dirs)) cmd.extend(['-o' + obj, abs_source]) if self.get_options().cc_options != None: cmd.extend([self.get_options().cc_options]) # TODO: submit_async_work with self.run_command, [(cmd)] as a Work object. with self.context.new_workunit(name='cpp-compile', labels=[WorkUnit.COMPILER]) as workunit: self.run_command(cmd, workunit) self.context.log.info('Built c++ object: {0}'.format(obj))
digwanderlust/pants
contrib/cpp/src/python/pants/contrib/cpp/tasks/cpp_compile.py
Python
apache-2.0
3,678
# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """RADOS Block Device Driver""" from __future__ import absolute_import import io import json import math import os import tempfile import urllib from eventlet import tpool from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import units import six from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.image import image_utils from cinder.openstack.common import fileutils from cinder.volume import driver try: import rados import rbd except ImportError: rados = None rbd = None LOG = logging.getLogger(__name__) rbd_opts = [ cfg.StrOpt('rbd_cluster_name', default='ceph', help='The name of ceph cluster'), cfg.StrOpt('rbd_pool', default='rbd', help='The RADOS pool where rbd volumes are stored'), cfg.StrOpt('rbd_user', default=None, help='The RADOS client name for accessing rbd volumes ' '- only set when using cephx authentication'), cfg.StrOpt('rbd_ceph_conf', default='', # default determined by librados help='Path to the ceph configuration file'), cfg.BoolOpt('rbd_flatten_volume_from_snapshot', default=False, help='Flatten volumes created from snapshots to remove ' 'dependency from volume to snapshot'), cfg.StrOpt('rbd_secret_uuid', default=None, help='The libvirt uuid of the secret for the rbd_user ' 'volumes'), cfg.StrOpt('volume_tmp_dir', default=None, help='Directory where temporary image files are stored ' 
'when the volume driver does not write them directly ' 'to the volume. Warning: this option is now deprecated, ' 'please use image_conversion_dir instead.'), cfg.IntOpt('rbd_max_clone_depth', default=5, help='Maximum number of nested volume clones that are ' 'taken before a flatten occurs. Set to 0 to disable ' 'cloning.'), cfg.IntOpt('rbd_store_chunk_size', default=4, help=_('Volumes will be chunked into objects of this size ' '(in megabytes).')), cfg.IntOpt('rados_connect_timeout', default=-1, help=_('Timeout value (in seconds) used when connecting to ' 'ceph cluster. If value < 0, no timeout is set and ' 'default librados value is used.')) ] CONF = cfg.CONF CONF.register_opts(rbd_opts) class RBDImageMetadata(object): """RBD image metadata to be used with RBDImageIOWrapper.""" def __init__(self, image, pool, user, conf): self.image = image self.pool = encodeutils.safe_encode(pool) self.user = encodeutils.safe_encode(user) self.conf = encodeutils.safe_encode(conf) class RBDImageIOWrapper(io.RawIOBase): """Enables LibRBD.Image objects to be treated as Python IO objects. Calling unimplemented interfaces will raise IOError. """ def __init__(self, rbd_meta): super(RBDImageIOWrapper, self).__init__() self._rbd_meta = rbd_meta self._offset = 0 def _inc_offset(self, length): self._offset += length @property def rbd_image(self): return self._rbd_meta.image @property def rbd_user(self): return self._rbd_meta.user @property def rbd_pool(self): return self._rbd_meta.pool @property def rbd_conf(self): return self._rbd_meta.conf def read(self, length=None): offset = self._offset total = self._rbd_meta.image.size() # NOTE(dosaboy): posix files do not barf if you read beyond their # length (they just return nothing) but rbd images do so we need to # return empty string if we have reached the end of the image. 
if (offset >= total): return '' if length is None: length = total if (offset + length) > total: length = total - offset self._inc_offset(length) return self._rbd_meta.image.read(int(offset), int(length)) def write(self, data): self._rbd_meta.image.write(data, self._offset) self._inc_offset(len(data)) def seekable(self): return True def seek(self, offset, whence=0): if whence == 0: new_offset = offset elif whence == 1: new_offset = self._offset + offset elif whence == 2: new_offset = self._rbd_meta.image.size() new_offset += offset else: raise IOError(_("Invalid argument - whence=%s not supported") % (whence)) if (new_offset < 0): raise IOError(_("Invalid argument")) self._offset = new_offset def tell(self): return self._offset def flush(self): try: self._rbd_meta.image.flush() except AttributeError: LOG.warning(_LW("flush() not supported in " "this version of librbd")) def fileno(self): """RBD does not have support for fileno() so we raise IOError. Raising IOError is recommended way to notify caller that interface is not supported - see http://docs.python.org/2/library/io.html#io.IOBase """ raise IOError(_("fileno() not supported by RBD()")) # NOTE(dosaboy): if IO object is not closed explicitly, Python auto closes # it which, if this is not overridden, calls flush() prior to close which # in this case is unwanted since the rbd image may have been closed prior # to the autoclean - currently triggering a segfault in librbd. def close(self): pass class RBDVolumeProxy(object): """Context manager for dealing with an existing rbd volume. This handles connecting to rados and opening an ioctx automatically, and otherwise acts like a librbd Image object. The underlying librados client and ioctx can be accessed as the attributes 'client' and 'ioctx'. 
""" def __init__(self, driver, name, pool=None, snapshot=None, read_only=False): client, ioctx = driver._connect_to_rados(pool) if snapshot is not None: snapshot = encodeutils.safe_encode(snapshot) try: self.volume = driver.rbd.Image(ioctx, encodeutils.safe_encode(name), snapshot=snapshot, read_only=read_only) except driver.rbd.Error: LOG.exception(_LE("error opening rbd image %s"), name) driver._disconnect_from_rados(client, ioctx) raise self.driver = driver self.client = client self.ioctx = ioctx def __enter__(self): return self def __exit__(self, type_, value, traceback): try: self.volume.close() finally: self.driver._disconnect_from_rados(self.client, self.ioctx) def __getattr__(self, attrib): return getattr(self.volume, attrib) class RADOSClient(object): """Context manager to simplify error handling for connecting to ceph.""" def __init__(self, driver, pool=None): self.driver = driver self.cluster, self.ioctx = driver._connect_to_rados(pool) def __enter__(self): return self def __exit__(self, type_, value, traceback): self.driver._disconnect_from_rados(self.cluster, self.ioctx) @property def features(self): features = self.cluster.conf_get('rbd_default_features') if ((features is None) or (int(features) == 0)): features = self.driver.rbd.RBD_FEATURE_LAYERING return int(features) class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD, driver.CloneableVD, driver.CloneableImageVD, driver.SnapshotVD, driver.BaseVD): """Implements RADOS block device (RBD) volume commands.""" VERSION = '1.2.0' def __init__(self, *args, **kwargs): super(RBDDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(rbd_opts) self._stats = {} # allow overrides for testing self.rados = kwargs.get('rados', rados) self.rbd = kwargs.get('rbd', rbd) # All string args used with librbd must be None or utf-8 otherwise # librbd will break. 
for attr in ['rbd_cluster_name', 'rbd_user', 'rbd_ceph_conf', 'rbd_pool']: val = getattr(self.configuration, attr) if val is not None: setattr(self.configuration, attr, encodeutils.safe_encode(val)) def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" if rados is None: msg = _('rados and rbd python libraries not found') raise exception.VolumeBackendAPIException(data=msg) # NOTE: Checking connection to ceph # RADOSClient __init__ method invokes _connect_to_rados # so no need to check for self.rados.Error here. with RADOSClient(self): pass def RBDProxy(self): return tpool.Proxy(self.rbd.RBD()) def _ceph_args(self): args = [] if self.configuration.rbd_user: args.extend(['--id', self.configuration.rbd_user]) if self.configuration.rbd_ceph_conf: args.extend(['--conf', self.configuration.rbd_ceph_conf]) if self.configuration.rbd_cluster_name: args.extend(['--cluster', self.configuration.rbd_cluster_name]) return args def _connect_to_rados(self, pool=None): LOG.debug("opening connection to ceph cluster (timeout=%s).", self.configuration.rados_connect_timeout) # NOTE (e0ne): rados is binding to C lbirary librados. # It blocks eventlet loop so we need to run it in a native # python thread. 
client = tpool.Proxy( self.rados.Rados( rados_id=self.configuration.rbd_user, clustername=self.configuration.rbd_cluster_name, conffile=self.configuration.rbd_ceph_conf)) if pool is not None: pool = encodeutils.safe_encode(pool) else: pool = self.configuration.rbd_pool try: if self.configuration.rados_connect_timeout >= 0: client.connect(timeout= self.configuration.rados_connect_timeout) else: client.connect() ioctx = client.open_ioctx(pool) return client, ioctx except self.rados.Error: msg = _("Error connecting to ceph cluster.") LOG.exception(msg) # shutdown cannot raise an exception client.shutdown() raise exception.VolumeBackendAPIException(data=msg) def _disconnect_from_rados(self, client, ioctx): # closing an ioctx cannot raise an exception ioctx.close() client.shutdown() def _get_backup_snaps(self, rbd_image): """Get list of any backup snapshots that exist on this volume. There should only ever be one but accept all since they need to be deleted before the volume can be. """ # NOTE(dosaboy): we do the import here otherwise we get import conflict # issues between the rbd driver and the ceph backup driver. These # issues only seem to occur when NOT using them together and are # triggered when the ceph backup driver imports the rbd volume driver. 
from cinder.backup.drivers import ceph return ceph.CephBackupDriver.get_backup_snaps(rbd_image) def _get_mon_addrs(self): args = ['ceph', 'mon', 'dump', '--format=json'] args.extend(self._ceph_args()) out, _ = self._execute(*args) lines = out.split('\n') if lines[0].startswith('dumped monmap epoch'): lines = lines[1:] monmap = json.loads('\n'.join(lines)) addrs = [mon['addr'] for mon in monmap['mons']] hosts = [] ports = [] for addr in addrs: host_port = addr[:addr.rindex('/')] host, port = host_port.rsplit(':', 1) hosts.append(host.strip('[]')) ports.append(port) return hosts, ports def _update_volume_stats(self): stats = { 'vendor_name': 'Open Source', 'driver_version': self.VERSION, 'storage_protocol': 'ceph', 'total_capacity_gb': 'unknown', 'free_capacity_gb': 'unknown', 'reserved_percentage': 0, } backend_name = self.configuration.safe_get('volume_backend_name') stats['volume_backend_name'] = backend_name or 'RBD' try: with RADOSClient(self) as client: ret, outbuf, _outs = client.cluster.mon_command( '{"prefix":"df", "format":"json"}', '') if ret != 0: LOG.warning(_LW('Unable to get rados pool stats.')) else: outbuf = json.loads(outbuf) pool_stats = [pool for pool in outbuf['pools'] if pool['name'] == self.configuration.rbd_pool][0]['stats'] stats['free_capacity_gb'] = ( pool_stats['max_avail'] / units.Gi) used_capacity_gb = pool_stats['bytes_used'] / units.Gi stats['total_capacity_gb'] = (stats['free_capacity_gb'] + used_capacity_gb) except self.rados.Error: # just log and return unknown capacities LOG.exception(_LE('error refreshing volume stats')) self._stats = stats def get_volume_stats(self, refresh=False): """Return the current state of the volume service. If 'refresh' is True, run the update first. """ if refresh: self._update_volume_stats() return self._stats def _get_clone_depth(self, client, volume_name, depth=0): """Returns the number of ancestral clones (if any) of the given volume. 
""" parent_volume = self.rbd.Image(client.ioctx, volume_name) try: _pool, parent, _snap = self._get_clone_info(parent_volume, volume_name) finally: parent_volume.close() if not parent: return depth # If clone depth was reached, flatten should have occurred so if it has # been exceeded then something has gone wrong. if depth > CONF.rbd_max_clone_depth: raise Exception(_("clone depth exceeds limit of %s") % (CONF.rbd_max_clone_depth)) return self._get_clone_depth(client, parent, depth + 1) def create_cloned_volume(self, volume, src_vref): """Create a cloned volume from another volume. Since we are cloning from a volume and not a snapshot, we must first create a snapshot of the source volume. The user has the option to limit how long a volume's clone chain can be by setting rbd_max_clone_depth. If a clone is made of another clone and that clone has rbd_max_clone_depth clones behind it, the source volume will be flattened. """ src_name = encodeutils.safe_encode(src_vref['name']) dest_name = encodeutils.safe_encode(volume['name']) flatten_parent = False # Do full copy if requested if CONF.rbd_max_clone_depth <= 0: with RBDVolumeProxy(self, src_name, read_only=True) as vol: vol.copy(vol.ioctx, dest_name) return # Otherwise do COW clone. with RADOSClient(self) as client: depth = self._get_clone_depth(client, src_name) # If source volume is a clone and rbd_max_clone_depth reached, # flatten the source before cloning. Zero rbd_max_clone_depth means # infinite is allowed. if depth == CONF.rbd_max_clone_depth: LOG.debug("maximum clone depth (%d) has been reached - " "flattening source volume", CONF.rbd_max_clone_depth) flatten_parent = True src_volume = self.rbd.Image(client.ioctx, src_name) try: # First flatten source volume if required. 
if flatten_parent: _pool, parent, snap = self._get_clone_info(src_volume, src_name) # Flatten source volume LOG.debug("flattening source volume %s", src_name) src_volume.flatten() # Delete parent clone snap parent_volume = self.rbd.Image(client.ioctx, parent) try: parent_volume.unprotect_snap(snap) parent_volume.remove_snap(snap) finally: parent_volume.close() # Create new snapshot of source volume clone_snap = "%s.clone_snap" % dest_name LOG.debug("creating snapshot='%s'", clone_snap) src_volume.create_snap(clone_snap) src_volume.protect_snap(clone_snap) except Exception: # Only close if exception since we still need it. src_volume.close() raise # Now clone source volume snapshot try: LOG.debug("cloning '%(src_vol)s@%(src_snap)s' to " "'%(dest)s'", {'src_vol': src_name, 'src_snap': clone_snap, 'dest': dest_name}) self.RBDProxy().clone(client.ioctx, src_name, clone_snap, client.ioctx, dest_name, features=client.features) except Exception: src_volume.unprotect_snap(clone_snap) src_volume.remove_snap(clone_snap) raise finally: src_volume.close() if volume['size'] != src_vref['size']: LOG.debug("resize volume '%(dst_vol)s' from %(src_size)d to " "%(dst_size)d", {'dst_vol': volume['name'], 'src_size': src_vref['size'], 'dst_size': volume['size']}) self._resize(volume) LOG.debug("clone created successfully") def create_volume(self, volume): """Creates a logical volume.""" size = int(volume['size']) * units.Gi LOG.debug("creating volume '%s'", volume['name']) chunk_size = CONF.rbd_store_chunk_size * units.Mi order = int(math.log(chunk_size, 2)) with RADOSClient(self) as client: self.RBDProxy().create(client.ioctx, encodeutils.safe_encode(volume['name']), size, order, old_format=False, features=client.features) def _flatten(self, pool, volume_name): LOG.debug('flattening %(pool)s/%(img)s', dict(pool=pool, img=volume_name)) with RBDVolumeProxy(self, volume_name, pool) as vol: vol.flatten() def _clone(self, volume, src_pool, src_image, src_snap): LOG.debug('cloning 
%(pool)s/%(img)s@%(snap)s to %(dst)s', dict(pool=src_pool, img=src_image, snap=src_snap, dst=volume['name'])) with RADOSClient(self, src_pool) as src_client: with RADOSClient(self) as dest_client: self.RBDProxy().clone(src_client.ioctx, encodeutils.safe_encode(src_image), encodeutils.safe_encode(src_snap), dest_client.ioctx, encodeutils.safe_encode(volume['name']), features=src_client.features) def _resize(self, volume, **kwargs): size = kwargs.get('size', None) if not size: size = int(volume['size']) * units.Gi with RBDVolumeProxy(self, volume['name']) as vol: vol.resize(size) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" self._clone(volume, self.configuration.rbd_pool, snapshot['volume_name'], snapshot['name']) if self.configuration.rbd_flatten_volume_from_snapshot: self._flatten(self.configuration.rbd_pool, volume['name']) if int(volume['size']): self._resize(volume) def _delete_backup_snaps(self, rbd_image): backup_snaps = self._get_backup_snaps(rbd_image) if backup_snaps: for snap in backup_snaps: rbd_image.remove_snap(snap['name']) else: LOG.debug("volume has no backup snaps") def _get_clone_info(self, volume, volume_name, snap=None): """If volume is a clone, return its parent info. Returns a tuple of (pool, parent, snap). A snapshot may optionally be provided for the case where a cloned volume has been flattened but it's snapshot still depends on the parent. """ try: if snap: volume.set_snap(snap) pool, parent, parent_snap = tuple(volume.parent_info()) if snap: volume.set_snap(None) # Strip the tag off the end of the volume name since it will not be # in the snap name. if volume_name.endswith('.deleted'): volume_name = volume_name[:-len('.deleted')] # Now check the snap name matches. 
if parent_snap == "%s.clone_snap" % volume_name: return pool, parent, parent_snap except self.rbd.ImageNotFound: LOG.debug("volume %s is not a clone", volume_name) volume.set_snap(None) return (None, None, None) def _delete_clone_parent_refs(self, client, parent_name, parent_snap): """Walk back up the clone chain and delete references. Deletes references i.e. deleted parent volumes and snapshots. """ parent_rbd = self.rbd.Image(client.ioctx, parent_name) parent_has_snaps = False try: # Check for grandparent _pool, g_parent, g_parent_snap = self._get_clone_info(parent_rbd, parent_name, parent_snap) LOG.debug("deleting parent snapshot %s", parent_snap) parent_rbd.unprotect_snap(parent_snap) parent_rbd.remove_snap(parent_snap) parent_has_snaps = bool(list(parent_rbd.list_snaps())) finally: parent_rbd.close() # If parent has been deleted in Cinder, delete the silent reference and # keep walking up the chain if it is itself a clone. if (not parent_has_snaps) and parent_name.endswith('.deleted'): LOG.debug("deleting parent %s", parent_name) self.RBDProxy().remove(client.ioctx, parent_name) # Now move up to grandparent if there is one if g_parent: self._delete_clone_parent_refs(client, g_parent, g_parent_snap) def delete_volume(self, volume): """Deletes a logical volume.""" # NOTE(dosaboy): this was broken by commit cbe1d5f. Ensure names are # utf-8 otherwise librbd will barf. volume_name = encodeutils.safe_encode(volume['name']) with RADOSClient(self) as client: try: rbd_image = self.rbd.Image(client.ioctx, volume_name) except self.rbd.ImageNotFound: LOG.info(_LI("volume %s no longer exists in backend"), volume_name) return clone_snap = None parent = None # Ensure any backup snapshots are deleted self._delete_backup_snaps(rbd_image) # If the volume has non-clone snapshots this delete is expected to # raise VolumeIsBusy so do so straight away. 
try: snaps = rbd_image.list_snaps() for snap in snaps: if snap['name'].endswith('.clone_snap'): LOG.debug("volume has clone snapshot(s)") # We grab one of these and use it when fetching parent # info in case the volume has been flattened. clone_snap = snap['name'] break raise exception.VolumeIsBusy(volume_name=volume_name) # Determine if this volume is itself a clone _pool, parent, parent_snap = self._get_clone_info(rbd_image, volume_name, clone_snap) finally: rbd_image.close() if clone_snap is None: LOG.debug("deleting rbd volume %s", volume_name) try: self.RBDProxy().remove(client.ioctx, volume_name) except self.rbd.ImageBusy: msg = (_("ImageBusy error raised while deleting rbd " "volume. This may have been caused by a " "connection from a client that has crashed and, " "if so, may be resolved by retrying the delete " "after 30 seconds has elapsed.")) LOG.warning(msg) # Now raise this so that volume stays available so that we # delete can be retried. raise exception.VolumeIsBusy(msg, volume_name=volume_name) except self.rbd.ImageNotFound: LOG.info(_LI("RBD volume %s not found, allowing delete " "operation to proceed."), volume_name) return # If it is a clone, walk back up the parent chain deleting # references. if parent: LOG.debug("volume is a clone so cleaning references") self._delete_clone_parent_refs(client, parent, parent_snap) else: # If the volume has copy-on-write clones we will not be able to # delete it. Instead we will keep it as a silent volume which # will be deleted when it's snapshot and clones are deleted. 
new_name = "%s.deleted" % (volume_name) self.RBDProxy().rename(client.ioctx, volume_name, new_name) def create_snapshot(self, snapshot): """Creates an rbd snapshot.""" with RBDVolumeProxy(self, snapshot['volume_name']) as volume: snap = encodeutils.safe_encode(snapshot['name']) volume.create_snap(snap) volume.protect_snap(snap) def delete_snapshot(self, snapshot): """Deletes an rbd snapshot.""" # NOTE(dosaboy): this was broken by commit cbe1d5f. Ensure names are # utf-8 otherwise librbd will barf. volume_name = encodeutils.safe_encode(snapshot['volume_name']) snap_name = encodeutils.safe_encode(snapshot['name']) with RBDVolumeProxy(self, volume_name) as volume: try: volume.unprotect_snap(snap_name) except self.rbd.ImageBusy: raise exception.SnapshotIsBusy(snapshot_name=snap_name) volume.remove_snap(snap_name) def retype(self, context, volume, new_type, diff, host): """Retypes a volume, allows QoS change only.""" LOG.debug('Retype volume request %(vol)s to be %(type)s ' '(host: %(host)s), diff %(diff)s.', { 'vol': volume['name'], 'type': new_type, 'host': host, 'diff': diff }) if volume['host'] != host['host']: LOG.error(_LE('Retype with host migration not supported')) return False if diff['encryption']: LOG.error(_LE('Retype of encryption type not supported')) return False if diff['extra_specs']: LOG.error(_LE('Retype of extra_specs not supported')) return False return True def ensure_export(self, context, volume): """Synchronously recreates an export for a logical volume.""" pass def create_export(self, context, volume): """Exports the volume.""" pass def remove_export(self, context, volume): """Removes an export for a logical volume.""" pass def initialize_connection(self, volume, connector): hosts, ports = self._get_mon_addrs() data = { 'driver_volume_type': 'rbd', 'data': { 'name': '%s/%s' % (self.configuration.rbd_pool, volume['name']), 'hosts': hosts, 'ports': ports, 'auth_enabled': (self.configuration.rbd_user is not None), 'auth_username': 
self.configuration.rbd_user, 'secret_type': 'ceph', 'secret_uuid': self.configuration.rbd_secret_uuid, } } LOG.debug('connection data: %s', data) return data def terminate_connection(self, volume, connector, **kwargs): pass def _parse_location(self, location): prefix = 'rbd://' if not location.startswith(prefix): reason = _('Not stored in rbd') raise exception.ImageUnacceptable(image_id=location, reason=reason) pieces = map(urllib.unquote, location[len(prefix):].split('/')) if any(map(lambda p: p == '', pieces)): reason = _('Blank components') raise exception.ImageUnacceptable(image_id=location, reason=reason) if len(pieces) != 4: reason = _('Not an rbd snapshot') raise exception.ImageUnacceptable(image_id=location, reason=reason) return pieces def _get_fsid(self): with RADOSClient(self) as client: return client.cluster.get_fsid() def _is_cloneable(self, image_location, image_meta): try: fsid, pool, image, snapshot = self._parse_location(image_location) except exception.ImageUnacceptable as e: LOG.debug('not cloneable: %s', six.text_type(e)) return False if self._get_fsid() != fsid: LOG.debug('%s is in a different ceph cluster', image_location) return False if image_meta['disk_format'] != 'raw': LOG.debug(("rbd image clone requires image format to be " "'raw' but image {0} is '{1}'").format( image_location, image_meta['disk_format'])) return False # check that we can read the image try: with RBDVolumeProxy(self, image, pool=pool, snapshot=snapshot, read_only=True): return True except self.rbd.Error as e: LOG.debug('Unable to open image %(loc)s: %(err)s', dict(loc=image_location, err=e)) return False def clone_image(self, context, volume, image_location, image_meta, image_service): image_location = image_location[0] if image_location else None if image_location is None or not self._is_cloneable( image_location, image_meta): return ({}, False) _prefix, pool, image, snapshot = self._parse_location(image_location) self._clone(volume, pool, image, snapshot) 
self._resize(volume) return {'provider_location': None}, True def _image_conversion_dir(self): tmpdir = (self.configuration.volume_tmp_dir or CONF.image_conversion_dir or tempfile.gettempdir()) if tmpdir == self.configuration.volume_tmp_dir: LOG.warning(_LW('volume_tmp_dir is now deprecated, please use ' 'image_conversion_dir')) # ensure temporary directory exists if not os.path.exists(tmpdir): os.makedirs(tmpdir) return tmpdir def copy_image_to_volume(self, context, volume, image_service, image_id): tmp_dir = self._image_conversion_dir() with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp: image_utils.fetch_to_raw(context, image_service, image_id, tmp.name, self.configuration.volume_dd_blocksize, size=volume['size']) self.delete_volume(volume) chunk_size = CONF.rbd_store_chunk_size * units.Mi order = int(math.log(chunk_size, 2)) # keep using the command line import instead of librbd since it # detects zeroes to preserve sparseness in the image args = ['rbd', 'import', '--pool', self.configuration.rbd_pool, '--order', order, tmp.name, volume['name'], '--new-format'] args.extend(self._ceph_args()) self._try_execute(*args) self._resize(volume) def copy_volume_to_image(self, context, volume, image_service, image_meta): tmp_dir = self._image_conversion_dir() tmp_file = os.path.join(tmp_dir, volume['name'] + '-' + image_meta['id']) with fileutils.remove_path_on_error(tmp_file): args = ['rbd', 'export', '--pool', self.configuration.rbd_pool, volume['name'], tmp_file] args.extend(self._ceph_args()) self._try_execute(*args) image_utils.upload_volume(context, image_service, image_meta, tmp_file) os.unlink(tmp_file) def backup_volume(self, context, backup, backup_service): """Create a new backup from an existing volume.""" volume = self.db.volume_get(context, backup['volume_id']) with RBDVolumeProxy(self, volume['name'], self.configuration.rbd_pool) as rbd_image: rbd_meta = RBDImageMetadata(rbd_image, self.configuration.rbd_pool, self.configuration.rbd_user, 
self.configuration.rbd_ceph_conf) rbd_fd = RBDImageIOWrapper(rbd_meta) backup_service.backup(backup, rbd_fd) LOG.debug("volume backup complete.") def restore_backup(self, context, backup, volume, backup_service): """Restore an existing backup to a new or existing volume.""" with RBDVolumeProxy(self, volume['name'], self.configuration.rbd_pool) as rbd_image: rbd_meta = RBDImageMetadata(rbd_image, self.configuration.rbd_pool, self.configuration.rbd_user, self.configuration.rbd_ceph_conf) rbd_fd = RBDImageIOWrapper(rbd_meta) backup_service.restore(backup, volume['id'], rbd_fd) LOG.debug("volume restore complete.") def extend_volume(self, volume, new_size): """Extend an existing volume.""" old_size = volume['size'] try: size = int(new_size) * units.Gi self._resize(volume, size=size) except Exception: msg = _('Failed to Extend Volume ' '%(volname)s') % {'volname': volume['name']} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug("Extend volume from %(old_size)s GB to %(new_size)s GB.", {'old_size': old_size, 'new_size': new_size}) def manage_existing(self, volume, existing_ref): """Manages an existing image. Renames the image name to match the expected name for the volume. Error checking done by manage_existing_get_size is not repeated. :param volume: volume ref info to be set :param existing_ref: existing_ref is a dictionary of the form: {'source-name': <name of rbd image>} """ # Raise an exception if we didn't find a suitable rbd image. with RADOSClient(self) as client: rbd_name = existing_ref['source-name'] self.RBDProxy().rename(client.ioctx, encodeutils.safe_encode(rbd_name), encodeutils.safe_encode(volume['name'])) def manage_existing_get_size(self, volume, existing_ref): """Return size of an existing image for manage_existing. 
:param volume: volume ref info to be set :param existing_ref: existing_ref is a dictionary of the form: {'source-name': <name of rbd image>} """ # Check that the reference is valid if 'source-name' not in existing_ref: reason = _('Reference must contain source-name element.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) rbd_name = encodeutils.safe_encode(existing_ref['source-name']) with RADOSClient(self) as client: # Raise an exception if we didn't find a suitable rbd image. try: rbd_image = self.rbd.Image(client.ioctx, rbd_name) image_size = rbd_image.size() except self.rbd.ImageNotFound: kwargs = {'existing_ref': rbd_name, 'reason': 'Specified rbd image does not exist.'} raise exception.ManageExistingInvalidReference(**kwargs) finally: rbd_image.close() # RBD image size is returned in bytes. Attempt to parse # size as a float and round up to the next integer. try: convert_size = int(math.ceil(int(image_size))) / units.Gi return convert_size except ValueError: exception_message = (_("Failed to manage existing volume " "%(name)s, because reported size " "%(size)s was not a floating-point" " number.") % {'name': rbd_name, 'size': image_size}) raise exception.VolumeBackendAPIException( data=exception_message)
julianwang/cinder
cinder/volume/drivers/rbd.py
Python
apache-2.0
40,525
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.


"""Migration of objects from SRC to DST clouds."""

from cloudferry.lib.utils import utils

CLOUD = 'cloud'
SRC, DST = 'src', 'dst'


class Migration(object):
    """Map SRC objects to the corresponding DST objects they migrated to."""

    def __init__(self, src_cloud, dst_cloud, resource):
        """Initialize the SRC/DST mapping for one resource type.

        :param src_cloud: source cloud object exposing ``resources``
        :param dst_cloud: destination cloud object exposing ``resources``
        :param resource: resource name; must be a key of
                         ``utils.RESOURCE_TYPES``
        :raises NotImplementedError: if `resource` is unknown
        """
        self.cloud = {
            SRC: src_cloud,
            DST: dst_cloud,
        }
        # Lazily-built cache: resource_type -> {src_id: dst_id}.
        self.obj_map = {}
        if resource not in utils.RESOURCE_TYPES:
            # BUG FIX: the original passed `resource` as a second argument
            # to NotImplementedError ('%s', resource), so the message was
            # never interpolated; format it explicitly instead.
            raise NotImplementedError('Unknown resource: %s' % resource)
        self.default_resource_type = utils.RESOURCE_TYPES[resource]
        self.resource = {
            SRC: self.cloud[SRC].resources.get(resource),
            DST: self.cloud[DST].resources.get(resource),
        }

    def get_default(self, resource_type):
        """Get the DST default ID for `resource_type`, or None.

        Only tenants and users have default IDs here; any other type
        falls through and returns None implicitly.

        :return: str or None
        """
        if resource_type in (utils.TENANTS_TYPE, utils.USERS_TYPE):
            return self.resource[DST].get_default_id(resource_type)

    def map_migrated_objects(self, resource_type=None):
        """Build a map of SRC -> DST object IDs.

        :return: dict
        """
        if not resource_type:
            resource_type = self.default_resource_type
        objs = {
            pos: self.read_objects(pos, resource_type)
            for pos in (SRC, DST)
        }
        # Resource types are plural ("tenants"); each object body is keyed
        # by the singular form ("tenant"), hence the [:-1] strip.
        body = resource_type[:-1]
        obj_map = {
            src[body]['id']: dst[body]['id']
            for src in objs[SRC]
            for dst in objs[DST]
            if self.obj_identical(src[body], dst[body])
        }
        return obj_map

    def migrated_id(self, src_object_id, resource_type=None):
        """Get the migrated (DST) object ID for a SRC object ID.

        Falls back to the DST default ID (see :meth:`get_default`) when
        the SRC object is not found among migrated objects.

        :return: DST object ID
        """
        if not resource_type:
            resource_type = self.default_resource_type
        if resource_type not in self.obj_map:
            self.obj_map[resource_type] = \
                self.map_migrated_objects(resource_type)
        return self.obj_map[resource_type].get(src_object_id,
                                               self.get_default(resource_type))

    def identical(self, src_id, dst_id, resource_type=None):
        """Check if SRC object `src_id` migrated to DST object `dst_id`.

        :return: boolean
        """
        if not resource_type:
            resource_type = self.default_resource_type
        return dst_id == self.migrated_id(src_id, resource_type=resource_type)

    def obj_identical(self, src_obj, dst_obj):
        """Compare SRC and DST object bodies via the DST resource.

        :return: boolean
        """
        dst_res = self.resource[DST]
        return dst_res.identical(src_obj, dst_obj)

    def read_objects(self, position, resource_type):
        """Read object info of `resource_type` from the `position` cloud.

        :return: list
        """
        res = self.resource[position]
        objs = res.read_info()[resource_type]
        # read_info() may return either a list or an id-keyed dict;
        # normalize to a list of object bodies.
        return objs.values() if isinstance(objs, dict) else objs
SVilgelm/CloudFerry
cloudferry/lib/base/migration.py
Python
apache-2.0
3,610
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json

from neutron.db import l3_db
from neutron.extensions import vpnaas
from neutron import manager
from neutron.plugins.common import constants


class VpnReferenceValidator(object):

    """Baseline validation routines for VPN resources."""

    # Minimum permissible MTU per IP version (RFC 791 / RFC 2460).
    IP_MIN_MTU = {4: 68, 6: 1280}

    @property
    def l3_plugin(self):
        """Lazily resolve and cache the L3 router service plugin."""
        try:
            return self._l3_plugin
        except AttributeError:
            self._l3_plugin = manager.NeutronManager.get_service_plugins().get(
                constants.L3_ROUTER_NAT)
            return self._l3_plugin

    @property
    def core_plugin(self):
        """Lazily resolve and cache the core plugin."""
        try:
            return self._core_plugin
        except AttributeError:
            self._core_plugin = manager.NeutronManager.get_plugin()
            return self._core_plugin

    def _check_dpd(self, ipsec_sitecon):
        """Ensure that DPD timeout is greater than DPD interval."""
        if ipsec_sitecon['dpd_timeout'] <= ipsec_sitecon['dpd_interval']:
            raise vpnaas.IPsecSiteConnectionDpdIntervalValueError(
                attr='dpd_timeout')

    def _check_mtu(self, context, mtu, ip_version):
        """Reject MTUs below the minimum for the given IP version."""
        if mtu < VpnReferenceValidator.IP_MIN_MTU[ip_version]:
            raise vpnaas.IPsecSiteConnectionMtuError(mtu=mtu,
                                                     version=ip_version)

    def assign_sensible_ipsec_sitecon_defaults(self, ipsec_sitecon,
                                               prev_conn=None):
        """Provide defaults for optional items, if missing.

        Flatten the nested DPD information, and set default values for
        any missing information. For connection updates, the previous
        values will be used as defaults for any missing items.
        """
        if not prev_conn:
            prev_conn = {'dpd_action': 'hold',
                         'dpd_interval': 30,
                         'dpd_timeout': 120}
        dpd = ipsec_sitecon.get('dpd', {})
        ipsec_sitecon['dpd_action'] = dpd.get('action',
                                              prev_conn['dpd_action'])
        ipsec_sitecon['dpd_interval'] = dpd.get('interval',
                                                prev_conn['dpd_interval'])
        ipsec_sitecon['dpd_timeout'] = dpd.get('timeout',
                                               prev_conn['dpd_timeout'])

    def validate_ipsec_site_connection(self, context, ipsec_sitecon,
                                       ip_version):
        """Reference implementation of validation for IPSec connection."""
        self._check_dpd(ipsec_sitecon)
        mtu = ipsec_sitecon.get('mtu')
        if mtu:
            self._check_mtu(context, mtu, ip_version)

    def _check_router(self, context, router_id):
        """Require that the router exists and has an external gateway."""
        router = self.l3_plugin.get_router(context, router_id)
        if not router.get(l3_db.EXTERNAL_GW_INFO):
            raise vpnaas.RouterIsNotExternal(router_id=router_id)

    def _check_subnet_id(self, context, router_id, subnet_id):
        """Require that the subnet is attached to the given router."""
        ports = self.core_plugin.get_ports(
            context,
            filters={
                'fixed_ips': {'subnet_id': [subnet_id]},
                'device_id': [router_id]})
        if not ports:
            raise vpnaas.SubnetIsNotConnectedToRouter(
                subnet_id=subnet_id,
                router_id=router_id)

    def validate_vpnservice(self, context, vpnservice):
        """Validate router and subnet referenced by a vpnservice."""
        self._check_router(context, vpnservice['router_id'])
        self._check_subnet_id(context, vpnservice['router_id'],
                              vpnservice['subnet_id'])

    def validate_vpnservice_ngfw(self, context, vpnservice):
        """Validate an NGFW vpnservice whose router/subnet details are
        JSON-encoded in the description field.

        :raises vpnaas.DescriptionInvalid: description is not valid JSON
        :raises vpnaas.TenantRouterIdMustBeSet: missing tenant_router_id
        :raises vpnaas.SubnetMustBeSet: missing subnets list
        """
        try:
            # BUG FIX: dict.has_key() was removed in Python 3; the `in`
            # operator is equivalent and works on Python 2 and 3.
            if 'description' in vpnservice:
                description = json.loads(vpnservice["description"])
            else:
                return
        except ValueError:
            raise vpnaas.DescriptionInvalid(
                description=vpnservice["description"])
        else:
            tenant_router_id = description.get("tenant_router_id", None)
            if not tenant_router_id:
                raise vpnaas.TenantRouterIdMustBeSet()

            subnets = description.get("subnets", [])
            if not subnets:
                raise vpnaas.SubnetMustBeSet()
            for subnet in subnets:
                self._check_subnet_id(context, tenant_router_id, subnet)
nash-x/hws
neutron/db/vpn/vpn_validator.py
Python
apache-2.0
4,980
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Top-level marionette_tg client/server objects.

Both :class:`Client` and :class:`Server` wrap a driver plus a pair of
multiplexer buffers, and reschedule themselves on the Twisted reactor.
"""

import sys
import random

sys.path.append('.')

from twisted.internet import reactor

import marionette_tg.driver
import marionette_tg.multiplexer
import marionette_tg.record_layer
import marionette_tg.updater

# How often (seconds) execute() reschedules itself on the reactor.
EVENT_LOOP_FREQUENCY_S = 0.01
# Delay (seconds) before the first autoupdate check after startup.
AUTOUPDATE_DELAY = 5


class MarionetteException(Exception):
    """Base exception for marionette_tg errors."""
    pass


class Client(object):
    """Client endpoint: owns streams and drives a ClientDriver."""

    def __init__(self, format_name, format_version):
        self.multiplexer_outgoing_ = marionette_tg.multiplexer.BufferOutgoing()
        self.multiplexer_incoming_ = marionette_tg.multiplexer.BufferIncoming()
        self.multiplexer_incoming_.addCallback(self.process_cell)
        self.streams_ = {}
        # Stream IDs are random 32-bit values (non-zero).
        self.stream_counter_ = random.randint(1,2**32-1)
        self.set_driver(format_name, format_version)
        self.reload_ = False

        # Schedule the first (startup) update check on the reactor.
        reactor.callLater(AUTOUPDATE_DELAY, self.check_for_update)

    def set_driver(self, format_name, format_version=None):
        """(Re)create the client driver for the given format.

        When format_version is None, the latest version on disk is used.
        """
        self.format_name_ = format_name
        if format_version == None:
            # NOTE(review): relies on marionette_tg.dsl being importable as
            # an attribute of the package; confirm it is imported elsewhere.
            self.format_version_ = marionette_tg.dsl.get_latest_version(
                'client', format_name)
        else:
            self.format_version_ = format_version

        self.driver_ = marionette_tg.driver.ClientDriver("client")
        self.driver_.set_multiplexer_incoming(self.multiplexer_incoming_)
        self.driver_.set_multiplexer_outgoing(self.multiplexer_outgoing_)
        self.driver_.setFormat(self.format_name_, self.format_version_)

    def get_format(self):
        """Return the driver format as "name:version"."""
        retval = str(self.format_name_) + \
                 ':' + \
                 str(self.format_version_)
        return retval

    def execute(self, reactor):
        """One event-loop tick: run the driver, or reset (and optionally
        reload the format) when it has stopped; then reschedule."""
        if self.driver_.isRunning():
            self.driver_.execute(reactor)
        else:
            # Driver finished: apply a pending format reload, then reset.
            if self.reload_:
                self.set_driver(self.format_name_)
                self.reload_ = False
            self.driver_.reset()

        reactor.callLater(EVENT_LOOP_FREQUENCY_S, self.execute, reactor)

    def process_cell(self, cell_obj):
        """Deliver an incoming cell's payload to its stream's queue."""
        payload = cell_obj.get_payload()
        if payload:
            stream_id = cell_obj.get_stream_id()
            self.streams_[stream_id].srv_queue.put(payload)

    def start_new_stream(self, srv_queue=None):
        """Create, register and return a new MarionetteStream."""
        stream = marionette_tg.multiplexer.MarionetteStream(
            self.multiplexer_incoming_, self.multiplexer_outgoing_,
            self.stream_counter_, srv_queue)
        stream.host = self
        self.streams_[self.stream_counter_] = stream
        # Pick a fresh random ID for the next stream.
        self.stream_counter_ = random.randint(1,2**32-1)
        return stream

    def terminate(self, stream_id):
        """Forget the stream with the given ID."""
        del self.streams_[stream_id]

    # call this function if you want reload formats from disk
    # at the next possible time
    def reload_driver(self):
        self.reload_ = True

    def check_for_update(self):
        """Run a format update if autoupdate is enabled in config."""
        # uncomment the following line to check for updates every N seconds
        # instead of just on startup
        # reactor.callLater(N, self.check_for_update, reactor)
        # NOTE(review): marionette_tg.conf is not imported in this module;
        # presumably imported by a submodule above -- confirm.
        if marionette_tg.conf.get("general.autoupdate"):
            self.do_update(self.reload_driver)

    def do_update(self, callback):
        """Fetch format updates from the configured server; `callback`
        is invoked by the updater (here: reload_driver)."""
        # could be replaced with code that updates from a different
        # source (e.g., local computations)
        update_server = marionette_tg.conf.get("general.update_server")
        updater = marionette_tg.updater.FormatUpdater(update_server, use_marionette=True, callback=callback)
        return updater.do_update()


class Server(object):
    """Server endpoint: dispatches incoming cells to factory instances."""

    # Protocol factory class; assigned by the embedding application.
    factory = None

    def __init__(self, format_name):
        self.multiplexer_outgoing_ = marionette_tg.multiplexer.BufferOutgoing()
        self.multiplexer_incoming_ = marionette_tg.multiplexer.BufferIncoming()
        self.multiplexer_incoming_.addCallback(self.process_cell)
        # stream_id -> factory instance
        self.factory_instances = {}

        # NOTE(review): check_for_update() always returns None, so this
        # branch never runs; if it did, do_update() would fail because it
        # requires a callback argument. Looks like dead/buggy code -- confirm.
        if self.check_for_update():
            self.do_update()

        self.set_driver(format_name)
        self.reload_ = False

    def set_driver(self, format_name):
        """(Re)create the server driver for the given format."""
        self.format_name_ = format_name
        self.driver_ = marionette_tg.driver.ServerDriver("server")
        self.driver_.set_multiplexer_incoming(self.multiplexer_incoming_)
        self.driver_.set_multiplexer_outgoing(self.multiplexer_outgoing_)
        self.driver_.setFormat(self.format_name_)

    def execute(self, reactor):
        """One event-loop tick: reload format if requested when idle,
        run the driver, then reschedule."""
        if not self.driver_.isRunning():
            if self.reload_:
                self.set_driver(self.format_name_)
                self.reload_ = False

        self.driver_.execute(reactor)
        reactor.callLater(EVENT_LOOP_FREQUENCY_S, self.execute, reactor)

    def process_cell(self, cell_obj):
        """Route a cell to its per-stream factory instance.

        END_OF_STREAM tears the instance down; NORMAL creates one on
        first sight of the stream and forwards any payload.
        """
        cell_type = cell_obj.get_cell_type()
        stream_id = cell_obj.get_stream_id()

        if cell_type == marionette_tg.record_layer.END_OF_STREAM:
            self.factory_instances[stream_id].connectionLost()
            del self.factory_instances[stream_id]
        elif cell_type == marionette_tg.record_layer.NORMAL:
            if not self.factory_instances.get(stream_id):
                stream = marionette_tg.multiplexer.MarionetteStream(
                    self.multiplexer_incoming_, self.multiplexer_outgoing_,
                    stream_id)
                self.factory_instances[stream_id] = self.factory()
                self.factory_instances[stream_id].connectionMade(stream)

            payload = cell_obj.get_payload()
            if payload:
                self.factory_instances[stream_id].dataReceived(payload)

    # call this function if you want reload formats from disk
    # at the next possible time
    def reload_driver(self):
        self.reload_ = True

    def check_for_update(self):
        """Run a format update if autoupdate is enabled in config."""
        # uncomment the following line to check for updates every N seconds
        # instead of just on startup
        # reactor.callLater(N, self.check_for_update, reactor)
        if marionette_tg.conf.get("general.autoupdate"):
            self.do_update(self.reload_driver)

    def do_update(self, callback):
        """Fetch format updates from the configured server; `callback`
        is invoked by the updater (here: reload_driver)."""
        # could be replaced with code that updates from a different
        # source (e.g., local computations)
        update_server = marionette_tg.conf.get("general.update_server")
        updater = marionette_tg.updater.FormatUpdater(update_server, use_marionette=False, callback=callback)
        return updater.do_update()
irdan/marionette
marionette_tg/__init__.py
Python
apache-2.0
6,400
# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: contextlib import sys from functools import wraps from warnings import warn __all__ = ['contextmanager', 'nested', 'closing'] class GeneratorContextManager(object): def __init__(self, gen): self.gen = gen def __enter__(self): try: return self.gen.next() except StopIteration: raise RuntimeError("generator didn't yield") def __exit__(self, type, value, traceback): if type is None: try: self.gen.next() except StopIteration: return raise RuntimeError("generator didn't stop") else: if value is None: value = type() try: self.gen.throw(type, value, traceback) raise RuntimeError("generator didn't stop after throw()") except StopIteration as exc: return exc is not value except: if sys.exc_info()[1] is not value: raise return def contextmanager(func): @wraps(func) def helper(*args, **kwds): return GeneratorContextManager(func(*args, **kwds)) return helper @contextmanager def nested(*managers): warn('With-statements now directly support multiple context managers', DeprecationWarning, 3) exits = [] vars = [] exc = (None, None, None) try: for mgr in managers: exit = mgr.__exit__ enter = mgr.__enter__ vars.append(enter()) exits.append(exit) yield vars except: exc = sys.exc_info() finally: while exits: exit = exits.pop() try: if exit(*exc): exc = (None, None, None) except: exc = sys.exc_info() if exc != (None, None, None): raise exc[0], exc[1], exc[2] return class closing(object): def __init__(self, thing): self.thing = thing def __enter__(self): return self.thing def __exit__(self, *exc_info): self.thing.close()
DedMemez/ODS-August-2017
contextlib.py
Python
apache-2.0
2,267
""" P1 tests for Dedicating Guest Vlan Ranges
"""
# Import Local Modules
# NOTE(review): integration test; the marvin wildcard imports supply
# Account, PhysicalNetwork, get_domain/get_zone, cleanup_resources,
# setNonContiguousVlanIds and cloudstackTestCase.
from marvin.cloudstackAPI import *
from marvin.cloudstackTestCase import *
from marvin.lib.base import *
from marvin.lib.common import *
from marvin.lib.utils import *
from nose.plugins.attrib import attr


class TestDedicateGuestVlanRange(cloudstackTestCase):
    """Smoke test: dedicate a guest VLAN range to an account, then release it."""

    @classmethod
    def setUpClass(cls):
        # Class-wide fixtures: API client, zone/domain, a throwaway account,
        # and a physical network plus a known-free non-contiguous VLAN range.
        testClient = super(TestDedicateGuestVlanRange, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = testClient.getParsedTestDataConfig()

        # Get Zone, Domain
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())

        # Create Account
        cls.account = Account.create(
            cls.apiclient,
            cls.services["account"],
            domainid=cls.domain.id
        )
        cls._cleanup = [
            cls.account,
        ]
        cls.physical_network, cls.free_vlan = setNonContiguousVlanIds(cls.apiclient, cls.zone.id)
        return

    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            # Restore the physical network's original VLAN range before
            # deleting the account.
            removeGuestVlanRangeResponse = \
                cls.physical_network.update(cls.apiclient,
                                            id=cls.physical_network.id,
                                            vlan=cls.physical_network.vlan)
            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        # Per-test clients; self.cleanup collects resources created in a test.
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        return

    def tearDown(self):
        try:
            # Clean up
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    @attr(tags=["advanced", "guestvlanrange", "dedicate", "release"], required_hardware="false")
    def test_dedicateGuestVlanRange(self):
        """Test guest vlan range dedication
        """

        """Assume a physical network is available
        """

        """
        # Validate the following:
        # 1. List the available physical network using ListPhysicalNetwork
        # 2. Add a Guest Vlan range to the available physical network using UpdatePhysicalNetwork
        # 3. Dedicate the created guest vlan range to user account using DedicateGuestVlanRange
        # 4. Verify vlan range is dedicated with listDedicatedGuestVlanRanges
        # 5. Release the dedicated guest vlan range back to the system
        # 6. Verify guest vlan range has been released, verify with listDedicatedGuestVlanRanges
        # 7. Remove the added guest vlan range using UpdatePhysicalNetwork
        """

        self.debug("Adding guest vlan range")
        # Append the free partial range to the network's existing VLAN list.
        new_vlan = self.physical_network.vlan + "," + self.free_vlan["partial_range"][0]
        # new_vlan = self.free_vlan["partial_range"][0]
        addGuestVlanRangeResponse = self.physical_network.update(self.apiclient,
                id=self.physical_network.id, vlan=new_vlan)
        # id=self.physical_network.id, vlan=self.free_vlan["partial_range"][0])

        self.debug("Dedicating guest vlan range");
        dedicate_guest_vlan_range_response = PhysicalNetwork.dedicate(
            self.apiclient,
            self.free_vlan["partial_range"][0],
            physicalnetworkid=self.physical_network.id,
            account=self.account.name,
            domainid=self.account.domainid
        )
        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(
            self.apiclient,
            id=dedicate_guest_vlan_range_response.id
        )
        dedicated_guest_vlan_response = list_dedicated_guest_vlan_range_response[0]
        self.assertEqual(
            dedicated_guest_vlan_response.account,
            self.account.name,
            "Check account name is in listDedicatedGuestVlanRanges as the account the range is dedicated to"
        )

        self.debug("Releasing guest vlan range");
        dedicate_guest_vlan_range_response.release(self.apiclient)
        # After release, no dedicated ranges should remain.
        list_dedicated_guest_vlan_range_response = PhysicalNetwork.listDedicated(self.apiclient)
        self.assertEqual(
            list_dedicated_guest_vlan_range_response,
            None,
            "Check vlan range is not available in listDedicatedGuestVlanRanges"
        )
remibergsma/cosmic
cosmic-core/test/integration/smoke/test_guest_vlan_range.py
Python
apache-2.0
4,610
#Copyright (c) 2014 Sony Computer Entertainment America LLC. See License.txt.

# NOTE(review): IronPython UI-automation script for the ATF DomTreeEditor
# sample.  For each node type it verifies which child types the editor
# accepts (child count grows) and which it rejects (child count unchanged).
# Names such as ``atfDocService``, ``editor``, ``editingContext``,
# ``treeLister``, ``DomNode``, ``UISchema`` and the ``UI*`` types are
# injected by the hosting application -- presumably via the scripting
# service; they are not defined in this file.

import sys
sys.path.append("./CommonTestScripts")

import Test

doc = atfDocService.OpenNewDocument(editor)

#===================== 0: root ==================================
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(treeLister.TreeView.DomNode)), "Verify root child count")
package = editingContext.Insert[UIPackage](DomNode(UISchema.UIPackageType.Type), treeLister.TreeView.DomNode)
Test.Equal(1, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(treeLister.TreeView.DomNode)), "Verify root child count")

print "Trying to add objects that cannot be a child of the root"
editingContext.Insert[UIForm](DomNode(UISchema.UIFormType.Type), treeLister.TreeView.DomNode)
Test.Equal(1, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(treeLister.TreeView.DomNode)), "Verify root child count does not increase when adding a form")
editingContext.Insert[UIShader](DomNode(UISchema.UIShaderType.Type), treeLister.TreeView.DomNode)
Test.Equal(1, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(treeLister.TreeView.DomNode)), "Verify root child count does not increase when adding a shader")
editingContext.Insert[UITexture](DomNode(UISchema.UITextureType.Type), treeLister.TreeView.DomNode)
Test.Equal(1, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(treeLister.TreeView.DomNode)), "Verify root child count does not increase when adding a texture")
editingContext.Insert[UIFont](DomNode(UISchema.UIFontType.Type), treeLister.TreeView.DomNode)
Test.Equal(1, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(treeLister.TreeView.DomNode)), "Verify root child count does not increase when adding a font")
editingContext.Insert[UISprite](DomNode(UISchema.UISpriteType.Type), treeLister.TreeView.DomNode)
Test.Equal(1, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(treeLister.TreeView.DomNode)), "Verify root child count does not increase when adding a sprite")
editingContext.Insert[UITextItem](DomNode(UISchema.UITextItemType.Type), treeLister.TreeView.DomNode)
Test.Equal(1, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(treeLister.TreeView.DomNode)), "Verify root child count does not increase when adding a text")
editingContext.Insert[UIAnimation](DomNode(UISchema.UIAnimationType.Type), treeLister.TreeView.DomNode)
Test.Equal(1, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(treeLister.TreeView.DomNode)), "Verify root child count does not increase when adding an animation")

#===================== 1: Package ==================================
print "Adding children to a package"
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(package.DomNode)), "Verify package child count")
form = editingContext.Insert[UIForm](DomNode(UISchema.UIFormType.Type), package.DomNode)
Test.Equal(1, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(package.DomNode)), "Verify package child count after adding form")
shader = editingContext.Insert[UIShader](DomNode(UISchema.UIShaderType.Type), package.DomNode)
Test.Equal(2, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(package.DomNode)), "Verify package child count after adding shader")
texture = editingContext.Insert[UITexture](DomNode(UISchema.UITextureType.Type), package.DomNode)
Test.Equal(3, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(package.DomNode)), "Verify package child count after adding texture")
font = editingContext.Insert[UIFont](DomNode(UISchema.UIFontType.Type), package.DomNode)
Test.Equal(4, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(package.DomNode)), "Verify package child count after adding font")
packageChildCount = 4

print "Trying to add objects that cannot be a child of a package"
editingContext.Insert[UIPackage](DomNode(UISchema.UIPackageType.Type), package.DomNode)
Test.Equal(packageChildCount, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(package.DomNode)), "Verify package child count does not increase after adding package")
editingContext.Insert[UISprite](DomNode(UISchema.UISpriteType.Type), package.DomNode)
Test.Equal(packageChildCount, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(package.DomNode)), "Verify package child count does not increase after adding sprite")
editingContext.Insert[UITextItem](DomNode(UISchema.UITextItemType.Type), package.DomNode)
Test.Equal(packageChildCount, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(package.DomNode)), "Verify package child count does not increase after adding text")
editingContext.Insert[UIAnimation](DomNode(UISchema.UIAnimationType.Type), package.DomNode)
Test.Equal(packageChildCount, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(package.DomNode)), "Verify package child count does not increase after adding animation")

#===================== 2: Form ==================================
print "Adding children to a form"
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(form.DomNode)), "Verify form child count")
sprite = editingContext.Insert[UISprite](DomNode(UISchema.UISpriteType.Type), form.DomNode)
Test.Equal(1, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(form.DomNode)), "Verify form child count after adding sprite")
text = editingContext.Insert[UITextItem](DomNode(UISchema.UITextItemType.Type), form.DomNode)
Test.Equal(2, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(form.DomNode)), "Verify form child count after adding text")
animation = editingContext.Insert[UIAnimation](DomNode(UISchema.UIAnimationType.Type), form.DomNode)
Test.Equal(3, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(form.DomNode)), "Verify form child count after adding animation")

print "Trying to add objects that cannot be a child of a form"
editingContext.Insert[UIPackage](DomNode(UISchema.UIPackageType.Type), form.DomNode)
Test.Equal(3, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(form.DomNode)), "Verify form child count does not increase after adding a package")
editingContext.Insert[UIForm](DomNode(UISchema.UIFormType.Type), form.DomNode)
Test.Equal(3, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(form.DomNode)), "Verify form child count does not increase after adding a form")
editingContext.Insert[UIShader](DomNode(UISchema.UIShaderType.Type), form.DomNode)
Test.Equal(3, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(form.DomNode)), "Verify form child count does not increase after adding a shader")
editingContext.Insert[UITexture](DomNode(UISchema.UITextureType.Type), form.DomNode)
Test.Equal(3, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(form.DomNode)), "Verify form child count does not increase after adding a texture")
editingContext.Insert[UIFont](DomNode(UISchema.UIFontType.Type), form.DomNode)
Test.Equal(3, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(form.DomNode)), "Verify form child count does not increase after adding a font")

#===================== 3: Shader ==================================
print "Verify cannot add children to a shader"
editingContext.Insert[UIPackage](DomNode(UISchema.UIPackageType.Type), shader.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(shader.DomNode)), "Verify shader child count does not increase when adding a package")
editingContext.Insert[UIForm](DomNode(UISchema.UIFormType.Type), shader.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(shader.DomNode)), "Verify shader child count does not increase when adding a form")
editingContext.Insert[UIShader](DomNode(UISchema.UIShaderType.Type), shader.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(shader.DomNode)), "Verify shader child count does not increase when adding a shader")
editingContext.Insert[UITexture](DomNode(UISchema.UITextureType.Type), shader.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(shader.DomNode)), "Verify shader child count does not increase when adding a texture")
editingContext.Insert[UIFont](DomNode(UISchema.UIFontType.Type), shader.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(shader.DomNode)), "Verify shader child count does not increase when adding a font")
editingContext.Insert[UISprite](DomNode(UISchema.UISpriteType.Type), shader.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(shader.DomNode)), "Verify shader child count does not increase when adding a sprite")
editingContext.Insert[UITextItem](DomNode(UISchema.UITextItemType.Type), shader.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(shader.DomNode)), "Verify shader child count does not increase when adding a text")
editingContext.Insert[UIAnimation](DomNode(UISchema.UIAnimationType.Type), shader.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(shader.DomNode)), "Verify shader child count does not increase when adding an animation")

#===================== 4: Texture ==================================
print "Verify cannot add children to a texture"
editingContext.Insert[UIPackage](DomNode(UISchema.UIPackageType.Type), texture.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(texture.DomNode)), "Verify texture child count does not increase when adding a package")
editingContext.Insert[UIForm](DomNode(UISchema.UIFormType.Type), texture.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(texture.DomNode)), "Verify texture child count does not increase when adding a form")
editingContext.Insert[UIShader](DomNode(UISchema.UIShaderType.Type), texture.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(texture.DomNode)), "Verify texture child count does not increase when adding a shader")
editingContext.Insert[UITexture](DomNode(UISchema.UITextureType.Type), texture.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(texture.DomNode)), "Verify texture child count does not increase when adding a texture")
editingContext.Insert[UIFont](DomNode(UISchema.UIFontType.Type), texture.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(texture.DomNode)), "Verify texture child count does not increase when adding a font")
editingContext.Insert[UISprite](DomNode(UISchema.UISpriteType.Type), texture.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(texture.DomNode)), "Verify texture child count does not increase when adding a sprite")
editingContext.Insert[UITextItem](DomNode(UISchema.UITextItemType.Type), texture.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(texture.DomNode)), "Verify texture child count does not increase when adding a text")
editingContext.Insert[UIAnimation](DomNode(UISchema.UIAnimationType.Type), texture.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(texture.DomNode)), "Verify texture child count does not increase when adding an animation")

#===================== 5: Font ==================================
print "Verify cannot add children to a font"
editingContext.Insert[UIPackage](DomNode(UISchema.UIPackageType.Type), font.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(font.DomNode)), "Verify font child count does not increase when adding a package")
editingContext.Insert[UIForm](DomNode(UISchema.UIFormType.Type), font.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(font.DomNode)), "Verify font child count does not increase when adding a form")
editingContext.Insert[UIShader](DomNode(UISchema.UIShaderType.Type), font.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(font.DomNode)), "Verify font child count does not increase when adding a shader")
editingContext.Insert[UITexture](DomNode(UISchema.UITextureType.Type), font.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(font.DomNode)), "Verify font child count does not increase when adding a texture")
editingContext.Insert[UIFont](DomNode(UISchema.UIFontType.Type), font.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(font.DomNode)), "Verify font child count does not increase when adding a font")
editingContext.Insert[UISprite](DomNode(UISchema.UISpriteType.Type), font.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(font.DomNode)), "Verify font child count does not increase when adding a sprite")
editingContext.Insert[UITextItem](DomNode(UISchema.UITextItemType.Type), font.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(font.DomNode)), "Verify font child count does not increase when adding a text")
editingContext.Insert[UIAnimation](DomNode(UISchema.UIAnimationType.Type), font.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(font.DomNode)), "Verify font child count does not increase when adding an animation")

#===================== 6: Sprite ==================================
print "Adding children to a sprite"
Test.Equal(2, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(sprite.DomNode)), "Verify sprite child count (starts with a transform and an empty ref)")
spriteUnderSprite = editingContext.Insert[UISprite](DomNode(UISchema.UISpriteType.Type), sprite.DomNode)
Test.Equal(3, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(sprite.DomNode)), "Verify sprite child count after adding sprite")
textUnderSprite = editingContext.Insert[UITextItem](DomNode(UISchema.UITextItemType.Type), sprite.DomNode)
Test.Equal(4, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(sprite.DomNode)), "Verify sprite child count after adding text")
animationUnderSprite = editingContext.Insert[UIAnimation](DomNode(UISchema.UIAnimationType.Type), sprite.DomNode)
Test.Equal(5, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(sprite.DomNode)), "Verify sprite child count after adding animation")
#must be added as ref:
shaderUnderSprite = editingContext.InsertAsRef[UIShader](DomNode(UISchema.UIShaderType.Type), sprite.DomNode)
Test.Equal(5, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(sprite.DomNode)), "Verify sprite child count after adding shader")
#refs will be added as real objects to the package
packageChildCount = packageChildCount + 1
Test.Equal(packageChildCount, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(package.DomNode)), "Verify package child count increases after adding a ref")

print "Trying to add objects that cannot be a child of a sprite"
editingContext.Insert[UIPackage](DomNode(UISchema.UIPackageType.Type), sprite.DomNode)
Test.Equal(5, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(sprite.DomNode)), "Verify sprite child count does not increase when adding a package")
editingContext.Insert[UIForm](DomNode(UISchema.UIFormType.Type), sprite.DomNode)
Test.Equal(5, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(sprite.DomNode)), "Verify sprite child count does not increase when adding a form")
editingContext.Insert[UITexture](DomNode(UISchema.UITextureType.Type), sprite.DomNode)
Test.Equal(5, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(sprite.DomNode)), "Verify sprite child count does not increase when adding a texture")
editingContext.Insert[UIFont](DomNode(UISchema.UIFontType.Type), sprite.DomNode)
Test.Equal(5, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(sprite.DomNode)), "Verify sprite child count does not increase when adding a font")
editingContext.Insert[UIShader](DomNode(UISchema.UIShaderType.Type), sprite.DomNode)
Test.Equal(5, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(sprite.DomNode)), "Verify sprite child count does not increase when adding a shader")

#===================== 7: Text ==================================
print "Adding children to a text"
Test.Equal(2, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(text.DomNode)), "Verify text child count (starts with a transform and an empty ref)")
spriteUnderText = editingContext.Insert[UISprite](DomNode(UISchema.UISpriteType.Type), text.DomNode)
Test.Equal(3, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(text.DomNode)), "Verify text child count after adding sprite")
textUnderText = editingContext.Insert[UITextItem](DomNode(UISchema.UITextItemType.Type), text.DomNode)
Test.Equal(4, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(text.DomNode)), "Verify text child count after adding text")
animationUnderText = editingContext.Insert[UIAnimation](DomNode(UISchema.UIAnimationType.Type), text.DomNode)
Test.Equal(5, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(text.DomNode)), "Verify text child count after adding animation")
#must be added as ref:
fontUnderText = editingContext.InsertAsRef[UIFont](DomNode(UISchema.UIFontType.Type), text.DomNode)
Test.Equal(5, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(text.DomNode)), "Verify text child count after adding font as ref")
packageChildCount = packageChildCount + 1
Test.Equal(packageChildCount, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(package.DomNode)), "Verify package child count increases after adding a ref")

print "Trying to add objects that cannot be a child of a text"
editingContext.Insert[UIPackage](DomNode(UISchema.UIPackageType.Type), text.DomNode)
Test.Equal(5, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(text.DomNode)), "Verify text child count does not increase when adding a package")
editingContext.Insert[UIForm](DomNode(UISchema.UIFormType.Type), text.DomNode)
Test.Equal(5, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(text.DomNode)), "Verify text child count does not increase when adding a form")
editingContext.Insert[UIShader](DomNode(UISchema.UIShaderType.Type), text.DomNode)
Test.Equal(5, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(text.DomNode)), "Verify text child count does not increase when adding a shader")
editingContext.Insert[UITexture](DomNode(UISchema.UITextureType.Type), text.DomNode)
Test.Equal(5, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(text.DomNode)), "Verify text child count does not increase when adding a texture")
editingContext.Insert[UIFont](DomNode(UISchema.UIFontType.Type), text.DomNode)
Test.Equal(5, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(text.DomNode)), "Verify text child count does not increase when adding a font")

#===================== 8: Animation ==================================
print "Verify cannot add children to an animation"
# NOTE(review): argument order of Test.Equal flips in this section
# (actual first, expected second) compared to the sections above.
animCount = Test.GetEnumerableCount(treeLister.TreeView.GetChildren(animation.DomNode))
editingContext.Insert[UIPackage](DomNode(UISchema.UIPackageType.Type), animation.DomNode)
Test.Equal(Test.GetEnumerableCount(treeLister.TreeView.GetChildren(animation.DomNode)), animCount, "Verify animation child count does not increase when adding a package")
editingContext.Insert[UIForm](DomNode(UISchema.UIFormType.Type), animation.DomNode)
Test.Equal(Test.GetEnumerableCount(treeLister.TreeView.GetChildren(animation.DomNode)), animCount, "Verify animation child count does not increase when adding a form")
editingContext.Insert[UIShader](DomNode(UISchema.UIShaderType.Type), animation.DomNode)
Test.Equal(Test.GetEnumerableCount(treeLister.TreeView.GetChildren(animation.DomNode)), animCount, "Verify animation child count does not increase when adding a shader")
editingContext.Insert[UITexture](DomNode(UISchema.UITextureType.Type), animation.DomNode)
Test.Equal(Test.GetEnumerableCount(treeLister.TreeView.GetChildren(animation.DomNode)), animCount, "Verify animation child count does not increase when adding a texture")
editingContext.Insert[UIFont](DomNode(UISchema.UIFontType.Type), animation.DomNode)
Test.Equal(Test.GetEnumerableCount(treeLister.TreeView.GetChildren(animation.DomNode)), animCount, "Verify animation child count does not increase when adding a font")
editingContext.Insert[UISprite](DomNode(UISchema.UISpriteType.Type), animation.DomNode)
Test.Equal(Test.GetEnumerableCount(treeLister.TreeView.GetChildren(animation.DomNode)), animCount, "Verify animation child count does not increase when adding a sprite")
editingContext.Insert[UITextItem](DomNode(UISchema.UITextItemType.Type), animation.DomNode)
Test.Equal(Test.GetEnumerableCount(treeLister.TreeView.GetChildren(animation.DomNode)), animCount, "Verify animation child count does not increase when adding a text")
editingContext.Insert[UIAnimation](DomNode(UISchema.UIAnimationType.Type), animation.DomNode)
Test.Equal(Test.GetEnumerableCount(treeLister.TreeView.GetChildren(animation.DomNode)), animCount, "Verify animation child count does not increase when adding an animation")

print Test.SUCCESS
mindbaffle/ATF
Test/FunctionalTests/DomTreeEditorTestScripts/AddAllItems.py
Python
apache-2.0
20,874
"""Packaging script for the *shadowsocks* distribution."""

import codecs

from setuptools import setup


def _read_long_description():
    # PyPI renders the README as the project's long description.
    with codecs.open('README.rst', encoding='utf-8') as readme:
        return readme.read()


# Distribution metadata, gathered in one place and splatted into setup().
_METADATA = {
    'name': "shadowsocks",
    'version': "2.8.2.1",
    'license': 'http://www.apache.org/licenses/LICENSE-2.0',
    'description': "A fast tunnel proxy that help you get through firewalls",
    'author': 'clowwindy',
    'author_email': 'clowwindy42@gmail.com',
    'url': 'https://github.com/shadowsocks/shadowsocks',
    'packages': ['shadowsocks', 'shadowsocks.crypto'],
    'package_data': {
        'shadowsocks': ['README.rst', 'LICENSE']
    },
    'install_requires': [],
    # Console-script entry points for the local and server daemons.
    'entry_points': """
    [console_scripts]
    sslocal = shadowsocks.local:main
    ssserver = shadowsocks.server:main
    """,
    'classifiers': [
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Internet :: Proxy Servers',
    ],
}

setup(long_description=_read_long_description(), **_METADATA)
gitchs/shadowsocks
setup.py
Python
apache-2.0
1,323
#!/usr/bin/python # # Copyright 2008 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Author: mwu@google.com (Mingyu Wu) """Unittest for baserunner module.""" __author__ = 'mwu@google.com (Mingyu Wu)' import os import shutil import sys import tempfile import time import unittest from lib import baserunner from lib import filesystemhandlerextend from lib import mock_emailmessage from lib import mock_reporter from lib import mock_scanscripts from lib import pyreringconfig from lib import pyreringutil global_settings = pyreringconfig.GlobalPyreRingConfig.settings class BaseRunnerTest(unittest.TestCase): """Unit test cases for BaseRunner class.""" def setUp(self): # I should config global_settings here instead of read it from file system. 
self.tempdir = tempfile.mkdtemp() root_dir = os.path.abspath(os.path.join(os.path.split(sys.argv[0])[0], '../')) global_settings.update( {'report_dir': os.path.join(self.tempdir, 'report'), 'email_recipients': os.getenv('LOGNAME'), 'host_name': 'test.host', 'log_file': 'pyrering.log', 'file_errors': False, 'project_name': 'pyrering_unittest', 'root_dir': root_dir, 'sendmail': False, 'runner': 'baserunner', 'source_dir': os.path.join(root_dir, 'test'), 'tester': os.getenv('LOGNAME'), 'FATAL_STRING': 'Fatal:', 'header_file': 'header_info.txt', 'time': time.strftime('%Y%m%d%H%M'), 'skip_setup': False, }) # get a default config and mocks self.one_config = pyreringutil.PRConfigParser().Default() self.scanner = mock_scanscripts.MockScanScripts() self.emailmessage = mock_emailmessage.MockEmailMessage() self.reporter = mock_reporter.MockTxtReporter() self.runner = baserunner.BaseRunner( name='test', scanner=self.scanner, email_message=self.emailmessage, filesystem=filesystemhandlerextend.FileSystemHandlerExtend(), reporter=self.reporter) self.runner.Prepare() if not os.path.isdir(global_settings['report_dir']): os.makedirs(global_settings['report_dir']) # I don't want the unit test to mess with the original log file. 
global_settings['log_file'] += '.unittest' def tearDown(self): self.runner.CleanUp() self.runner = '' pyreringconfig.Reset() self.scanner.CleanConfig() shutil.rmtree(self.tempdir) def testFindHeaderInfoFile(self): global_settings['header_file'] = os.path.join(self.tempdir, 'header.txt') fh = open(global_settings['header_file'], 'w') fh.write('test info') fh.close() self.one_config['TEST_SCRIPT'] = 'echo 1' self.scanner.SetConfig([self.one_config]) result = self.runner.Run(['testFindHeaderInfoFile'], False) self.assertEqual(self.reporter.header, 'test info') self.assertEqual(result, 0) self.assertEqual(self.runner.passed, 1) # Positive Test Cases: def testOneCommand(self): """A simple sleep command takes some time to finish.""" # prepare the test script here self.one_config['TEST_SCRIPT'] = 'sleep 3' # set the mock scanscript to return this thing. self.scanner.SetConfig([self.one_config]) # now run the test and return should be expected. result = self.runner.Run(['testOneCommand'], False) self.assertEqual(result, 0) self.assertEqual(self.runner.passed, 1) def testEchoCommand(self): """A simple command has output on stdout.""" self.one_config['TEST_SCRIPT'] = 'echo testEchoCommand' self.scanner.SetConfig([self.one_config]) result = self.runner.Run(['testEchoCommand'], False) self.assertEqual(result, 0) self.assertEqual(self.runner.passed, 1) #TODO(mwu): need to check the log file has this hello line def testEchoToSTDERRCommand(self): """A simple command has output redirect to stderr.""" self.one_config['TEST_SCRIPT'] = 'echo testEchoToSTDERRCommand >&2' self.scanner.SetConfig([self.one_config]) result = self.runner.Run(['testEchoSTDERRCommand'], False) self.assertEqual(result, 0) self.assertEqual(self.runner.passed, 1) #TODO(mwu): need to check the log file has this hello line def testRunScript(self): """A real script to run.""" self.one_config['TEST_SCRIPT'] = os.path.join(global_settings['root_dir'], 'test/test1_echo.sh') self.scanner.SetConfig([self.one_config]) 
result = self.runner.Run(['testRunScript'], False) self.assertEqual(result, 0) self.assertEqual(self.runner.passed, 1) #TODO(mwu): need to check the log file has the echo output def testRunScripts(self): """2 scripts to be run.""" self.one_config['TEST_SCRIPT'] = 'echo testRunScripts1' config2 = pyreringutil.PRConfigParser().Default() config2['TEST_SCRIPT'] = 'echo testRunScripts2' self.scanner.SetConfig([self.one_config, config2]) result = self.runner.Run(['testRunScripts'], False) self.assertEqual(result, 0) self.assertEqual(self.runner.passed, 2) # TODO(mwu): verify both scripts run fine def testEmailSend(self): """Test Email should be send.""" self.one_config['TEST_SCRIPT'] = 'echo send_email_test;exit 1' self.scanner.SetConfig([self.one_config]) try: self.runner.Run(['testEmailSend'], True) except self.emailmessage.EmailCalledError: self.assertTrue(True) else: self.fail(msg='Send email was not called') def testEmailNotSendIfTestPass(self): """Test email should not go if all tests pass.""" self.one_config['TEST_SCRIPT'] = 'echo send_email_test;exit 0' self.scanner.SetConfig([self.one_config]) try: self.runner.Run(['testEmailSend'], True) except self.emailmessage.EmailCalledError: self.fail() # Negative Test Cases def testTimeoutCommand(self): """A command times out.""" self.one_config['TEST_SCRIPT'] = 'echo timeouttest; sleep 8' self.one_config['TIMEOUT'] = 2 self.scanner.SetConfig([self.one_config]) result = self.runner.Run(['testTimeoutCommand'], False) self.assertEqual(result, 1) self.assertEqual(self.runner.timeout, 1) def testNonExistCommand(self): """Test a wrong system command.""" self.one_config['TEST_SCRIPT'] = 'nonexist_command' self.scanner.SetConfig([self.one_config]) result = self.runner.Run(['testNonExistCommand'], False) self.assertEqual(result, 1) self.assertEqual(self.runner.failed, 1) def testNonExistScript(self): """Test a nonexist script.""" self.one_config['TEST_SCRIPT'] = '/tmp/nonexist_script.sh' self.scanner.SetConfig([self.one_config]) 
result = self.runner.Run(['testNonExistScript'], False) self.assertEqual(result, 1) self.assertEqual(self.runner.failed, 1) def testPermissionDenied(self): """Test something without permission.""" self.one_config['TEST_SCRIPT'] = 'touch /pyrering.txt' self.scanner.SetConfig([self.one_config]) result = self.runner.Run(['testPermissionDenied'], False) self.assertEqual(result, 1) self.assertEqual(self.runner.failed, 1) def testCatchWarningMessage(self): """Test a command has warning output.""" self.one_config['TEST_SCRIPT'] = 'echo warn message' self.scanner.SetConfig([self.one_config]) result = self.runner.Run(['testCatchWarningMessage'], False) self.assertEqual(result, 0) self.assertEqual(self.runner.passed, 1) def testCatchFatalMessage(self): """Test a command has fatal error message even exit code still 0.""" self.one_config['TEST_SCRIPT'] = 'echo Fatal:;echo anotherline' self.scanner.SetConfig([self.one_config]) result = self.runner.Run(['testCatchFatalMessage'], False) self.assertEqual(result, 1) self.assertEqual(self.runner.failed, 1) def testOutputLargeMessage(self): """Test a test can have large screen output. As default the stdout only has a 4k buffer limit, so the code should clean up the buffer while running the test, otherwise the writing to buffer will be blocked when the buffer is full. 
""" self.one_config['TEST_SCRIPT'] = os.path.join(global_settings['root_dir'], 'test/outputlargetxt.py') self.one_config['TIMEOUT'] = 4 self.scanner.SetConfig([self.one_config]) result = self.runner.Run(['testLargeOutput'], False) self.assertEqual(result, 0) self.assertEqual(self.runner.passed, 1) def testExitWithError(self): """Test a test have an error exit, which is not a failure.""" self.one_config['TEST_SCRIPT'] = 'exit 255' self.scanner.SetConfig([self.one_config]) result = self.runner.Run(['testExitWithError'], False) self.assertEqual(result, 1) self.assertEqual(self.runner.failed, 0) self.assertEqual(self.runner.error, 1) def testSetupTestPassed(self): """Test a setup test case passes.""" self.one_config['TEST_SCRIPT'] = 'exit 0' self.scanner.SetConfig([self.one_config]) config2 = pyreringutil.PRConfigParser().Default() config2['TEST_SCRIPT'] = 'exit 0' self.scanner.SetConfig([config2], 'setup') result = self.runner.Run(['testSetupTestFailed'], False) self.assertEqual(result, 0) self.assertEqual(self.runner.failed, 0) def testSetupTestFailed(self): """Test a setup test case failed, the test should exit at once.""" self.one_config['TEST_SCRIPT'] = 'exit 0' self.scanner.SetConfig([self.one_config]) config2 = pyreringutil.PRConfigParser().Default() config2['TEST_SCRIPT'] = 'exit 1' self.scanner.SetConfig([config2], 'setup') result = self.runner.Run(['testSetupTestFailed'], False) self.assertEqual(result, 1) self.assertEqual(self.runner.failed, 1) def testTearDownFailed(self): """Test a teardown test case failed, the test still reports.""" self.one_config['TEST_SCRIPT'] = 'exit 0' self.scanner.SetConfig([self.one_config]) config2 = pyreringutil.PRConfigParser().Default() config2['TEST_SCRIPT'] = 'exit 1' self.scanner.SetConfig([config2], 'teardown') result = self.runner.Run(['testTearDownTestFailed'], False) self.assertEqual(result, 4) self.assertEqual(self.runner.failed, 4) if __name__ == '__main__': unittest.main()
kdlucas/pyrering
lib/baserunner_test.py
Python
apache-2.0
10,873
__author__ = 'Joe Linn' #import pylastica import pylastica.response class Response(pylastica.response.Response): def __init__(self, response_data, action, op_type): """ @param response_data: @type response_data: dict or str @param action: @type action: pylastica.bulk.action.Action @param op_type: bulk operation type @type op_type: str """ assert isinstance(action, pylastica.bulk.action.Action), "action must be an instance of Action: %r" % action super(Response, self).__init__(response_data) self._action = action self._op_type = op_type @property def action(self): """ @return: @rtype: pylastica.bulk.action.Action """ return self._action @property def op_type(self): """ @return: @rtype: str """ return self._op_type
jlinn/pylastica
pylastica/bulk/response.py
Python
apache-2.0
926
# # Copyright (c) 2017 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. # ScanCode is a trademark of nexB Inc. # # You may not use this software except in compliance with the License. # You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. # # When you publish or redistribute any data created with ScanCode or any ScanCode # derivative work, you must accompany this data with the following acknowledgment: # # Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, either express or implied. No content created from # ScanCode should be considered or used as legal advice. Consult an Attorney # for any legal advice. # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. 
from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals import codecs import json import os from unittest.case import expectedFailure from commoncode.testcase import FileBasedTesting from commoncode.text import as_unicode from extractcode import patch class TestIsPatch(FileBasedTesting): test_data_dir = os.path.join(os.path.dirname(__file__), 'data') def test_is_not_patch(self): test_dir = self.get_test_loc('patch/not_patches', copy=True) for r, _, files in os.walk(test_dir): for f in files: test_file = os.path.join(r, f) assert not patch.is_patch(test_file) def test_is_patch(self): test_dir = self.get_test_loc('patch/patches', copy=True) for r, _, files in os.walk(test_dir): for f in files: if not f.endswith('expected'): test_file = os.path.join(r, f) assert patch.is_patch(test_file) def check_patch(test_file, expected_file, regen=False): result = [list(pi) for pi in patch.patch_info(test_file)] result = [[as_unicode(s), as_unicode(t), map(as_unicode, lines)] for s, t, lines in result] if regen: with codecs.open(expected_file, 'wb', encoding='utf-8') as regened: json.dump(result, regened, indent=2) with codecs.open(expected_file, 'rb', encoding='utf-8') as expect: expected = json.load(expect) assert expected == result class TestPatchInfoFailing(FileBasedTesting): test_data_dir = os.path.join(os.path.dirname(__file__), 'data') # FIXME: these tests need love and eventually a bug report upstream @expectedFailure def test_patch_info_patch_patches_misc_webkit_opensource_patches_sync_xhr_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/sync_xhr.patch') # fails with Exception Unable to parse patch file list(patch.patch_info(test_file)) @expectedFailure def test_patch_info_patch_patches_problematic_opensso_patch(self): test_file = self.get_test_loc(u'patch/patches/problematic/OpenSSO.patch') # fails with Exception Unable to parse patch file 
list(patch.patch_info(test_file)) class TestPatchInfo(FileBasedTesting): test_data_dir = os.path.join(os.path.dirname(__file__), 'data') def test_patch_info_patch_patches_dnsmasq_2_63_1_diff(self): test_file = self.get_test_loc(u'patch/patches/dnsmasq_2.63-1.diff') expected_file = self.get_test_loc('patch/patches/dnsmasq_2.63-1.diff.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_dropbear_2012_55_1_diff(self): test_file = self.get_test_loc(u'patch/patches/dropbear_2012.55-1.diff') expected_file = self.get_test_loc('patch/patches/dropbear_2012.55-1.diff.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_electricfence_2_0_5_longjmp_patch(self): test_file = self.get_test_loc(u'patch/patches/ElectricFence-2.0.5-longjmp.patch') expected_file = self.get_test_loc('patch/patches/ElectricFence-2.0.5-longjmp.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_electricfence_2_1_vaarg_patch(self): test_file = self.get_test_loc(u'patch/patches/ElectricFence-2.1-vaarg.patch') expected_file = self.get_test_loc('patch/patches/ElectricFence-2.1-vaarg.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_electricfence_2_2_2_madvise_patch(self): test_file = self.get_test_loc(u'patch/patches/ElectricFence-2.2.2-madvise.patch') expected_file = self.get_test_loc('patch/patches/ElectricFence-2.2.2-madvise.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_electricfence_2_2_2_pthread_patch(self): test_file = self.get_test_loc(u'patch/patches/ElectricFence-2.2.2-pthread.patch') expected_file = self.get_test_loc('patch/patches/ElectricFence-2.2.2-pthread.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_libmediainfo_0_7_43_diff(self): test_file = self.get_test_loc(u'patch/patches/libmediainfo-0.7.43.diff') expected_file = 
self.get_test_loc('patch/patches/libmediainfo-0.7.43.diff.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_avahi_0_6_25_patches_configure_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/avahi-0.6.25/patches/configure.patch') expected_file = self.get_test_loc('patch/patches/misc/avahi-0.6.25/patches/configure.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_avahi_0_6_25_patches_main_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/avahi-0.6.25/patches/main.c.patch') expected_file = self.get_test_loc('patch/patches/misc/avahi-0.6.25/patches/main.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_busybox_patches_fix_subarch_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/busybox/patches/fix-subarch.patch') expected_file = self.get_test_loc('patch/patches/misc/busybox/patches/fix-subarch.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_busybox_patches_gtrick_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/busybox/patches/gtrick.patch') expected_file = self.get_test_loc('patch/patches/misc/busybox/patches/gtrick.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_busybox_patches_workaround_old_uclibc_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/busybox/patches/workaround_old_uclibc.patch') expected_file = self.get_test_loc('patch/patches/misc/busybox/patches/workaround_old_uclibc.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_curl_patches_ekioh_cookie_fix_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/curl/patches/ekioh_cookie_fix.patch') expected_file = self.get_test_loc('patch/patches/misc/curl/patches/ekioh_cookie_fix.patch.expected') check_patch(test_file, expected_file) def 
test_patch_info_patch_patches_misc_e2fsprogs_1_37_uuidlibs_blkidlibs_only_target_makefile_in_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/e2fsprogs-1.37/uuidlibs_blkidlibs_only_target_Makefile.in.patch') expected_file = self.get_test_loc('patch/patches/misc/e2fsprogs-1.37/uuidlibs_blkidlibs_only_target_Makefile.in.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_ekioh_svg_opensource_patches_patch_ekioh_config_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/ekioh-svg/opensource/patches/patch_ekioh_config.patch') expected_file = self.get_test_loc('patch/patches/misc/ekioh-svg/opensource/patches/patch_ekioh_config.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_accelerated_blit_webcore_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/accelerated_blit_webcore.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/accelerated_blit_webcore.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_accelerated_blit_webkit_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/accelerated_blit_webkit.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/accelerated_blit_webkit.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_animated_gif_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/animated_gif.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/animated_gif.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_computed_style_for_transform_patch(self): test_file = 
self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/computed_style_for_transform.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/computed_style_for_transform.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_cookies_fixes_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/cookies_fixes.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/cookies_fixes.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_dlna_image_security_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/dlna_image_security.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/dlna_image_security.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_draw_pattern_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/draw_pattern.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/draw_pattern.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_enable_logs_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/enable_logs.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/enable_logs.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_enable_proxy_setup_log_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/enable_proxy_setup_log.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/enable_proxy_setup_log.patch.expected') check_patch(test_file, 
expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_file_secure_mode_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/file_secure_mode.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/file_secure_mode.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_http_secure_mode_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/http_secure_mode.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/http_secure_mode.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_javascript_screen_resolution_fix_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/javascript_screen_resolution_fix.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/javascript_screen_resolution_fix.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_keycode_webkit_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/keycode_webkit.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/keycode_webkit.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_local_file_access_whitelist_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/local_file_access_whitelist.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/local_file_access_whitelist.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_lower_case_css_attributes_for_transform_patch(self): test_file = 
self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/lower_case_css_attributes_for_transform.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/lower_case_css_attributes_for_transform.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_moving_empty_image_leaves_garbage_on_screen_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/moving_empty_image_leaves_garbage_on_screen.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/moving_empty_image_leaves_garbage_on_screen.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_open_in_new_window_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/open_in_new_window.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/open_in_new_window.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_plugin_thread_async_call_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/plugin_thread_async_call.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/plugin_thread_async_call.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_ram_cache_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/ram_cache.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/ram_cache.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_ram_cache_meta_expires_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/ram_cache_meta_expires.patch') expected_file = 
self.get_test_loc('patch/patches/misc/webkit/opensource/patches/ram_cache_meta_expires.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_speedup_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/speedup.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/speedup.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_sync_xhr_https_access_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/sync_xhr_https_access.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/sync_xhr_https_access.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_useragent_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/useragent.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/useragent.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_webcore_keyevent_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/webcore_keyevent.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/webcore_keyevent.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_webcore_videoplane_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/webcore_videoplane.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/webcore_videoplane.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_webkit_cssparser_parsetransitionshorthand_patch(self): test_file = 
self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/webkit_CSSParser_parseTransitionShorthand.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/webkit_CSSParser_parseTransitionShorthand.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_webkit_database_support_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/webkit_database_support.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/webkit_database_support.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_webkit_dlna_images_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/webkit_dlna_images.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/webkit_dlna_images.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_webkit_finish_animations_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/webkit_finish_animations.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/webkit_finish_animations.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_patches_webkit_xmlhttprequest_cross_domain_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/webkit_xmlhttprequest_cross_domain.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/patches/webkit_xmlhttprequest_cross_domain.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_prepatches_moto_createobject_null_check_patch(self): test_file = 
self.get_test_loc(u'patch/patches/misc/webkit/opensource/prepatches/moto-createobject-null-check.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/prepatches/moto-createobject-null-check.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_prepatches_moto_dump_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/prepatches/moto-dump.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/prepatches/moto-dump.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_prepatches_moto_getopensourcenotice_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/prepatches/moto-getopensourcenotice.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/prepatches/moto-getopensourcenotice.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_prepatches_moto_jsvalue_equal_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/prepatches/moto-jsvalue-equal.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/prepatches/moto-jsvalue-equal.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_prepatches_moto_timer_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/prepatches/moto-timer.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/prepatches/moto-timer.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_prepatches_support_parallel_idl_gen_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/prepatches/support_parallel_idl_gen.patch') expected_file = 
self.get_test_loc('patch/patches/misc/webkit/opensource/prepatches/support_parallel_idl_gen.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_prepatches_webcore_accept_click_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/prepatches/webcore_accept_click.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/prepatches/webcore_accept_click.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_webkit_opensource_prepatches_webcore_videoplane_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/prepatches/webcore_videoplane.patch') expected_file = self.get_test_loc('patch/patches/misc/webkit/opensource/prepatches/webcore_videoplane.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_icu_patches_ekioh_config_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/icu/patches/ekioh-config.patch') expected_file = self.get_test_loc('patch/patches/misc/icu/patches/ekioh-config.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_jfsutils_patches_largefile_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/jfsutils/patches/largefile.patch') expected_file = self.get_test_loc('patch/patches/misc/jfsutils/patches/largefile.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_libasyncns_asyncns_h_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/libasyncns/asyncns.h.patch') expected_file = self.get_test_loc('patch/patches/misc/libasyncns/asyncns.h.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_libasyncns_configure_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/libasyncns/configure.patch') expected_file = 
self.get_test_loc('patch/patches/misc/libasyncns/configure.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_libdaemon_0_13_patches_configure_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/libdaemon-0.13/patches/configure.patch') expected_file = self.get_test_loc('patch/patches/misc/libdaemon-0.13/patches/configure.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_libiconv_patches_cp932_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/libiconv/patches/cp932.patch') expected_file = self.get_test_loc('patch/patches/misc/libiconv/patches/cp932.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_libiconv_patches_make_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/libiconv/patches/make.patch') expected_file = self.get_test_loc('patch/patches/misc/libiconv/patches/make.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_libjpeg_v6b_patches_config_sub_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/libjpeg-v6b/patches/config.sub.patch') expected_file = self.get_test_loc('patch/patches/misc/libjpeg-v6b/patches/config.sub.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_libjpeg_v6b_patches_configure_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/libjpeg-v6b/patches/configure.patch') expected_file = self.get_test_loc('patch/patches/misc/libjpeg-v6b/patches/configure.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_libjpeg_v6b_patches_makefile_cfg_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/libjpeg-v6b/patches/makefile.cfg.patch') expected_file = self.get_test_loc('patch/patches/misc/libjpeg-v6b/patches/makefile.cfg.patch.expected') check_patch(test_file, expected_file) def 
test_patch_info_patch_patches_misc_libpng_1_2_8_makefile_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/libpng-1.2.8/makefile.patch') expected_file = self.get_test_loc('patch/patches/misc/libpng-1.2.8/makefile.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_libpng_1_2_8_pngconf_h_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/libpng-1.2.8/pngconf.h.patch') expected_file = self.get_test_loc('patch/patches/misc/libpng-1.2.8/pngconf.h.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_libpng_1_2_8_pngrutil_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/libpng-1.2.8/pngrutil.c.patch') expected_file = self.get_test_loc('patch/patches/misc/libpng-1.2.8/pngrutil.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_libxml2_patches_iconv_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/libxml2/patches/iconv.patch') expected_file = self.get_test_loc('patch/patches/misc/libxml2/patches/iconv.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_0001_stmmac_updated_the_driver_and_added_several_fixes_a_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/0001-stmmac-updated-the-driver-and-added-several-fixes-a.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/0001-stmmac-updated-the-driver-and-added-several-fixes-a.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_addrspace_h_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/addrspace.h.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/addrspace.h.patch.expected') check_patch(test_file, expected_file) def 
test_patch_info_patch_patches_misc_linux_st710x_patches_arch_sh_kernel_cpu_init_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/arch_sh_kernel_cpu_init.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/arch_sh_kernel_cpu_init.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_arch_sh_makefile_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/arch_sh_Makefile.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/arch_sh_Makefile.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_arch_sh_mm_init_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/arch_sh_mm_init.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/arch_sh_mm_init.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_bigphysarea_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/bigphysarea.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/bigphysarea.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_bugs_h_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/bugs.h.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/bugs.h.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_cache_sh4_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/cache-sh4.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/cache-sh4.c.patch.expected') check_patch(test_file, expected_file) def 
test_patch_info_patch_patches_misc_linux_st710x_patches_cfi_cmdset_0001_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/cfi_cmdset_0001.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/cfi_cmdset_0001.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_cfi_util_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/cfi_util.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/cfi_util.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_char_build_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/char_build.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/char_build.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_cmdlinepart_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/cmdlinepart.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/cmdlinepart.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_console_printk_loglevel_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/console_printk_loglevel.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/console_printk_loglevel.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_delayed_i2c_read_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/delayed_i2c_read.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/delayed_i2c_read.patch.expected') check_patch(test_file, expected_file) def 
test_patch_info_patch_patches_misc_linux_st710x_patches_devinet_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/devinet.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/devinet.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_disable_carrier_sense_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/disable_carrier_sense.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/disable_carrier_sense.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_disable_unaligned_printks_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/disable_unaligned_printks.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/disable_unaligned_printks.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_dma_api_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/dma-api.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/dma-api.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_do_mounts_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/do_mounts.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/do_mounts.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_drivers_net_makefile_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/drivers_net_Makefile.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/drivers_net_Makefile.patch.expected') check_patch(test_file, expected_file) 
def test_patch_info_patch_patches_misc_linux_st710x_patches_fan_ctrl_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/fan_ctrl.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/fan_ctrl.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_hcd_stm_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/hcd_stm.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/hcd_stm.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_head_s_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/head.S.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/head.S.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_i2c_stm_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/i2c-stm.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/i2c-stm.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_i2c_stm_c_patch2(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/i2c-stm.c.patch2') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/i2c-stm.c.patch2.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_i2c_nostop_for_bitbanging_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/i2c_nostop_for_bitbanging.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/i2c_nostop_for_bitbanging.patch.expected') check_patch(test_file, expected_file) def 
test_patch_info_patch_patches_misc_linux_st710x_patches_i2c_rate_normal_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/i2c_rate_normal.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/i2c_rate_normal.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_i2c_revert_to_117_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/i2c_revert_to_117.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/i2c_revert_to_117.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_if_ppp_h_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/if_ppp.h.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/if_ppp.h.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_inittmpfs_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/inittmpfs.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/inittmpfs.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_init_kconfig_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/init_Kconfig.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/init_Kconfig.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_init_main_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/init_main.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/init_main.c.patch.expected') check_patch(test_file, expected_file) def 
test_patch_info_patch_patches_misc_linux_st710x_patches_ioremap_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/ioremap.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/ioremap.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_ipconfig_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/ipconfig.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/ipconfig.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_kernel_extable_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/kernel_extable.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/kernel_extable.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_kernel_resource_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/kernel_resource.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/kernel_resource.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_kexec_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/kexec.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/kexec.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_ksymhash_elflib_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/ksymhash_elflib.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/ksymhash_elflib.c.patch.expected') check_patch(test_file, expected_file) def 
test_patch_info_patch_patches_misc_linux_st710x_patches_libata_sense_data_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/libata_sense_data.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/libata_sense_data.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_localversion_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/localversion.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/localversion.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_mach_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/mach.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/mach.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_marvell_88e3015_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/marvell_88e3015.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/marvell_88e3015.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_mb442_setup_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/mb442_setup.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/mb442_setup.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_mmu_context_h_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/mmu_context.h.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/mmu_context.h.patch.expected') check_patch(test_file, expected_file) def 
test_patch_info_patch_patches_misc_linux_st710x_patches_motorola_make_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/motorola_make.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/motorola_make.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_motorola_rootdisk_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/motorola_rootdisk.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/motorola_rootdisk.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_namespace_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/namespace.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/namespace.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_nand_flash_based_bbt_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/nand_flash_based_bbt.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/nand_flash_based_bbt.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_nand_old_oob_layout_for_yaffs2_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/nand_old_oob_layout_for_yaffs2.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/nand_old_oob_layout_for_yaffs2.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_netconsole_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/netconsole.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/netconsole.c.patch.expected') 
check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_netconsole_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/netconsole.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/netconsole.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_nfsroot_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/nfsroot.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/nfsroot.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_page_h_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/page.h.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/page.h.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_page_alloc_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/page_alloc.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/page_alloc.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_pgtable_h_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/pgtable.h.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/pgtable.h.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_phy_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/phy.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/phy.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_phy_h_patch(self): test_file = 
self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/phy.h.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/phy.h.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_phy_device_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/phy_device.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/phy_device.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_pid_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/pid.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/pid.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_pio_irq_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/pio-irq.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/pio-irq.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_pmb_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/pmb.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/pmb.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_process_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/process.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/process.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_sample_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/sample.patch') expected_file = 
self.get_test_loc('patch/patches/misc/linux-st710x/patches/sample.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_sched_cfs_v2_6_23_12_v24_1_mod_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/sched-cfs-v2.6.23.12-v24.1.mod.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/sched-cfs-v2.6.23.12-v24.1.mod.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_setup_stb7100_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/setup-stb7100.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/setup-stb7100.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_setup_stx7105_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/setup-stx7105.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/setup-stx7105.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_setup_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/setup.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/setup.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_sh_kernel_setup_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/sh_kernel_setup.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/sh_kernel_setup.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_sh_ksyms_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/sh_ksyms.c.patch') expected_file = 
self.get_test_loc('patch/patches/misc/linux-st710x/patches/sh_ksyms.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_smsc_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/smsc.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/smsc.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_smsc_makefile_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/smsc_makefile.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/smsc_makefile.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_soc_h_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/soc.h.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/soc.h.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_squashfs3_3_revert_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/squashfs3.3_revert.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/squashfs3.3_revert.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_squashfs3_3_revert1_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/squashfs3.3_revert1.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_squashfs3_3_revert2_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch') expected_file = 
self.get_test_loc('patch/patches/misc/linux-st710x/patches/squashfs3.3_revert2.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_squashfs3_3_revert3_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/squashfs3.3_revert3.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_squashfs3_4_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/squashfs3.4.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/squashfs3.4.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_stasc_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/stasc.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/stasc.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_stmmac_main_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/stmmac_main.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/stmmac_main.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_suppress_igmp_report_listening_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/suppress_igmp_report_listening.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/suppress_igmp_report_listening.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_time_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/time.c.patch') expected_file = 
self.get_test_loc('patch/patches/misc/linux-st710x/patches/time.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_unionfs_2_5_1_for_2_6_23_17_diff(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/unionfs-2.5.1_for_2.6.23.17.diff.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_unionfs_remove_debug_printouts_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/unionfs_remove_debug_printouts.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/unionfs_remove_debug_printouts.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_vip19x0_vidmem_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/vip19x0_vidmem.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/vip19x0_vidmem.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_vip19x3_board_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/vip19x3_board.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/vip19x3_board.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_vip19xx_h_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/vip19xx.h.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/vip19xx.h.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_vip19xx_nand_patch(self): test_file = 
self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/vip19xx_nand.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/vip19xx_nand.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_vip19xx_nor_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/vip19xx_nor.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/vip19xx_nor.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_vt_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/vt.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/vt.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_yaffs2_2008_07_15_for_2_6_23_17_yaffs_guts_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17-yaffs_guts.c.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17-yaffs_guts.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_linux_st710x_patches_yaffs2_2008_07_15_for_2_6_23_17_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch') expected_file = self.get_test_loc('patch/patches/misc/linux-st710x/patches/yaffs2-2008.07.15_for_2.6.23.17.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_npapi_patches_npapi_h_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/npapi/patches/npapi.h.patch') expected_file = self.get_test_loc('patch/patches/misc/npapi/patches/npapi.h.patch.expected') check_patch(test_file, expected_file) def 
test_patch_info_patch_patches_misc_openssl_0_9_8_patches_configure_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/openssl-0.9.8/patches/Configure.patch') expected_file = self.get_test_loc('patch/patches/misc/openssl-0.9.8/patches/Configure.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_sqlite_patches_permissions_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/sqlite/patches/permissions.patch') expected_file = self.get_test_loc('patch/patches/misc/sqlite/patches/permissions.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_udhcp_0_9_8_patch_arpping_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/udhcp-0.9.8/patch/arpping.c.patch') expected_file = self.get_test_loc('patch/patches/misc/udhcp-0.9.8/patch/arpping.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_udhcp_0_9_8_patch_clientpacket_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/udhcp-0.9.8/patch/clientpacket.c.patch') expected_file = self.get_test_loc('patch/patches/misc/udhcp-0.9.8/patch/clientpacket.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_udhcp_0_9_8_patch_clientpacket_h_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/udhcp-0.9.8/patch/clientpacket.h.patch') expected_file = self.get_test_loc('patch/patches/misc/udhcp-0.9.8/patch/clientpacket.h.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_udhcp_0_9_8_patch_debug_h_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/udhcp-0.9.8/patch/debug.h.patch') expected_file = self.get_test_loc('patch/patches/misc/udhcp-0.9.8/patch/debug.h.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_udhcp_0_9_8_patch_dhcpc_c_patch(self): test_file = 
self.get_test_loc(u'patch/patches/misc/udhcp-0.9.8/patch/dhcpc.c.patch') expected_file = self.get_test_loc('patch/patches/misc/udhcp-0.9.8/patch/dhcpc.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_udhcp_0_9_8_patch_dhcpc_h_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/udhcp-0.9.8/patch/dhcpc.h.patch') expected_file = self.get_test_loc('patch/patches/misc/udhcp-0.9.8/patch/dhcpc.h.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_udhcp_0_9_8_patch_dhcpd_h_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/udhcp-0.9.8/patch/dhcpd.h.patch') expected_file = self.get_test_loc('patch/patches/misc/udhcp-0.9.8/patch/dhcpd.h.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_udhcp_0_9_8_patch_makefile_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/udhcp-0.9.8/patch/Makefile.patch') expected_file = self.get_test_loc('patch/patches/misc/udhcp-0.9.8/patch/Makefile.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_udhcp_0_9_8_patch_options_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/udhcp-0.9.8/patch/options.c.patch') expected_file = self.get_test_loc('patch/patches/misc/udhcp-0.9.8/patch/options.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_udhcp_0_9_8_patch_options_h_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/udhcp-0.9.8/patch/options.h.patch') expected_file = self.get_test_loc('patch/patches/misc/udhcp-0.9.8/patch/options.h.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_udhcp_0_9_8_patch_packet_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/udhcp-0.9.8/patch/packet.c.patch') expected_file = self.get_test_loc('patch/patches/misc/udhcp-0.9.8/patch/packet.c.patch.expected') check_patch(test_file, 
expected_file) def test_patch_info_patch_patches_misc_udhcp_0_9_8_patch_packet_h_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/udhcp-0.9.8/patch/packet.h.patch') expected_file = self.get_test_loc('patch/patches/misc/udhcp-0.9.8/patch/packet.h.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_udhcp_0_9_8_patch_route_patch1(self): test_file = self.get_test_loc(u'patch/patches/misc/udhcp-0.9.8/patch/route.patch1') expected_file = self.get_test_loc('patch/patches/misc/udhcp-0.9.8/patch/route.patch1.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_udhcp_0_9_8_patch_script_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/udhcp-0.9.8/patch/script.c.patch') expected_file = self.get_test_loc('patch/patches/misc/udhcp-0.9.8/patch/script.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_udhcp_0_9_8_patch_t1t2_patch1(self): test_file = self.get_test_loc(u'patch/patches/misc/udhcp-0.9.8/patch/t1t2.patch1') expected_file = self.get_test_loc('patch/patches/misc/udhcp-0.9.8/patch/t1t2.patch1.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_vqec_patch_build_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/vqec/patch/BUILD.patch') expected_file = self.get_test_loc('patch/patches/misc/vqec/patch/BUILD.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_vqec_patch_cross_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/vqec/patch/cross.patch') expected_file = self.get_test_loc('patch/patches/misc/vqec/patch/cross.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_vqec_patch_uclibc_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/vqec/patch/uclibc.patch') expected_file = self.get_test_loc('patch/patches/misc/vqec/patch/uclibc.patch.expected') 
check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_vqec_patch_vqec_ifclient_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/vqec/patch/vqec_ifclient.patch') expected_file = self.get_test_loc('patch/patches/misc/vqec/patch/vqec_ifclient.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_vqec_patch_vqec_wv_c_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/vqec/patch/vqec_wv.c.patch') expected_file = self.get_test_loc('patch/patches/misc/vqec/patch/vqec_wv.c.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_misc_vqec_patch_vqec_wv_h_patch(self): test_file = self.get_test_loc(u'patch/patches/misc/vqec/patch/vqec_wv.h.patch') expected_file = self.get_test_loc('patch/patches/misc/vqec/patch/vqec_wv.h.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_postgrey_1_30_group_patch(self): test_file = self.get_test_loc(u'patch/patches/postgrey-1.30-group.patch') expected_file = self.get_test_loc('patch/patches/postgrey-1.30-group.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_windows_drupal_upload_patch(self): test_file = self.get_test_loc(u'patch/patches/windows/drupal_upload.patch') expected_file = self.get_test_loc('patch/patches/windows/drupal_upload.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_windows_ether_patch_1_patch(self): test_file = self.get_test_loc(u'patch/patches/windows/ether_patch_1.patch') expected_file = self.get_test_loc('patch/patches/windows/ether_patch_1.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_windows_js_delete_patch(self): test_file = self.get_test_loc(u'patch/patches/windows/js_delete.patch') expected_file = self.get_test_loc('patch/patches/windows/js_delete.patch.expected') check_patch(test_file, expected_file) def 
test_patch_info_patch_patches_windows_plugin_explorer_patch(self): test_file = self.get_test_loc(u'patch/patches/windows/plugin explorer.patch') expected_file = self.get_test_loc('patch/patches/windows/plugin explorer.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_windows_resolveentity32_patch(self): test_file = self.get_test_loc(u'patch/patches/windows/resolveentity32.patch') expected_file = self.get_test_loc('patch/patches/windows/resolveentity32.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_windows_sift_patch(self): test_file = self.get_test_loc(u'patch/patches/windows/sift.patch') expected_file = self.get_test_loc('patch/patches/windows/sift.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_windows_thumbnail_support_0_patch(self): test_file = self.get_test_loc(u'patch/patches/windows/thumbnail_support_0.patch') expected_file = self.get_test_loc('patch/patches/windows/thumbnail_support_0.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_windows_thumbnail_support_0_patch_1(self): test_file = self.get_test_loc(u'patch/patches/windows/thumbnail_support_0.patch.1') expected_file = self.get_test_loc('patch/patches/windows/thumbnail_support_0.patch.1.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_windows_webform_3_0_conditional_constructor_0_patch(self): test_file = self.get_test_loc(u'patch/patches/windows/webform-3.0-conditional_constructor_0.patch') expected_file = self.get_test_loc('patch/patches/windows/webform-3.0-conditional_constructor_0.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_windows_xml_rpc_addspace_patch(self): test_file = self.get_test_loc(u'patch/patches/windows/xml_rpc_addSpace.patch') expected_file = self.get_test_loc('patch/patches/windows/xml_rpc_addSpace.patch.expected') check_patch(test_file, 
expected_file) def test_patch_info_patch_patches_xvidcap_1_1_6_docdir_patch(self): test_file = self.get_test_loc(u'patch/patches/xvidcap-1.1.6-docdir.patch') expected_file = self.get_test_loc('patch/patches/xvidcap-1.1.6-docdir.patch.expected') check_patch(test_file, expected_file) def test_patch_info_patch_patches_xvidcap_xorg_patch(self): test_file = self.get_test_loc(u'patch/patches/xvidcap-xorg.patch') expected_file = self.get_test_loc('patch/patches/xvidcap-xorg.patch.expected') check_patch(test_file, expected_file)
yashdsaraf/scancode-toolkit
tests/extractcode/test_patch.py
Python
apache-2.0
72,541
# Copyright 2001 by Tarjei Mikkelsen. All rights reserved.
# Copyright 2007 by Michiel de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.

"""Code to work with the KEGG Enzyme database.

Functions:
parse - Returns an iterator giving Record objects.

Classes:
Record -- Holds the information from a KEGG Enzyme record.
"""

from __future__ import print_function

from Bio.KEGG import _write_kegg
from Bio.KEGG import _wrap_kegg


# Set up line wrapping rules (see Bio.KEGG._wrap_kegg)
rxn_wrap = [0, "",
            (" + ", "", 1, 1),
            (" = ", "", 1, 1),
            (" ", "$", 1, 1),
            ("-", "$", 1, 1)]
name_wrap = [0, "",
             (" ", "$", 1, 1),
             ("-", "$", 1, 1)]


def id_wrap(indent):
    """Return the wrapping rule for a plain identifier list at *indent*.

    Replaces the original ``id_wrap = lambda indent: ...`` (PEP 8 E731);
    behavior is identical.
    """
    return [indent, "", (" ", "", 1, 0)]


def struct_wrap(indent):
    """Return the wrapping rule for a structure-id list at *indent*.

    Replaces the original ``struct_wrap = lambda indent: ...`` (PEP 8 E731);
    behavior is identical.
    """
    return [indent, "", (" ", "", 1, 1)]


class Record(object):
    """Holds info from a KEGG Enzyme record.

    Members:
    entry       The EC number (without the 'EC ').
    name        A list of the enzyme names.
    classname   A list of the classification terms.
    sysname     The systematic name of the enzyme.
    reaction    A list of the reaction description strings.
    substrate   A list of the substrates.
    product     A list of the products.
    inhibitor   A list of the inhibitors.
    cofactor    A list of the cofactors.
    effector    A list of the effectors.
    comment     A list of the comment strings.
    pathway     A list of 3-tuples: (database, id, pathway)
    genes       A list of 2-tuples: (organism, list of gene ids)
    disease     A list of 3-tuples: (database, id, disease)
    structures  A list of 2-tuples: (database, list of struct ids)
    dblinks     A list of 2-tuples: (database, list of db ids)
    """

    def __init__(self):
        """Initialize a new, empty Record."""
        self.entry = ""
        self.name = []
        self.classname = []
        self.sysname = []
        self.reaction = []
        self.substrate = []
        self.product = []
        self.inhibitor = []
        self.cofactor = []
        self.effector = []
        self.comment = []
        self.pathway = []
        self.genes = []
        self.disease = []
        self.structures = []
        self.dblinks = []

    def __str__(self):
        """Return a KEGG-formatted string representation of this Record."""
        return self._entry() + \
               self._name() + \
               self._classname() + \
               self._sysname() + \
               self._reaction() + \
               self._substrate() + \
               self._product() + \
               self._inhibitor() + \
               self._cofactor() + \
               self._effector() + \
               self._comment() + \
               self._pathway() + \
               self._genes() + \
               self._disease() + \
               self._structures() + \
               self._dblinks() + \
               "///"

    def _entry(self):
        return _write_kegg("ENTRY",
                           ["EC " + self.entry])

    def _name(self):
        return _write_kegg("NAME",
                           [_wrap_kegg(l, wrap_rule=name_wrap)
                            for l in self.name])

    def _classname(self):
        return _write_kegg("CLASS",
                           self.classname)

    def _sysname(self):
        return _write_kegg("SYSNAME",
                           [_wrap_kegg(l, wrap_rule=name_wrap)
                            for l in self.sysname])

    def _reaction(self):
        return _write_kegg("REACTION",
                           [_wrap_kegg(l, wrap_rule=rxn_wrap)
                            for l in self.reaction])

    def _substrate(self):
        return _write_kegg("SUBSTRATE",
                           [_wrap_kegg(l, wrap_rule=name_wrap)
                            for l in self.substrate])

    def _product(self):
        return _write_kegg("PRODUCT",
                           [_wrap_kegg(l, wrap_rule=name_wrap)
                            for l in self.product])

    def _inhibitor(self):
        return _write_kegg("INHIBITOR",
                           [_wrap_kegg(l, wrap_rule=name_wrap)
                            for l in self.inhibitor])

    def _cofactor(self):
        return _write_kegg("COFACTOR",
                           [_wrap_kegg(l, wrap_rule=name_wrap)
                            for l in self.cofactor])

    def _effector(self):
        return _write_kegg("EFFECTOR",
                           [_wrap_kegg(l, wrap_rule=name_wrap)
                            for l in self.effector])

    def _comment(self):
        return _write_kegg("COMMENT",
                           [_wrap_kegg(l, wrap_rule=id_wrap(0))
                            for l in self.comment])

    def _pathway(self):
        s = []
        for entry in self.pathway:
            s.append(entry[0] + ": " + entry[1] + "  " + entry[2])
        return _write_kegg("PATHWAY",
                           [_wrap_kegg(l, wrap_rule=id_wrap(16))
                            for l in s])

    def _genes(self):
        s = []
        for entry in self.genes:
            s.append(entry[0] + ": " + " ".join(entry[1]))
        return _write_kegg("GENES",
                           [_wrap_kegg(l, wrap_rule=id_wrap(5))
                            for l in s])

    def _disease(self):
        s = []
        for entry in self.disease:
            s.append(entry[0] + ": " + entry[1] + "  " + entry[2])
        return _write_kegg("DISEASE",
                           [_wrap_kegg(l, wrap_rule=id_wrap(13))
                            for l in s])

    def _structures(self):
        s = []
        for entry in self.structures:
            s.append(entry[0] + ": " + "  ".join(entry[1]) + "  ")
        return _write_kegg("STRUCTURES",
                           [_wrap_kegg(l, wrap_rule=struct_wrap(5))
                            for l in s])

    def _dblinks(self):
        # This is a bit of a cheat that won't work if enzyme entries
        # have more than one link id per db id. For now, that's not
        # the case - storing links ids in a list is only to make
        # this class similar to the Compound.Record class.
        s = []
        for entry in self.dblinks:
            s.append(entry[0] + ": " + " ".join(entry[1]))
        return _write_kegg("DBLINKS", s)


def parse(handle):
    """Parse a KEGG Enzyme file, returning Record objects.

    This is an iterator function, typically used in a for loop.  For
    example, using one of the example KEGG files in the Biopython
    test suite,

    >>> with open("KEGG/enzyme.sample") as handle:
    ...     for record in parse(handle):
    ...         print("%s %s" % (record.entry, record.name[0]))
    ...
    1.1.1.1 Alcohol dehydrogenase
    1.1.1.62 Estradiol 17beta-dehydrogenase
    1.1.1.68 Transferred to EC 1.7.99.5
    1.6.5.3 NADH dehydrogenase (ubiquinone)
    1.14.13.28 3,9-Dihydroxypterocarpan 6a-monooxygenase
    2.4.1.68 Glycoprotein 6-alpha-L-fucosyltransferase
    3.1.1.6 Acetylesterase
    2.7.2.1 Acetate kinase
    """
    record = Record()
    for line in handle:
        if line[:3] == "///":
            # End-of-record marker: emit the finished record and start anew.
            yield record
            record = Record()
            continue
        # KEGG flat files use a fixed 12-column keyword field; a line whose
        # first 12 columns are blank continues the previous keyword.
        # NOTE(review): the 12-space/keyword padding below is reconstructed
        # from the line[:12] slicing; verify against an actual KEGG file.
        if line[:12] != "            ":
            keyword = line[:12]
        data = line[12:].strip()
        if keyword == "ENTRY       ":
            words = data.split()
            record.entry = words[1]
        elif keyword == "CLASS       ":
            record.classname.append(data)
        elif keyword == "COFACTOR    ":
            record.cofactor.append(data)
        elif keyword == "COMMENT     ":
            record.comment.append(data)
        elif keyword == "DBLINKS     ":
            if ":" in data:
                key, values = data.split(":")
                values = values.split()
                row = (key, values)
                record.dblinks.append(row)
            else:
                # Continuation line: extend the ids of the last database.
                row = record.dblinks[-1]
                key, values = row
                values.extend(data.split())
                row = key, values
                record.dblinks[-1] = row
        elif keyword == "DISEASE     ":
            if ":" in data:
                database, data = data.split(":")
                number, name = data.split(None, 1)
                row = (database, number, name)
                record.disease.append(row)
            else:
                # Continuation line: the disease name wrapped onto this line.
                row = record.disease[-1]
                database, number, name = row
                name = name + " " + data
                row = database, number, name
                record.disease[-1] = row
        elif keyword == "EFFECTOR    ":
            record.effector.append(data.strip(";"))
        elif keyword == "GENES       ":
            if data[3:5] == ': ':
                key, values = data.split(":", 1)
                # Strip any "(...)" qualifier from each gene id.
                values = [value.split("(")[0] for value in values.split()]
                row = (key, values)
                record.genes.append(row)
            else:
                # Continuation line: more gene ids for the last organism.
                row = record.genes[-1]
                key, values = row
                for value in data.split():
                    value = value.split("(")[0]
                    values.append(value)
                row = key, values
                record.genes[-1] = row
        elif keyword == "INHIBITOR   ":
            record.inhibitor.append(data.strip(";"))
        elif keyword == "NAME        ":
            record.name.append(data.strip(";"))
        elif keyword == "PATHWAY     ":
            if data[:5] == 'PATH:':
                _, map_num, name = data.split(None, 2)
                pathway = ('PATH', map_num, name)
                record.pathway.append(pathway)
            else:
                ec_num, name = data.split(None, 1)
                pathway = 'PATH', ec_num, name
                record.pathway.append(pathway)
        elif keyword == "PRODUCT     ":
            record.product.append(data.strip(";"))
        elif keyword == "REACTION    ":
            record.reaction.append(data.strip(";"))
        elif keyword == "STRUCTURES  ":
            if data[:4] == 'PDB:':
                database = data[:3]
                accessions = data[4:].split()
                row = (database, accessions)
                record.structures.append(row)
            else:
                # Continuation line: more structure ids for the last database.
                row = record.structures[-1]
                database, accessions = row
                accessions.extend(data.split())
                row = (database, accessions)
                record.structures[-1] = row
        elif keyword == "SUBSTRATE   ":
            record.substrate.append(data.strip(";"))
        elif keyword == "SYSNAME     ":
            record.sysname.append(data.strip(";"))


if __name__ == "__main__":
    from Bio._utils import run_doctest
    run_doctest()
poojavade/Genomics_Docker
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/KEGG/Enzyme/__init__.py
Python
apache-2.0
10,978
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from StringIO import StringIO

import unittest2
from mock import (call, patch, Mock, MagicMock)
import paramiko

from st2actions.runners.ssh.paramiko_ssh import ParamikoSSHClient
from st2tests.fixturesloader import get_resources_base_path
import st2tests.config as tests_config
tests_config.parse_args()


class ParamikoSSHClientTests(unittest2.TestCase):
    """Unit tests for ParamikoSSHClient with the paramiko transport mocked out."""

    @patch('paramiko.SSHClient', Mock)
    def setUp(self):
        """
        Creates the object patching the actual connection.
        """
        conn_params = {'hostname': 'dummy.host.org',
                       'port': 8822,
                       'username': 'ubuntu',
                       'key': '~/.ssh/ubuntu_ssh',
                       'timeout': '600'}
        self.ssh_cli = ParamikoSSHClient(**conn_params)

    @patch('paramiko.SSHClient', Mock)
    def test_create_with_password(self):
        """Password-only credentials disable agent and key lookup."""
        conn_params = {'hostname': 'dummy.host.org',
                       'username': 'ubuntu',
                       'password': 'ubuntu'}
        mock = ParamikoSSHClient(**conn_params)
        mock.connect()

        expected_conn = {'username': 'ubuntu',
                         'password': 'ubuntu',
                         'allow_agent': False,
                         'hostname': 'dummy.host.org',
                         'look_for_keys': False,
                         'port': 22}
        mock.client.connect.assert_called_once_with(**expected_conn)

    @patch('paramiko.SSHClient', Mock)
    def test_deprecated_key_argument(self):
        """The deprecated 'key' argument maps to paramiko's key_filename."""
        conn_params = {'hostname': 'dummy.host.org',
                       'username': 'ubuntu',
                       'key': 'id_rsa'}
        mock = ParamikoSSHClient(**conn_params)
        mock.connect()

        expected_conn = {'username': 'ubuntu',
                         'allow_agent': False,
                         'hostname': 'dummy.host.org',
                         'look_for_keys': False,
                         'key_filename': 'id_rsa',
                         'port': 22}
        mock.client.connect.assert_called_once_with(**expected_conn)

    def test_key_files_and_key_material_arguments_are_mutual_exclusive(self):
        """Passing both key_files and key_material must raise ValueError."""
        conn_params = {'hostname': 'dummy.host.org',
                       'username': 'ubuntu',
                       'key_files': 'id_rsa',
                       'key_material': 'key'}

        expected_msg = ('key_files and key_material arguments are mutually '
                        'exclusive')
        self.assertRaisesRegexp(ValueError, expected_msg,
                                ParamikoSSHClient, **conn_params)

    @patch('paramiko.SSHClient', Mock)
    def test_key_material_argument(self):
        """An in-memory private key is passed to paramiko as a pkey object."""
        path = os.path.join(get_resources_base_path(),
                            'ssh', 'dummy_rsa')

        with open(path, 'r') as fp:
            private_key = fp.read()

        conn_params = {'hostname': 'dummy.host.org',
                       'username': 'ubuntu',
                       'key_material': private_key}
        mock = ParamikoSSHClient(**conn_params)
        mock.connect()

        pkey = paramiko.RSAKey.from_private_key(StringIO(private_key))
        expected_conn = {'username': 'ubuntu',
                         'allow_agent': False,
                         'hostname': 'dummy.host.org',
                         'look_for_keys': False,
                         'pkey': pkey,
                         'port': 22}
        mock.client.connect.assert_called_once_with(**expected_conn)

    @patch('paramiko.SSHClient', Mock)
    def test_key_material_argument_invalid_key(self):
        """Non-key text as key_material raises a paramiko SSHException."""
        conn_params = {'hostname': 'dummy.host.org',
                       'username': 'ubuntu',
                       'key_material': 'id_rsa'}

        mock = ParamikoSSHClient(**conn_params)

        expected_msg = 'Invalid or unsupported key type'
        self.assertRaisesRegexp(paramiko.ssh_exception.SSHException,
                                expected_msg, mock.connect)

    @patch('paramiko.SSHClient', Mock)
    def test_create_with_key(self):
        """key_files is forwarded as paramiko's key_filename."""
        conn_params = {'hostname': 'dummy.host.org',
                       'username': 'ubuntu',
                       'key_files': 'id_rsa'}
        mock = ParamikoSSHClient(**conn_params)
        mock.connect()

        expected_conn = {'username': 'ubuntu',
                         'allow_agent': False,
                         'hostname': 'dummy.host.org',
                         'look_for_keys': False,
                         'key_filename': 'id_rsa',
                         'port': 22}
        mock.client.connect.assert_called_once_with(**expected_conn)

    @patch('paramiko.SSHClient', Mock)
    def test_create_with_password_and_key(self):
        """Password and key may be combined; both reach paramiko."""
        conn_params = {'hostname': 'dummy.host.org',
                       'username': 'ubuntu',
                       'password': 'ubuntu',
                       'key': 'id_rsa'}
        mock = ParamikoSSHClient(**conn_params)
        mock.connect()

        expected_conn = {'username': 'ubuntu',
                         'password': 'ubuntu',
                         'allow_agent': False,
                         'hostname': 'dummy.host.org',
                         'look_for_keys': False,
                         'key_filename': 'id_rsa',
                         'port': 22}
        mock.client.connect.assert_called_once_with(**expected_conn)

    @patch('paramiko.SSHClient', Mock)
    def test_create_without_credentials(self):
        """
        Initialize object with no credentials.

        Just to have better coverage, initialize the object
        without 'password' neither 'key'.
        """
        conn_params = {'hostname': 'dummy.host.org',
                       'username': 'ubuntu'}
        mock = ParamikoSSHClient(**conn_params)
        mock.connect()

        # With no credentials the client falls back to the SSH agent and
        # default key locations.
        expected_conn = {'username': 'ubuntu',
                         'hostname': 'dummy.host.org',
                         'allow_agent': True,
                         'look_for_keys': True,
                         'port': 22}
        mock.client.connect.assert_called_once_with(**expected_conn)

    @patch.object(ParamikoSSHClient, '_consume_stdout',
                  MagicMock(return_value=StringIO('')))
    @patch.object(ParamikoSSHClient, '_consume_stderr',
                  MagicMock(return_value=StringIO('')))
    @patch.object(os.path, 'exists', MagicMock(return_value=True))
    @patch.object(os, 'stat', MagicMock(return_value=None))
    def test_basic_usage_absolute_path(self):
        """
        Basic execution.
        """
        mock = self.ssh_cli
        # script to execute
        sd = "/root/random_script.sh"

        # Connect behavior
        mock.connect()
        mock_cli = mock.client  # The actual mocked object: SSHClient
        expected_conn = {'username': 'ubuntu',
                         'key_filename': '~/.ssh/ubuntu_ssh',
                         'allow_agent': False,
                         'hostname': 'dummy.host.org',
                         'look_for_keys': False,
                         'timeout': '600',
                         'port': 8822}
        mock_cli.connect.assert_called_once_with(**expected_conn)

        mock.put(sd, sd, mirror_local_mode=False)
        mock_cli.open_sftp().put.assert_called_once_with(sd, sd)

        mock.run(sd)

        # Make assertions over 'run' method
        mock_cli.get_transport().open_session().exec_command \
                .assert_called_once_with(sd)

        mock.close()

    def test_delete_script(self):
        """
        Provide a basic test with 'delete' action.
        """
        mock = self.ssh_cli
        # script to execute
        sd = '/root/random_script.sh'

        mock.connect()

        mock.delete_file(sd)
        # Make assertions over the 'delete' method
        mock.client.open_sftp().unlink.assert_called_with(sd)

        mock.close()

    @patch.object(ParamikoSSHClient, 'exists', return_value=False)
    def test_put_dir(self, *args):
        """Recursive put_dir mirrors local dirs and files onto the remote box."""
        mock = self.ssh_cli
        mock.connect()

        local_dir = os.path.join(get_resources_base_path(), 'packs')
        mock.put_dir(local_path=local_dir, remote_path='/tmp')

        mock_cli = mock.client  # The actual mocked object: SSHClient

        # Assert that expected dirs are created on remote box.
        calls = [call('/tmp/packs/pythonactions'),
                 call('/tmp/packs/pythonactions/actions')]
        mock_cli.open_sftp().mkdir.assert_has_calls(calls, any_order=True)

        # Assert that expected files are copied to remote box.
        local_file = os.path.join(get_resources_base_path(),
                                  'packs/pythonactions/actions/pascal_row.py')
        remote_file = os.path.join('/tmp',
                                   'packs/pythonactions/actions/pascal_row.py')

        calls = [call(local_file, remote_file)]
        mock_cli.open_sftp().put.assert_has_calls(calls, any_order=True)
Itxaka/st2
st2actions/tests/unit/test_paramiko_ssh.py
Python
apache-2.0
9,657
# Copyright 2013 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_serialization import jsonutils as json

from tempest.lib.api_schema.response.compute.v2_1 import aggregates as schema
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc


class AggregatesClient(rest_client.RestClient):
    """Client for the Nova v2.1 os-aggregates API."""

    def list_aggregates(self):
        """Get aggregate list."""
        resp, body = self.get("os-aggregates")
        body = json.loads(body)
        self.validate_response(schema.list_aggregates, resp, body)
        return rest_client.ResponseBody(resp, body)

    def show_aggregate(self, aggregate_id):
        """Get details of the given aggregate."""
        resp, body = self.get("os-aggregates/%s" % aggregate_id)
        body = json.loads(body)
        self.validate_response(schema.get_aggregate, resp, body)
        return rest_client.ResponseBody(resp, body)

    def create_aggregate(self, **kwargs):
        """Create a new aggregate.

        Available params: see http://developer.openstack.org/
                              api-ref-compute-v2.1.html#createaggregate
        """
        post_body = json.dumps({'aggregate': kwargs})
        resp, body = self.post('os-aggregates', post_body)
        body = json.loads(body)
        self.validate_response(schema.create_aggregate, resp, body)
        return rest_client.ResponseBody(resp, body)

    def update_aggregate(self, aggregate_id, **kwargs):
        """Update an aggregate.

        Available params: see http://developer.openstack.org/
                              api-ref-compute-v2.1.html#updateaggregate
        """
        put_body = json.dumps({'aggregate': kwargs})
        resp, body = self.put('os-aggregates/%s' % aggregate_id, put_body)
        body = json.loads(body)
        self.validate_response(schema.update_aggregate, resp, body)
        return rest_client.ResponseBody(resp, body)

    def delete_aggregate(self, aggregate_id):
        """Delete the given aggregate."""
        resp, body = self.delete("os-aggregates/%s" % aggregate_id)
        self.validate_response(schema.delete_aggregate, resp, body)
        return rest_client.ResponseBody(resp, body)

    def is_resource_deleted(self, id):
        # Used by tempest's generic cleanup waiters: deletion is complete
        # once show_aggregate() raises NotFound.
        try:
            self.show_aggregate(id)
        except lib_exc.NotFound:
            return True
        return False

    @property
    def resource_type(self):
        """Return the primary type of resource this client works with."""
        return 'aggregate'

    def add_host(self, aggregate_id, **kwargs):
        """Add a host to the given aggregate.

        Available params: see http://developer.openstack.org/
                              api-ref-compute-v2.1.html#addhost
        """
        post_body = json.dumps({'add_host': kwargs})
        resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                               post_body)
        body = json.loads(body)
        self.validate_response(schema.aggregate_add_remove_host, resp, body)
        return rest_client.ResponseBody(resp, body)

    def remove_host(self, aggregate_id, **kwargs):
        """Remove a host from the given aggregate.

        Available params: see http://developer.openstack.org/
                              api-ref-compute-v2.1.html#removehost
        """
        post_body = json.dumps({'remove_host': kwargs})
        resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                               post_body)
        body = json.loads(body)
        self.validate_response(schema.aggregate_add_remove_host, resp, body)
        return rest_client.ResponseBody(resp, body)

    def set_metadata(self, aggregate_id, **kwargs):
        """Replace the aggregate's existing metadata with new metadata."""
        post_body = json.dumps({'set_metadata': kwargs})
        resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                               post_body)
        body = json.loads(body)
        self.validate_response(schema.aggregate_set_metadata, resp, body)
        return rest_client.ResponseBody(resp, body)
nuagenetworks/tempest
tempest/lib/services/compute/aggregates_client.py
Python
apache-2.0
4,667
"""Auto-generated file, do not edit by hand. GG metadata""" from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata PHONE_METADATA_GG = PhoneMetadata(id='GG', country_code=44, international_prefix='00', general_desc=PhoneNumberDesc(national_number_pattern='[135789]\\d{6,9}', possible_length=(7, 9, 10), possible_length_local_only=(6,)), fixed_line=PhoneNumberDesc(national_number_pattern='1481[25-9]\\d{5}', example_number='1481256789', possible_length=(10,), possible_length_local_only=(6,)), mobile=PhoneNumberDesc(national_number_pattern='7(?:781\\d|839\\d|911[17])\\d{5}', example_number='7781123456', possible_length=(10,)), toll_free=PhoneNumberDesc(national_number_pattern='80(?:0(?:1111|\\d{6,7})|8\\d{7})', example_number='8001234567', possible_length=(7, 9, 10)), premium_rate=PhoneNumberDesc(national_number_pattern='(?:87[123]|9(?:[01]\\d|8[0-3]))\\d{7}', example_number='9012345678', possible_length=(10,)), shared_cost=PhoneNumberDesc(national_number_pattern='8(?:4(?:5464\\d|[2-5]\\d{7})|70\\d{7})', example_number='8431234567', possible_length=(7, 10)), personal_number=PhoneNumberDesc(national_number_pattern='70\\d{8}', example_number='7012345678', possible_length=(10,)), voip=PhoneNumberDesc(national_number_pattern='56\\d{8}', example_number='5612345678', possible_length=(10,)), pager=PhoneNumberDesc(national_number_pattern='76(?:0[012]|2[356]|4[0134]|5[49]|6[0-369]|77|81|9[39])\\d{6}', example_number='7640123456', possible_length=(10,)), uan=PhoneNumberDesc(national_number_pattern='(?:3[0347]|55)\\d{8}', example_number='5512345678', possible_length=(10,)), national_prefix='0', national_prefix_for_parsing='0')
gencer/python-phonenumbers
python/phonenumbers/data/region_GG.py
Python
apache-2.0
1,706
import requests
import logging

from fetcher import fetch
from os.path import join
from urlobj import URLObj
from urllib.parse import urljoin, urlsplit, urlunsplit


class RobotsParser:
    """Fetches a site's robots.txt and extracts its disallowed paths."""

    def __init__(self, domain):
        # Base URL the robots.txt is resolved against; also used as the
        # prefix for the blacklisted paths returned by parse().
        self.domain = domain

    # Check if the file even exists first.
    def exists(self):
        """Return True when robots.txt was fetched successfully (HTTP 200)."""
        resp = fetch(URLObj(join(self.domain, 'robots.txt')))
        return (resp is not None) and (resp.status_code == requests.codes.ok)

    # Actually parse the file.
    def parse(self):
        """Return the list of disallowed URLs (domain joined with each path).

        Blank lines and '#' comments are skipped.  Only 'Disallow' rules
        are honored; User-agent sections are not distinguished, so every
        rule in the file applies.
        """
        logging.info("Parsing robots.txt")
        blackpaths = []
        resp = fetch(URLObj(join(self.domain, 'robots.txt')))
        for line in resp.text.split('\n'):
            line = line.strip()
            # BUGFIX: str.strip() never returns None, so the former
            # 'elif line is None' branch was dead code and blank lines
            # fell through to the Disallow check.  Test emptiness instead.
            if not line or line.startswith('#'):
                continue
            if line.startswith('Disallow'):
                # BUGFIX: split on the first ':' only and tolerate a
                # missing/empty value.  'line.split(':')[1]' raised
                # IndexError on a colon-less line, and an empty
                # 'Disallow:' value (which means "allow all" in the
                # robots.txt convention) used to blacklist the domain
                # root itself via join(domain, '').
                _, sep, value = line.partition(':')
                badpath = value.strip().strip('/')
                if sep and badpath:
                    blackpaths.append(badpath)
        return [join(self.domain, b) for b in blackpaths]
get9/monkeyshines
robotsparser.py
Python
apache-2.0
1,038
"""Tests for the init module.""" import asyncio from asynctest import Mock, patch from pyheos import CommandError, const import pytest from homeassistant.components.heos import ( ControllerManager, async_setup_entry, async_unload_entry, ) from homeassistant.components.heos.const import ( DATA_CONTROLLER_MANAGER, DATA_SOURCE_MANAGER, DOMAIN, ) from homeassistant.components.media_player.const import DOMAIN as MEDIA_PLAYER_DOMAIN from homeassistant.const import CONF_HOST from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.setup import async_setup_component async def test_async_setup_creates_entry(hass, config): """Test component setup creates entry from config.""" assert await async_setup_component(hass, DOMAIN, config) await hass.async_block_till_done() entries = hass.config_entries.async_entries(DOMAIN) assert len(entries) == 1 entry = entries[0] assert entry.title == "Controller (127.0.0.1)" assert entry.data == {CONF_HOST: "127.0.0.1"} async def test_async_setup_updates_entry(hass, config_entry, config, controller): """Test component setup updates entry from config.""" config[DOMAIN][CONF_HOST] = "127.0.0.2" config_entry.add_to_hass(hass) assert await async_setup_component(hass, DOMAIN, config) await hass.async_block_till_done() entries = hass.config_entries.async_entries(DOMAIN) assert len(entries) == 1 entry = entries[0] assert entry.title == "Controller (127.0.0.2)" assert entry.data == {CONF_HOST: "127.0.0.2"} async def test_async_setup_returns_true(hass, config_entry, config): """Test component setup from config.""" config_entry.add_to_hass(hass) assert await async_setup_component(hass, DOMAIN, config) await hass.async_block_till_done() entries = hass.config_entries.async_entries(DOMAIN) assert len(entries) == 1 assert entries[0] == config_entry async def test_async_setup_no_config_returns_true(hass, config_entry): """Test component setup from entry only.""" config_entry.add_to_hass(hass) assert await async_setup_component(hass, 
DOMAIN, {}) await hass.async_block_till_done() entries = hass.config_entries.async_entries(DOMAIN) assert len(entries) == 1 assert entries[0] == config_entry async def test_async_setup_entry_loads_platforms( hass, config_entry, controller, input_sources, favorites ): """Test load connects to heos, retrieves players, and loads platforms.""" config_entry.add_to_hass(hass) with patch.object(hass.config_entries, "async_forward_entry_setup") as forward_mock: assert await async_setup_entry(hass, config_entry) # Assert platforms loaded await hass.async_block_till_done() assert forward_mock.call_count == 1 assert controller.connect.call_count == 1 assert controller.get_players.call_count == 1 assert controller.get_favorites.call_count == 1 assert controller.get_input_sources.call_count == 1 controller.disconnect.assert_not_called() assert hass.data[DOMAIN][DATA_CONTROLLER_MANAGER].controller == controller assert hass.data[DOMAIN][MEDIA_PLAYER_DOMAIN] == controller.players assert hass.data[DOMAIN][DATA_SOURCE_MANAGER].favorites == favorites assert hass.data[DOMAIN][DATA_SOURCE_MANAGER].inputs == input_sources async def test_async_setup_entry_not_signed_in_loads_platforms( hass, config_entry, controller, input_sources, caplog ): """Test setup does not retrieve favorites when not logged in.""" config_entry.add_to_hass(hass) controller.is_signed_in = False controller.signed_in_username = None with patch.object(hass.config_entries, "async_forward_entry_setup") as forward_mock: assert await async_setup_entry(hass, config_entry) # Assert platforms loaded await hass.async_block_till_done() assert forward_mock.call_count == 1 assert controller.connect.call_count == 1 assert controller.get_players.call_count == 1 assert controller.get_favorites.call_count == 0 assert controller.get_input_sources.call_count == 1 controller.disconnect.assert_not_called() assert hass.data[DOMAIN][DATA_CONTROLLER_MANAGER].controller == controller assert hass.data[DOMAIN][MEDIA_PLAYER_DOMAIN] == 
controller.players assert hass.data[DOMAIN][DATA_SOURCE_MANAGER].favorites == {} assert hass.data[DOMAIN][DATA_SOURCE_MANAGER].inputs == input_sources assert ( "127.0.0.1 is not logged in to a HEOS account and will be unable " "to retrieve HEOS favorites: Use the 'heos.sign_in' service to " "sign-in to a HEOS account" in caplog.text ) async def test_async_setup_entry_connect_failure(hass, config_entry, controller): """Connection failure raises ConfigEntryNotReady.""" config_entry.add_to_hass(hass) errors = [ConnectionError, asyncio.TimeoutError] for error in errors: controller.connect.side_effect = error with pytest.raises(ConfigEntryNotReady): await async_setup_entry(hass, config_entry) await hass.async_block_till_done() assert controller.connect.call_count == 1 assert controller.disconnect.call_count == 1 controller.connect.reset_mock() controller.disconnect.reset_mock() async def test_async_setup_entry_player_failure(hass, config_entry, controller): """Failure to retrieve players/sources raises ConfigEntryNotReady.""" config_entry.add_to_hass(hass) errors = [ConnectionError, asyncio.TimeoutError] for error in errors: controller.get_players.side_effect = error with pytest.raises(ConfigEntryNotReady): await async_setup_entry(hass, config_entry) await hass.async_block_till_done() assert controller.connect.call_count == 1 assert controller.disconnect.call_count == 1 controller.connect.reset_mock() controller.disconnect.reset_mock() async def test_unload_entry(hass, config_entry, controller): """Test entries are unloaded correctly.""" controller_manager = Mock(ControllerManager) hass.data[DOMAIN] = {DATA_CONTROLLER_MANAGER: controller_manager} with patch.object( hass.config_entries, "async_forward_entry_unload", return_value=True ) as unload: assert await async_unload_entry(hass, config_entry) await hass.async_block_till_done() assert controller_manager.disconnect.call_count == 1 assert unload.call_count == 1 assert DOMAIN not in hass.data async def 
test_update_sources_retry(hass, config_entry, config, controller, caplog): """Test update sources retries on failures to max attempts.""" config_entry.add_to_hass(hass) assert await async_setup_component(hass, DOMAIN, config) controller.get_favorites.reset_mock() controller.get_input_sources.reset_mock() source_manager = hass.data[DOMAIN][DATA_SOURCE_MANAGER] source_manager.retry_delay = 0 source_manager.max_retry_attempts = 1 controller.get_favorites.side_effect = CommandError("Test", "test", 0) controller.dispatcher.send( const.SIGNAL_CONTROLLER_EVENT, const.EVENT_SOURCES_CHANGED, {} ) # Wait until it's finished while "Unable to update sources" not in caplog.text: await asyncio.sleep(0.1) assert controller.get_favorites.call_count == 2
fbradyirl/home-assistant
tests/components/heos/test_init.py
Python
apache-2.0
7,448
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Composer Extension

Downloads, installs and runs Composer.
"""
import os
import os.path
import sys
import logging
import re
import json
import StringIO
from build_pack_utils import utils
from build_pack_utils import stream_output
from extension_helpers import ExtensionHelper
from build_pack_utils.compile_extensions import CompileExtensions

_log = logging.getLogger('composer')


def find_composer_paths(ctx):
    """Locate composer.json and composer.lock beneath the build directory.

    Candidate roots are BUILD_DIR and BUILD_DIR/WEBDIR; when the
    COMPOSER_PATH environment variable is set, the same two roots with
    that sub-path appended are also searched.  When several candidates
    exist, the one listed last takes precedence.

    Returns a (json_path, lock_path) tuple where either element is None
    if no matching file was found.
    """
    build_dir = ctx['BUILD_DIR']
    webdir = ctx['WEBDIR']

    roots = [build_dir, os.path.join(build_dir, webdir)]
    env_path = os.getenv('COMPOSER_PATH')
    if env_path is not None:
        roots.append(os.path.join(build_dir, env_path))
        roots.append(os.path.join(build_dir, webdir, env_path))

    def last_existing(file_name):
        # Scan every root; keep the last candidate that exists on disk,
        # matching the original precedence order.
        found = None
        for root in roots:
            candidate = os.path.join(root, file_name)
            if os.path.exists(candidate):
                found = candidate
        return found

    return (last_existing('composer.json'), last_existing('composer.lock'))
class ComposerConfiguration(object):
    """Derives PHP version/extension settings from the app's composer files."""

    def __init__(self, ctx):
        # ctx is the shared buildpack context dict; mutated by configure().
        self._ctx = ctx
        self._log = _log
        self._init_composer_paths()

    def _init_composer_paths(self):
        # Cache the composer.json / composer.lock locations (either may be None).
        (self.json_path, self.lock_path) = \
            find_composer_paths(self._ctx)

    def read_exts_from_path(self, path):
        """Return the 'ext-*' names from a composer file's "require" block.

        Parsing is regex-based rather than JSON-based, so it tolerates
        trailing commas but only inspects the first level of "require".
        """
        exts = []
        if path:
            req_pat = re.compile(r'"require"\s?\:\s?\{(.*?)\}', re.DOTALL)
            ext_pat = re.compile(r'"ext-(.*?)"')
            with open(path, 'rt') as fp:
                data = fp.read()
            for req_match in req_pat.finditer(data):
                for ext_match in ext_pat.finditer(req_match.group(1)):
                    exts.append(ext_match.group(1))
        return exts

    def pick_php_version(self, requested):
        """Map a composer 'php' version constraint onto a concrete version.

        Wildcard/minimum constraints resolve to the latest known patch of
        that line; explicit patch versions pass through; anything else
        (including unrecognized constraints) falls back to the context's
        default PHP_VERSION.
        """
        selected = None

        if requested is None:
            selected = self._ctx['PHP_VERSION']
        elif requested == '5.5.*' or requested == '>=5.5':
            selected = self._ctx['PHP_55_LATEST']
        elif requested == '5.6.*' or requested == '>=5.6':
            selected = self._ctx['PHP_56_LATEST']
        elif requested == '7.0.*' or requested == '>=7.0':
            selected = self._ctx['PHP_70_LATEST']
        elif requested.startswith('5.5.'):
            selected = requested
        elif requested.startswith('5.6.'):
            selected = requested
        elif requested.startswith('7.0.'):
            selected = requested
        else:
            selected = self._ctx['PHP_VERSION']

        return selected

    def read_version_from_composer(self, key):
        """Return the version constraint for `key` (e.g. 'php').

        composer.json's "require" section wins; composer.lock's
        "platform" section is only consulted when no composer.json exists.
        """
        (json_path, lock_path) = find_composer_paths(self._ctx)
        if json_path is not None:
            composer = json.load(open(json_path, 'r'))
            require = composer.get('require', {})
            return require.get(key, None)
        if lock_path is not None:
            composer = json.load(open(lock_path, 'r'))
            platform = composer.get('platform', {})
            return platform.get(key, None)
        return None

    def configure(self):
        """Update the context's PHP_VERSION/PHP_EXTENSIONS/PHP_VM.

        No-op when the app ships neither composer.json nor composer.lock.
        """
        if self.json_path or self.lock_path:
            exts = []
            # include any existing extensions
            exts.extend(self._ctx.get('PHP_EXTENSIONS', []))
            # add 'openssl' extension
            exts.append('openssl')
            # add platform extensions from composer.json & composer.lock
            exts.extend(self.read_exts_from_path(self.json_path))
            exts.extend(self.read_exts_from_path(self.lock_path))
            # update context with new list of extensions,
            # if composer.json exists
            php_version = self.read_version_from_composer('php')
            self._log.debug('Composer picked PHP Version [%s]',
                            php_version)
            self._ctx['PHP_VERSION'] = self.pick_php_version(php_version)
            self._ctx['PHP_EXTENSIONS'] = utils.unique(exts)
            self._ctx['PHP_VM'] = 'php'


class ComposerExtension(ExtensionHelper):
    """Buildpack extension that downloads composer and installs app deps."""

    def __init__(self, ctx):
        ExtensionHelper.__init__(self, ctx)
        self._log = _log

    def _defaults(self):
        """Return default context values for the composer extension."""
        # The default composer version comes from the buildpack manifest.
        manifest_file_path = os.path.join(self._ctx["BP_DIR"], "manifest.yml")
        compile_ext = CompileExtensions(self._ctx["BP_DIR"])
        _, default_version = compile_ext.default_version_for(manifest_file_path=manifest_file_path, dependency="composer")
        return {
            'COMPOSER_VERSION': default_version,
            'COMPOSER_PACKAGE': 'composer.phar',
            'COMPOSER_DOWNLOAD_URL': '/composer/'
                                     '{COMPOSER_VERSION}/{COMPOSER_PACKAGE}',
            'COMPOSER_INSTALL_OPTIONS': ['--no-interaction', '--no-dev'],
            'COMPOSER_VENDOR_DIR': '{BUILD_DIR}/{LIBDIR}/vendor',
            'COMPOSER_BIN_DIR': '{BUILD_DIR}/php/bin',
            'COMPOSER_CACHE_DIR': '{CACHE_DIR}/composer'
        }

    def _should_compile(self):
        # Only run when the app actually ships composer files.
        (json_path, lock_path) = \
            find_composer_paths(self._ctx)
        return (json_path is not None or lock_path is not None)

    def _compile(self, install):
        """Entry point called by the framework: install composer, run it."""
        self._builder = install.builder
        self.composer_runner = ComposerCommandRunner(self._ctx, self._builder)
        self.move_local_vendor_folder()
        self.install()
        self.run()

    def move_local_vendor_folder(self):
        """Relocate an app-shipped WEBDIR/vendor folder into LIBDIR."""
        vendor_path = os.path.join(self._ctx['BUILD_DIR'],
                                   self._ctx['WEBDIR'],
                                   'vendor')
        if os.path.exists(vendor_path):
            self._log.debug("Vendor [%s] exists, moving to LIBDIR",
                            vendor_path)
            (self._builder.move()
                .under('{BUILD_DIR}/{WEBDIR}')
                .into('{BUILD_DIR}/{LIBDIR}')
                .where_name_matches('^%s/.*$' % vendor_path)
                .done())

    def install(self):
        """Install PHP plus the composer.phar binary into BUILD_DIR/php/bin.

        'COMPOSER_VERSION: latest' downloads directly from getcomposer.org
        and is rejected for cached (offline) buildpacks, which are detected
        by the presence of a BP_DIR/dependencies folder.
        """
        self._builder.install().package('PHP').done()
        if self._ctx['COMPOSER_VERSION'] == 'latest':
            dependencies_path = os.path.join(self._ctx['BP_DIR'],
                                             'dependencies')
            if os.path.exists(dependencies_path):
                raise RuntimeError('"COMPOSER_VERSION": "latest" ' \
                    'is not supported in the cached buildpack. Please vendor your preferred version of composer with your app, or use the provided default composer version.')

            self._ctx['COMPOSER_DOWNLOAD_URL'] = \
                'https://getcomposer.org/composer.phar'
            self._builder.install()._installer.install_binary_direct(
                self._ctx['COMPOSER_DOWNLOAD_URL'], None,
                os.path.join(self._ctx['BUILD_DIR'], 'php', 'bin'),
                extract=False)
        else:
            self._builder.install()._installer._install_binary_from_manifest(
                self._ctx['COMPOSER_DOWNLOAD_URL'],
                os.path.join(self._ctx['BUILD_DIR'], 'php', 'bin'),
                extract=False)

    def _github_oauth_token_is_valid(self, candidate_oauth_token):
        """Return True when GitHub accepts the token on /rate_limit.

        Shells out to curl and treats a response containing a 'resources'
        key as proof of a valid token.
        """
        stringio_writer = StringIO.StringIO()

        curl_command = 'curl -H "Authorization: token %s" ' \
            'https://api.github.com/rate_limit' % candidate_oauth_token

        stream_output(stringio_writer,
                      curl_command,
                      env=os.environ,
                      cwd=self._ctx['BUILD_DIR'],
                      shell=True)

        github_response = stringio_writer.getvalue()

        github_response_json = json.loads(github_response)
        return 'resources' in github_response_json

    def _github_rate_exceeded(self, token_is_valid):
        """Return True when the (optionally authenticated) rate limit is spent."""
        stringio_writer = StringIO.StringIO()
        if token_is_valid:
            candidate_oauth_token = os.getenv('COMPOSER_GITHUB_OAUTH_TOKEN')
            curl_command = 'curl -H "Authorization: token %s" ' \
                'https://api.github.com/rate_limit' % candidate_oauth_token
        else:
            curl_command = 'curl https://api.github.com/rate_limit'

        stream_output(stringio_writer,
                      curl_command,
                      env=os.environ,
                      cwd=self._ctx['BUILD_DIR'],
                      shell=True)

        github_response = stringio_writer.getvalue()
        github_response_json = json.loads(github_response)
        rate = github_response_json['rate']
        num_remaining = rate['remaining']
        return num_remaining <= 0

    def setup_composer_github_token(self):
        """Register $COMPOSER_GITHUB_OAUTH_TOKEN with composer if valid.

        Returns True when the token validated and was configured.
        """
        github_oauth_token = os.getenv('COMPOSER_GITHUB_OAUTH_TOKEN')
        if self._github_oauth_token_is_valid(github_oauth_token):
            print('-----> Using custom GitHub OAuth token in'
                  ' $COMPOSER_GITHUB_OAUTH_TOKEN')
            self.composer_runner.run('config', '-g',
                                     'github-oauth.github.com',
                                     '"%s"' % github_oauth_token)
            return True
        else:
            print('-----> The GitHub OAuth token supplied from '
                  '$COMPOSER_GITHUB_OAUTH_TOKEN is invalid')
            return False

    def check_github_rate_exceeded(self, token_is_valid):
        """Warn (non-fatally) when the GitHub API rate limit is exhausted."""
        if self._github_rate_exceeded(token_is_valid):
            print('-----> The GitHub api rate limit has been exceeded. '
                  'Composer will continue by downloading from source, which might result in slower downloads. '
                  'You can increase your rate limit with a GitHub OAuth token. '
                  'Please obtain a GitHub OAuth token by registering your application at '
                  'https://github.com/settings/applications/new. '
                  'Then set COMPOSER_GITHUB_OAUTH_TOKEN in your environment to the value of this token.')

    def run(self):
        """Stage composer files into BUILD_DIR and run `composer install`."""
        # Move composer files into root directory
        (json_path, lock_path) = find_composer_paths(self._ctx)
        if json_path is not None and os.path.dirname(json_path) != self._ctx['BUILD_DIR']:
            (self._builder.move()
             .under(os.path.dirname(json_path))
             .where_name_is('composer.json')
             .into('BUILD_DIR')
             .done())
        if lock_path is not None and os.path.dirname(lock_path) != self._ctx['BUILD_DIR']:
            (self._builder.move()
             .under(os.path.dirname(lock_path))
             .where_name_is('composer.lock')
             .into('BUILD_DIR')
             .done())

        # Sanity Checks
        if not os.path.exists(os.path.join(self._ctx['BUILD_DIR'],
                                           'composer.lock')):
            msg = (
                'PROTIP: Include a `composer.lock` file with your '
                'application! This will make sure the exact same version '
                'of dependencies are used when you deploy to CloudFoundry.')
            self._log.warning(msg)
            print msg

        # dump composer version, if in debug mode
        if self._ctx.get('BP_DEBUG', False):
            self.composer_runner.run('-V')

        # GitHub token/rate handling only matters for the online buildpack
        # (a cached buildpack ships its dependencies locally).
        if not os.path.exists(os.path.join(self._ctx['BP_DIR'], 'dependencies')):
            token_is_valid = False
            # config composer to use github token, if provided
            if os.getenv('COMPOSER_GITHUB_OAUTH_TOKEN', False):
                token_is_valid = self.setup_composer_github_token()
            # check that the api rate limit has not been exceeded, otherwise exit
            self.check_github_rate_exceeded(token_is_valid)

        # install dependencies w/Composer
        self.composer_runner.run('install', '--no-progress',
                                 *self._ctx['COMPOSER_INSTALL_OPTIONS'])


class ComposerCommandRunner(object):
    """Runs composer.phar via the staged PHP interpreter."""

    def __init__(self, ctx, builder):
        self._log = _log
        self._ctx = ctx
        self._strategy = PHPComposerStrategy(ctx)
        self._php_path = self._strategy.binary_path()
        self._composer_path = os.path.join(ctx['BUILD_DIR'],
                                           'php', 'bin', 'composer.phar')
        # Writing the php.ini config is a side effect of construction.
        self._strategy.write_config(builder)

    def _build_composer_environment(self):
        """Build the subprocess environment for composer.

        Every inherited env var name is re-resolved through the buildpack
        context (non-string values are JSON-encoded), then the composer
        dirs and the PHP runtime variables are layered on top.
        """
        env = {}
        for key in os.environ.keys():
            val = self._ctx.get(key, '')
            env[key] = val if type(val) == str else json.dumps(val)

        # add basic composer vars
        env['COMPOSER_VENDOR_DIR'] = self._ctx['COMPOSER_VENDOR_DIR']
        env['COMPOSER_BIN_DIR'] = self._ctx['COMPOSER_BIN_DIR']
        env['COMPOSER_CACHE_DIR'] = self._ctx['COMPOSER_CACHE_DIR']

        # prevent key system variables from being overridden
        env['LD_LIBRARY_PATH'] = self._strategy.ld_library_path()
        env['PHPRC'] = self._ctx['TMPDIR']
        env['PATH'] = ':'.join(filter(None,
                                      [env.get('PATH', ''),
                                       os.path.dirname(self._php_path)]))
        self._log.debug("ENV IS: %s",
                        '\n'.join(["%s=%s (%s)" % (key, val, type(val))
                                   for (key, val) in env.iteritems()]))
        return env

    def run(self, *args):
        """Invoke `php composer.phar <args>` streaming output to stdout."""
        try:
            cmd = [self._php_path, self._composer_path]
            cmd.extend(args)
            self._log.debug("Running command [%s]", ' '.join(cmd))
            stream_output(sys.stdout,
                          ' '.join(cmd),
                          env=self._build_composer_environment(),
                          cwd=self._ctx['BUILD_DIR'],
                          shell=True)
        # NOTE(review): bare except deliberately re-raises after printing,
        # so staging still fails loudly.
        except:
            print "-----> Composer command failed"
            raise


class PHPComposerStrategy(object):
    """Paths and config needed to run composer under the staged PHP."""

    def __init__(self, ctx):
        self._ctx = ctx

    def binary_path(self):
        # Location of the php executable installed by the PHP package.
        return os.path.join(
            self._ctx['BUILD_DIR'], 'php', 'bin', 'php')

    def write_config(self, builder):
        """Copy php.ini into TMPDIR and rewrite its @-delimited placeholders."""
        # rewrite a temp copy of php.ini for use by composer
        (builder.copy()
            .under('{BUILD_DIR}/php/etc')
            .where_name_is('php.ini')
            .into('TMPDIR')
            .done())
        utils.rewrite_cfgs(os.path.join(self._ctx['TMPDIR'], 'php.ini'),
                           {'TMPDIR': self._ctx['TMPDIR'],
                            'HOME': self._ctx['BUILD_DIR']},
                           delim='@')

    def ld_library_path(self):
        # Shared libraries bundled with the staged PHP.
        return os.path.join(
            self._ctx['BUILD_DIR'], 'php', 'lib')


# Extension Methods
# The functions below are the module-level hooks the buildpack framework
# discovers and calls for this extension.
def configure(ctx):
    config = ComposerConfiguration(ctx)
    config.configure()


def preprocess_commands(ctx):
    composer = ComposerExtension(ctx)
    return composer.preprocess_commands()


def service_commands(ctx):
    composer = ComposerExtension(ctx)
    return composer.service_commands()


def service_environment(ctx):
    composer = ComposerExtension(ctx)
    return composer.service_environment()


def compile(install):
    composer = ComposerExtension(install.builder._ctx)
    return composer.compile(install)
chregu/cf-php-varnish-buildpack
extensions/composer/extension.py
Python
apache-2.0
16,172
"""Defines the data needed for executing a job""" from __future__ import unicode_literals import logging import os from numbers import Integral from job.configuration.data.data_file import DATA_FILE_PARSE_SAVER, DATA_FILE_STORE from job.configuration.data.exceptions import InvalidData from job.configuration.results.job_results import JobResults from job.execution.container import SCALE_JOB_EXE_INPUT_PATH from storage.brokers.broker import FileDownload from storage.models import ScaleFile from util.environment import normalize_env_var_name logger = logging.getLogger(__name__) DEFAULT_VERSION = '1.0' class ValidationWarning(object): """Tracks job data configuration warnings during validation that may not prevent the job from working.""" def __init__(self, key, details): """Constructor sets basic attributes. :param key: A unique identifier clients can use to recognize the warning. :type key: string :param details: A user-friendly description of the problem, including field names and/or associated values. :type details: string """ self.key = key self.details = details class JobData(object): """Represents the data needed for executing a job. Data includes details about the data inputs, links needed to connect shared resources to resource instances in Scale, and details needed to store all resulting output. """ def __init__(self, data=None): """Creates a job data object from the given dictionary. The general format is checked for correctness, but the actual input and output details are not checked for correctness against the job interface. If the data is invalid, a :class:`job.configuration.data.exceptions.InvalidData` will be thrown. 
:param data: The job data :type data: dict """ if not data: data = {} self.data_dict = data self.param_names = set() self.data_inputs_by_name = {} # string -> dict self.data_outputs_by_name = {} # string -> dict if 'version' not in self.data_dict: self.data_dict['version'] = DEFAULT_VERSION if not self.data_dict['version'] == '1.0': raise InvalidData('Invalid job data: %s is an unsupported version number' % self.data_dict['version']) if 'input_data' not in self.data_dict: self.data_dict['input_data'] = [] for data_input in self.data_dict['input_data']: if 'name' not in data_input: raise InvalidData('Invalid job data: Every data input must have a "name" field') name = data_input['name'] if name in self.param_names: raise InvalidData('Invalid job data: %s cannot be defined more than once' % name) else: self.param_names.add(name) self.data_inputs_by_name[name] = data_input if 'output_data' not in self.data_dict: self.data_dict['output_data'] = [] for data_output in self.data_dict['output_data']: if 'name' not in data_output: raise InvalidData('Invalid job data: Every data output must have a "name" field') name = data_output['name'] if name in self.param_names: raise InvalidData('Invalid job data: %s cannot be defined more than once' % name) else: self.param_names.add(name) self.data_outputs_by_name[name] = data_output def add_file_input(self, input_name, file_id): """Adds a new file parameter to this job data. This method does not perform validation on the job data. 
:param input_name: The file parameter name :type input_name: string :param file_id: The ID of the file :type file_id: long """ if input_name in self.param_names: raise Exception('Data already has a parameter named %s' % input_name) self.param_names.add(input_name) file_input = {'name': input_name, 'file_id': file_id} self.data_dict['input_data'].append(file_input) self.data_inputs_by_name[input_name] = file_input def add_file_list_input(self, input_name, file_ids): """Adds a new files parameter to this job data. This method does not perform validation on the job data. :param input_name: The files parameter name :type input_name: string :param file_ids: The ID of the file :type file_ids: [long] """ if input_name in self.param_names: raise Exception('Data already has a parameter named %s' % input_name) self.param_names.add(input_name) files_input = {'name': input_name, 'file_ids': file_ids} self.data_dict['input_data'].append(files_input) self.data_inputs_by_name[input_name] = files_input def add_file_output(self, data, add_to_internal=True): """Adds a new output files to this job data with a workspace ID. :param data: The output parameter dict :type data: dict :param add_to_internal: Whether we should add to private data dict. Unneeded when used from __init__ :type add_to_internal: bool """ # Call to legacy method self.add_output(data['name'], data['workspace_id']) def add_output(self, output_name, workspace_id): """Adds a new output parameter to this job data with a workspace ID. This method does not perform validation on the job data. 
:param output_name: The output parameter name :type output_name: string :param workspace_id: The ID of the workspace :type workspace_id: int """ if output_name in self.param_names: raise Exception('Data already has a parameter named %s' % output_name) self.param_names.add(output_name) output = {'name': output_name, 'workspace_id': workspace_id} self.data_dict['output_data'].append(output) self.data_outputs_by_name[output_name] = output def add_property_input(self, input_name, value): """Adds a new property parameter to this job data. This method does not perform validation on the job data. :param input_name: The property parameter name :type input_name: string :param value: The value of the property :type value: string """ if input_name in self.param_names: raise Exception('Data already has a parameter named %s' % input_name) self.param_names.add(input_name) prop_input = {'name': input_name, 'value': value} self.data_dict['input_data'].append(prop_input) self.data_inputs_by_name[input_name] = prop_input def get_all_properties(self): """Retrieves all properties from this job data and returns them in ascending order of their names :returns: List of strings containing name=value :rtype: [string] """ properties = [] names = sorted(self.data_inputs_by_name.keys()) for name in names: the_input = self.data_inputs_by_name[name] if 'value' in the_input: properties.append(name + '=' + the_input['value']) return properties def get_dict(self): """Returns the internal dictionary that represents this job data :returns: The internal dictionary :rtype: dict """ return self.data_dict def get_input_file_ids(self): """Returns a set of scale file identifiers for each file in the job input data. 
:returns: Set of scale file identifiers :rtype: {int} """ file_ids = set() for data_input in self.data_dict['input_data']: if 'file_id' in data_input: file_ids.add(data_input['file_id']) elif 'file_ids' in data_input: file_ids.update(data_input['file_ids']) return file_ids def get_input_file_ids_by_input(self): """Returns the list of file IDs for each input that holds files :returns: Dict where each file input name maps to its list of file IDs :rtype: dict """ file_ids = {} for data_input in self.data_dict['input_data']: if 'file_id' in data_input: file_ids[data_input['name']] = [data_input['file_id']] elif 'file_ids' in data_input: file_ids[data_input['name']] = data_input['file_ids'] return file_ids def get_input_file_info(self): """Returns a set of scale file identifiers and input names for each file in the job input data. :returns: Set of scale file identifiers and names :rtype: set[tuple] """ file_info = set() for data_input in self.data_dict['input_data']: if 'file_id' in data_input: file_info.add((data_input['file_id'], data_input['name'])) elif 'file_ids' in data_input: for file_id in data_input['file_ids']: file_info.add((file_id, data_input['name'])) return file_info def get_output_workspace_ids(self): """Returns a list of the IDs for every workspace used to store the output files for this data :returns: List of workspace IDs :rtype: [int] """ workspace_ids = set() for name in self.data_outputs_by_name: file_output = self.data_outputs_by_name[name] workspace_id = file_output['workspace_id'] workspace_ids.add(workspace_id) return list(workspace_ids) def get_output_workspaces(self): """Returns a dict of the output parameter names mapped to their output workspace ID :returns: A dict mapping output parameters to workspace IDs :rtype: dict """ workspaces = {} for name in self.data_outputs_by_name: file_output = self.data_outputs_by_name[name] workspace_id = file_output['workspace_id'] workspaces[name] = workspace_id return workspaces def 
get_property_values(self, property_names): """Retrieves the values contained in this job data for the given property names. If no value is available for a property name, it will not be included in the returned dict. :param property_names: List of property names :type property_names: [string] :returns: Dict with each property name mapping to its value :rtype: {string: string} """ property_values = {} for name in property_names: if name in self.data_inputs_by_name: property_input = self.data_inputs_by_name[name] if 'value' not in property_input: raise Exception('Property %s is missing required "value" field' % name) property_values[name] = property_input['value'] return property_values def get_injected_input_values(self, input_files_dict): """Apply all execution time values to job data :param input_files: Mapping of input names to InputFiles :type input_files: {str, :class:`job.execution.configuration.input_file.InputFile`} :return: Mapping of all input keys to their true file / property values :rtype: {str, str} """ input_values = {} for data_input in self.get_dict()['input_data']: input_name = data_input['name'] if 'value' in data_input: input_values[input_name] = data_input['value'] if 'file_id' in data_input: input_file = input_files_dict[input_name][0] file_name = input_file.file_name if input_file.local_file_name: file_name = input_file.local_file_name input_values[input_name] = os.path.join(SCALE_JOB_EXE_INPUT_PATH, input_name, file_name) elif 'file_ids' in data_input: input_values[input_name] = os.path.join(SCALE_JOB_EXE_INPUT_PATH, input_name) return input_values def get_injected_env_vars(self, input_files_dict): """Apply all execution time values to job data :param input_files: Mapping of input names to InputFiles :type input_files: {str, :class:`job.execution.configuration.input_file.InputFile`} :return: Mapping of all input keys to their true file / property values :rtype: {str, str} """ env_vars = {} for data_input in self.get_dict()['input_data']: 
input_name = data_input['name'] if 'value' in data_input: env_vars[normalize_env_var_name(input_name)] = data_input['value'] if 'file_id' in data_input: input_file = input_files_dict[input_name][0] file_name = os.path.basename(input_file.workspace_path) if input_file.local_file_name: file_name = input_file.local_file_name env_vars[normalize_env_var_name(input_name)] = os.path.join(SCALE_JOB_EXE_INPUT_PATH, input_name, file_name) elif 'file_ids' in data_input: env_vars[normalize_env_var_name(input_name)] = os.path.join(SCALE_JOB_EXE_INPUT_PATH, input_name) return env_vars def has_workspaces(self): """Whether this job data contains output wrkspaces :returns: Whether this job data contains output wrkspaces :rtype: bool """ return True def retrieve_input_data_files(self, data_files): """Retrieves the given data input files and writes them to the given local directories. Any given file parameters that do not appear in the data will not be returned in the results. :param data_files: Dict with each file parameter name mapping to a bool indicating if the parameter accepts multiple files (True), an absolute directory path and bool indicating if job supports partial file download (True). :type data_files: {string: tuple(bool, string, bool)} :returns: Dict with each file parameter name mapping to a list of absolute file paths of the written files :rtype: {string: [string]} """ # Organize the data files param_file_ids = {} # Parameter name -> [file IDs] files_to_retrieve = {} # File ID -> tuple(string, bool) for relative dir path and if partially accessed for name in data_files: multiple = data_files[name][0] dir_path = data_files[name][1] partial = data_files[name][2] if name not in self.data_inputs_by_name: continue file_input = self.data_inputs_by_name[name] file_ids = [] # TODO: Remove with legacy job types. 
This is a protection against multiple being specified for a single file or no file if multiple and 'file_ids' in file_input: for file_id in file_input['file_ids']: file_id = long(file_id) file_ids.append(file_id) files_to_retrieve[file_id] = (dir_path, partial) else: file_id = long(file_input['file_id']) file_ids.append(file_id) files_to_retrieve[file_id] = (dir_path, partial) param_file_ids[name] = file_ids # Retrieve all files retrieved_files = self._retrieve_files(files_to_retrieve) for file_id in retrieved_files: del files_to_retrieve[file_id] if files_to_retrieve: msg = 'Failed to retrieve file with ID %i' % files_to_retrieve.keys()[0] raise Exception(msg) # Organize the results retrieved_params = {} # Parameter name -> [file paths] for name in param_file_ids: file_path_list = [] for file_id in param_file_ids[name]: file_path_list.append(retrieved_files[file_id]) retrieved_params[name] = file_path_list return retrieved_params def save_parse_results(self, parse_results): """Saves the given parse results :param parse_results: Dict with each input file name mapping to a tuple of GeoJSON containing GIS meta-data (optionally None), the start time of the data contained in the file (optionally None), the end time of the data contained in the file (optionally None), the list of data types, and the new workspace path (optionally None) :type parse_results: {string: tuple(string, :class:`datetime.datetime`, :class:`datetime.datetime`, [], string, string)} """ input_file_ids = [] for name in self.data_inputs_by_name: data_input = self.data_inputs_by_name[name] if 'file_ids' in data_input: file_ids = data_input['file_ids'] for file_id in file_ids: input_file_ids.append(file_id) elif 'file_id' in data_input: file_id = data_input['file_id'] input_file_ids.append(file_id) data_file_parse_saver = DATA_FILE_PARSE_SAVER['DATA_FILE_PARSE_SAVER'] if not data_file_parse_saver: raise Exception('No data file parse saver found') data_file_parse_saver.save_parse_results(parse_results, 
input_file_ids) def setup_job_dir(self, data_files): """Sets up the directory structure for a job execution and downloads the given files :param data_files: Dict with each file parameter name mapping to a bool indicating if the parameter accepts multiple files (True) and an absolute directory path :type data_files: {string: tuple(bool, string)} :returns: Dict with each file parameter name mapping to a list of absolute file paths of the written files :rtype: {string: [string]} """ # Download the job execution input files self.retrieve_input_data_files(data_files) def store_output_data_files(self, data_files, job_exe): """Stores the given data output files :param data_files: Dict with each file parameter name mapping to a list of ProductFileMetadata classes :type data_files: {string: [`ProductFileMetadata`]} :param job_exe: The job execution model (with related job and job_type fields) that is storing the output data files :type job_exe: :class:`job.models.JobExecution` :returns: The job results :rtype: :class:`job.configuration.results.job_results.JobResults` """ # Organize the data files workspace_files = {} # Workspace ID -> [(absolute local file path, media type)] params_by_file_path = {} # Absolute local file path -> output parameter name output_workspaces = JobData.create_output_workspace_dict(data_files.keys(), self, job_exe) for name in data_files: workspace_id = output_workspaces[name] if workspace_id in workspace_files: workspace_file_list = workspace_files[workspace_id] else: workspace_file_list = [] workspace_files[workspace_id] = workspace_file_list data_file_entry = data_files[name] if isinstance(data_file_entry, list): for file_entry in data_file_entry: file_path = os.path.normpath(file_entry.local_path) if not os.path.isfile(file_path): raise Exception('%s is not a valid file' % file_path) params_by_file_path[file_path] = name workspace_file_list.append(file_entry) else: file_path = os.path.normpath(data_file_entry.local_path) if not 
os.path.isfile(file_path): raise Exception('%s is not a valid file' % file_path) params_by_file_path[file_path] = name data_file_entry.local_path = file_path workspace_file_list.append(data_file_entry) data_file_store = DATA_FILE_STORE['DATA_FILE_STORE'] if not data_file_store: raise Exception('No data file store found') stored_files = data_file_store.store_files(workspace_files, self.get_input_file_ids(), job_exe) # Organize results param_file_ids = {} # Output parameter name -> file ID or [file IDs] for file_path in stored_files: file_id = stored_files[file_path] name = params_by_file_path[file_path] if isinstance(data_files[name], list): if name in param_file_ids: file_id_list = param_file_ids[name] else: file_id_list = [] param_file_ids[name] = file_id_list file_id_list.append(file_id) else: param_file_ids[name] = file_id # Create job results results = JobResults() for name in param_file_ids: param_entry = param_file_ids[name] if isinstance(param_entry, list): results.add_file_list_parameter(name, param_entry) else: results.add_file_parameter(name, param_entry) return results def validate_input_files(self, files): """Validates the given file parameters to make sure they are valid with respect to the job interface. :param files: Dict of file parameter names mapped to a tuple with three items: whether the parameter is required (True), if the parameter is for multiple files (True), and the description of the expected file meta-data :type files: {string: tuple(bool, bool, :class:`job.configuration.interface.scale_file.ScaleFileDescription`)} :returns: A list of warnings discovered during validation. :rtype: [:class:`job.configuration.data.job_data.ValidationWarning`] :raises :class:`job.configuration.data.exceptions.InvalidData`: If there is a configuration problem. 
""" warnings = [] for name in files: required = files[name][0] multiple = files[name][1] file_desc = files[name][2] if name in self.data_inputs_by_name: # Have this input, make sure it is valid file_input = self.data_inputs_by_name[name] file_ids = [] if multiple: if 'file_ids' not in file_input: if 'file_id' in file_input: file_input['file_ids'] = [file_input['file_id']] else: msg = ('Invalid job data: Data input %s is a list of files and must have a "file_ids" or ' '"file_id" field') raise InvalidData(msg % name) if 'file_id' in file_input: del file_input['file_id'] value = file_input['file_ids'] if not isinstance(value, list): msg = 'Invalid job data: Data input %s must have a list of integers in its "file_ids" field' raise InvalidData(msg % name) for file_id in value: if not isinstance(file_id, Integral): msg = ('Invalid job data: Data input %s must have a list of integers in its "file_ids" ' 'field') raise InvalidData(msg % name) file_ids.append(long(file_id)) else: if 'file_id' not in file_input: msg = 'Invalid job data: Data input %s is a file and must have a "file_id" field' % name raise InvalidData(msg) if 'file_ids' in file_input: del file_input['file_ids'] file_id = file_input['file_id'] if not isinstance(file_id, Integral): msg = 'Invalid job data: Data input %s must have an integer in its "file_id" field' % name raise InvalidData(msg) file_ids.append(long(file_id)) warnings.extend(self._validate_file_ids(file_ids, file_desc)) else: # Don't have this input, check if it is required if required: raise InvalidData('Invalid job data: Data input %s is required and was not provided' % name) # Handle extra inputs in the data that are not defined in the interface for name in list(self.data_inputs_by_name.keys()): data_input = self.data_inputs_by_name[name] if 'file_id' in data_input or 'file_ids' in data_input: if name not in files: warn = ValidationWarning('unknown_input', 'Unknown input %s will be ignored' % name) warnings.append(warn) 
self._delete_input(name) return warnings def validate_output_files(self, files): """Validates the given file parameters to make sure they are valid with respect to the job interface. :param files: List of file parameter names :type files: [string] :returns: A list of warnings discovered during validation. :rtype: [:class:`job.configuration.data.job_data.ValidationWarning`] :raises :class:`job.configuration.data.exceptions.InvalidData`: If there is a configuration problem. """ warnings = [] workspace_ids = set() for name in files: if name not in self.data_outputs_by_name: raise InvalidData('Invalid job data: Data output %s was not provided' % name) file_output = self.data_outputs_by_name[name] if 'workspace_id' not in file_output: raise InvalidData('Invalid job data: Data output %s must have a "workspace_id" field' % name) workspace_id = file_output['workspace_id'] if not isinstance(workspace_id, Integral): msg = 'Invalid job data: Data output %s must have an integer in its "workspace_id" field' % name raise InvalidData(msg) workspace_ids.add(workspace_id) data_file_store = DATA_FILE_STORE['DATA_FILE_STORE'] if not data_file_store: raise Exception('No data file store found') workspaces = data_file_store.get_workspaces(workspace_ids) for workspace_id in workspaces: active = workspaces[workspace_id] if not active: raise InvalidData('Invalid job data: Workspace for ID %i is not active' % workspace_id) workspace_ids.remove(workspace_id) # Check if there were any workspace IDs that weren't found if workspace_ids: raise InvalidData('Invalid job data: Workspace for ID(s): %s do not exist' % str(workspace_ids)) return warnings def validate_properties(self, property_names): """Validates the given property names to ensure they are all populated correctly and exist if they are required. :param property_names: Dict of property names mapped to a bool indicating if they are required :type property_names: {string: bool} :returns: A list of warnings discovered during validation. 
:rtype: [:class:`job.configuration.data.job_data.ValidationWarning`] :raises :class:`job.configuration.data.exceptions.InvalidData`: If there is a configuration problem. """ warnings = [] for name in property_names: if name in self.data_inputs_by_name: # Have this input, make sure it is a valid property property_input = self.data_inputs_by_name[name] if 'value' not in property_input: msg = 'Invalid job data: Data input %s is a property and must have a "value" field' % name raise InvalidData(msg) value = property_input['value'] if not isinstance(value, basestring): raise InvalidData('Invalid job data: Data input %s must have a string in its "value" field' % name) else: # Don't have this input, check if it is required if property_names[name]: raise InvalidData('Invalid job data: Data input %s is required and was not provided' % name) # Handle extra inputs in the data that are not defined in the interface for name in list(self.data_inputs_by_name.keys()): data_input = self.data_inputs_by_name[name] if 'value' in data_input: if name not in property_names: warn = ValidationWarning('unknown_input', 'Unknown input %s will be ignored' % name) warnings.append(warn) self._delete_input(name) return warnings def _delete_input(self, name): """Deletes the input with the given name :param name: The name of the input to delete :type name: string """ if name in self.data_inputs_by_name: del self.data_inputs_by_name[name] self.param_names.discard(name) new_input_data = [] for data_input in self.data_dict['input_data']: if data_input['name'] != name: new_input_data.append(data_input) self.data_dict['input_data'] = new_input_data def _retrieve_files(self, data_files): """Retrieves the given data files and writes them to the given local directories. If no file with a given ID exists, it will not be retrieved and returned in the results. 
:param data_files: Dict with each file ID mapping to an absolute directory path for downloading and bool indicating if job supports partial file download (True). :type data_files: {long: type(string, bool)} :returns: Dict with each file ID mapping to its absolute local path :rtype: {long: string} :raises ArchivedWorkspace: If any of the files has an archived workspace (no longer active) :raises DeletedFile: If any of the files has been deleted """ file_ids = data_files.keys() files = ScaleFile.objects.filter(id__in=file_ids) file_downloads = [] results = {} local_paths = set() # Pay attention to file name collisions and update file name if needed counter = 0 for scale_file in files: partial = data_files[scale_file.id][1] local_path = os.path.join(data_files[scale_file.id][0], scale_file.file_name) while local_path in local_paths: # Path collision, try a different file name counter += 1 new_file_name = '%i_%s' % (counter, scale_file.file_name) local_path = os.path.join(data_files[scale_file.id][0], new_file_name) local_paths.add(local_path) file_downloads.append(FileDownload(scale_file, local_path, partial)) results[scale_file.id] = local_path ScaleFile.objects.download_files(file_downloads) return results def _validate_file_ids(self, file_ids, file_desc): """Validates the files with the given IDs against the given file description. If invalid, a :class:`job.configuration.data.exceptions.InvalidData` will be thrown. :param file_ids: List of file IDs :type file_ids: [long] :param file_desc: The description of the required file meta-data for validation :type file_desc: :class:`job.configuration.interface.scale_file.ScaleFileDescription` :returns: A list of warnings discovered during validation. :rtype: [:class:`job.configuration.data.job_data.ValidationWarning`] :raises :class:`job.configuration.data.exceptions.InvalidData`: If any of the files are missing. 
""" warnings = [] found_ids = set() for scale_file in ScaleFile.objects.filter(id__in=file_ids): found_ids.add(scale_file.id) media_type = scale_file.media_type if not file_desc.is_media_type_allowed(media_type): warn = ValidationWarning('media_type', 'Invalid media type for file: %i -> %s' % (scale_file.id, media_type)) warnings.append(warn) # Check if there were any file IDs that weren't found in the query for file_id in file_ids: if file_id not in found_ids: raise InvalidData('Invalid job data: Data file for ID %i does not exist' % file_id) return warnings @staticmethod def create_output_workspace_dict(output_params, job_data, job_exe): """Creates the mapping from output to workspace both ways: the old way from job data and the new way from job configuration :param output_params: The list of output parameter names :type output_params: :func:`list` :param job_data: The job data :type job_data: 1.0? 2.0? WHO KNOWZ? :param job_exe: The job execution model (with related job and job_type fields) :type job_exe: :class:`job.models.JobExecution` :return: Dict where output param name maps to workspace ID :rtype: dict """ workspace_dict = {} # {Output name: workspace ID} if job_data.has_workspaces(): # Do the old way of getting output workspaces from job data for name, output_dict in job_data.data_outputs_by_name.items(): workspace_id = output_dict['workspace_id'] workspace_dict[name] = workspace_id config = job_exe.job.get_job_configuration() if config and (config.default_output_workspace or config.output_workspaces): workspace_names_dict = {} # {Output name: workspace name} # Do the new way, grabbing output workspaces from job configuration for name in output_params: if name in config.output_workspaces: workspace_names_dict[name] = config.output_workspaces[name] elif config.default_output_workspace: workspace_names_dict[name] = config.default_output_workspace else: raise Exception('No output workspace configured for output \'%s\'' % name) from storage.models import 
Workspace workspace_mapping = {w.name: w.id for w in Workspace.objects.filter(name__in=workspace_names_dict.values())} for output_name, workspace_name in workspace_names_dict.items(): if workspace_name not in workspace_mapping: raise Exception('Workspace with name %s does not exist!' % workspace_name) workspace_dict[output_name] = workspace_mapping[workspace_name] return workspace_dict
ngageoint/scale
scale/job/configuration/data/job_data.py
Python
apache-2.0
35,293
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ops which manipulate lists of tensors.""" # pylint: disable=g-bad-name from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np # pylint: disable=unused-import from tensorflow.python.client import session from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import list_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variable_scope as vs from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase): def _testPushPop(self, max_num_elements): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, 
element_shape=[], max_num_elements=max_num_elements) l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0)) l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32) self.assertAllEqual(self.evaluate(e), 1.0) @parameterized.named_parameters(("NoMaxNumElements", None), ("WithMaxNumElements", 2)) def testPushPop(self, max_num_elements): self._testPushPop(max_num_elements) @parameterized.named_parameters(("NoMaxNumElements", None), ("WithMaxNumElements", 2)) def testPushPopGPU(self, max_num_elements): if not context.num_gpus(): return with context.device("gpu:0"): self._testPushPop(max_num_elements) @test_util.run_deprecated_v1 def testPushInFullListFails(self): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[], max_num_elements=1) l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0)) with self.assertRaisesRegexp(errors.InvalidArgumentError, "Tried to push item into a full list"): l = list_ops.tensor_list_push_back(l, 2.) self.evaluate(l) @parameterized.named_parameters(("NoMaxNumElements", None), ("WithMaxNumElements", 2)) @test_util.run_deprecated_v1 def testPopFromEmptyTensorListFails(self, max_num_elements): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[], max_num_elements=max_num_elements) with self.assertRaisesRegexp(errors.InvalidArgumentError, "Trying to pop from an empty list"): l = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32) self.evaluate(l) def _testStack(self, max_num_elements): l = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=[], max_num_elements=max_num_elements) l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0)) l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0)) t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32) if not context.executing_eagerly(): self.assertAllEqual(t.shape.as_list(), [None]) self.assertAllEqual(self.evaluate(t), [1.0, 2.0]) 
  @parameterized.named_parameters(("NoMaxNumElements", None),
                                  ("WithMaxNumElements", 2))
  def testStack(self, max_num_elements):
    self._testStack(max_num_elements)

  @parameterized.named_parameters(("NoMaxNumElements", None),
                                  ("WithMaxNumElements", 2))
  def testStackGPU(self, max_num_elements):
    # GPU variant of _testStack; skipped when no GPU is available.
    if not context.num_gpus():
      return
    with context.device("gpu:0"):
      self._testStack(max_num_elements)

  @parameterized.named_parameters(("NoMaxNumElements", None),
                                  ("WithMaxNumElements", 3))
  @test_util.run_deprecated_v1
  def testStackWithUnknownElementShape(self, max_num_elements):
    # element_shape=None: the list does not constrain its elements' shape.
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32,
        element_shape=None,
        max_num_elements=max_num_elements)
    l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
    l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))

    t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(t), [1.0, 2.0])

    # Should raise an error when the element tensors do not all have the same
    # shape.
    with self.assertRaisesRegexp(errors.InvalidArgumentError, "unequal shapes"):
      l = list_ops.tensor_list_push_back(l, constant_op.constant([3.0, 4.0]))
      t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
      self.evaluate(t)

  @parameterized.named_parameters(("NoMaxNumElements", None),
                                  ("WithMaxNumElements", 3))
  @test_util.run_deprecated_v1
  def testStackWithPartiallyDefinedElementShape(self, max_num_elements):
    # element_shape=[None]: elements are vectors of unknown length.
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32,
        element_shape=[None],
        max_num_elements=max_num_elements)
    l = list_ops.tensor_list_push_back(l, constant_op.constant([1.0]))
    l = list_ops.tensor_list_push_back(l, constant_op.constant([2.0]))

    t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(t), [[1.0], [2.0]])

    # Should raise an error when the element tensors do not all have the same
    # shape.
    with self.assertRaisesRegexp(errors.InvalidArgumentError, "unequal shapes"):
      l = list_ops.tensor_list_push_back(l, constant_op.constant([2.0, 3.0]))
      t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
      self.evaluate(t)

  @parameterized.named_parameters(("NoMaxNumElements", None),
                                  ("WithMaxNumElements", 2))
  @test_util.run_deprecated_v1
  def testStackEmptyList(self, max_num_elements):
    # Should be able to stack empty lists with fully defined element_shape.
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32,
        element_shape=[1, 2],
        max_num_elements=max_num_elements)
    t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(t).shape, (0, 1, 2))

    # Should not be able to stack empty lists with partially defined
    # element_shape.
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 "non-fully-defined"):
      l = list_ops.empty_tensor_list(
          element_dtype=dtypes.float32,
          element_shape=[None, 2],
          max_num_elements=max_num_elements)
      t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
      self.evaluate(t)

    # Should not be able to stack empty lists with undefined element_shape.
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 "non-fully-defined"):
      l = list_ops.empty_tensor_list(
          element_dtype=dtypes.float32,
          element_shape=None,
          max_num_elements=max_num_elements)
      t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
      self.evaluate(t)

  @parameterized.named_parameters(("NoMaxNumElements", None),
                                  ("WithMaxNumElements", 2))
  def testGatherGrad(self, max_num_elements):
    # Gradient of a gather should flow back to the watched pushed element.
    with backprop.GradientTape() as tape:
      l = list_ops.empty_tensor_list(
          element_dtype=dtypes.float32,
          element_shape=[],
          max_num_elements=max_num_elements)
      c0 = constant_op.constant(1.0)
      tape.watch(c0)
      l = list_ops.tensor_list_push_back(l, c0)
      l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
      t = list_ops.tensor_list_gather(l, [1, 0], element_dtype=dtypes.float32)
      self.assertAllEqual(self.evaluate(t), [2.0, 1.0])
      s = (t[0] + t[1]) * (t[0] + t[1])
    dt = tape.gradient(s, c0)
    # d/dc0 of (c0 + 2)^2 at c0 == 1 is 2 * 3 = 6.
    self.assertAllEqual(self.evaluate(dt), 6.0)

  @parameterized.named_parameters(("NoMaxNumElements", None),
                                  ("WithMaxNumElements", 3))
  @test_util.run_deprecated_v1
  def testGatherWithUnknownElementShape(self, max_num_elements):
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32,
        element_shape=None,
        max_num_elements=max_num_elements)
    l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
    l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
    l = list_ops.tensor_list_push_back(l, constant_op.constant([3.0, 4.0]))

    t = list_ops.tensor_list_gather(l, [1, 0], element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(t), [2.0, 1.0])

    t = list_ops.tensor_list_gather(l, [2], element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(t), [[3.0, 4.0]])

    # Should raise an error when the requested tensors do not all have the same
    # shape.
    with self.assertRaisesRegexp(errors.InvalidArgumentError, "unequal shapes"):
      t = list_ops.tensor_list_gather(l, [0, 2], element_dtype=dtypes.float32)
      self.evaluate(t)

  @parameterized.named_parameters(("NoMaxNumElements", None),
                                  ("WithMaxNumElements", 3))
  @test_util.run_deprecated_v1
  def testGatherWithPartiallyDefinedElementShape(self, max_num_elements):
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32,
        element_shape=[None],
        max_num_elements=max_num_elements)
    l = list_ops.tensor_list_push_back(l, constant_op.constant([1.0]))
    l = list_ops.tensor_list_push_back(l, constant_op.constant([2.0, 3.0]))
    l = list_ops.tensor_list_push_back(l, constant_op.constant([4.0, 5.0]))

    t = list_ops.tensor_list_gather(l, [0], element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(t), [[1.0]])

    t = list_ops.tensor_list_gather(l, [1, 2], element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(t), [[2.0, 3.0], [4.0, 5.0]])

    # Should raise an error when the requested tensors do not all have the same
    # shape.
    with self.assertRaisesRegexp(errors.InvalidArgumentError, "unequal shapes"):
      t = list_ops.tensor_list_gather(l, [0, 2], element_dtype=dtypes.float32)
      self.evaluate(t)

  @parameterized.named_parameters(("NoMaxNumElements", None),
                                  ("WithMaxNumElements", 3))
  @test_util.run_deprecated_v1
  def testGatherEmptyList(self, max_num_elements):
    # Should be able to gather from empty lists with fully defined
    # element_shape.
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32,
        element_shape=[1, 2],
        max_num_elements=max_num_elements)
    t = list_ops.tensor_list_gather(l, [], element_dtype=dtypes.float32)
    self.assertAllEqual((0, 1, 2), self.evaluate(t).shape)

    # Should not be able to gather from empty lists with partially defined
    # element_shape.
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 "non-fully-defined"):
      l = list_ops.empty_tensor_list(
          element_dtype=dtypes.float32,
          element_shape=[None, 2],
          max_num_elements=max_num_elements)
      t = list_ops.tensor_list_gather(l, [], element_dtype=dtypes.float32)
      self.evaluate(t)

    # Should not be able to gather from empty lists with undefined
    # element_shape.
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 "non-fully-defined"):
      l = list_ops.empty_tensor_list(
          element_dtype=dtypes.float32,
          element_shape=None,
          max_num_elements=max_num_elements)
      t = list_ops.tensor_list_gather(l, [], element_dtype=dtypes.float32)
      self.evaluate(t)

  def testScatterGrad(self):
    # Scatter [1.0, 2.0] to indices [1, 0] (i.e. reversed), then check the
    # gradient of a read-based loss flows back to the scattered source.
    with backprop.GradientTape() as tape:
      c0 = constant_op.constant([1.0, 2.0])
      tape.watch(c0)
      l = list_ops.tensor_list_scatter(
          c0, [1, 0], ops.convert_to_tensor([], dtype=dtypes.int32))
      t0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
      t1 = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
      self.assertAllEqual(self.evaluate(t0), 2.0)
      self.assertAllEqual(self.evaluate(t1), 1.0)
      loss = t0 * t0 + t1 * t1
    dt = tape.gradient(loss, c0)
    self.assertAllEqual(self.evaluate(dt), [2., 4.])

  def testTensorListFromTensor(self):
    # A list built from a tensor pops elements back in reverse order.
    t = constant_op.constant([1.0, 2.0])
    l = list_ops.tensor_list_from_tensor(t, element_shape=[])
    l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(e), 2.0)
    l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(e), 1.0)
    self.assertAllEqual(self.evaluate(list_ops.tensor_list_length(l)), 0)

  def testFromTensorGPU(self):
    if not context.num_gpus():
      return
    with context.device("gpu:0"):
      self.testTensorListFromTensor()

  def testGetSetItem(self):
    t = constant_op.constant([1.0, 2.0])
    l = list_ops.tensor_list_from_tensor(t, element_shape=[])
    e0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(e0), 1.0)
    l = list_ops.tensor_list_set_item(l, 0, 3.0)
    t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(t), [3.0, 2.0])

  def testGetSetGPU(self):
    if not context.num_gpus():
      return
    with context.device("gpu:0"):
      self.testGetSetItem()

  def testSetGetGrad(self):
    # Write 2*t into a reserved slot, read it back, and check d(read)/dt == 2.
    with backprop.GradientTape() as tape:
      t = constant_op.constant(5.)
      tape.watch(t)
      l = list_ops.tensor_list_reserve(
          element_dtype=dtypes.float32, element_shape=[], num_elements=3)
      l = list_ops.tensor_list_set_item(l, 1, 2. * t)
      e = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
      self.assertAllEqual(self.evaluate(e), 10.0)
    self.assertAllEqual(self.evaluate(tape.gradient(e, t)), 2.0)

  @test_util.run_deprecated_v1
  @test_util.enable_control_flow_v2
  def testSkipEagerSetItemIndexOutOfBounds(self):
    # With resize_if_index_out_of_bounds=True, setting past the end grows the
    # list instead of failing; the gradient still reaches the written value.
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=[])
    e0 = constant_op.constant(5.)
    l = list_ops.tensor_list_set_item(
        l, 0, 2. * e0, resize_if_index_out_of_bounds=True)
    l = list_ops.tensor_list_set_item(
        l, 1, 1., resize_if_index_out_of_bounds=True)
    t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
    grad = gradients_impl.gradients(t, e0)[0]
    self.assertAllEqual(self.evaluate(grad), 2.)

  @test_util.run_deprecated_v1
  def testSetOnEmptyListWithMaxNumElementsFails(self):
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=[], max_num_elements=3)
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        "Trying to modify element 0 in a list with 0 elements."):
      l = list_ops.tensor_list_set_item(l, 0, 1.)
      self.evaluate(l)

  def testUnknownShape(self):
    # With element_shape=None the list accepts elements of differing shapes.
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=None)
    l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
    l = list_ops.tensor_list_push_back(l, constant_op.constant([1.0, 2.0]))
    l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(e), [1.0, 2.0])
    l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(e), 1.0)

  def testCPUGPUCopy(self):
    # Copying a list across devices (via identity) must preserve its contents.
    if not context.num_gpus():
      return
    t = constant_op.constant([1.0, 2.0])
    l = list_ops.tensor_list_from_tensor(t, element_shape=[])
    with context.device("gpu:0"):
      l_gpu = array_ops.identity(l)
      self.assertAllEqual(
          self.evaluate(
              list_ops.tensor_list_pop_back(
                  l_gpu, element_dtype=dtypes.float32)[1]), 2.0)
    l_cpu = array_ops.identity(l_gpu)
    self.assertAllEqual(
        self.evaluate(
            list_ops.tensor_list_pop_back(
                l_cpu, element_dtype=dtypes.float32)[1]), 2.0)

  def testCPUGPUCopyNested(self):
    # Same as testCPUGPUCopy but with a variant list nested inside a list.
    if not context.num_gpus():
      return
    t = constant_op.constant([1.0, 2.0])
    child_l = list_ops.tensor_list_from_tensor(t, element_shape=[])
    l = list_ops.empty_tensor_list(
        element_shape=constant_op.constant([], dtype=dtypes.int32),
        element_dtype=dtypes.variant)
    l = list_ops.tensor_list_push_back(l, child_l)
    with context.device("gpu:0"):
      l_gpu = array_ops.identity(l)
      _, child_l_gpu = list_ops.tensor_list_pop_back(
          l_gpu, element_dtype=dtypes.variant)
      self.assertAllEqual(
          self.evaluate(
              list_ops.tensor_list_pop_back(
                  child_l_gpu, element_dtype=dtypes.float32)[1]), 2.0)
    l_cpu = array_ops.identity(l_gpu)
    _, child_l_cpu = list_ops.tensor_list_pop_back(
        l_cpu, element_dtype=dtypes.variant)
    self.assertAllEqual(
        self.evaluate(
            list_ops.tensor_list_pop_back(
                child_l_cpu, element_dtype=dtypes.float32)[1]), 2.0)

  def testGraphStack(self):
    with self.cached_session():
      tl = list_ops.empty_tensor_list(
          element_shape=constant_op.constant([1], dtype=dtypes.int32),
          element_dtype=dtypes.int32)
      tl = list_ops.tensor_list_push_back(tl, [1])
      self.assertAllEqual(
          self.evaluate(
              list_ops.tensor_list_stack(tl, element_dtype=dtypes.int32)),
          [[1]])

  def testSkipEagerStackInLoop(self):
    # Push inside a while_loop, then stack the accumulated values.
    with self.cached_session():
      t1 = list_ops.empty_tensor_list(
          element_shape=constant_op.constant([], dtype=dtypes.int32),
          element_dtype=dtypes.int32)
      i = constant_op.constant(0, dtype=dtypes.int32)

      def body(i, t1):
        t1 = list_ops.tensor_list_push_back(t1, i)
        i += 1
        return i, t1

      i, t1 = control_flow_ops.while_loop(lambda i, t1: math_ops.less(i, 4),
                                          body, [i, t1])
      s1 = list_ops.tensor_list_stack(t1, element_dtype=dtypes.int32)
      self.assertAllEqual(self.evaluate(s1), [0, 1, 2, 3])

  def testSkipEagerStackSwitchDtype(self):
    # The cond replaces an int32 list with a float32 one before pushing
    # float values, so the list's dtype effectively switches.
    with self.cached_session():
      list_ = list_ops.empty_tensor_list(
          element_shape=constant_op.constant([], dtype=dtypes.int32),
          element_dtype=dtypes.int32)
      m = constant_op.constant([1, 2, 3], dtype=dtypes.float32)

      def body(list_, m):
        list_ = control_flow_ops.cond(
            math_ops.equal(list_ops.tensor_list_length(list_), 0),
            lambda: list_ops.empty_tensor_list(m.shape, m.dtype),
            lambda: list_)
        list_ = list_ops.tensor_list_push_back(list_, m)
        return list_, m

      for _ in range(2):
        list_, m = body(list_, m)

      s1 = list_ops.tensor_list_stack(list_, element_dtype=dtypes.float32)
      np_s1 = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.float32)
      self.assertAllEqual(self.evaluate(s1), np_s1)

  def testSkipEagerStackInLoopSwitchDtype(self):
    # Like testSkipEagerStackSwitchDtype but with the dtype switch happening
    # inside a while_loop body.
    with self.cached_session():
      t1 = list_ops.empty_tensor_list(
          element_shape=constant_op.constant([], dtype=dtypes.int32),
          element_dtype=dtypes.int32)
      i = constant_op.constant(0, dtype=dtypes.float32)
      m = constant_op.constant([1, 2, 3], dtype=dtypes.float32)

      def body(i, m, t1):
        t1 = control_flow_ops.cond(
            math_ops.equal(list_ops.tensor_list_length(t1), 0),
            lambda: list_ops.empty_tensor_list(m.shape, m.dtype),
            lambda: t1)
        t1 = list_ops.tensor_list_push_back(t1, m * i)
        i += 1.0
        return i, m, t1

      i, m, t1 = control_flow_ops.while_loop(
          lambda i, m, t1: math_ops.less(i, 4), body, [i, m, t1])
      s1 = list_ops.tensor_list_stack(t1, element_dtype=dtypes.float32)
      np_s1 = np.vstack([np.arange(1, 4) * i for i in range(4)])
      self.assertAllEqual(self.evaluate(s1), np_s1)

  def testSerialize(self):
    # Moving a list between /job:worker and /job:ps forces the variant to be
    # serialized and deserialized across the RPC boundary.
    worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]
    with ops.Graph().as_default(), session.Session(target=worker.target):
      with ops.device("/job:worker"):
        t = constant_op.constant([[1.0], [2.0]])
        l = list_ops.tensor_list_from_tensor(t, element_shape=[1])
      with ops.device("/job:ps"):
        l_ps = array_ops.identity(l)
        l_ps, e = list_ops.tensor_list_pop_back(
            l_ps, element_dtype=dtypes.float32)
      with ops.device("/job:worker"):
        worker_e = array_ops.identity(e)
      self.assertAllEqual(self.evaluate(worker_e), [2.0])

  def testSerializeListWithInvalidTensors(self):
    # A reserved list with only some slots written still round-trips; the
    # unwritten slot is filled on the other side.
    worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]
    with ops.Graph().as_default(), session.Session(target=worker.target):
      with ops.device("/job:worker"):
        l = list_ops.tensor_list_reserve(
            element_dtype=dtypes.float32, element_shape=[], num_elements=2)
        l = list_ops.tensor_list_set_item(l, 0, 1.)
      with ops.device("/job:ps"):
        l_ps = array_ops.identity(l)
        l_ps = list_ops.tensor_list_set_item(l_ps, 1, 2.)
        t = list_ops.tensor_list_stack(l_ps, element_dtype=dtypes.float32)
      with ops.device("/job:worker"):
        worker_t = array_ops.identity(t)
      self.assertAllEqual(self.evaluate(worker_t), [1.0, 2.0])

  def testSerializeListWithUnknownRank(self):
    # An unknown element shape must survive serialization: the remote
    # element_shape query returns the scalar -1 sentinel.
    worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]
    with ops.Graph().as_default(), session.Session(target=worker.target):
      with ops.device("/job:worker"):
        t = constant_op.constant([[1.0], [2.0]])
        l = list_ops.tensor_list_from_tensor(t, element_shape=None)
      with ops.device("/job:ps"):
        l_ps = array_ops.identity(l)
        element_shape = list_ops.tensor_list_element_shape(
            l_ps, shape_type=dtypes.int32)
      with ops.device("/job:worker"):
        element_shape = array_ops.identity(element_shape)
      self.assertEqual(self.evaluate(element_shape), -1)

  def testSerializeListWithMaxNumElements(self):
    # max_num_elements must also survive serialization: the deserialized list
    # should still refuse a third push (asserted in the continuation below).
    if context.num_gpus():
      # TODO(b/119151861): Enable on GPU.
      return
    worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]
    with ops.Graph().as_default(), session.Session(target=worker.target):
      with ops.device("/job:worker"):
        l = list_ops.empty_tensor_list(
            element_shape=None, element_dtype=dtypes.float32,
            max_num_elements=2)
        l = list_ops.tensor_list_push_back(l, 1.)
      with ops.device("/job:ps"):
        l_ps = array_ops.identity(l)
        l_ps = list_ops.tensor_list_push_back(l_ps, 2.)
      with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                   "Tried to push item into a full list"):
        with ops.device("/job:worker"):
          l_worker = array_ops.identity(l_ps)
          l_worker = list_ops.tensor_list_push_back(l_worker, 3.0)
          self.evaluate(l_worker)

  def testPushPopGradients(self):
    # d(2*e)/dc where e is the value pushed (c) and popped back: gradient 2.
    with backprop.GradientTape() as tape:
      l = list_ops.empty_tensor_list(
          element_dtype=dtypes.float32, element_shape=[])
      c = constant_op.constant(1.0)
      tape.watch(c)
      l = list_ops.tensor_list_push_back(l, c)
      l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
      e = 2 * e
    self.assertAllEqual(self.evaluate(tape.gradient(e, [c])[0]), 2.0)

  def testStackFromTensorGradients(self):
    with backprop.GradientTape() as tape:
      c = constant_op.constant([1.0, 2.0])
      tape.watch(c)
      l = list_ops.tensor_list_from_tensor(c, element_shape=[])
      c2 = list_ops.tensor_list_stack(
          l, element_dtype=dtypes.float32, num_elements=2)
      result = c2 * 2.0
    grad = tape.gradient(result, [c])[0]
    self.assertAllEqual(self.evaluate(grad), [2.0, 2.0])

  def testGetSetGradients(self):
    # Overwriting index 0 with c2 should zero the gradient to c[0] and route
    # it to c2 instead.
    with backprop.GradientTape() as tape:
      c = constant_op.constant([1.0, 2.0])
      tape.watch(c)
      l = list_ops.tensor_list_from_tensor(c, element_shape=[])
      c2 = constant_op.constant(3.0)
      tape.watch(c2)
      l = list_ops.tensor_list_set_item(l, 0, c2)
      e = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
      ee = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
      y = e * e + ee * ee
    grad_c, grad_c2 = tape.gradient(y, [c, c2])
    self.assertAllEqual(self.evaluate(grad_c), [0.0, 4.0])
    self.assertAllEqual(self.evaluate(grad_c2), 6.0)

  @test_util.run_deprecated_v1
  def testSetOutOfBounds(self):
    c = constant_op.constant([1.0, 2.0])
    l = list_ops.tensor_list_from_tensor(c, element_shape=[])
    with self.assertRaises(errors.InvalidArgumentError):
      self.evaluate(list_ops.tensor_list_set_item(l, 20, 3.0))

  @test_util.run_deprecated_v1
  def testSkipEagerSetItemWithMismatchedShapeFails(self):
    with self.cached_session() as sess:
      ph = array_ops.placeholder(dtypes.float32)
      c = constant_op.constant([1.0, 2.0])
      l = list_ops.tensor_list_from_tensor(c, element_shape=[])
      # Set a placeholder with unknown shape to satisfy the shape inference
      # at graph building time.
      l = list_ops.tensor_list_set_item(l, 0, ph)
      l_0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
      with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                   "incompatible shape"):
        sess.run(l_0, {ph: [3.0]})

  def testResourceVariableScatterGather(self):
    # A resource variable whose elements are tensor lists should support
    # indexed reads, sparse_read, and scatter_update.
    c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
    l = list_ops.tensor_list_from_tensor(c, element_shape=[])
    v = vs.get_variable("var", initializer=[l] * 10, use_resource=True)
    v_r_0_stacked = list_ops.tensor_list_stack(v[0], dtypes.float32)
    self.evaluate(v.initializer)
    self.assertAllEqual([1.0, 2.0], self.evaluate(v_r_0_stacked))
    v_r_sparse_stacked = list_ops.tensor_list_stack(
        v.sparse_read(0), dtypes.float32)
    self.assertAllEqual([1.0, 2.0], self.evaluate(v_r_sparse_stacked))
    l_new_0 = list_ops.tensor_list_from_tensor([3.0, 4.0], element_shape=[])
    l_new_1 = list_ops.tensor_list_from_tensor([5.0, 6.0], element_shape=[])
    updated_v = state_ops.scatter_update(v, [3, 5], [l_new_0, l_new_1])
    updated_v_elems = array_ops.unstack(updated_v)
    updated_v_stacked = [
        list_ops.tensor_list_stack(el, dtypes.float32)
        for el in updated_v_elems
    ]
    # Only slots 3 and 5 change; everything else keeps the initial [1., 2.].
    expected = ([[1.0, 2.0]] * 3 + [[3.0, 4.0], [1.0, 2.0], [5.0, 6.0]] +
                [[1.0, 2.0]] * 4)
    self.assertAllEqual(self.evaluate(updated_v_stacked), expected)

  @test_util.run_deprecated_v1
  def testConcat(self):
    # Concatenate batches of lists element-wise in all four orderings.
    c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
    l0 = list_ops.tensor_list_from_tensor(c, element_shape=[])
    l1 = list_ops.tensor_list_from_tensor([-1.0], element_shape=[])
    l_batch_0 = array_ops.stack([l0, l1])
    l_batch_1 = array_ops.stack([l1, l0])

    l_concat_01 = list_ops.tensor_list_concat_lists(
        l_batch_0, l_batch_1, element_dtype=dtypes.float32)

    l_concat_10 = list_ops.tensor_list_concat_lists(
        l_batch_1, l_batch_0, element_dtype=dtypes.float32)

    l_concat_00 = list_ops.tensor_list_concat_lists(
        l_batch_0, l_batch_0, element_dtype=dtypes.float32)

    l_concat_11 = list_ops.tensor_list_concat_lists(
        l_batch_1, l_batch_1, element_dtype=dtypes.float32)

    expected_00 = [[1.0, 2.0, 1.0, 2.0], [-1.0, -1.0]]
    expected_01 = [[1.0, 2.0, -1.0], [-1.0, 1.0, 2.0]]
    expected_10 = [[-1.0, 1.0, 2.0], [1.0, 2.0, -1.0]]
    expected_11 = [[-1.0, -1.0], [1.0, 2.0, 1.0, 2.0]]

    for i, (concat, expected) in enumerate(zip(
        [l_concat_00, l_concat_01, l_concat_10, l_concat_11],
        [expected_00, expected_01, expected_10, expected_11])):
      splitted = array_ops.unstack(concat)
      splitted_stacked_ret = self.evaluate(
          (list_ops.tensor_list_stack(splitted[0], dtypes.float32),
           list_ops.tensor_list_stack(splitted[1], dtypes.float32)))
      print("Test concat %d: %s, %s, %s, %s"
            % (i, expected[0], splitted_stacked_ret[0],
               expected[1], splitted_stacked_ret[1]))
      self.assertAllClose(expected[0], splitted_stacked_ret[0])
      self.assertAllClose(expected[1], splitted_stacked_ret[1])

    # Concatenating mismatched shapes fails.
    with self.assertRaises((errors.InvalidArgumentError, ValueError)):
      self.evaluate(
          list_ops.tensor_list_concat_lists(
              l_batch_0,
              list_ops.empty_tensor_list([], dtypes.float32),
              element_dtype=dtypes.float32))

    # Concatenating lists whose element shapes differ must fail.
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 "element shapes are not identical at index 0"):
      l_batch_of_vec_tls = array_ops.stack(
          [list_ops.tensor_list_from_tensor([[1.0]], element_shape=[1])] * 2)
      self.evaluate(
          list_ops.tensor_list_concat_lists(l_batch_0, l_batch_of_vec_tls,
                                            element_dtype=dtypes.float32))

    # Concatenating lists whose element dtype differs must fail.
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 r"input_b\[0\].dtype != element_dtype."):
      l_batch_of_int_tls = array_ops.stack(
          [list_ops.tensor_list_from_tensor([1], element_shape=[])] * 2)
      self.evaluate(
          list_ops.tensor_list_concat_lists(l_batch_0, l_batch_of_int_tls,
                                            element_dtype=dtypes.float32))

  @test_util.run_deprecated_v1
  def testPushBackBatch(self):
    # Push one new element onto every list in a stacked batch at once.
    c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
    l0 = list_ops.tensor_list_from_tensor(c, element_shape=[])
    l1 = list_ops.tensor_list_from_tensor([-1.0], element_shape=[])
    l_batch = array_ops.stack([l0, l1])
    l_push = list_ops.tensor_list_push_back_batch(l_batch, [3.0, 4.0])
    l_unstack = array_ops.unstack(l_push)
    l0_ret = list_ops.tensor_list_stack(l_unstack[0], dtypes.float32)
    l1_ret = list_ops.tensor_list_stack(l_unstack[1], dtypes.float32)
    self.assertAllClose([1.0, 2.0, 3.0], self.evaluate(l0_ret))
    self.assertAllClose([-1.0, 4.0], self.evaluate(l1_ret))

    with ops.control_dependencies([l_push]):
      l_unstack_orig = array_ops.unstack(l_batch)
      l0_orig_ret = list_ops.tensor_list_stack(l_unstack_orig[0],
                                               dtypes.float32)
      l1_orig_ret = list_ops.tensor_list_stack(l_unstack_orig[1],
                                               dtypes.float32)

    # Check that without aliasing, push_back_batch still works; and
    # that it doesn't modify the input.
    l0_r_v, l1_r_v, l0_orig_v, l1_orig_v = self.evaluate(
        (l0_ret, l1_ret, l0_orig_ret, l1_orig_ret))
    self.assertAllClose([1.0, 2.0, 3.0], l0_r_v)
    self.assertAllClose([-1.0, 4.0], l1_r_v)
    self.assertAllClose([1.0, 2.0], l0_orig_v)
    self.assertAllClose([-1.0], l1_orig_v)

    # Pushing back mismatched shapes fails.
    with self.assertRaises((errors.InvalidArgumentError, ValueError)):
      self.evaluate(list_ops.tensor_list_push_back_batch(l_batch, []))

    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 "incompatible shape to a list at index 0"):
      self.evaluate(
          list_ops.tensor_list_push_back_batch(l_batch, [[3.0], [4.0]]))

    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 "Invalid data type at index 0"):
      self.evaluate(list_ops.tensor_list_push_back_batch(l_batch, [3, 4]))

  def testZerosLike(self):
    # zeros_like on a list should produce a same-length list of zero tensors,
    # for every supported element dtype.
    for dtype in (dtypes.uint8, dtypes.uint16, dtypes.int8, dtypes.int16,
                  dtypes.int32, dtypes.int64, dtypes.float16, dtypes.float32,
                  dtypes.float64, dtypes.complex64, dtypes.complex128,
                  dtypes.bool):
      l_empty = list_ops.empty_tensor_list(
          element_dtype=dtype, element_shape=[])
      l_empty_zeros = array_ops.zeros_like(l_empty)
      t_empty_zeros = list_ops.tensor_list_stack(
          l_empty_zeros, element_dtype=dtype)

      l_full = list_ops.tensor_list_push_back(l_empty,
                                              math_ops.cast(0, dtype=dtype))
      l_full = list_ops.tensor_list_push_back(l_full,
                                              math_ops.cast(1, dtype=dtype))
      l_full_zeros = array_ops.zeros_like(l_full)
      t_full_zeros = list_ops.tensor_list_stack(
          l_full_zeros, element_dtype=dtype)

      self.assertAllEqual(self.evaluate(t_empty_zeros), [])
      self.assertAllEqual(
          self.evaluate(t_full_zeros), np.zeros(
              (2,), dtype=dtype.as_numpy_dtype))

  def testZerosLikeNested(self):
    # zeros_like should recurse into variant lists-of-lists.
    for dtype in (dtypes.uint8, dtypes.uint16, dtypes.int8, dtypes.int16,
                  dtypes.int32, dtypes.int64, dtypes.float16, dtypes.float32,
                  dtypes.float64, dtypes.complex64, dtypes.complex128,
                  dtypes.bool):
      l = list_ops.empty_tensor_list(
          element_dtype=dtypes.variant, element_shape=[])

      sub_l = list_ops.empty_tensor_list(element_dtype=dtype, element_shape=[])
      l = list_ops.tensor_list_push_back(l, sub_l)
      sub_l = list_ops.tensor_list_push_back(sub_l, math_ops.cast(
          1, dtype=dtype))
      l = list_ops.tensor_list_push_back(l, sub_l)
      sub_l = list_ops.tensor_list_push_back(sub_l, math_ops.cast(
          2, dtype=dtype))
      l = list_ops.tensor_list_push_back(l, sub_l)

      # l : [[],
      #      [1],
      #      [1, 2]]
      #
      # l_zeros : [[],
      #            [0],
      #            [0, 0]]
      l_zeros = array_ops.zeros_like(l)

      outputs = []
      for _ in range(3):
        l_zeros, out = list_ops.tensor_list_pop_back(
            l_zeros, element_dtype=dtypes.variant)
        outputs.append(list_ops.tensor_list_stack(out, element_dtype=dtype))

      # Note: `outputs` contains popped values so the order is reversed.
      self.assertAllEqual(self.evaluate(outputs[2]), [])
      self.assertAllEqual(
          self.evaluate(outputs[1]), np.zeros((1,),
                                              dtype=dtype.as_numpy_dtype))
      self.assertAllEqual(
          self.evaluate(outputs[0]), np.zeros((2,),
                                              dtype=dtype.as_numpy_dtype))

  def testElementShape(self):
    # -1 is the sentinel for "element shape unknown".
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=None)
    shape = list_ops.tensor_list_element_shape(l, shape_type=dtypes.int32)
    self.assertEqual(self.evaluate(shape), -1)

  def testZerosLikeUninitialized(self):
    # zeros_like must tolerate reserved-but-unwritten ("uninitialized") slots.
    l0 = list_ops.tensor_list_reserve([], 3, element_dtype=dtypes.float32)
    l1 = list_ops.tensor_list_set_item(l0, 0, 1.)  # [1., _, _]
    zeros_1 = array_ops.zeros_like(l1)  # [0., _, _]
    l2 = list_ops.tensor_list_set_item(l1, 2, 2.)  # [1., _, 2.]
    zeros_2 = array_ops.zeros_like(l2)  # [0., _, 0.]
    # Gather indices with zeros in `zeros_1`.
    res_1 = list_ops.tensor_list_gather(
        zeros_1, [0], element_dtype=dtypes.float32)
    # Gather indices with zeros in `zeros_2`.
    res_2 = list_ops.tensor_list_gather(
        zeros_2, [0, 2], element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(res_1), [0.])
    self.assertAllEqual(self.evaluate(res_2), [0., 0.])

  @test_util.run_deprecated_v1
  def testSkipEagerTensorListGetItemGradAggregation(self):
    # Two reads of the same slot: gradients through both reads must sum (2.).
    l = list_ops.tensor_list_reserve(
        element_shape=[], num_elements=1, element_dtype=dtypes.float32)
    x = constant_op.constant(1.0)
    l = list_ops.tensor_list_set_item(l, 0, x)
    l_read1 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
    l_read2 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
    grad = gradients_impl.gradients([l_read1, l_read2], [x])
    with self.cached_session() as sess:
      self.assertSequenceEqual(self.evaluate(grad), [2.])

  @test_util.run_deprecated_v1
  def testSkipEagerBuildElementShape(self):
    # Unit tests for the private shape-normalization helper.
    fn = list_ops._build_element_shape
    # Unknown shape -> -1.
    self.assertEqual(fn(None), -1)
    self.assertEqual(fn(tensor_shape.unknown_shape()), -1)
    # Scalar shape -> [] with type int32.
    self.assertEqual(fn([]).dtype, dtypes.int32)
    self.assertEqual(fn(tensor_shape.scalar()).dtype, dtypes.int32)
    self.assertAllEqual(self.evaluate(fn([])), np.array([], np.int32))
    self.assertAllEqual(
        self.evaluate(fn(tensor_shape.scalar())), np.array([], np.int32))
    # Tensor -> Tensor
    shape = constant_op.constant(1)
    self.assertIs(fn(shape), shape)
    # Shape with unknown dims -> shape list with -1's.
    shape = [None, 5]
    self.assertAllEqual(fn(shape), [-1, 5])
    self.assertAllEqual(fn(tensor_shape.TensorShape(shape)), [-1, 5])
    # Shape with unknown dims and tensor dims -> shape list with -1's and tensor
    # dims.
    t = array_ops.placeholder(dtypes.int32)
    shape = [None, 5, t]
    result = fn(shape)
    self.assertAllEqual(result[:2], [-1, 5])
    self.assertIs(result[2], t)

  def testAddN(self):
    # add_n over lists adds them element-wise.
    l1 = list_ops.tensor_list_from_tensor([1.0, 2.0], element_shape=[])
    l2 = list_ops.tensor_list_from_tensor([3.0, 4.0], element_shape=[])
    l3 = list_ops.tensor_list_from_tensor([5.0, 6.0], element_shape=[])
    result = math_ops.add_n((l1, l2, l3))
    result_t = list_ops.tensor_list_stack(result, element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(result_t), [9., 12.])

  def testAddNNestedList(self):
    # add_n should recurse into variant lists-of-lists.
    l1 = list_ops.tensor_list_from_tensor([1.0, 2.0], element_shape=[])
    l2 = list_ops.tensor_list_from_tensor([3.0, 4.0], element_shape=[])
    l3 = list_ops.tensor_list_from_tensor([5.0, 6.0], element_shape=[])
    l4 = list_ops.tensor_list_from_tensor([7.0, 8.0], element_shape=[])
    a = list_ops.empty_tensor_list(
        element_dtype=dtypes.variant, element_shape=[])
    a = list_ops.tensor_list_push_back(a, l1)
    a = list_ops.tensor_list_push_back(a, l2)
    b = list_ops.empty_tensor_list(
        element_dtype=dtypes.variant, element_shape=[])
    b = list_ops.tensor_list_push_back(b, l3)
    b = list_ops.tensor_list_push_back(b, l4)
    result = math_ops.add_n((a, b))
    result_0 = list_ops.tensor_list_stack(
        list_ops.tensor_list_get_item(result, 0, element_dtype=dtypes.variant),
        element_dtype=dtypes.float32)
    result_1 = list_ops.tensor_list_stack(
        list_ops.tensor_list_get_item(result, 1, element_dtype=dtypes.variant),
        element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(result_0), [6., 8.])
    self.assertAllEqual(self.evaluate(result_1), [10., 12.])

  @test_util.run_deprecated_v1
  def testSkipEagerConcatShapeInference(self):

    def BuildTensor(element_shape):
      l = list_ops.empty_tensor_list(
          element_dtype=dtypes.float32, element_shape=element_shape)
      return list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)

    self.assertIsNone(BuildTensor(None).shape.rank)
    self.assertAllEqual(BuildTensor([None, 2, 3]).shape.as_list(), [None, 2, 3])
    self.assertAllEqual(
        BuildTensor([None, 2, None]).shape.as_list(), [None, 2, None])
    # The leading dim is always None since the list length is unknown.
    self.assertAllEqual(BuildTensor([1, 2, 3]).shape.as_list(), [None, 2, 3])

  def testConcatWithFullyDefinedElementShape(self):
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=[2, 2])
    l = list_ops.tensor_list_push_back(l, [[0., 1.], [2., 3.]])
    l = list_ops.tensor_list_push_back(l, [[4., 5.], [6., 7.]])
    t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
    self.assertAllEqual(
        self.evaluate(t), [[0., 1.], [2., 3.], [4., 5.], [6., 7.]])

  def testConcatWithNonFullyDefinedElementShape(self):
    # Concat joins along dim 0, so rows of differing length are allowed.
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=[None, 2])
    l = list_ops.tensor_list_push_back(l, [[0., 1.]])
    l = list_ops.tensor_list_push_back(l, [[2., 3.], [4., 5.]])
    t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(t), [[0., 1.], [2., 3.], [4., 5.]])

  def testConcatWithMismatchingTensorShapesFails(self):
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=None)
    l = list_ops.tensor_list_push_back(l, [[0., 1.]])
    l = list_ops.tensor_list_push_back(l, [[2.], [4.]])
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        r"Tried to concat tensors with unequal shapes: "
        r"\[2\] vs \[1\]"):
      t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
      self.evaluate(t)

  def testConcatEmptyListWithFullyDefinedElementShape(self):
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=[5, 2])
    t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(t).shape, (0, 2))
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=[None, 2])
    t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(t).shape, (0, 2))

  def testConcatEmptyListWithUnknownElementShapeFails(self):
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=None)
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        "All except the first dimension must be fully"
        " defined when concating an empty tensor list"):
      t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
      self.evaluate(t)

  def testConcatEmptyListWithPartiallyDefinedElementShapeFails(self):
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=[2, None])
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        "All except the first dimension must be fully"
        " defined when concating an empty tensor list"):
      t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
      self.evaluate(t)

  def testConcatListWithScalarElementShapeFails(self):
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=tensor_shape.scalar())
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        "Concat requires elements to be at least vectors, "
        "found scalars instead"):
      t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
      self.evaluate(t)

  def testConcatListWithScalarElementsFails(self):
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=None)
    l1 = list_ops.tensor_list_push_back(l, 1.)
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError, "Concat saw a scalar shape at index 0"
        " but requires at least vectors"):
      t = list_ops.tensor_list_concat(l1, element_dtype=dtypes.float32)
      self.evaluate(t)
    l1 = list_ops.tensor_list_push_back(l, [1.])
    l1 = list_ops.tensor_list_push_back(l1, 2.)
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError, "Concat saw a scalar shape at index 1"
        " but requires at least vectors"):
      t = list_ops.tensor_list_concat(l1, element_dtype=dtypes.float32)
      self.evaluate(t)

  def testEvenSplit(self):

    def RunTest(input_tensor, lengths, expected_stacked_output):
      # Split `input_tensor` along dim 0 into pieces of the given lengths.
      l = list_ops.tensor_list_split(
          input_tensor, element_shape=None, lengths=lengths)
      self.assertAllEqual(
          list_ops.tensor_list_stack(l, element_dtype=dtypes.float32),
          expected_stacked_output)

    RunTest([1., 2., 3.], [1, 1, 1], [[1.], [2.], [3.]])
    RunTest([1., 2., 3., 4.], [2, 2], [[1., 2.], [3., 4.]])
    RunTest([[1., 2.], [3., 4.]], [1, 1], [[[1., 2.]], [[3., 4.]]])

  def testUnevenSplit(self):
    l = list_ops.tensor_list_split([1., 2., 3., 4., 5],
                                   element_shape=None,
                                   lengths=[3, 2])
    self.assertAllEqual(list_ops.tensor_list_length(l), 2)
    self.assertAllEqual(
        list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32),
        [1., 2., 3.])
    self.assertAllEqual(
        list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32),
        [4., 5.])

  @test_util.run_deprecated_v1
  def testSkipEagerSplitWithInvalidTensorShapeFails(self):
    with self.cached_session():
      tensor = array_ops.placeholder(dtype=dtypes.float32)
      l = list_ops.tensor_list_split(tensor, element_shape=None, lengths=[1])
      with self.assertRaisesRegexp(
          errors.InvalidArgumentError,
          r"Tensor must be at least a vector, but saw shape: \[\]"):
        l.eval({tensor: 1})

  @test_util.run_deprecated_v1
  def testSkipEagerSplitWithInvalidLengthsShapeFails(self):
    with self.cached_session():
      lengths = array_ops.placeholder(dtype=dtypes.int64)
      l = list_ops.tensor_list_split([1., 2.],
                                     element_shape=None,
                                     lengths=lengths)
      with self.assertRaisesRegexp(
          errors.InvalidArgumentError,
          r"Expected lengths to be a vector, received shape: \[\]"):
        l.eval({lengths: 1})

  def testSplitWithInvalidLengthsFails(self):
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 r"Invalid value in lengths: -1"):
      l = list_ops.tensor_list_split([1., 2.],
                                     element_shape=None,
lengths=[1, -1]) self.evaluate(l) with self.assertRaisesRegexp( errors.InvalidArgumentError, r"Attempting to slice \[0, 3\] from tensor with length 2"): l = list_ops.tensor_list_split([1., 2.], element_shape=None, lengths=[3]) self.evaluate(l) with self.assertRaisesRegexp( errors.InvalidArgumentError, r"Unused values in tensor. Length of tensor: 2 Values used: 1"): l = list_ops.tensor_list_split([1., 2.], element_shape=None, lengths=[1]) self.evaluate(l) @test_util.run_deprecated_v1 def testSkipEagerSplitWithScalarElementShapeFails(self): with self.assertRaisesRegexp(ValueError, r"Shapes must be equal rank, but are 1 and 0"): l = list_ops.tensor_list_split([1., 2.], element_shape=[], lengths=[1, 1]) with self.cached_session(): with self.assertRaisesRegexp( errors.InvalidArgumentError, r"TensorListSplit requires element_shape to be at least of rank 1, " r"but saw: \[\]"): element_shape = array_ops.placeholder(dtype=dtypes.int32) l = list_ops.tensor_list_split([1., 2.], element_shape=element_shape, lengths=[1, 1]) l.eval({element_shape: []}) def testEagerOnlySplitWithScalarElementShapeFails(self): if context.executing_eagerly(): with self.assertRaisesRegexp( errors.InvalidArgumentError, r"TensorListSplit requires element_shape to be at least of rank 1, " r"but saw: \[\]"): list_ops.tensor_list_split([1., 2.], element_shape=[], lengths=[1, 1]) @test_util.run_deprecated_v1 def testSkipEagerSplitWithIncompatibleTensorShapeAndElementShapeFails(self): with self.assertRaisesRegexp(ValueError, r"Shapes must be equal rank, but are 2 and 1"): l = list_ops.tensor_list_split([[1.], [2.]], element_shape=[1], lengths=[1, 1]) with self.cached_session(): with self.assertRaisesRegexp( errors.InvalidArgumentError, r"tensor shape \[2,1\] is not compatible with element_shape \[1\]"): element_shape = array_ops.placeholder(dtype=dtypes.int32) l = list_ops.tensor_list_split([[1.], [2.]], element_shape=element_shape, lengths=[1, 1]) l.eval({element_shape: [1]}) def 
testEagerOnlySplitWithIncompatibleTensorShapeAndElementShapeFails(self): if context.executing_eagerly(): with self.assertRaisesRegexp( errors.InvalidArgumentError, r"tensor shape \[2,1\] is not compatible with element_shape \[1\]"): list_ops.tensor_list_split([[1.], [2.]], element_shape=[1], lengths=[1, 1]) def testResizeGrow(self): l = list_ops.tensor_list_from_tensor([1., 2.], element_shape=[]) l = list_ops.tensor_list_resize(l, 4) self.assertEqual(self.evaluate(list_ops.tensor_list_length(l)), 4) self.assertEqual( self.evaluate( list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)), 1.) self.assertEqual( self.evaluate( list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)), 2.) def testResizeShrink(self): l = list_ops.tensor_list_from_tensor([1., 2., 3.], element_shape=[]) l = list_ops.tensor_list_resize(l, 2) self.assertEqual(self.evaluate(list_ops.tensor_list_length(l)), 2) self.assertAllEqual( self.evaluate( list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)), [1., 2.]) def testResizeWithInvalidSizeFails(self): with self.assertRaisesRegexp( errors.InvalidArgumentError, "TensorListSlice expects size to be non-negative"): l = list_ops.tensor_list_from_tensor([1., 2., 3.], element_shape=[]) l = list_ops.tensor_list_resize(l, -1) self.evaluate(l) @test_util.run_deprecated_v1 @test_util.enable_control_flow_v2 def testSkipEagerResizeGrad(self): t = constant_op.constant([1., 2., 3.]) l = list_ops.tensor_list_from_tensor(t, element_shape=[]) l = list_ops.tensor_list_set_item( l, 3, 4., resize_if_index_out_of_bounds=True) t1 = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32) grad = gradients_impl.gradients(t1, t)[0] self.assertAllEqual(self.evaluate(grad), [1., 1., 1.]) if __name__ == "__main__": test.main()
hfp/tensorflow-xsmm
tensorflow/python/kernel_tests/list_ops_test.py
Python
apache-2.0
51,102
from rest_framework import permissions from rest_framework.permissions import BasePermission class IsAuthenticatedOrCreate(permissions.IsAuthenticated): def has_permission(self, request, view): if request.method == 'POST': return True return super(IsAuthenticatedOrCreate, self).has_permission(request, view) class IsOwner(BasePermission): message = "You must be the owner of this object." def has_object_permission(self, request, view, obj): my_safe_methods = [] if request.method in my_safe_methods: return True return obj.owner == request.user
CornellProjects/hlthpal
web/project/main/permissions.py
Python
apache-2.0
624
#!/usr/bin/env python """Load all flows so that they are visible in the registry.""" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals # pylint: disable=unused-import # These imports populate the Flow registry from grr_response_server.flows.general import administrative from grr_response_server.flows.general import apple_firmware from grr_response_server.flows.general import artifact_fallbacks from grr_response_server.flows.general import ca_enroller from grr_response_server.flows.general import checks from grr_response_server.flows.general import collectors from grr_response_server.flows.general import discovery from grr_response_server.flows.general import export from grr_response_server.flows.general import file_finder from grr_response_server.flows.general import filesystem from grr_response_server.flows.general import filetypes from grr_response_server.flows.general import find from grr_response_server.flows.general import fingerprint from grr_response_server.flows.general import hardware from grr_response_server.flows.general import memory from grr_response_server.flows.general import network from grr_response_server.flows.general import osquery from grr_response_server.flows.general import processes from grr_response_server.flows.general import registry from grr_response_server.flows.general import timeline from grr_response_server.flows.general import transfer from grr_response_server.flows.general import webhistory from grr_response_server.flows.general import windows_vsc
dunkhong/grr
grr/server/grr_response_server/flows/general/registry_init.py
Python
apache-2.0
1,568
input = raw_input(); print input
dheerajgoudb/Hackerrank
Python/Introduction/Reading_raw_input.py
Python
apache-2.0
33
# -*- coding: utf-8 -*- # flake8: noqa # Disable Flake8 because of all the sphinx imports # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # Airflow documentation build configuration file, created by # sphinx-quickstart on Thu Oct 9 20:50:01 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. 
"""Configuration of Airflow Docs""" import os import sys from typing import Dict import airflow from airflow.configuration import default_config_yaml try: import sphinx_airflow_theme # pylint: disable=unused-import airflow_theme_is_available = True except ImportError: airflow_theme_is_available = False autodoc_mock_imports = [ 'MySQLdb', 'adal', 'analytics', 'azure', 'azure.cosmos', 'azure.datalake', 'azure.mgmt', 'boto3', 'botocore', 'bson', 'cassandra', 'celery', 'cloudant', 'cryptography', 'cx_Oracle', 'datadog', 'distributed', 'docker', 'google', 'google_auth_httplib2', 'googleapiclient', 'grpc', 'hdfs', 'httplib2', 'jaydebeapi', 'jenkins', 'jira', 'kubernetes', 'mesos', 'msrestazure', 'pandas', 'pandas_gbq', 'paramiko', 'pinotdb', 'psycopg2', 'pydruid', 'pyhive', 'pyhive', 'pymongo', 'pymssql', 'pysftp', 'qds_sdk', 'redis', 'simple_salesforce', 'slackclient', 'smbclient', 'snowflake', 'sshtunnel', 'tenacity', 'vertica_python', 'winrm', 'zdesk', ] # Hack to allow changing for piece of the code to behave differently while # the docs are being built. The main objective was to alter the # behavior of the utils.apply_default that was hiding function headers os.environ['BUILDING_AIRFLOW_DOCS'] = 'TRUE' # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.join(os.path.dirname(__file__), 'exts')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinx.ext.graphviz', 'sphinxarg.ext', 'sphinxcontrib.httpdomain', 'sphinxcontrib.jinja', 'sphinx.ext.intersphinx', 'autoapi.extension', 'exampleinclude', 'docroles', 'removemarktransform', ] autodoc_default_options = { 'show-inheritance': True, 'members': True } jinja_contexts = { 'config_ctx': {"configs": default_config_yaml()} } viewcode_follow_imported_members = True # Add any paths that contain templates here, relative to this directory. templates_path = ['templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Airflow' # copyright = u'' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. # version = '1.0.0' version = airflow.__version__ # The full version, including alpha/beta/rc tags. # release = '1.0.0' release = airflow.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. 
exclude_patterns = [ '_api/airflow/_vendor', '_api/airflow/api', '_api/airflow/bin', '_api/airflow/config_templates', '_api/airflow/configuration', '_api/airflow/contrib/auth', '_api/airflow/contrib/example_dags', '_api/airflow/contrib/index.rst', '_api/airflow/contrib/kubernetes', '_api/airflow/contrib/task_runner', '_api/airflow/contrib/utils', '_api/airflow/dag', '_api/airflow/default_login', '_api/airflow/example_dags', '_api/airflow/exceptions', '_api/airflow/index.rst', '_api/airflow/jobs', '_api/airflow/lineage', '_api/airflow/logging_config', '_api/airflow/macros', '_api/airflow/migrations', '_api/airflow/plugins_manager', '_api/airflow/security', '_api/airflow/serialization', '_api/airflow/settings', '_api/airflow/sentry', '_api/airflow/stats', '_api/airflow/task', '_api/airflow/ti_deps', '_api/airflow/utils', '_api/airflow/version', '_api/airflow/www', '_api/airflow/www_rbac', '_api/main', 'autoapi_templates', 'howto/operator/gcp/_partials', ] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. 
keep_warnings = True intersphinx_mapping = { 'boto3': ('https://boto3.amazonaws.com/v1/documentation/api/latest/', None), 'mongodb': ('https://api.mongodb.com/python/current/', None), 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None), 'python': ('https://docs.python.org/3/', None), 'requests': ('https://requests.readthedocs.io/en/master/', None), 'sqlalchemy': ('https://docs.sqlalchemy.org/en/latest/', None), 'hdfs': ('https://hdfscli.readthedocs.io/en/latest/', None), # google-cloud-python 'google-cloud-automl': ('https://googleapis.dev/python/automl/latest', None), 'google-cloud-bigquery': ('https://googleapis.dev/python/bigquery/latest', None), 'google-cloud-bigquery-datatransfer': ('https://googleapis.dev/python/bigquerydatatransfer/latest', None), 'google-cloud-bigquery-storage': ('https://googleapis.dev/python/bigquerystorage/latest', None), 'google-cloud-bigtable': ('https://googleapis.dev/python/bigtable/latest', None), 'google-cloud-container': ('https://googleapis.dev/python/container/latest', None), 'google-cloud-core': ('https://googleapis.dev/python/google-cloud-core/latest', None), 'google-cloud-datastore': ('https://googleapis.dev/python/datastore/latest', None), 'google-cloud-dlp': ('https://googleapis.dev/python/dlp/latest', None), 'google-cloud-kms': ('https://googleapis.dev/python/cloudkms/latest', None), 'google-cloud-language': ('https://googleapis.dev/python/language/latest', None), 'google-cloud-pubsub': ('https://googleapis.dev/python/pubsub/latest', None), 'google-cloud-redis': ('https://googleapis.dev/python/redis/latest', None), 'google-cloud-spanner': ('https://googleapis.dev/python/spanner/latest', None), 'google-cloud-speech': ('https://googleapis.dev/python/speech/latest', None), 'google-cloud-storage': ('https://googleapis.dev/python/storage/latest', None), 'google-cloud-tasks': ('https://googleapis.dev/python/cloudtasks/latest', None), 'google-cloud-texttospeech': ('https://googleapis.dev/python/texttospeech/latest', 
None), 'google-cloud-translate': ('https://googleapis.dev/python/translation/latest', None), 'google-cloud-videointelligence': ('https://googleapis.dev/python/videointelligence/latest', None), 'google-cloud-vision': ('https://googleapis.dev/python/vision/latest', None), } # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' if airflow_theme_is_available: html_theme = 'sphinx_airflow_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] import sphinx_rtd_theme # pylint: disable=wrong-import-position,wrong-import-order html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". html_title = "Airflow Documentation" # A shorter title for the navigation bar. Default is the same as html_title. html_short_title = "" # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None html_favicon = "../airflow/www/static/pin_32.png" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # A list of JavaScript filename. 
The entry must be a filename string or a # tuple containing the filename string and the attributes dictionary. The # filename must be relative to the html_static_path, or a full URI with # scheme like http://example.org/script.js. html_js_files = ['jira-links.js'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. if airflow_theme_is_available: html_sidebars = { '**': [ 'version-selector.html', 'searchbox.html', 'globaltoc.html', ] } # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. html_show_copyright = False # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Airflowdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). 
# 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # type: Dict[str,str] # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'Airflow.tex', u'Airflow Documentation', u'Apache Airflow', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'airflow', u'Airflow Documentation', [u'Apache Airflow'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [( 'index', 'Airflow', u'Airflow Documentation', u'Apache Airflow', 'Airflow', 'Airflow is a system to programmatically author, schedule and monitor data pipelines.', 'Miscellaneous' ), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. 
# texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # sphinx-autoapi configuration # See: # https://sphinx-autoapi.readthedocs.io/en/latest/config.html # Paths (relative or absolute) to the source code that you wish to generate # your API documentation from. autoapi_dirs = [ os.path.abspath('../airflow'), ] # A directory that has user-defined templates to override our default templates. autoapi_template_dir = 'autoapi_templates' # A list of patterns to ignore when finding files autoapi_ignore = [ # These modules are backcompat shims, don't build docs for them '*/airflow/contrib/operators/s3_to_gcs_transfer_operator.py', '*/airflow/contrib/operators/gcs_to_gcs_transfer_operator.py', '*/airflow/contrib/operators/gcs_to_gcs_transfer_operator.py', '*/node_modules/*', '*/migrations/*', ] # Keep the AutoAPI generated files on the filesystem after the run. # Useful for debugging. autoapi_keep_files = True # Relative path to output the AutoAPI files into. This can also be used to place the generated documentation # anywhere in your documentation hierarchy. autoapi_root = '_api' # -- Options for examole include ------------------------------------------ exampleinclude_sourceroot = os.path.abspath('..') # -- Additional HTML Context variable html_context = { # Google Analytics ID. 
# For more information look at: # https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/layout.html#L222-L232 'theme_analytics_id': 'UA-140539454-1', } if airflow_theme_is_available: html_context = { # Variables used to build a button for editing the source code # # The path is created according to the following template: # # https://{{ github_host|default("github.com") }}/{{ github_user }}/{{ github_repo }}/ # {{ theme_vcs_pageview_mode|default("blob") }}/{{ github_version }}{{ conf_py_path }} # {{ pagename }}{{ suffix }} # # More information: # https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/templates/doc_builder/conf.py.tmpl#L100-L103 # https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/breadcrumbs.html#L45 # https://github.com/apache/airflow-site/blob/91f760c/sphinx_airflow_theme/sphinx_airflow_theme/suggest_change_button.html#L36-L40 # 'theme_vcs_pageview_mode': 'edit', 'conf_py_path': '/docs/', 'github_user': 'apache', 'github_repo': 'airflow', 'github_version': 'master', 'display_github': 'master', 'suffix': '.rst', }
owlabs/incubator-airflow
docs/conf.py
Python
apache-2.0
17,740
"""Support for Hangouts.""" import logging from hangups.auth import GoogleAuthError import voluptuous as vol from homeassistant import config_entries from homeassistant.components.conversation.util import create_matcher from homeassistant.config_entries import ConfigEntry from homeassistant.const import EVENT_HOMEASSISTANT_STOP from homeassistant.core import HomeAssistant from homeassistant.helpers import dispatcher, intent import homeassistant.helpers.config_validation as cv # We need an import from .config_flow, without it .config_flow is never loaded. from .config_flow import HangoutsFlowHandler # noqa: F401 from .const import ( CONF_BOT, CONF_DEFAULT_CONVERSATIONS, CONF_ERROR_SUPPRESSED_CONVERSATIONS, CONF_INTENTS, CONF_MATCHERS, CONF_REFRESH_TOKEN, CONF_SENTENCES, DOMAIN, EVENT_HANGOUTS_CONNECTED, EVENT_HANGOUTS_CONVERSATIONS_CHANGED, EVENT_HANGOUTS_CONVERSATIONS_RESOLVED, INTENT_HELP, INTENT_SCHEMA, MESSAGE_SCHEMA, SERVICE_RECONNECT, SERVICE_SEND_MESSAGE, SERVICE_UPDATE, TARGETS_SCHEMA, ) from .hangouts_bot import HangoutsBot from .intents import HelpIntent _LOGGER = logging.getLogger(__name__) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Optional(CONF_INTENTS, default={}): vol.Schema( {cv.string: INTENT_SCHEMA} ), vol.Optional(CONF_DEFAULT_CONVERSATIONS, default=[]): [TARGETS_SCHEMA], vol.Optional(CONF_ERROR_SUPPRESSED_CONVERSATIONS, default=[]): [ TARGETS_SCHEMA ], } ) }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass, config): """Set up the Hangouts bot component.""" if (config := config.get(DOMAIN)) is None: hass.data[DOMAIN] = { CONF_INTENTS: {}, CONF_DEFAULT_CONVERSATIONS: [], CONF_ERROR_SUPPRESSED_CONVERSATIONS: [], } return True hass.data[DOMAIN] = { CONF_INTENTS: config[CONF_INTENTS], CONF_DEFAULT_CONVERSATIONS: config[CONF_DEFAULT_CONVERSATIONS], CONF_ERROR_SUPPRESSED_CONVERSATIONS: config[ CONF_ERROR_SUPPRESSED_CONVERSATIONS ], } if ( hass.data[DOMAIN][CONF_INTENTS] and INTENT_HELP not in hass.data[DOMAIN][CONF_INTENTS] ): 
hass.data[DOMAIN][CONF_INTENTS][INTENT_HELP] = {CONF_SENTENCES: ["HELP"]} for data in hass.data[DOMAIN][CONF_INTENTS].values(): matchers = [] for sentence in data[CONF_SENTENCES]: matchers.append(create_matcher(sentence)) data[CONF_MATCHERS] = matchers hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_IMPORT} ) ) return True async def async_setup_entry(hass: HomeAssistant, config: ConfigEntry) -> bool: """Set up a config entry.""" try: bot = HangoutsBot( hass, config.data.get(CONF_REFRESH_TOKEN), hass.data[DOMAIN][CONF_INTENTS], hass.data[DOMAIN][CONF_DEFAULT_CONVERSATIONS], hass.data[DOMAIN][CONF_ERROR_SUPPRESSED_CONVERSATIONS], ) hass.data[DOMAIN][CONF_BOT] = bot except GoogleAuthError as exception: _LOGGER.error("Hangouts failed to log in: %s", str(exception)) return False dispatcher.async_dispatcher_connect( hass, EVENT_HANGOUTS_CONNECTED, bot.async_handle_update_users_and_conversations ) dispatcher.async_dispatcher_connect( hass, EVENT_HANGOUTS_CONVERSATIONS_CHANGED, bot.async_resolve_conversations ) dispatcher.async_dispatcher_connect( hass, EVENT_HANGOUTS_CONVERSATIONS_RESOLVED, bot.async_update_conversation_commands, ) config.async_on_unload( hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, bot.async_handle_hass_stop) ) await bot.async_connect() hass.services.async_register( DOMAIN, SERVICE_SEND_MESSAGE, bot.async_handle_send_message, schema=MESSAGE_SCHEMA, ) hass.services.async_register( DOMAIN, SERVICE_UPDATE, bot.async_handle_update_users_and_conversations, schema=vol.Schema({}), ) hass.services.async_register( DOMAIN, SERVICE_RECONNECT, bot.async_handle_reconnect, schema=vol.Schema({}) ) intent.async_register(hass, HelpIntent(hass)) return True async def async_unload_entry(hass: HomeAssistant, _: ConfigEntry) -> bool: """Unload a config entry.""" bot = hass.data[DOMAIN].pop(CONF_BOT) await bot.async_disconnect() return True
mezz64/home-assistant
homeassistant/components/hangouts/__init__.py
Python
apache-2.0
4,741
import asyncio
import inspect
import logging
from typing import List, Tuple, Callable, NamedTuple

from lightbus.schema.schema import Parameter
from lightbus.message import EventMessage
from lightbus.client.subclients.base import BaseSubClient
from lightbus.client.utilities import validate_event_or_rpc_name, queue_exception_checker, OnError
from lightbus.client.validator import validate_outgoing, validate_incoming
from lightbus.exceptions import (
    UnknownApi,
    EventNotFound,
    InvalidEventArguments,
    InvalidEventListener,
    ListenersAlreadyStarted,
    DuplicateListenerName,
)
from lightbus.log import L, Bold
from lightbus.client.commands import (
    SendEventCommand,
    AcknowledgeEventCommand,
    ConsumeEventsCommand,
    CloseCommand,
)
from lightbus.utilities.async_tools import run_user_provided_callable, cancel_and_log_exceptions
from lightbus.utilities.internal_queue import InternalQueue
from lightbus.utilities.casting import cast_to_signature
from lightbus.utilities.deforming import deform_to_bus
from lightbus.utilities.singledispatch import singledispatchmethod

logger = logging.getLogger(__name__)


class EventClient(BaseSubClient):
    """Sub-client responsible for firing events and running event listeners.

    Outgoing work (send / acknowledge / consume / close) is expressed as
    command objects pushed onto ``self.producer``; the transport layer picks
    them up asynchronously.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Listeners registered via listen(); started all at once by
        # start_registered_listeners().
        self._event_listeners: List[Listener] = []
        # asyncio tasks created for running listeners (cancelled on close()).
        self._event_listener_tasks = set()
        # Once True, further listen() calls raise ListenersAlreadyStarted.
        self._listeners_started = False

    async def fire_event(
        self, api_name, name, kwargs: dict = None, options: dict = None
    ) -> EventMessage:
        """Validate and fire the event ``api_name.name`` with ``kwargs``.

        Raises UnknownApi / EventNotFound / InvalidEventArguments on bad
        input; otherwise sends a SendEventCommand and returns the
        EventMessage that was sent. The before/after "event_sent" hooks are
        executed around the send.
        """
        kwargs = kwargs or {}
        try:
            api = self.api_registry.get(api_name)
        except UnknownApi:
            raise UnknownApi(
                "Lightbus tried to fire the event {api_name}.{name}, but no API named {api_name}"
                " was found in the registry. An API being in the registry implies you are an"
                " authority on that API. Therefore, Lightbus requires the API to be in the registry"
                " as it is a bad idea to fire events on behalf of remote APIs. However, this could"
                " also be caused by a typo in the API name or event name, or be because the API"
                " class has not been registered using bus.client.register_api(). ".format(
                    **locals()
                )
            )

        validate_event_or_rpc_name(api_name, "event", name)

        try:
            event = api.get_event(name)
        except EventNotFound:
            raise EventNotFound(
                "Lightbus tried to fire the event {api_name}.{name}, but the API {api_name} does"
                " not seem to contain an event named {name}. You may need to define the event, you"
                " may also be using the incorrect API. Also check for typos.".format(**locals())
            )

        # Parameters may be Parameter objects or bare strings; normalise both
        # to a set of parameter names.
        p: Parameter
        parameter_names = {p.name if isinstance(p, Parameter) else p for p in event.parameters}
        # Bare-string parameters have no is_required attribute and are
        # treated as required (the getattr default).
        required_parameter_names = {
            p.name if isinstance(p, Parameter) else p
            for p in event.parameters
            if getattr(p, "is_required", True)
        }
        if required_parameter_names and not required_parameter_names.issubset(set(kwargs.keys())):
            raise InvalidEventArguments(
                "Missing required arguments when firing event {}.{}. Attempted to fire event with "
                "{} arguments: {}. Event requires {}: {}".format(
                    api_name,
                    name,
                    len(kwargs),
                    sorted(kwargs.keys()),
                    len(parameter_names),
                    sorted(parameter_names),
                )
            )

        extra_arguments = set(kwargs.keys()) - parameter_names
        if extra_arguments:
            raise InvalidEventArguments(
                "Unexpected argument supplied when firing event {}.{}. Attempted to fire event with"
                " {} arguments: {}. Unexpected argument(s): {}".format(
                    api_name,
                    name,
                    len(kwargs),
                    sorted(kwargs.keys()),
                    sorted(extra_arguments),
                )
            )

        # Convert rich Python values into bus-safe (serialisable) forms.
        kwargs = deform_to_bus(kwargs)

        event_message = EventMessage(
            api_name=api.meta.name, event_name=name, kwargs=kwargs, version=api.meta.version
        )

        validate_outgoing(self.config, self.schema, event_message)

        await self.hook_registry.execute("before_event_sent", event_message=event_message)
        logger.info(L("📤 Sending event {}.{}".format(Bold(api_name), Bold(name))))

        # .wait() blocks until the transport has processed the command, so
        # the event is genuinely on the bus before the after-hook fires.
        await self.producer.send(SendEventCommand(message=event_message, options=options)).wait()

        await self.hook_registry.execute("after_event_sent", event_message=event_message)

        return event_message

    def listen(
        self,
        events: List[Tuple[str, str]],
        listener: Callable,
        listener_name: str,
        options: dict = None,
        on_error: OnError = OnError.SHUTDOWN,
    ):
        """Register ``listener`` for the given (api_name, event_name) pairs.

        The listener is only recorded here; it starts consuming when
        start_registered_listeners() runs. ``listener_name`` must be unique
        per API. Raises ListenersAlreadyStarted, InvalidEventListener, or
        DuplicateListenerName on misuse.
        """
        if self._listeners_started:
            # We are actually technically able to support starting listeners after worker
            # startup, but it seems like it is a bad idea and a bit of an edge case.
            # We may revisit this if sufficient demand arises.
            raise ListenersAlreadyStarted(
                "You are trying to register a new listener after the worker has started running."
                " Listeners should be setup in your @bus.client.on_start() hook, in your bus.py"
                " file."
            )

        sanity_check_listener(listener)

        for listener_api_name, _ in events:
            duplicate_listener = self.get_event_listener(listener_api_name, listener_name)
            if duplicate_listener:
                raise DuplicateListenerName(
                    f"A listener with name '{listener_name}' is already registered for API"
                    f" '{listener_api_name}'. You cannot have multiple listeners with the same name"
                    " for a given API. Rename one of your listeners to resolve this problem."
                )

        for api_name, name in events:
            validate_event_or_rpc_name(api_name, "event", name)

        self._event_listeners.append(
            Listener(
                callable=listener,
                options=options or {},
                events=events,
                name=listener_name,
                on_error=on_error,
            )
        )

    def get_event_listener(self, api_name: str, listener_name: str):
        """Return the registered Listener matching name and API, or None."""
        for listener in self._event_listeners:
            if listener.name == listener_name:
                for listener_api_name, _ in listener.events:
                    if listener_api_name == api_name:
                        return listener
        return None

    async def _on_message(
        self, event_message: EventMessage, listener: Callable, options: dict, on_error: OnError
    ):
        """Validate, execute the listener for, and acknowledge one event."""
        # TODO: Check events match those requested
        logger.info(
            L(
                "📩 Received event {}.{} with ID {}".format(
                    Bold(event_message.api_name), Bold(event_message.event_name), event_message.id
                )
            )
        )

        validate_incoming(self.config, self.schema, event_message)

        await self.hook_registry.execute("before_event_execution", event_message=event_message)

        if self.config.api(event_message.api_name).cast_values:
            # Coerce incoming values to the types declared on the listener's
            # signature.
            parameters = cast_to_signature(parameters=event_message.kwargs, callable=listener)
        else:
            parameters = event_message.kwargs

        # Call the listener.
        # Pass the event message as a positional argument,
        # thereby allowing listeners to have flexibility in the argument names.
        # (And therefore allowing listeners to use the `event` parameter themselves)
        if on_error == OnError.SHUTDOWN:
            # Run the callback in the queue_exception_checker(). This will
            # put any errors into Lightbus' error queue, and therefore
            # cause a shutdown
            await queue_exception_checker(
                run_user_provided_callable(listener, args=[event_message], kwargs=parameters),
                self.error_queue,
                help=(
                    f"An error occurred while {listener} was handling an event. Lightbus will now"
                    " shutdown. If you wish to continue you can use the on_error parameter when"
                    " setting up your event. For example:\n\n    bus.my_api.my_event.listen(fn,"
                    " listener_name='example', on_error=lightbus.OnError.ACKNOWLEDGE_AND_LOG)"
                ),
            )
        # NOTE(review): `on_error.ACKNOWLEDGE_AND_LOG` accesses a class
        # attribute through an enum *member*; that lookup style was
        # deprecated in Python 3.11 and removed in 3.12 — should be
        # `OnError.ACKNOWLEDGE_AND_LOG`. TODO confirm supported Pythons.
        elif on_error == on_error.ACKNOWLEDGE_AND_LOG:
            try:
                # NOTE(review): this awaits the listener directly (assumes a
                # coroutine function), unlike the SHUTDOWN branch which goes
                # through run_user_provided_callable — verify sync listeners
                # are supported on this path.
                await listener(event_message, **parameters)
            except asyncio.CancelledError:
                raise
            except Exception as e:
                # Log here. Acknowledgement will follow in below
                logger.exception(e)

        # Acknowledge the successfully processed message
        await self.producer.send(
            AcknowledgeEventCommand(message=event_message, options=options)
        ).wait()

        await self.hook_registry.execute("after_event_execution", event_message=event_message)

    async def close(self):
        """Cancel listener tasks and shut down producer/consumer channels."""
        await super().close()

        await cancel_and_log_exceptions(*self._event_listener_tasks)

        await self.producer.send(CloseCommand()).wait()

        await self.consumer.close()
        await self.producer.close()

    @singledispatchmethod
    async def handle(self, command):
        # Dispatch target for incoming commands; concrete command types are
        # registered elsewhere via handle.register.
        raise NotImplementedError(f"Did not recognise command {command.__class__.__name__}")

    async def start_registered_listeners(self):
        """Start all listeners which have been previously registered via listen()"""
        self._listeners_started = True
        for listener in self._event_listeners:
            await self._start_listener(listener)

    async def _start_listener(self, listener: "Listener"):
        """Start one listener: a consumer task fed by a bounded queue."""
        # Setting the maxsize to 1 ensures the transport cannot load
        # messages faster than we can consume them
        queue: InternalQueue[EventMessage] = InternalQueue(maxsize=1)

        async def consume_events():
            while True:
                logger.debug("Event listener now waiting for event on the internal queue")
                event_message = await queue.get()
                logger.debug(
                    "Event listener has now received an event on the internal queue, processing now"
                )
                await self._on_message(
                    event_message=event_message,
                    listener=listener.callable,
                    options=listener.options,
                    on_error=listener.on_error,
                )
                queue.task_done()

        # Start the consume_events() consumer running
        task = asyncio.ensure_future(queue_exception_checker(consume_events(), self.error_queue))
        self._event_listener_tasks.add(task)

        # Ask the transport to deliver matching events into our queue.
        await self.producer.send(
            ConsumeEventsCommand(
                events=listener.events,
                destination_queue=queue,
                listener_name=listener.name,
                options=listener.options,
            )
        ).wait()


class Listener(NamedTuple):
    """Immutable record of one registered event listener."""

    callable: Callable
    options: dict
    events: List[Tuple[str, str]]
    name: str
    on_error: OnError


def sanity_check_listener(listener):
    """Raise InvalidEventListener unless ``listener`` is a callable that can
    accept at least one positional argument (the event message)."""
    if not callable(listener):
        raise InvalidEventListener(
            f"The specified event listener {listener} is not callable. Perhaps you called the"
            " function rather than passing the function itself?"
        )

    total_positional_args = 0
    has_variable_positional_args = False  # Eg: *args
    for parameter in inspect.signature(listener).parameters.values():
        if parameter.kind in (
            inspect.Parameter.POSITIONAL_ONLY,
            inspect.Parameter.POSITIONAL_OR_KEYWORD,
        ):
            total_positional_args += 1
        elif parameter.kind == inspect.Parameter.VAR_POSITIONAL:
            has_variable_positional_args = True

    if has_variable_positional_args:
        # *args can absorb the event message, so any signature is fine.
        return

    if not total_positional_args:
        # NOTE(review): message typo — should read "at least one positional
        # argument".
        raise InvalidEventListener(
            f"The specified event listener {listener} must take at one positional argument. "
            "This will be the event message. For example: "
            "my_listener(event, other, ...)"
        )
adamcharnock/lightbus
lightbus/client/subclients/event.py
Python
apache-2.0
12,494
# -*- coding: utf-8 -*- from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six # PY3 support of some PY2 keywords (needs improved) if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ from . import state from . import unknown_subtlv from . import unreserved_bandwidths from . import administrative_groups class sub_tlv(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/traffic-engineering/tlvs/tlv/link/sub-tlvs/sub-tlv. Each member element of the container is represented as a class variable - with a specific YANG type. 
YANG Description: The Sub-TLVs included within the Traffic Engineering LSA's sub-TLV """ __slots__ = ( "_path_helper", "_extmethods", "__state", "__unknown_subtlv", "__unreserved_bandwidths", "__administrative_groups", ) _yang_name = "sub-tlv" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) self.__unknown_subtlv = YANGDynClass( base=unknown_subtlv.unknown_subtlv, is_container="container", yang_name="unknown-subtlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) self.__unreserved_bandwidths = YANGDynClass( base=unreserved_bandwidths.unreserved_bandwidths, is_container="container", yang_name="unreserved-bandwidths", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) self.__administrative_groups = YANGDynClass( base=administrative_groups.administrative_groups, is_container="container", yang_name="administrative-groups", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise 
TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "ospfv2", "areas", "area", "lsdb", "lsa-types", "lsa-type", "lsas", "lsa", "opaque-lsa", "traffic-engineering", "tlvs", "tlv", "link", "sub-tlvs", "sub-tlv", ] def _get_state(self): """ Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/state (container) YANG Description: State parameters of the Link Sub-TLV """ return self.__state def _set_state(self, v, load=False): """ Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/state (container) If this variable is read-only (config: false) in the source YANG file, then _set_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_state() directly. 
YANG Description: State parameters of the Link Sub-TLV """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """state must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""", } ) self.__state = t if hasattr(self, "_set"): self._set() def _unset_state(self): self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) def _get_unknown_subtlv(self): """ Getter method for unknown_subtlv, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/unknown_subtlv (container) YANG Description: An unknown SubTLV within the context. Unknown Sub-TLV are defined to be the set of SubTLVs that are not modelled by the OpenConfig schema, or are unknown to the local system such that it cannot decode their value. 
""" return self.__unknown_subtlv def _set_unknown_subtlv(self, v, load=False): """ Setter method for unknown_subtlv, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/unknown_subtlv (container) If this variable is read-only (config: false) in the source YANG file, then _set_unknown_subtlv is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_unknown_subtlv() directly. YANG Description: An unknown SubTLV within the context. Unknown Sub-TLV are defined to be the set of SubTLVs that are not modelled by the OpenConfig schema, or are unknown to the local system such that it cannot decode their value. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=unknown_subtlv.unknown_subtlv, is_container="container", yang_name="unknown-subtlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """unknown_subtlv must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=unknown_subtlv.unknown_subtlv, is_container='container', yang_name="unknown-subtlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""", } ) self.__unknown_subtlv = t if hasattr(self, "_set"): self._set() def _unset_unknown_subtlv(self): self.__unknown_subtlv = YANGDynClass( base=unknown_subtlv.unknown_subtlv, is_container="container", 
yang_name="unknown-subtlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) def _get_unreserved_bandwidths(self): """ Getter method for unreserved_bandwidths, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/unreserved_bandwidths (container) YANG Description: The unreserved link bandwidths for the Traffic Engineering LSA - utilised when the sub-TLV type indicates that the sub-TLV describes unreserved bandwidth """ return self.__unreserved_bandwidths def _set_unreserved_bandwidths(self, v, load=False): """ Setter method for unreserved_bandwidths, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/unreserved_bandwidths (container) If this variable is read-only (config: false) in the source YANG file, then _set_unreserved_bandwidths is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_unreserved_bandwidths() directly. 
YANG Description: The unreserved link bandwidths for the Traffic Engineering LSA - utilised when the sub-TLV type indicates that the sub-TLV describes unreserved bandwidth """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=unreserved_bandwidths.unreserved_bandwidths, is_container="container", yang_name="unreserved-bandwidths", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """unreserved_bandwidths must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=unreserved_bandwidths.unreserved_bandwidths, is_container='container', yang_name="unreserved-bandwidths", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""", } ) self.__unreserved_bandwidths = t if hasattr(self, "_set"): self._set() def _unset_unreserved_bandwidths(self): self.__unreserved_bandwidths = YANGDynClass( base=unreserved_bandwidths.unreserved_bandwidths, is_container="container", yang_name="unreserved-bandwidths", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) def _get_administrative_groups(self): """ Getter method for administrative_groups, mapped from YANG variable 
/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/administrative_groups (container) YANG Description: The administrative groups that are set for the Traffic Engineering LSA - utilised when the sub-TLV type indicates that the sub-TLV describes administrative groups """ return self.__administrative_groups def _set_administrative_groups(self, v, load=False): """ Setter method for administrative_groups, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/administrative_groups (container) If this variable is read-only (config: false) in the source YANG file, then _set_administrative_groups is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_administrative_groups() directly. 
YANG Description: The administrative groups that are set for the Traffic Engineering LSA - utilised when the sub-TLV type indicates that the sub-TLV describes administrative groups """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=administrative_groups.administrative_groups, is_container="container", yang_name="administrative-groups", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """administrative_groups must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=administrative_groups.administrative_groups, is_container='container', yang_name="administrative-groups", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""", } ) self.__administrative_groups = t if hasattr(self, "_set"): self._set() def _unset_administrative_groups(self): self.__administrative_groups = YANGDynClass( base=administrative_groups.administrative_groups, is_container="container", yang_name="administrative-groups", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) state = __builtin__.property(_get_state) unknown_subtlv = __builtin__.property(_get_unknown_subtlv) unreserved_bandwidths = __builtin__.property(_get_unreserved_bandwidths) administrative_groups = __builtin__.property(_get_administrative_groups) _pyangbind_elements = 
OrderedDict( [ ("state", state), ("unknown_subtlv", unknown_subtlv), ("unreserved_bandwidths", unreserved_bandwidths), ("administrative_groups", administrative_groups), ] ) from . import state from . import unknown_subtlv from . import unreserved_bandwidths from . import administrative_groups class sub_tlv(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/traffic-engineering/tlvs/tlv/link/sub-tlvs/sub-tlv. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: The Sub-TLVs included within the Traffic Engineering LSA's sub-TLV """ __slots__ = ( "_path_helper", "_extmethods", "__state", "__unknown_subtlv", "__unreserved_bandwidths", "__administrative_groups", ) _yang_name = "sub-tlv" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) self.__unknown_subtlv = YANGDynClass( base=unknown_subtlv.unknown_subtlv, is_container="container", yang_name="unknown-subtlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) self.__unreserved_bandwidths = YANGDynClass( base=unreserved_bandwidths.unreserved_bandwidths, is_container="container", yang_name="unreserved-bandwidths", parent=self, 
path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) self.__administrative_groups = YANGDynClass( base=administrative_groups.administrative_groups, is_container="container", yang_name="administrative-groups", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "ospfv2", "areas", "area", "lsdb", "lsa-types", "lsa-type", "lsas", "lsa", "opaque-lsa", "traffic-engineering", "tlvs", "tlv", "link", "sub-tlvs", "sub-tlv", ] def _get_state(self): """ Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/state (container) YANG Description: State parameters of the Link Sub-TLV """ return self.__state def _set_state(self, v, load=False): """ Setter method for state, mapped from YANG variable 
/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/state (container) If this variable is read-only (config: false) in the source YANG file, then _set_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_state() directly. YANG Description: State parameters of the Link Sub-TLV """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """state must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""", } ) self.__state = t if hasattr(self, "_set"): self._set() def _unset_state(self): self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) def _get_unknown_subtlv(self): """ Getter method for unknown_subtlv, mapped from YANG variable 
/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/unknown_subtlv (container) YANG Description: An unknown SubTLV within the context. Unknown Sub-TLV are defined to be the set of SubTLVs that are not modelled by the OpenConfig schema, or are unknown to the local system such that it cannot decode their value. """ return self.__unknown_subtlv def _set_unknown_subtlv(self, v, load=False): """ Setter method for unknown_subtlv, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/unknown_subtlv (container) If this variable is read-only (config: false) in the source YANG file, then _set_unknown_subtlv is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_unknown_subtlv() directly. YANG Description: An unknown SubTLV within the context. Unknown Sub-TLV are defined to be the set of SubTLVs that are not modelled by the OpenConfig schema, or are unknown to the local system such that it cannot decode their value. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=unknown_subtlv.unknown_subtlv, is_container="container", yang_name="unknown-subtlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """unknown_subtlv must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=unknown_subtlv.unknown_subtlv, is_container='container', yang_name="unknown-subtlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""", } ) self.__unknown_subtlv = t if hasattr(self, "_set"): self._set() def _unset_unknown_subtlv(self): self.__unknown_subtlv = YANGDynClass( base=unknown_subtlv.unknown_subtlv, is_container="container", yang_name="unknown-subtlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) def _get_unreserved_bandwidths(self): """ Getter method for unreserved_bandwidths, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/unreserved_bandwidths (container) YANG Description: The unreserved link bandwidths for the Traffic Engineering LSA - utilised when the sub-TLV type indicates that the sub-TLV describes unreserved bandwidth """ return self.__unreserved_bandwidths def 
_set_unreserved_bandwidths(self, v, load=False): """ Setter method for unreserved_bandwidths, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/unreserved_bandwidths (container) If this variable is read-only (config: false) in the source YANG file, then _set_unreserved_bandwidths is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_unreserved_bandwidths() directly. YANG Description: The unreserved link bandwidths for the Traffic Engineering LSA - utilised when the sub-TLV type indicates that the sub-TLV describes unreserved bandwidth """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=unreserved_bandwidths.unreserved_bandwidths, is_container="container", yang_name="unreserved-bandwidths", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """unreserved_bandwidths must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=unreserved_bandwidths.unreserved_bandwidths, is_container='container', yang_name="unreserved-bandwidths", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""", } ) self.__unreserved_bandwidths = t if hasattr(self, "_set"): self._set() def _unset_unreserved_bandwidths(self): self.__unreserved_bandwidths = YANGDynClass( base=unreserved_bandwidths.unreserved_bandwidths, is_container="container", 
yang_name="unreserved-bandwidths", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) def _get_administrative_groups(self): """ Getter method for administrative_groups, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/administrative_groups (container) YANG Description: The administrative groups that are set for the Traffic Engineering LSA - utilised when the sub-TLV type indicates that the sub-TLV describes administrative groups """ return self.__administrative_groups def _set_administrative_groups(self, v, load=False): """ Setter method for administrative_groups, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/administrative_groups (container) If this variable is read-only (config: false) in the source YANG file, then _set_administrative_groups is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_administrative_groups() directly. 
YANG Description: The administrative groups that are set for the Traffic Engineering LSA - utilised when the sub-TLV type indicates that the sub-TLV describes administrative groups """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=administrative_groups.administrative_groups, is_container="container", yang_name="administrative-groups", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """administrative_groups must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=administrative_groups.administrative_groups, is_container='container', yang_name="administrative-groups", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""", } ) self.__administrative_groups = t if hasattr(self, "_set"): self._set() def _unset_administrative_groups(self): self.__administrative_groups = YANGDynClass( base=administrative_groups.administrative_groups, is_container="container", yang_name="administrative-groups", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) state = __builtin__.property(_get_state) unknown_subtlv = __builtin__.property(_get_unknown_subtlv) unreserved_bandwidths = __builtin__.property(_get_unreserved_bandwidths) administrative_groups = __builtin__.property(_get_administrative_groups) _pyangbind_elements = 
OrderedDict( [ ("state", state), ("unknown_subtlv", unknown_subtlv), ("unreserved_bandwidths", unreserved_bandwidths), ("administrative_groups", administrative_groups), ] )
napalm-automation/napalm-yang
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/__init__.py
Python
apache-2.0
37,904
try:
    from django.apps import AppConfig
except ImportError:
    # Django (or a version without the app-config framework) is absent;
    # define nothing so importing this module stays harmless.
    pass
else:
    class GargoyleAppConfig(AppConfig):
        """App configuration that hooks gargoyle into nexus on startup."""

        name = 'gargoyle'

        def ready(self):
            """Register the gargoyle dashboard with nexus, if nexus is installed."""
            try:
                import nexus
            except ImportError:
                return
            from gargoyle.nexus_modules import GargoyleModule
            nexus.site.register(GargoyleModule, 'gargoyle')
blueprinthealth/gargoyle
gargoyle/apps.py
Python
apache-2.0
417
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import proto # type: ignore from google.ads.googleads.v10.common.types import criteria from google.ads.googleads.v10.common.types import extensions from google.ads.googleads.v10.enums.types import keyword_match_type from google.ads.googleads.v10.enums.types import recommendation_type from google.ads.googleads.v10.enums.types import ( target_cpa_opt_in_recommendation_goal, ) from google.ads.googleads.v10.resources.types import ad as gagr_ad __protobuf__ = proto.module( package="google.ads.googleads.v10.resources", marshal="google.ads.googleads.v10", manifest={"Recommendation",}, ) class Recommendation(proto.Message): r"""A recommendation. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. Setting any member of the oneof automatically clears all other members. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: resource_name (str): Immutable. The resource name of the recommendation. ``customers/{customer_id}/recommendations/{recommendation_id}`` type_ (google.ads.googleads.v10.enums.types.RecommendationTypeEnum.RecommendationType): Output only. The type of recommendation. impact (google.ads.googleads.v10.resources.types.Recommendation.RecommendationImpact): Output only. 
The impact on account performance as a result of applying the recommendation. campaign_budget (str): Output only. The budget targeted by this recommendation. This will be set only when the recommendation affects a single campaign budget. This field will be set for the following recommendation types: CAMPAIGN_BUDGET, FORECASTING_CAMPAIGN_BUDGET, MARGINAL_ROI_CAMPAIGN_BUDGET, MOVE_UNUSED_BUDGET This field is a member of `oneof`_ ``_campaign_budget``. campaign (str): Output only. The campaign targeted by this recommendation. This will be set only when the recommendation affects a single campaign. This field will be set for the following recommendation types: CALL_EXTENSION, CALLOUT_EXTENSION, ENHANCED_CPC_OPT_IN, USE_BROAD_MATCH_KEYWORD, KEYWORD, KEYWORD_MATCH_TYPE, MAXIMIZE_CLICKS_OPT_IN, MAXIMIZE_CONVERSIONS_OPT_IN, OPTIMIZE_AD_ROTATION, RESPONSIVE_SEARCH_AD, RESPONSIVE_SEARCH_AD_ASSET, SEARCH_PARTNERS_OPT_IN, SITELINK_EXTENSION, TARGET_CPA_OPT_IN, TARGET_ROAS_OPT_IN, TEXT_AD This field is a member of `oneof`_ ``_campaign``. ad_group (str): Output only. The ad group targeted by this recommendation. This will be set only when the recommendation affects a single ad group. This field will be set for the following recommendation types: KEYWORD, OPTIMIZE_AD_ROTATION, RESPONSIVE_SEARCH_AD, RESPONSIVE_SEARCH_AD_ASSET, TEXT_AD This field is a member of `oneof`_ ``_ad_group``. dismissed (bool): Output only. Whether the recommendation is dismissed or not. This field is a member of `oneof`_ ``_dismissed``. campaign_budget_recommendation (google.ads.googleads.v10.resources.types.Recommendation.CampaignBudgetRecommendation): Output only. The campaign budget recommendation. This field is a member of `oneof`_ ``recommendation``. forecasting_campaign_budget_recommendation (google.ads.googleads.v10.resources.types.Recommendation.CampaignBudgetRecommendation): Output only. The forecasting campaign budget recommendation. This field is a member of `oneof`_ ``recommendation``. 
keyword_recommendation (google.ads.googleads.v10.resources.types.Recommendation.KeywordRecommendation): Output only. The keyword recommendation. This field is a member of `oneof`_ ``recommendation``. text_ad_recommendation (google.ads.googleads.v10.resources.types.Recommendation.TextAdRecommendation): Output only. Add expanded text ad recommendation. This field is a member of `oneof`_ ``recommendation``. target_cpa_opt_in_recommendation (google.ads.googleads.v10.resources.types.Recommendation.TargetCpaOptInRecommendation): Output only. The TargetCPA opt-in recommendation. This field is a member of `oneof`_ ``recommendation``. maximize_conversions_opt_in_recommendation (google.ads.googleads.v10.resources.types.Recommendation.MaximizeConversionsOptInRecommendation): Output only. The MaximizeConversions Opt-In recommendation. This field is a member of `oneof`_ ``recommendation``. enhanced_cpc_opt_in_recommendation (google.ads.googleads.v10.resources.types.Recommendation.EnhancedCpcOptInRecommendation): Output only. The Enhanced Cost-Per-Click Opt-In recommendation. This field is a member of `oneof`_ ``recommendation``. search_partners_opt_in_recommendation (google.ads.googleads.v10.resources.types.Recommendation.SearchPartnersOptInRecommendation): Output only. The Search Partners Opt-In recommendation. This field is a member of `oneof`_ ``recommendation``. maximize_clicks_opt_in_recommendation (google.ads.googleads.v10.resources.types.Recommendation.MaximizeClicksOptInRecommendation): Output only. The MaximizeClicks Opt-In recommendation. This field is a member of `oneof`_ ``recommendation``. optimize_ad_rotation_recommendation (google.ads.googleads.v10.resources.types.Recommendation.OptimizeAdRotationRecommendation): Output only. The Optimize Ad Rotation recommendation. This field is a member of `oneof`_ ``recommendation``. callout_extension_recommendation (google.ads.googleads.v10.resources.types.Recommendation.CalloutExtensionRecommendation): Output only. 
The Callout extension recommendation. This field is a member of `oneof`_ ``recommendation``. sitelink_extension_recommendation (google.ads.googleads.v10.resources.types.Recommendation.SitelinkExtensionRecommendation): Output only. The Sitelink extension recommendation. This field is a member of `oneof`_ ``recommendation``. call_extension_recommendation (google.ads.googleads.v10.resources.types.Recommendation.CallExtensionRecommendation): Output only. The Call extension recommendation. This field is a member of `oneof`_ ``recommendation``. keyword_match_type_recommendation (google.ads.googleads.v10.resources.types.Recommendation.KeywordMatchTypeRecommendation): Output only. The keyword match type recommendation. This field is a member of `oneof`_ ``recommendation``. move_unused_budget_recommendation (google.ads.googleads.v10.resources.types.Recommendation.MoveUnusedBudgetRecommendation): Output only. The move unused budget recommendation. This field is a member of `oneof`_ ``recommendation``. target_roas_opt_in_recommendation (google.ads.googleads.v10.resources.types.Recommendation.TargetRoasOptInRecommendation): Output only. The Target ROAS opt-in recommendation. This field is a member of `oneof`_ ``recommendation``. responsive_search_ad_recommendation (google.ads.googleads.v10.resources.types.Recommendation.ResponsiveSearchAdRecommendation): Output only. The add responsive search ad recommendation. This field is a member of `oneof`_ ``recommendation``. marginal_roi_campaign_budget_recommendation (google.ads.googleads.v10.resources.types.Recommendation.CampaignBudgetRecommendation): Output only. The marginal ROI campaign budget recommendation. This field is a member of `oneof`_ ``recommendation``. use_broad_match_keyword_recommendation (google.ads.googleads.v10.resources.types.Recommendation.UseBroadMatchKeywordRecommendation): Output only. The use broad match keyword recommendation. This field is a member of `oneof`_ ``recommendation``. 
responsive_search_ad_asset_recommendation (google.ads.googleads.v10.resources.types.Recommendation.ResponsiveSearchAdAssetRecommendation): Output only. The add responsive search ad asset recommendation. This field is a member of `oneof`_ ``recommendation``. """ class RecommendationImpact(proto.Message): r"""The impact of making the change as described in the recommendation. Some types of recommendations may not have impact information. Attributes: base_metrics (google.ads.googleads.v10.resources.types.Recommendation.RecommendationMetrics): Output only. Base metrics at the time the recommendation was generated. potential_metrics (google.ads.googleads.v10.resources.types.Recommendation.RecommendationMetrics): Output only. Estimated metrics if the recommendation is applied. """ base_metrics = proto.Field( proto.MESSAGE, number=1, message="Recommendation.RecommendationMetrics", ) potential_metrics = proto.Field( proto.MESSAGE, number=2, message="Recommendation.RecommendationMetrics", ) class RecommendationMetrics(proto.Message): r"""Weekly account performance metrics. For some recommendation types, these are averaged over the past 90-day period and hence can be fractional. Attributes: impressions (float): Output only. Number of ad impressions. This field is a member of `oneof`_ ``_impressions``. clicks (float): Output only. Number of ad clicks. This field is a member of `oneof`_ ``_clicks``. cost_micros (int): Output only. Cost (in micros) for advertising, in the local currency for the account. This field is a member of `oneof`_ ``_cost_micros``. conversions (float): Output only. Number of conversions. This field is a member of `oneof`_ ``_conversions``. video_views (float): Output only. Number of video views for a video ad campaign. This field is a member of `oneof`_ ``_video_views``. 
""" impressions = proto.Field(proto.DOUBLE, number=6, optional=True,) clicks = proto.Field(proto.DOUBLE, number=7, optional=True,) cost_micros = proto.Field(proto.INT64, number=8, optional=True,) conversions = proto.Field(proto.DOUBLE, number=9, optional=True,) video_views = proto.Field(proto.DOUBLE, number=10, optional=True,) class CampaignBudgetRecommendation(proto.Message): r"""The budget recommendation for budget constrained campaigns. Attributes: current_budget_amount_micros (int): Output only. The current budget amount in micros. This field is a member of `oneof`_ ``_current_budget_amount_micros``. recommended_budget_amount_micros (int): Output only. The recommended budget amount in micros. This field is a member of `oneof`_ ``_recommended_budget_amount_micros``. budget_options (Sequence[google.ads.googleads.v10.resources.types.Recommendation.CampaignBudgetRecommendation.CampaignBudgetRecommendationOption]): Output only. The budget amounts and associated impact estimates for some values of possible budget amounts. """ class CampaignBudgetRecommendationOption(proto.Message): r"""The impact estimates for a given budget amount. Attributes: budget_amount_micros (int): Output only. The budget amount for this option. This field is a member of `oneof`_ ``_budget_amount_micros``. impact (google.ads.googleads.v10.resources.types.Recommendation.RecommendationImpact): Output only. The impact estimate if budget is changed to amount specified in this option. 
""" budget_amount_micros = proto.Field( proto.INT64, number=3, optional=True, ) impact = proto.Field( proto.MESSAGE, number=2, message="Recommendation.RecommendationImpact", ) current_budget_amount_micros = proto.Field( proto.INT64, number=7, optional=True, ) recommended_budget_amount_micros = proto.Field( proto.INT64, number=8, optional=True, ) budget_options = proto.RepeatedField( proto.MESSAGE, number=3, message="Recommendation.CampaignBudgetRecommendation.CampaignBudgetRecommendationOption", ) class KeywordRecommendation(proto.Message): r"""The keyword recommendation. Attributes: keyword (google.ads.googleads.v10.common.types.KeywordInfo): Output only. The recommended keyword. recommended_cpc_bid_micros (int): Output only. The recommended CPC (cost-per-click) bid. This field is a member of `oneof`_ ``_recommended_cpc_bid_micros``. """ keyword = proto.Field( proto.MESSAGE, number=1, message=criteria.KeywordInfo, ) recommended_cpc_bid_micros = proto.Field( proto.INT64, number=3, optional=True, ) class TextAdRecommendation(proto.Message): r"""The text ad recommendation. Attributes: ad (google.ads.googleads.v10.resources.types.Ad): Output only. Recommended ad. creation_date (str): Output only. Creation date of the recommended ad. YYYY-MM-DD format, e.g., 2018-04-17. This field is a member of `oneof`_ ``_creation_date``. auto_apply_date (str): Output only. Date, if present, is the earliest when the recommendation will be auto applied. YYYY-MM-DD format, e.g., 2018-04-17. This field is a member of `oneof`_ ``_auto_apply_date``. """ ad = proto.Field(proto.MESSAGE, number=1, message=gagr_ad.Ad,) creation_date = proto.Field(proto.STRING, number=4, optional=True,) auto_apply_date = proto.Field(proto.STRING, number=5, optional=True,) class TargetCpaOptInRecommendation(proto.Message): r"""The Target CPA opt-in recommendation. 
Attributes: options (Sequence[google.ads.googleads.v10.resources.types.Recommendation.TargetCpaOptInRecommendation.TargetCpaOptInRecommendationOption]): Output only. The available goals and corresponding options for Target CPA strategy. recommended_target_cpa_micros (int): Output only. The recommended average CPA target. See required budget amount and impact of using this recommendation in options list. This field is a member of `oneof`_ ``_recommended_target_cpa_micros``. """ class TargetCpaOptInRecommendationOption(proto.Message): r"""The Target CPA opt-in option with impact estimate. Attributes: goal (google.ads.googleads.v10.enums.types.TargetCpaOptInRecommendationGoalEnum.TargetCpaOptInRecommendationGoal): Output only. The goal achieved by this option. target_cpa_micros (int): Output only. Average CPA target. This field is a member of `oneof`_ ``_target_cpa_micros``. required_campaign_budget_amount_micros (int): Output only. The minimum campaign budget, in local currency for the account, required to achieve the target CPA. Amount is specified in micros, where one million is equivalent to one currency unit. This field is a member of `oneof`_ ``_required_campaign_budget_amount_micros``. impact (google.ads.googleads.v10.resources.types.Recommendation.RecommendationImpact): Output only. The impact estimate if this option is selected. 
""" goal = proto.Field( proto.ENUM, number=1, enum=target_cpa_opt_in_recommendation_goal.TargetCpaOptInRecommendationGoalEnum.TargetCpaOptInRecommendationGoal, ) target_cpa_micros = proto.Field( proto.INT64, number=5, optional=True, ) required_campaign_budget_amount_micros = proto.Field( proto.INT64, number=6, optional=True, ) impact = proto.Field( proto.MESSAGE, number=4, message="Recommendation.RecommendationImpact", ) options = proto.RepeatedField( proto.MESSAGE, number=1, message="Recommendation.TargetCpaOptInRecommendation.TargetCpaOptInRecommendationOption", ) recommended_target_cpa_micros = proto.Field( proto.INT64, number=3, optional=True, ) class MaximizeConversionsOptInRecommendation(proto.Message): r"""The Maximize Conversions Opt-In recommendation. Attributes: recommended_budget_amount_micros (int): Output only. The recommended new budget amount. This field is a member of `oneof`_ ``_recommended_budget_amount_micros``. """ recommended_budget_amount_micros = proto.Field( proto.INT64, number=2, optional=True, ) class EnhancedCpcOptInRecommendation(proto.Message): r"""The Enhanced Cost-Per-Click Opt-In recommendation. """ class SearchPartnersOptInRecommendation(proto.Message): r"""The Search Partners Opt-In recommendation. """ class MaximizeClicksOptInRecommendation(proto.Message): r"""The Maximize Clicks opt-in recommendation. Attributes: recommended_budget_amount_micros (int): Output only. The recommended new budget amount. Only set if the current budget is too high. This field is a member of `oneof`_ ``_recommended_budget_amount_micros``. """ recommended_budget_amount_micros = proto.Field( proto.INT64, number=2, optional=True, ) class OptimizeAdRotationRecommendation(proto.Message): r"""The Optimize Ad Rotation recommendation. """ class CalloutExtensionRecommendation(proto.Message): r"""The Callout extension recommendation. Attributes: recommended_extensions (Sequence[google.ads.googleads.v10.common.types.CalloutFeedItem]): Output only. 
Callout extensions recommended to be added. """ recommended_extensions = proto.RepeatedField( proto.MESSAGE, number=1, message=extensions.CalloutFeedItem, ) class SitelinkExtensionRecommendation(proto.Message): r"""The Sitelink extension recommendation. Attributes: recommended_extensions (Sequence[google.ads.googleads.v10.common.types.SitelinkFeedItem]): Output only. Sitelink extensions recommended to be added. """ recommended_extensions = proto.RepeatedField( proto.MESSAGE, number=1, message=extensions.SitelinkFeedItem, ) class CallExtensionRecommendation(proto.Message): r"""The Call extension recommendation. Attributes: recommended_extensions (Sequence[google.ads.googleads.v10.common.types.CallFeedItem]): Output only. Call extensions recommended to be added. """ recommended_extensions = proto.RepeatedField( proto.MESSAGE, number=1, message=extensions.CallFeedItem, ) class KeywordMatchTypeRecommendation(proto.Message): r"""The keyword match type recommendation. Attributes: keyword (google.ads.googleads.v10.common.types.KeywordInfo): Output only. The existing keyword where the match type should be more broad. recommended_match_type (google.ads.googleads.v10.enums.types.KeywordMatchTypeEnum.KeywordMatchType): Output only. The recommended new match type. """ keyword = proto.Field( proto.MESSAGE, number=1, message=criteria.KeywordInfo, ) recommended_match_type = proto.Field( proto.ENUM, number=2, enum=keyword_match_type.KeywordMatchTypeEnum.KeywordMatchType, ) class MoveUnusedBudgetRecommendation(proto.Message): r"""The move unused budget recommendation. Attributes: excess_campaign_budget (str): Output only. The excess budget's resource_name. This field is a member of `oneof`_ ``_excess_campaign_budget``. budget_recommendation (google.ads.googleads.v10.resources.types.Recommendation.CampaignBudgetRecommendation): Output only. The recommendation for the constrained budget to increase. 
""" excess_campaign_budget = proto.Field( proto.STRING, number=3, optional=True, ) budget_recommendation = proto.Field( proto.MESSAGE, number=2, message="Recommendation.CampaignBudgetRecommendation", ) class TargetRoasOptInRecommendation(proto.Message): r"""The Target ROAS opt-in recommendation. Attributes: recommended_target_roas (float): Output only. The recommended target ROAS (revenue per unit of spend). The value is between 0.01 and 1000.0, inclusive. This field is a member of `oneof`_ ``_recommended_target_roas``. required_campaign_budget_amount_micros (int): Output only. The minimum campaign budget, in local currency for the account, required to achieve the target ROAS. Amount is specified in micros, where one million is equivalent to one currency unit. This field is a member of `oneof`_ ``_required_campaign_budget_amount_micros``. """ recommended_target_roas = proto.Field( proto.DOUBLE, number=1, optional=True, ) required_campaign_budget_amount_micros = proto.Field( proto.INT64, number=2, optional=True, ) class ResponsiveSearchAdAssetRecommendation(proto.Message): r"""The add responsive search ad asset recommendation. Attributes: current_ad (google.ads.googleads.v10.resources.types.Ad): Output only. The current ad to be updated. recommended_assets (google.ads.googleads.v10.resources.types.Ad): Output only. The recommended assets. This is populated only with the new headlines and/or descriptions, and is otherwise empty. """ current_ad = proto.Field(proto.MESSAGE, number=1, message=gagr_ad.Ad,) recommended_assets = proto.Field( proto.MESSAGE, number=2, message=gagr_ad.Ad, ) class ResponsiveSearchAdRecommendation(proto.Message): r"""The add responsive search ad recommendation. Attributes: ad (google.ads.googleads.v10.resources.types.Ad): Output only. Recommended ad. """ ad = proto.Field(proto.MESSAGE, number=1, message=gagr_ad.Ad,) class UseBroadMatchKeywordRecommendation(proto.Message): r"""The use broad match keyword recommendation. 
Attributes: keyword (Sequence[google.ads.googleads.v10.common.types.KeywordInfo]): Output only. Sample of keywords to be expanded to Broad Match. suggested_keywords_count (int): Output only. Total number of keywords to be expanded to Broad Match in the campaign. campaign_keywords_count (int): Output only. Total number of keywords in the campaign. campaign_uses_shared_budget (bool): Output only. Whether the associated campaign uses a shared budget. required_campaign_budget_amount_micros (int): Output only. The budget recommended to avoid becoming budget constrained after applying the recommendation. """ keyword = proto.RepeatedField( proto.MESSAGE, number=1, message=criteria.KeywordInfo, ) suggested_keywords_count = proto.Field(proto.INT64, number=2,) campaign_keywords_count = proto.Field(proto.INT64, number=3,) campaign_uses_shared_budget = proto.Field(proto.BOOL, number=4,) required_campaign_budget_amount_micros = proto.Field( proto.INT64, number=5, ) resource_name = proto.Field(proto.STRING, number=1,) type_ = proto.Field( proto.ENUM, number=2, enum=recommendation_type.RecommendationTypeEnum.RecommendationType, ) impact = proto.Field(proto.MESSAGE, number=3, message=RecommendationImpact,) campaign_budget = proto.Field(proto.STRING, number=24, optional=True,) campaign = proto.Field(proto.STRING, number=25, optional=True,) ad_group = proto.Field(proto.STRING, number=26, optional=True,) dismissed = proto.Field(proto.BOOL, number=27, optional=True,) campaign_budget_recommendation = proto.Field( proto.MESSAGE, number=4, oneof="recommendation", message=CampaignBudgetRecommendation, ) forecasting_campaign_budget_recommendation = proto.Field( proto.MESSAGE, number=22, oneof="recommendation", message=CampaignBudgetRecommendation, ) keyword_recommendation = proto.Field( proto.MESSAGE, number=8, oneof="recommendation", message=KeywordRecommendation, ) text_ad_recommendation = proto.Field( proto.MESSAGE, number=9, oneof="recommendation", message=TextAdRecommendation, ) 
target_cpa_opt_in_recommendation = proto.Field( proto.MESSAGE, number=10, oneof="recommendation", message=TargetCpaOptInRecommendation, ) maximize_conversions_opt_in_recommendation = proto.Field( proto.MESSAGE, number=11, oneof="recommendation", message=MaximizeConversionsOptInRecommendation, ) enhanced_cpc_opt_in_recommendation = proto.Field( proto.MESSAGE, number=12, oneof="recommendation", message=EnhancedCpcOptInRecommendation, ) search_partners_opt_in_recommendation = proto.Field( proto.MESSAGE, number=14, oneof="recommendation", message=SearchPartnersOptInRecommendation, ) maximize_clicks_opt_in_recommendation = proto.Field( proto.MESSAGE, number=15, oneof="recommendation", message=MaximizeClicksOptInRecommendation, ) optimize_ad_rotation_recommendation = proto.Field( proto.MESSAGE, number=16, oneof="recommendation", message=OptimizeAdRotationRecommendation, ) callout_extension_recommendation = proto.Field( proto.MESSAGE, number=17, oneof="recommendation", message=CalloutExtensionRecommendation, ) sitelink_extension_recommendation = proto.Field( proto.MESSAGE, number=18, oneof="recommendation", message=SitelinkExtensionRecommendation, ) call_extension_recommendation = proto.Field( proto.MESSAGE, number=19, oneof="recommendation", message=CallExtensionRecommendation, ) keyword_match_type_recommendation = proto.Field( proto.MESSAGE, number=20, oneof="recommendation", message=KeywordMatchTypeRecommendation, ) move_unused_budget_recommendation = proto.Field( proto.MESSAGE, number=21, oneof="recommendation", message=MoveUnusedBudgetRecommendation, ) target_roas_opt_in_recommendation = proto.Field( proto.MESSAGE, number=23, oneof="recommendation", message=TargetRoasOptInRecommendation, ) responsive_search_ad_recommendation = proto.Field( proto.MESSAGE, number=28, oneof="recommendation", message=ResponsiveSearchAdRecommendation, ) marginal_roi_campaign_budget_recommendation = proto.Field( proto.MESSAGE, number=29, oneof="recommendation", 
message=CampaignBudgetRecommendation, ) use_broad_match_keyword_recommendation = proto.Field( proto.MESSAGE, number=30, oneof="recommendation", message=UseBroadMatchKeywordRecommendation, ) responsive_search_ad_asset_recommendation = proto.Field( proto.MESSAGE, number=31, oneof="recommendation", message=ResponsiveSearchAdAssetRecommendation, ) __all__ = tuple(sorted(__protobuf__.manifest))
googleads/google-ads-python
google/ads/googleads/v10/resources/types/recommendation.py
Python
apache-2.0
31,171
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Doctest demo: a dict subclass that also supports attribute-style access.

class Dict(dict):
    '''
    Simple dict but also support access as x.y style

    >>> d1 = Dict()
    >>> d1['x'] = 100
    >>> d1.x
    100
    >>> d1.y = 200
    >>> d1['y']
    200
    >>> d2 = Dict(a=1, b=2, c='3')
    >>> d2.c
    '3'
    >>> d2['empty']
    Traceback (most recent call last):
        ...
    KeyError: 'empty'
    >>> d2.empty
    Traceback (most recent call last):
        ...
    AttributeError: 'Dict' object has no attribute 'empty'
    '''

    def __init__(self, **kw):
        # Keyword arguments only, mirroring Dict(a=1, b=2) construction.
        super().__init__(**kw)

    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails; fall back to keys.
        if name in self:
            return self[name]
        raise AttributeError(f"'Dict' object has no attribute '{name}'")

    def __setattr__(self, name, value):
        # Attribute assignment is stored as a plain dict entry.
        self[name] = value


if __name__ == '__main__':
    import doctest
    doctest.testmod()
henryneu/Python
sample/mydict2.py
Python
apache-2.0
836
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 21 20:05:40 2016

@author: lucia
"""


def funcion1():
    """Print the two demo messages for the projecttwo exercise."""
    mensajes = (
        'Primera funcion de mi projecttwo',
        'Modifico la primera funcion 2',
    )
    for mensaje in mensajes:
        print(mensaje)
luciamc/projecttwo
funcion1.py
Python
apache-2.0
190
import asyncio from itertools import compress import traceback from proxypool.rules.rule_base import CrawlerRuleBase from proxypool.utils import page_download, page_download_phantomjs, logger, Result class ProxyCrawler(object): """Crawl proxies according to the rules.""" def __init__(self, proxies, rules=None): """Crawler init. Args: proxies: aysncio.Queue object rules: crawler rules of each proxy web, should be iterable object flag: stop flag for page downloading """ self._proxies = proxies self._stop_flag = asyncio.Event() # stop flag for crawler, not for validator self._pages = asyncio.Queue() self._rules = rules if rules else CrawlerRuleBase.__subclasses__() async def _parse_page(self): while 1: page = await self._pages.get() await self._parse_proxy(page.rule, page.content) self._pages.task_done() async def _parse_proxy(self, rule, page): ips = page.xpath(rule.ip_xpath) ports = page.xpath(rule.port_xpath) if not ips or not ports: logger.warning('{2} crawler could not get ip(len={0}) or port(len={1}), please check the xpaths or network'. format(len(ips), len(ports), rule.__rule_name__)) return proxies = map(lambda x, y: '{0}:{1}'.format(x.text.strip(), y.text.strip()), ips, ports) if rule.filters: # filter proxies filters = [] for i, ft in enumerate(rule.filters_xpath): field = page.xpath(ft) if not field: logger.warning('{1} crawler could not get {0} field, please check the filter xpath'. format(rule.filters[i], rule.__rule_name__)) continue filters.append(map(lambda x: x.text.strip(), field)) filters = zip(*filters) selector = map(lambda x: x == rule.filters, filters) proxies = compress(proxies, selector) for proxy in proxies: await self._proxies.put(proxy) # put proxies in Queue to validate @staticmethod def _url_generator(rule): """Url generator of next page. Returns: url of next page, like: 'http://www.example.com/page/2'. 
""" page = yield Result(rule.start_url, rule) for i in range(2, rule.page_count + 1): if rule.urls_format: yield yield Result(rule.urls_format.format(rule.start_url, i), rule) elif rule.next_page_xpath: if page is None: break next_page = page.xpath(rule.next_page_xpath) if next_page: yield page = yield Result(rule.next_page_host + str(next_page[0]).strip(), rule) else: break async def _parser(self, count): to_parse = [self._parse_page() for _ in range(count)] await asyncio.wait(to_parse) async def _downloader(self, rule): if not rule.use_phantomjs: await page_download(ProxyCrawler._url_generator(rule), self._pages, self._stop_flag) else: await page_download_phantomjs(ProxyCrawler._url_generator(rule), self._pages, rule.phantomjs_load_flag, self._stop_flag) async def _crawler(self, rule): logger.debug('{0} crawler started'.format(rule.__rule_name__)) parser = asyncio.ensure_future(self._parser(rule.page_count)) await self._downloader(rule) await self._pages.join() parser.cancel() logger.debug('{0} crawler finished'.format(rule.__rule_name__)) async def start(self): to_crawl = [self._crawler(rule) for rule in self._rules] await asyncio.wait(to_crawl) def stop(self): self._stop_flag.set() # set crawler's stop flag logger.warning('proxy crawler was stopping...') def reset(self): self._stop_flag = asyncio.Event() # once setted, create a new Event object logger.debug('proxy crawler reseted') def proxy_crawler_run(proxies, rules = None): pc = ProxyCrawler(proxies, rules) loop = asyncio.get_event_loop() try: loop.run_until_complete(pc.start()) except: logger.error(traceback.format_exc()) finally: loop.close() def proxy_crawler_test_run(proxies, count, rules = None): pc = ProxyCrawler(proxies, rules) loop = asyncio.get_event_loop() try: loop.run_until_complete(pc.start()) count.value = proxies.qsize() except: logger.error(traceback.format_exc()) finally: loop.close() if __name__ == '__main__': proxies = asyncio.Queue() proxy_crawler_run(proxies)
arrti/proxypool
proxypool/proxy_crawler.py
Python
apache-2.0
4,906
import hashlib from unittest import TestCase import uuid from falcon import request from stoplight import validate from stoplight.exceptions import ValidationFailed from deuce.transport import validation as v from deuce.transport.wsgi import errors class MockRequest(object): pass class InvalidSeparatorError(Exception): """Invalid Separator Error is raised whenever a invalid separator is set for joining query strings in a url""" def __init__(self, msg): Exception.__init__(self, msg) class TestRulesBase(TestCase): @staticmethod def build_request(params=None, separator='&'): """Build a request object to use for testing :param params: list of tuples containing the name and value pairs for parameters to add to the QUERY_STRING """ mock_env = { 'wsgi.errors': 'mock', 'wsgi.input': 'mock', 'REQUEST_METHOD': 'PUT', 'PATH_INFO': '/', 'SERVER_NAME': 'mock', 'SERVER_PORT': '8888', 'QUERY_STRING': None } if params is not None: for param in params: name = param[0] value = param[1] param_set = '{0}='.format(name) if value is not None and len(value): param_set = '{0}={1}'.format(name, value) if mock_env['QUERY_STRING'] is None: mock_env['QUERY_STRING'] = param_set else: if separator in ('&', ';'): mock_env['QUERY_STRING'] = '{1}{0}{2}'.format( separator, mock_env['QUERY_STRING'], param_set) else: raise InvalidSeparatorError('separator in query string' 'must be & or ;') if mock_env['QUERY_STRING'] is None: del mock_env['QUERY_STRING'] return request.Request(mock_env) def cases_with_none_okay(self): positive_cases = self.__class__.positive_cases[:] positive_cases.append(None) negative_cases = self.__class__.negative_cases[:] while negative_cases.count(None): negative_cases.remove(None) while negative_cases.count(''): negative_cases.remove('') return (positive_cases, negative_cases) class TestRequests(TestRulesBase): def test_request(self): positive_case = [TestRulesBase.build_request()] negative_case = [MockRequest()] for case in positive_case: v.is_request(case) for case in 
negative_case: with self.assertRaises(ValidationFailed): v.is_request(none_ok=True)(case) class TestVaultRules(TestRulesBase): positive_cases = [ 'a', '0', '__vault_id____', '-_-_-_-_-_-_-_-', 'snake_case_is_ok', 'So-are-hyphonated-names', 'a' * v.VAULT_ID_MAX_LEN ] negative_cases = [ '', # empty case should raise '.', '!', '@', '#', '$', '%', '^', '&', '*', '[', ']', '/', '@#$@#$@#^@%$@#@#@#$@!!!@$@$@', '\\', 'a' * (v.VAULT_ID_MAX_LEN + 1), None ] @validate(vault_id=v.VaultGetRule) def utilize_get_vault_id(self, vault_id): return True @validate(vault_id=v.VaultPutRule) def utilize_put_vault_id(self, vault_id): return True @validate(req=v.RequestRule(v.VaultMarkerRule)) def utilize_request(self, req, raiseme=False): if raiseme: raise RuntimeError('QUERY_STRING: {0}'.format(req.query_string)) else: return True def test_vault_id(self): for name in self.__class__.positive_cases: v.val_vault_id(name) for name in self.__class__.negative_cases: with self.assertRaises(ValidationFailed): v.val_vault_id()(name) def test_vault_get(self): for p_case in self.__class__.positive_cases: self.assertTrue(self.utilize_get_vault_id(p_case)) for case in self.__class__.negative_cases: with self.assertRaises(errors.HTTPNotFound): self.utilize_get_vault_id(case) def test_vault_put(self): for p_case in self.__class__.positive_cases: self.assertTrue(self.utilize_put_vault_id(p_case)) for case in self.__class__.negative_cases: with self.assertRaises(errors.HTTPBadRequestAPI): self.utilize_put_vault_id(case) def test_vault_id_marker(self): positive_cases, negative_cases = self.cases_with_none_okay() for vault_id in positive_cases: vault_id_req = TestRulesBase.build_request(params=[('marker', vault_id)]) self.assertTrue(self.utilize_request(vault_id_req)) # We currently skip the negative test for the VaultMarkerRule # due to the nature of the negative cases for the Vault Name. # Leaving the code in below should we figure out a good way to # capture the data for the URL encoding. 
# # Note: It is not a failure of build_request()'s QUERY_STRING building # but a miss-match between it, urllib.parse.urlencode(), and Falcon. # Use of urllib.parse.urlencode() has other issues here as well. # # for vault_id in negative_cases: # vault_id_req = TestRulesBase.build_request(params=[('marker', # vault_id)]) # with self.assertRaises(errors.HTTPNotFound): # self.utilize_request(vault_id_req, raiseme=True) class TestMetadataBlockRules(TestRulesBase): positive_cases = [ 'da39a3ee5e6b4b0d3255bfef95601890afd80709', 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 'ffffffffffffffffffffffffffffffffffffffff', 'a' * 40, ] negative_cases = [ '', '.', 'a', '0', 'f', 'F', 'z', '#', '$', '?', 'a39a3ee5e6b4b0d3255bfef95601890afd80709', # one char short 'da39a3ee5e6b4b0d3255bfef95601890afd80709a', # one char long 'DA39A3EE5E6B4B0D3255BFEF95601890AFD80709', 'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF', 'AaaAaaAaaaaAaAaaaAaaaaaaaAAAAaaaaAaaaaaa' * 2, 'AaaAaaAaaaaAaAaaaAaaaaaaaAAAAaaaaAaaaaaa' * 3, 'AaaAaaAaaaaAaAaaaAaaaaaaaAAAAaaaaAaaaaaa' * 4, None ] @validate(metadata_block_id=v.BlockGetRule) def utilize_get_metadata_block_get(self, metadata_block_id): return True @validate(metadata_block_id=v.BlockPutRule) def utilize_put_metadata_block_id(self, metadata_block_id): return True @validate(metadata_block_id=v.BlockPostRule) def utilize_post_metadata_block_id(self, metadata_block_id): return True @validate(metadata_block_id=v.BlockGetRuleNoneOk) def utilize_get_metadata_block_get_none_okay(self, metadata_block_id): return True @validate(metadata_block_id=v.BlockPutRuleNoneOk) def utilize_put_metadata_block_id_none_okay(self, metadata_block_id): return True @validate(metadata_block_id=v.BlockPostRuleNoneOk) def utilize_post_metadata_block_id_none_okay(self, metadata_block_id): return True @validate(req=v.RequestRule(v.BlockMarkerRule)) def utilize_request(self, req, raiseme=False): if raiseme: raise RuntimeError('QUERY_STRING: {0}'.format(req.query_string)) else: return True def 
test_block_id(self): for blockid in self.__class__.positive_cases: v.val_block_id(blockid) for blockid in self.__class__.negative_cases: with self.assertRaises(v.ValidationFailed): v.val_block_id()(blockid) def test_get_block_id(self): for blockid in self.__class__.positive_cases: self.utilize_get_metadata_block_get(blockid) for blockid in self.__class__.negative_cases: with self.assertRaises(errors.HTTPNotFound): self.utilize_get_metadata_block_get(blockid) def test_put_block_id(self): for blockid in self.__class__.positive_cases: self.utilize_put_metadata_block_id(blockid) for blockid in self.__class__.negative_cases: with self.assertRaises(errors.HTTPBadRequestAPI): self.utilize_put_metadata_block_id(blockid) def test_get_block_id_none_okay(self): positive_cases, negative_cases = self.cases_with_none_okay() for blockid in positive_cases: self.utilize_get_metadata_block_get_none_okay(blockid) for blockid in negative_cases: with self.assertRaises(errors.HTTPNotFound): self.utilize_get_metadata_block_get_none_okay(blockid) def test_put_block_id_none_okay(self): positive_cases, negative_cases = self.cases_with_none_okay() for blockid in positive_cases: self.utilize_put_metadata_block_id_none_okay(blockid) for blockid in negative_cases: with self.assertRaises(errors.HTTPBadRequestAPI): self.utilize_put_metadata_block_id_none_okay(blockid) def test_post_block_id(self): for blockid in self.__class__.positive_cases: self.utilize_post_metadata_block_id(blockid) for blockid in self.__class__.negative_cases: with self.assertRaises(errors.HTTPBadRequestAPI): self.utilize_post_metadata_block_id(blockid) def test_post_block_id_none_okay(self): positive_cases, negative_cases = self.cases_with_none_okay() for blockid in positive_cases: self.utilize_post_metadata_block_id_none_okay(blockid) for blockid in negative_cases: with self.assertRaises(errors.HTTPBadRequestAPI): self.utilize_post_metadata_block_id_none_okay(blockid) def test_block_id_marker(self): positive_cases, 
negative_cases = self.cases_with_none_okay() for block_id in positive_cases: block_id_req = TestRulesBase.build_request(params=[('marker', block_id)]) self.assertTrue(self.utilize_request(block_id_req)) for block_id in negative_cases: block_id_req = TestRulesBase.build_request(params=[('marker', block_id)]) with self.assertRaises(errors.HTTPNotFound): self.utilize_request(block_id_req, raiseme=True) class TestStorageBlockRules(TestRulesBase): positive_cases = [hashlib.sha1(bytes(i)).hexdigest() + '_' + str(uuid.uuid4()) for i in range(0, 1000)] negative_cases = [ '', 'fecfd28bbc9345891a66d7c1b8ff46e60192d' '2840c3de7c4-5fe9-4b2e-b19a-9cf81364997b', # note no '_' between sha1 # and uuid 'e7bf692b-ec7b-40ad-b0d1-45ce6798fb6z', # note trailing z str(uuid.uuid4()).upper(), # Force case sensitivity None ] @validate(storage_block_id=v.StorageBlockGetRule) def utilize_get_storage_block_get(self, storage_block_id): return True @validate(storage_block_id=v.StorageBlockPutRule) def utilize_put_storage_block_id(self, storage_block_id): return True @validate(storage_block_id=v.StorageBlockRuleGetNoneOk) def utilize_get_storage_block_get_none_okay(self, storage_block_id): return True @validate(storage_block_id=v.StorageBlockRulePutNoneOk) def utilize_put_storage_block_id_none_okay(self, storage_block_id): return True @validate(req=v.RequestRule(v.StorageBlockMarkerRule)) def utilize_request(self, req, raiseme=False): if raiseme: raise RuntimeError('QUERY_STRING: {0}'.format(req.query_string)) else: return True def test_storage_storage_block_id(self): for storage_id in self.__class__.positive_cases: v.val_storage_block_id(storage_id) for storage_id in self.__class__.negative_cases: with self.assertRaises(ValidationFailed): v.val_storage_block_id()(storage_id) def test_get_storage_block_id(self): for storage_id in self.__class__.positive_cases: self.utilize_get_storage_block_get(storage_id) for storage_id in self.__class__.negative_cases: with 
self.assertRaises(errors.HTTPNotFound): self.utilize_get_storage_block_get(storage_id) def test_put_storage_block_id(self): for storage_id in self.__class__.positive_cases: self.utilize_put_storage_block_id(storage_id) for storage_id in self.__class__.negative_cases: with self.assertRaises(errors.HTTPBadRequestAPI): self.utilize_put_storage_block_id(storage_id) def test_get_storage_block_id_none_okay(self): positive_cases, negative_cases = self.cases_with_none_okay() for storage_id in positive_cases: self.utilize_get_storage_block_get_none_okay(storage_id) for storage_id in negative_cases: with self.assertRaises(errors.HTTPNotFound): self.utilize_get_storage_block_get_none_okay(storage_id) def test_put_storage_block_id_none_okay(self): positive_cases, negative_cases = self.cases_with_none_okay() for storage_id in positive_cases: self.utilize_put_storage_block_id_none_okay(storage_id) for storage_id in negative_cases: with self.assertRaises(errors.HTTPBadRequestAPI): self.utilize_put_storage_block_id_none_okay(storage_id) def test_storage_block_id_marker(self): positive_cases, negative_cases = self.cases_with_none_okay() for storage_id in positive_cases: storage_id_req = TestRulesBase.build_request(params=[('marker', storage_id)]) self.assertTrue(self.utilize_request(storage_id_req)) for storage_id in negative_cases: storage_id_req = TestRulesBase.build_request(params=[('marker', storage_id)]) with self.assertRaises(errors.HTTPNotFound): self.utilize_request(storage_id_req, raiseme=True) class TestFileRules(TestRulesBase): # Let's try try to append some UUIds and check for faileus positive_cases = [str(uuid.uuid4()) for _ in range(0, 1000)] negative_cases = [ '', 'e7bf692b-ec7b-40ad-b0d1-45ce6798fb6z', # note trailing z str(uuid.uuid4()).upper(), # Force case sensitivity None ] @validate(file_id=v.FileGetRule) def utilize_file_id_get(self, file_id): return True @validate(file_id=v.FilePutRule) def utilize_file_id_put(self, file_id): return True 
@validate(file_id=v.FilePostRule) def utilize_file_id_post(self, file_id): return True @validate(file_id=v.FileGetRuleNoneOk) def utilize_file_id_get_none_okay(self, file_id): return True @validate(file_id=v.FilePutRuleNoneOk) def utilize_file_id_put_none_okay(self, file_id): return True @validate(file_id=v.FilePostRuleNoneOk) def utilize_file_id_post_none_okay(self, file_id): return True @validate(req=v.RequestRule(v.FileMarkerRule)) def utilize_request(self, req, raiseme=False): if raiseme: raise RuntimeError('QUERY_STRING: {0}'.format(req.query_string)) else: return True def test_file_id(self): for fileid in self.__class__.positive_cases: v.val_file_id(fileid) for fileid in self.__class__.negative_cases: with self.assertRaises(ValidationFailed): v.val_file_id()(fileid) def test_get_file_id(self): for file_id in self.__class__.positive_cases: self.utilize_file_id_get(file_id) for file_id in self.__class__.negative_cases: with self.assertRaises(errors.HTTPNotFound): self.utilize_file_id_get(file_id) def test_put_file_id(self): for file_id in self.__class__.positive_cases: self.utilize_file_id_put(file_id) for file_id in self.__class__.negative_cases: with self.assertRaises(errors.HTTPBadRequestAPI): self.utilize_file_id_put(file_id) def test_post_file_id(self): for file_id in self.__class__.positive_cases: self.utilize_file_id_post(file_id) for file_id in self.__class__.negative_cases: with self.assertRaises(errors.HTTPBadRequestAPI): self.utilize_file_id_post(file_id) def test_get_file_id_none_okay(self): positive_cases, negative_cases = self.cases_with_none_okay() for file_id in positive_cases: self.utilize_file_id_get_none_okay(file_id) for file_id in negative_cases: with self.assertRaises(errors.HTTPNotFound): self.utilize_file_id_get_none_okay(file_id) def test_put_file_id_none_okay(self): positive_cases, negative_cases = self.cases_with_none_okay() for file_id in positive_cases: self.utilize_file_id_put_none_okay(file_id) for file_id in negative_cases: with 
self.assertRaises(errors.HTTPBadRequestAPI): self.utilize_file_id_put_none_okay(file_id) def test_post_file_id_none_okay(self): positive_cases, negative_cases = self.cases_with_none_okay() for file_id in positive_cases: self.utilize_file_id_post_none_okay(file_id) for file_id in negative_cases: with self.assertRaises(errors.HTTPBadRequestAPI): self.utilize_file_id_post_none_okay(file_id) def test_file_id_marker(self): positive_cases, negative_cases = self.cases_with_none_okay() for file_id in positive_cases: file_id_req = TestRulesBase.build_request(params=[('marker', file_id)]) self.assertTrue(self.utilize_request(file_id_req)) for file_id in negative_cases: file_id_req = TestRulesBase.build_request(params=[('marker', file_id)]) with self.assertRaises(errors.HTTPNotFound): self.utilize_request(file_id_req, raiseme=True) class TestOffsetRules(TestRulesBase): positive_cases = [ '0', '1', '2', '3', '55', '100', '101010', '99999999999999999999999999999' ] negative_cases = [ '-1', '-23', 'O', 'zero', 'one', '-999', '1.0', '1.3', '0.0000000000001', None ] @validate(req=v.RequestRule(v.OffsetMarkerRule)) def utilize_request(self, req, raiseme=False): if raiseme: raise RuntimeError('QUERY_STRING: {0}'.format(req.query_string)) else: return True def test_offset(self): for offset in self.__class__.positive_cases: v.val_offset()(offset) for offset in self.__class__.negative_cases: with self.assertRaises(ValidationFailed): v.val_offset()(offset) def test_offset_marker(self): positive_cases, negative_cases = self.cases_with_none_okay() for offset in positive_cases: offset_req = TestRulesBase.build_request(params=[('marker', offset)]) self.assertTrue(self.utilize_request(offset_req)) for offset in negative_cases: offset_req = TestRulesBase.build_request(params=[('marker', offset)]) with self.assertRaises(errors.HTTPNotFound): self.utilize_request(offset_req, raiseme=True) class TestLimitRules(TestRulesBase): positive_cases = [ '0', '100', '100000000', '100' ] negative_cases = [ 
'-1', 'blah', None ] @validate(req=v.RequestRule(v.LimitRule)) def utilize_request(self, req, raiseme=False): if raiseme: raise RuntimeError('QUERY_STRING: {0}'.format(req.query_string)) else: return True def test_limit(self): for limit in self.__class__.positive_cases: v.val_limit()(limit) for limit in self.__class__.negative_cases: with self.assertRaises(ValidationFailed): v.val_limit()(limit) v.val_limit(empty_ok=True)('') v.val_limit(none_ok=True)(None) with self.assertRaises(ValidationFailed): v.val_limit()('') with self.assertRaises(ValidationFailed): v.val_limit()(None) def test_limit_marker(self): positive_cases, negative_cases = self.cases_with_none_okay() for limit in positive_cases: limit_req = TestRulesBase.build_request(params=[('limit', limit)]) self.assertTrue(self.utilize_request(limit_req)) for limit in negative_cases: limit_req = TestRulesBase.build_request(params=[('limit', limit)]) with self.assertRaises(errors.HTTPNotFound): self.utilize_request(limit_req, raiseme=True)
rackerlabs/deuce
deuce/tests/test_validation.py
Python
apache-2.0
21,839
from selenium_test_case import SeleniumTestCase class DocsTest(SeleniumTestCase): def test_links_between_pages(self): self.open_path('/help') self.assert_text_present('Frequently Asked Questions') self.click_and_wait('link=Terms of Service') self.assert_text_present('Terms of Service for Google Resource Finder') self.click_and_wait('link=Privacy') self.assert_text_present('Google Resource Finder Privacy Policy') self.click_and_wait('link=Help') self.assert_text_present('Frequently Asked Questions') def test_languages(self): # English (en) self.open_path('/help?lang=en') self.assert_text_present('Frequently Asked Questions') self.click_and_wait('link=Terms of Service') self.assert_text_present('Terms of Service for Google Resource Finder') self.click_and_wait('link=Privacy') self.assert_text_present('Google Resource Finder Privacy Policy') self.click_and_wait('link=Help') self.assert_text_present('Frequently Asked Questions') # Spanish (es-419) self.open_path('/help?lang=es') self.assert_text_present('Preguntas frecuentes') self.click_and_wait('link=Condiciones del servicio') self.assert_text_present( 'Condiciones del servicio del Buscador de recursos de Google') self.click_and_wait(u'link=Privacidad') self.assert_text_present( u'Pol\u00edtica de privacidad del Buscador de recursos de Google') self.click_and_wait(u'link=Ayuda') self.assert_text_present('Preguntas frecuentes') # French (fr) self.open_path('/help?lang=fr') self.assert_text_present(u'Questions fr\u00e9quentes') self.click_and_wait('link=Conditions d\'utilisation') self.assert_text_present( u'Conditions d\'utilisation de Google Resource Finder') self.click_and_wait(u'link=Confidentialit\u00e9') self.assert_text_present( u'R\u00e8gles de confidentialit\u00e9 de Google Resource Finder') self.click_and_wait(u'link=Aide') self.assert_text_present(u'Questions fr\u00e9quentes') # Kreyol (ht) self.open_path('/help?lang=ht') self.assert_text_present(u'Kesyon Div\u00e8s Moun Poze Tout Tan') 
self.click_and_wait(u'link=Kondisyon S\u00e8vis yo') self.assert_text_present( u'Kondisyon S\u00e8vis pou Resource Finder Google') self.click_and_wait(u'link=Vi prive') self.assert_text_present(u'Politik Resp\u00e8 Pou Moun ak ' + u'\u201cResource Finder\u201d nan Google') self.click_and_wait(u'link=Ed') self.assert_text_present(u'Kesyon Div\u00e8s Moun Poze Tout Tan')
Princessgladys/googleresourcefinder
tests/docs_test.py
Python
apache-2.0
2,824
#!/usr/bin/env python # coding=utf-8 __author__ = 'pyphrb' from .import index from flask import render_template from model import NameFrom @index.route('/', methods=['GET', 'POST']) def index(): name = None form = NameFrom() if form.validate_on_submit(): name = form.name.data form.name.data = '' return render_template('index/index.html', form=form, name=name)
pyphrb/myweb
app/index/view.py
Python
apache-2.0
394
config = { "interfaces": { "google.spanner.admin.database.v1.DatabaseAdmin": { "retry_codes": { "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], "non_idempotent": [], }, "retry_params": { "default": { "initial_retry_delay_millis": 1000, "retry_delay_multiplier": 1.3, "max_retry_delay_millis": 32000, "initial_rpc_timeout_millis": 60000, "rpc_timeout_multiplier": 1.0, "max_rpc_timeout_millis": 60000, "total_timeout_millis": 600000, } }, "methods": { "ListDatabases": { "timeout_millis": 30000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, "CreateDatabase": { "timeout_millis": 3600000, "retry_codes_name": "non_idempotent", "retry_params_name": "default", }, "GetDatabase": { "timeout_millis": 30000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, "UpdateDatabaseDdl": { "timeout_millis": 3600000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, "DropDatabase": { "timeout_millis": 3600000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, "GetDatabaseDdl": { "timeout_millis": 30000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, "SetIamPolicy": { "timeout_millis": 30000, "retry_codes_name": "non_idempotent", "retry_params_name": "default", }, "GetIamPolicy": { "timeout_millis": 30000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, "TestIamPermissions": { "timeout_millis": 30000, "retry_codes_name": "non_idempotent", "retry_params_name": "default", }, }, } } }
tseaver/google-cloud-python
spanner/google/cloud/spanner_admin_database_v1/gapic/database_admin_client_config.py
Python
apache-2.0
2,633
from django.conf import settings from django.db import models from django.core.cache import cache from django.dispatch import receiver from seahub.base.fields import LowerCaseCharField from seahub.profile.settings import EMAIL_ID_CACHE_PREFIX, EMAIL_ID_CACHE_TIMEOUT from registration.signals import user_registered class ProfileManager(models.Manager): def add_or_update(self, username, nickname, intro, lang_code=None): """Add or update user profile. """ try: profile = self.get(user=username) profile.nickname = nickname profile.intro = intro profile.lang_code = lang_code except Profile.DoesNotExist: profile = self.model(user=username, nickname=nickname, intro=intro, lang_code=lang_code) profile.save(using=self._db) return profile def get_profile_by_user(self, username): """Get a user's profile. """ try: return super(ProfileManager, self).get(user=username) except Profile.DoesNotExist: return None def get_user_language(self, username): """Get user's language from profile. Return default language code if user has no preferred language. 
Arguments: - `self`: - `username`: """ try: profile = self.get(user=username) if profile.lang_code is not None: return profile.lang_code else: return settings.LANGUAGE_CODE except Profile.DoesNotExist: return settings.LANGUAGE_CODE def delete_profile_by_user(self, username): self.filter(user=username).delete() class Profile(models.Model): user = models.EmailField(unique=True) nickname = models.CharField(max_length=64, blank=True) intro = models.TextField(max_length=256, blank=True) lang_code = models.TextField(max_length=50, null=True, blank=True) objects = ProfileManager() def set_lang_code(self, lang_code): self.lang_code = lang_code self.save() class DetailedProfileManager(models.Manager): def add_detailed_profile(self, username, department, telephone): d_profile = self.model(user=username, department=department, telephone=telephone) d_profile.save(using=self._db) return d_profile def add_or_update(self, username, department, telephone): try: d_profile = self.get(user=username) d_profile.department = department d_profile.telephone = telephone except DetailedProfile.DoesNotExist: d_profile = self.model(user=username, department=department, telephone=telephone) d_profile.save(using=self._db) return d_profile def get_detailed_profile_by_user(self, username): """Get a user's profile. """ try: return super(DetailedProfileManager, self).get(user=username) except DetailedProfile.DoesNotExist: return None class DetailedProfile(models.Model): user = LowerCaseCharField(max_length=255, db_index=True) department = models.CharField(max_length=512) telephone = models.CharField(max_length=100) objects = DetailedProfileManager() ########## signal handler @receiver(user_registered) def clean_email_id_cache(sender, **kwargs): from seahub.utils import normalize_cache_key user = kwargs['user'] key = normalize_cache_key(user.email, EMAIL_ID_CACHE_PREFIX) cache.set(key, user.id, EMAIL_ID_CACHE_TIMEOUT)
skmezanul/seahub
seahub/profile/models.py
Python
apache-2.0
3,686
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from networking_cisco.plugins.cisco.cfg_agent.service_helpers import (
    service_helper)
from neutron.common import rpc as n_rpc
from neutron import context as n_context
from neutron.i18n import _LE
from neutron.plugins.common import constants
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
import oslo_messaging

from neutron_fwaas.services.firewall.drivers.cisco import csr_acl_driver

LOG = logging.getLogger(__name__)

# Queue name and event types used to relay firewall notifications from the
# plugin-side RPC handlers to the agent's processing loop.
CSR_FW_EVENT_Q_NAME = 'csr_fw_event_q'
CSR_FW_EVENT_CREATE = 'FW_EVENT_CREATE'
CSR_FW_EVENT_UPDATE = 'FW_EVENT_UPDATE'
CSR_FW_EVENT_DELETE = 'FW_EVENT_DELETE'


class CsrFirewalllPluginApi(object):
    """CsrFirewallServiceHelper (Agent) side of the ACL RPC API.

    Thin RPC client: every method prepares a call context and issues a
    synchronous call to the firewall plugin on the configured topic.
    (Class name spelling kept as-is for backward compatibility.)
    """

    @log_helpers.log_method_call
    def __init__(self, topic, host):
        self.host = host
        target = oslo_messaging.Target(topic=topic, version='1.0')
        self.client = n_rpc.get_client(target)

    @log_helpers.log_method_call
    def get_firewalls_for_device(self, context, **kwargs):
        """Get Firewalls with rules for a device from Plugin."""
        cctxt = self.client.prepare()
        return cctxt.call(context, 'get_firewalls_for_device',
                          host=self.host)

    @log_helpers.log_method_call
    def get_firewalls_for_tenant(self, context, **kwargs):
        """Get Firewalls with rules for a tenant from the Plugin."""
        cctxt = self.client.prepare()
        return cctxt.call(context, 'get_firewalls_for_tenant',
                          host=self.host)

    @log_helpers.log_method_call
    def get_tenants_with_firewalls(self, context, **kwargs):
        """Get Tenants that have Firewalls configured from plugin."""
        cctxt = self.client.prepare()
        return cctxt.call(context, 'get_tenants_with_firewalls',
                          host=self.host)

    @log_helpers.log_method_call
    def set_firewall_status(self, context, fw_id, status, status_data=None):
        """Make a RPC to set the status of a firewall."""
        cctxt = self.client.prepare()
        return cctxt.call(context, 'set_firewall_status', host=self.host,
                          firewall_id=fw_id, status=status,
                          status_data=status_data)

    def firewall_deleted(self, context, firewall_id):
        """Make a RPC to indicate that the firewall resources are deleted."""
        cctxt = self.client.prepare()
        return cctxt.call(context, 'firewall_deleted', host=self.host,
                          firewall_id=firewall_id)


class CsrFirewallServiceHelper(object):
    """Agent-side helper: consumes firewall notifications from the plugin,
    drives the CSR ACL driver, and reports resulting status back via RPC.
    """

    @log_helpers.log_method_call
    def __init__(self, host, conf, cfg_agent):
        super(CsrFirewallServiceHelper, self).__init__()
        self.conf = conf
        self.cfg_agent = cfg_agent
        # Start in fullsync mode so pre-existing state is reconciled on the
        # first processing cycle after (re)start.
        self.fullsync = True
        self.event_q = service_helper.QueueMixin()
        self.fw_plugin_rpc = CsrFirewalllPluginApi(
            'CISCO_FW_PLUGIN', conf.host)
        self.topic = 'CISCO_FW'
        self._setup_rpc()

        self.acl_driver = csr_acl_driver.CsrAclDriver()

    def _setup_rpc(self):
        # Fanout consumer: receive create/update/delete notifications
        # broadcast by the plugin.
        self.conn = n_rpc.create_connection(new=True)
        self.endpoints = [self]
        self.conn.create_consumer(self.topic, self.endpoints, fanout=True)
        self.conn.consume_in_threads()

    ### Notifications from Plugin ####

    def create_firewall(self, context, firewall, host):
        """Handle Rpc from plugin to create a firewall."""
        LOG.debug("create_firewall: firewall %s", firewall)
        event_data = {'event': CSR_FW_EVENT_CREATE,
                      'context': context,
                      'firewall': firewall,
                      'host': host}
        self.event_q.enqueue(CSR_FW_EVENT_Q_NAME, event_data)

    def update_firewall(self, context, firewall, host):
        """Handle Rpc from plugin to update a firewall."""
        LOG.debug("update_firewall: firewall %s", firewall)
        event_data = {'event': CSR_FW_EVENT_UPDATE,
                      'context': context,
                      'firewall': firewall,
                      'host': host}
        self.event_q.enqueue(CSR_FW_EVENT_Q_NAME, event_data)

    def delete_firewall(self, context, firewall, host):
        """Handle Rpc from plugin to delete a firewall."""
        LOG.debug("delete_firewall: firewall %s", firewall)
        event_data = {'event': CSR_FW_EVENT_DELETE,
                      'context': context,
                      'firewall': firewall,
                      'host': host}
        self.event_q.enqueue(CSR_FW_EVENT_Q_NAME, event_data)

    def _invoke_firewall_driver(self, context, firewall, func_name):
        """Dispatch *func_name* on the ACL driver and report the outcome
        (ERROR / ACTIVE / deleted) back to the plugin.

        A delete returns only a success flag; create/update also return a
        status payload that is forwarded with the ACTIVE status.
        """
        LOG.debug("_invoke_firewall_driver: %s", func_name)
        try:
            # getattr() is the idiomatic equivalent of the previous
            # self.acl_driver.__getattribute__(func_name) call.
            driver_func = getattr(self.acl_driver, func_name)
            if func_name == 'delete_firewall':
                return_code = driver_func(None, None, firewall)
                if not return_code:
                    LOG.debug("firewall %s", firewall['id'])
                    self.fw_plugin_rpc.set_firewall_status(
                        context, firewall['id'], constants.ERROR)
                else:
                    self.fw_plugin_rpc.firewall_deleted(
                        context, firewall['id'])
            else:
                return_code, status = driver_func(None, None, firewall)
                if not return_code:
                    LOG.debug("firewall %s", firewall['id'])
                    self.fw_plugin_rpc.set_firewall_status(
                        context, firewall['id'], constants.ERROR)
                else:
                    LOG.debug("status %s", status)
                    self.fw_plugin_rpc.set_firewall_status(
                        context, firewall['id'], constants.ACTIVE, status)
        except Exception:
            # Typo fixed: this reports an RPC failure (was "PRC failure").
            LOG.debug("_invoke_firewall_driver: RPC failure")
            self.fullsync = True

    def _process_firewall_pending_op(self, context, firewall_list):
        # Map each firewall's pending state to the matching driver call.
        for firewall in firewall_list:
            firewall_status = firewall['status']
            if firewall_status == 'PENDING_CREATE':
                self._invoke_firewall_driver(
                    context, firewall, 'create_firewall')
            elif firewall_status == 'PENDING_UPDATE':
                self._invoke_firewall_driver(
                    context, firewall, 'update_firewall')
            elif firewall_status == 'PENDING_DELETE':
                self._invoke_firewall_driver(
                    context, firewall, 'delete_firewall')

    def _process_fullsync(self):
        """Reconcile pending firewalls of every tenant that has firewalls."""
        LOG.debug("_process_fullsync")
        try:
            context = n_context.get_admin_context()
            tenants = self.fw_plugin_rpc.get_tenants_with_firewalls(
                context)
            LOG.debug("tenants with firewall: %s", tenants)
            for tenant_id in tenants:
                ctx = n_context.Context('', tenant_id)
                firewall_list = self.fw_plugin_rpc.get_firewalls_for_tenant(
                    ctx)
                self._process_firewall_pending_op(ctx, firewall_list)
        except Exception:
            LOG.debug("_process_fullsync: RPC failure")
            self.fullsync = True

    def _process_devices(self, device_ids):
        """Reconcile pending firewalls for the given hosting devices."""
        LOG.debug("_process_devices: device_ids %s", device_ids)
        try:
            for device_id in device_ids:
                ctx = n_context.Context('', device_id)
                firewall_list = self.fw_plugin_rpc.get_firewalls_for_device(
                    ctx)
                self._process_firewall_pending_op(ctx, firewall_list)
        except Exception:
            LOG.debug("_process_devices: RPC failure")
            self.fullsync = True

    def _process_event_q(self):
        # Drain queued plugin notifications and dispatch them to the driver.
        while True:
            try:
                event_data = self.event_q.dequeue(CSR_FW_EVENT_Q_NAME)
                if not event_data:
                    return
            except ValueError:
                # Queue is created lazily on first enqueue; nothing yet.
                LOG.debug("_process_event_q: no queue yet")
                return

            LOG.debug("_process_event_q: event_data %s", event_data)
            event = event_data['event']
            context = event_data['context']
            firewall = event_data['firewall']
            if event == CSR_FW_EVENT_CREATE:
                self._invoke_firewall_driver(
                    context, firewall, 'create_firewall')
            elif event == CSR_FW_EVENT_UPDATE:
                self._invoke_firewall_driver(
                    context, firewall, 'update_firewall')
            elif event == CSR_FW_EVENT_DELETE:
                self._invoke_firewall_driver(
                    context, firewall, 'delete_firewall')
            else:
                LOG.error(_LE("invalid event %s"), event)

    def process_service(self, device_ids=None, removed_devices_info=None):
        """Entry point invoked by the cfg agent each processing cycle."""
        try:
            if self.fullsync:
                self.fullsync = False
                self._process_fullsync()
            else:
                if device_ids:
                    self._process_devices(device_ids)
                if removed_devices_info:
                    LOG.debug("process_service: removed_devices_info %s",
                              removed_devices_info)
                    # do nothing for now
                else:
                    self._process_event_q()
        except Exception:
            LOG.exception(_LE('process_service exception ERROR'))
gaolichuang/neutron-fwaas
neutron_fwaas/services/firewall/drivers/cisco/csr_firewall_svc_helper.py
Python
apache-2.0
10,054
from collections import deque


class onetimequeue(object):
    """FIFO queue that accepts each distinct item at most once, ever.

    An item that has already been pushed is silently ignored on later
    pushes, even after it has been popped. Items must be hashable.
    """

    def __init__(self):
        self._q = deque()    # items still pending, in arrival order
        self._seen = set()   # every item ever accepted

    def push(self, obj):
        """Enqueue *obj* unless it was pushed before; duplicates are ignored."""
        if obj not in self._seen:
            self._seen.add(obj)
            self._q.append(obj)

    def pop(self):
        """Remove and return the oldest pending item.

        Raises IndexError when the queue is empty (deque.popleft semantics).
        """
        return self._q.popleft()

    def __len__(self):
        """Number of items still pending (not the number ever seen)."""
        return len(self._q)
bniemczyk/symbolic
symath/datastructures/onetimequeue.py
Python
bsd-2-clause
333
from flask_wtf import Form
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired


class LoginForm(Form):
    """Login form: an account identifier (ID or email) plus a password.

    Both inputs carry DataRequired, so validation fails with the fields'
    default required-field message when either is left blank.
    """

    # Accepts either the account ID or the registered email address.
    username = StringField('ID or Email Address', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    # NOTE(review): DataRequired() on a SubmitField is unusual -- a pressed
    # submit button always posts a value -- so this validator is presumably
    # a no-op; confirm it is intentional before removing.
    submit = SubmitField('Log In', validators=[DataRequired()])
ACM-CSUSB/Website
service/forms/login.py
Python
bsd-2-clause
370
from datetime import datetime from unittest import skipIf import copy import uuid from cassandra.cqlengine import ValidationError as CQLValidationError from django.core import validators from django.forms import fields from common.models import CassandraFamilyMember from django_cassandra_engine.test import TestCase as CassandraTestCase class TestDjangoCassandraModel(CassandraTestCase): def setUp(self): self.some_uuid = uuid.uuid4() self.family_member = CassandraFamilyMember.objects.create( id=self.some_uuid, first_name="Homer", last_name="Simpson", is_real=False, favourite_number=666, favourite_float_number=43.4, created_on=datetime.now(), ) def test_model_is_hashable(self): models = set() models.add(self.family_member) self.assertEqual(1, len(models)) def test_serializable_value(self): self.assertEqual(self.some_uuid, self.family_member.serializable_value("id")) self.assertEqual( self.family_member.first_name, self.family_member.serializable_value("first_name"), ) def test_clone_queryset(self): qset = CassandraFamilyMember.objects.filter(id=self.some_uuid) self.assertNotEqual(id(qset._clone()), id(qset)) def test_create(self): family_member = self.family_member self.assertEqual(family_member.first_name, "Homer") self.assertEqual(family_member.last_name, "Simpson") self.assertEqual(family_member.is_real, False) self.assertEqual(family_member.favourite_number, 666) self.assertEqual(family_member.favourite_float_number, 43.4) def test_get_by_pk(self): got_family_member = CassandraFamilyMember.objects.allow_filtering().get( pk=self.family_member.id ) self.assertIsNotNone(got_family_member) def test_exclude(self): results = CassandraFamilyMember.objects.exclude(id=self.some_uuid) for model in results: self.assertNotEqual(model.id, self.some_uuid) def test_exclude_after_filter(self): results = CassandraFamilyMember.objects.filter(id=self.some_uuid).exclude( last_name="Simpson" ) self.assertEqual(len(results), 0) def test_exclude_after_all(self): keeper = 
CassandraFamilyMember.objects.create( id=uuid.uuid4(), first_name="Ned", last_name="Flanders", is_real=False, favourite_number=666, favourite_float_number=43.4, created_on=datetime.now(), ) results = CassandraFamilyMember.objects.all().exclude(last_name="Simpson") self.assertEqual(len(results), 1) self.assertEqual(results[0].id, keeper.id) def test_get_by_pk_returns_primary_key_instead_of_partition_key(self): got_family_member = CassandraFamilyMember.objects.allow_filtering().get( pk=self.family_member.id ) self.assertEqual(got_family_member.pk, self.family_member.id) def test_default_manager_is_set(self): self.assertTrue( isinstance( CassandraFamilyMember._default_manager, type(CassandraFamilyMember.objects), ) ) self.assertTrue( isinstance( CassandraFamilyMember._base_manager, type(CassandraFamilyMember.objects), ) ) self.assertTrue(hasattr(CassandraFamilyMember._default_manager, "all")) self.assertTrue(hasattr(CassandraFamilyMember._default_manager, "filter")) def test_get_queryset(self): results = CassandraFamilyMember.objects.get_queryset() self.assertTrue(results[0].id, self.some_uuid) def test_calling_queryset_methods_not_through_manager_raises(self): with self.assertRaises(AttributeError): CassandraFamilyMember.all() with self.assertRaises(AttributeError): CassandraFamilyMember.get() with self.assertRaises(AttributeError): CassandraFamilyMember.filter() def test_manager_has_a_name(self): self.assertEqual(CassandraFamilyMember._default_manager.name, "objects") def test_can_migrate(self): self.assertFalse(CassandraFamilyMember._meta.can_migrate(connection=None)) def test_get_all_related_objects_with_model(self): self.assertEqual( CassandraFamilyMember._meta.get_all_related_objects_with_model(), [], ) def test_related_objects_property(self): self.assertEqual(CassandraFamilyMember._meta.related_objects, []) def test_db_table(self): self.assertEqual( CassandraFamilyMember._meta.db_table, "common_cassandrafamilymember", ) def test_pk_attribute(self): 
self.assertEqual( CassandraFamilyMember._meta.pk, CassandraFamilyMember._meta.get_field("id"), ) def test_get_fields(self): expected_field_names = [ "id", "first_name", "last_name", "is_real", "favourite_number", "favourite_float_number", "created_on", ] fields = CassandraFamilyMember._meta._get_fields() self.assertEqual(len(fields), len(expected_field_names)) self.assertEqual([f.name for f in fields], expected_field_names) def test_meta_attrs(self): self.assertEqual( CassandraFamilyMember._meta.model_name, "cassandrafamilymember" ) self.assertEqual(CassandraFamilyMember._meta.swappable, False) self.assertEqual(CassandraFamilyMember._meta.managed, False) def test_values_list_with_id_pk_field_returns_it(self): all_things = CassandraFamilyMember.objects.allow_filtering().filter( id=self.some_uuid ) self.assertEqual( list(all_things.values_list("id", flat=True)), [self.some_uuid] ) def test_values_list_with_pk_returns_the_primary_key_field_uuid(self): all_things = CassandraFamilyMember.objects.allow_filtering().filter( id=self.some_uuid ) model = all_things[0] self.assertEqual( list(all_things.values_list("pk")), [ [ model.id, model.first_name, model.last_name, model.favourite_float_number, ] ], ) def test_values_list_with_pk_can_return_multiple_pks(self): some_uuid = uuid.uuid4() family_member = CassandraFamilyMember.objects.create( id=some_uuid, first_name="Homer", last_name="Simpson", is_real=False, favourite_number=666, favourite_float_number=43.4, created_on=datetime.now(), ) all_things = CassandraFamilyMember.objects.allow_filtering().filter( id=some_uuid ) expected = [ [ family_member.id, family_member.first_name, family_member.last_name, family_member.favourite_float_number, ] ] self.assertEqual(len(all_things.values_list("pk")), len(expected)) def test_private_fields_are_set(self): private_fields = [f.name for f in CassandraFamilyMember._meta.private_fields] expected_private_fields = [ "id", "first_name", "last_name", "is_real", "favourite_number", 
"favourite_float_number", "created_on", ] self.assertEqual(private_fields, expected_private_fields) def test_model_doesnotexist_is_raised_when_record_not_found(self): with self.assertRaises(CassandraFamilyMember.DoesNotExist): not_found_uuid = uuid.uuid4() CassandraFamilyMember.objects.allow_filtering().get(id=not_found_uuid) class TestDjangoCassandraField(CassandraTestCase): def setUp(self): self.some_uuid = uuid.uuid4() self.family_member = CassandraFamilyMember.objects.create( id=self.some_uuid, first_name="Homer", last_name="Simpson", is_real=False, favourite_number=666, favourite_float_number=43.4, created_on=datetime.now(), ) def test_attributes(self): model_fields = self.family_member._meta._get_fields() for field in model_fields: allow_null = ( not field.required and not field.is_primary_key and not field.partition_key ) or field.has_default self.assertEqual(field.unique_for_date, None) self.assertEqual(field.unique_for_month, None) self.assertEqual(field.unique_for_year, None) self.assertEqual(field.db_column, None) self.assertEqual(field.db_index, field.index) self.assertEqual(field.null, allow_null) self.assertEqual(field.blank, allow_null) self.assertEqual(field.choices, []) self.assertEqual(field.flatchoices, []) self.assertEqual(field.help_text, "") self.assertEqual(field.concrete, True) self.assertEqual(field.editable, True) self.assertEqual(field.many_to_many, False) self.assertEqual(field.many_to_one, False) self.assertEqual(field.one_to_many, False) self.assertEqual(field.one_to_one, False) self.assertEqual(field.hidden, False) self.assertEqual(field.serialize, True) self.assertEqual(field.name, field.db_field_name) self.assertEqual(field.verbose_name, field.db_field_name) self.assertEqual(field._verbose_name, field.db_field_name) self.assertEqual(field.field, field) self.assertEqual(field.model, type(self.family_member)) self.assertEqual(field.related_query_name(), None) self.assertEqual(field.auto_created, False) 
self.assertEqual(field.is_relation, False) self.assertEqual(field.remote_field, None) self.assertEqual(field.rel, None) self.assertEqual(field.rel, None) self.assertEqual(field.unique, field.is_primary_key) self.assertEqual(field.attname, field.column_name) self.assertEqual(field.validators, []) self.assertEqual(field.empty_values, list(validators.EMPTY_VALUES)) def test_methods(self): model_fields = self.family_member._meta._get_fields() for field in model_fields: self.assertEqual(field.get_attname(), field.attname) self.assertEqual(field.get_cache_name(), "_{}_cache".format(field.name)) self.assertEqual( field.value_to_string(self.family_member), str(getattr(self.family_member, field.name)), ) self.assertEqual( field.pre_save(self.family_member, True), getattr(self.family_member, field.name), ) self.assertEqual( field.get_prep_value(self.family_member.id), self.some_uuid ) self.assertEqual( field.get_db_prep_save(self.family_member.id, connection=None), self.some_uuid, ) self.assertTrue(isinstance(field.formfield(), fields.CharField)) self.assertEqual(field.get_internal_type(), field.__class__.__name__) self.assertEqual( field.get_attname_column(), (field.db_field_name, field.db_field_name), ) self.assertEqual(field.get_db_converters(), []) field_with_default = self.family_member._meta.get_field("id") self.assertTrue( isinstance(field_with_default.get_default(), type(self.family_member.id)) ) # in Django, 'has_default' is a function, while in python-driver # it is a property unfortunately. 
self.assertEqual(field_with_default.has_default, True) text_field = self.family_member._meta.get_field("last_name") text_field.save_form_data(instance=self.family_member, data="new data") self.assertEqual(self.family_member.last_name, "new data") self.assertIsNone(field.run_validators(text_field.value)) def test_methods_which_are_not_implemented_raise(self): model_fields = self.family_member._meta._get_fields() methods_that_should_raise = ( "get_choices", "get_choices_default", "select_format", "deconstruct", "db_type_suffix", "get_prep_lookup", "get_db_prep_lookup", "set_attributes_from_name", "db_parameters", "get_col", ) for field in model_fields: for method_name in methods_that_should_raise: with self.assertRaises(NotImplementedError): getattr(field, method_name)() def test_get_pk_value_on_save_returns_true_if_field_has_default(self): field_with_default = self.family_member._meta.get_field("id") self.assertTrue( field_with_default.get_pk_value_on_save(instance=self.family_member), self.family_member.id, ) def test_get_pk_value_on_save_returns_none_if_field_no_default(self): field_without_default = self.family_member._meta.get_field("last_name") self.assertIsNone( field_without_default.get_pk_value_on_save(instance=self.family_member), ) def test_formfield_uses_specified_form_class(self): text_field = self.family_member._meta.get_field("last_name") form_field = text_field.formfield(form_class=fields.BooleanField) self.assertTrue(isinstance(form_field, fields.BooleanField)) def test_field_check_returns_error_when_name_is_pk(self): text_field = copy.deepcopy(self.family_member._meta.get_field("last_name")) text_field.name = "pk" check_errors = text_field.check() self.assertEqual(len(check_errors), 1) def test_field_check_returns_error_when_name_ends_underscore(self): text_field = copy.deepcopy(self.family_member._meta.get_field("last_name")) text_field.name = "name_" check_errors = text_field.check() self.assertEqual(len(check_errors), 1) def 
test_field_check_returns_error_when_name_contains_double_under(self): text_field = copy.deepcopy(self.family_member._meta.get_field("last_name")) text_field.name = "some__name" check_errors = text_field.check() self.assertEqual(len(check_errors), 1) def test_field_clean(self): text_field = copy.deepcopy(self.family_member._meta.get_field("last_name")) self.assertEqual(text_field.clean("some val", self.family_member), "some val") def test_field_client_raises_when_value_is_not_valid(self): text_field = copy.deepcopy(self.family_member._meta.get_field("last_name")) with self.assertRaises(CQLValidationError): text_field.clean(123, self.family_member) def test_get_filter_kwargs_for_object(self): text_field = self.family_member._meta.get_field("last_name") self.assertEqual( text_field.get_filter_kwargs_for_object(obj=self.family_member), {"last_name": self.family_member.last_name}, ) id_field = self.family_member._meta.get_field("id") self.assertEqual( id_field.get_filter_kwargs_for_object(obj=self.family_member), {"id": self.family_member.id}, )
r4fek/django-cassandra-engine
testproject/common/tests/test_django_cassandra_models.py
Python
bsd-2-clause
15,971
import datetime

from pystogram.tree import PrefixTree

# Bucket widths, expressed in seconds.
SECOND = 1
MINUTE = SECOND * 60
HOUR = MINUTE * 60
DAY = HOUR * 24
MONTH = DAY * 30
YEAR = DAY * 365

# The multiplier applied when testing timestamp interval to guess a resolution.
# A value of 2.0 means the timestamp interval must be greater than 24 months in
# order to use a resolution of years
RESOLUTION_SCALE = 2.0


def prefix(timestamp, resolution):
    """
    Compute and return a key prefix (a struct_time head) for this timestamp.
    """
    # The year component is always kept; each unit finer than the requested
    # resolution contributes one more component (month, day, hour, ...).
    length = 1
    for unit in (YEAR, MONTH, DAY, HOUR, MINUTE):
        if resolution < unit:
            length += 1
    return timestamp.timetuple()[:length]


class Histogram(object):
    """
    An informal histogram for time-series data: samples are counted into a
    prefix tree, then grouped into equally-sized buckets whose aggregate
    counts can be read off.
    """

    def __init__(self):
        """ Construct a Histogram instance. """
        self.tree = PrefixTree()

    def count(self, timestamp):
        """ Increment the count for this timestamp. """
        self.tree.incr(timestamp)

    @property
    def first_sample(self):
        # tree.least() yields a time-tuple prefix; rebuild a datetime from it.
        return datetime.datetime(*self.tree.least())

    @property
    def last_sample(self):
        return datetime.datetime(*self.tree.greatest())

    @property
    def sample_interval(self):
        delta = self.last_sample - self.first_sample
        return delta.total_seconds()

    @property
    def sample_resolution(self):
        """
        Compute a reasonable bucket resolution based on the sample interval:
        the coarsest unit the interval exceeds by RESOLUTION_SCALE.
        """
        span = self.sample_interval
        for unit in (YEAR, MONTH, DAY, HOUR, MINUTE):
            if span > unit * RESOLUTION_SCALE:
                return unit
        return SECOND

    def buckets(self, resolution=None):
        """
        Generate and yield buckets sized according to the passed resolution,
        or to a guessed resolution when none is given.
        """
        earliest = self.first_sample
        latest = self.last_sample

        if resolution is None:
            resolution = self.sample_resolution
        step = datetime.timedelta(seconds=resolution)

        current = earliest
        while current <= latest:
            # insert() returns the (possibly empty) node for this interval,
            # creating it when no sample fell inside the bucket.
            node = self.tree.insert(prefix(current, resolution))
            yield Bucket(current, node)
            current += step


class Bucket(object):
    """ Histogram bucket for a given time interval. """

    def __init__(self, start, node):
        self.start = start
        self.node = node
        self.count = node.sum()
claymation/pystogram
pystogram/histogram.py
Python
bsd-2-clause
3,484
import logging
import traceback
from functools import wraps
import os
import re

from django.conf import settings
from django.db import connection
from django.db.models import ManyToManyField

logger = logging.getLogger(__name__)


def debug_pg_notices(f):
    """Decorator relaying PostgreSQL notices produced while *f* runs
    (typically trigger RAISE NOTICE output) to the logger, grouped by
    their CONTEXT line.
    """
    @wraps(f)
    def wrapped(*args, **kwargs):
        r = None
        # Drop notices left over from earlier statements so only output
        # produced by f itself is reported.
        if connection.connection:
            del connection.connection.notices[:]
        try:
            r = f(*args, **kwargs)
        finally:
            # Show triggers output
            allnotices = []
            current = ''
            if connection.connection:
                notices = []
                for notice in connection.connection.notices:
                    try:
                        notice, context = notice.split('CONTEXT:', 1)
                        context = re.sub(r"\s+", " ", context)
                    except ValueError:
                        # Notice without a CONTEXT section.
                        context = ''
                    notices.append((context, notice))
                    if context != current:
                        # Context changed: close the current group.
                        allnotices.append(notices)
                        notices = []
                        current = context
                allnotices.append(notices)
            current = ''
            for notices in allnotices:
                for context, notice in notices:
                    if context != current:
                        if context != '':
                            logger.debug('Context %s...:' % context.strip()[:80])
                        current = context
                    # Strip the driver's per-line prefix for readability.
                    notice = notice.replace('NOTICE: ', '')
                    logger.debug(notice.strip())
        return r
    return wrapped


def load_sql_files(app, stage):
    """
    Look for SQL files in Django app, and load them into database.
    We remove RAISE NOTICE instructions from SQL outside unit testing
    since they lead to interpolation errors of '%' character in python.
    """
    app_dir = app.path
    sql_dir = os.path.normpath(os.path.join(app_dir, 'sql'))
    custom_sql_dir = os.path.join(settings.VAR_DIR, 'conf/extra_sql', app.label)
    sql_files = []
    r = re.compile(r'^{}_.*\.sql$'.format(stage))
    if os.path.exists(sql_dir):
        sql_files += [
            os.path.join(sql_dir, f)
            for f in os.listdir(sql_dir)
            if r.match(f) is not None
        ]
    if os.path.exists(custom_sql_dir):
        sql_files += [
            os.path.join(custom_sql_dir, f)
            for f in os.listdir(custom_sql_dir)
            if r.match(f) is not None
        ]
    sql_files.sort()

    cursor = connection.cursor()
    for sql_file in sql_files:
        try:
            logger.info("Loading initial SQL data from '%s'" % sql_file)
            # 'with' guarantees the file handle is closed even if read fails.
            with open(sql_file) as f:
                sql = f.read()
            if not settings.TEST and not settings.DEBUG:
                # Remove RAISE NOTICE (/!\ only one-liners)
                sql = re.sub(r"\n.*RAISE NOTICE.*\n", "\n", sql)
                # TODO: this is the ugliest driver hack ever
                sql = sql.replace('%', '%%')

            # Replace curly braces with settings values
            pattern = re.compile(r'{{\s*([^\s]*)\s*}}')
            for m in pattern.finditer(sql):
                value = getattr(settings, m.group(1))
                sql = sql.replace(m.group(0), str(value))

            # Replace sharp braces with schemas
            pattern = re.compile(r'{#\s*([^\s]*)\s*#}')
            for m in pattern.finditer(sql):
                try:
                    value = settings.DATABASE_SCHEMAS[m.group(1)]
                except KeyError:
                    value = settings.DATABASE_SCHEMAS.get('default', 'public')
                sql = sql.replace(m.group(0), str(value))

            cursor.execute(sql)
        except Exception as e:
            logger.critical("Failed to install custom SQL file '%s': %s\n" %
                            (sql_file, e))
            traceback.print_exc()
            raise


def set_search_path():
    """Set the session search_path to every existing schema plus the
    configured ones, with 'public' first.
    """
    # Set search path with all existing schema + new ones
    cursor = connection.cursor()
    cursor.execute('SELECT schema_name FROM information_schema.schemata')
    search_path = set([s[0] for s in cursor.fetchall()
                       if not s[0].startswith('pg_')])
    search_path |= set(settings.DATABASE_SCHEMAS.values())
    search_path.discard('public')
    search_path.discard('information_schema')
    search_path = ('public', ) + tuple(search_path)
    cursor.execute('SET search_path TO {}'.format(', '.join(search_path)))


def move_models_to_schemas(app):
    """
    Move models tables to PostgreSQL schemas.

    Views, functions and triggers will be moved in Geotrek app SQL files.
    """
    default_schema = settings.DATABASE_SCHEMAS.get('default', 'public')
    app_schema = settings.DATABASE_SCHEMAS.get(app.name, default_schema)

    table_schemas = {}
    for model in app.get_models():
        model_name = model._meta.model_name
        table_name = model._meta.db_table
        model_schema = settings.DATABASE_SCHEMAS.get(model_name, app_schema)
        table_schemas.setdefault(model_schema, []).append(table_name)

        for field in model._meta.get_fields():
            if isinstance(field, ManyToManyField):
                table_schemas[model_schema].append(field.m2m_db_table())

    cursor = connection.cursor()

    for schema_name in table_schemas.keys():
        # BUG FIX: previously interpolated `model_schema` (the leftover loop
        # variable from the model loop above), so with several schemas only
        # the last model's schema was ever created. Use the loop variable.
        sql = "CREATE SCHEMA IF NOT EXISTS %s;" % schema_name
        cursor.execute(sql)
        logger.info("Created schema %s" % schema_name)

    for schema_name, tables in table_schemas.items():
        for table_name in tables:
            # Move the table only if it is not already in the target schema.
            sql = ("SELECT 1 FROM information_schema.tables "
                   "WHERE table_name=%s AND table_schema!=%s")
            cursor.execute(sql, [table_name, schema_name])
            if cursor.fetchone():
                sql = "ALTER TABLE %s SET SCHEMA %s;" % (table_name, schema_name)
                cursor.execute(sql)
                logger.info("Moved %s to schema %s" % (table_name, schema_name))

    # For Django, search_path is set in connection options.
    # But when accessing the database using QGis or ETL, search_path must be
    # set database level (for all users, and for this database only).
    if app.name == 'geotrek.common':
        dbname = settings.DATABASES['default']['NAME']
        dbuser = settings.DATABASES['default']['USER']
        search_path = ', '.join(('public', ) +
                                tuple(set(settings.DATABASE_SCHEMAS.values())))
        sql = "ALTER ROLE %s IN DATABASE %s SET search_path=%s;" % (
            dbuser, dbname, search_path)
        cursor.execute(sql)
GeotrekCE/Geotrek-admin
geotrek/common/utils/postgresql.py
Python
bsd-2-clause
6,597
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding unique constraint on 'Assessment', fields ['user', 'sample_result'] db.create_unique('assessment', ['user_id', 'sample_result_id']) def backwards(self, orm): # Removing unique constraint on 'Assessment', fields ['user', 'sample_result'] db.delete_unique('assessment', ['user_id', 'sample_result_id']) models = { 'assessments.assessment': { 'Meta': {'unique_together': "(('sample_result', 'user'),)", 'object_name': 'Assessment', 'db_table': "'assessment'"}, 'assessment_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessments.AssessmentCategory']", 'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'evidence_details': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'father_result': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'father'", 'to': "orm['assessments.ParentalResult']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'mother_result': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mother'", 'to': "orm['assessments.ParentalResult']"}), 'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'pathogenicity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessments.Pathogenicity']"}), 'sample_result': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Result']"}), 'sanger_requested': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'sanger_result': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessments.SangerResult']", 'null': 'True', 'blank': 'True'}), 'user': 
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'assessments.assessmentcategory': { 'Meta': {'object_name': 'AssessmentCategory', 'db_table': "'assessment_category'"}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}) }, 'assessments.parentalresult': { 'Meta': {'object_name': 'ParentalResult', 'db_table': "'parental_result'"}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}) }, 'assessments.pathogenicity': { 'Meta': {'object_name': 'Pathogenicity', 'db_table': "'pathogenicity'"}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}) }, 'assessments.sangerresult': { 'Meta': {'object_name': 'SangerResult', 'db_table': "'sanger_result'"}, 'confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'genome.chromosome': { 'Meta': {'ordering': "['order']", 'object_name': 'Chromosome', 'db_table': "'chromosome'"}, 'code': 
('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'}) }, 'genome.genotype': { 'Meta': {'object_name': 'Genotype', 'db_table': "'genotype'"}, 'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '3'}) }, 'literature.pubmed': { 'Meta': {'object_name': 'PubMed', 'db_table': "'pubmed'"}, 'pmid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}) }, 'phenotypes.phenotype': { 'Meta': {'object_name': 'Phenotype', 'db_table': "'phenotype'"}, 'articles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['literature.PubMed']", 'symmetrical': 'False'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'hpo_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'term': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1000'}) }, 'samples.batch': { 'Meta': {'ordering': "('project', 'label')", 'unique_together': "(('project', 'name'),)", 'object_name': 'Batch', 'db_table': "'batch'"}, 'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'investigator': 
('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batches'", 'to': "orm['samples.Project']"}), 'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'samples.person': { 'Meta': {'object_name': 'Person', 'db_table': "'person'"}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'mrn': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}), 'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'proband': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'relations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['samples.Person']", 'through': "orm['samples.Relation']", 'symmetrical': 'False'}), 'sex': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}) }, 'samples.project': { 'Meta': {'unique_together': "(('name',),)", 'object_name': 'Project', 'db_table': "'project'"}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 
'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}) }, 'samples.relation': { 'Meta': {'ordering': "('person', '-generation')", 'object_name': 'Relation', 'db_table': "'relation'"}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'generation': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'family'", 'to': "orm['samples.Person']"}), 'relative': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'relative_of'", 'to': "orm['samples.Person']"}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '20'}) }, 'samples.result': { 'Meta': {'unique_together': "(('sample', 'variant'),)", 'object_name': 'Result', 'db_table': "'sample_result'"}, 'base_counts': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'baseq_rank_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'coverage_alt': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'coverage_ref': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'downsampling': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}), 'fisher_strand': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'genotype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genome.Genotype']", 
'null': 'True', 'blank': 'True'}), 'genotype_quality': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'haplotype_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'homopolymer_run': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'in_dbsnp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'mq': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'mq0': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'mq_rank_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'phred_scaled_likelihood': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'quality': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), 'quality_by_depth': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'raw_read_depth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), 'read_depth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), 'read_pos_rank_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'sample': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'results'", 'to': "orm['samples.Sample']"}), 'spanning_deletions': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'strand_bias': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'variant': ('django.db.models.fields.related.ForeignKey', [], {'to': 
"orm['variants.Variant']"}) }, 'samples.sample': { 'Meta': {'ordering': "('project', 'batch', 'label')", 'unique_together': "(('batch', 'name', 'version'),)", 'object_name': 'Sample', 'db_table': "'sample'"}, 'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'samples'", 'to': "orm['samples.Batch']"}), 'bio_sample': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'md5': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'samples'", 'null': 'True', 'to': "orm['samples.Person']"}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'samples'", 'to': "orm['samples.Project']"}), 'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'version': ('django.db.models.fields.IntegerField', [], {}) }, 'variants.variant': { 'Meta': {'unique_together': "(('chr', 'pos', 'ref', 'alt'),)", 'object_name': 'Variant', 'db_table': "'variant'"}, 'alt': ('django.db.models.fields.TextField', [], {'db_index': 'True'}), 'articles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['literature.PubMed']", 'db_table': "'variant_pubmed'", 'symmetrical': 'False'}), 'chr': ('django.db.models.fields.related.ForeignKey', [], {'to': 
"orm['genome.Chromosome']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'liftover': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}), 'md5': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'phenotypes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['phenotypes.Phenotype']", 'through': "orm['variants.VariantPhenotype']", 'symmetrical': 'False'}), 'pos': ('django.db.models.fields.IntegerField', [], {}), 'ref': ('django.db.models.fields.TextField', [], {'db_index': 'True'}), 'rsid': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.VariantType']", 'null': 'True'}) }, 'variants.variantphenotype': { 'Meta': {'object_name': 'VariantPhenotype', 'db_table': "'variant_phenotype'"}, 'hgmd_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '30', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'phenotype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['phenotypes.Phenotype']"}), 'variant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variant_phenotypes'", 'to': "orm['variants.Variant']"}) }, 'variants.varianttype': { 'Meta': {'ordering': "['order']", 'object_name': 'VariantType', 'db_table': "'variant_type'"}, 'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '20'}) } } complete_apps = ['assessments']
chop-dbhi/varify-data-warehouse
vdw/assessments/migrations/0007_auto__add_unique_assessment_user_sample_result.py
Python
bsd-2-clause
20,310
from django.test import TestCase, Client from sendgrid import utils, signals import json class SignalTestCase(TestCase): def setUp(self): self.client = Client() self.email_data = {'subject': 'Test Subject', 'body': 'Hi, I am a test body', 'from_email': 'email@example.com', 'to': ('other_email@example.com', )} def test_received_email(self): """ Test signals triggered by sendgrid callback. """ data = [] def email_event_handler(sender, signal): data.append((sender, signal, )) signals.email_event.connect(email_event_handler) # check if we received signals self.assertEqual(len(data), 0) message = utils.SendgridEmailMessage(**self.email_data) message.send() # check if we received the signal triggered by the email creation self.assertEqual(len(data), 1) response = self.client.post('/sendgrid_callback/', data=json.dumps([{ 'email': 'other_email@example.com', 'uuid': message.uuid, 'event': 'processed', 'timestamp': '123456789', }, ]), content_type='application/json') # verify that we received a signal self.assertEqual(len(data), 2) self.assertEqual(data[1][0].event, 'processed') self.assertEqual(data[1][0].uuid, message.uuid) self.assertEqual(response.status_code, 200) def test_dupe_signals(self): """ Test handling of duplicate signals. 
""" data = [] def email_event_handler(sender, signal): data.append((sender, signal, )) signals.email_event.connect(email_event_handler) # check if we received signals self.assertEqual(len(data), 0) message = utils.SendgridEmailMessage(**self.email_data) message.send() # check if we received the signal triggered by the email creation self.assertEqual(len(data), 1) response = self.client.post('/sendgrid_callback/', data=json.dumps([{ 'email': 'other_email@example.com', 'uuid': message.uuid, 'event': 'delivered', 'timestamp': '123456789', }, ]), content_type='application/json') # verify that we received a signal self.assertEqual(len(data), 2) self.assertEqual(data[1][0].event, 'delivered') self.assertEqual(data[1][0].uuid, message.uuid) self.assertEqual(response.status_code, 200) response = self.client.post('/sendgrid_callback/', data=json.dumps([{ 'email': 'other_email@example.com', 'uuid': message.uuid, 'event': 'delivered', 'timestamp': '123456790', }, ]), content_type='application/json') # verify that we received a signal self.assertEqual(len(data), 2) self.assertEqual(response.status_code, 200)
resmio/django-sendgrid
sendgrid/tests/test_signals.py
Python
bsd-2-clause
3,625
#! /usr/bin/env python import signal import pickle import platform import socket import time from pika import BasicProperties import portbuild.qthreads as qthreads import portbuild.torrent as torrent import portbuild.util as util ready = [] class AgentProducer(qthreads.QueueProducerThread): def __init__(self): qthreads.QueueProducerThread.__init__(self) def setup(self): """Set up AgentProducer.""" self.channel = self.connection.channel() self.channel.add_on_return_callback(self.notify_return) util.debug("AgentProducer is all set!") def teardown(self): """Clean up AgentProducer.""" self.channel.close() def notify(self, queue): """Send notification back to the head node.""" message = "Finished" # FIXME util.log("Sending notification on {0}".format(queue)) props = BasicProperties(content_type="text/plain", delivery_mode=1) self.channel.basic_publish(exchange="", routing_key=queue, mandatory=True, immediate=True, body=pickle.dumps(message), properties=props) def notify_return(self, method, header, body): """Notification was returned. Head node isn't listening anymore.""" # This actually is only triggered when the exchange doesn't exists, # not when the queue doesn't exist. print "Message returned!" 
class AgentConsumer(qthreads.QueueConsumerThread): def __init__(self): qthreads.QueueConsumerThread.__init__(self) def setup(self): """Set up AgentConsumer.""" self.arch = platform.machine() self.hostname = socket.gethostname() self.channel = self.connection.channel() self.channel.queue_declare(queue=self.arch, durable=True, exclusive=False, auto_delete=False) self.channel.basic_qos(prefetch_count=1) self.channel.basic_consume(self.handle_delivery, queue=self.arch) util.debug("AgentConsumer is all set!") util.debug("AgentConsumer listening on queue {0}...".format(self.arch)) def teardown(self): """Clean up AgentConsumer.""" self.channel.close() def handle_delivery(self, channel, method, header, body): """Message received callback.""" global producer message = pickle.loads(body) p = message["package"] arch = message["arch"] branch = message["branch"] buildid = message["buildid"] torrents = message["torrents"] reply_to = message["reply_to"] self.buildname = "{0}/{1}/{2}".format(arch, branch, buildid) if buildid in ready: util.log("Building {0} for build {1}".format(p.name, self.buildname)) channel.basic_ack(delivery_tag=method.delivery_tag) else: time.sleep(5) channel.basic_reject(delivery_tag=method.delivery_tag) util.log("Setting up build {0}".format(self.buildname)) ts = torrent.TorrentSession() for t in torrents: t.dest="." # XXX - Temporary while I'm testing locally. ts.add(t) while not ts.all_seeding(): ts.status() time.sleep(1) util.debug("Finished downloading components.") ts.terminate() producer.notify(reply_to) ready.append(buildid) consumer = AgentConsumer() consumer.start() producer = AgentProducer() producer.start() def shutdown(signum, stack): global loop loop = False signal.signal(signal.SIGINT, shutdown) loop = True while loop: time.sleep(3) consumer.stop() producer.stop() # vim: tabstop=2 shiftwidth=2 softtabstop=2 expandtab
flz/portbuild-ng
bin/qmanager-agent.py
Python
bsd-2-clause
3,560
""" WSGI config for auth_example project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os # We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks # if running multiple sites in the same mod_wsgi process. To fix this, use # mod_wsgi daemon mode with each site in its own daemon process, or use # os.environ["DJANGO_SETTINGS_MODULE"] = "auth_example.settings" os.environ.setdefault("DJANGO_SETTINGS_MODULE", "auth_example.settings") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application application = get_wsgi_application() # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application)
18F/django-rest-swagger
tests/auth_example/auth_example/wsgi.py
Python
bsd-2-clause
1,437
# -*- coding: utf-8 -*- # Copyright (c) 2015, Michael Droettboom All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # The views and conclusions contained in the software and # documentation are those of the authors and should not be interpreted # as representing official policies, either expressed or implied, of # the FreeBSD Project. from __future__ import print_function, unicode_literals, absolute_import Bitmap__init__ = """ A structure used to describe a bitmap or pixmap to the raster. `Bitmap` supports the Python buffer interface, so it is easy to convert it to a Numpy array. For example:: >>> import numpy as np >>> a = np.asarray(bitmap) """ Bitmap_buffer = """ Get the bitmap's contents as a buffer. 
In most cases, the preferred method to get the data is to cast the `Bitmap` object to a memoryview, since that will also have size and type information. """ Bitmap_convert = """ Convert a `Bitmap` to 8 bits per pixel. Given a `Bitmap` with depth 1bpp, 2bpp, 4bpp, or 8bpp converts it to one with depth 8bpp, making the number of used bytes per line (a.k.a. the ‘pitch’) a multiple of `alignment`. Parameters ---------- alignment : int, optional The pitch of the bitmap is a multiple of this parameter. Common values are 1, 2, or 4. Returns ------- target : Bitmap The bitmap, converted to 8bpp. """ Bitmap_num_grays = """ The number of gray levels used in the bitmap. This field is only used with `PIXEL_MODE.GRAY`. """ Bitmap_pitch = """ The number of bytes taken by one bitmap row. Includes padding. The pitch is positive when the bitmap has a ‘down’ flow, and negative when it has an ‘up’ flow. In all cases, the pitch is an offset to add to a bitmap pointer in order to go down one row. Note that ‘padding’ means the alignment of a bitmap to a byte border, and FreeType functions normally align to the smallest possible integer value. For the B/W rasterizer, `pitch` is always an even number. To change the pitch of a bitmap (say, to make it a multiple of 4), use `Bitmap.convert`. Alternatively, you might use callback functions to directly render to the application's surface. """ Bitmap_pixel_mode = """ The `PIXEL_MODE`, i.e., how pixel bits are stored. """ Bitmap_rows = """ The number of bitmap rows. """ Bitmap_to_list = """ |freetypy| Convert the bitmap to a nested list. """ Bitmap_width = """ The number of pixels in bitmap row. """ PIXEL_MODE = """ Constants related to the pixel mode of bitmaps. - `MONO`: A monochrome bitmap, using 1 bit per pixel. Note that pixels are stored in most-significant order (MSB), which means that the left-most pixel in a byte has value 128. - `GRAY`: An 8-bit bitmap, generally used to represent anti-aliased glyph images. 
Each pixel is stored in one byte. Note that the number of ‘gray’ levels is stored in the ‘num_grays’ field of the Bitmap structure (it generally is 256). - `GRAY2`: A 2-bit per pixel bitmap, used to represent embedded anti-aliased bitmaps in font files according to the OpenType specification. We haven't found a single font using this format, however. - `GRAY4`: A 4-bit per pixel bitmap, representing embedded anti-aliased bitmaps in font files according to the OpenType specification. We haven't found a single font using this format, however. - `LCD`: An 8-bit bitmap, representing RGB or BGR decimated glyph images used for display on LCD displays; the bitmap is three times wider than the original glyph image. See also `RENDER_MODE.LCD`. On many freetype builds, this functionality will be disabled due to patent restrictions, in which case the resulting bitmap will be grayscale. - `LCD_V`: An 8-bit bitmap, representing RGB or BGR decimated glyph images used for display on rotated LCD displays; the bitmap is three times taller than the original glyph image. See also `RENDER_MODE.LCD_V`. On many freetype builds, this functionality will be disabled due to patent restrictions, in which case the resulting bitmap will be grayscale. """
mdboom/freetypy
docstrings/bitmap.py
Python
bsd-2-clause
5,308
# -*- coding: UTF-8 -*- # Copyright 2009-2014 by Luc Saffre. # License: BSD, see file LICENSE for more details. """ Documented in :ref:`dpy`. """ import logging logger = logging.getLogger(__name__) from StringIO import StringIO import os import imp from decimal import Decimal from django.conf import settings from django.db import models from django.utils.module_loading import import_by_path from django.db import IntegrityError from django.db.models.fields import NOT_PROVIDED from django.core.serializers import base from django.core.exceptions import ValidationError, ObjectDoesNotExist from django.contrib.contenttypes.models import ContentType from django.contrib.sessions.models import Session from django.utils.encoding import smart_unicode, is_protected_type, force_unicode from django.utils import translation from djangosite.dbutils import obj2str, sorted_models_list, full_model_name from north import dbutils SUFFIX = '.py' def create_mti_child(parent_model, pk_, child_model, **kw): """ Similar to :func:`insert_child`, but very tricky. Used by :mod:`north.dumppy` The return value is an "almost normal" model instance, whose `save` and `full_clean` methods have been hacked. They are the only methods that will be called by :class:`north.dpy.Deserializer`. You should not use this instance for anything else and throw it away when the save() has been called. 
""" parent_link_field = child_model._meta.parents.get(parent_model, None) if parent_link_field is None: raise ValidationError("A %s cannot be parent for a %s" % ( parent_model.__name__, child_model.__name__)) if True: ignored = {} for f in parent_model._meta.fields: if f.name in kw: ignored[f.name] = kw.pop(f.name) kw[parent_link_field.name + "_id"] = pk_ if ignored: raise Exception( "create_mti_child() %s %s from %s : ignored non-local fields %s" % ( child_model.__name__, pk_, parent_model.__name__, ignored)) child_obj = child_model(**kw) else: attrs = {} attrs[parent_link_field.name + "_id"] = pk_ #~ for lf in child_model._meta.local_fields: # backwards compat 20111211 : python fixtures created by Version 1.2.8 still # specify also field values of parent_model. Ignore these silently # otherwise Django would also try to create a parent_model record. for f, m in child_model._meta.get_fields_with_model(): if m is None or not issubclass(m, child_model): #~ if m is None or m is child_model or not issubclass(m,parent_model): if f.name in kw: attrs[f.name] = kw.pop(f.name) if kw: logging.warning( "create_mti_child() %s %s from %s : ignored non-local fields %s", child_model.__name__, pk_, parent_model.__name__, kw) child_obj = child_model(**attrs) def full_clean(*args, **kw): pass def save(*args, **kw): kw.update(raw=True, force_insert=True) child_obj.save_base(**kw) child_obj.save = save child_obj.full_clean = full_clean return child_obj class Serializer(base.Serializer): """Serializes a QuerySet to a py stream. Usage: ``manage.py dumpdata --format py`` DEPRECATED. The problem with this approach is that a serializer creates -by definition- one single file. And Python needs -understandably- to load a module completely into memory before it can be executed. Use :manage:`dump2py` instead. """ internal_use_only = False write_preamble = True # may be set to False e.g. 
by testcases models = None def serialize(self, queryset, **options): self.options = options self.stream = options.get("stream", StringIO()) self.selected_fields = options.get("fields") self.use_natural_keys = options.get("use_natural_keys", False) if self.write_preamble: current_version = settings.SITE.version if '+' in current_version: logger.warning( "Dumpdata from intermediate version %s" % current_version) self.stream.write('# -*- coding: UTF-8 -*-\n') self.stream.write('''\ """ This is a `Python dump <http://north.lino-framework.org>`_ created using %s. ''' % settings.SITE.using_text()) #~ self.stream.write(settings.SITE.welcome_text()) self.stream.write(''' """ from __future__ import unicode_literals ''') self.stream.write('SOURCE_VERSION = %r\n' % str(current_version)) self.stream.write('from decimal import Decimal\n') self.stream.write('from datetime import datetime as dt\n') self.stream.write('from datetime import time,date\n') #~ self.stream.write('from north import dbutils\n') self.stream.write('from north.dpy import create_mti_child\n') self.stream.write('from north.dbutils import resolve_model\n') self.stream.write( 'from django.contrib.contenttypes.models import ContentType\n') self.stream.write('from django.conf import settings\n') self.stream.write(''' def new_content_type_id(m): if m is None: return m # if not fmn: return None # m = resolve_model(fmn) ct = ContentType.objects.get_for_model(m) if ct is None: return None return ct.pk ''') #~ s = ','.join([ #~ '%s=values[%d]' % (k,i) #~ for i,k in enumerate(settings.SITE.AVAILABLE_LANGUAGES)]) s = ','.join([ '%s=values[%d]' % (lng.name, lng.index) for lng in settings.SITE.languages]) self.stream.write(''' def bv2kw(fieldname,values): """ Needed if `Site.languages` changed between dumpdata and loaddata """ return settings.SITE.babelkw(fieldname,%s) ''' % s) #~ model = queryset.model if self.models is None: self.models = sorted_models_list() # models.get_models() if self.write_preamble: for model in 
self.models: self.stream.write('%s = resolve_model("%s")\n' % ( full_model_name(model, '_'), full_model_name(model))) self.stream.write('\n') for model in self.models: fields = [f for f, m in model._meta.get_fields_with_model() if m is None] for f in fields: if getattr(f, 'auto_now_add', False): raise Exception("%s.%s.auto_now_add is True : values will be lost!" % ( full_model_name(model), f.name)) #~ fields = model._meta.local_fields #~ fields = [f for f in model._meta.fields if f.serialize] #~ fields = [f for f in model._meta.local_fields if f.serialize] self.stream.write('def create_%s(%s):\n' % ( model._meta.db_table, ', '.join([f.attname for f in fields if not getattr(f, '_lino_babel_field', False)]))) if model._meta.parents: if len(model._meta.parents) != 1: msg = "%s : model._meta.parents is %r" % ( model, model._meta.parents) raise Exception(msg) pm, pf = model._meta.parents.items()[0] child_fields = [f for f in fields if f != pf] if child_fields: attrs = ',' + ','.join([ '%s=%s' % (f.attname, f.attname) for f in child_fields]) else: attrs = '' #~ self.stream.write(' return insert_child(%s.objects.get(pk=%s),%s%s)\n' % ( #~ full_model_name(pm,'_'),pf.attname,full_model_name(model,'_'),attrs)) self.stream.write(' return create_mti_child(%s,%s,%s%s)\n' % ( full_model_name(pm, '_'), pf.attname, full_model_name(model, '_'), attrs)) else: self.stream.write(" kw = dict()\n") for f in fields: if getattr(f, '_lino_babel_field', False): continue elif isinstance(f, (dbutils.BabelCharField, dbutils.BabelTextField)): self.stream.write( ' if %s is not None: kw.update(bv2kw(%r,%s))\n' % ( f.attname, f.attname, f.attname)) else: if isinstance(f, models.DecimalField): self.stream.write( ' if %s is not None: %s = Decimal(%s)\n' % ( f.attname, f.attname, f.attname)) elif isinstance(f, models.ForeignKey) and f.rel.to is ContentType: #~ self.stream.write( #~ ' %s = ContentType.objects.get_for_model(%s).pk\n' % ( #~ f.attname,f.attname)) self.stream.write( ' %s = 
new_content_type_id(%s)\n' % ( f.attname, f.attname)) self.stream.write( ' kw.update(%s=%s)\n' % (f.attname, f.attname)) self.stream.write(' return %s(**kw)\n\n' % full_model_name(model, '_')) #~ self.start_serialization() self.stream.write('\n') model = None all_models = [] for obj in queryset: if isinstance(obj, ContentType): continue if isinstance(obj, Session): continue #~ if isinstance(obj,Permission): continue if obj.__class__ != model: model = obj.__class__ if model in all_models: raise Exception("%s instances weren't grouped!" % model) all_models.append(model) self.stream.write('\ndef %s_objects():\n' % model._meta.db_table) fields = [f for f, m in model._meta.get_fields_with_model() if m is None] fields = [ f for f in fields if not getattr(f, '_lino_babel_field', False)] self.stream.write(' yield create_%s(%s)\n' % ( obj._meta.db_table, ','.join([self.value2string(obj, f) for f in fields]))) self.stream.write('\n\ndef objects():\n') all_models = self.sort_models(all_models) for model in all_models: #~ self.stream.write(' for o in %s_objects(): yield o\n' % model._meta.db_table) self.stream.write(' yield %s_objects()\n' % model._meta.db_table) # self.stream.write('\nsettings.SITE.install_migrations(globals())\n') def sort_models(self, unsorted): sorted = [] hope = True """ 20121120 if we convert the list to a set, we gain some performance for the ``in`` tests, but we obtain a random sorting order for all independent models, making the double dump test less evident. 
""" #~ 20121120 unsorted = set(unsorted) while len(unsorted) and hope: hope = False guilty = dict() #~ print "hope for", [m.__name__ for m in unsorted] for model in unsorted: deps = set([f.rel.to for f in model._meta.fields if f.rel is not None and f.rel.to is not model and f.rel.to in unsorted]) #~ deps += [m for m in model._meta.parents.keys()] for m in sorted: if m in deps: deps.remove(m) if len(deps): guilty[model] = deps else: sorted.append(model) unsorted.remove(model) hope = True break #~ ok = True #~ for d in deps: #~ if d in unsorted: #~ ok = False #~ if ok: #~ sorted.append(model) #~ unsorted.remove(model) #~ hope = True #~ break #~ else: #~ guilty[model] = deps #~ print model.__name__, "depends on", [m.__name__ for m in deps] if unsorted: assert len(unsorted) == len(guilty) msg = "There are %d models with circular dependencies :\n" % len( unsorted) msg += "- " + '\n- '.join([ full_model_name(m) + ' (depends on %s)' % ", ".join([full_model_name(d) for d in deps]) for m, deps in guilty.items()]) for ln in msg.splitlines(): self.stream.write('\n # %s' % ln) logger.info(msg) sorted.extend(unsorted) return sorted #~ def start_serialization(self): #~ self._current = None #~ self.objects = [] #~ def end_serialization(self): #~ pass #~ def start_object(self, obj): #~ self._current = {} #~ def end_object(self, obj): #~ self.objects.append({ #~ "model" : smart_unicode(obj._meta), #~ "pk" : smart_unicode(obj._get_pk_val(), strings_only=True), #~ "fields" : self._current #~ }) #~ self._current = None def value2string(self, obj, field): if isinstance(field, (dbutils.BabelCharField, dbutils.BabelTextField)): #~ return repr([repr(x) for x in dbutils.field2args(obj,field.name)]) return repr(settings.SITE.field2args(obj, field.name)) value = field._get_val_from_obj(obj) # Protected types (i.e., primitives like None, numbers, dates, # and Decimals) are passed through as is. All other values are # converted to string first. 
if value is None: #~ if value is None or value is NOT_PROVIDED: return 'None' if isinstance(field, models.DateTimeField): d = value return 'dt(%d,%d,%d,%d,%d,%d)' % ( d.year, d.month, d.day, d.hour, d.minute, d.second) if isinstance(field, models.TimeField): d = value return 'time(%d,%d,%d)' % (d.hour, d.minute, d.second) if isinstance(field, models.ForeignKey) and field.rel.to is ContentType: ct = ContentType.objects.get(pk=value) return full_model_name(ct.model_class(), '_') #~ return "'"+full_model_name(ct.model_class())+"'" #~ return repr(tuple(value.app_label,value.model)) if isinstance(field, models.DateField): d = value return 'date(%d,%d,%d)' % (d.year, d.month, d.day) #~ return 'i2d(%4d%02d%02d)' % (d.year,d.month,d.day) if isinstance(value, (float, Decimal)): return repr(str(value)) if isinstance(value, (int, long)): return str(value) return repr(field.value_to_string(obj)) def handle_fk_field(self, obj, field): related = getattr(obj, field.name) if related is not None: if self.use_natural_keys and hasattr(related, 'natural_key'): related = related.natural_key() else: if field.rel.field_name == related._meta.pk.name: # Related to remote object via primary key related = related._get_pk_val() else: # Related to remote object via other field related = smart_unicode( getattr(related, field.rel.field_name), strings_only=True) self._current[field.name] = related def handle_m2m_field(self, obj, field): if field.rel.through._meta.auto_created: if self.use_natural_keys and hasattr(field.rel.to, 'natural_key'): m2m_value = lambda value: value.natural_key() else: m2m_value = lambda value: smart_unicode( value._get_pk_val(), strings_only=True) self._current[field.name] = [m2m_value(related) for related in getattr(obj, field.name).iterator()] #~ def getvalue(self): #~ return self.objects SUPPORT_EMPTY_FIXTURES = False # trying, but doesn't yet work if SUPPORT_EMPTY_FIXTURES: from django_site.utils import AttrDict class DummyDeserializedObject(base.DeserializedObject): 
class FakeObject: _meta = AttrDict(db_table='') object = FakeObject() def __init__(self): pass def save(self, *args, **kw): pass class FakeDeserializedObject(base.DeserializedObject): """ Imitates DeserializedObject required by loaddata. Unlike normal DeserializedObject, we *don't want* to bypass pre_save and validation methods on the individual objects. """ def __init__(self, deserializer, object): self.object = object #~ self.name = name self.deserializer = deserializer def save(self, *args, **kw): """ """ #~ print 'dpy.py',self.object #~ logger.info("Loading %s...",self.name) self.try_save(*args, **kw) #~ if self.try_save(*args,**kw): #~ self.deserializer.saved += 1 #~ else: #~ self.deserializer.save_later.append(self) def try_save(self, *args, **kw): """Try to save the specified Model instance `obj`. Return `True` on success, `False` if this instance wasn't saved and should be deferred. """ obj = self.object try: """ """ m = getattr(obj, 'before_dumpy_save', None) if m is not None: m() if not self.deserializer.quick: obj.full_clean() obj.save(*args, **kw) logger.debug("%s has been saved" % obj2str(obj)) self.deserializer.register_success() return True #~ except ValidationError,e: #~ except ObjectDoesNotExist,e: #~ except (ValidationError,ObjectDoesNotExist), e: #~ except (ValidationError,ObjectDoesNotExist,IntegrityError), e: except Exception, e: if True: if not settings.SITE.loading_from_dump: # hand-written fixtures are expected to yield in savable # order logger.warning("Failed to save %s:" % obj2str(obj)) raise deps = [f.rel.to for f in obj._meta.fields if f.rel is not None] if not deps: logger.exception(e) raise Exception( "Failed to save independent %s." % obj2str(obj)) self.deserializer.register_failure(self, e) return False #~ except Exception,e: #~ logger.exception(e) #~ raise Exception("Failed to save %s. Abandoned." % obj2str(obj)) class FlushDeferredObjects: """ Indicator class object. 
Fixture may yield a `FlushDeferredObjects` to indicate that all deferred objects should get saved before going on. """ pass class LoaderBase(object): quick = False def __init__(self): #~ logger.info("20120225 DpyLoader.__init__()") self.save_later = {} self.saved = 0 self.count_objects = 0 self.AFTER_LOAD_HANDLERS = [] # populated by Migrator.after_load(), but remains empty in a DpyDeserializer self.before_load_handlers = [] def flush_deferred_objects(self): """ Flush the list of deferred objects. """ while self.saved and self.save_later: try_again = [] for msg_objlist in self.save_later.values(): for objlist in msg_objlist.values(): try_again += objlist logger.info("Trying to save %d deferred objects.", len(try_again)) self.save_later = {} self.saved = 0 for obj in try_again: obj.try_save() # ,*args,**kw): logger.info("Saved %d objects.", self.saved) def expand(self, obj): if obj is None: pass # ignore None values elif obj is FlushDeferredObjects: self.flush_deferred_objects() elif isinstance(obj, models.Model): yield FakeDeserializedObject(self, obj) elif hasattr(obj, '__iter__'): #~ if type(obj) is GeneratorType: #~ logger.info("20120225 expand iterable %r",obj) for o in obj: for so in self.expand(o): yield so #~ elif isinstance(obj,MtiChildWrapper): # the return value of create_mti_child() #~ yield FakeDeserializedObject(self,obj) #~ obj.deserializer = self #~ yield obj else: logger.warning("Ignored unknown object %r", obj) def register_success(self): self.saved += 1 self.count_objects += 1 def register_failure(self, obj, e): msg = force_unicode(e) d = self.save_later.setdefault(obj.object.__class__, {}) l = d.setdefault(msg, []) if len(l) == 0: logger.info("Deferred %s : %s", obj2str(obj.object), msg) l.append(obj) def initialize(self): """To be called after initdb and before starting to load the dumped data.""" for h in self.before_load_handlers: logger.info("Running before_load handler %s", h.__doc__) h(self) def finalize(self): """ """ 
self.flush_deferred_objects() if len(self.AFTER_LOAD_HANDLERS): logger.info( "Finalize %d after_load handlers", len(self.AFTER_LOAD_HANDLERS)) for h in self.AFTER_LOAD_HANDLERS: logger.info("Running after_load handler %s", h.__doc__) h(self) logger.info("Loaded %d objects", self.count_objects) if self.save_later: count = 0 s = '' for model, msg_objects in self.save_later.items(): for msg, objects in msg_objects.items(): if False: # detailed content of the first object s += "\n- %s %s (%d object(s), e.g. %s)" % ( full_model_name(model), msg, len(objects), obj2str(objects[0].object, force_detailed=True)) else: # pk of all objects s += "\n- %s %s (%d object(s) with primary key %s)" % ( full_model_name(model), msg, len(objects), ', '.join([unicode(o.object.pk) for o in objects])) count += len(objects) msg = "Abandoning with %d unsaved instances:%s" % (count, s) logger.warning(msg) # Don't raise an exception. The unsaved instances got lost and # the loaddata should be done again, but meanwhile the database # is not necessarily invalid and may be used for further testing. # And anyway, loaddata would catch it and still continue. # raise Exception(msg) class DpyLoader(LoaderBase): """Instantiated by `restore.py`. """ def __init__(self, globals_dict): self.globals_dict = globals_dict super(DpyLoader, self).__init__() site = globals_dict['settings'].SITE site.startup() site.install_migrations(self) def save(self, obj): for o in self.expand(obj): o.try_save() class DpyDeserializer(LoaderBase): """The Django deserializer for :ref:`dpy`. 
""" def deserialize(self, fp, **options): #~ logger.info("20120225 DpyLoader.deserialize()") if isinstance(fp, basestring): raise NotImplementedError #~ dbutils.set_language(settings.SITE.DEFAULT_LANGUAGE.django_code) #~ dbutils.set_language() translation.activate(settings.SITE.get_default_language()) #~ self.count += 1 fqname = 'north.dpy_tmp_%s' % hash(self) if False: parts = fp.name.split(os.sep) #~ parts = os.path.split(fp.name) print parts #~ fqname = parts[-1] fqname = '.'.join([p for p in parts if not ':' in p]) assert fqname.endswith(SUFFIX) fqname = fqname[:-len(SUFFIX)] print fqname desc = (SUFFIX, 'r', imp.PY_SOURCE) logger.info("Loading %s...", fp.name) module = imp.load_module(fqname, fp, fp.name, desc) #~ module = __import__(filename) for o in self.deserialize_module(module, **options): yield o def deserialize_module(self, module, **options): self.initialize() empty_fixture = True objects = getattr(module, 'objects', None) if objects is None: logger.info("Fixture %s has no attribute 'objects'" % module.__name__) else: for obj in objects(): for o in self.expand(obj): empty_fixture = False yield o if empty_fixture: if SUPPORT_EMPTY_FIXTURES: # avoid Django interpreting empty fixtures as an error yield DummyDeserializedObject() else: # To avoid Django interpreting empty fixtures as an # error, we yield one object which always exists: the # SiteConfig instance. # Oops, that will fail in lino_welfare if the company # pointed to by SiteConfig.job_office had been # deferred. if settings.SITE.site_config: yield FakeDeserializedObject( self, settings.SITE.site_config) else: raise Exception("""\ Fixture %s decided to not create any object. We're sorry, but Django doesn't like that. See <https://code.djangoproject.com/ticket/18213>. """ % module.__name__) #~ logger.info("Saved %d instances from %s.",self.saved,fp.name) self.finalize() def Deserializer(fp, **options): """The Deserializer used when ``manage.py loaddata`` encounters a `.py` fixture. 
""" d = DpyDeserializer() return d.deserialize(fp, **options) class Migrator(object): """The SITE's Migrator class is instantiated by `install_migrations`. If :setting:`migration_class` is None (the default), then this class will be instantiated. Applications may define their own Migrator class which should be a subclasss of this. """ def __init__(self, site, loader): self.site = site self.loader = loader def after_load(self, todo): """Declare a function to be called after all data has been loaded.""" assert callable(todo) # al = self.globals_dict['AFTER_LOAD_HANDLERS'] self.loader.AFTER_LOAD_HANDLERS.append(todo) def before_load(self, todo): """Declare a function to be called before loading dumped data.""" assert callable(todo) self.loader.before_load_handlers.append(todo) def install_migrations(self, loader): """Python dumps are generated with one line near the end of their `restore.py` file which calls this method, passing it their global namespace:: settings.SITE.install_migrations(globals()) A dumped fixture should always call this, even if there is no version change and no data migration, because this also does certain other things: - set :setting:`loading_from_dump` to `True` - remove any Permission and Site objects that might have been generated by `post_syncdb` signal if these apps are installed. 
""" globals_dict = loader.globals_dict self.loading_from_dump = True if self.is_installed('auth'): from django.contrib.auth.models import Permission Permission.objects.all().delete() if self.is_installed('sites'): from django.contrib.sites.models import Site Site.objects.all().delete() current_version = self.version if current_version is None: logger.info("Unversioned Site instance : no database migration") return if globals_dict['SOURCE_VERSION'] == current_version: logger.info("Source version is %s : no migration needed", current_version) return if self.migration_class is not None: mc = import_by_path(self.migration_class) migrator = mc(self, loader) else: migrator = self while True: from_version = globals_dict['SOURCE_VERSION'] funcname = 'migrate_from_' + from_version.replace('.', '_') m = getattr(migrator, funcname, None) if m is not None: #~ logger.info("Found %s()", funcname) to_version = m(globals_dict) if not isinstance(to_version, basestring): raise Exception("Oops: %s didn't return a string!" % m) if to_version <= from_version: raise Exception( "Oops: %s tries to migrate from version %s to %s ?!" % (m, from_version, to_version)) msg = "Migrating from version %s to %s" % ( from_version, to_version) if m.__doc__: msg += ":\n" + m.__doc__ logger.info(msg) globals_dict['SOURCE_VERSION'] = to_version else: if from_version != current_version: logger.warning( "No method for migrating from version %s to %s", from_version, current_version) break def load_fixture_from_module(m, **options): """ Used in unit tests to manually load a given fixture. E.g. in Lino `/tutorials/tables/index`. """ #~ filename = m.__file__[:-1] #~ print filename #~ assert filename.endswith('.py') #~ fp = open(filename) d = DpyDeserializer() for o in d.deserialize_module(m, **options): o.save() # 20140506 Don't remember why the following was. But it disturbed # in Lino `/tutorials/tables/index`. 
# if d.saved != 1: # logger.info("20140506 Loaded %d objects", d.saved) # raise Exception("Failed to load Python fixture from module %s" % # m.__name__) # return d
lsaffre/north
north/dpy.py
Python
bsd-2-clause
31,247
# Package metadata for pytinypng: distribution version and author attribution.
__version__ = '0.0.9'
__author__ = 'Igor Vasilcovsky'
vasilcovsky/pytinypng
pytinypng/__init__.py
Python
bsd-2-clause
54
### extends 'class_empty.py'
### block ClassImports
# NOTICE: Do not edit anything here, it is generated code
from . import gxapi_cy
from geosoft.gxapi import GXContext, float_ref, int_ref, str_ref


### endblock ClassImports

### block Header
# NOTICE: The code generator will not replace the code in this block
# GXRA wraps the low-level gxapi_cy.WrapRA handle and provides read-only,
# line-oriented access to ASCII files, either directly on disk (create) or
# inside a structured file storage container (create_sbf). Hand-written
# notes belong in this block only: everything inside the generated blocks
# below is overwritten by the code generator.
### endblock Header

### block ClassImplementation
# NOTICE: Do not edit anything here, it is generated code
class GXRA(gxapi_cy.WrapRA):
    """
    GXRA class.

    The `GXRA <geosoft.gxapi.GXRA>` class is used to access ASCII files sequentially
    or by line number. The files are opened in read-only mode, so no
    write operations are defined
    """

    def __init__(self, handle=0):
        super(GXRA, self).__init__(GXContext._get_tls_geo(), handle)

    @classmethod
    def null(cls):
        """
        A null (undefined) instance of `GXRA <geosoft.gxapi.GXRA>`

        :returns: A null `GXRA <geosoft.gxapi.GXRA>`
        :rtype:   GXRA
        """
        return GXRA()

    def is_null(self):
        """
        Check if this is a null (undefined) instance

        :returns: True if this is a null (undefined) instance, False otherwise.
        :rtype:   bool
        """
        return self._internal_handle() == 0



# Miscellaneous


    @classmethod
    def create(cls, file):
        """
        Creates `GXRA <geosoft.gxapi.GXRA>`

        :param file:  Name of the file
        :type  file:  str

        :returns:     `GXRA <geosoft.gxapi.GXRA>` Object
        :rtype:       GXRA

        .. versionadded:: 5.0

        **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
        """
        ret_val = gxapi_cy.WrapRA._create(GXContext._get_tls_geo(), file.encode())
        return GXRA(ret_val)



    @classmethod
    def create_sbf(cls, sbf, file):
        """
        Creates `GXRA <geosoft.gxapi.GXRA>` on an `GXSBF <geosoft.gxapi.GXSBF>`

        :param sbf:   Storage
        :param file:  Name of the file
        :type  sbf:   GXSBF
        :type  file:  str

        :returns:     `GXRA <geosoft.gxapi.GXRA>` Object
        :rtype:       GXRA

        .. versionadded:: 5.0

        **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_

        **Note:** This method allows you to open an `GXRA <geosoft.gxapi.GXRA>` in a structured file
        storage (an `GXSBF <geosoft.gxapi.GXSBF>`). SBFs can be created inside other data
        containers, such as workspaces, maps, images and databases.
        This lets you store application specific information together
        with the data to which it applies.

        .. seealso::

            sbf.gxh
        """
        ret_val = gxapi_cy.WrapRA._create_sbf(GXContext._get_tls_geo(), sbf, file.encode())
        return GXRA(ret_val)



    def gets(self, strbuff):
        """
        Get next full line from `GXRA <geosoft.gxapi.GXRA>`

        :param strbuff:  Buffer in which to place string
        :type  strbuff:  str_ref

        :returns:        0 - Ok
                         1 - End of file
        :rtype:          int

        .. versionadded:: 5.0

        **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
        """
        ret_val, strbuff.value = self._gets(strbuff.value.encode())
        return ret_val



    def len(self):
        """
        Returns the total number of lines in `GXRA <geosoft.gxapi.GXRA>`

        :returns: # of lines in the `GXRA <geosoft.gxapi.GXRA>`.
        :rtype:   int

        .. versionadded:: 5.0

        **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
        """
        ret_val = self._len()
        return ret_val



    def line(self):
        """
        Returns current line #, 0 is the first

        :returns: The current read line location.
        :rtype:   int

        .. versionadded:: 5.0

        **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_

        **Note:** This will be the next line read.
        """
        ret_val = self._line()
        return ret_val



    def seek(self, line):
        """
        Position next read to specified line #

        :param line:  Line #, 0 is the first.
        :type  line:  int

        :returns:     0 if seeked line is within the range of lines,
                      1 if outside range, line pointer will not be moved.
        :rtype:       int

        .. versionadded:: 5.0

        **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
        """
        ret_val = self._seek(line)
        return ret_val



### endblock ClassImplementation
### block ClassExtend
# NOTICE: The code generator will not replace the code in this block
### endblock ClassExtend


### block Footer
# NOTICE: The code generator will not replace the code in this block
### endblock Footer
GeosoftInc/gxpy
geosoft/gxapi/GXRA.py
Python
bsd-2-clause
5,242
from JumpScale import j

class test_complextype_user_osismodelbase(j.code.classGetJSRootModelBase()):
    """
    group of users
    """
    # Generated OSIS model base class for the 'user' model of the
    # 'test_complextype' namespace. Each declared field is stored in a
    # private _P_<name> attribute and exposed through a property whose
    # setter coerces string input to the declared type (via j.basetype.*)
    # and raises TypeError for anything else.
    def __init__(self):
        pass

        self._P_id=0
        self._P_organization=""
        self._P_name=""
        self._P_emails=list()
        self._P_groups=list()
        self._P_guid=""
        self._P__meta=list()
        # _meta layout: [kind, namespace, model name, model version]
        self._P__meta=["osismodel","test_complextype","user",1] #@todo version not implemented now, just already foreseen

    # -- id: int (numeric strings are coerced) -------------------------------
    @property
    def id(self):
        return self._P_id

    @id.setter
    def id(self, value):
        if not isinstance(value, int) and value is not None:
            if isinstance(value, basestring) and j.basetype.integer.checkString(value):
                value = j.basetype.integer.fromString(value)
            else:
                msg="property id input error, needs to be int, specfile: /opt/jumpscale/apps/osis/logic/test_complextype/model.spec, name model: user, value was:" + str(value)
                raise TypeError(msg)

        self._P_id=value

    @id.deleter
    def id(self):
        del self._P_id

    # -- organization: str ---------------------------------------------------
    @property
    def organization(self):
        return self._P_organization

    @organization.setter
    def organization(self, value):
        if not isinstance(value, str) and value is not None:
            if isinstance(value, basestring) and j.basetype.string.checkString(value):
                value = j.basetype.string.fromString(value)
            else:
                msg="property organization input error, needs to be str, specfile: /opt/jumpscale/apps/osis/logic/test_complextype/model.spec, name model: user, value was:" + str(value)
                raise TypeError(msg)

        self._P_organization=value

    @organization.deleter
    def organization(self):
        del self._P_organization

    # -- name: str -----------------------------------------------------------
    @property
    def name(self):
        return self._P_name

    @name.setter
    def name(self, value):
        if not isinstance(value, str) and value is not None:
            if isinstance(value, basestring) and j.basetype.string.checkString(value):
                value = j.basetype.string.fromString(value)
            else:
                msg="property name input error, needs to be str, specfile: /opt/jumpscale/apps/osis/logic/test_complextype/model.spec, name model: user, value was:" + str(value)
                raise TypeError(msg)

        self._P_name=value

    @name.deleter
    def name(self):
        del self._P_name

    # -- emails: list --------------------------------------------------------
    @property
    def emails(self):
        return self._P_emails

    @emails.setter
    def emails(self, value):
        if not isinstance(value, list) and value is not None:
            if isinstance(value, basestring) and j.basetype.list.checkString(value):
                value = j.basetype.list.fromString(value)
            else:
                msg="property emails input error, needs to be list, specfile: /opt/jumpscale/apps/osis/logic/test_complextype/model.spec, name model: user, value was:" + str(value)
                raise TypeError(msg)

        self._P_emails=value

    @emails.deleter
    def emails(self):
        del self._P_emails

    # -- groups: list --------------------------------------------------------
    @property
    def groups(self):
        return self._P_groups

    @groups.setter
    def groups(self, value):
        if not isinstance(value, list) and value is not None:
            if isinstance(value, basestring) and j.basetype.list.checkString(value):
                value = j.basetype.list.fromString(value)
            else:
                msg="property groups input error, needs to be list, specfile: /opt/jumpscale/apps/osis/logic/test_complextype/model.spec, name model: user, value was:" + str(value)
                raise TypeError(msg)

        self._P_groups=value

    @groups.deleter
    def groups(self):
        del self._P_groups

    # -- guid: str -----------------------------------------------------------
    @property
    def guid(self):
        return self._P_guid

    @guid.setter
    def guid(self, value):
        if not isinstance(value, str) and value is not None:
            if isinstance(value, basestring) and j.basetype.string.checkString(value):
                value = j.basetype.string.fromString(value)
            else:
                msg="property guid input error, needs to be str, specfile: /opt/jumpscale/apps/osis/logic/test_complextype/model.spec, name model: user, value was:" + str(value)
                raise TypeError(msg)

        self._P_guid=value

    @guid.deleter
    def guid(self):
        del self._P_guid

    # -- _meta: list (model bookkeeping, see __init__) -----------------------
    @property
    def _meta(self):
        return self._P__meta

    @_meta.setter
    def _meta(self, value):
        if not isinstance(value, list) and value is not None:
            if isinstance(value, basestring) and j.basetype.list.checkString(value):
                value = j.basetype.list.fromString(value)
            else:
                msg="property _meta input error, needs to be list, specfile: /opt/jumpscale/apps/osis/logic/test_complextype/model.spec, name model: user, value was:" + str(value)
                raise TypeError(msg)

        self._P__meta=value

    @_meta.deleter
    def _meta(self):
        del self._P__meta
Jumpscale/jumpscale6_core
apps/osis/logic/test_complextype/user/test_complextype_user_osismodelbase.py
Python
bsd-2-clause
5,069
# Hodgkin-Huxley single-compartment demo: builds a soma-only cell with Na, K
# and leak channels defined as NeuroUnit equation sets, injects a current
# step, and plots voltage plus the m/h/n gating state variables.
import matplotlib as mpl
mpl.rcParams['font.size'] = 14
from morphforge.stdimports import *
from morphforgecontrib.stdimports import *

# Sodium channel: m^3*h kinetics with standard-form alpha/beta rate equations.
eqnset_txt_na = """
define_component hh_na {
    i = g * (v-erev) * m**3*h

    m_inf = m_alpha_rate / (m_alpha_rate + m_beta_rate)
    m_tau = 1.0 / (m_alpha_rate + m_beta_rate)
    m' = (m_inf-m) / m_tau

    h_inf = h_alpha_rate / (h_alpha_rate + h_beta_rate)
    h_tau = 1.0 / (h_alpha_rate + h_beta_rate)
    h' = (h_inf-h) / h_tau

    StdFormAB(V, a1, a2, a3, a4, a5) = (a1+a2*V)/(a3+std.math.exp((V+a4)/a5))
    m_alpha_rate = StdFormAB(V=v, a1=m_a1, a2=m_a2, a3=m_a3, a4=m_a4, a5=m_a5)
    m_beta_rate = StdFormAB(V=v, a1=m_b1, a2=m_b2, a3=m_b3, a4=m_b4, a5=m_b5)
    h_alpha_rate = StdFormAB(V=v, a1=h_a1, a2=h_a2, a3=h_a3, a4=h_a4, a5=h_a5)
    h_beta_rate = StdFormAB(V=v, a1=h_b1, a2=h_b2, a3=h_b3, a4=h_b4, a5=h_b5)

    m_a1={-4.00 ms-1}; m_a2={-0.10 mV-1 ms-1}; m_a3={-1.00}; m_a4={40.00 mV}; m_a5={-10.00 mV};
    m_b1={ 4.00 ms-1}; m_b2={ 0.00 mV-1 ms-1}; m_b3={ 0.00}; m_b4={65.00 mV}; m_b5={ 18.00 mV};
    h_a1={ 0.07 ms-1}; h_a2={ 0.00 mV-1 ms-1}; h_a3={ 0.00}; h_a4={65.00 mV}; h_a5={ 20.00 mV};
    h_b1={ 1.00 ms-1}; h_b2={ 0.00 mV-1 ms-1}; h_b3={ 1.00}; h_b4={35.00 mV}; h_b5={-10.00 mV};

    erev = 50.0mV;

    <=> PARAMETER g:(S/m2)
    <=> OUTPUT    i:(A/m2)      METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
    <=> INPUT     v: V          METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
}
"""

# Potassium channel: n^4 kinetics; conductance and reversal fixed internally.
eqnset_txt_k = """
define_component hh_k {
    i = g * (v-erev) * n*n*n*n
    n_inf = n_alpha_rate / (n_alpha_rate + n_beta_rate)
    n_tau = 1.0 / (n_alpha_rate + n_beta_rate)
    n' = (n_inf-n) / n_tau

    StdFormAB(V, a1, a2, a3, a4, a5) = (a1 + a2*V)/(a3+std.math.exp((V+a4)/a5))
    n_alpha_rate = StdFormAB(V=v, a1=n_a1, a2=n_a2, a3=n_a3, a4=n_a4, a5=n_a5)
    n_beta_rate = StdFormAB(V=v, a1=n_b1, a2=n_b2, a3=n_b3, a4=n_b4, a5=n_b5)
    n_a1={-0.55 ms-1}; n_a2={-0.01 mV-1 ms-1}; n_a3={-1.00}; n_a4={55.00 mV}; n_a5={-10.00 mV}
    n_b1={0.125 ms-1}; n_b2={ 0.00 mV-1 ms-1}; n_b3={ 0.00}; n_b4={65.00 mV}; n_b5={ 80.00 mV}

    g = {36.0mS/cm2}
    erev = {-77.0mV}
    <=> OUTPUT    i:(A/m2)      METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
    <=> INPUT     v: V          METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
}
"""

# Passive leak channel with fixed conductance and reversal.
eqnset_txt_lk = """
define_component hh_lk {
    i = {0.3mS/cm2} * (v- {-54.3mV})
    <=> OUTPUT    i:(A/m2)      METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
    <=> INPUT     v: V          METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
}
"""


env = NEURONEnvironment()
sim = env.Simulation()

# Create a cell:
morph_dict = {'root': {'length': 18.8, 'diam': 18.8, 'id':'soma'} }
my_morph = MorphologyTree.fromDictionary(morph_dict)
cell = sim.create_cell(name="Cell1", morphology=my_morph)
#soma = cell.get_location("soma")

# Setup passive channels:
cell.set_passive( PassiveProperty.SpecificCapacitance, qty('1.0:uF/cm2'))

# Setup active channels:
na_chl = env.Channel(NeuroUnitEqnsetMechanism,
        name="NaChl", eqnset=eqnset_txt_na,
        default_parameters={"g":qty("120:mS/cm2")},
        )
k_chl = env.Channel(NeuroUnitEqnsetMechanism,
        name="KChl", eqnset=eqnset_txt_k,
        )
lk_chl = env.Channel(NeuroUnitEqnsetMechanism,
        name="LKChl", eqnset=eqnset_txt_lk,
        )

cell.apply_channel( na_chl)
cell.apply_channel( lk_chl)
cell.apply_channel( k_chl)

# Define what to record: soma voltage plus the three gating state variables.
sim.record(cell, what=StandardTags.Voltage, name="SomaVoltage", cell_location = cell.soma)
sim.record(na_chl, what='m', cell_location=cell.soma, user_tags=[StandardTags.StateVariable])
sim.record(na_chl, what='h', cell_location=cell.soma, user_tags=[StandardTags.StateVariable])
sim.record(k_chl, what='n', cell_location=cell.soma, user_tags=[StandardTags.StateVariable])

# Create the stimulus and record the injected current:
# 100 pA step from t=100ms to t=200ms.
cc = sim.create_currentclamp(name="CC1", amp=qty("100:pA"), dur=qty("100:ms"), delay=qty("100:ms"), cell_location=cell.soma)
sim.record(cc, what=StandardTags.Current)

# run the simulation
results = sim.run()
TagViewer(results, timerange=(50, 250)*units.ms, show=True)
mikehulluk/morphforge
doc/srcs_generated_examples/python_srcs/poster1.py
Python
bsd-2-clause
4,063
# Copyright (c) 2015, Anthony Schmieder
# Use of this source code is governed by the 2-clause BSD license that
# can be found in the LICENSE.txt file.
"""Loads and manages art assets"""

import pyglet

import os

# Directories (relative to the current working directory) searched for assets.
_ASSET_PATHS = ["res"]
# Every image the application needs; all are loaded up front by loadAssets().
_ASSET_FILE_NAMES = [
    "black_key_down.png",
    "black_key_up.png",
    "white_key_down.png",
    "white_key_up.png",
    "staff_line.png",
]


class Assets(object):
    """Static registry of loaded pyglet images, keyed by file name."""

    # None until loadAssets() runs; afterwards a dict {file name: image}.
    _loadedAssets = None

    @staticmethod
    def loadAssets():
        """Load every known asset file, (re)initializing the registry."""
        Assets._loadedAssets = dict()
        Assets._updateResourcePath()
        for f in _ASSET_FILE_NAMES:
            Assets.loadAsset(f)

    @staticmethod
    def loadAsset(filename):
        """Load a single image resource and register it under *filename*."""
        Assets._loadedAssets[filename] = pyglet.resource.image(filename)

    @staticmethod
    def _updateResourcePath():
        # Skip paths already registered: loadAssets() may run more than once,
        # and blindly appending would grow pyglet's search path with
        # duplicates on every call.
        for p in _ASSET_PATHS:
            full_path = os.path.join(os.getcwd(), p)
            if full_path not in pyglet.resource.path:
                pyglet.resource.path.append(full_path)
        pyglet.resource.reindex()

    @staticmethod
    def get(filename):
        """Return the loaded image for *filename*.

        Raises:
            RuntimeError: if loadAssets() has not been called yet.
            KeyError: if *filename* was never loaded.
        """
        if Assets._loadedAssets is None:
            raise RuntimeError("You must initialize the asset manager before "
                               "retrieving assets")
        return Assets._loadedAssets[filename]
aschmied/keyzer
ui/assetmanager.py
Python
bsd-2-clause
1,180
#!/usr/bin/env python3
# Copyright 2015 Dietrich Epp.
# This file is part of SGGL. SGGL is licensed under the terms of the
# 2-clause BSD license. For more information, see LICENSE.txt.
"""Command-line launcher for the SGGL code generator (glgen)."""
import glgen.__main__

if __name__ == '__main__':
    # Guard the entry point so importing this module has no side effects;
    # previously main() ran unconditionally at import time.
    glgen.__main__.main()
depp/sggl
sggl.py
Python
bsd-2-clause
232
#!/usr/bin/env python
"""Packaging script for libpymux, a pure-Python tmux clone."""
try:
    from setuptools import setup
except ImportError:
    # Environments without setuptools fall back to distutils.
    from distutils.core import setup


def _build_requirements():
    """Return install_requires: base deps plus the asyncio backport when needed."""
    deps = ['pyte', 'docopt']
    try:
        import asyncio
    except ImportError:
        # Python < 3.4: pull in the PyPI backport.
        deps.append('asyncio')
    return deps


requirements = _build_requirements()

setup(
    name='libpymux',
    author='Jonathan Slenders',
    version='0.1',
    license='LICENSE.txt',
    url='https://github.com/jonathanslenders/libpymux',
    description='Python terminal multiplexer (Pure Python tmux clone)',
    long_description=open("README.rst").read(),
    packages=['libpymux'],
    install_requires=requirements,
)
jonathanslenders/libpymux
setup.py
Python
bsd-2-clause
617
"""Sphinx extension that teaches autodoc how to document Celery tasks."""
from celery.app.task import BaseTask
from sphinx.ext import autodoc


class TaskDocumenter(autodoc.DataDocumenter):
    """Autodoc documenter that claims Celery ``BaseTask`` instances.

    Task objects are class instances rather than plain functions, so the
    stock documenters skip them; handling them as data members works.
    """

    @classmethod
    def can_document_member(cls, member, membername, isattr, parent):
        """Return True when *member* is a Celery task instance."""
        # Explicit bool instead of the old True-or-implicit-None return;
        # backward compatible since Sphinx only tests truthiness.
        return isinstance(member, BaseTask)


def setup(app):
    """Sphinx entry point: register the documenter with the application."""
    app.add_autodocumenter(TaskDocumenter)
melinath/django-kiki
docs/_ext/kikidocs.py
Python
bsd-2-clause
305
""" Copyright (c) 2016-2017, Kir Chou https://github.com/note35/sinon/blob/master/LICENSE A set of functions for handling known error """ def __exception_helper(msg, exception=Exception): #pylint: disable=missing-docstring raise exception(msg) def mock_type_error(obj): #pylint: disable=missing-docstring error_msg = "[{}] is an invalid module/class".format(str(obj)) return __exception_helper(error_msg) def prop_type_error(prop): #pylint: disable=missing-docstring error_msg = "[{}] is an invalid property, it should be a string".format(prop) return __exception_helper(error_msg) def prop_is_func_error(obj, prop): #pylint: disable=missing-docstring name = obj.__name__ if hasattr(obj, "__name__") else obj error_msg = "[{}] is an invalid property, it should be a method in [{}]".format(prop, name) return __exception_helper(error_msg) def prop_in_obj_error(obj, prop): #pylint: disable=missing-docstring error_msg = "[{}] is not exist in [{}]".format(prop, obj) return __exception_helper(error_msg) def lock_error(obj): #pylint: disable=missing-docstring name = obj.__name__ if hasattr(obj, "__name__") else obj error_msg = "[{}] have already been declared".format(name) return __exception_helper(error_msg) def called_with_empty_error(): #pylint: disable=missing-docstring error_msg = "There is no argument" return __exception_helper(error_msg) def is_not_spy_error(obj): #pylint: disable=missing-docstring error_msg = "[{}] is an invalid spy".format(str(obj)) return __exception_helper(error_msg) def matcher_type_error(prop): #pylint: disable=missing-docstring error_msg = "[{}] is an invalid property, it should be a type".format(prop) return __exception_helper(error_msg, exception=TypeError) def matcher_instance_error(prop): #pylint: disable=missing-docstring error_msg = "[{}] is an invalid property, it should be an instance".format(prop) return __exception_helper(error_msg, exception=TypeError) def wrapper_object_not_found_error(): error_msg = 'Wrapper object cannot be found' return 
__exception_helper(error_msg)
note35/sinon
sinon/lib/util/ErrorHandler.py
Python
bsd-2-clause
2,133
# coding: utf-8
"""Helpers to validate normalized affiliation data (country/state/city)."""
import os
from difflib import SequenceMatcher
from unicodedata import normalize


def remove_diacritics(s):
    """Return *s* with diacritics stripped, as a plain ASCII str."""
    try:
        s = normalize('NFKD', s)
    except TypeError:
        # Python 2 byte strings must be decoded before normalization.
        s = normalize('NFKD', unicode(s, "utf-8"))
    # BUGFIX: this return previously lived in a ``finally`` block, which
    # silently swallowed any exception raised by the fallback above (e.g.
    # NameError for ``unicode`` on Python 3). A plain return keeps the
    # success path identical while letting real errors propagate.
    return s.encode('ASCII', 'ignore').decode('ASCII')


def remove_suffixes_and_prefixes(state):
    """Strip common 'province/state' decorations from a state name."""
    for term in [" PROVINCE", "PROVINCIA DE ", "STATE OF ", " STATE"]:
        state = state.replace(term, "")
    return state


def remove_non_alpha_characters(s):
    # Drop characters such as commas, periods, parentheses etc,
    # keeping only letters, spaces and hyphens.
    return "".join([c for c in s if c.isalpha() or c in [" ", "-"]])


def similarity_ratio(value1, value2):
    """Return difflib's similarity ratio (0.0-1.0) between the two strings."""
    s = SequenceMatcher(None, value1, value2)
    return s.ratio()


def normalize_value(s):
    """Canonicalize a value: strip diacritics, uppercase, drop punctuation."""
    s = remove_diacritics(s)
    s = s.upper()
    s = remove_non_alpha_characters(s)
    return s


class States:
    """Lookup table mapping canonical state names to their abbreviations.

    Loaded once from the packaged ``assets/states_abbrev.csv`` file.
    """

    def __init__(self):
        self._states = {}
        self.load()

    def load(self):
        """Populate the table from the bundled name,abbreviation CSV."""
        with open(os.path.dirname(os.path.realpath(__file__)) +
                  "/assets/states_abbrev.csv") as fp:
            for row in fp.readlines():
                row = row.strip()
                if "," in row:
                    name, abbrev = row.split(",")
                    name = remove_diacritics(name)
                    name = name.upper()
                    self._states[name] = abbrev

    def get_state_abbrev(self, state):
        """Exact-name lookup; returns None when unknown."""
        return self._states.get(state)

    def get_state_abbrev_by_similarity(self, state):
        """Return the abbreviation of the best-matching name (ratio > 0.8).

        Returns None when the table is empty or no name is similar enough.
        """
        similar = sorted(
            (similarity_ratio(name, state), abbrev)
            for name, abbrev in self._states.items()
        )
        # BUGFIX: guard the empty table; ``similar[-1]`` used to raise
        # IndexError when no states were loaded.
        if similar and similar[-1][0] > 0.8:
            return similar[-1][1]

    def normalize(self, state):
        """Map a free-form state name to its abbreviation when possible."""
        state = remove_suffixes_and_prefixes(state)
        state_abbrev = (
            self.get_state_abbrev(state) or
            self.get_state_abbrev_by_similarity(state) or
            state
        )
        return state_abbrev


def is_a_match(original, normalized, states=None):
    """Return True when the two values denote the same entity.

    Equality is tested after canonicalization, then by similarity ratio,
    and finally (when *states* provides a ``normalize`` method) after
    state-abbreviation normalization.
    """
    original = normalize_value(original)
    normalized = normalize_value(normalized)
    if original == normalized:
        return True
    if similarity_ratio(original, normalized) > 0.8:
        return True
    if states and hasattr(states, 'normalize'):
        original = states.normalize(original)
        normalized = states.normalize(normalized)
        if original == normalized:
            return True
    return False


# Shared state lookup table, built once at import time.
STATES = States()


def has_conflicts(original_aff, normaff):
    """Compare two affiliation dicts field by field.

    Returns a list of ``(label, original, normalized)`` tuples for the
    fields that disagree. Note: returns None (not ``[]``) when
    *original_aff* is falsy — kept for backward compatibility.
    """
    if original_aff:
        conflicts = []
        for label in ["country_iso_3166", "state", "city"]:
            original = original_aff.get(label)
            normalized = normaff.get(label)
            if original and normalized:
                # Only state comparisons use the abbreviation table.
                states = STATES if label == "state" else None
                if is_a_match(original, normalized, states):
                    continue
                conflicts.append((label, original, normalized))
        return conflicts
scieloorg/xylose
xylose/aff_validator.py
Python
bsd-2-clause
3,052
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 11 13:55:17 2014

@author: sm1fg

This is the main module to construct a magnetohydrostatic solar
atmosphere, given a specified magnetic network of self-similar magnetic
flux tubes and save the output to gdf format.

To select an existing configuration change the import as model_pars,
set Nxyz, xyz_SI and any other special parameters, then execute
mhs_atmopshere.

To add new configurations:
add the model options to set_options in parameters/options.py;
add options required in parameters/model_pars.py;
add alternative empirical data sets to hs_model/;
add alternativ table than interploate_atmosphere in
hs_model/hs_atmosphere.py;
add option to get_flux_tubes in mhs_model/flux_tubes.py

If an alternative formulation of the flux tube is required add options
to construct_magnetic_field and construct_pairwise_field in
mhs_model/flux_tubes.py

Plotting options are included in plot/mhs_plot.py
"""
# NOTE: this is a Python 2 script — it uses print statements and relies
# on integer division (e.g. Nxyz[0]/2 as an array index) near the end.

import os
import numpy as np
import pysac.mhs_atmosphere as atm
import astropy.units as u
from pysac.mhs_atmosphere.parameters.model_pars import spruit as model_pars

#==============================================================================
# check whether mpi is required and the number of procs = size
#==============================================================================
try:
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    # only run in MPI mode when more than one process is available
    l_mpi = True
    l_mpi = l_mpi and (size != 1)
except ImportError:
    # serial fallback when mpi4py is not installed
    l_mpi = False
    rank = 0
    size = 1

#==============================================================================
# set up model parameters
#==============================================================================
local_procs = 1
# optional coordinate - resolution
model_pars['Nxyz'] = [64, 64, 128]  # 3D grid
model_pars['xyz'] = [-0.63*u.Mm, 0.63*u.Mm, -0.63*u.Mm, 0.63*u.Mm, 0.0*u.Mm, 12.7*u.Mm]  # grid size

# standard set of logical switches
option_pars = atm.set_options(model_pars, l_mpi, l_gdf=True)
# standard conversion to dimensionless units and physical constants
scales, physical_constants = \
    atm.get_parameters()
# select the option in the next line
option_pars['l_linear'] = True

# Each branch below rescales the model so the on-axis Alfven speed
# follows the stated height (Z) profile; the numeric factors are
# model-specific tuning constants.
# Alfven speed constant along the axis of the flux tube
if option_pars['l_const']:
    option_pars['l_B0_quadz'] = True
    model_pars['chrom_scale'] *= 5e1
    model_pars['p0'] *= 1.5e1
    physical_constants['gravity'] *= 1.
    model_pars['radial_scale'] *= 1.
# Alfven speed proportional to sqrt(Z) along the axis of the flux tube
elif option_pars['l_sqrt']:
    option_pars['l_B0_rootz'] = True
    model_pars['chrom_scale'] *= 5.65e-3
    model_pars['p0'] *= 1.
    physical_constants['gravity'] *= 7.5e3
    model_pars['radial_scale'] *= 0.7
# Alfven speed proportional to Z along the axis of the flux tube
elif option_pars['l_linear']:
    option_pars['l_B0_rootz'] = True
    model_pars['chrom_scale'] *= 0.062
    model_pars['p0'] *= 3e2
    physical_constants['gravity'] *= 8e3
    model_pars['radial_scale'] *= 1.
# Alfven speed proportional to Z^2 along the axis of the flux tube
elif option_pars['l_square']:
    option_pars['l_B0_rootz'] = True
    model_pars['chrom_scale'] *= 1.65
    model_pars['p0'] *= 2e4
    physical_constants['gravity'] *= 5e4
    model_pars['radial_scale'] *= 1.
# Alfven speed not defined along the axis of the flux tube
else:
    option_pars['l_B0_rootz'] = True
    model_pars['chrom_scale'] *= 1.
    model_pars['p0'] *= 1.

# obtain code coordinates and model parameters in astropy units
coords = atm.get_coords(model_pars['Nxyz'], u.Quantity(model_pars['xyz']))

#==============================================================================
# calculate 1d hydrostatic balance from empirical density profile
#==============================================================================
pressure_Z, rho_Z, Rgas_Z = atm.get_spruit_hs(coords['Z'],
                                              model_pars,
                                              physical_constants,
                                              option_pars
                                              )
#==============================================================================
# load flux tube footpoint parameters
#==============================================================================
# axial location and value of Bz at each footpoint
xi, yi, Si = atm.get_flux_tubes(
                                model_pars,
                                coords,
                                option_pars
                                )
#==============================================================================
# split domain into processes if mpi
#==============================================================================
ax, ay, az = np.mgrid[coords['xmin']:coords['xmax']:1j*model_pars['Nxyz'][0],
                      coords['ymin']:coords['ymax']:1j*model_pars['Nxyz'][1],
                      coords['zmin']:coords['zmax']:1j*model_pars['Nxyz'][2]]

# split the grid between processes for mpi (split along the x axis only)
if l_mpi:
    x_chunks = np.array_split(ax, size, axis=0)
    y_chunks = np.array_split(ay, size, axis=0)
    z_chunks = np.array_split(az, size, axis=0)

    x = comm.scatter(x_chunks, root=0)
    y = comm.scatter(y_chunks, root=0)
    z = comm.scatter(z_chunks, root=0)
else:
    x, y, z = ax, ay, az

x = u.Quantity(x, unit=coords['xmin'].unit)
y = u.Quantity(y, unit=coords['ymin'].unit)
z = u.Quantity(z, unit=coords['zmin'].unit)
#==============================================================================
# initialize zero arrays in which to add magnetic field and mhs adjustments
#==============================================================================
Bx = u.Quantity(np.zeros(x.shape), unit=u.T)  # magnetic x-component
By = u.Quantity(np.zeros(x.shape), unit=u.T)  # magnetic y-component
Bz = u.Quantity(np.zeros(x.shape), unit=u.T)  # magnetic z-component
pressure_m = u.Quantity(np.zeros(x.shape), unit=u.Pa)  # magneto-hydrostatic adjustment to pressure
rho_m = u.Quantity(np.zeros(x.shape), unit=u.kg/u.m**3)  # magneto-hydrostatic adjustment to density
# initialize zero arrays in which to add balancing forces and magnetic tension
Fx = u.Quantity(np.zeros(x.shape), unit=u.N/u.m**3)  # balancing force x-component
Fy = u.Quantity(np.zeros(x.shape), unit=u.N/u.m**3)  # balancing force y-component
# total tension force for comparison with residual balancing force
Btensx = u.Quantity(np.zeros(x.shape), unit=u.N/u.m**3)
Btensy = u.Quantity(np.zeros(x.shape), unit=u.N/u.m**3)
#==============================================================================
# calculate the magnetic field and pressure/density balancing expressions
#==============================================================================
# i == j: single-tube contribution; i != j: pairwise interaction terms.
for i in range(0, model_pars['nftubes']):
    for j in range(i, model_pars['nftubes']):
        if rank == 0:
            print'calculating ij-pair:',i,j
        if i == j:
            pressure_mi, rho_mi, Bxi, Byi ,Bzi, B2x, B2y =\
                atm.construct_magnetic_field(
                                             x, y, z,
                                             xi[i], yi[i], Si[i],
                                             model_pars, option_pars,
                                             physical_constants,
                                             scales
                                             )
            Bx, By, Bz = Bxi+Bx, Byi+By ,Bzi+Bz
            Btensx += B2x
            Btensy += B2y
            pressure_m += pressure_mi
            rho_m += rho_mi
        else:
            pressure_mi, rho_mi, Fxi, Fyi, B2x, B2y =\
                atm.construct_pairwise_field(
                                             x, y, z,
                                             xi[i], yi[i],
                                             xi[j], yi[j], Si[i], Si[j],
                                             model_pars,
                                             option_pars,
                                             physical_constants,
                                             scales
                                             )
            pressure_m += pressure_mi
            rho_m += rho_mi
            Fx += Fxi
            Fy += Fyi
            Btensx += B2x
            Btensy += B2y

# clear some memory
# NOTE(review): the last loop iteration always has i == j, so Bxi..B2y
# are defined here; confirm this del sits outside the loops in the
# original indentation.
del pressure_mi, rho_mi, Bxi, Byi ,Bzi, B2x, B2y

#==============================================================================
# Construct 3D hs arrays and then add the mhs adjustments to obtain atmosphere
#==============================================================================
# select the 1D array spanning the local mpi process; the add/sub of dz to
# ensure all indices are used, but only once
# NOTE(review): `and` between two np.where(...) tuples returns only the
# second tuple (a non-empty tuple is truthy), so the lower bound is never
# applied — probably np.logical_and was intended; confirm.
indz = np.where(coords['Z'] >= z.min()-0.1*coords['dz']) and \
       np.where(coords['Z'] <= z.max()+0.1*coords['dz'])
pressure_z, rho_z, Rgas_z = pressure_Z[indz], rho_Z[indz], Rgas_Z[indz]
# local proc 3D mhs arrays
pressure, rho = atm.mhs_3D_profile(z,
                                   pressure_z,
                                   rho_z,
                                   pressure_m,
                                   rho_m
                                   )
# magnetic pressure B^2 / (2 mu0)
magp = (Bx**2 + By**2 + Bz**2)/(2.*physical_constants['mu0'])
if rank ==0:
    print'max B corona = ',magp[:,:,-1].max().decompose()
energy = atm.get_internal_energy(pressure,
                                 magp,
                                 physical_constants)
#============================================================================
# Save data for SAC and plotting
#============================================================================
# set up data directory and file names
# may be worthwhile locating on /data if files are large
datadir = os.path.expanduser('~/Documents/mhs_atmosphere/'+model_pars['model']+'/')
filename = datadir + model_pars['model'] + option_pars['suffix']
if not os.path.exists(datadir):
    os.makedirs(datadir)
sourcefile = datadir + model_pars['model'] + '_sources' + option_pars['suffix']
aux3D = datadir + model_pars['model'] + '_3Daux' + option_pars['suffix']
aux1D = datadir + model_pars['model'] + '_1Daux' + option_pars['suffix']

# save the variables for the initialisation of a SAC simulation
atm.save_SACvariables(
              filename,
              rho,
              Bx,
              By,
              Bz,
              energy,
              option_pars,
              physical_constants,
              coords,
              model_pars['Nxyz']
             )
# save the balancing forces as the background source terms for SAC simulation
atm.save_SACsources(
              sourcefile,
              Fx,
              Fy,
              option_pars,
              physical_constants,
              coords,
              model_pars['Nxyz']
             )
# save auxilliary variable and 1D profiles for plotting and analysis
Rgas = u.Quantity(np.zeros(x.shape), unit=Rgas_z.unit)
Rgas[:] = Rgas_z
temperature = pressure/rho/Rgas
if not option_pars['l_hdonly']:
    inan = np.where(magp <=1e-7*pressure.min())
    # NOTE(review): magpbeta is an alias of magp (no copy), so applying
    # the floor below also mutates magp itself — confirm this is intended.
    magpbeta = magp
    magpbeta[inan] = 1e-7*pressure.min()  # low pressure floor to avoid NaN
    pbeta = pressure/magpbeta
else:
    pbeta = magp+1.0  # dummy to avoid NaN
alfven = np.sqrt(2.*magp/rho)
if rank == 0:
    # Nxyz[k]/2 relies on Python 2 integer division to index the axis
    print'Alfven speed Z.min to Z.max =',\
        alfven[model_pars['Nxyz'][0]/2,model_pars['Nxyz'][1]/2, 0].decompose(),\
        alfven[model_pars['Nxyz'][0]/2,model_pars['Nxyz'][1]/2,-1].decompose()

cspeed = np.sqrt(physical_constants['gamma']*pressure/rho)
atm.save_auxilliary3D(
              aux3D,
              pressure_m,
              rho_m,
              temperature,
              pbeta,
              alfven,
              cspeed,
              Btensx,
              Btensy,
              option_pars,
              physical_constants,
              coords,
              model_pars['Nxyz']
             )
atm.save_auxilliary1D(
              aux1D,
              pressure_Z,
              rho_Z,
              Rgas_Z,
              option_pars,
              physical_constants,
              coords,
              model_pars['Nxyz']
             )
Cadair/pysac
examples/mhs_atmosphere/spruit_atmosphere.py
Python
bsd-2-clause
12,258
# django-openid-auth - OpenID integration for django.contrib.auth # # Copyright (C) 2007 Simon Willison # Copyright (C) 2008-2013 Canonical Ltd. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # Django settings for example project. 
import django

# Kept for backward compatibility with any code importing it from settings;
# do NOT use it for ordering comparisons (see note below).
django_version = django.get_version()

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

# Compare the numeric version tuple, not the version *string*: string
# comparison is lexicographic, so "1.10" < "1.2" and every Django >= 1.10
# would wrongly fall through to the legacy pre-1.2 branch.
if django.VERSION >= (1, 2):
    csrf_middleware = 'django.middleware.csrf.CsrfViewMiddleware'

    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': 'sqlite.db',
        }
    }

    TEMPLATE_LOADERS = (
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    )
else:
    csrf_middleware = 'django.contrib.csrf.middleware.CsrfViewMiddleware'

    TEMPLATE_LOADERS = (
        'django.template.loaders.filesystem.load_template_source',
        'django.template.loaders.app_directories.load_template_source',
    )

    DATABASE_ENGINE = 'sqlite3'  # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
    DATABASE_NAME = 'sqlite.db'  # Or path to database file if using sqlite3.
    DATABASE_USER = ''  # Not used with sqlite3.
    DATABASE_PASSWORD = ''  # Not used with sqlite3.
    DATABASE_HOST = ''  # Set to empty string for localhost. Not used with sqlite3.
    DATABASE_PORT = ''  # Set to empty string for default. Not used with sqlite3.

# Local time zone for this installation. Choices can be found here:
# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
# although not all variations may be possible on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
SECRET_KEY = '34958734985734985734985798437'

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    csrf_middleware,
)

ROOT_URLCONF = 'example_consumer.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.admin',
    'django_openid_auth',
    'south',
)

AUTHENTICATION_BACKENDS = (
    'django_openid_auth.auth.OpenIDBackend',
    'django.contrib.auth.backends.ModelBackend',
)

# Should users be created when new OpenIDs are used to log in?
OPENID_CREATE_USERS = True

# When logging in again, should we overwrite user details based on
# data received via Simple Registration?
OPENID_UPDATE_DETAILS_FROM_SREG = True

# Map of OpenID Provider base URLs to recognised account verification schemes
# returned in response to a http://ns.login.ubuntu.com/2013/validation/account
# request.  Use None as the key in place of a URL to specify verification
# schemes that will be trusted from unknown OpenID Providers (not recommended).
OPENID_VALID_VERIFICATION_SCHEMES = {
    None: (),
}

# If set, always use this as the identity URL rather than asking the
# user.  This only makes sense if it is a server URL.
OPENID_SSO_SERVER_URL = 'https://login.launchpad.net/'

# Tell django.contrib.auth to use the OpenID signin URLs.
LOGIN_URL = '/openid/login/'
LOGIN_REDIRECT_URL = '/'

# Should django_auth_openid be used to sign into the admin interface?
OPENID_USE_AS_ADMIN_LOGIN = False
somcomltd/django-openid-auth
example_consumer/settings.py
Python
bsd-2-clause
5,789
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    """Auto-generated South migration: widen ``Field.choices`` 1000 -> 5000.

    The ``models`` dict below is South's frozen ORM snapshot at the time
    this migration was generated; it must not be edited by hand.
    """

    def forwards(self, orm):
        """Apply: grow 'Field.choices' to max_length=5000."""

        # Changing field 'Field.choices'
        db.alter_column(u'forms_field', 'choices', self.gf('django.db.models.fields.CharField')(max_length=5000))

    def backwards(self, orm):
        """Revert: shrink 'Field.choices' back to max_length=1000.

        NOTE(review): values longer than 1000 characters written while the
        forward migration was in effect may be truncated or rejected by the
        database on rollback.
        """

        # Changing field 'Field.choices'
        db.alter_column(u'forms_field', 'choices', self.gf('django.db.models.fields.CharField')(max_length=1000))

    # Frozen ORM state (generated) — do not modify.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'forms.field': {
            'Meta': {'ordering': "(u'order',)", 'object_name': 'Field'},
            'choices': ('django.db.models.fields.CharField', [], {'max_length': '5000', 'blank': 'True'}),
            'condition': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'default': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
            'dependency': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'blank': 'True'}),
            'field_type': ('django.db.models.fields.IntegerField', [], {}),
            'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'fields'", 'to': u"orm['forms.Form']"}),
            'help_text': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'merge': ('django.db.models.fields.CharField', [], {'default': "u'0'", 'max_length': '100', 'blank': 'True'}),
            'meta': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'placeholder_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'default': "u''", 'max_length': '100', 'blank': 'True'}),
            'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        u'forms.fieldentry': {
            'Meta': {'object_name': 'FieldEntry'},
            'entry': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'fields'", 'to': u"orm['forms.FormEntry']"}),
            'field_id': ('django.db.models.fields.IntegerField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True'})
        },
        u'forms.form': {
            'Meta': {'object_name': 'Form'},
            'button_text': ('django.db.models.fields.CharField', [], {'default': "u'Submit'", 'max_length': '50'}),
            'email_copies': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'email_from': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'email_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'email_subject': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'intro': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'redirect_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'response': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'send_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'sites': ('django.db.models.fields.related.ManyToManyField', [], {'default': '[2]', 'related_name': "u'forms_form_forms'", 'symmetrical': 'False', 'to': u"orm['sites.Site']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'template': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'forms.formentry': {
            'Meta': {'object_name': 'FormEntry'},
            'entry_time': ('django.db.models.fields.DateTimeField', [], {}),
            'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'entries'", 'to': u"orm['forms.Form']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'sites.site': {
            'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    complete_apps = ['forms']
Afnarel/django-forms-builder
forms_builder/forms/south_migrations/0014_auto__chg_field_field_choices.py
Python
bsd-2-clause
8,984
from pymacaron.log import pymlogger
import multiprocessing
from math import ceil

from pymacaron.config import get_config

log = pymlogger(__name__)


# Calculate resources available on this container hardware.
# Used by pymacaron-async, pymacaron-gcp and pymacaron-docker

def get_gunicorn_worker_count(cpu_count=None):
    """Return the number of gunicorn workers to run on this container hardware.

    Uses the (2 * cpus + 1) heuristic, based on *cpu_count* when given,
    otherwise on the detected CPU count.
    """
    if cpu_count:
        return cpu_count * 2 + 1
    return multiprocessing.cpu_count() * 2 + 1


def get_celery_worker_count(cpu_count=None):
    """Return the number of celery workers to run on this container hardware.

    An explicit 'worker_count' in the pymacaron config takes precedence.
    Otherwise use (2 * cpus), with a floor of 2 workers when the count is
    derived from the detected CPU count.
    """
    conf = get_config()
    if hasattr(conf, 'worker_count'):
        # Start worker_count parallel celery workers
        return conf.worker_count

    if cpu_count:
        return cpu_count * 2

    c = multiprocessing.cpu_count() * 2

    # Minimum worker count is 2. (Bug fix: the previous code used
    # 'c == 2', a comparison with no effect, so the floor was never
    # actually applied.)
    if c < 2:
        c = 2

    return c


# Memory required, in Mb, by one gunicorn or celery worker:
GUNICORN_WORKER_MEM = 400
CELERY_WORKER_MEM = 200


def get_memory_limit(default_celery_worker_count=None, cpu_count=None):
    """Return the memory in Megabytes required to run pymacaron on this container hardware.

    Sums the footprint of all gunicorn workers plus all celery workers,
    where the celery count is *default_celery_worker_count* when given,
    otherwise computed via get_celery_worker_count().
    """
    celery_count = default_celery_worker_count
    if not celery_count:
        celery_count = get_celery_worker_count(cpu_count=cpu_count)

    return ceil(get_gunicorn_worker_count(cpu_count=cpu_count) * GUNICORN_WORKER_MEM + celery_count * CELERY_WORKER_MEM)


def get_celery_worker_memory_limit():
    # NOTE(review): CELERY_WORKER_MEM is documented in Mb; multiplying by
    # 1024 implies the caller expects this value in KB — confirm the unit.
    return CELERY_WORKER_MEM * 1024
erwan-lemonnier/klue-microservice
pymacaron/resources.py
Python
bsd-2-clause
1,638
"""Synchronize package metadata, files and signed index pages from PyPI.

PyPIPackage pulls release metadata over XML-RPC, mirrors it into the local
Django models, downloads release files (with MD5 verification and
If-Modified-Since caching via Redis), and mirrors PyPI's signed "simple" /
"serversig" pages.  Python 2 era code (urllib/urlparse/xmlrpclib, iteritems).
"""
import base64
import hashlib
import logging
import re
import urllib
import urlparse
import xmlrpclib

import redis
import requests
import lxml.html

from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.files.base import ContentFile
from django.db import transaction
from django.utils.timezone import utc

from crate.web.history.models import Event
from crate.web.packages.models import Package, Release, TroveClassifier
from crate.web.packages.models import ReleaseRequire, ReleaseProvide, ReleaseObsolete, ReleaseURI, ReleaseFile
from crate.pypi.exceptions import PackageHashMismatch
from crate.pypi.models import PyPIMirrorPage, PyPIServerSigPage
from crate.pypi.utils.serversigs import load_key, verify

logger = logging.getLogger(__name__)

# PyPI endpoints mirrored/queried by this module.
INDEX_URL = "http://pypi.python.org/pypi"
SIMPLE_URL = "http://pypi.python.org/simple/"
SERVERSIG_URL = "http://pypi.python.org/serversig/"
SERVERKEY_URL = "http://pypi.python.org/serverkey"
# Redis key under which PyPI's server signing key is cached.
SERVERKEY_KEY = "crate:pypi:serverkey"

# Splits "name (version-spec)" as used by distutils2-style requirement strings.
_disutils2_version_capture = re.compile("^(.*?)(?:\(([^()]+)\))?$")
# Extracts (url, md5) pairs from "#md5=..." fragments on PyPI download links.
_md5_re = re.compile(r"(https?://pypi\.python\.org/packages/.+)#md5=([a-f0-9]+)")


def get_helper(data, key, default=None):
    """Return data[key] unless it is falsy or the literal "UNKNOWN"; else the default ("" if None)."""
    if data.get(key) and data[key] != "UNKNOWN":
        return data[key]
    return "" if default is None else default


def split_meta(meta):
    """Split a requirement string into its name / version / environment-marker parts."""
    meta_split = meta.split(";", 1)
    meta_name, meta_version = _disutils2_version_capture.search(meta_split[0].strip()).groups()
    meta_env = meta_split[1].strip() if len(meta_split) == 2 else ""
    return {
        "name": meta_name,
        "version": meta_version if meta_version is not None else "",
        "environment": meta_env,
    }


class PyPIPackage(object):
    """Mirrors one PyPI package (or a single release of it) into the local database."""

    def __init__(self, name, version=None):
        # version=None means "all releases of this package".
        self.name = name
        self.version = version
        self.stored = False
        # XML-RPC client for PyPI metadata queries.
        self.pypi = xmlrpclib.ServerProxy(INDEX_URL, use_datetime=True)
        # Redis connection settings come from Django settings; keys are lower-cased
        # to match redis-py's constructor keyword arguments.
        self.datastore = redis.StrictRedis(**dict([(x.lower(), y) for x, y in settings.REDIS[settings.PYPI_DATASTORE].items()]))

    def process(self, bulk=False, download=True, skip_modified=True):
        """Fetch, build and store the package; optionally download its files."""
        self.bulk = bulk
        self.skip_modified = skip_modified
        self.fetch()
        self.build()
        with transaction.commit_on_success():
            self.store()
        if download:
            self.download()

    def delete(self):
        """Delete the whole package, or hide a single release when self.version is set."""
        with transaction.commit_on_success():
            self.verify_and_sync_pages()
            if self.version is None:
                # Delete the entire package
                # NOTE(review): 'releases' is queried (and locked) but never used here.
                packages = Package.objects.filter(name=self.name).select_for_update()
                releases = Release.objects.filter(package__in=packages).select_for_update()
                for package in packages:
                    package.delete()
            else:
                # Delete only this release (soft delete via the hidden flag)
                try:
                    package = Package.objects.get(name=self.name)
                except Package.DoesNotExist:
                    return
                releases = Release.objects.filter(package=package, version=self.version).select_for_update()
                for release in releases:
                    release.hidden = True
                    release.save()

    def remove_files(self, *files):
        """Soft-delete the named release files for this package."""
        self.verify_and_sync_pages()
        packages = Package.objects.filter(name=self.name)
        releases = Release.objects.filter(package__in=packages)
        for rf in ReleaseFile.objects.filter(release__in=releases, filename__in=files):
            rf.hidden = True
            rf.save()

    def fetch(self):
        """Fetch release list, metadata and file URLs from PyPI over XML-RPC."""
        logger.debug("[FETCH] %s%s" % (self.name, " %s" % self.version if self.version else ""))
        # Fetch meta data for this release
        self.releases = self.get_releases()
        self.release_data = self.get_release_data()
        self.release_url_data = self.get_release_urls()

    def build(self):
        """Normalize the fetched PyPI metadata into self.data, keyed by release version."""
        logger.debug("[BUILD] %s%s" % (self.name, " %s" % self.version if self.version else ""))
        # Check to Make sure fetch has been ran
        if not hasattr(self, "releases") or not hasattr(self, "release_data") or not hasattr(self, "release_url_data"):
            raise Exception("fetch must be called prior to running build")  # @@@ Make a Custom Exception

        # Construct our representation of the releases
        self.data = {}

        for release in self.releases:
            data = {}
            data["package"] = self.name
            data["version"] = release
            data["author"] = get_helper(self.release_data[release], "author")
            data["author_email"] = get_helper(self.release_data[release], "author_email")
            data["maintainer"] = get_helper(self.release_data[release], "maintainer")
            data["maintainer_email"] = get_helper(self.release_data[release], "maintainer_email")
            data["summary"] = get_helper(self.release_data[release], "summary")
            data["description"] = get_helper(self.release_data[release], "description")
            data["license"] = get_helper(self.release_data[release], "license")
            data["keywords"] = get_helper(self.release_data[release], "keywords")  # @@@ Switch This to a List
            data["platform"] = get_helper(self.release_data[release], "platform")
            data["download_uri"] = get_helper(self.release_data[release], "download_url")  # @@@ Should This Go Under URI?
            data["requires_python"] = get_helper(self.release_data[release], "required_python")
            data["stable_version"] = get_helper(self.release_data[release], "stable_version")  # @@@ What Is This?
            data["classifiers"] = get_helper(self.release_data[release], "classifiers", [])

            # Construct the URIs
            data["uris"] = {}
            if get_helper(self.release_data[release], "home_page"):
                data["uris"]["Home Page"] = get_helper(self.release_data[release], "home_page")
            if get_helper(self.release_data[release], "bugtrack_url"):
                data["uris"]["Bug Tracker"] = get_helper(self.release_data[release], "bugtrack_url")
            for label, url in [x.split(",", 1) for x in get_helper(self.release_data[release], "project_url", [])]:
                data["uris"][label] = url

            # Construct Requires
            data["requires"] = []
            for kind in ["requires", "requires_dist", "requires_external"]:
                for require in get_helper(self.release_data[release], kind, []):
                    # NOTE(review): 'is not' compares identity, not equality; it only
                    # works here because CPython interns these literals — should be '!='.
                    req = {"kind": kind if kind is not "requires_external" else "external"}
                    req.update(split_meta(require))
                    data["requires"].append(req)

            # Construct Provides
            data["provides"] = []
            for kind in ["provides", "provides_dist"]:
                for provides in get_helper(self.release_data[release], kind, []):
                    req = {"kind": kind}
                    req.update(split_meta(provides))
                    data["provides"].append(req)

            # Construct Obsoletes
            data["obsoletes"] = []
            for kind in ["obsoletes", "obsoletes_dist"]:
                for provides in get_helper(self.release_data[release], kind, []):
                    req = {"kind": kind}
                    req.update(split_meta(provides))
                    data["obsoletes"].append(req)

            # Construct Files
            data["files"] = []
            for url_data in self.release_url_data[release]:
                data["files"].append({
                    "comment": get_helper(url_data, "comment_text"),
                    "downloads": get_helper(url_data, "downloads", 0),
                    "file": get_helper(url_data, "url"),
                    "filename": get_helper(url_data, "filename"),
                    "python_version": get_helper(url_data, "python_version"),
                    "type": get_helper(url_data, "packagetype"),
                    "digests": {
                        "md5": url_data["md5_digest"].lower(),
                    }
                })
                if url_data.get("upload_time"):
                    # Timestamps from PyPI are naive; mark them as UTC.
                    data["files"][-1]["created"] = url_data["upload_time"].replace(tzinfo=utc)

            # The release "created" time is the earliest of its files' upload times.
            for file_data in data["files"]:
                if file_data.get("created"):
                    if data.get("created"):
                        if file_data["created"] < data["created"]:
                            data["created"] = file_data["created"]
                    else:
                        data["created"] = file_data["created"]

            self.data[release] = data
            logger.debug("[RELEASE BUILD DATA] %s %s %s" % (self.name, release, data))

    def store(self):
        """Persist the normalized metadata (built by build()) into the Django models."""
        # Look the package up by its normalized name so renames are detected.
        try:
            package = Package.objects.get(normalized_name=re.sub('[^A-Za-z0-9.]+', '-', self.name).lower())
            if package.name != self.name:
                package.name = self.name
                package.save()
        except Package.DoesNotExist:
            package = Package.objects.create(name=self.name)

        for data in self.data.values():
            try:
                release = Release.objects.get(package=package, version=data["version"])
            except Release.DoesNotExist:
                release = Release(package=package, version=data["version"])
                release.full_clean()
                release.save()
            # This is an extra database call but it should prevent ShareLocks
            Release.objects.filter(pk=release.pk).select_for_update()

            if release.hidden:
                release.hidden = False

            for key, value in data.iteritems():
                if key in ["package", "version"]:
                    # Short circuit package and version
                    continue

                if key == "uris":
                    # Replace the stored URIs wholesale with the freshly fetched set.
                    ReleaseURI.objects.filter(release=release).delete()
                    for label, uri in value.iteritems():
                        try:
                            ReleaseURI.objects.get(release=release, label=label, uri=uri)
                        except ReleaseURI.DoesNotExist:
                            try:
                                release_uri = ReleaseURI(release=release, label=label, uri=uri)
                                release_uri.full_clean()
                                release_uri.save(force_insert=True)
                            except ValidationError:
                                logger.exception("%s, %s for %s-%s Invalid Data" % (label, uri, release.package.name, release.version))
                elif key == "classifiers":
                    # Sync trove classifiers, creating unseen ones on the fly.
                    release.classifiers.clear()
                    for classifier in value:
                        try:
                            trove = TroveClassifier.objects.get(trove=classifier)
                        except TroveClassifier.DoesNotExist:
                            trove = TroveClassifier(trove=classifier)
                            trove.full_clean()
                            trove.save(force_insert=True)
                        release.classifiers.add(trove)
                elif key in ["requires", "provides", "obsoletes"]:
                    # These three relations share the same delete-then-recreate logic.
                    model = {"requires": ReleaseRequire, "provides": ReleaseProvide, "obsoletes": ReleaseObsolete}.get(key)
                    model.objects.filter(release=release).delete()
                    for item in value:
                        try:
                            model.objects.get(release=release, **item)
                        except model.DoesNotExist:
                            m = model(release=release, **item)
                            m.full_clean()
                            m.save(force_insert=True)
                elif key == "files":
                    # Update existing files in place, create new ones, and hide any
                    # file we know locally that PyPI no longer lists.
                    files = ReleaseFile.objects.filter(release=release)
                    filenames = dict([(x.filename, x) for x in files])
                    for f in value:
                        try:
                            rf = ReleaseFile.objects.get(
                                release=release,
                                type=f["type"],
                                filename=f["filename"],
                                python_version=f["python_version"],
                            )
                            for k, v in f.iteritems():
                                if k in ["digests", "file", "filename", "type", "python_version"]:
                                    continue
                                setattr(rf, k, v)
                            rf.hidden = False
                            rf.full_clean()
                            rf.save()
                        except ReleaseFile.DoesNotExist:
                            rf = ReleaseFile(
                                release=release,
                                type=f["type"],
                                filename=f["filename"],
                                python_version=f["python_version"],
                                **dict([(k, v) for k, v in f.iteritems() if k not in ["digests", "file", "filename", "type", "python_version"]])
                            )
                            rf.hidden = False
                            rf.full_clean()
                            rf.save()
                        if f["filename"] in filenames.keys():
                            del filenames[f["filename"]]
                    if filenames:
                        for rf in ReleaseFile.objects.filter(pk__in=[f.pk for f in filenames.values()]):
                            rf.hidden = True
                            rf.save()
                else:
                    # Any other key maps straight onto a Release attribute.
                    setattr(release, key, value)

            # Validate; if only download_uri is invalid, blank it and retry once more.
            while True:
                try:
                    release.full_clean()
                except ValidationError as e:
                    if "download_uri" in e.message_dict:
                        release.download_uri = ""
                        logger.exception("%s-%s Release Validation Error %s" % (release.package.name, release.version, str(e.message_dict)))
                    else:
                        raise
                else:
                    break
            release.save()

        # Mark unsynced as deleted when bulk processing
        if self.bulk:
            for release in Release.objects.filter(package=package).exclude(version__in=self.data.keys()):
                release.hidden = True
                release.save()

        self.stored = True

    def download(self):
        """Download release files, verify their MD5, store them and log an Event."""
        # Check to Make sure fetch has been ran
        if not hasattr(self, "releases") or not hasattr(self, "release_data") or not hasattr(self, "release_url_data"):
            raise Exception("fetch and build must be called prior to running download")  # @@@ Make a Custom Exception

        # Check to Make sure build has been ran
        if not hasattr(self, "data"):
            raise Exception("build must be called prior to running download")  # @@@ Make a Custom Exception

        if not self.stored:
            raise Exception("package must be stored prior to downloading")  # @@@ Make a Custom Exception

        pypi_pages = self.verify_and_sync_pages()

        for data in self.data.values():
            try:
                # Disabled: cross-check download links against the signed simple page.
                # if pypi_pages.get("has_sig"):
                #     simple_html = lxml.html.fromstring(pypi_pages["simple"])
                #     simple_html.make_links_absolute(urlparse.urljoin(SIMPLE_URL, data["package"]) + "/")
                #     verified_md5_hashes = {}
                #     for link in simple_html.iterlinks():
                #         m = _md5_re.search(link[2])
                #         if m:
                #             url, md5_hash = m.groups()
                #             verified_md5_hashes[url] = md5_hash
                package = Package.objects.get(name=data["package"])
                # NOTE(review): this is a (locked) queryset, not a single Release,
                # and it is then passed as release=... in the filter below.
                release = Release.objects.filter(package=package, version=data["version"]).select_for_update()

                for release_file in ReleaseFile.objects.filter(release=release, filename__in=[x["filename"] for x in data["files"]]).select_for_update():
                    file_data = [x for x in data["files"] if x["filename"] == release_file.filename][0]
                    datastore_key = "crate:pypi:download:%(url)s" % {"url": file_data["file"]}

                    stored_file_data = self.datastore.hgetall(datastore_key)
                    headers = None

                    if stored_file_data and self.skip_modified:
                        # Stored data exists for this file
                        if release_file.file:
                            try:
                                release_file.file.read()
                            except IOError:
                                pass
                            else:
                                # We already have a file
                                if stored_file_data["md5"].lower() == file_data["digests"]["md5"].lower():
                                    # The supposed MD5 from PyPI matches our local
                                    headers = {
                                        "If-Modified-Since": stored_file_data["modified"],
                                    }

                    resp = requests.get(file_data["file"], headers=headers, prefetch=True)
                    if resp.status_code == 304:
                        # NOTE(review): 'return' exits the whole method here, skipping
                        # any remaining files/releases — 'continue' may have been intended.
                        logger.info("[DOWNLOAD] skipping %(filename)s because it has not been modified" % {"filename": release_file.filename})
                        return
                    logger.info("[DOWNLOAD] downloading %(filename)s" % {"filename": release_file.filename})
                    resp.raise_for_status()

                    # Make sure the MD5 of the file we receive matched what we were told it is
                    if hashlib.md5(resp.content).hexdigest().lower() != file_data["digests"]["md5"].lower():
                        raise PackageHashMismatch("%s does not match %s for %s %s" % (
                            hashlib.md5(resp.content).hexdigest().lower(),
                            file_data["digests"]["md5"].lower(),
                            file_data["type"],
                            file_data["filename"],
                        ))

                    # Store our own sha256 digest alongside the file.
                    release_file.digest = "$".join(["sha256", hashlib.sha256(resp.content).hexdigest().lower()])
                    release_file.full_clean()
                    release_file.file.save(file_data["filename"], ContentFile(resp.content), save=False)
                    release_file.save()

                    Event.objects.create(
                        package=release_file.release.package.name,
                        version=release_file.release.version,
                        action=Event.ACTIONS.file_add,
                        data={
                            "filename": release_file.filename,
                            "digest": release_file.digest,
                            "uri": release_file.get_absolute_url(),
                        }
                    )

                    # Store data relating to this file (if modified etc)
                    stored_file_data = {
                        "md5": file_data["digests"]["md5"].lower(),
                        "modified": resp.headers.get("Last-Modified"),
                    }

                    if resp.headers.get("Last-Modified"):
                        self.datastore.hmset(datastore_key, {
                            "md5": file_data["digests"]["md5"].lower(),
                            "modified": resp.headers["Last-Modified"],
                        })
                        # Set a year expire on the key so that stale entries disappear
                        self.datastore.expire(datastore_key, 31556926)
                    else:
                        self.datastore.delete(datastore_key)
            except requests.HTTPError:
                logger.exception("[DOWNLOAD ERROR]")

    def get_releases(self):
        """Return the list of release versions to process (all, or just self.version)."""
        if self.version is None:
            releases = self.pypi.package_releases(self.name, True)
        else:
            releases = [self.version]
        logger.debug("[RELEASES] %s%s [%s]" % (self.name, " %s" % self.version if self.version else "", ", ".join(releases)))
        return releases

    def get_release_data(self):
        """Return {version: metadata-dict} from PyPI's release_data XML-RPC call."""
        release_data = []
        for release in self.releases:
            data = self.pypi.release_data(self.name, release)
            logger.debug("[RELEASE DATA] %s %s" % (self.name, release))
            release_data.append([release, data])
        return dict(release_data)

    def get_release_urls(self):
        """Return {version: url-list} from PyPI's release_urls XML-RPC call."""
        release_url_data = []
        for release in self.releases:
            data = self.pypi.release_urls(self.name, release)
            logger.info("[RELEASE URL] %s %s" % (self.name, release))
            logger.debug("[RELEASE URL DATA] %s %s %s" % (self.name, release, data))
            release_url_data.append([release, data])
        return dict(release_url_data)

    def verify_and_sync_pages(self):
        """Fetch, (best-effort) verify and mirror PyPI's simple + serversig pages."""
        # Get the Server Key for PyPI
        if self.datastore.get(SERVERKEY_KEY):
            key = load_key(self.datastore.get(SERVERKEY_KEY))
        else:
            serverkey = requests.get(SERVERKEY_URL, prefetch=True)
            key = load_key(serverkey.content)
            self.datastore.set(SERVERKEY_KEY, serverkey.content)

        try:
            # Download the "simple" page from PyPI for this package
            simple = requests.get(urlparse.urljoin(SIMPLE_URL, urllib.quote(self.name.encode("utf-8"))), prefetch=True)
            simple.raise_for_status()
        except requests.HTTPError:
            if simple.status_code == 404:
                return {"has_sig": False}
            raise
        except ValueError:
            logger.exception("Got a ValueError from downloading the Simple page")
            return {"has_sig": False}

        try:
            # Download the "serversig" page from PyPI for this package
            serversig = requests.get(urlparse.urljoin(SERVERSIG_URL, urllib.quote(self.name.encode("utf-8"))), prefetch=True)
            serversig.raise_for_status()
        except requests.HTTPError:
            if serversig.status_code == 404:
                return {"has_sig": False}
            raise

        try:
            # Verification failures are currently ignored (signature check is advisory).
            if not verify(key, simple.content, serversig.content):
                pass
                # raise Exception("Simple API page does not match serversig")  # @@@ This Should be Custom Exception
        except (UnicodeDecodeError, UnicodeEncodeError, ValueError, AssertionError):
            logger.exception("Exception trying to verify %s" % self.name)  # @@@ Figure out a better way to handle this

        try:
            package = Package.objects.get(normalized_name=re.sub('[^A-Za-z0-9.]+', '-', self.name).lower())
        except Package.DoesNotExist:
            logger.exception("Error Trying To Verify %s (Querying Package)" % self.name)
            return

        simple_mirror, c = PyPIMirrorPage.objects.get_or_create(package=package, defaults={"content": simple.content})
        if not c and simple_mirror.content != simple.content:
            simple_mirror.content = simple.content
            simple_mirror.save()

        serversig_mirror, c = PyPIServerSigPage.objects.get_or_create(package=package, defaults={"content": serversig.content.encode("base64")})
        serversig_mirror.content = base64.b64encode(serversig.content)
        serversig_mirror.save()

        return {
            "simple": simple.content,
            "serversig": serversig.content,
            "has_sig": True,
        }
crateio/crate.pypi
crate/pypi/processor.py
Python
bsd-2-clause
23,954
"""Models for SPARQL stores and per-user/group query privileges."""
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User, Group

from .endpoint import Endpoint

# Fallback store slug when the Django settings do not define DEFAULT_STORE_SLUG.
DEFAULT_STORE_SLUG = getattr(settings, 'DEFAULT_STORE_SLUG', 'public')


class Store(models.Model):
    """A SPARQL store, identified by its slug, with its service endpoint URLs."""

    slug = models.SlugField(primary_key=True)  # unique identifier; used as the primary key
    name = models.CharField(max_length=128)  # human-readable display name
    query_endpoint = models.URLField()  # endpoint used by query() below (required)
    update_endpoint = models.URLField(null=True, blank=True)  # update endpoint, if the store supports updates
    graph_store_endpoint = models.URLField(null=True, blank=True)  # graph-store endpoint, if any

    def __unicode__(self):
        # Python 2 / old-Django string representation.
        return self.name

    def query(self, *args, **kwargs):
        """Run a query against this store's query endpoint (args pass through to Endpoint.query)."""
        return Endpoint(self.query_endpoint).query(*args, **kwargs)

    class Meta:
        # Custom per-store permissions (administer/query/update).
        permissions = (('administer_store', 'can administer'),
                       ('query_store', 'can query'),
                       ('update_store', 'can update'))


class UserPrivileges(models.Model):
    """Per-user or per-group overrides for query throttling and timeouts."""

    # Both are nullable — presumably exactly one of user/group is set per row; verify against usage.
    user = models.ForeignKey(User, null=True, blank=True)
    group = models.ForeignKey(Group, null=True, blank=True)
    allow_concurrent_queries = models.BooleanField()  # whether concurrent queries are permitted
    disable_throttle = models.BooleanField()  # exempt from throttling entirely
    throttle_threshold = models.FloatField(null=True, blank=True)  # threshold before throttling kicks in — units not shown here
    deny_threshold = models.FloatField(null=True, blank=True)  # threshold before requests are denied — units not shown here
    intensity_decay = models.FloatField(null=True, blank=True)  # decay factor for the throttling metric — TODO confirm semantics
    disable_timeout = models.BooleanField()  # exempt from query timeouts
    maximum_timeout = models.IntegerField(null=True)  # cap on query timeout — presumably seconds; verify
ox-it/humfrey
humfrey/sparql/models.py
Python
bsd-3-clause
1,393
import faker import faker.providers import faker.providers.address import faker.providers.address.cs_CZ import faker.providers.address.de_DE import faker.providers.address.el_GR import faker.providers.address.en import faker.providers.address.en_AU import faker.providers.address.en_CA import faker.providers.address.en_GB import faker.providers.address.en_US import faker.providers.address.es import faker.providers.address.es_ES import faker.providers.address.es_MX import faker.providers.address.fa_IR import faker.providers.address.fi_FI import faker.providers.address.fr_CH import faker.providers.address.fr_FR import faker.providers.address.hi_IN import faker.providers.address.hr_HR import faker.providers.address.it_IT import faker.providers.address.ja_JP import faker.providers.address.ko_KR import faker.providers.address.ne_NP import faker.providers.address.nl_BE import faker.providers.address.nl_NL import faker.providers.address.no_NO import faker.providers.address.pl_PL import faker.providers.address.pt_BR import faker.providers.address.pt_PT import faker.providers.address.ru_RU import faker.providers.address.sk_SK import faker.providers.address.sl_SI import faker.providers.address.sv_SE import faker.providers.address.uk_UA import faker.providers.address.zh_CN import faker.providers.address.zh_TW import faker.providers.barcode import faker.providers.barcode.en_US import faker.providers.color import faker.providers.color.en_US import faker.providers.color.uk_UA import faker.providers.company import faker.providers.company.bg_BG import faker.providers.company.cs_CZ import faker.providers.company.de_DE import faker.providers.company.en_US import faker.providers.company.es_MX import faker.providers.company.fa_IR import faker.providers.company.fi_FI import faker.providers.company.fr_CH import faker.providers.company.fr_FR import faker.providers.company.hr_HR import faker.providers.company.it_IT import faker.providers.company.ja_JP import faker.providers.company.ko_KR 
import faker.providers.company.no_NO import faker.providers.company.pt_BR import faker.providers.company.pt_PT import faker.providers.company.ru_RU import faker.providers.company.sk_SK import faker.providers.company.sl_SI import faker.providers.company.sv_SE import faker.providers.company.zh_CN import faker.providers.company.zh_TW import faker.providers.credit_card import faker.providers.credit_card.en_US import faker.providers.currency import faker.providers.currency.en_US import faker.providers.date_time import faker.providers.date_time.en_US import faker.providers.file import faker.providers.file.en_US import faker.providers.internet import faker.providers.internet.bg_BG import faker.providers.internet.bs_BA import faker.providers.internet.cs_CZ import faker.providers.internet.de_AT import faker.providers.internet.de_DE import faker.providers.internet.el_GR import faker.providers.internet.en_AU import faker.providers.internet.en_US import faker.providers.internet.fa_IR import faker.providers.internet.fi_FI import faker.providers.internet.fr_CH import faker.providers.internet.fr_FR import faker.providers.internet.hr_HR import faker.providers.internet.ja_JP import faker.providers.internet.ko_KR import faker.providers.internet.no_NO import faker.providers.internet.pt_BR import faker.providers.internet.pt_PT import faker.providers.internet.ru_RU import faker.providers.internet.sk_SK import faker.providers.internet.sl_SI import faker.providers.internet.sv_SE import faker.providers.internet.uk_UA import faker.providers.internet.zh_CN import faker.providers.job import faker.providers.job.en_US import faker.providers.job.fa_IR import faker.providers.job.fr_CH import faker.providers.job.fr_FR import faker.providers.job.hr_HR import faker.providers.job.pl_PL import faker.providers.job.ru_RU import faker.providers.job.uk_UA import faker.providers.job.zh_TW import faker.providers.lorem import faker.providers.lorem.el_GR import faker.providers.lorem.la import 
faker.providers.lorem.ru_RU import faker.providers.misc import faker.providers.misc.en_US import faker.providers.person import faker.providers.person.bg_BG import faker.providers.person.cs_CZ import faker.providers.person.de_AT import faker.providers.person.de_DE import faker.providers.person.dk_DK import faker.providers.person.el_GR import faker.providers.person.en import faker.providers.person.en_GB import faker.providers.person.en_US import faker.providers.person.es_ES import faker.providers.person.es_MX import faker.providers.person.fa_IR import faker.providers.person.fi_FI import faker.providers.person.fr_CH import faker.providers.person.fr_FR import faker.providers.person.hi_IN import faker.providers.person.hr_HR import faker.providers.person.it_IT import faker.providers.person.ja_JP import faker.providers.person.ko_KR import faker.providers.person.lt_LT import faker.providers.person.lv_LV import faker.providers.person.ne_NP import faker.providers.person.nl_NL import faker.providers.person.no_NO import faker.providers.person.pl_PL import faker.providers.person.pt_BR import faker.providers.person.pt_PT import faker.providers.person.ru_RU import faker.providers.person.sl_SI import faker.providers.person.sv_SE import faker.providers.person.tr_TR import faker.providers.person.uk_UA import faker.providers.person.zh_CN import faker.providers.person.zh_TW import faker.providers.phone_number import faker.providers.phone_number.bg_BG import faker.providers.phone_number.bs_BA import faker.providers.phone_number.cs_CZ import faker.providers.phone_number.de_DE import faker.providers.phone_number.dk_DK import faker.providers.phone_number.el_GR import faker.providers.phone_number.en_AU import faker.providers.phone_number.en_CA import faker.providers.phone_number.en_GB import faker.providers.phone_number.en_US import faker.providers.phone_number.es_ES import faker.providers.phone_number.es_MX import faker.providers.phone_number.fa_IR import 
faker.providers.phone_number.fi_FI import faker.providers.phone_number.fr_CH import faker.providers.phone_number.fr_FR import faker.providers.phone_number.hi_IN import faker.providers.phone_number.hr_HR import faker.providers.phone_number.it_IT import faker.providers.phone_number.ja_JP import faker.providers.phone_number.ko_KR import faker.providers.phone_number.lt_LT import faker.providers.phone_number.lv_LV import faker.providers.phone_number.ne_NP import faker.providers.phone_number.nl_BE import faker.providers.phone_number.nl_NL import faker.providers.phone_number.no_NO import faker.providers.phone_number.pl_PL import faker.providers.phone_number.pt_BR import faker.providers.phone_number.pt_PT import faker.providers.phone_number.ru_RU import faker.providers.phone_number.sk_SK import faker.providers.phone_number.sl_SI import faker.providers.phone_number.sv_SE import faker.providers.phone_number.tr_TR import faker.providers.phone_number.uk_UA import faker.providers.phone_number.zh_CN import faker.providers.phone_number.zh_TW import faker.providers.profile import faker.providers.profile.en_US import faker.providers.python import faker.providers.python.en_US import faker.providers.ssn import faker.providers.ssn.en_CA import faker.providers.ssn.en_US import faker.providers.ssn.fi_FI import faker.providers.ssn.fr_CH import faker.providers.ssn.hr_HR import faker.providers.ssn.it_IT import faker.providers.ssn.ko_KR import faker.providers.ssn.nl_BE import faker.providers.ssn.nl_NL import faker.providers.ssn.pt_BR import faker.providers.ssn.ru_RU import faker.providers.ssn.sv_SE import faker.providers.ssn.uk_UA import faker.providers.ssn.zh_CN import faker.providers.ssn.zh_TW import faker.providers.user_agent import faker.providers.user_agent.en_US import faker.utils
jjhelmus/berryconda
recipes/faker/run_test.py
Python
bsd-3-clause
7,740
#!/usr/bin/env python3
"""Cycle solid-color test frames through the 'forward' camera message framework."""
import cv2
import numpy as np
from vision import camera_message_framework
import itertools
import time

# Frame geometry (rows, cols, channels) and total byte size of one frame.
shape = (500, 500, 3)
size = shape[0] * shape[1] * shape[2]


def image_of(axes):
    """Return a frame with every channel listed in ``axes`` saturated to 255."""
    frame = np.zeros(shape, dtype=np.uint8)
    for channel in axes:
        frame[:, :, channel] = 255
    return frame


# (frame, human-readable name) pairs, one per primary/secondary BGR color.
black = image_of([]), 'black'
red = image_of([2]), 'red'
green = image_of([1]), 'green'
blue = image_of([0]), 'blue'
yellow = image_of([2, 1]), 'yellow'
cyan = image_of([1, 0]), 'cyan'
pink = image_of([0, 2]), 'pink'
white = image_of([0, 1, 2]), 'white'
images = [black, red, green, blue, yellow, cyan, pink, white]

# Shared-memory frame writer for the 'forward' camera direction.
f = camera_message_framework.Creator('forward', size)


def main():
    """Write one frame per second, cycling through the color sequence forever."""
    for frame, name in itertools.cycle(images):
        f.write_frame(frame, int(time.time() * 1000))
        print('wrote {}'.format(name))
        time.sleep(1)


if __name__ == '__main__':
    main()
cuauv/software
vision/utils/image_ordering_test.py
Python
bsd-3-clause
877
import unittest
import tempfile

from jsonconfigparser import JSONConfigParser, NoSectionError, ParseError


class JSONConfigTestCase(unittest.TestCase):
    """Unit tests for JSONConfigParser: parsing, lookup precedence and error reporting."""

    def test_init(self):
        # Construction alone must not raise.
        JSONConfigParser()

    def test_read_string(self):
        cf = JSONConfigParser()
        cf.read_string((
            '[section]\n'
            '# comment comment\n'
            'foo = "bar"\n'
            '\n'
            '[section2]\n'
            'bar = "baz"\n'
        ))
        self.assertEqual(cf.get('section', 'foo'), 'bar')

    def test_read_file(self):
        string = '[section]\n' + \
            'foo = "bar"'
        fp = tempfile.NamedTemporaryFile('w+')
        fp.write(string)
        fp.seek(0)
        cf = JSONConfigParser()
        cf.read_file(fp)
        self.assertEqual(cf.get('section', 'foo'), 'bar')

    def test_get(self):
        cf = JSONConfigParser()
        cf.add_section('section')
        cf.set('section', 'section', 'set-in-section')
        self.assertEqual(cf.get('section', 'section'), 'set-in-section')

    def test_get_from_defaults(self):
        # Defaults apply only once the section itself exists.
        cf = JSONConfigParser()
        cf.set(cf.default_section, 'option', 'set-in-defaults')
        try:
            cf.get('section', 'option')
        except NoSectionError:
            pass
        else:  # pragma: no cover
            self.fail("Only fall back to defaults if section exists")
        cf.add_section('section')
        self.assertEqual(cf.get('section', 'option'),
                         'set-in-defaults',
                         msg="get should fall back to defaults if value not \ set in section")
        cf.set('section', 'option', 'set-normally')
        self.assertEqual(cf.get('section', 'option'),
                         'set-normally',
                         msg="get shouldn't fall back if option is set \ normally")

    def test_get_from_vars(self):
        # Explicit vars= take precedence over section contents.
        cf = JSONConfigParser()
        cf.add_section('section')
        cf.set('section', 'option', 'set-in-section')
        self.assertEqual(cf.get('section', 'option',
                                vars={'option': 'set-in-vars'}),
                         'set-in-vars',
                         msg="vars should take priority over options in \ section")
        self.assertEqual(cf.get('section', 'option',
                                vars={}),
                         'set-in-section',
                         msg="get should fall back to section if option not \ in vars")

    def test_get_from_fallback(self):
        cf = JSONConfigParser()
        cf.add_section('section')
        # returns from fallback if section exists
        self.assertEqual(cf.get('section', 'unset', 'fallback'), 'fallback')
        try:
            cf.get('nosection', 'unset', 'fallback')
        except NoSectionError:
            pass
        else:  # pragma: no cover
            self.fail()

    def test_has_option(self):
        cf = JSONConfigParser()
        # option in nonexistant section does not exist
        self.assertFalse(cf.has_option('nonexistant', 'unset'))
        cf.add_section('section')
        self.assertFalse(cf.has_option('section', 'unset'),
                         msg="has_option should return False if section \ exists but option is unset")
        cf.set('section', 'set', 'set-normally')
        self.assertTrue(cf.has_option('section', 'set'),
                        msg="has option should return True if option is set \ normally")
        cf.set(cf.default_section, 'default', 'set-in-defaults')
        self.assertTrue(cf.has_option('section', 'default'),
                        msg="has_option should return True if option set in \ defaults")

    def test_remove_option(self):
        cf = JSONConfigParser()
        cf.add_section('section')
        cf.set('section', 'normal', 'set-normally')
        cf.set(cf.default_section, 'default', 'set-in-defaults')
        # can remove normal options
        self.assertTrue(cf.remove_option('section', 'normal'))
        self.assertFalse(cf.has_option('section', 'normal'))
        # can't remove defaults accidentally (maybe there should be shadowing)
        self.assertFalse(cf.remove_option('section', 'default'))
        self.assertEqual(cf.get('section', 'default'), 'set-in-defaults')

    def test_invalid_section(self):
        # Malformed section headers must raise ParseError with the right line number
        # and must leave the parser untouched.
        cf = JSONConfigParser()
        try:
            cf.read_string((
                '[valid]\n'
                'irrelevant = "meh"\n'
                '[]'
            ))
        except ParseError as e:
            self.assertEqual(e.lineno, 3)
            # check that nothing was added
            self.assertEqual(sum(1 for _ in cf.sections()), 0)
        else:  # pragma: no cover
            self.fail()

        try:
            cf.read_string((
                '[nooooooooooooooooooo'
            ))
        except ParseError as e:
            self.assertEqual(e.lineno, 1)
            # check that nothing was added
            self.assertEqual(sum(1 for _ in cf.sections()), 0)
        else:  # pragma: no cover
            self.fail()

    def test_invalid_values(self):
        # Malformed JSON values must raise ParseError and leave the parser untouched.
        cf = JSONConfigParser()
        try:
            cf.read_string((
                '[section]\n'
                'unmatched = [1,2,3}'
            ))
        except ParseError as e:
            self.assertEqual(e.lineno, 2)
            # check that nothing was added
            self.assertEqual(sum(1 for _ in cf.sections()), 0)
        else:  # pragma: no cover
            self.fail()

        try:
            cf.read_string((
                '[section]\n'
                'unterminated = "something\n'
            ))
        except ParseError as e:
            self.assertEqual(e.lineno, 2)
            # check that nothing was added
            self.assertEqual(sum(1 for _ in cf.sections()), 0)
        else:  # pragma: no cover
            self.fail()


# Convenience suite used by the package's test runner.
suite = unittest.TestLoader().loadTestsFromTestCase(JSONConfigTestCase)
bwhmather/json-config-parser
jsonconfigparser/tests/__init__.py
Python
bsd-3-clause
6,075
#coding=utf-8 """ Command-line interface utilities for Trigger tools. Intended for re-usable pieces of code like user prompts, that don't fit in other utils modules. """ __author__ = 'Jathan McCollum' __maintainer__ = 'Jathan McCollum' __email__ = 'jathan.mccollum@teamaol.com' __copyright__ = 'Copyright 2006-2012, AOL Inc.' import datetime from fcntl import ioctl import os import pwd from pytz import timezone import struct import sys import termios import time import tty # Exports __all__ = ('yesno', 'get_terminal_width', 'get_terminal_size', 'Whirlygig', 'NullDevice', 'print_severed_head', 'min_sec', 'pretty_time', 'proceed', 'get_user') # Functions def yesno(prompt, default=False, autoyes=False): """ Present a yes-or-no prompt, get input, and return a boolean. The ``default`` argument is ignored if ``autoyes`` is set. :param prompt: Prompt text :param default: Yes if True; No if False :param autoyes: Automatically return True Default behavior (hitting "enter" returns ``False``):: >>> yesno('Blow up the moon?') Blow up the moon? (y/N) False Reversed behavior (hitting "enter" returns ``True``):: >>> yesno('Blow up the moon?', default=True) Blow up the moon? (Y/n) True Automatically return ``True`` with ``autoyes``; no prompt is displayed:: >>> yesno('Blow up the moon?', autoyes=True) True """ if autoyes: return True sys.stdout.write(prompt) if default: sys.stdout.write(' (Y/n) ') else: sys.stdout.write(' (y/N) ') sys.stdout.flush() fd = sys.stdin.fileno() attr = termios.tcgetattr(fd) try: tty.setraw(fd) yn = sys.stdin.read(1) finally: termios.tcsetattr(fd, termios.TCSANOW, attr) print '' if yn in ('y', 'Y'): return True elif yn in ('n', 'N'): return False else: return default def proceed(): """Present a proceed prompt. Return ``True`` if Y, else ``False``""" return raw_input('\nDo you wish to proceed? 
[y/N] ').lower().startswith('y') def get_terminal_width(): """Find and return stdout's terminal width, if applicable.""" try: width = struct.unpack("hhhh", ioctl(1, termios.TIOCGWINSZ, ' '*8))[1] except IOError: width = sys.maxint return width def get_terminal_size(): """Find and return stdouts terminal size as (height, width)""" rows, cols = os.popen('stty size', 'r').read().split() return rows, cols def get_user(): """Return the name of the current user.""" return pwd.getpwuid(os.getuid())[0] def print_severed_head(): """ Prints a demon holding a severed head. Best used when things go wrong, like production-impacting network outages caused by fat-fingered ACL changes. Thanks to Jeff Sullivan for this best error message ever. """ print r""" _( (~\ _ _ / ( \> > \ -/~/ / ~\ :; \ _ > /(~\/ || | | /\ ;\ |l _____ |; ( \/ > > _\\)\)\)/ ;;; `8o __-~ ~\ d| \ // ///(())(__/~;;\ "88p;. -. _\_;.oP (_._/ / (((__ __ \\ \ `>,% (\ (\./)8" ;:' i )))--`.'-- (( ;,8 \ ,;%%%: ./V^^^V' ;. ;. ((\ | /)) .,88 `: ..,,;;;;,-::::::'_::\ ||\ ;[8: ; )| ~-~ |(|(888; ..``'::::8888oooooo. :\`^^^/,,~--._ |88:: | |\ -===- /| \8;; ``:. oo.8888888888:`((( o.ooo8888Oo;:;:' | |_~-___-~_| `-\. ` `o`88888888b` )) 888b88888P""' ; ; ~~~~;~~ "`--_`. b`888888888;(.,"888b888" ..::;-' ; ; ~"-.... b`8888888:::::.`8888. .:;;;'' ; ; `:::. `:::OOO:::::::.`OO' ;;;'' : ; `. "``::::::'' .' ; `. \_ / ; ; +: ~~-- `:' -'; ACL LOADS FAILED `: : .::/ ; ;;+_ :::. :..;;; YOU LOSE ;;;;;;,;;;;;;;;,; """ def pretty_time(t): """ Print a pretty version of timestamp, including timezone info. Expects the incoming datetime object to have proper tzinfo. 
:param t: A ``datetime.datetime`` object >>> import datetime >>> from pytz import timezone >>> localzone = timezone('US/Eastern') <DstTzInfo 'US/Eastern' EST-1 day, 19:00:00 STD> >>> t = datetime.datetime.now(localzone) >>> print t 2011-07-19 12:40:30.820920-04:00 >>> print pretty_time(t) 09:40 PDT >>> t = datetime.datetime(2011,07,20,04,13,tzinfo=localzone) >>> print t 2011-07-20 04:13:00-05:00 >>> print pretty_time(t) tomorrow 02:13 PDT """ from trigger.conf import settings localzone = timezone(os.environ.get('TZ', settings.BOUNCE_DEFAULT_TZ)) t = t.astimezone(localzone) midnight = datetime.datetime.combine(datetime.datetime.now(), datetime.time(tzinfo=localzone)) midnight += datetime.timedelta(1) if t < midnight: return t.strftime('%H:%M %Z') elif t < midnight + datetime.timedelta(1): return t.strftime('tomorrow %H:%M %Z') elif t < midnight + datetime.timedelta(6): return t.strftime('%A %H:%M %Z') else: return t.strftime('%Y-%m-%d %H:%M %Z') def min_sec(secs): """ Takes an epoch timestamp and returns string of minutes:seconds. :param secs: Timestamp (in seconds) >>> import time >>> start = time.time() # Wait a few seconds >>> finish = time.time() >>> min_sec(finish - start) '0:11' """ secs = int(secs) return '%d:%02d' % (secs / 60, secs % 60) def setup_tty_for_pty(func): """ Sets up tty for raw mode while retaining original tty settings and then starts the reactor to connect to the pty. Upon exiting pty, restores original tty settings. :param func: The callable to run after the tty is ready, such as ``reactor.run`` """ # Preserve original tty settings stdin_fileno = sys.stdin.fileno() old_ttyattr = tty.tcgetattr(stdin_fileno) try: # Enter raw mode on the local tty. tty.setraw(stdin_fileno) raw_ta = tty.tcgetattr(stdin_fileno) raw_ta[tty.LFLAG] |= tty.ISIG raw_ta[tty.OFLAG] |= tty.OPOST | tty.ONLCR # Pass ^C through so we can abort traceroute, etc. 
raw_ta[tty.CC][tty.VINTR] = '\x18' # ^X is the new ^C # Ctrl-Z is used by a lot of vendors to exit config mode raw_ta[tty.CC][tty.VSUSP] = 0 # disable ^Z tty.tcsetattr(stdin_fileno, tty.TCSANOW, raw_ta) # Execute our callable here func() finally: # Restore original tty settings tty.tcsetattr(stdin_fileno, tty.TCSANOW, old_ttyattr) def update_password_and_reconnect(hostname): """ Prompts the user to update their password and reconnect to the target device :param hostname: Hostname of the device to connect to. """ if yesno('Authentication failed, would you like to update your password?', default=True): from trigger import tacacsrc tacacsrc.update_credentials(hostname) if yesno('\nReconnect to %s?' % hostname, default=True): # Replaces the current process w/ same pid os.execl(sys.executable, sys.executable, *sys.argv) # Classes class NullDevice(object): """ Used to supress output to ``sys.stdout`` (aka ``print``). Example:: >>> from trigger.utils.cli import NullDevice >>> import sys >>> print "1 - this will print to STDOUT" 1 - this will print to STDOUT >>> original_stdout = sys.stdout # keep a reference to STDOUT >>> sys.stdout = NullDevice() # redirect the real STDOUT >>> print "2 - this won't print" >>> >>> sys.stdout = original_stdout # turn STDOUT back on >>> print "3 - this will print to SDTDOUT" 3 - this will print to SDTDOUT """ def write(self, s): pass class Whirlygig(object): """ Prints a whirlygig for use in displaying pending operation in a command-line tool. Guaranteed to make the user feel warm and fuzzy and be 1000% bug-free. :param start_msg: The status message displayed to the user (e.g. "Doing stuff:") :param done_msg: The completion message displayed upon completion (e.g. 
"Done.") :param max: Integer of the number of whirlygig repetitions to perform Example:: >>> Whirlygig("Doing stuff:", "Done.", 12).run() """ def __init__(self, start_msg="", done_msg="", max=100): self.unbuff = os.fdopen(sys.stdout.fileno(), 'w', 0) self.start_msg = start_msg self.done_msg = done_msg self.max = max self.whirlygig = ['|', '/', '-', '\\'] self.whirl = self.whirlygig[:] self.first = False def do_whirl(self, whirl): if not self.first: self.unbuff.write(self.start_msg + " ") self.first = True self.unbuff.write('\b%s' % whirl.pop(0)) def run(self): """Executes the whirlygig!""" cnt = 1 while cnt <= self.max: try: self.do_whirl(self.whirl) except IndexError: self.whirl = self.whirlygig[:] time.sleep(.1) cnt += 1 print '\b' + self.done_msg
sysbot/trigger
trigger/utils/cli.py
Python
bsd-3-clause
9,769
# Copyright (c) 2006 The Regents of The University of Michigan # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Joel Hestness options.clusters = 4 options.cmd = 'gem5_gpu_bh' options.options = '1024 1 0'
ayoubg/gem5-graphics
gem5-gpu/tests/quick/se_gpu/20.bh/test.py
Python
bsd-3-clause
1,653
#------------------------------------------------------------------------------ # Copyright (c) 2013, Nucleic Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #------------------------------------------------------------------------------ from atom.api import Int, Typed from enaml.colors import Color from enaml.widgets.color_dialog import ProxyColorDialog from .QtCore import Signal from .QtGui import QColor, QColorDialog from .qt_toolkit_dialog import QtToolkitDialog def color_from_qcolor(q): """ Convert a QColor into an Enaml Color. Parameters ---------- q : QColor The Qt color to convert to Enaml Color. Returns ------- result : Color or None An Enaml Color or None if the QColor is not valid. """ if not q.isValid(): return None return Color(q.red(), q.green(), q.blue(), q.alpha()) # Guard flags CURRENT_GUARD = 0x1 class QColorDialogEx(QColorDialog): """ A custom QColorDialog which emits a custom finished signal. """ #: A signal emitted at the end of the 'done' method. This works #: around the standard QColorDialog behavior which emits the #: 'colorSelected' signal *after* the 'finished' signal. reallyFinished = Signal(int) def done(self, result): """ A reimplemented done method. This method emits the 'reallyFinished' signal on completion. """ super(QColorDialogEx, self).done(result) self.reallyFinished.emit(result) class QtColorDialog(QtToolkitDialog, ProxyColorDialog): """ A Qt implementation of an Enaml ProxyColorDialog. """ #: A reference to the widget created by the proxy. widget = Typed(QColorDialogEx) #: Cyclic notification guard. This a bitfield of multiple guards. _guard = Int(0) def create_widget(self): """ Create the underlying QColorDialog. """ self.widget = QColorDialogEx(self.parent_widget()) def init_widget(self): """ Initialize the underlying widget. """ # Do not call super(...) as it connects the standard 'finished' # signal. 
This widget uses the custom 'reallyFinished' signal. d = self.declaration self.set_title(d.title) self.set_current_color(d.current_color) self.set_show_alpha(d.show_alpha) self.set_show_buttons(d.show_buttons) widget = self.widget widget.currentColorChanged.connect(self.on_current_color_changed) widget.colorSelected.connect(self.on_color_selected) widget.reallyFinished.connect(self.on_finished) #-------------------------------------------------------------------------- # Utility Methods #-------------------------------------------------------------------------- def get_default_title(self): """ Get the default window title for the color dialog. """ return u'Select Color' #-------------------------------------------------------------------------- # Signal Handlers #-------------------------------------------------------------------------- def on_current_color_changed(self, qcolor): """ Handle the 'currentColorChanged' signal from the widget. """ d = self.declaration if d is not None: self._guard |= CURRENT_GUARD try: d.current_color = color_from_qcolor(qcolor) finally: self._guard &= ~CURRENT_GUARD def on_color_selected(self, qcolor): """ Handle the 'colorSelected' signal from the widget. """ d = self.declaration if d is not None: d.selected_color = color_from_qcolor(qcolor) #-------------------------------------------------------------------------- # ProxyColorDialog API #-------------------------------------------------------------------------- @staticmethod def custom_count(): """ Get the number of available custom colors. """ return QColorDialog.customCount() @staticmethod def custom_color(index): """ Get the custom color for the given index. """ qrgb = QColorDialog.customColor(index) return color_from_qcolor(QColor.fromRgba(qrgb)) @staticmethod def set_custom_color(index, color): """ Set the custom color for the given index. """ QColorDialog.setCustomColor(index, color.argb) def set_current_color(self, color): """ Set the current color for the underlying widget. 
""" if not self._guard & CURRENT_GUARD: if color is not None: qcolor = QColor.fromRgba(color.argb) else: qcolor = QColor() self.widget.setCurrentColor(qcolor) def set_show_alpha(self, show): """ Set the show alpha option on the underlying widget. """ widget = self.widget opt = widget.options() if show: opt |= QColorDialog.ShowAlphaChannel else: opt &= ~QColorDialog.ShowAlphaChannel widget.setOptions(opt) def set_show_buttons(self, show): """ Set the show buttons option on the underlying widget. """ widget = self.widget opt = widget.options() if show: opt &= ~QColorDialog.NoButtons else: opt |= QColorDialog.NoButtons widget.setOptions(opt)
ContinuumIO/ashiba
enaml/enaml/qt/qt_color_dialog.py
Python
bsd-3-clause
5,551
# -*- coding: utf-8 -*- "kanbanpad.com API v1 library for Python" from kanbanpad import _version VERSION = _version.tuple __author__ = "Marek Wywia\xc5\x82" __contact__ = "onjinx@gmail.com" __homepage__ = "http://github.com/onjin/python-kanbanpad" __version__ = _version.dotted
onjin/python-kanbanpad
kanbanpad/__init__.py
Python
bsd-3-clause
281
from collections import namedtuple from datetime import date, datetime, timedelta from cms.models import CMSPlugin from django.db import models from django.utils.formats import date_format, time_format from django.utils.functional import cached_property from django.utils.translation import ugettext_lazy as _ from ..conf import settings from .agegroup import AgeGroup from .department import Department from .roles import Leader from .schoolyear import SchoolYear from .subjects import Subject, SubjectDiscount, SubjectGroup, SubjectRegistration, SubjectType from .targetgroup import TargetGroup from .utils import PaymentStatus, copy_related_objects class Orderable(Subject): duration = models.DurationField(_("duration"), help_text=_("Format: HH:MM:SS")) due_from_days = models.IntegerField( _("number of days to send the payment request before event date"), blank=True, null=True, help_text=_( "If set, payment request will be sent this number of days before event date. " "If not set, payment request will be sent when registration is approved." 
), ) due_date_days = models.IntegerField(_("number of days to due date before event date"), default=0) class Meta: app_label = "leprikon" ordering = ("code", "name") verbose_name = _("orderable event") verbose_name_plural = _("orderable events") @property def inactive_registrations(self): return self.registrations.filter(canceled__isnull=False) def get_times_list(self): return self.duration get_times_list.short_description = _("duration") def copy_to_school_year(old, school_year): new = Orderable.objects.get(id=old.id) new.id, new.pk = None, None new.school_year = school_year new.public = False new.evaluation = "" new.note = "" new.save() new.groups.set(old.groups.all()) new.age_groups.set(old.age_groups.all()) new.target_groups.set(old.target_groups.all()) for leader in old.all_leaders: school_year.leaders.add(leader) new.leaders.set(old.all_leaders) new.questions.set(old.questions.all()) copy_related_objects( new, attachments=old.attachments, times=old.times, variants=old.variants, ) return new class OrderableRegistration(SubjectRegistration): subject_type = SubjectType.ORDERABLE start_date = models.DateField(_("start date")) start_time = models.TimeField(_("start time"), blank=True, null=True) class Meta: app_label = "leprikon" verbose_name = _("orderable event registration") verbose_name_plural = _("orderable event registrations") def get_payment_status(self, d=None): return PaymentStatus( price=self.price, discount=self.get_discounted(d), explanation=",\n".join( discount.explanation.strip() for discount in self.all_discounts if (d is None or discount.accounted.date() <= d) and discount.explanation.strip() ), received=self.get_received(d), returned=self.get_returned(d), current_date=d or date.today(), due_from=self.payment_requested and ( self.payment_requested.date() if self.subject.orderable.due_from_days is None else max( self.start_date - timedelta(days=self.subject.orderable.due_from_days), self.payment_requested.date(), ) ), 
due_date=self.payment_requested and max( self.start_date - timedelta(days=self.subject.orderable.due_date_days), self.payment_requested.date() + timedelta(days=self.subject.min_due_date_days), ), ) @cached_property def end_date(self): if self.start_time: return (datetime.combine(self.start_date, self.start_time) + self.subject.orderable.duration).date() else: return self.start_date + self.subject.orderable.duration @cached_property def end_time(self): if self.start_time: return (datetime.combine(self.start_date, self.start_time) + self.subject.orderable.duration).time() def event_date(self): return "{start}{separator}{end}".format( start=( date_format(datetime.combine(self.start_date, self.start_time), "SHORT_DATETIME_FORMAT") if self.start_time else date_format(self.start_date, "SHORT_DATE_FORMAT") ), separator=" - " if self.start_date != self.end_date or self.end_time is not None else "", end=( (time_format(self.end_time, "TIME_FORMAT") if self.end_time else "") if self.start_date == self.end_date else ( date_format(datetime.combine(self.end_date, self.end_time), "SHORT_DATETIME_FORMAT") if self.end_time else date_format(self.end_date, "SHORT_DATE_FORMAT") ) ), ) event_date.admin_order_field = "start_date" event_date.short_description = _("event date") class OrderableDiscount(SubjectDiscount): registration = models.ForeignKey( OrderableRegistration, on_delete=models.CASCADE, related_name="discounts", verbose_name=_("registration") ) class Meta: app_label = "leprikon" verbose_name = _("orderable event discount") verbose_name_plural = _("orderable event discounts") ordering = ("accounted",) class OrderablePlugin(CMSPlugin): event = models.ForeignKey(Orderable, on_delete=models.CASCADE, related_name="+", verbose_name=_("event")) template = models.CharField( _("template"), max_length=100, choices=settings.LEPRIKON_ORDERABLE_TEMPLATES, default=settings.LEPRIKON_ORDERABLE_TEMPLATES[0][0], help_text=_("The template used to render plugin."), ) class Meta: app_label = 
"leprikon" class OrderableListPlugin(CMSPlugin): school_year = models.ForeignKey( SchoolYear, blank=True, null=True, on_delete=models.CASCADE, related_name="+", verbose_name=_("school year") ) departments = models.ManyToManyField( Department, blank=True, related_name="+", verbose_name=_("departments"), help_text=_("Keep empty to skip searching by departments."), ) event_types = models.ManyToManyField( SubjectType, blank=True, limit_choices_to={"subject_type": SubjectType.ORDERABLE}, related_name="+", verbose_name=_("event types"), help_text=_("Keep empty to skip searching by event types."), ) age_groups = models.ManyToManyField( AgeGroup, blank=True, related_name="+", verbose_name=_("age groups"), help_text=_("Keep empty to skip searching by age groups."), ) target_groups = models.ManyToManyField( TargetGroup, blank=True, related_name="+", verbose_name=_("target groups"), help_text=_("Keep empty to skip searching by target groups."), ) groups = models.ManyToManyField( SubjectGroup, blank=True, related_name="+", verbose_name=_("event groups"), help_text=_("Keep empty to skip searching by groups."), ) leaders = models.ManyToManyField( Leader, verbose_name=_("leaders"), blank=True, related_name="+", help_text=_("Keep empty to skip searching by leaders."), ) template = models.CharField( _("template"), max_length=100, choices=settings.LEPRIKON_ORDERABLELIST_TEMPLATES, default=settings.LEPRIKON_ORDERABLELIST_TEMPLATES[0][0], help_text=_("The template used to render plugin."), ) class Meta: app_label = "leprikon" def copy_relations(self, oldinstance): self.departments.set(oldinstance.departments.all()) self.event_types.set(oldinstance.event_types.all()) self.groups.set(oldinstance.groups.all()) self.age_groups.set(oldinstance.age_groups.all()) self.target_groups.set(oldinstance.age_groups.all()) self.leaders.set(oldinstance.leaders.all()) @cached_property def all_departments(self): return list(self.departments.all()) @cached_property def all_event_types(self): return 
list(self.event_types.all()) @cached_property def all_groups(self): return list(self.groups.all()) @cached_property def all_age_groups(self): return list(self.age_groups.all()) @cached_property def all_target_groups(self): return list(self.target_groups.all()) @cached_property def all_leaders(self): return list(self.leaders.all()) Group = namedtuple("Group", ("group", "objects")) def render(self, context): school_year = ( self.school_year or getattr(context.get("request"), "school_year") or SchoolYear.objects.get_current() ) events = Orderable.objects.filter(school_year=school_year, public=True).distinct() if self.all_departments: events = events.filter(department__in=self.all_departments) if self.all_event_types: events = events.filter(subject_type__in=self.all_event_types) if self.all_age_groups: events = events.filter(age_groups__in=self.all_age_groups) if self.all_target_groups: events = events.filter(target_groups__in=self.all_target_groups) if self.all_leaders: events = events.filter(leaders__in=self.all_leaders) if self.all_groups: events = events.filter(groups__in=self.all_groups) groups = self.all_groups elif self.all_event_types: groups = SubjectGroup.objects.filter(subject_types__in=self.all_event_types) else: groups = SubjectGroup.objects.all() context.update( { "school_year": school_year, "events": events, "groups": (self.Group(group=group, objects=events.filter(groups=group)) for group in groups), } ) return context class FilteredOrderableListPlugin(CMSPlugin): school_year = models.ForeignKey( SchoolYear, blank=True, null=True, on_delete=models.CASCADE, related_name="+", verbose_name=_("school year") ) event_types = models.ManyToManyField( SubjectType, limit_choices_to={"subject_type": SubjectType.ORDERABLE}, related_name="+", verbose_name=_("event types"), ) class Meta: app_label = "leprikon" def copy_relations(self, oldinstance): self.event_types = oldinstance.event_types.all() @cached_property def all_event_types(self): return 
list(self.event_types.all()) def render(self, context): school_year = ( self.school_year or getattr(context.get("request"), "school_year") or SchoolYear.objects.get_current() ) from ..forms.subjects import SubjectFilterForm form = SubjectFilterForm( subject_type_type=SubjectType.ORDERABLE, subject_types=self.all_event_types, school_year=school_year, is_staff=context["request"].user.is_staff, data=context["request"].GET, ) context.update( { "school_year": school_year, "form": form, "events": form.get_queryset(), } ) return context
leprikon-cz/leprikon
leprikon/models/orderables.py
Python
bsd-3-clause
11,902
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.utils.translation import get_language, to_locale from cms.plugin_base import CMSPluginBase from cms.plugin_pool import plugin_pool from .conf import settings from .models import FacebookComments class FacebookCommentsPlugin(CMSPluginBase): module = settings.DJANGOCMS_FBCOMMENTS_PLUGIN_MODULE name = settings.DJANGOCMS_FBCOMMENTS_PLUGIN_NAME model = FacebookComments render_template = settings.DJANGOCMS_FBCOMMENTS_PLUGIN_TEMPLATE def render(self, context, instance, placeholder): context = super(FacebookCommentsPlugin, self).render(context, instance, placeholder) request = context.get('request') context['language_code'] = to_locale(get_language()) context['page_url'] = request.build_absolute_uri(location=request.path_info) return context class Media: css = { 'all': ('css/djangocms_fbcomments/admin/djangocms_fbcomments.css',) } js = ('js/djangocms_fbcomments/admin/djangocms_fbcomments.js',) plugin_pool.register_plugin(FacebookCommentsPlugin)
mishbahr/djangocms-fbcomments
djangocms_fbcomments/cms_plugins.py
Python
bsd-3-clause
1,136
from sklearn2sql_heroku.tests.classification import generic as class_gen class_gen.test_model("AdaBoostClassifier" , "iris" , "mysql")
antoinecarme/sklearn2sql_heroku
tests/classification/iris/ws_iris_AdaBoostClassifier_mysql_code_gen.py
Python
bsd-3-clause
137
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'TtTrip.date' db.add_column(u'timetable_tttrip', 'date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True), keep_default=False) def backwards(self, orm): # Deleting field 'TtTrip.date' db.delete_column(u'timetable_tttrip', 'date') models = { u'timetable.ttstop': { 'Meta': {'object_name': 'TtStop'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'stop_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'stop_lat': ('django.db.models.fields.FloatField', [], {}), 'stop_lon': ('django.db.models.fields.FloatField', [], {}), 'stop_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'stop_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}) }, u'timetable.ttstoptime': { 'Meta': {'object_name': 'TtStopTime'}, 'exp_arrival': ('django.db.models.fields.DateTimeField', [], {}), 'exp_departure': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'stop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['timetable.TtStop']"}), 'stop_sequence': ('django.db.models.fields.IntegerField', [], {}), 'trip': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['timetable.TtTrip']"}) }, u'timetable.tttrip': { 'Meta': {'object_name': 'TtTrip'}, 'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'shape_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'trip_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}) } } complete_apps = ['timetable']
hasadna/OpenTrain
webserver/opentrain/timetable/migrations/0006_auto__add_field_tttrip_date.py
Python
bsd-3-clause
2,370
from __future__ import unicode_literals from django.contrib.syndication.views import Feed from django.shortcuts import get_object_or_404 from .models import Category class CategoryFeed(Feed): def get_object(self, request, slug): return get_object_or_404(Category.objects.categories(), slug=slug) def link(self, obj): return obj.get_absolute_url() def title(self, obj): return obj.name def description(self, obj): return obj.description def items(self, obj): return obj.articles.published() def item_description(self, item): return item.content.rendered def item_pubdate(self, item): return item.created def item_categories(self, item): return item.tags.all() def item_author_name(self, item): return item.created_by.username
eliostvs/django-kb
kb/category/feeds.py
Python
bsd-3-clause
846
#!/usr/bin/env python # Copyright (c) 2009, Purdue University # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright notice, this # list of conditions and the following disclaimer in the documentation and/or # other materials provided with the distribution. # # Neither the name of the Purdue University nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Setup script for roster config manager.""" __copyright__ = 'Copyright (C) 2009, Purdue University' __license__ = 'BSD' __version__ = '#TRUNK#' try: from setuptools import setup except ImportError: from distutils.core import setup current_version = __version__ if( __version__.startswith('#') ): current_version = '1000' setup(name='RosterConfigManager', version=current_version, description='RosterConfigManager is a Bind9 config importer/exporter for ' 'Roster', long_description='Roster is DNS management software for use with Bind 9. ' 'Roster is written in Python and uses a MySQL database ' 'with an XML-RPC front-end. It contains a set of ' 'command line user tools that connect to the XML-RPC ' 'front-end. The config files for Bind are generated ' 'from the MySQL database so a live MySQL database is ' 'not needed.', maintainer='Roster Development Team', maintainer_email='roster-discussion@googlegroups.com', url='http://code.google.com/p/roster-dns-management/', packages=['roster_config_manager'], license=__license__, classifiers=['Development Status :: 4 - Beta', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: BSD License', 'Operating System :: Unix', 'Programming Language :: Python :: 2.5', 'Topic :: Internet :: Name Service (DNS)'], install_requires = ['dnspython>=1.6.0', 'IPy>=0.62', 'iscpy>=1.0.5', 'fabric>=1.4.0', 'RosterCore>=%s' % current_version], scripts = ['scripts/dnsconfigsync', 'scripts/dnszoneimporter', 'scripts/dnstreeexport', 'scripts/dnscheckconfig', 'scripts/dnsexportconfig', 'scripts/dnsrecover', 'scripts/dnszonecompare', 'scripts/dnsquerycheck', 'scripts/dnsservercheck', 'scripts/dnsversioncheck'] )
stephenlienharrell/roster-dns-management
roster-config-manager/setup.py
Python
bsd-3-clause
3,685
# -*- coding: utf-8 -*- # ###################################################################### # Copyright (c) 2014, Brookhaven Science Associates, Brookhaven # # National Laboratory. All rights reserved. # # # # @author: Li Li (lili@bnl.gov) # # created on 08/16/2014 # # # # Redistribution and use in source and binary forms, with or without # # modification, are permitted provided that the following conditions # # are met: # # # # * Redistributions of source code must retain the above copyright # # notice, this list of conditions and the following disclaimer. # # # # * Redistributions in binary form must reproduce the above copyright # # notice this list of conditions and the following disclaimer in # # the documentation and/or other materials provided with the # # distribution. # # # # * Neither the name of the Brookhaven Science Associates, Brookhaven # # National Laboratory nor the names of its contributors may be used # # to endorse or promote products derived from this software without # # specific prior written permission. # # # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING # # IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # # POSSIBILITY OF SUCH DAMAGE. 
# ########################################################################
"""
Module for xray scattering
"""
from __future__ import absolute_import, division, print_function

from collections import namedtuple
from itertools import repeat
import logging

import numpy as np

from ..utils import q_to_d, d_to_q, twotheta_to_q, q_to_twotheta

logger = logging.getLogger(__name__)


# http://stackoverflow.com/questions/3624753/how-to-provide-additional-initialization-for-a-subclass-of-namedtuple
class HKL(namedtuple('HKL', 'h k l')):
    '''
    Namedtuple sub-class miller indicies (HKL)

    This class enforces that the values are integers.

    Parameters
    ----------
    h : int
    k : int
    l : int

    Attributes
    ----------
    length
    h
    k
    l
    '''
    __slots__ = ()

    def __new__(cls, *args, **kwargs):
        # Coerce every positional and keyword value to int so callers may
        # pass floats or numeric strings; non-integral values raise from
        # int() before the tuple is built.
        args = [int(_) for _ in args]
        for k in list(kwargs):
            kwargs[k] = int(kwargs[k])
        return super(HKL, cls).__new__(cls, *args, **kwargs)

    @property
    def length(self):
        """
        The L2 norm (length) of the hkl vector.
        """
        return np.linalg.norm(self)


class Reflection(namedtuple('Reflection', ('d', 'hkl', 'q'))):
    """
    Namedtuple sub-class for scattering reflection information

    Parameters
    ----------
    d : float
        Plane-spacing

    hkl : `hkl`
        miller indicies

    q : float
        q-value of the reflection

    Attributes
    ----------
    d
    hkl
    q
    """
    __slots__ = ()


class PowderStandard(object):
    """
    Class for providing safe access to powder calibration standards
    data.

    Parameters
    ----------
    name : str
        Name of the standard

    reflections : list
        A list of (d, (h, k, l), q) values.
    """
    def __init__(self, name, reflections):
        # Normalize each entry into a Reflection namedtuple and keep the
        # list sorted by the last field (increasing q).
        self._reflections = [Reflection(d, HKL(*hkl), q)
                             for d, hkl, q in reflections]
        self._reflections.sort(key=lambda x: x[-1])
        self._name = name

    def __str__(self):
        return "Calibration standard: {}".format(self.name)
    __repr__ = __str__

    @property
    def name(self):
        """
        Name of the calibration standard
        """
        return self._name

    @property
    def reflections(self):
        """
        List of the known reflections
        """
        return self._reflections

    def __iter__(self):
        # Iterating a standard yields its Reflection entries in q order.
        return iter(self._reflections)

    def convert_2theta(self, wavelength):
        """
        Convert the measured 2theta values to a different wavelength

        Parameters
        ----------
        wavelength : float
            The new lambda in Angstroms

        Returns
        -------
        two_theta : array
            The new 2theta values in radians
        """
        # q is wavelength independent, so convert via q -> 2theta.
        q = np.array([_.q for _ in self])
        return q_to_twotheta(q, wavelength)

    @classmethod
    def from_lambda_2theta_hkl(cls, name, wavelength, two_theta, hkl=None):
        """
        Method to construct a PowderStandard object from calibrated
        :math:`2\\theta` values.

        Parameters
        ----------
        name : str
            The name of the standard

        wavelength : float
            The wavelength that the calibration data was taken at

        two_theta : array
            The calibrated :math:`2\\theta` values

        hkl : list, optional
            List of (h, k, l) tuples of the Miller indicies that go
            with each measured :math:`2\\theta`.  If not given then
            all of the miller indicies are stored as (0, 0, 0).

        Returns
        -------
        standard : PowderStandard
            The standard object
        """
        q = twotheta_to_q(two_theta, wavelength)
        d = q_to_d(q)
        if hkl is None:
            # todo write test that hits this line
            hkl = repeat((0, 0, 0))
        return cls(name, zip(d, hkl, q))

    @classmethod
    def from_d(cls, name, d, hkl=None):
        """
        Method to construct a PowderStandard object from known
        :math:`d` values.

        Parameters
        ----------
        name : str
            The name of the standard

        d : array
            The known plane spacings

        hkl : list, optional
            List of (h, k, l) tuples of the Miller indicies that go
            with each measured :math:`2\\theta`.  If not given then
            all of the miller indicies are stored as (0, 0, 0).

        Returns
        -------
        standard : PowderStandard
            The standard object
        """
        q = d_to_q(d)
        if hkl is None:
            hkl = repeat((0, 0, 0))
        return cls(name, zip(d, hkl, q))

    def __len__(self):
        return len(self._reflections)


# Si (Standard Reference Material 640d) data taken from
# https://www-s.nist.gov/srmors/certificates/640D.pdf?CFID=3219362&CFTOKEN=c031f50442c44e42-57C377F6-BC7A-395A-F39B8F6F2E4D0246&jsessionid=f030c7ded9b463332819566354567a698744

# CeO2 (Standard Reference Material 674b) data taken from
# http://11bm.xray.aps.anl.gov/documents/NISTSRM/NIST_SRM_676b_%5BZnO,TiO2,Cr2O3,CeO2%5D.pdf

# Alumina (Al2O3), (Standard Reference Material 676a) taken from
# https://www-s.nist.gov/srmors/certificates/676a.pdf?CFID=3259108&CFTOKEN=fa5bb0075f99948c-FA6ABBDA-9691-7A6B-FBE24BE35748DC08&jsessionid=f030e1751fc5365cac74417053f2c344f675

# NOTE: the numeric 2theta / d tables below are certified reference values
# transcribed from the NIST documents above -- do not edit them by hand.
calibration_standards = {
    'Si': PowderStandard.from_lambda_2theta_hkl(
        name='Si',
        wavelength=1.5405929,
        two_theta=np.deg2rad([28.441, 47.3, 56.119, 69.126, 76.371,
                              88.024, 94.946, 106.7, 114.082, 127.532,
                              136.877]),
        hkl=(
            (1, 1, 1), (2, 2, 0), (3, 1, 1), (4, 0, 0), (3, 3, 1),
            (4, 2, 2), (5, 1, 1), (4, 4, 0), (5, 3, 1), (6, 2, 0),
            (5, 3, 3))
    ),
    'CeO2': PowderStandard.from_lambda_2theta_hkl(
        name='CeO2',
        wavelength=1.5405929,
        two_theta=np.deg2rad([28.61, 33.14, 47.54, 56.39, 59.14,
                              69.46]),
        hkl=((1, 1, 1), (2, 0, 0), (2, 2, 0), (3, 1, 1),
             (2, 2, 2), (4, 0, 0))
    ),
    'Al2O3': PowderStandard.from_lambda_2theta_hkl(
        name='Al2O3',
        wavelength=1.5405929,
        two_theta=np.deg2rad([25.574, 35.149, 37.773, 43.351,
                              52.548, 57.497, 66.513, 68.203,
                              76.873, 77.233, 84.348, 88.994,
                              91.179, 95.240, 101.070, 116.085,
                              116.602, 117.835, 122.019, 127.671,
                              129.870, 131.098, 136.056, 142.314,
                              145.153, 149.185, 150.102, 150.413,
                              152.380]),
        hkl=((0, 1, 2), (1, 0, 4), (1, 1, 0), (1, 1, 3),
             (0, 2, 4), (1, 1, 6), (2, 1, 4), (3, 0, 0),
             (1, 0, 10), (1, 1, 9), (2, 2, 3), (0, 2, 10),
             (1, 3, 4), (2, 2, 6), (2, 1, 10), (3, 2, 4),
             (0, 1, 14), (4, 1, 0), (4, 1, 3), (1, 3, 10),
             (3, 0, 12), (2, 0, 14), (1, 4, 6), (1, 1, 15),
             (4, 0, 10), (0, 5, 4), (1, 2, 14), (1, 0, 16),
             (3, 3, 0))
    ),
    'LaB6': PowderStandard.from_d(
        name='LaB6',
        d=[4.156, 2.939, 2.399, 2.078, 1.859, 1.697, 1.469,
           1.385, 1.314, 1.253, 1.200, 1.153, 1.111, 1.039,
           1.008, 0.980, 0.953, 0.929, 0.907, 0.886, 0.848,
           0.831, 0.815, 0.800]
    ),
    'Ni': PowderStandard.from_d(
        name='Ni',
        d=[2.03458234862, 1.762, 1.24592214845, 1.06252597829,
           1.01729117431, 0.881, 0.80846104616, 0.787990355271,
           0.719333487797, 0.678194116208, 0.622961074225,
           0.595664718733, 0.587333333333, 0.557193323722,
           0.537404961852, 0.531262989146, 0.508645587156,
           0.493458701611, 0.488690872874, 0.47091430825,
           0.458785722296, 0.4405, 0.430525121912,
           0.427347771314]
    )
}
"""
Calibration standards

A dictionary holding known powder-pattern calibration standards
"""
giltis/scikit-xray
skxray/core/constants/xrs.py
Python
bsd-3-clause
10,527
# Auto-generated PyAF regression test: exercise the artificial-dataset
# benchmark driver with one fixed parameter combination (presumably the
# file name encodes the same combination -- 32 points, Logit transform,
# MovingMedian trend, no cycle, no AR term, 100 exogenous variables).
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art


# N=32 daily points, deterministic seed, sigma=0.0 (no added noise).
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 0, transform = "Logit", sigma = 0.0, exog_count = 100, ar_order = 0);
antoinecarme/pyaf
tests/artificial/transf_Logit/trend_MovingMedian/cycle_0/ar_/test_artificial_32_Logit_MovingMedian_0__100.py
Python
bsd-3-clause
263
#!/usr/bin/python """ Small web application to retrieve information from uniprot and itag for a given compound. The idea is that for one compound we are able to find out in which reactions it is involved and what are the proteins involved in these reactions. For each of these proteins we can find if there are genes and genes from tomato associated with them. """ from flask import Flask, Response, render_template, request, redirect, url_for from flaskext.wtf import Form, TextField import ConfigParser import datetime import json import os import rdflib import urllib CONFIG = ConfigParser.ConfigParser() CONFIG.readfp(open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'chebi2gene.cfg'))) # Address of the sparql server to query. SERVER = CONFIG.get('chebi2gene', 'sparql_server') # Create the application. APP = Flask(__name__) APP.secret_key = CONFIG.get('chebi2gene', 'secret_key') # Stores in which graphs are the different source of information. GRAPHS = {option: CONFIG.get('graph', option) for option in CONFIG.options('graph')} print GRAPHS class ChebiIDForm(Form): """ Simple text field form to input the chebi identifier or the name of the protein. """ chebi_id = TextField('Chebi ID or molecule name') def convert_to_uniprot_id(data): """ Converts from RHEA Uniprot URI to Uniprot ID. @param data, a dictionary of String: [String] where the keys are reaction ID and the values are protein URI. @return, a dictionary of String: [String] where the keys are reaction ID and the values are protein ID. """ for key in data: proteins = data[key] proteins2 = [] for protein in proteins: prot_id = protein.rsplit(':', 1)[1] proteins2.append(prot_id.strip()) data[key] = proteins2 return data def get_exact_chebi_from_search(name): """ Search the chebi database for molecule having the given string in their name. The data returned contains the chebi identifier, the name and synonyms of the molecule in chebi. @param name, a string, name of the molecule to search in chebi. 
@return, a dictionary containing all the molecule found for having the input string in their name. The data structure returned is like: {string: {'name': string, 'syn': [String]}}, where the keys are the chebi identifier and the values are dictionaries containing the name of the molecules and a list of its synonym. """ query = ''' PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#> PREFIX obo:<http://purl.obolibrary.org/obo#> SELECT DISTINCT ?id ?name ?syn FROM <%(chebi)s> WHERE { { ?id rdfs:label ?name . ?id obo:Synonym ?syn . FILTER ( regex(?name, "%(search)s", "i") ) } } ORDER BY ?id ''' % {'search': name, 'chebi': GRAPHS['chebi']} data_js = sparql_query(query, SERVER) if not data_js: return molecules = {} for entry in data_js['results']['bindings']: chebi_id = entry['id']['value'].rsplit('/', 1)[1].split('_')[1] if chebi_id in molecules: molecules[chebi_id]['syn'].append(entry['syn']['value']) else: molecules[chebi_id] = { 'name': [entry['name']['value']], 'syn': [entry['syn']['value']] } return molecules def get_extended_chebi_from_search(name): """ Search the chebi database for molecule having the given string in their name or in their synonyms. The data returned contains the chebi identifier, the name and synonyms of the molecule in chebi. @param name, a string, name of the molecule to search in chebi. @return, a dictionary containing all the molecule found for having the input string in their name or in their synonyms. The data structure returned is like: {string: {'name': string, 'syn': [String]}}, where the keys are the chebi identifier and the values are dictionaries containing the name of the molecules and a list of its synonym. """ query = ''' PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#> PREFIX obo:<http://purl.obolibrary.org/obo#> SELECT DISTINCT ?id ?name ?syn FROM <%(chebi)s> WHERE { { ?id rdfs:label ?name . ?id obo:Synonym ?syn . 
FILTER ( regex(?name, "%(search)s", "i") || regex(?syn, "%(search)s", "i") ) } } ORDER BY ?id ''' % {'search': name, 'chebi': GRAPHS['chebi']} data_js = sparql_query(query, SERVER) if not data_js: return molecules = {} for entry in data_js['results']['bindings']: chebi_id = entry['id']['value'].rsplit('/', 1)[1].split('_')[1] if chebi_id in molecules: molecules[chebi_id]['syn'].append(entry['syn']['value']) else: molecules[chebi_id] = { 'name': [entry['name']['value']], 'syn': [entry['syn']['value']] } return molecules def get_genes_of_proteins(data): """ Returns the genes associated with proteins. @param name, a dictionary where the keys are reactions identifier and the values lists of proteins identifier. @return, a dictionary containing all the genes related with the proteins specified. The data structure returned is like: {string: [String]}, where the keys are the uniprot identifier and the values are list of gene identifier associated with the protein. """ genes = {} for key in data: proteins = data[key] # Let's make sure the identifiers are unique proteins = list(set(proteins)) query = ''' PREFIX gene:<http://pbr.wur.nl/GENE#> PREFIX pos:<http://pbr.wur.nl/POSITION#> SELECT DISTINCT ?prot ?name ?sca ?start ?stop ?desc FROM <%(itag)s> WHERE{ ?gene gene:Protein ?prot . FILTER ( ?prot IN ( <http://purl.uniprot.org/uniprot/%(prot)s> ) ) ?gene gene:Position ?pos . ?pos pos:Scaffold ?sca . ?gene gene:Description ?desc . ?gene gene:FeatureName ?name . ?pos pos:Start ?start . ?pos pos:Stop ?stop . 
} ORDER BY ?name ''' % {'prot': '>,\n<http://purl.uniprot.org/uniprot/'.join( proteins), 'itag': GRAPHS['itag']} data_js = sparql_query(query, SERVER) for entry in data_js['results']['bindings']: prot_id = entry['prot']['value'].rsplit('/', 1)[1] gene = {} for var in ['name', 'sca', 'start', 'stop', 'desc']: gene[var] = entry[var]['value'] gene['sca'] = gene['sca'].rsplit('#', 1)[1] if prot_id in genes: genes[prot_id].append(gene) else: genes[prot_id] = [gene] return genes def get_pathways_of_proteins(data): """ Returns the pathways associated with proteins. @param name, a dictionary where the keys are reactions identifier and the values lists of proteins. @return, a dictionary containing all the pathways related with the proteins specified. The data structure returned is like: {string: [String]}, where the keys are the uniprot identifier and the values are list of pathways associated with the protein. """ pathways = {} for key in data: proteins = data[key] # Let's make sure the identifiers are unique proteins = list(set(proteins)) query = ''' PREFIX gene:<http://pbr.wur.nl/GENE#> PREFIX uniprot:<http://purl.uniprot.org/core/> PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#> SELECT DISTINCT ?prot ?desc FROM <%(uniprot)s> WHERE { ?prot uniprot:annotation ?annot . ?annot rdfs:seeAlso ?url . ?annot rdfs:comment ?desc . FILTER ( ?prot IN ( <http://purl.uniprot.org/uniprot/%(prot)s> ) ) } ''' % {'prot': '>,\n<http://purl.uniprot.org/uniprot/'.join(proteins), 'uniprot': GRAPHS['uniprot']} data_js = sparql_query(query, SERVER) for entry in data_js['results']['bindings']: prot_id = entry['prot']['value'].rsplit('/', 1)[1] path = entry['desc']['value'] if prot_id in pathways and path not in pathways[prot_id]: pathways[prot_id].append(path) else: pathways[prot_id] = [path] return pathways def get_organism_of_proteins(data): """ Returns the all organism associated with the proteins. 
@param name, a dictionary where the keys are reactions identifier and the values lists of proteins. @return, a dictionary containing all the organism related with the proteins specified. The data structure returned is like: {string: [String]}, where the keys are the uniprot identifier and the values are list of organisms associated with the protein. """ organism = {} for key in data: proteins = data[key] # Let's make sure the identifiers are unique proteins = list(set(proteins)) query = ''' PREFIX uniprot:<http://purl.uniprot.org/core/> SELECT DISTINCT ?prot ?name FROM <%(uniprot)s> WHERE { ?prot uniprot:organism ?orga . ?orga uniprot:scientificName ?name . FILTER ( ?prot IN ( <http://purl.uniprot.org/uniprot/%(prot)s> ) ) } ''' % {'prot': '>,\n<http://purl.uniprot.org/uniprot/'.join(proteins), 'uniprot': GRAPHS['uniprot']} data_js = sparql_query(query, SERVER) for entry in data_js['results']['bindings']: prot_id = entry['prot']['value'].rsplit('/', 1)[1] orga = entry['name']['value'] if prot_id in organism and orga not in organism[prot_id]: organism[prot_id].append(orga) else: organism[prot_id] = [orga] return organism def get_protein_of_chebi(chebi_id): """ Returns the all protein associated with a compound. @param name, a string, identifier of a compound on chebi. @return, a dictionary containing all the proteins related with the compound specified. The data structure returned is like: {string: [String]}, where the keys are reaction identifiers and the values are list of proteins associated with the reaction. """ query = ''' prefix bp: <http://www.biopax.org/release/biopax-level2.owl#> SELECT DISTINCT ?react ?xref FROM <%(rhea)s> WHERE { ?cmp bp:XREF <http://www.ebi.ac.uk/rhea#CHEBI:%(chebi_id)s> . ?dir ?p ?cmp . ?react ?p2 ?dir . ?react bp:XREF ?xref . 
FILTER ( regex(?xref, 'UNIPROT') ) } ''' % {'chebi_id': chebi_id, 'rhea': GRAPHS['rhea']} data = sparql_query(query, SERVER) if not data: return output = {} for entry in data['results']['bindings']: key = entry['react']['value'].split('#')[1] if key in output: output[key].append(entry['xref']['value']) else: output[key] = [entry['xref']['value']] return output def sparql_query(query, server, output_format='application/json'): """ Runs the given SPARQL query against the desired sparql endpoint and return the output in the format asked (default being rdf/xml). @param query, the string of the sparql query that should be ran. @param server, a string, the url of the sparql endpoint that we want to run query against. @param format, specifies in which format we want to have the output. Defaults to `application/json` but can also be `application/rdf+xml`. @return, a JSON object, representing the output of the provided sparql query. """ params = { 'default-graph': '', 'should-sponge': 'soft', 'query': query, 'debug': 'off', 'timeout': '', 'format': output_format, 'save': 'display', 'fname': '' } querypart = urllib.urlencode(params) response = urllib.urlopen(server, querypart).read() try: output = json.loads(response) except ValueError: output = {} return output def run_query_via_rdflib(query, server): """ Runs the given query of the given server, loads the results rdf/xml into a rdflib.Graph and return a rdf/xml representation of this graph. This is a bit of a hack to return a nicer rdf/xml representation of the knowledge retrieve than older version of virtuoso offers. From version 6.1.5 at least, this trick should not be needed anymore. @param query, the string of the sparql query that should be ran. @param server, a string, the url of the sparql endpoint that we want to run query against. @return, a string, representing the rdf output of the provided query. 
""" graph = rdflib.Graph() graph.parse(data=sparql_query(query, server), output_format="application/rdf+xml") return graph.serialize(format='xml') ## Web-app @APP.route('/', methods=['GET', 'POST']) def index(): """ Shows the front page. All the content of this page is in the index.html file under the templates directory. The file is full html and has no templating logic within. """ print 'Chebi2gene %s -- %s -- %s' % (datetime.datetime.now(), request.remote_addr, request.url) form = ChebiIDForm(csrf_enabled=False) if form.validate_on_submit(): try: int(form.chebi_id.data) return redirect(url_for('show_chebi', chebi_id=form.chebi_id.data)) except ValueError: return redirect(url_for('search_chebi', name=form.chebi_id.data)) return render_template('index.html', form=form) @APP.route('/search/<name>') def search_chebi(name): """ Search the CHEBI database for the name given. """ print 'Chebi2gene %s -- %s -- %s' % (datetime.datetime.now(), request.remote_addr, request.url) molecules = get_exact_chebi_from_search(name) if molecules and len(molecules) == 1: return redirect(url_for('show_chebi', chebi_id=molecules.keys()[0])) return render_template('search.html', data=molecules, search=name, extended=False) @APP.route('/fullsearch/<name>') def search_chebi_extended(name): """ Search the CHEBI database for the name given including the synonyms. """ print 'Chebi2gene %s -- %s -- %s' % (datetime.datetime.now(), request.remote_addr, request.url) molecules = get_extended_chebi_from_search(name) return render_template('search.html', data=molecules, search=name, extended=True) @APP.route('/chebi/<chebi_id>') def show_chebi(chebi_id): """ Shows the front page. All the content of this page is in the index.html file under the templates directory. The file is full html and has no templating logic within. 
""" print 'Chebi2gene %s -- %s -- %s' % (datetime.datetime.now(), request.remote_addr, request.url) proteins = get_protein_of_chebi(chebi_id) if not proteins: return render_template('output.html', proteins=[], pathways=None, genes=None, organisms=None, chebi=chebi_id) proteins = convert_to_uniprot_id(proteins) pathways = get_pathways_of_proteins(proteins) genes = get_genes_of_proteins(proteins) organisms = get_organism_of_proteins(proteins) return render_template('output.html', proteins=proteins, pathways=pathways, genes=genes, organisms=organisms, chebi=chebi_id) @APP.route('/csv/<chebi_id>') def generate_csv(chebi_id): """ Generate a comma separated value file containing all the information. """ print 'Chebi2gene %s -- %s -- %s' % (datetime.datetime.now(), request.remote_addr, request.url) # Regenerate the informations proteins = get_protein_of_chebi(chebi_id) proteins = convert_to_uniprot_id(proteins) pathways = get_pathways_of_proteins(proteins) genes = get_genes_of_proteins(proteins) organisms = get_organism_of_proteins(proteins) string = 'Chebi ID, Chebi URL, Rhea ID, Rhea URL, UniProt, \ Organism, Type, Name, Scaffold, Start, Stop, Description\n' chebi_url = 'http://www.ebi.ac.uk/chebi/searchId.do?chebiId=%s' % \ chebi_id for reaction in proteins: react_url = 'http://www.ebi.ac.uk/rhea/reaction.xhtml?id=RHEA:%s' % \ reaction for protein in proteins[reaction]: if protein in pathways: for pathway in pathways[protein]: string = string + '%s,%s,%s,%s,%s,%s,Pathway,%s\n' % ( chebi_id, chebi_url, reaction, react_url, protein, " - ".join(organisms[protein]), pathway) if protein in genes: for gene in genes[protein]: string = string + \ '%s,%s,%s,%s,%s,%s,Gene,%s,%s,%s,%s,%s\n' % ( chebi_id, chebi_url, reaction, react_url, protein, " - ".join(organisms[protein]), gene['name'], gene['sca'], gene['start'], gene['stop'], gene['desc']) return Response(string, mimetype='application/excel') if __name__ == '__main__': APP.debug = True APP.run()
PBR/chebi2gene
chebi2gene.py
Python
bsd-3-clause
17,793
# Command-line helper: upload the contents of a local file into the
# craystack store under <key>/<subkey>.
# NOTE: Python 2 script (print statements).
import sys

from craystack import cf

if len(sys.argv) < 4:
    print "Usage: %s <key> <subkey> <path>" % sys.argv[0]
    sys.exit(2)

# NOTE(review): exactly four argv entries are expected; passing extra
# arguments makes this unpacking raise ValueError -- confirm intended.
_, key, subkey, filename = sys.argv

with open(filename) as f:
    content = f.read()

# Store the file body as the value of field <subkey> on row <key>.
cf.insert(key, {subkey: content})

print "Uploaded %s to %s/%s (%s bytes)" % (filename, key, subkey, len(content))
rbranson/craystack
upload.py
Python
bsd-3-clause
343
# -*- coding: utf-8 -*-
# Copyright (c) 2016-present, CloudZero, Inc. All rights reserved.
# Licensed under the BSD-style license. See LICENSE file in the project root for full license information.

import pytest

from reactor.aws.cloudtrail import traverse_map, CloudTrailEvent


def test_traverse_map():
    # traverse_map walks a nested mapping ``m`` over a nested dict ``d``
    # and flattens matched leaves into a {target_field: value} dict.
    d = {'x': {'x': 7},
         'y': 8,
         't': [{'t': 10}, {'b': 11}],
         'z': {'z': {'z': 9}}}
    m = {'x': {'x': 'x_field'},
         'y': 'y_field',
         't': {'t': 't_field'},
         'z': {'z': {'z': 'z_field'}}}

    # A realistic CloudTrail record used to exercise the list-of-dicts
    # and nested-dict cases together.
    realish_data = {
        'eventversion': '1.06',
        'useridentity': {
            'type': 'AWSService',
            'invokedBy': 'lambda.amazonaws.com'
        },
        'eventtime': '2018-05-19T16:15:32Z',
        'eventsource': 'lambda.amazonaws.com',
        'eventname': 'Invoke',
        'awsregion': 'us-east-1',
        'sourceipaddress': 'lambda.amazonaws.com',
        'useragent': 'lambda.amazonaws.com',
        'requestparameters': {
            'functionName': 'arn:aws:lambda:us-east-1:123456789012:function:function-name',
            'contentType': None,
            'logType': None
        },
        'responseelements': None,
        'additionaleventdata': {
            'functionVersion': 'arn:aws:lambda:us-east-1:1234567890123:function:function-name:$LATEST'
        },
        'requestid': '6a733515-cc52-412b-bb7d-70766f30f5d0',
        'eventid': 'a250f917-32ef-4174-ab0e-49d9fc955243',
        'readonly': False,
        'resources': [
            {
                'accountId': '1234567890123',
                'type': 'AWS::Lambda::Function',
                'ARN': 'arn:aws:lambda:us-east-1:998146006915:function:function-name'
            },
            {
                'accountId': '1234567890123',
                'type': 'AWS::DynamoDB::Stream',
                'ARN': 'arn:aws:dynamodb:us-east-1:123456789012:table/table_name/stream/2018-02-19T05:56:02.100'
            }
        ],
        'eventtype': 'AwsApiCall',
        'managementevent': False,
        'recipientaccountid': '1234567890123',
        'sharedeventid': '8b9f0853-6518-47c8-a8da-b648fc27d528'
    }

    realish_map = {
        'resources': 'res_field',
        'requestparameters': {
            'functionName': 'arn:aws:lambda:us-east-1:123456789012:function:function-name'
        }
    }

    res = traverse_map(d, m)
    assert res['x_field'] == 7
    assert res['y_field'] == 8
    assert res['z_field'] == 9
    # For a list of dicts only the first matching entry is kept.
    assert res['t_field'] == 10

    m = {'q': 'q_field'}  # mapping for field not present in obj
    res = traverse_map(d, m)
    assert res == {}

    # specifying a mapping of wrong shape for field that does exist.
    # traverse_map doesn't support mapping a nested obj to a cz field.
    m = {'x': 'wrong_shape'}
    with pytest.raises(AttributeError):
        traverse_map(d, m)

    res = traverse_map(realish_data, realish_map)
    # A list value mapped to a field is passed through unchanged.
    assert res['res_field'] == [{'accountId': '1234567890123',
                                 'type': 'AWS::Lambda::Function',
                                 'ARN': 'arn:aws:lambda:us-east-1:998146006915:function:function-name'},
                                {'accountId': '1234567890123',
                                 'type': 'AWS::DynamoDB::Stream',
                                 'ARN': 'arn:aws:dynamodb:us-east-1:123456789012:table/'
                                        'table_name/stream/2018-02-19T05:56:02.100'}]


def test_CloudTrailEvent():
    # CloudTrailEvent exposes name/type/identity_type from the raw record.
    raw_event = {'eventName': 'name',
                 'eventType': 'type',
                 'userIdentity': {'type': 'identity_type'}}

    cte = CloudTrailEvent(raw_event)

    assert cte.identity_type == 'identity_type'
    assert cte.name == 'name'
    assert cte.type == 'type'

    # path is <identity type>.<event name>; peel_path strips one trailing
    # component per call, bottoming out at 'ROOT' then None.
    path = cte.path
    peel_a_level = CloudTrailEvent.peel_path(path)
    peel_two_levels = CloudTrailEvent.peel_path(peel_a_level)
    peel_three_levels = CloudTrailEvent.peel_path(peel_two_levels)

    assert peel_a_level == 'identity_type'
    assert peel_two_levels == 'ROOT'
    assert peel_three_levels is None

    # path may sometimes be <event name> if there is no
    # event identity type.
    raw_event_no_id_type = {'eventName': 'name',
                            'eventType': 'type',
                            'userIdentity': {'no_field': 'named_type'}}

    cte_no_id_type = CloudTrailEvent(raw_event_no_id_type)
    assert cte_no_id_type.path == 'name'
Cloudzero/cloudzero-reactor-aws
test/unit/aws/test_unit_cloudtrail.py
Python
bsd-3-clause
4,413
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe

from six.moves import map


def get_user_model():
    """
    Return the User model

    Using this function instead of Django 1.5's get_user_model
    allows backwards compatibility with Django 1.4.
    """
    try:
        # Django 1.5+
        from django.contrib.auth import get_user_model
    except ImportError:
        # Django <= 1.4
        model = User
    else:
        model = get_user_model()

    # Test if user model has any custom fields and add attributes to the _meta
    # class
    core_fields = set([f.name for f in User._meta.fields])
    model_fields = set([f.name for f in model._meta.fields])
    new_fields = model_fields.difference(core_fields)
    model._meta.has_additional_fields = len(new_fields) > 0
    model._meta.additional_fields = new_fields

    return model


# A setting that can be used in foreign key declarations
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
# Two additional settings that are useful in South migrations when
# specifying the user model in the FakeORM
try:
    AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME = AUTH_USER_MODEL.rsplit('.', 1)
except ValueError:
    raise ImproperlyConfigured("AUTH_USER_MODEL must be of the form"
                               " 'app_label.model_name'")


def format_html(format_string, *args, **kwargs):
    """
    Backport of format_html from Django 1.5+ to support Django 1.4

    Conditionally escapes every positional and keyword argument before
    interpolating them into ``format_string`` and marks the result safe.
    """
    args_safe = map(conditional_escape, args)
    kwargs_safe = dict([(k, conditional_escape(v)) for (k, v) in
                        six.iteritems(kwargs)])
    return mark_safe(format_string.format(*args_safe, **kwargs_safe))


#
# Python3 compatibility layer
#

# ``urlparse`` module moved to ``urllib.parse`` in Python 3.
try:
    import urlparse as _urlparse
except ImportError:
    from urllib import parse as _urlparse  # NOQA
urlparse = _urlparse


#
# Unicode compatible wrapper for CSV reader and writer that abstracts away
# differences between Python 2 and 3. A package like unicodecsv would be
# preferable, but it's not Python 3 compatible yet.
# Code from http://python3porting.com/problems.html
# Classes renamed to include CSV. Unused 'codecs' import is dropped.

import sys
import csv

# NOTE(review): string comparison is fragile (would misclassify a
# hypothetical version '10.x'); sys.version_info would be safer.
PY3 = sys.version > '3'


class UnicodeCSVReader:
    """
    Context-manager CSV reader yielding unicode rows on both Python 2
    (opens binary and decodes each cell) and Python 3 (opens text).
    """
    def __init__(self, filename, dialect=csv.excel,
                 encoding="utf-8", **kw):
        self.filename = filename
        self.dialect = dialect
        self.encoding = encoding
        self.kw = kw

    def __enter__(self):
        if PY3:
            # newline='' is required so the csv module handles newlines.
            self.f = open(self.filename, 'rt',
                          encoding=self.encoding, newline='')
        else:
            self.f = open(self.filename, 'rb')
        self.reader = csv.reader(self.f, dialect=self.dialect,
                                 **self.kw)
        return self

    def __exit__(self, type, value, traceback):
        self.f.close()

    def next(self):
        row = next(self.reader)
        if PY3:
            return row
        # NOTE(review): decodes with hard-coded "utf-8" rather than
        # self.encoding -- confirm whether that is intentional.
        return [s.decode("utf-8") for s in row]

    __next__ = next

    def __iter__(self):
        return self


class UnicodeCSVWriter:
    """
    Context-manager CSV writer accepting unicode rows on both Python 2
    (encodes each cell before writing) and Python 3 (writes text).
    """
    def __init__(self, filename, dialect=csv.excel,
                 encoding="utf-8", **kw):
        self.filename = filename
        self.dialect = dialect
        self.encoding = encoding
        self.kw = kw

    def __enter__(self):
        if PY3:
            self.f = open(self.filename, 'wt',
                          encoding=self.encoding, newline='')
        else:
            self.f = open(self.filename, 'wb')
        self.writer = csv.writer(self.f, dialect=self.dialect,
                                 **self.kw)
        return self

    def __exit__(self, type, value, traceback):
        self.f.close()

    def writerow(self, row):
        if not PY3:
            row = [s.encode(self.encoding) for s in row]
        self.writer.writerow(row)

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
elliotthill/django-oscar
oscar/core/compat.py
Python
bsd-3-clause
4,157
from ...io.managers.context_manager import ContextManager

# Valid values for the ``if_exists`` parameter of :meth:`SQLClient.create_table`.
COLLISION_STRATEGIES = ['fail', 'replace']


class SQLClient:
    """SQLClient class is a client to run SQL queries in a CARTO account.
    It also provides basic SQL utilities for analyzing and managing tables.

    Note that table and column names are interpolated directly into the SQL
    text (they are identifiers, which cannot be bound as query parameters),
    so they must come from trusted input.

    Args:
        credentials (:py:class:`Credentials <cartoframes.auth.Credentials>`):
            A :py:class:`Credentials <cartoframes.auth.Credentials>`
            instance can be used in place of a `username`|`base_url` / `api_key` combination.

    Example:
        >>> sql = SQLClient(credentials)

    """

    def __init__(self, credentials=None):
        self._context_manager = ContextManager(credentials)

    def query(self, query, verbose=False):
        """Run a SQL query. It returns a `list` with content of the response.
        If the `verbose` param is True it returns the full SQL response in a `dict`.
        For more information check the `SQL API
        documentation
        <https://carto.com/developers/sql-api/reference/#tag/Single-SQL-Statement>`.

        Args:
            query (str): SQL query.
            verbose (bool, optional): flag to return all the response. Default False.

        Example:
            >>> sql.query('SELECT * FROM table_name')

        """
        response = self._context_manager.execute_query(query.strip())
        if not verbose:
            return response.get('rows')
        else:
            return response

    def execute(self, query):
        """Run a long running query. It returns an object with the
        status and information of the job. For more information check the `Batch API
        documentation <https://carto.com/developers/sql-api/reference/#tag/Batch-Queries>`.

        Args:
            query (str): SQL query.

        Example:
            >>> sql.execute('DROP TABLE table_name')

        """
        return self._context_manager.execute_long_running_query(query.strip())

    def distinct(self, table_name, column_name):
        """Get the distict values and their count in a table
        for a specific column.

        Args:
            table_name (str): name of the table.
            column_name (str): name of the column.

        Example:
            >>> sql.distinct('table_name', 'column_name')
            [('value1', 10), ('value2', 5)]

        """
        query = '''
            SELECT {0}, COUNT(*) FROM {1}
            GROUP BY 1 ORDER BY 2 DESC
        '''.format(column_name, table_name)
        output = self.query(query)
        return [(x.get(column_name), x.get('count')) for x in output]

    def count(self, table_name):
        """Get the number of elements of a table.

        Args:
            table_name (str): name of the table.

        Example:
            >>> sql.count('table_name')
            15

        """
        query = 'SELECT COUNT(*) FROM {};'.format(table_name)
        output = self.query(query)
        return output[0].get('count')

    def bounds(self, table_name):
        """Get the bounds of the geometries in a table.

        Args:
            table_name (str): name of the table containing a "the_geom" column.

        Example:
            >>> sql.bounds('table_name')
            [[-1,-1], [1,1]]

        """
        query = '''
            SELECT ARRAY[
                ARRAY[st_xmin(geom_env), st_ymin(geom_env)],
                ARRAY[st_xmax(geom_env), st_ymax(geom_env)]
            ] bounds FROM (
                SELECT ST_Extent(the_geom) geom_env
                FROM (SELECT the_geom FROM {}) q
            ) q;
        '''.format(table_name)
        output = self.query(query)
        return output[0].get('bounds')

    def schema(self, table_name, raw=False):
        """Show information about the schema of a table.

        Args:
            table_name (str): name of the table.
            raw (bool, optional): return raw dict data if set to True.
                Default False.

        Example:
            >>> sql.schema('table_name')
            Column name          Column type
            -------------------------------------
            cartodb_id           number
            the_geom             geometry
            the_geom_webmercator geometry
            column1              string
            column2              number

        """
        # LIMIT 0 returns no rows but still reports the column metadata.
        query = 'SELECT * FROM {0} LIMIT 0;'.format(table_name)
        output = self.query(query, verbose=True)
        fields = output.get('fields')
        if raw is True:
            return {key: fields[key]['type'] for key in fields}
        else:
            columns = ['Column name', 'Column type']
            rows = [(key, fields[key]['type']) for key in fields]
            self._print_table(rows, columns=columns, padding=[10, 5])
            return None

    def describe(self, table_name, column_name):
        """Show information about a column in a specific table.
        It returns the COUNT of the table. If the column type is number
        it also returns the AVG, MIN and MAX.

        Args:
            table_name (str): name of the table.
            column_name (str): name of the column.

        Example:
            >>> sql.describe('table_name', 'column_name')
            count     1.00e+03
            avg       2.00e+01
            min       0.00e+00
            max       5.00e+01
            type: number

        """
        column_type = self._get_column_type(table_name, column_name)
        stats = ['COUNT(*)']
        if column_type == 'number':
            stats.append('AVG({})'.format(column_name))
            stats.append('MIN({})'.format(column_name))
            stats.append('MAX({})'.format(column_name))
        query = '''
            SELECT {0}
            FROM {1};
        '''.format(','.join(stats), table_name)
        output = self.query(query, verbose=True)
        fields = output.get('rows')[0]
        # NULL aggregates (e.g. AVG over an empty table) are not printed.
        rows = [(key, '{:0.2e}'.format(fields[key]))
                for key in fields if fields[key] is not None]
        self._print_table(rows, padding=[5, 10])
        print('type: {}'.format(column_type))

    def create_table(self, table_name, columns_types, if_exists='fail', cartodbfy=True):
        """Create a table with a specific table name and columns.

        Args:
            table_name (str): name of the table.
            columns_types (dict): dictionary with the column names and types.
            if_exists (str, optional): collision strategy if the table already
                exists in CARTO. Options are 'fail' or 'replace'. Default 'fail'.
            cartodbfy (bool, optional): convert the table to CARTO format.
                Default True. More info `here
                <https://carto.com/developers/sql-api/guides/creating-tables/#create-tables>`.

        Raises:
            ValueError: if ``columns_types`` is not a dict or ``if_exists``
                is not a valid collision strategy.

        Example:
            >>> sql.create_table('table_name', {'column1': 'text', 'column2': 'integer'})

        """
        if not isinstance(columns_types, dict):
            raise ValueError('The columns_types parameter should be a dictionary of column names and types.')

        if if_exists not in COLLISION_STRATEGIES:
            raise ValueError('Please provide a valid if_exists value among {}'.format(', '.join(COLLISION_STRATEGIES)))

        columns = ['{0} {1}'.format(cname, ctype) for cname, ctype in columns_types.items()]
        schema = self._context_manager.get_schema()
        # Wrap DROP/CREATE/CDB_CartoDBFyTable in one transaction so a partial
        # failure does not leave the account without the table.
        query = '''
            BEGIN;
            {drop};
            {create};
            {cartodbfy};
            COMMIT;
        '''.format(
            drop='DROP TABLE IF EXISTS {}'.format(table_name) if if_exists == 'replace' else '',
            create='CREATE TABLE {0} ({1})'.format(table_name, ','.join(columns)),
            cartodbfy='SELECT CDB_CartoDBFyTable(\'{0}\', \'{1}\')'.format(
                schema, table_name) if cartodbfy else ''
        )
        self.execute(query)

    def insert_table(self, table_name, columns_values):
        """Insert a row to the table.

        Args:
            table_name (str): name of the table.
            columns_values (dict): dictionary with the column names and values
                (one list of values per column, zipped row-wise).

        Example:
            >>> sql.insert_table('table_name', {'column1': ['value1', 'value2'], 'column2': [1, 2]})

        """
        cnames = columns_values.keys()
        cvalues = [self._row_values_format(v) for v in zip(*columns_values.values())]
        query = '''
            INSERT INTO {0} ({1}) VALUES {2};
        '''.format(table_name, ','.join(cnames), ','.join(cvalues))
        self.execute(query)

    def update_table(self, table_name, column_name, column_value, condition):
        """Update the column's value for the rows that match the condition.

        Args:
            table_name (str): name of the table.
            column_name (str): name of the column.
            column_value (str): value of the column.
            condition (str): "where" condition of the request.

        Example:
            >>> sql.update_table('table_name', 'column1', 'VALUE1', 'column1=\\'value1\\'')

        """
        value = self._sql_format(column_value)
        query = '''
            UPDATE {0} SET {1}={2} WHERE {3};
        '''.format(table_name, column_name, value, condition)
        self.execute(query)

    def rename_table(self, table_name, new_table_name):
        """Rename a table from its table name.

        Args:
            table_name (str): name of the original table.
            new_table_name (str): name of the new table.

        Example:
            >>> sql.rename_table('table_name', 'table_name2')

        """
        query = 'ALTER TABLE {0} RENAME TO {1};'.format(table_name, new_table_name)
        self.execute(query)

    def drop_table(self, table_name):
        """Remove a table from its table name.

        Args:
            table_name (str): name of the table.

        Example:
            >>> sql.drop_table('table_name')

        """
        query = 'DROP TABLE IF EXISTS {0};'.format(table_name)
        self.execute(query)

    def _get_column_type(self, table_name, column_name):
        """Return the CARTO type name of a single column (via LIMIT 0 probe)."""
        query = 'SELECT {0} FROM {1} LIMIT 0;'.format(column_name, table_name)
        output = self.query(query, verbose=True)
        fields = output.get('fields')
        field = fields.get(column_name)
        return field.get('type')

    def _sql_format(self, value):
        """Render a Python value as a SQL literal (str, bool or numeric)."""
        if isinstance(value, str):
            return '\'{}\''.format(value)
        if isinstance(value, bool):
            return 'TRUE' if value else 'FALSE'
        return str(value)

    def _row_values_format(self, row_values):
        """Render one row of values as a parenthesised SQL tuple."""
        return '({})'.format(','.join([self._sql_format(value) for value in row_values]))

    def _print_table(self, rows, columns=None, padding=None):
        """Pretty-print rows (and an optional header) in aligned columns."""
        # FIX: the original indexed rows[0] unconditionally, which raised
        # IndexError when there was nothing to print and no header given.
        if not rows and not columns:
            return
        row_format = ''
        for index, column in enumerate(columns or rows[0]):
            length = str(len(str(column)) + (padding[index] if padding else 5))
            row_format += '{:' + length + '}'
        if columns:
            header = row_format.format(*columns)
            print(header)
            print('-' * len(header))
        for row in rows:
            print(row_format.format(*row))
CartoDB/cartoframes
cartoframes/data/clients/sql_client.py
Python
bsd-3-clause
11,090
"""Extract JPEG frames from a Garmin VIRB 360 video and export the GPS
metadata of the matching FIT file to CSV.

Frames are written to a directory named after the video file
(``<stream>/<stream>_frameN.jpg``) and the GPS records to
``<stream>_metadata.csv``.
"""
import os

# Fields copied verbatim from the FIT 'gps_metadata' messages.
FIELDS_TO_EXTRACT = ['timestamp', 'enhanced_altitude', 'enhanced_speed',
                     'utc_timestamp', 'timestamp_ms', 'heading', 'velocity']
# Position fields are stored as semicircles and must be converted to degrees.
FIELDS_TO_CONVERT = ['position_lat', 'position_long']


def semicircles_to_degrees(value):
    """Convert a FIT semicircle angle to decimal degrees.

    FIT encodes latitude/longitude as signed 32-bit semicircles, where
    2**31 semicircles == 180 degrees.
    """
    return value * 180.0 / 2 ** 31


def extract_frames(video_path, out_dir):
    """Dump every frame of *video_path* as a JPEG file into *out_dir*.

    Returns the number of frames written (0 if the video cannot be read).
    """
    import cv2  # local import keeps the module importable without OpenCV

    vidcap = cv2.VideoCapture(video_path)
    success, image = vidcap.read()
    if not success:
        return 0
    os.makedirs(out_dir, exist_ok=True)
    stream = os.path.basename(out_dir)
    count = 0
    while success:
        # save frame as JPEG file
        cv2.imwrite("{0}/{1}_frame{2}.jpg".format(out_dir, stream, count), image)
        success, image = vidcap.read()
        print('Read a new frame: ', success)
        count += 1
    return count


def extract_metadata(fit_path, csv_path):
    """Parse GPS metadata records from *fit_path* and write them to *csv_path*.

    Returns the resulting pandas DataFrame.
    """
    import fitparse
    import pandas

    meta = {key: [] for key in FIELDS_TO_EXTRACT + FIELDS_TO_CONVERT}
    ff = fitparse.FitFile(fit_path)
    for m in ff.get_messages('gps_metadata'):
        for f in m.fields:
            if f.name in FIELDS_TO_EXTRACT:
                meta[f.name].append(f.value)
            if f.name in FIELDS_TO_CONVERT:
                meta[f.name].append(semicircles_to_degrees(f.value))

    metadata = pandas.DataFrame()
    for key in FIELDS_TO_EXTRACT + FIELDS_TO_CONVERT:
        metadata[key] = pandas.Series(meta[key])
    metadata.to_csv(csv_path)
    return metadata


def main():
    fname = 'V0280048.MP4'
    fitfile = '2018-06-01-12-13-33.fit'

    # BUG FIX: `stream` is now derived unconditionally; the original only
    # defined it when the video opened successfully, so the metadata export
    # crashed with NameError when the video could not be read.
    stream = fname.split('.')[-2]
    extract_frames(fname, stream)

    # BUG FIX: the original passed a hard-coded FIT file name to
    # fitparse.FitFile instead of the `fitfile` variable defined above.
    extract_metadata(fitfile, '{}_metadata.csv'.format(stream))


if __name__ == '__main__':
    main()
GFZ-Centre-for-Early-Warning/REM_RRVS
permanent-tools/VIRB360/extract_frames.py
Python
bsd-3-clause
1,214
# -*- coding: utf-8 -*-
"""
Functions for calculation of potential and actual evaporation
from meteorological data.

Potential and actual evaporation functions
==========================================

- E0: Calculate Penman (1948, 1956) open water evaporation.
- Em: Calculate evaporation according to Makkink (1965).
- Ept: Calculate evaporation according to Priestley and Taylor (1972).
- ET0pm: Calculate Penman Monteith reference evaporation short grass.
- Epm: Calculate Penman-Monteith evaporation (actual evapotranspiration).
- ra: Calculate aerodynamic resistance from windspeed and roughnes parameters.
- tvardry: calculate sensible heat flux from temperature variations.
- gash79: Gash (1979) analytical rainfall interception model.

Requires and imports scipy and meteolib modules.
Compatible with Python 2.7.3.

Function descriptions
=====================

"""

import scipy

from . import meteolib

__author__ = "Dr. Maarten J. Waterloo <maarten.waterloo@acaciawater.com>"
__version__ = "1.0"
__release__ = "1.0.1"
__date__ = "June 2016"

# 14 June 2016: Fixed error in Epm function: changed multiplication by ra
# to division by ra. Thanks Spencer Whitman for pointing this out.

# NOTE(review): this module uses the deprecated scipy.* aliases of numpy
# functions (scipy.array, scipy.log, scipy.sqrt, ...), which were removed
# in SciPy >= 1.x. Porting these calls to numpy is needed on modern stacks.


# Make a help entry for this library
def evaplib():
    """
    Evaplib: Python libray for calculation of evaporation from meteorological data.

    Print a short overview of the functions available in this module and the
    authorship/version information. Purely informational; returns None.
    """
    print("A libray with Python functions for calculation of")
    print("evaporation from meteorological and vegetation data.\n")
    print("Functions:\n")
    print("- E0: Calculate Penman (1948, 1956) (open water) evaporation")
    print("- Em: Calculate evaporation according to Makkink (1965)")
    print("- Ept: Calculate evaporation according to Priestley and Taylor (1972).")
    print("- ET0pm: Calculate Penman Monteith reference evaporation short grass.")
    print("- Epm: Calculate Penman Monteith evaporation (Monteith, 1965).")
    print("- ra: Calculate aerodynamic resistance.")
    print(
        "- tvardry: calculate sensible heat flux from temperature variations \
         (Vugts et al., 1993)."
    )
    print("- gash79: calculate rainfall interception (Gash, 1979).\n")
    print(("Author: ", __author__))
    print(("Version: ", __version__))
    print(("Date: ", __date__))


def ra(z=float, z0=float, d=float, u=scipy.array([])):
    """
    Function to calculate aerodynamic resistance from windspeed:

    .. math::
        r_a = \\frac{\\left[\\ln\\frac{z-d}{z_0}\\right]^2}{k^2 \\cdot u_z}

    where k is the von Karman constant set at 0.4.

    Parameters:
        - z: measurement height [m].
        - z0: roughness length [m].
        - d: displacement length [m].
        - u: (array of) wind speed measured at height z [m s-1].

    Returns:
        - ra: (array of) aerodynamic resistance [s m-1].

    References
    ----------
    A.S. Thom (1975), Momentum, mass and heat exchange of plant communities,
    In: Monteith, J.L. Vegetation and the Atmosphere, Academic Press, London.
    p. 57-109.

    Examples
    --------
        >>> ra(3,0.12,2.4,5.0)
        3.2378629924752942
        >>> u=([2,4,6])
        >>> ra(3,0.12,2.4,u)
        array([ 8.09465748,  4.04732874,  2.69821916])

    """
    # Test input array/value
    u = meteolib._arraytest(u)

    # 0.16 == k**2 with k = 0.4 (von Karman constant)
    ra = (scipy.log((z - d) / z0)) ** 2 / (0.16 * u)

    return ra  # aerodynamic resistance in s/m


def E0(
    airtemp=scipy.array([]),
    rh=scipy.array([]),
    airpress=scipy.array([]),
    Rs=scipy.array([]),
    Rext=scipy.array([]),
    u=scipy.array([]),
    alpha=0.08,
    Z=0.0,
):
    """
    Function to calculate daily Penman (open) water evaporation estimates:

    .. math::
        E_0 = \\frac{R_n \\cdot \\Delta}{\\lambda \\cdot (\\Delta + \\gamma)}
        + \\frac{6430000 \\cdot E_a \\cdot \\gamma}{\\lambda \\cdot (\\Delta+\\gamma)}

    Parameters:
        - airtemp: (array of) daily average air temperatures [Celsius].
        - rh: (array of) daily average relative humidity [%].
        - airpress: (array of) daily average air pressure data [Pa].
        - Rs: (array of) daily incoming solar radiation [J m-2 day-1].
        - Rext: (array of) daily extraterrestrial radiation [J m-2 day-1].
        - u: (array of) daily average wind speed at 2 m [m s-1].
        - alpha: albedo [-] set at 0.08 for open water by default.
        - Z: (array of) site elevation, default is 0 m a.s.l.

    Returns:
        - E0: (array of) Penman open water evaporation values [mm day-1].

    Notes
    -----
    Meteorological parameters measured at 2 m above the surface. Albedo
    alpha set by default at 0.08 for open water (Valiantzas, 2006).

    References
    ----------
    - H.L. Penman (1948). Natural evaporation from open water, bare soil\
    and grass. Proceedings of the Royal Society of London. Series A.\
    Mathematical and Physical Sciences 193: 120-145.
    - H.L. Penman (1956). Evaporation: An introductory survey. Netherlands\
    Journal of Agricultural Science 4: 9-29.
    - J.D. Valiantzas (2006). Simplified versions for the Penman\
    evaporation equation using routine weather data. J. Hydrology 331:\
    690-702.

    Examples
    --------
        >>> # With single values and default albedo/elevation
        >>> E0(20.67,67.0,101300.0,22600000.,42000000.,3.2)
        6.6029208786994467
        >>> # With albedo is 0.18 instead of default and default elevation
        >>> E0(20.67,67.0,101300.0,22600000.,42000000.,3.2,alpha=0.18)
        5.9664248091431968
        >>> # With standard albedo and Z= 250.0 m
        >>> E0(20.67,67.0,101300.0,22600000.,42000000.,3.2,Z=250.0)
        6.6135588207586284
        >>> # With albedo alpha = 0.18 and elevation Z = 1000 m a.s.l.
        >>> E0(20.67,67.0,101300.0,22600000.,42000000.,3.2,0.18,1000.)
        6.00814764682986

    """
    # Test input array/value
    airtemp, rh, airpress, Rs, Rext, u = meteolib._arraytest(
        airtemp, rh, airpress, Rs, Rext, u
    )

    # Set constants
    sigma = 4.903e-3  # Stefan Boltzmann constant J/m2/K4/d

    # Calculate Delta, gamma and lambda
    DELTA = meteolib.Delta_calc(airtemp)  # [Pa/K]
    gamma = meteolib.gamma_calc(airtemp, rh, airpress)  # [Pa/K]
    Lambda = meteolib.L_calc(airtemp)  # [J/kg]

    # Calculate saturated and actual water vapour pressures
    es = meteolib.es_calc(airtemp)  # [Pa]
    ea = meteolib.ea_calc(airtemp, rh)  # [Pa]

    # calculate radiation components (J/m2/day)
    Rns = (1.0 - alpha) * Rs  # Shortwave component [J/m2/d]
    Rs0 = (0.75 + 2e-5 * Z) * Rext  # Calculate clear sky radiation Rs0
    f = 1.35 * Rs / Rs0 - 0.35
    epsilom = 0.34 - 0.14 * scipy.sqrt(ea / 1000)
    Rnl = f * epsilom * sigma * (airtemp + 273.15) ** 4  # Longwave component [J/m2/d]
    Rnet = Rns - Rnl  # Net radiation [J/m2/d]
    Ea = (1 + 0.536 * u) * (es / 1000 - ea / 1000)
    E0 = (
        DELTA / (DELTA + gamma) * Rnet / Lambda
        + gamma / (DELTA + gamma) * 6430000 * Ea / Lambda
    )

    return E0


def ET0pm(
    airtemp=scipy.array([]),
    rh=scipy.array([]),
    airpress=scipy.array([]),
    Rs=scipy.array([]),
    Rext=scipy.array([]),
    u=scipy.array([]),
    Z=0.0,
):
    """
    Function to calculate daily Penman Monteith reference evaporation estimates.

    Parameters:
        - airtemp: (array of) daily average air temperatures [Celsius].
        - rh: (array of) daily average relative humidity values [%].
        - airpress: (array of) daily average air pressure data [hPa].
        - Rs: (array of) total incoming shortwave radiation [J m-2 day-1].
        - Rext: Incoming shortwave radiation at the top of the atmosphere\
        [J m-2 day-1].
        - u: windspeed [m s-1].
        - Z: elevation [m], default is 0 m a.s.l.

    Returns:
        - ET0pm: (array of) Penman Monteith reference evaporation (short\
        grass with optimum water supply) values [mm day-1].

    Notes
    -----
    Meteorological measuements standard at 2 m above soil surface.

    References
    ----------
    R.G. Allen, L.S. Pereira, D. Raes and M. Smith (1998). Crop
    evapotranspiration - Guidelines for computing crop water requirements -
    FAO Irrigation and drainage paper 56. FAO - Food and Agriculture
    Organization of the United Nations, Rome, 1998.
    (http://www.fao.org/docrep/x0490e/x0490e07.htm)

    Examples
    --------
        >>> ET0pm(20.67,67.0,101300.0,22600000.,42000000.,3.2)
        4.7235349721073039

    """
    # Test input array/value
    airtemp, rh, airpress, Rs, Rext, u = meteolib._arraytest(
        airtemp, rh, airpress, Rs, Rext, u
    )

    # Set constants
    albedo = 0.23  # short grass albedo
    sigma = 4.903e-3  # Stefan Boltzmann constant J/m2/K4/d

    # Calculate Delta, gamma and lambda
    DELTA = meteolib.Delta_calc(airtemp)  # [Pa/K]
    gamma = meteolib.gamma_calc(airtemp, rh, airpress)  # [Pa/K]
    Lambda = meteolib.L_calc(airtemp)  # [J/kg]

    # Calculate saturated and actual water vapour pressures
    es = meteolib.es_calc(airtemp)  # [Pa]
    ea = meteolib.ea_calc(airtemp, rh)  # [Pa]

    Rns = (1.0 - albedo) * Rs  # Shortwave component [J/m2/d]
    # Calculate clear sky radiation Rs0
    Rs0 = (0.75 + 2e-5 * Z) * Rext  # Clear sky radiation [J/m2/d]
    f = 1.35 * Rs / Rs0 - 0.35
    epsilom = 0.34 - 0.14 * scipy.sqrt(ea / 1000)
    Rnl = f * epsilom * sigma * (airtemp + 273.15) ** 4  # Longwave component [J/m2/d]
    Rnet = Rns - Rnl  # Net radiation [J/m2/d]
    ET0pm = (
        DELTA / 1000.0 * Rnet / Lambda
        + 900.0 / (airtemp + 273.16) * u * (es - ea) / 1000 * gamma / 1000
    ) / (DELTA / 1000.0 + gamma / 1000 * (1.0 + 0.34 * u))

    return ET0pm  # FAO reference evaporation [mm/day]


def Em(
    airtemp=scipy.array([]),
    rh=scipy.array([]),
    airpress=scipy.array([]),
    Rs=scipy.array([]),
):
    """
    Function to calculate Makkink evaporation (in mm/day):

    .. math::
        E_m = 0.65 \\frac{R_s}{\\lambda} \\cdot \\frac{\\Delta}{\\Delta + \\gamma}

    The Makkink evaporation is a reference crop evaporation. It is a reference
    crop evaporation equation based on the Penman open water equation and
    represents evapotranspiration from short, well-watered grassland under
    Dutch climate conditions. Makkink reference evaporation values are
    published daily by the Royal Netherlands Meteorological Institute (KNMI)
    in the Netherlands. Values are used in combination with crop factors to
    provide daily estimates of actual crop evaporation for many crop types.

    Parameters:
        - airtemp: (array of) daily average air temperatures [Celsius].
        - rh: (array of) daily average relative humidity values [%].
        - airpress: (array of) daily average air pressure data [Pa].
        - Rs: (array of) average daily incoming solar radiation [J m-2 day-1].

    Returns:
        - Em: (array of) Makkink evaporation values [mm day-1].

    Notes
    -----
    Meteorological measurements standard at 2 m above soil surface.

    References
    ----------
    H.A.R. de Bruin (1987). From Penman to Makkink, in Hooghart, C. (Ed.),
    Evaporation and Weather, Proceedings and Information. Comm. Hydrological
    Research TNO, The Hague. pp. 5-30.

    Examples
    --------
        >>> Em(21.65,67.0,101300.,24200000.)
        4.503830479197991

    """
    # Test input array/value
    airtemp, rh, airpress, Rs = meteolib._arraytest(airtemp, rh, airpress, Rs)

    # Calculate Delta and gamma constants
    DELTA = meteolib.Delta_calc(airtemp)
    gamma = meteolib.gamma_calc(airtemp, rh, airpress)
    Lambda = meteolib.L_calc(airtemp)

    # calculate Em [mm/day]
    Em = 0.65 * DELTA / (DELTA + gamma) * Rs / Lambda

    return Em


def Ept(
    airtemp=scipy.array([]),
    rh=scipy.array([]),
    airpress=scipy.array([]),
    Rn=scipy.array([]),
    G=scipy.array([]),
):
    """
    Function to calculate daily Priestley - Taylor evaporation:

    .. math::
        E_{pt} = \\alpha \\frac{R_n - G}{\\lambda}
        \\cdot \\frac{\\Delta}{\\Delta + \\gamma}

    where alpha is set to 1.26.

    Parameters:
        - airtemp: (array of) daily average air temperatures [Celsius].
        - rh: (array of) daily average relative humidity values [%].
        - airpress: (array of) daily average air pressure data [Pa].
        - Rn: (array of) average daily net radiation [J m-2 day-1].
        - G: (array of) average daily soil heat flux [J m-2 day-1].

    Returns:
        - Ept: (array of) Priestley Taylor evaporation values [mm day-1].

    Notes
    -----
    Meteorological parameters normally measured at 2 m above the surface.

    References
    ----------
    Priestley, C.H.B. and R.J. Taylor, 1972. On the assessment of surface
    heat flux and evaporation using large-scale parameters. Mon. Weather
    Rev. 100:81-82.

    Examples
    --------
        >>> Ept(21.65,67.0,101300.,18200000.,600000.)
        6.349456116128078

    """
    # Test input array/value
    airtemp, rh, airpress, Rn, G = meteolib._arraytest(airtemp, rh, airpress, Rn, G)

    # Calculate Delta and gamma constants
    DELTA = meteolib.Delta_calc(airtemp)
    gamma = meteolib.gamma_calc(airtemp, rh, airpress)
    Lambda = meteolib.L_calc(airtemp)

    # calculate Ept [mm/day] with the Priestley-Taylor coefficient 1.26
    Ept = 1.26 * DELTA / (DELTA + gamma) * (Rn - G) / Lambda

    return Ept


def Epm(
    airtemp=scipy.array([]),
    rh=scipy.array([]),
    airpress=scipy.array([]),
    Rn=scipy.array([]),
    G=scipy.array([]),
    ra=scipy.array([]),
    rs=scipy.array([]),
):
    """
    Function to calculate the Penman Monteith evaporation.

    .. math::
        E_{pm} = \\frac{\\Delta \\cdot (R_n-G)+\\rho \\cdot c_p \\cdot
        (e_s-e_a)/r_a}{\\lambda \\cdot (\\Delta + \\gamma \\cdot
        (1+\\frac{r_s}{r_a}))}

    The function can be used with different time intervals, such as commonly
    used hourly or daily time intervals are used. When a plant canopy is wet,
    the surface resistance (rs) becomes zero (stomatal resistance irrelevant,
    as evaporation is directly from wet leaf surface). Function ra() in this
    module can be used to calculate the aerodynamic resistance (ra) from wind
    speed and height parameters.

    Parameters:
        - airtemp: (array of) daily average air temperatures [Celsius].
        - rh: (array of) daily average relative humidity values [%].
        - airpress: (array of) daily average air pressure data [hPa].
        - Rn: (array of) net radiation input over time interval t [J t-1].
        - G: (array of) soil heat flux input over time interval t [J t-1].
        - ra: aerodynamic resistance [s m-1].
        - rs: surface resistance [s m-1].

    Returns:
        - Epm: (array of) Penman Monteith evaporation values [mm t-1].

    References
    ----------
    J.L. Monteith (1965). Evaporation and environment. Symp. Soc. Exp. Biol.
    19: 205-224.

    Examples
    --------
        >>> Epm(21.67,67.0,1013.0,14100000.,500000.,104.,70.)
        3.243341146049407

    """
    # Test input array/value
    airtemp, rh, airpress, Rn, G, ra, rs = meteolib._arraytest(
        airtemp, rh, airpress, Rn, G, ra, rs
    )

    # Calculate Delta, gamma and lambda
    DELTA = meteolib.Delta_calc(airtemp) / 100.0  # [hPa/K]
    airpress = airpress * 100.0  # [Pa]
    gamma = meteolib.gamma_calc(airtemp, rh, airpress) / 100.0  # [hPa/K]
    Lambda = meteolib.L_calc(airtemp)  # [J/kg]
    rho = meteolib.rho_calc(airtemp, rh, airpress)  # [kg m-3]
    cp = meteolib.cp_calc(airtemp, rh, airpress)  # [J kg-1 K-1]

    # Calculate saturated and actual water vapour pressures
    es = meteolib.es_calc(airtemp) / 100.0  # [hPa]
    ea = meteolib.ea_calc(airtemp, rh) / 100.0  # [hPa]

    # Calculate Epm
    Epm = (
        (DELTA * (Rn - G) + rho * cp * (es - ea) / ra)
        / (DELTA + gamma * (1.0 + rs / ra))
    ) / Lambda

    return Epm  # actual ET in mm


def tvardry(
    rho=scipy.array([]),
    cp=scipy.array([]),
    T=scipy.array([]),
    sigma_t=scipy.array([]),
    z=float(),
    d=0.0,
    C1=2.9,
    C2=28.4,
):
    """Function to calculate the sensible heat flux from high
    frequency temperature measurements and their standard deviation:

    .. math::
        H= \\rho c_p \\left(k g (z-d) \\frac{C_2}{C_1^3}\\right)^\\frac{1}{2}\
        \\left( \\frac{\\sigma_T^3}{T}\\right)^\\frac{1}{2}

    Parameters:
        - rho: (array of) air density values [kg m-3].
        - cp: (array of) specific heat at constant temperature values [J kg-1 K-1].
        - T: (array of) temperature data [Celsius].
        - sigma_t: (array of) standard deviation of temperature data [Celsius].
        - z: height [m] above the surface of the temperature measurement.
        - d: displacement height due to vegetation, default set to zero [m].
        - C1: Constant, default set to 2.9 [-] for unstable conditions\
        (de Bruin et al., 1993).
        - C2: Constant, default set to 28.4 [-] for unstable conditions\
        (de Bruin et al., 1993).

    Returns:
        - H: (array of) sensible heat flux [W m-2].

    Notes
    -----
    This function holds only for free convective conditions when C2*z/L >>1,
    where L is the Obhukov length.

    References
    ----------
    - H.A.R. de Bruin and W. Kohsiek and B.J.J.M. van den Hurk (1993). A \
    verification of some methods to determine the fluxes of momentum, sensible \
    heat andwWater vapour using standard seviation and structure parameter of \
    scalar meteorological quantities. Boundary-Layer Meteorology 63(3): 231-257.
    - J.E. Tillman (1972), The indirect determination of stability, heat and\
    momentum fluxes in the atmosphere boundary layer from simple scalar\
    variables during dry unstable conditions, Journal of Applied Meteorology\
    11: 783-792.
    - H.F. Vugts, M.J. Waterloo, F.J. Beekman, K.F.A. Frumau and L.A.\
    Bruijnzeel. The temperature variance method: a powerful tool in the\
    estimation of actual evaporation rates. In J. S. Gladwell, editor,\
    Hydrology of Warm Humid Regions, Proc. of the Yokohama Symp., IAHS\
    Publication No. 216, pages 251-260, July 1993.

    Examples
    --------
        >>> tvardry(1.25,1035.0,25.3,0.25,3.0)
        34.658669290185287
        >>> displ_len=0.25
        >>> tvardry(1.25,1035.0,25.3,0.25,3.0,d=displ_len)
        33.183149497185511
        >>> tvardry(1.25,1035.0,25.3,0.25,3.0,d=displ_len,C2=30)
        34.10507908798597

    """
    # Test input array/value
    rho, cp, T, sigma_t = meteolib._arraytest(rho, cp, T, sigma_t)

    # Define constants
    k = 0.40  # von Karman constant
    g = 9.81  # acceleration due to gravity [m/s^2]
    # C1 = 2.9 # De Bruin et al., 1992
    # C2 = 28.4 # De Bruin et al., 1992
    # L= Obhukov-length [m]

    # Free Convection Limit
    H = rho * cp * scipy.sqrt((sigma_t / C1) ** 3 * k * g * (z - d) / (T + 273.15) * C2)
    # else:
    # including stability correction
    # zoverL = z/L
    # tvardry = rho * cp * scipy.sqrt((sigma_t/C1)**3 * k*g*(z-d) / (T+273.15) *\
    #    (1-C2*z/L)/(-1*z/L))

    # Check if we get complex numbers (square root of negative value) and remove
    # I = find(zoL >= 0 | H.imag != 0);
    # H(I) = scipy.ones(size(I))*NaN;

    return H  # sensible heat flux


def gash79(Pg=scipy.array([]), ER=float, S=float, St=float, p=float, pt=float):
    """
    Function to calculate precipitation interception loss from daily
    precipitation values and vegetation parameters.

    Parameters:
        - Pg: daily rainfall data [mm].
        - ER: evaporation percentage of total rainfall [mm h-1].
        - S: storage capacity canopy [mm].
        - St: stem storage capacity [mm].
        - p: direct throughfall [mm].
        - pt: stem precipitation [mm].

    Returns (in this order, matching the actual return statement):
        - Pg: Daily rainfall [mm].
        - TF: through fall [mm].
        - SF: stemflow [mm].
        - Ei: Interception [mm].

    References
    ----------
    J.H.C. Gash, An analytical model of rainfall interception by forests,
    Quarterly Journal of the Royal Meteorological Society, 1979, 105,
    pp. 43-55.

    Examples
    --------
        >>> gash79(12.4,0.15,1.3,0.2,0.2,0.02)
        (12.4, 8.4778854123725971, 0, 3.9221145876274024)
        >>> gash79(60.0,0.15,1.3,0.2,0.2,0.02)
        (60.0, 47.033885412372598, 0, 12.966114587627404)

    """
    # Test input array/value
    Pg = meteolib._arraytest(Pg)

    # Determine length of array Pg
    l = scipy.size(Pg)
    # Check if we have a single precipitation value or an array
    if l < 2:  # Dealing with single value...

        # PGsat calculation (for the saturation of the canopy)
        PGsat = -(1 / ER * S) * scipy.log(1 - (ER / (1 - p - pt)))

        # Set initial values to zero
        Ecan = 0.0
        Etrunk = 0.0
        # FIX: initialize Ei as well; the original left Ei undefined when
        # Pg == 0, so ``TF = Pg - Ei`` below raised a NameError.
        Ei = 0.0

        # Calculate interception for different storm sizes
        if Pg < PGsat and Pg > 0:
            Ecan = (1 - p - pt) * Pg
            if Pg > St / pt:
                Etrunk = St + pt * Pg
            Ei = Ecan + Etrunk
        if Pg > PGsat and Pg < St / pt:
            Ecan = (((1 - p - pt) * PGsat) - S) + (ER * (Pg - PGsat)) + S
            Etrunk = 0.0
            Ei = Ecan + Etrunk
        if Pg > PGsat and Pg > (St / pt):
            Ecan = (
                (((1 - p - pt) * PGsat) - S)
                + (ER * (Pg - PGsat))
                + S
                + (St + pt * Pg)
            )
            Etrunk = St + pt * Pg
            Ei = Ecan + Etrunk
        TF = Pg - Ei
        SF = 0

    else:
        # Define variables and constants
        n = scipy.size(Pg)
        TF = scipy.zeros(n)
        SF = scipy.zeros(n)
        Ei = scipy.zeros(n)
        Etrunk = scipy.zeros(n)

        # Set results to zero if rainfall Pg is zero
        TF[Pg == 0] = 0.0
        SF[Pg == 0] = 0.0
        Ei[Pg == 0] = 0.0
        Etrunk[Pg == 0] = 0.0

        # PGsat calc (for the saturation of the canopy)
        PGsat = -(1 / ER * S) * scipy.log(1 - (ER / (1 - p - pt)))

        # Process rainfall series
        for i in range(0, n):
            Ecan = 0.0
            Etrunk = 0.0
            if Pg[i] < PGsat and Pg[i] > 0:
                Ecan = (1 - p - pt) * Pg[i]
                if Pg[i] > St / pt:
                    Etrunk = St + pt * Pg[i]
                Ei[i] = Ecan + Etrunk
            if Pg[i] > PGsat and Pg[i] < St / pt:
                Ecan = (((1 - p - pt) * PGsat) - S) + (ER * (Pg[i] - PGsat)) + S
                Etrunk = 0.0
                # FIX: the original had a bare expression ``Ei[i]`` here (a
                # no-op), which silently left the interception at zero for
                # storms in this size class. Assign the computed value.
                Ei[i] = Ecan + Etrunk
            if Pg[i] > PGsat and Pg[i] > (St / pt):
                Ecan = (
                    (((1 - p - pt) * PGsat) - S)
                    + (ER * (Pg[i] - PGsat))
                    + S
                    + (St + pt * Pg[i])
                )
                Etrunk = St + pt * Pg[i]
                Ei[i] = Ecan + Etrunk
            TF[i] = Pg[i] - Ei[i]

    return Pg, TF, SF, Ei


# Run doctest when executing module
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print("Ran all tests...")
timcera/mettoolbox
src/mettoolbox/evaplib.py
Python
bsd-3-clause
23,762
# Authors: Robert Luke <mail@robertluke.net>
#
# License: BSD (3-clause)

from configparser import ConfigParser, RawConfigParser
import glob as glob
import re as re
import os.path as op

import numpy as np

from ..base import BaseRaw
from ..constants import FIFF
from ..meas_info import create_info, _format_dig_points
from ...annotations import Annotations
from ...transforms import apply_trans, _get_trans
from ...utils import logger, verbose, fill_doc
from ...utils import warn


@fill_doc
def read_raw_nirx(fname, preload=False, verbose=None):
    """Reader for a NIRX fNIRS recording.

    This function has only been tested with NIRScout devices.

    Parameters
    ----------
    fname : str
        Path to the NIRX data folder or header file.
    %(preload)s
    %(verbose)s

    Returns
    -------
    raw : instance of RawNIRX
        A Raw object containing NIRX data.

    See Also
    --------
    mne.io.Raw : Documentation of attribute and methods.
    """
    return RawNIRX(fname, preload, verbose)


def _open(fname):
    """Open a NIRX text file with the latin-1 encoding the device uses."""
    return open(fname, 'r', encoding='latin-1')


@fill_doc
class RawNIRX(BaseRaw):
    """Raw object from a NIRX fNIRS file.

    Parameters
    ----------
    fname : str
        Path to the NIRX data folder or header file.
    %(preload)s
    %(verbose)s

    See Also
    --------
    mne.io.Raw : Documentation of attribute and methods.
    """

    @verbose
    def __init__(self, fname, preload=False, verbose=None):
        from ...externals.pymatreader import read_mat
        from ...coreg import get_mni_fiducials  # avoid circular import prob
        logger.info('Loading %s' % fname)

        # Accept either the folder or the .hdr file inside it.
        if fname.endswith('.hdr'):
            fname = op.dirname(op.abspath(fname))

        if not op.isdir(fname):
            raise RuntimeError('The path you specified does not exist.')

        # Check if required files exist and store names for later use
        files = dict()
        keys = ('hdr', 'inf', 'set', 'tpl', 'wl1', 'wl2',
                'config.txt', 'probeInfo.mat')
        for key in keys:
            files[key] = glob.glob('%s/*%s' % (fname, key))
            if len(files[key]) != 1:
                raise RuntimeError('Expect one %s file, got %d' %
                                   (key, len(files[key]),))
            files[key] = files[key][0]

        # The dat file is not read, but a missing/extra one hints that the
        # folder was modified after the recording; warn rather than fail.
        n_dat = len(glob.glob('%s/*%s' % (fname, 'dat')))
        if n_dat != 1:
            warn("A single dat file was expected in the specified path, but "
                 "got %d. This may indicate that the file structure has been "
                 "modified since the measurement was saved." % n_dat)

        # Read number of rows/samples of wavelength data
        # (one text row per sample; the header row accounts for the -1 start)
        last_sample = -1
        with _open(files['wl1']) as fid:
            for line in fid:
                last_sample += 1

        # Read header file
        # The header file isn't compliant with the configparser. So all the
        # text between comments must be removed before passing to parser
        with _open(files['hdr']) as f:
            hdr_str = f.read()
        hdr_str = re.sub('#.*?#', '', hdr_str, flags=re.DOTALL)
        hdr = RawConfigParser()
        hdr.read_string(hdr_str)

        # Check that the file format version is supported
        if not any(item == hdr['GeneralInfo']['NIRStar'] for item in
                   ["\"15.0\"", "\"15.2\""]):
            raise RuntimeError('MNE does not support this NIRStar version'
                               ' (%s)' % (hdr['GeneralInfo']['NIRStar'],))
        if "NIRScout" not in hdr['GeneralInfo']['Device']:
            warn("Only import of data from NIRScout devices have been "
                 "thoroughly tested. You are using a %s device. " %
                 hdr['GeneralInfo']['Device'])

        # Parse required header fields

        # Extract frequencies of light used by machine
        fnirs_wavelengths = [
            int(s) for s in
            re.findall(r'(\d+)', hdr['ImagingParameters']['Wavelengths'])]

        # Extract source-detectors
        sources = np.asarray(
            [int(s) for s in re.findall(r'(\d+)-\d+:\d+',
                                        hdr['DataStructure']['S-D-Key'])],
            int)
        detectors = np.asarray(
            [int(s) for s in re.findall(r'\d+-(\d+):\d+',
                                        hdr['DataStructure']['S-D-Key'])],
            int)

        # Determine if short channels are present and on which detectors
        # NOTE(review): short_det is not used further in this reader yet;
        # kept so short-channel info is parsed/validated — confirm intent.
        if 'shortbundles' in hdr['ImagingParameters']:
            short_det = [
                int(s) for s in
                re.findall(r'(\d+)',
                           hdr['ImagingParameters']['ShortDetIndex'])]
            short_det = np.array(short_det, int)
        else:
            short_det = []

        # Extract sampling rate
        samplingrate = float(hdr['ImagingParameters']['SamplingRate'])

        # Read participant information file
        inf = ConfigParser(allow_no_value=True)
        inf.read(files['inf'])
        inf = inf._sections['Subject Demographics']

        # Store subject information from inf file in mne format
        # Note: NIRX also records "Study Type", "Experiment History",
        # "Additional Notes", "Contact Information" and this information
        # is currently discarded
        subject_info = {}
        names = inf['name'].split()
        if len(names) > 0:
            subject_info['first_name'] = \
                inf['name'].split()[0].replace("\"", "")
        if len(names) > 1:
            subject_info['last_name'] = \
                inf['name'].split()[-1].replace("\"", "")
        if len(names) > 2:
            subject_info['middle_name'] = \
                inf['name'].split()[-2].replace("\"", "")
        # subject_info['birthday'] = inf['age']  # TODO: not formatted properly
        subject_info['sex'] = inf['gender'].replace("\"", "")
        # Recode values
        if subject_info['sex'] in {'M', 'Male', '1'}:
            subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_MALE
        elif subject_info['sex'] in {'F', 'Female', '2'}:
            subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_FEMALE
        # NIRStar does not record an id, or handedness by default

        # Read information about probe/montage/optodes
        # A word on terminology used here:
        #   Sources produce light
        #   Detectors measure light
        #   Sources and detectors are both called optodes
        #   Each source - detector pair produces a channel
        #   Channels are defined as the midpoint between source and detector
        mat_data = read_mat(files['probeInfo.mat'], uint16_codec=None)
        requested_channels = mat_data['probeInfo']['probes']['index_c']
        src_locs = mat_data['probeInfo']['probes']['coords_s3'] / 100.
        det_locs = mat_data['probeInfo']['probes']['coords_d3'] / 100.
        ch_locs = mat_data['probeInfo']['probes']['coords_c3'] / 100.

        # These are all in MNI coordinates, so let's transform them to
        # the Neuromag head coordinate frame
        mri_head_t, _ = _get_trans('fsaverage', 'mri', 'head')
        src_locs = apply_trans(mri_head_t, src_locs)
        det_locs = apply_trans(mri_head_t, det_locs)
        ch_locs = apply_trans(mri_head_t, ch_locs)

        # Set up digitization
        dig = get_mni_fiducials('fsaverage', verbose=False)
        for fid in dig:
            fid['r'] = apply_trans(mri_head_t, fid['r'])
            fid['coord_frame'] = FIFF.FIFFV_COORD_HEAD
        for ii, ch_loc in enumerate(ch_locs, 1):
            dig.append(dict(
                kind=FIFF.FIFFV_POINT_EEG,  # misnomer but probably okay
                r=ch_loc,
                ident=ii,
                coord_frame=FIFF.FIFFV_COORD_HEAD,
            ))
        dig = _format_dig_points(dig)
        del mri_head_t

        # Determine requested channel indices
        # The wl1 and wl2 files include all possible source - detector pairs.
        # But most of these are not relevant. We want to extract only the
        # subset requested in the probe file
        req_ind = np.array([], int)
        for req_idx in range(requested_channels.shape[0]):
            sd_idx = np.where((sources == requested_channels[req_idx][0]) &
                              (detectors == requested_channels[req_idx][1]))
            req_ind = np.concatenate((req_ind, sd_idx[0]))
        req_ind = req_ind.astype(int)

        # Generate meaningful channel names
        def prepend(values, prefix):
            # e.g. prepend([1, 2], 'S') -> ['S1', 'S2']
            fmt = prefix + '{0}'
            return [fmt.format(v) for v in values]

        snames = prepend(sources[req_ind], 'S')
        dnames = prepend(detectors[req_ind], '_D')
        sdnames = [m + str(n) for m, n in zip(snames, dnames)]
        sd1 = [s + ' ' + str(fnirs_wavelengths[0]) for s in sdnames]
        sd2 = [s + ' ' + str(fnirs_wavelengths[1]) for s in sdnames]
        # Interleave so each pair's two wavelengths are adjacent channels
        chnames = [val for pair in zip(sd1, sd2) for val in pair]

        # Create mne structure
        info = create_info(chnames,
                           samplingrate,
                           ch_types='fnirs_cw_amplitude')
        info.update(subject_info=subject_info, dig=dig)

        # Store channel, source, and detector locations
        # The channel location is stored in the first 3 entries of loc.
        # The source location is stored in the second 3 entries of loc.
        # The detector location is stored in the third 3 entries of loc.
        # NIRx NIRSite uses MNI coordinates.
        # Also encode the light frequency in the structure.
        for ch_idx2 in range(requested_channels.shape[0]):
            # Find source and store location
            src = int(requested_channels[ch_idx2, 0]) - 1
            info['chs'][ch_idx2 * 2]['loc'][3:6] = src_locs[src, :]
            info['chs'][ch_idx2 * 2 + 1]['loc'][3:6] = src_locs[src, :]
            # Find detector and store location
            det = int(requested_channels[ch_idx2, 1]) - 1
            info['chs'][ch_idx2 * 2]['loc'][6:9] = det_locs[det, :]
            info['chs'][ch_idx2 * 2 + 1]['loc'][6:9] = det_locs[det, :]
            # Store channel location as midpoint between source and detector.
            midpoint = (src_locs[src, :] + det_locs[det, :]) / 2
            info['chs'][ch_idx2 * 2]['loc'][:3] = midpoint
            info['chs'][ch_idx2 * 2 + 1]['loc'][:3] = midpoint
            info['chs'][ch_idx2 * 2]['loc'][9] = fnirs_wavelengths[0]
            info['chs'][ch_idx2 * 2 + 1]['loc'][9] = fnirs_wavelengths[1]

        # Extract the start/stop numbers for samples in the CSV. In theory the
        # sample bounds should just be 10 * the number of channels, but some
        # files have mixed \n and \n\r endings (!) so we can't rely on it, and
        # instead make a single pass over the entire file at the beginning so
        # that we know how to seek and read later.
        bounds = dict()
        for key in ('wl1', 'wl2'):
            offset = 0
            bounds[key] = [offset]
            with open(files[key], 'rb') as fid:
                for line in fid:
                    offset += len(line)
                    bounds[key].append(offset)
                assert offset == fid.tell()

        # Extras required for reading data
        raw_extras = {
            'sd_index': req_ind,
            'files': files,
            'bounds': bounds,
        }

        super(RawNIRX, self).__init__(
            info, preload, filenames=[fname], last_samps=[last_sample],
            raw_extras=[raw_extras], verbose=verbose)

        # Read triggers from event file
        if op.isfile(files['hdr'][:-3] + 'evt'):
            with _open(files['hdr'][:-3] + 'evt') as fid:
                t = [re.findall(r'(\d+)', line) for line in fid]
            onset = np.zeros(len(t), float)
            duration = np.zeros(len(t), float)
            description = [''] * len(t)
            for t_idx in range(len(t)):
                # Condition code is stored as reversed binary digits
                binary_value = ''.join(t[t_idx][1:])[::-1]
                trigger_frame = float(t[t_idx][0])
                onset[t_idx] = (trigger_frame) * (1.0 / samplingrate)
                duration[t_idx] = 1.0  # No duration info stored in files
                description[t_idx] = int(binary_value, 2) * 1.
            annot = Annotations(onset, duration, description)
            self.set_annotations(annot)

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a segment of data from a file.

        The NIRX machine records raw data as two different wavelengths.
        The returned data interleaves the wavelengths.
        """
        sdindex = self._raw_extras[fi]['sd_index']

        wls = [
            _read_csv_rows_cols(
                self._raw_extras[fi]['files'][key],
                start, stop, sdindex,
                self._raw_extras[fi]['bounds'][key]).T
            for key in ('wl1', 'wl2')
        ]

        # TODO: Make this more efficient by only indexing above what we need.
        # For now let's just construct the full data matrix and index.
        # Interleave wavelength 1 and 2 to match channel names:
        this_data = np.zeros((len(wls[0]) * 2, stop - start))
        this_data[0::2, :] = wls[0]
        this_data[1::2, :] = wls[1]
        data[:] = this_data[idx]

        return data


def _read_csv_rows_cols(fname, start, stop, cols, bounds):
    """Read rows [start, stop) and the given columns from a wl1/wl2 file.

    Parameters
    ----------
    fname : str
        Path to the whitespace-separated data file.
    start, stop : int
        First (inclusive) and last (exclusive) sample/row to read.
    cols : array-like of int
        Column indices to keep (the requested source-detector pairs).
    bounds : list of int
        Byte offset of the start of each row, precomputed in ``__init__``,
        so the requested rows can be addressed with a single seek/read.

    Returns
    -------
    x : ndarray, shape (stop - start, len(cols))
        The parsed samples.
    """
    with open(fname, 'rb') as fid:
        fid.seek(bounds[start])
        data = fid.read(bounds[stop] - bounds[start]).decode('latin-1')
    # np.fromstring with a text separator is deprecated in NumPy; parsing
    # via str.split handles the same whitespace-separated layout.
    x = np.array(data.split(), float)
    x.shape = (stop - start, -1)
    x = x[:, cols]
    return x
Teekuningas/mne-python
mne/io/nirx/nirx.py
Python
bsd-3-clause
13,599
import click

from .. import templates


# Container group for all `weber generate ...` subcommands.
@click.group()
def generate():
    pass


@generate.command()
@click.argument('name')
@click.option('--path', required=True)
@click.pass_context
def blueprint(ctx, name, path):
    # The application object is stashed on the Click context by the caller.
    app = ctx.obj
    # Render the blueprint snippet into <blueprint dir>/<name>.py,
    # exposing the chosen name and URL path to the template context.
    destination = app.get_blueprint_directory().join(name + '.py')
    template_ctx = {'blueprint': {'name': name, 'path': path}}
    templates.extract_template('snippets/blueprint.py', destination,
                               ctx=template_ctx)
vmalloc/weber-cli
weber/cli/generate.py
Python
bsd-3-clause
427
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.core.urlresolvers import reverse from django.db import models from django.utils.encoding import ( force_text, python_2_unicode_compatible) from django.utils.translation import ugettext_lazy as _ from select_multiple_field.models import SelectMultipleField @python_2_unicode_compatible class ChickenBalls(models.Model): """ChickenBalls is used for South migration testing""" SUICIDE = 's' HOT = 'h' HOME_STYLE = 'H' CAJUN = 'c' JERK = 'j' GATOR = 'g' FLAVOUR_CHOICES = ( (_('Hot & Spicy'), ( (SUICIDE, _('Suicide hot')), (HOT, _('Hot hot sauce')), (CAJUN, _('Cajun sauce')), (JERK, _('Jerk sauce')))), (_('Traditional'), ( (HOME_STYLE, _('Homestyle')), (GATOR, _('Gator flavour')))), ) flavour = SelectMultipleField( blank=True, include_blank=False, max_length=5, max_choices=2, choices=FLAVOUR_CHOICES ) RANCH = 'r' HONEY_MUSTARD = 'h' BBQ = 'b' DIP_CHOICES = ( (RANCH, _('Ranch')), (HONEY_MUSTARD, _('Honey mustard')), (BBQ, _('BBQ')), ) dips = SelectMultipleField( blank=True, default='', include_blank=False, max_length=6, max_choices=3, choices=DIP_CHOICES ) def __str__(self): return "pk=%s" % force_text(self.pk) def get_absolute_url(self): return reverse('ftw:detail', args=[self.pk])
kelvinwong-ca/django-select-multiple-field
test_projects/django14/suthern/models.py
Python
bsd-3-clause
1,575
import os

from django import forms

from pontoon.base.models import (
    Locale,
    ProjectLocale,
    User,
    UserProfile
)
from pontoon.sync.formats import SUPPORTED_FORMAT_PARSERS


class NoTabStopCharField(forms.CharField):
    # Text input excluded from the tab order (tabindex=-1).
    widget = forms.TextInput(attrs={'tabindex': '-1'})


class NoTabStopFileField(forms.FileField):
    # File input excluded from the tab order (tabindex=-1).
    widget = forms.FileInput(attrs={'tabindex': '-1'})


class DownloadFileForm(forms.Form):
    """Identifies the project (slug), locale (code) and resource (part)
    whose translation file should be downloaded."""
    slug = NoTabStopCharField()
    code = NoTabStopCharField()
    part = NoTabStopCharField()


class UserPermissionGroupForm(object):
    """Mixin for ModelForms whose instance exposes *_group attributes."""

    def assign_users_to_groups(self, group_name, users):
        """
        Clear group membership and assign a set of users to a given group
        of users.
        """
        # e.g. group_name='translators' -> self.instance.translators_group
        group = getattr(self.instance, '{}_group'.format(group_name))

        group.user_set.clear()
        if users:
            group.user_set.add(*users)


class LocalePermsForm(forms.ModelForm, UserPermissionGroupForm):
    translators = forms.ModelMultipleChoiceField(
        queryset=User.objects.all(), required=False)
    managers = forms.ModelMultipleChoiceField(
        queryset=User.objects.all(), required=False)

    class Meta:
        model = Locale
        fields = ('translators', 'managers')

    def save(self, *args, **kwargs):
        # NOTE(review): intentionally does not call super().save() — only
        # the group memberships are updated, not the Locale row itself.
        self.assign_users_to_groups(
            'translators', self.cleaned_data.get('translators', []))
        self.assign_users_to_groups(
            'managers', self.cleaned_data.get('managers', []))


class ProjectLocalePermsForm(forms.ModelForm, UserPermissionGroupForm):
    translators = forms.ModelMultipleChoiceField(
        queryset=User.objects.all(), required=False)

    class Meta:
        model = ProjectLocale
        fields = ('translators', 'has_custom_translators')

    def save(self, *args, **kwargs):
        super(ProjectLocalePermsForm, self).save(*args, **kwargs)
        self.assign_users_to_groups(
            'translators', self.cleaned_data.get('translators', []))


class ProjectLocaleFormSet(forms.models.BaseModelFormSet):
    """
    Formset will update only existing objects and won't allow to create
    new-ones.
    """

    @property
    def errors_dict(self):
        """Map each invalid form's instance pk to its errors."""
        errors = {}
        for form in self:
            if form.errors:
                errors[form.instance.pk] = form.errors
        return errors

    def save(self, commit=True):
        """Save forms with custom translators enabled and reset the
        translator groups of the remaining project-locales."""
        self.new_objects = []

        if commit:
            for form in self:
                if form.instance.pk and \
                        form.cleaned_data.get('has_custom_translators'):
                    form.save()

            # We have to cleanup projects from translators.
            # BUGFIX: this must be a materialized list, not a generator.
            # A generator is always truthy (so the emptiness check below
            # never fired) and it would be exhausted by the first query,
            # leaving the .delete() query with nothing to match.
            without_translators = [
                form.instance.pk for form in self
                if form.instance.pk and
                not form.cleaned_data.get('has_custom_translators')
            ]

            if not without_translators:
                return

            ProjectLocale.objects.filter(
                pk__in=without_translators
            ).update(has_custom_translators=False)

            # Remove all user memberships of the now-disabled groups.
            User.groups.through.objects.filter(
                group__projectlocales__pk__in=without_translators
            ).delete()


ProjectLocalePermsFormsSet = forms.modelformset_factory(
    ProjectLocale,
    ProjectLocalePermsForm,
    formset=ProjectLocaleFormSet,
)


class UploadFileForm(DownloadFileForm):
    """Download form plus the uploaded translation file, with size and
    format validation."""
    uploadfile = NoTabStopFileField()

    def clean(self):
        cleaned_data = super(UploadFileForm, self).clean()
        part = cleaned_data.get("part")
        uploadfile = cleaned_data.get("uploadfile")

        if uploadfile:
            limit = 5000  # kB

            # File size validation
            if uploadfile.size > limit * 1000:
                current = round(uploadfile.size / 1000)
                message = (
                    'Upload failed. Keep filesize under {limit} kB.'
                    ' Your upload: {current} kB.'
                    .format(limit=limit, current=current)
                )
                raise forms.ValidationError(message)

            # File format validation: the upload's extension must match the
            # resource being replaced (when that format is one we can parse).
            if part:
                file_extension = os.path.splitext(uploadfile.name)[1].lower()
                part_extension = os.path.splitext(part)[1].lower()

                # For now, skip if uploading file while using subpages
                if part_extension in SUPPORTED_FORMAT_PARSERS.keys() and \
                        part_extension != file_extension:
                    message = (
                        'Upload failed. File format not supported.'
                        ' Use {supported}.'
                        .format(supported=part_extension)
                    )
                    raise forms.ValidationError(message)

        return cleaned_data


class UserProfileForm(forms.ModelForm):
    # Disallow characters that could be used for markup injection.
    first_name = forms.RegexField(
        regex='^[^<>"\'&]+$', max_length=30, strip=True)

    class Meta:
        model = User
        fields = ('first_name',)


class UserLocalesSettings(forms.ModelForm):
    """
    Form is responsible for saving preferred locales of contributor.
    """

    class Meta:
        model = UserProfile
        fields = ('locales_order',)
participedia/pontoon
pontoon/base/forms.py
Python
bsd-3-clause
4,967
# proxy module from __future__ import absolute_import from chaco.tools.legend_highlighter import *
enthought/etsproxy
enthought/chaco/tools/legend_highlighter.py
Python
bsd-3-clause
99
# ~*~ coding: utf-8 ~*~
"""Module that contains a Marshmallow schema that generates JSON schemas.

JSON Schemas can be a pain to write by hand. For example, the product
requirements change, thus your schema changes. If you are maintaining your
schemas by hand, you have to go through all of them and update them, or, even
worse, you just don't maintain them.

With this class, you should never need to hand write a JSON Schema again. Just
pass your schema to it and it'll generate it for you.

Example:
    This module is super easy to use. All you need to do is pass a schema or a
    Python path to a schema and this library will do the rest for you!

    .. code-block:: python

        # This is the schema we want to generate the schema for.
        class UserSchema(Schema):
            first_name = fields.String(**STR_REQUIRED)
            last_name = fields.String(**STR_REQUIRED)
            phone = PhoneNumberField(**REQUIRED)
            company_id = ForeignKeyField(**REQUIRED)
            joined = PendulumField(format='iso', **REQUIRED)
            last_login = ArrowField(allow_none=True, format='iso')

            class Meta(object):
                # This will dictate the filename that this schema will be
                # dumped to. If not provided, the filename will be
                # UserSchema.json
                json_schema_filename = 'user.json'

        # You can dump the schema to a file in a folder
        json_schema = FleakerJSONSchema.write_schema_to_file(
            # This library doesn't care if the schema has been initialized
            UserSchema,
            # The folder to write this schema to
            folder='docs/raml/schemas',
            # The context can control certain things about how the schema
            # will be dumped.
            context={'dump_schema': True}
        )

        # Now, you can find the dumped schema in docs/raml/schemas/user.json
        # You also have the end result stored in the json_schema variable

        # If you'd like for fine grained control over the filename or want to
        # use the file object further, a file pointer can be passed to the
        # creation method.
        with open('user_schema.json', 'w') as fp:
            FleakerJSONSchema.write_schema_to_file(UserSchema,
                                                   file_pointer=fp)

        # Maybe you just want the schema in dict form. Super easy.
        json_schema = FleakerJSONSchema.generate_json_schema(
            # For all creation methods in this module can be loaded either by
            # the instance/class of the schema or by passing a Python path to
            # it, like so.
            'app.schemata.user.UserSchema'
        )
"""

import decimal
import json
import os.path

from inspect import isclass
from importlib import import_module
from sys import stdout

from marshmallow import Schema
from marshmallow_jsonschema import JSONSchema
from marshmallow_jsonschema.base import TYPE_MAP

from fleaker._compat import string_types
from fleaker.constants import DEFAULT_DICT, MISSING

# Update the built in TYPE_MAP to match our style better
TYPE_MAP.update({
    int: {
        'type': 'integer',
    },
    float: {
        'type': 'number',
    },
    decimal.Decimal: {
        'type': 'number',
    },
})


class FleakerJSONSchema(JSONSchema):
    """Marshmallow schema that can be used to generate JSON schemas."""

    @classmethod
    def generate_json_schema(cls, schema, context=DEFAULT_DICT):
        """Generate a JSON Schema from a Marshmallow schema.

        Args:
            schema (marshmallow.Schema|str): The Marshmallow schema, or the
                Python path to one, to create the JSON schema for.

        Keyword Args:
            context (dict, optional): The Marshmallow context to be pushed to
                the schema when the JSON schema is generated.

        Returns:
            dict: The JSON schema in dictionary form.
        """
        schema = cls._get_schema(schema)

        # Generate the JSON Schema
        return cls(context=context).dump(schema).data

    @classmethod
    def write_schema_to_file(cls, schema, file_pointer=stdout,
                             folder=MISSING, context=DEFAULT_DICT):
        """Given a Marshmallow schema, create a JSON Schema for it.

        Args:
            schema (marshmallow.Schema|str): The Marshmallow schema, or the
                Python path to one, to create the JSON schema for.

        Keyword Args:
            file_pointer (file, optional): The pointer to the file to write
                this schema to. If not provided, the schema will be dumped to
                ``sys.stdout``. Ignored when ``folder`` is given.
            folder (str, optional): The folder in which to save the JSON
                schema. The name of the schema file can be optionally
                controlled by the schema's ``Meta.json_schema_filename``. If
                that attribute is not set, the class's name will be used for
                the filename. If writing the schema to a specific file is
                desired, please pass in a ``file_pointer``.
            context (dict, optional): The Marshmallow context to be pushed to
                the schema when the JSON schema is generated.

        Returns:
            dict: The JSON schema in dictionary form.
        """
        schema = cls._get_schema(schema)
        json_schema = cls.generate_json_schema(schema, context=context)

        if folder:
            schema_filename = getattr(
                schema.Meta,
                'json_schema_filename',
                '.'.join([schema.__class__.__name__, 'json'])
            )
            json_path = os.path.join(folder, schema_filename)

            # BUGFIX: the file opened here was previously never closed,
            # leaking the handle; the context manager guarantees closure.
            with open(json_path, 'w') as folder_fp:
                json.dump(json_schema, folder_fp, indent=2)
        else:
            json.dump(json_schema, file_pointer, indent=2)

        return json_schema

    @classmethod
    def _get_schema(cls, schema):
        """Method that will fetch a Marshmallow schema flexibly.

        Args:
            schema (marshmallow.Schema|str): Either the schema class, an
                instance of a schema, or a Python path to a schema.

        Returns:
            marshmallow.Schema: The desired schema.

        Raises:
            TypeError: This is raised if the provided object isn't a
                Marshmallow schema.
        """
        if isinstance(schema, string_types):
            schema = cls._get_object_from_python_path(schema)

        if isclass(schema):
            schema = schema()

        if not isinstance(schema, Schema):
            raise TypeError("The schema must be a path to a Marshmallow "
                            "schema or a Marshmallow schema.")

        return schema

    @staticmethod
    def _get_object_from_python_path(python_path):
        """Method that will fetch a Marshmallow schema from a path to it.

        Args:
            python_path (str): The string path to the Marshmallow schema.

        Returns:
            marshmallow.Schema: The schema matching the provided path.

        Raises:
            TypeError: This is raised if the specified object isn't a
                Marshmallow schema.
        """
        # Dissect the path
        python_path = python_path.split('.')
        module_path = '.'.join(python_path[:-1])
        object_class = python_path[-1]

        # Grab the object
        module = import_module(module_path)
        schema = getattr(module, object_class)

        if isclass(schema):
            schema = schema()

        return schema
croscon/fleaker
fleaker/marshmallow/json_schema.py
Python
bsd-3-clause
7,584
"""Algorithms for partial fraction decomposition of rational functions. """ from __future__ import print_function, division from sympy.polys import Poly, RootSum, cancel, factor from sympy.polys.polytools import parallel_poly_from_expr from sympy.polys.polyoptions import allowed_flags, set_defaults from sympy.polys.polyerrors import PolynomialError from sympy.core import S, Add, sympify, Function, Lambda, Dummy from sympy.core.basic import preorder_traversal from sympy.utilities import numbered_symbols, take, xthreaded, public from sympy.core.compatibility import xrange @xthreaded @public def apart(f, x=None, full=False, **options): """ Compute partial fraction decomposition of a rational function. Given a rational function ``f``, computes the partial fraction decomposition of ``f``. Two algorithms are available: One is based on the undertermined coefficients method, the other is Bronstein's full partial fraction decomposition algorithm. The undetermined coefficients method (selected by ``full=False``) uses polynomial factorization (and therefore accepts the same options as factor) for the denominator. Per default it works over the rational numbers, therefore decomposition of denominators with non-rational roots (e.g. irrational, complex roots) is not supported by default (see options of factor). Bronstein's algorithm can be selected by using ``full=True`` and allows a decomposition of denominators with non-rational roots. A human-readable result can be obtained via ``doit()`` (see examples below). 
Examples ======== >>> from sympy.polys.partfrac import apart >>> from sympy.abc import x, y By default, using the undetermined coefficients method: >>> apart(y/(x + 2)/(x + 1), x) -y/(x + 2) + y/(x + 1) The undetermined coefficients method does not provide a result when the denominators roots are not rational: >>> apart(y/(x**2 + x + 1), x) y/(x**2 + x + 1) You can choose Bronstein's algorithm by setting ``full=True``: >>> apart(y/(x**2 + x + 1), x, full=True) RootSum(_w**2 + _w + 1, Lambda(_a, (-2*_a*y/3 - y/3)/(-_a + x))) Calling ``doit()`` yields a human-readable result: >>> apart(y/(x**2 + x + 1), x, full=True).doit() (-y/3 - 2*y*(-1/2 - sqrt(3)*I/2)/3)/(x + 1/2 + sqrt(3)*I/2) + (-y/3 - 2*y*(-1/2 + sqrt(3)*I/2)/3)/(x + 1/2 - sqrt(3)*I/2) See Also ======== apart_list, assemble_partfrac_list """ allowed_flags(options, []) f = sympify(f) if f.is_Atom: return f else: P, Q = f.as_numer_denom() _options = options.copy() options = set_defaults(options, extension=True) try: (P, Q), opt = parallel_poly_from_expr((P, Q), x, **options) except PolynomialError as msg: if f.is_commutative: raise PolynomialError(msg) # non-commutative if f.is_Mul: c, nc = f.args_cnc(split_1=False) nc = f.func(*[apart(i, x=x, full=full, **_options) for i in nc]) if c: c = apart(f.func._from_args(c), x=x, full=full, **_options) return c*nc else: return nc elif f.is_Add: c = [] nc = [] for i in f.args: if i.is_commutative: c.append(i) else: try: nc.append(apart(i, x=x, full=full, **_options)) except NotImplementedError: nc.append(i) return apart(f.func(*c), x=x, full=full, **_options) + f.func(*nc) else: reps = [] pot = preorder_traversal(f) next(pot) for e in pot: try: reps.append((e, apart(e, x=x, full=full, **_options))) pot.skip() # this was handled successfully except NotImplementedError: pass return f.xreplace(dict(reps)) if P.is_multivariate: fc = f.cancel() if fc != f: return apart(fc, x=x, full=full, **_options) raise NotImplementedError( "multivariate partial fraction decomposition") 
common, P, Q = P.cancel(Q) poly, P = P.div(Q, auto=True) P, Q = P.rat_clear_denoms(Q) if Q.degree() <= 1: partial = P/Q else: if not full: partial = apart_undetermined_coeffs(P, Q) else: partial = apart_full_decomposition(P, Q) terms = S.Zero for term in Add.make_args(partial): if term.has(RootSum): terms += term else: terms += factor(term) return common*(poly.as_expr() + terms) def apart_undetermined_coeffs(P, Q): """Partial fractions via method of undetermined coefficients. """ X = numbered_symbols(cls=Dummy) partial, symbols = [], [] _, factors = Q.factor_list() for f, k in factors: n, q = f.degree(), Q for i in xrange(1, k + 1): coeffs, q = take(X, n), q.quo(f) partial.append((coeffs, q, f, i)) symbols.extend(coeffs) dom = Q.get_domain().inject(*symbols) F = Poly(0, Q.gen, domain=dom) for i, (coeffs, q, f, k) in enumerate(partial): h = Poly(coeffs, Q.gen, domain=dom) partial[i] = (h, f, k) q = q.set_domain(dom) F += h*q system, result = [], S(0) for (k,), coeff in F.terms(): system.append(coeff - P.nth(k)) from sympy.solvers import solve solution = solve(system, symbols) for h, f, k in partial: h = h.as_expr().subs(solution) result += h/f.as_expr()**k return result def apart_full_decomposition(P, Q): """ Bronstein's full partial fraction decomposition algorithm. Given a univariate rational function ``f``, performing only GCD operations over the algebraic closure of the initial ground domain of definition, compute full partial fraction decomposition with fractions having linear denominators. Note that no factorization of the initial denominator of ``f`` is performed. The final decomposition is formed in terms of a sum of :class:`RootSum` instances. References ========== 1. [Bronstein93]_ """ return assemble_partfrac_list(apart_list(P/Q, P.gens[0])) @public def apart_list(f, x=None, dummies=None, **options): """ Compute partial fraction decomposition of a rational function and return the result in structured form. 
Given a rational function ``f`` compute the partial fraction decomposition of ``f``. Only Bronstein's full partial fraction decomposition algorithm is supported by this method. The return value is highly structured and perfectly suited for further algorithmic treatment rather than being human-readable. The function returns a tuple holding three elements: * The first item is the common coefficient, free of the variable `x` used for decomposition. (It is an element of the base field `K`.) * The second item is the polynomial part of the decomposition. This can be the zero polynomial. (It is an element of `K[x]`.) * The third part itself is a list of quadruples. Each quadruple has the following elements in this order: - The (not necessarily irreducible) polynomial `D` whose roots `w_i` appear in the linear denominator of a bunch of related fraction terms. (This item can also be a list of explicit roots. However, at the moment ``apart_list`` never returns a result this way, but the related ``assemble_partfrac_list`` function accepts this format as input.) - The numerator of the fraction, written as a function of the root `w` - The linear denominator of the fraction *excluding its power exponent*, written as a function of the root `w`. - The power to which the denominator has to be raised. On can always rebuild a plain expression by using the function ``assemble_partfrac_list``. 
Examples ======== A first example: >>> from sympy.polys.partfrac import apart_list, assemble_partfrac_list >>> from sympy.abc import x, t >>> f = (2*x**3 - 2*x) / (x**2 - 2*x + 1) >>> pfd = apart_list(f) >>> pfd (1, Poly(2*x + 4, x, domain='ZZ'), [(Poly(_w - 1, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1)]) >>> assemble_partfrac_list(pfd) 2*x + 4 + 4/(x - 1) Second example: >>> f = (-2*x - 2*x**2) / (3*x**2 - 6*x) >>> pfd = apart_list(f) >>> pfd (-1, Poly(2/3, x, domain='QQ'), [(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 2), Lambda(_a, -_a + x), 1)]) >>> assemble_partfrac_list(pfd) -2/3 - 2/(x - 2) Another example, showing symbolic parameters: >>> pfd = apart_list(t/(x**2 + x + t), x) >>> pfd (1, Poly(0, x, domain='ZZ[t]'), [(Poly(_w**2 + _w + t, _w, domain='ZZ[t]'), Lambda(_a, -2*_a*t/(4*t - 1) - t/(4*t - 1)), Lambda(_a, -_a + x), 1)]) >>> assemble_partfrac_list(pfd) RootSum(_w**2 + _w + t, Lambda(_a, (-2*_a*t/(4*t - 1) - t/(4*t - 1))/(-_a + x))) This example is taken from Bronstein's original paper: >>> f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2) >>> pfd = apart_list(f) >>> pfd (1, Poly(0, x, domain='ZZ'), [(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1), (Poly(_w**2 - 1, _w, domain='ZZ'), Lambda(_a, -3*_a - 6), Lambda(_a, -_a + x), 2), (Poly(_w + 1, _w, domain='ZZ'), Lambda(_a, -4), Lambda(_a, -_a + x), 1)]) >>> assemble_partfrac_list(pfd) -4/(x + 1) - 3/(x + 1)**2 - 9/(x - 1)**2 + 4/(x - 2) See also ======== apart, assemble_partfrac_list References ========== 1. 
[Bronstein93]_ """ allowed_flags(options, []) f = sympify(f) if f.is_Atom: return f else: P, Q = f.as_numer_denom() options = set_defaults(options, extension=True) (P, Q), opt = parallel_poly_from_expr((P, Q), x, **options) if P.is_multivariate: raise NotImplementedError( "multivariate partial fraction decomposition") common, P, Q = P.cancel(Q) poly, P = P.div(Q, auto=True) P, Q = P.rat_clear_denoms(Q) polypart = poly if dummies is None: def dummies(name): d = Dummy(name) while True: yield d dummies = dummies("w") rationalpart = apart_list_full_decomposition(P, Q, dummies) return (common, polypart, rationalpart) def apart_list_full_decomposition(P, Q, dummygen): """ Bronstein's full partial fraction decomposition algorithm. Given a univariate rational function ``f``, performing only GCD operations over the algebraic closure of the initial ground domain of definition, compute full partial fraction decomposition with fractions having linear denominators. Note that no factorization of the initial denominator of ``f`` is performed. The final decomposition is formed in terms of a sum of :class:`RootSum` instances. References ========== 1. 
[Bronstein93]_ """ f, x, U = P/Q, P.gen, [] u = Function('u')(x) a = Dummy('a') partial = [] for d, n in Q.sqf_list_include(all=True): b = d.as_expr() U += [ u.diff(x, n - 1) ] h = cancel(f*b**n) / u**n H, subs = [h], [] for j in range(1, n): H += [ H[-1].diff(x) / j ] for j in range(1, n + 1): subs += [ (U[j - 1], b.diff(x, j) / j) ] for j in range(0, n): P, Q = cancel(H[j]).as_numer_denom() for i in range(0, j + 1): P = P.subs(*subs[j - i]) Q = Q.subs(*subs[0]) P = Poly(P, x) Q = Poly(Q, x) G = P.gcd(d) D = d.quo(G) B, g = Q.half_gcdex(D) b = (P * B.quo(g)).rem(D) Dw = D.subs(x, next(dummygen)) numer = Lambda(a, b.as_expr().subs(x, a)) denom = Lambda(a, (x - a)) exponent = n-j partial.append((Dw, numer, denom, exponent)) return partial @public def assemble_partfrac_list(partial_list): r"""Reassemble a full partial fraction decomposition from a structured result obtained by the function ``apart_list``. Examples ======== This example is taken from Bronstein's original paper: >>> from sympy.polys.partfrac import apart_list, assemble_partfrac_list >>> from sympy.abc import x, y >>> f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2) >>> pfd = apart_list(f) >>> pfd (1, Poly(0, x, domain='ZZ'), [(Poly(_w - 2, _w, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1), (Poly(_w**2 - 1, _w, domain='ZZ'), Lambda(_a, -3*_a - 6), Lambda(_a, -_a + x), 2), (Poly(_w + 1, _w, domain='ZZ'), Lambda(_a, -4), Lambda(_a, -_a + x), 1)]) >>> assemble_partfrac_list(pfd) -4/(x + 1) - 3/(x + 1)**2 - 9/(x - 1)**2 + 4/(x - 2) If we happen to know some roots we can provide them easily inside the structure: >>> pfd = apart_list(2/(x**2-2)) >>> pfd (1, Poly(0, x, domain='ZZ'), [(Poly(_w**2 - 2, _w, domain='ZZ'), Lambda(_a, _a/2), Lambda(_a, -_a + x), 1)]) >>> pfda = assemble_partfrac_list(pfd) >>> pfda RootSum(_w**2 - 2, Lambda(_a, _a/(-_a + x)))/2 >>> pfda.doit() -sqrt(2)/(2*(x + sqrt(2))) + sqrt(2)/(2*(x - sqrt(2))) >>> from sympy import Dummy, Poly, Lambda, sqrt >>> a = Dummy("a") >>> pfd = 
(1, Poly(0, x, domain='ZZ'), [([sqrt(2),-sqrt(2)], Lambda(a, a/2), Lambda(a, -a + x), 1)]) >>> assemble_partfrac_list(pfd) -sqrt(2)/(2*(x + sqrt(2))) + sqrt(2)/(2*(x - sqrt(2))) See also ======== apart, apart_list """ # Common factor common = partial_list[0] # Polynomial part polypart = partial_list[1] pfd = polypart.as_expr() # Rational parts for r, nf, df, ex in partial_list[2]: if isinstance(r, Poly): # Assemble in case the roots are given implicitly by a polynomials an, nu = nf.variables, nf.expr ad, de = df.variables, df.expr # Hack to make dummies equal because Lambda created new Dummies de = de.subs(ad[0], an[0]) func = Lambda(an, nu/de**ex) pfd += RootSum(r, func, auto=False, quadratic=False) else: # Assemble in case the roots are given explicitely by a list of algebraic numbers for root in r: pfd += nf(root)/df(root)**ex return common*pfd
AunShiLord/sympy
sympy/polys/partfrac.py
Python
bsd-3-clause
14,785
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function

from future import standard_library
standard_library.install_aliases()
from builtins import object

import requests

from datetime import datetime, timedelta
from pprint import pformat
import urllib.request, urllib.parse, urllib.error

import logging
log = logging.getLogger('lvbRequester')


class JsonDecodingError(Exception):
    """Raised when a response body cannot be decoded as JSON.

    The raw response text is kept in ``self.raw`` so callers can inspect
    (or log) what the server actually returned.
    """

    def __init__(self, msg, raw):
        Exception.__init__(self, msg)
        self.raw = raw
        log.error(raw)


class LVB(object):
    """ Returns travel informations from Leipziger Verkehrsbetriebe (l.de)
    """

    # Endpoints of the l.de timetable service.
    URL = {
        'connection': 'https://www.l.de/verkehrsbetriebe/fahrplan/verbindung',
        'complete': 'https://www.l.de/ajax_de',
        'station': 'https://www.l.de/verkehrsbetriebe/fahrplan/abfahrten'
    }

    # Means of transport accepted by the connection endpoint
    # (values are display names only; 'Strasenbahn' sic in upstream data).
    TRANSPORTMAP = {
        'STR': 'Strasenbahn',
        'BUS': 'Bus',
        'RB/RE': 'Regionalbahn',
        'S/U': 'S-Bahn',
    }

    # Headers mimicking the AJAX calls of the l.de web frontend.
    HEADER = {
        'Origin': 'https://www.l.de',
        # 'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'de,en-US;q=0.9,en;q=0.8',  # 'en-US,en;q=0.8,de;q=0.6',
        # 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        # 'Referer': 'https://www.l.de/verkehrsbetriebe/fahrplan',
        'X-Requested-With': 'XMLHttpRequest',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
    }

    @classmethod
    def _defParseDatetime(cls, time):
        """ allow different ways to provide a time and parse it to datetime

        Accepted inputs:
          * ``int``       -- minutes from now
          * ``datetime``  -- used as-is
          * ``timedelta`` -- offset from now
          * ``str``       -- not implemented yet, falls back to "now"
          * falsy         -- "now"

        :raises ValueError: for any other type
        """
        if time:
            if isinstance(time, int):
                return datetime.now() + timedelta(minutes=time)
            elif isinstance(time, datetime):
                return time
            elif isinstance(time, timedelta):
                return datetime.now() + time
            # BUG FIX: ``types.StringTypes`` is Python 2 only and raised
            # AttributeError on Python 3; use ``str`` instead.
            elif isinstance(time, str):
                # TODO: actually parse string times; fall back to "now"
                return datetime.now()
            else:
                raise ValueError('Unable to parse %s as datetime' % time)
        else:
            return datetime.now()

    @classmethod
    def _encodeRequest(cls, request, data=None):
        """ encode the request parameters in the expected way

        The l.de endpoint expects a mostly URL-quoted body, but with
        '&', '+' and '=' left intact and '/' quoted -- hence the
        replace dance after :func:`urllib.parse.quote`.
        """
        if isinstance(request, (list, tuple)):
            request = "".join(request)
        if data:
            resStr = request % data
        else:
            resStr = request
        # log.debug('DATA RAW: %s' % resStr)
        res = urllib.parse.quote(resStr).replace('%26', '&').replace('%2B', '+').replace('%3D', '=').replace('/', '%2F')
        # log.debug('DATA ENC: %s' % res)
        return res

    @classmethod
    def getAutoCompletion(cls, station, limit=10):
        """ retrieves autocomplete result for station.

        This should be used to get the correct station name which will
        be needed for getStation and getConnection.

        :param station: (partial) station name
        :param limit: maximum number of suggestions to return
        :raises JsonDecodingError: if the response is not valid JSON
        :raises Exception: on non-200 HTTP status
        """
        reqData = {
            'mode': 'autocomplete',
            'limit': limit,
            'poi': '',
            'q': station.encode('utf-8') if isinstance(station, str) else station,
        }
        url = '%s?%s' % (
            cls.URL['complete'],
            urllib.parse.urlencode(reqData)
        )
        log.debug('URL: %s' % url)
        data = requests.get(url)
        if data.status_code == 200:
            try:
                return data.json()['stations']
            except ValueError as e:
                # BUG FIX: Python 3 exceptions have no ``.message``.
                raise JsonDecodingError(str(e), data.text)
        raise Exception('Unable to retrieve data error: %s' % data.status_code)

    @classmethod
    def getConnection(cls, stationFrom, stationTo, time=None):
        """ Retrieves connection information to travel from stationFrom to
        stationTo.

        The station name must be completely identical to the one in LVB
        System. You can use getAutoCompletion to retrieve the correct name.

        :param time: departure time, any form _defParseDatetime accepts
        :raises JsonDecodingError: if the response is not valid JSON
        :raises Exception: on non-200 HTTP status
        """
        params = cls._getConnectionParams(stationFrom, stationTo, cls._defParseDatetime(time))
        # log.debug('PARAMS: %s' % (params, ))
        data = requests.post(cls.URL['connection'], data=params, headers=cls.HEADER)
        if data.status_code == 200:
            # log.debug('BODY: %s' % (data.text))
            try:
                return cls._getConnectionParse(data.json())
            except ValueError as e:
                # BUG FIX: Python 3 exceptions have no ``.message``.
                raise JsonDecodingError(str(e), data.text)
        raise Exception('Unable to retrieve data error: %s' % data.status_code)

    @classmethod
    def _getConnectionParams(cls, stationFrom, stationTo, conTime):
        """ builds parameter structure for connection call

        :param conTime: departure time as ``datetime``
        """
        transport = list(cls.TRANSPORTMAP.keys())
        res = [
            'results[5][5][function]=ws_find_connections&results[5][5][data]=[',
            '{"name":"results[5][5][is_extended]","value":""},',
            '{"name":"results[5][5][from_opt]","value":"3"},',
            '{"name":"results[5][5][from]","value":"%(from)s"},',
            '{"name":"results[5][5][from_lat]","value":""},',
            '{"name":"results[5][5][from_lng]","value":""},',
            '{"name":"results[5][5][to_opt]","value":"3"},',
            '{"name":"results[5][5][to]","value":"%(to)s"},',
            '{"name":"results[5][5][to_lat]","value":""},',
            '{"name":"results[5][5][to_lng]","value":""},',
            '{"name":"results[5][5][via]","value":""},',
            '{"name":"results[5][5][via_lat]","value":""},',
            '{"name":"results[5][5][via_lng]","value":""},',
            '{"name":"results[5][5][time_mode]","value":"departure"},',
            '{"name":"results[5][5][time]","value":"%(time)s"},',
            '{"name":"results[5][5][date]","value":"%(date)s"},',
        ]
        # NOTE(review): transport entries use index [5][2] while everything
        # else uses [5][5] -- looks suspicious but matches the live site's
        # payload; verify before "fixing".
        for atransport in transport:
            res.append('{"name":"results[5][2][means_of_transport][]","value":"%s"},' % atransport)
        res.append('{"name":"results[5][2][mode]","value":"connection"}]')
        return cls._encodeRequest(res, {
            'from': stationFrom.replace(' ', '+'),
            'to': stationTo.replace(' ', '+'),
            'time': conTime.strftime('%H:%M'),
            'date': conTime.strftime('%d.%m.%Y'),
        })

    @classmethod
    def _getConnectionParse(cls, result):
        """ builds connection results """
        return result.get('connections', {})

    @classmethod
    def getStation(cls, station, time=None):
        """ get all expected Trains at specified station

        The station names must be completely identical to the ones in LVB
        System. You can use getAutoCompletion to retrieve the correct names.

        :param time: departure time, any form _defParseDatetime accepts
        :raises JsonDecodingError: if the response is not valid JSON
        :raises Exception: on non-200 HTTP status
        """
        params = cls._getStationParams(station, cls._defParseDatetime(time))
        # log.debug('PARAMS: %s' % (params, ))
        data = requests.post(cls.URL['station'], data=params, headers=cls.HEADER)
        if data.status_code == 200:
            # log.debug('BODY: %s' % (data.text))
            try:
                return cls._getStationParse(data.json())
            except ValueError as e:
                # BUG FIX: Python 3 exceptions have no ``.message``.
                raise JsonDecodingError(str(e), data.text)
        raise Exception('Unable to retrieve data error: %s' % data.status_code)

    @classmethod
    def _getStationParams(cls, stop, time):
        """ build parameter structure for station request """
        res = [
            'results[5][5][function]=ws_info_stop&results[5][5][data]=[',
            '{"name":"results[5][5][stop]","value":"%(stop)s"},',
            '{"name":"results[5][5][date]","value":"%(date)s"},',
            '{"name":"results[5][5][time]","value":"%(time)s"},',
            '{"name":"results[5][5][mode]","value":"stop"}]'
        ]
        data = {
            'stop': stop.replace(' ', '+'),
            'date': time.strftime('%d.%m.%Y'),
            'time': time.strftime('%H:%M'),
        }
        # log.debug('PARMS: %s' % pformat('\n'.join(res) % data))
        return cls._encodeRequest(res, data)

    @classmethod
    def _getStationParse(cls, result):
        """ build station results """
        # Consistent with _getConnectionParse: tolerate a missing key
        # instead of raising KeyError on an unexpected payload.
        return result.get('connections', {})


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    r = LVB()
    print("Station %s" % (pformat(r.getStation('Leipzig, Marschnerstr.'))))  # , datetime(2017, 2, 10, 12, 57))))
    print("Connection %s" % (pformat(r.getConnection('Leipzig, Marschnerstr.', 'Leipzig, Goerdelerring'))))  # , datetime(2017, 2, 10, 13, 17))))
    print("Autocomplete %s" % (pformat(r.getAutoCompletion(u'marschnerstra\xdfe'))))
    print("Autocomplete %s" % (pformat(r.getAutoCompletion(u'marschner'))))
native2k/lvbRequester
lvbRequester/lvbRequester.py
Python
bsd-3-clause
9,165