# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This module's classes provide an interface to mojo modules. Modules are
# collections of interfaces and structs to be used by mojo ipc clients and
# servers.
#
# A simple interface would be created this way:
# module = mojom.generate.module.Module('Foo')
# interface = module.AddInterface('Bar')
# method = interface.AddMethod('Tat', 0)
# method.AddParameter('baz', mojom.INT32, 0)
class Kind(object):
def __init__(self, spec=None):
self.spec = spec
self.parent_kind = None
class ReferenceKind(Kind):
"""ReferenceKind represents pointer types and handle types.
A type is nullable if null (for pointer types) or invalid handle (for handle
types) is a legal value for the type.
"""
def __init__(self, spec=None, is_nullable=False):
assert spec is None or is_nullable == spec.startswith('?')
Kind.__init__(self, spec)
self.is_nullable = is_nullable
self.shared_definition = {}
def MakeNullableKind(self):
assert not self.is_nullable
if self == STRING:
return NULLABLE_STRING
if self == HANDLE:
return NULLABLE_HANDLE
if self == DCPIPE:
return NULLABLE_DCPIPE
if self == DPPIPE:
return NULLABLE_DPPIPE
if self == MSGPIPE:
return NULLABLE_MSGPIPE
if self == SHAREDBUFFER:
return NULLABLE_SHAREDBUFFER
nullable_kind = type(self)()
nullable_kind.shared_definition = self.shared_definition
if self.spec is not None:
nullable_kind.spec = '?' + self.spec
nullable_kind.is_nullable = True
return nullable_kind
@classmethod
def AddSharedProperty(cls, name):
"""Adds a property |name| to |cls|, which accesses the corresponding item in
|shared_definition|.
The reason for adding such indirection is to enable sharing the definition
between a reference kind and its nullable variation. For example:
a = Struct('test_struct_1')
b = a.MakeNullableKind()
a.name = 'test_struct_2'
print b.name # Outputs 'test_struct_2'.
"""
def Get(self):
return self.shared_definition[name]
def Set(self, value):
self.shared_definition[name] = value
setattr(cls, name, property(Get, Set))
# Initialize the set of primitive types. These can be accessed by clients.
BOOL = Kind('b')
INT8 = Kind('i8')
INT16 = Kind('i16')
INT32 = Kind('i32')
INT64 = Kind('i64')
UINT8 = Kind('u8')
UINT16 = Kind('u16')
UINT32 = Kind('u32')
UINT64 = Kind('u64')
FLOAT = Kind('f')
DOUBLE = Kind('d')
STRING = ReferenceKind('s')
HANDLE = ReferenceKind('h')
DCPIPE = ReferenceKind('h:d:c')
DPPIPE = ReferenceKind('h:d:p')
MSGPIPE = ReferenceKind('h:m')
SHAREDBUFFER = ReferenceKind('h:s')
NULLABLE_STRING = ReferenceKind('?s', True)
NULLABLE_HANDLE = ReferenceKind('?h', True)
NULLABLE_DCPIPE = ReferenceKind('?h:d:c', True)
NULLABLE_DPPIPE = ReferenceKind('?h:d:p', True)
NULLABLE_MSGPIPE = ReferenceKind('?h:m', True)
NULLABLE_SHAREDBUFFER = ReferenceKind('?h:s', True)
# Collection of all Primitive types
PRIMITIVES = (
BOOL,
INT8,
INT16,
INT32,
INT64,
UINT8,
UINT16,
UINT32,
UINT64,
FLOAT,
DOUBLE,
STRING,
HANDLE,
DCPIPE,
DPPIPE,
MSGPIPE,
SHAREDBUFFER,
NULLABLE_STRING,
NULLABLE_HANDLE,
NULLABLE_DCPIPE,
NULLABLE_DPPIPE,
NULLABLE_MSGPIPE,
NULLABLE_SHAREDBUFFER
)
ATTRIBUTE_MIN_VERSION = 'MinVersion'
class NamedValue(object):
def __init__(self, module, parent_kind, name):
self.module = module
self.namespace = module.namespace
self.parent_kind = parent_kind
self.name = name
self.imported_from = None
def GetSpec(self):
return (self.namespace + '.' +
(self.parent_kind and (self.parent_kind.name + '.') or "") +
self.name)
class BuiltinValue(object):
def __init__(self, value):
self.value = value
class ConstantValue(NamedValue):
def __init__(self, module, parent_kind, constant):
NamedValue.__init__(self, module, parent_kind, constant.name)
self.constant = constant
class EnumValue(NamedValue):
def __init__(self, module, enum, field):
NamedValue.__init__(self, module, enum.parent_kind, field.name)
self.enum = enum
def GetSpec(self):
return (self.namespace + '.' +
(self.parent_kind and (self.parent_kind.name + '.') or "") +
self.enum.name + '.' + self.name)
class Constant(object):
def __init__(self, name=None, kind=None, value=None, parent_kind=None):
self.name = name
self.kind = kind
self.value = value
self.parent_kind = parent_kind
class Field(object):
def __init__(self, name=None, kind=None, ordinal=None, default=None,
attributes=None):
if self.__class__.__name__ == 'Field':
raise Exception('Field is an abstract base class; use StructField or UnionField.')
self.name = name
self.kind = kind
self.ordinal = ordinal
self.default = default
self.attributes = attributes
@property
def min_version(self):
return self.attributes.get(ATTRIBUTE_MIN_VERSION) \
if self.attributes else None
class StructField(Field): pass
class UnionField(Field): pass
class Struct(ReferenceKind):
ReferenceKind.AddSharedProperty('name')
ReferenceKind.AddSharedProperty('module')
ReferenceKind.AddSharedProperty('imported_from')
ReferenceKind.AddSharedProperty('fields')
ReferenceKind.AddSharedProperty('attributes')
def __init__(self, name=None, module=None, attributes=None):
if name is not None:
spec = 'x:' + name
else:
spec = None
ReferenceKind.__init__(self, spec)
self.name = name
self.module = module
self.imported_from = None
self.fields = []
self.attributes = attributes
def AddField(self, name, kind, ordinal=None, default=None, attributes=None):
field = StructField(name, kind, ordinal, default, attributes)
self.fields.append(field)
return field
class Union(ReferenceKind):
ReferenceKind.AddSharedProperty('name')
ReferenceKind.AddSharedProperty('module')
ReferenceKind.AddSharedProperty('imported_from')
ReferenceKind.AddSharedProperty('fields')
ReferenceKind.AddSharedProperty('attributes')
def __init__(self, name=None, module=None, attributes=None):
if name is not None:
spec = 'x:' + name
else:
spec = None
ReferenceKind.__init__(self, spec)
self.name = name
self.module = module
self.imported_from = None
self.fields = []
self.attributes = attributes
def AddField(self, name, kind, ordinal=None, attributes=None):
field = UnionField(name, kind, ordinal, None, attributes)
self.fields.append(field)
return field
class Array(ReferenceKind):
ReferenceKind.AddSharedProperty('kind')
ReferenceKind.AddSharedProperty('length')
def __init__(self, kind=None, length=None):
if kind is not None:
if length is not None:
spec = 'a%d:%s' % (length, kind.spec)
else:
spec = 'a:%s' % kind.spec
ReferenceKind.__init__(self, spec)
else:
ReferenceKind.__init__(self)
self.kind = kind
self.length = length
class Map(ReferenceKind):
ReferenceKind.AddSharedProperty('key_kind')
ReferenceKind.AddSharedProperty('value_kind')
def __init__(self, key_kind=None, value_kind=None):
if (key_kind is not None and value_kind is not None):
ReferenceKind.__init__(self,
'm[' + key_kind.spec + '][' + value_kind.spec +
']')
if IsNullableKind(key_kind):
raise Exception("Nullable kinds cannot be keys in maps.")
if IsStructKind(key_kind):
# TODO(erg): It would sometimes be nice if we could key on struct
# values. However, what happens if the struct has a handle in it? Or
# non-copyable data like an array?
raise Exception("Structs cannot be keys in maps.")
if IsAnyHandleKind(key_kind):
raise Exception("Handles cannot be keys in maps.")
if IsInterfaceKind(key_kind):
raise Exception("Interfaces cannot be keys in maps.")
if IsArrayKind(key_kind):
raise Exception("Arrays cannot be keys in maps.")
else:
ReferenceKind.__init__(self)
self.key_kind = key_kind
self.value_kind = value_kind
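# For example (illustrative): Map(STRING, INT32) produces the spec
# 'm[s][i32]', while Map(NULLABLE_STRING, INT32) raises because nullable
# kinds are not allowed as map keys.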
class InterfaceRequest(ReferenceKind):
ReferenceKind.AddSharedProperty('kind')
def __init__(self, kind=None):
if kind is not None:
if not isinstance(kind, Interface):
raise Exception(
"Interface request requires %r to be an interface." % kind.spec)
ReferenceKind.__init__(self, 'r:' + kind.spec)
else:
ReferenceKind.__init__(self)
self.kind = kind
class Parameter(object):
def __init__(self, name=None, kind=None, ordinal=None, default=None,
attributes=None):
self.name = name
self.ordinal = ordinal
self.kind = kind
self.default = default
self.attributes = attributes
@property
def min_version(self):
return self.attributes.get(ATTRIBUTE_MIN_VERSION) \
if self.attributes else None
class Method(object):
def __init__(self, interface, name, ordinal=None, attributes=None):
self.interface = interface
self.name = name
self.ordinal = ordinal
self.parameters = []
self.response_parameters = None
self.attributes = attributes
def AddParameter(self, name, kind, ordinal=None, default=None,
attributes=None):
parameter = Parameter(name, kind, ordinal, default, attributes)
self.parameters.append(parameter)
return parameter
def AddResponseParameter(self, name, kind, ordinal=None, default=None,
attributes=None):
if self.response_parameters is None:
self.response_parameters = []
parameter = Parameter(name, kind, ordinal, default, attributes)
self.response_parameters.append(parameter)
return parameter
@property
def min_version(self):
return self.attributes.get(ATTRIBUTE_MIN_VERSION) \
if self.attributes else None
class Interface(ReferenceKind):
ReferenceKind.AddSharedProperty('module')
ReferenceKind.AddSharedProperty('name')
ReferenceKind.AddSharedProperty('imported_from')
ReferenceKind.AddSharedProperty('methods')
ReferenceKind.AddSharedProperty('attributes')
def __init__(self, name=None, module=None, attributes=None):
if name is not None:
spec = 'x:' + name
else:
spec = None
ReferenceKind.__init__(self, spec)
self.module = module
self.name = name
self.imported_from = None
self.methods = []
self.attributes = attributes
def AddMethod(self, name, ordinal=None, attributes=None):
method = Method(self, name, ordinal, attributes)
self.methods.append(method)
return method
# TODO(451323): Remove when the language backends no longer rely on this.
@property
def client(self):
return None
class EnumField(object):
def __init__(self, name=None, value=None, attributes=None):
self.name = name
self.value = value
self.attributes = attributes
@property
def min_version(self):
return self.attributes.get(ATTRIBUTE_MIN_VERSION) \
if self.attributes else None
class Enum(Kind):
def __init__(self, name=None, module=None, attributes=None):
self.module = module
self.name = name
self.imported_from = None
if name is not None:
spec = 'x:' + name
else:
spec = None
Kind.__init__(self, spec)
self.fields = []
self.attributes = attributes
class Module(object):
def __init__(self, name=None, namespace=None, attributes=None):
self.name = name
self.path = name
self.namespace = namespace
self.structs = []
self.unions = []
self.interfaces = []
self.kinds = {}
self.attributes = attributes
def AddInterface(self, name, attributes=None):
interface = Interface(name, self, attributes)
self.interfaces.append(interface)
return interface
def AddStruct(self, name, attributes=None):
struct = Struct(name, self, attributes)
self.structs.append(struct)
return struct
def AddUnion(self, name, attributes=None):
union = Union(name, self, attributes)
self.unions.append(union)
return union
def IsBoolKind(kind):
return kind.spec == BOOL.spec
def IsFloatKind(kind):
return kind.spec == FLOAT.spec
def IsIntegralKind(kind):
return (kind.spec == BOOL.spec or
kind.spec == INT8.spec or
kind.spec == INT16.spec or
kind.spec == INT32.spec or
kind.spec == INT64.spec or
kind.spec == UINT8.spec or
kind.spec == UINT16.spec or
kind.spec == UINT32.spec or
kind.spec == UINT64.spec)
def IsStringKind(kind):
return kind.spec == STRING.spec or kind.spec == NULLABLE_STRING.spec
def IsGenericHandleKind(kind):
return kind.spec == HANDLE.spec or kind.spec == NULLABLE_HANDLE.spec
def IsDataPipeConsumerKind(kind):
return kind.spec == DCPIPE.spec or kind.spec == NULLABLE_DCPIPE.spec
def IsDataPipeProducerKind(kind):
return kind.spec == DPPIPE.spec or kind.spec == NULLABLE_DPPIPE.spec
def IsMessagePipeKind(kind):
return kind.spec == MSGPIPE.spec or kind.spec == NULLABLE_MSGPIPE.spec
def IsSharedBufferKind(kind):
return (kind.spec == SHAREDBUFFER.spec or
kind.spec == NULLABLE_SHAREDBUFFER.spec)
def IsStructKind(kind):
return isinstance(kind, Struct)
def IsUnionKind(kind):
return isinstance(kind, Union)
def IsArrayKind(kind):
return isinstance(kind, Array)
def IsInterfaceKind(kind):
return isinstance(kind, Interface)
def IsInterfaceRequestKind(kind):
return isinstance(kind, InterfaceRequest)
def IsEnumKind(kind):
return isinstance(kind, Enum)
def IsReferenceKind(kind):
return isinstance(kind, ReferenceKind)
def IsNullableKind(kind):
return IsReferenceKind(kind) and kind.is_nullable
def IsMapKind(kind):
return isinstance(kind, Map)
def IsObjectKind(kind):
return IsPointerKind(kind) or IsUnionKind(kind)
def IsPointerKind(kind):
return (IsStructKind(kind) or IsArrayKind(kind) or IsStringKind(kind) or
IsMapKind(kind))
# Note that an interface is not considered a handle kind, since it is an
# aggregate type consisting of a handle and a version number.
def IsAnyHandleKind(kind):
return (IsGenericHandleKind(kind) or
IsDataPipeConsumerKind(kind) or
IsDataPipeProducerKind(kind) or
IsMessagePipeKind(kind) or
IsSharedBufferKind(kind) or
IsInterfaceRequestKind(kind))
def IsMoveOnlyKind(kind):
return (not IsStringKind(kind) and IsObjectKind(kind)) or \
IsAnyHandleKind(kind) or IsInterfaceKind(kind)
def IsCloneableKind(kind):
def ContainsHandles(kind, visited_kinds):
if kind in visited_kinds:
# No need to examine the kind again.
return False
visited_kinds.add(kind)
if IsAnyHandleKind(kind) or IsInterfaceKind(kind):
return True
if IsArrayKind(kind):
return ContainsHandles(kind.kind, visited_kinds)
if IsStructKind(kind) or IsUnionKind(kind):
for field in kind.fields:
if ContainsHandles(field.kind, visited_kinds):
return True
if IsMapKind(kind):
# No need to examine the key kind, only primitive kinds and non-nullable
# string are allowed to be key kinds.
return ContainsHandles(kind.value_kind, visited_kinds)
return False
return not ContainsHandles(kind, set())
def HasCallbacks(interface):
for method in interface.methods:
if method.response_parameters is not None:
return True
return False
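# Illustrative usage sketch (not part of the original module): builds a tiny
# module and exercises the kind predicates above; every name used here is
# defined in this file.
def _ExampleUsage():
  module = Module('sample', namespace='sample')
  struct = module.AddStruct('Record')
  struct.AddField('id', INT64, ordinal=0)
  struct.AddField('pipe', NULLABLE_MSGPIPE, ordinal=1)
  assert IsStructKind(struct) and IsObjectKind(struct)
  assert not IsCloneableKind(struct)  # The message pipe field is a handle.
  interface = module.AddInterface('Service')
  method = interface.AddMethod('Get', ordinal=0)
  method.AddParameter('key', STRING, ordinal=0)
  method.AddResponseParameter('value', NULLABLE_STRING, ordinal=0)
  assert HasCallbacks(interface)  # The method declares response parameters.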
import json
import logging
import csv
import pkg_resources
from oic.extension.token import JWTToken
from oic.utils.authn.authn_context import AuthnBroker
from oic.utils.authn.client import verify_client
from oic.utils.authz import AuthzHandling
from oic.utils.keyio import keyjar_init
from oic.utils.sdb import SessionDB
from oic.utils.userinfo import UserInfo
from otest.events import Events
from otest.rp.provider import Provider
logger = logging.getLogger(__name__)
__author__ = 'roland'
def read_uri_schemes(filename):
with open(filename, 'r') as csvfile:
fieldnames = csvfile.readline().strip().split(',')
reader = csv.DictReader(csvfile, fieldnames)
return dict(
[(r['URI Scheme'], '{} {}'.format(r['Description'], r['Reference'])) for
r in reader])
def read_path2port_map(filename):
"""
Reads csv file containing two columns: column1 is path name,
column2 is port number
:param filename:
:return: dictionary with port as key and path as value
"""
res = {}
with open(filename, 'r') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
res[row[1]] = row[0]
return res
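# Example (illustrative): a file containing the two lines
#
#   rp_a,8080
#   rp_b,8081
#
# yields {'8080': 'rp_a', '8081': 'rp_b'} -- keyed by the port column exactly
# as read from the file (a string), with the path as the value.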
def as_arg_setup(args, lookup, config):
if args.port:
_port = args.port
else:
if args.tls:
_port = 443
else:
_port = 80
if args.path2port:
# means there is a reverse proxy in front translating
# path -> port
p2p_map = read_path2port_map(args.path2port)
_path = p2p_map[_port]
if args.xport:
_issuer = "{base}:{port}/{path}".format(base=config.baseurl,
port=args.xport,
path=_path)
_port = args.xport
else:
_issuer = "{base}/{path}".format(base=config.baseurl, path=_path)
else: # the old port based
_path = ''
_issuer = "{base}:{port}".format(base=config.baseurl, port=_port)
if args.tls and _issuer.startswith('http://'):
_issuer = _issuer.replace('http://', 'https://')
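# For example (illustrative values): with config.baseurl = 'https://op.example.org',
# args.path2port set, _path = 'rp1' and args.xport = 8080, the issuer becomes
# 'https://op.example.org:8080/rp1'; with the port-based setup and _port = 443
# it becomes 'https://op.example.org:443'.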
cdb = {}
ac = AuthnBroker()
for authkey, value in list(config.AUTHENTICATION.items()):
authn = None
# if "UserPassword" == authkey:
# from oic.utils.authn.user import UsernamePasswordMako
# authn = UsernamePasswordMako(None, "login.mako", LOOKUP, PASSWD,
# "authorization")
if "NoAuthn" == authkey:
from oic.utils.authn.user import NoAuthn
authn = NoAuthn(None, user=config.AUTHENTICATION[authkey]["user"])
if authn is not None:
ac.add(config.AUTHENTICATION[authkey]["ACR"], authn,
config.AUTHENTICATION[authkey]["WEIGHT"])
# dealing with authorization
authz = AuthzHandling()
if config.USERINFO == "SIMPLE":
# User info is a simple dictionary in this case statically defined in
# the configuration file
userinfo = UserInfo(config.USERDB)
else:
userinfo = None
as_args = {
"name": _issuer,
'instance_path': _path,
'instance_port': _port,
"cdb": cdb,
"authn_broker": ac,
"userinfo": userinfo,
"authz": authz,
"client_authn": verify_client,
"symkey": config.SYM_KEY,
"template_lookup": lookup,
"template": {"form_post": "form_response.mako"},
"jwks_name": "./static/jwks_{}.json",
'event_db': Events(),
}
try:
as_args['behavior'] = config.BEHAVIOR
except AttributeError:
pass
com_args = {
"baseurl": config.baseurl,
}
for arg in ['name', 'cdb', 'authn_broker', 'userinfo', 'authz', 'template',
'jwks_name', 'client_authn', 'symkey', 'template_lookup']:
com_args[arg] = as_args[arg]
# Add own keys for signing/encrypting JWTs
try:
# a throw-away OP used to do the initial key setup
_op = Provider(sdb=SessionDB(com_args["baseurl"]), **com_args)
jwks = keyjar_init(_op, config.keys)
except KeyError:
key_arg = {}
else:
key_arg = {"jwks": jwks, "keys": config.keys}
as_args['jwks_name'] = 'static/jwks.json'
f = open('static/jwks.json', 'w')
f.write(json.dumps(jwks))
f.close()
if args.insecure:
_op.keyjar.verify_ssl = False
else:
_op.keyjar.verify_ssl = True
as_args['keyjar'] = _op.keyjar
as_args['sdb'] = SessionDB(
com_args["baseurl"],
token_factory=JWTToken('T', keyjar=_op.keyjar,
lt_pattern={'code': 3600, 'token': 900},
iss=com_args['baseurl'],
sign_alg='RS256'),
refresh_token_factory=JWTToken(
'R', keyjar=_op.keyjar, lt_pattern={'': 24 * 3600},
iss=com_args['baseurl'])
)
return as_args, key_arg
def main_setup(args, lookup, config):
config.issuer = config.issuer % args.port
config.SERVICE_URL = config.SERVICE_URL % args.port
as_args, key_arg = as_arg_setup(args, lookup, config)
kwargs = {
"template_lookup": lookup,
"template": {"form_post": "form_response.mako"},
}
# Should I care about verifying the certificates used by other entities
if args.insecure:
kwargs["verify_ssl"] = False
else:
kwargs["verify_ssl"] = True
op_arg = key_arg
try:
op_arg["cookie_ttl"] = config.COOKIETTL
except AttributeError:
pass
try:
op_arg["cookie_name"] = config.COOKIENAME
except AttributeError:
pass
# print URLS
if args.debug:
op_arg["debug"] = True
# # All endpoints the OpenID Connect Provider should answer on
# add_endpoints(ENDPOINTS)
# op_arg["endpoints"] = ENDPOINTS
if args.port == 80:
_baseurl = config.baseurl
else:
if config.baseurl.endswith("/"):
config.baseurl = config.baseurl[:-1]
_baseurl = "%s:%d" % (config.baseurl, args.port)
if not _baseurl.endswith("/"):
_baseurl += "/"
op_arg["baseurl"] = _baseurl
logger.info('setup kwargs: {}'.format(kwargs))
try:
op_arg["marg"] = multi_keys(as_args, config.multi_keys)
except AttributeError as err:
pass
op_arg['uri_schemes'] = read_uri_schemes(
pkg_resources.resource_filename('otest', 'uri-schemes-1.csv'))
if args.op_profiles:
profiles = {}
for p in args.op_profiles:
profiles.update(json.loads(open(p).read()))
else:
profiles = {}
op_arg['profiles'] = profiles
logger.info("setup as_args: {}".format(as_args))
logger.info(" -- op_arg: {}".format(op_arg))
return as_args, op_arg, config
def multi_keys(as_args, key_conf):
# a throw-away OP used to do the initial key setup
_op = Provider(**as_args)
jwks = keyjar_init(_op, key_conf, "m%d")
return {"jwks": jwks, "keys": key_conf}
import sys
import threading
import weakref
from django.utils.six.moves import range
if sys.version_info < (3, 4):
from .weakref_backports import WeakMethod
else:
from weakref import WeakMethod
def _make_id(target):
if hasattr(target, '__func__'):
return (id(target.__self__), id(target.__func__))
return id(target)
NONE_ID = _make_id(None)
# A marker for caching
NO_RECEIVERS = object()
class Signal(object):
"""
Base class for all signals
Internal attributes:
receivers
{ receiverkey (id) : weakref(receiver) }
"""
def __init__(self, providing_args=None, use_caching=False):
"""
Create a new signal.
providing_args
A list of the arguments this signal can pass along in a send() call.
"""
self.receivers = []
if providing_args is None:
providing_args = []
self.providing_args = set(providing_args)
self.lock = threading.Lock()
self.use_caching = use_caching
# For convenience we create empty caches even if they are not used.
# A note about caching: if use_caching is defined, then for each
# distinct sender we cache the receivers that sender has in
# 'sender_receivers_cache'. The cache is cleaned when .connect() or
# .disconnect() is called and populated on send().
self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
self._dead_receivers = False
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
If weak is True, then receiver must be weak referenceable.
Receivers must be able to accept keyword arguments.
If receivers have a dispatch_uid attribute, the receiver will
not be added if another receiver already exists with that
dispatch_uid.
sender
The sender to which the receiver should respond. Must either be
a Python object, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
from django.conf import settings
# If DEBUG is on, check that we got a good receiver
if settings.configured and settings.DEBUG:
import inspect
assert callable(receiver), "Signal receivers must be callable."
# Check for **kwargs
# Not all callables are inspectable with getargspec, so we'll
# try a couple different ways but in the end fall back on assuming
# it is -- we don't want to prevent registration of valid but weird
# callables.
try:
argspec = inspect.getargspec(receiver)
except TypeError:
try:
argspec = inspect.getargspec(receiver.__call__)
except (TypeError, AttributeError):
argspec = None
if argspec:
assert argspec[2] is not None, \
"Signal receivers must accept keyword arguments (**kwargs)."
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
ref = weakref.ref
receiver_object = receiver
# Check for bound methods
if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
ref = WeakMethod
receiver_object = receiver.__self__
if sys.version_info >= (3, 4):
receiver = ref(receiver)
weakref.finalize(receiver_object, self._remove_receiver)
else:
receiver = ref(receiver, self._remove_receiver)
with self.lock:
self._clear_dead_receivers()
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
self.sender_receivers_cache.clear()
def disconnect(self, receiver=None, sender=None, weak=True, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
will be removed from dispatch automatically.
Arguments:
receiver
The registered receiver to disconnect. May be None if
dispatch_uid is specified.
sender
The registered sender to disconnect
weak
The weakref state to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
with self.lock:
self._clear_dead_receivers()
for index in range(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
del self.receivers[index]
break
self.sender_receivers_cache.clear()
def has_listeners(self, sender=None):
return bool(self._live_receivers(sender))
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop, so it is quite possible to not have all
receivers called if a receiver raises an error.
Arguments:
sender
The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ].
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
for receiver in self._live_receivers(sender):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response))
return responses
def send_robust(self, sender, **named):
"""
Send signal from sender to all connected receivers catching errors.
Arguments:
sender
The sender of the signal. Can be any python object (normally one
registered with a connect if you actually want something to
occur).
named
Named arguments which will be passed to receivers. These
arguments must be a subset of the argument names defined in
providing_args.
Return a list of tuple pairs [(receiver, response), ... ]. May raise
DispatcherKeyError.
If any receiver raises an error (specifically any subclass of
Exception), the error instance is returned as the result for that
receiver. The traceback is always attached to the error at
``__traceback__``.
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
for receiver in self._live_receivers(sender):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception as err:
if not hasattr(err, '__traceback__'):
err.__traceback__ = sys.exc_info()[2]
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _clear_dead_receivers(self):
# Note: caller is assumed to hold self.lock.
if self._dead_receivers:
self._dead_receivers = False
new_receivers = []
for r in self.receivers:
if isinstance(r[1], weakref.ReferenceType) and r[1]() is None:
continue
new_receivers.append(r)
self.receivers = new_receivers
def _live_receivers(self, sender):
"""
Filter sequence of receivers to get resolved, live receivers.
This checks for weak references and resolves them, then returning only
live receivers.
"""
receivers = None
if self.use_caching and not self._dead_receivers:
receivers = self.sender_receivers_cache.get(sender)
# We could end up here with NO_RECEIVERS even if we do check this case in
# .send() prior to calling _live_receivers() due to concurrent .send() call.
if receivers is NO_RECEIVERS:
return []
if receivers is None:
with self.lock:
self._clear_dead_receivers()
senderkey = _make_id(sender)
receivers = []
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == NONE_ID or r_senderkey == senderkey:
receivers.append(receiver)
if self.use_caching:
if not receivers:
self.sender_receivers_cache[sender] = NO_RECEIVERS
else:
# Note, we must cache the weakref versions.
self.sender_receivers_cache[sender] = receivers
non_weak_receivers = []
for receiver in receivers:
if isinstance(receiver, weakref.ReferenceType):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
non_weak_receivers.append(receiver)
else:
non_weak_receivers.append(receiver)
return non_weak_receivers
def _remove_receiver(self, receiver=None):
# Mark that the self.receivers list has dead weakrefs. If so, we will
# clean those up in connect, disconnect and _live_receivers while
# holding self.lock. Note that doing the cleanup here isn't a good
# idea, _remove_receiver() will be called as side effect of garbage
# collection, and so the call can happen while we are already holding
# self.lock.
self._dead_receivers = True
def receiver(signal, **kwargs):
"""
A decorator for connecting receivers to signals. Used by passing in the
signal (or list of signals) and keyword arguments to connect::
@receiver(post_save, sender=MyModel)
def signal_receiver(sender, **kwargs):
...
@receiver([post_save, post_delete], sender=MyModel)
def signals_receiver(sender, **kwargs):
...
"""
def _decorator(func):
if isinstance(signal, (list, tuple)):
for s in signal:
s.connect(func, **kwargs)
else:
signal.connect(func, **kwargs)
return func
return _decorator
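# A minimal usage sketch (illustrative only; the signal, receiver and payload
# names below are made up, not part of this module):
def _example_usage():
    pizza_done = Signal(providing_args=["toppings", "size"])

    def handle_pizza(sender, **kwargs):
        return kwargs.get("toppings")

    pizza_done.connect(handle_pizza)
    # send() returns a list of (receiver, response) pairs, here
    # [(handle_pizza, ['cheese'])].
    return pizza_done.send(sender=None, toppings=["cheese"], size="large")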
import itertools
import pylab
import pandas as pd
import numpy as np
import scipy.stats as stats
import math
__doc__ = """
>>> import rpkmZ
>>> import pandas as pd
>>> geneExpression = pd.read_table('/nas3/lovci/projects/FOX_1and2/mouse_brain/rpkms/table', header=0, index_col=0)
>>> geneExpression.columns = map(lambda x: x[:6] + x[-5:], geneExpression.columns)
>>> import numpy as np
>>> geneExpression = geneExpression[np.all(geneExpression > 0.5, axis=1)]
>>> samples = ('FOX1WT.rpkm', 'FOX1KO.rpkm')
>>> FOX1comparer = rpkmZ.TwoWayGeneComparison_local(geneExpression[samples[0]], geneExpression[samples[1]])
>>> FOX1comparer.plot()
>>> pylab.show()
"""
def benjamini_hochberg(pValues, FDR=0.1):
""" benjamini-hochberg correction for MHT
pValues is a list of pValues
FDR is the desired false-discovery rate
from: http://udel.edu/~mcdonald/statmultcomp.html
"One good technique for controlling the false discovery rate was briefly
mentioned by Simes (1986) and developed in detail by Benjamini and Hochberg (1995).
Put the individual P-values in order, from smallest to largest. The smallest
P-value has a rank of i=1, the next has i=2, etc. Then compare each individual
P-value to (i/m)Q, where m is the total number of tests and Q is the chosen false
discovery rate. The largest P-value that has P<(i/m)Q is significant,
and all P-values smaller than it are also significant."
"""
nComps = float(len(pValues))
pSorter = np.argsort(pValues)
pRank = np.argsort(np.argsort(pValues)) + 1
BHcalc = (pRank / nComps) * FDR
sigs = np.ndarray(shape=(len(pValues), ), dtype='bool')
issig = True
for (p, b, r) in itertools.izip(pValues[pSorter], BHcalc[pSorter], pSorter):
if p > b:
issig = False
sigs[r] = issig
return sigs
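# Illustrative example (not part of the original module): with
# pValues = np.array([0.01, 0.02, 0.03, 0.50]) and FDR = 0.1, the thresholds
# (i/m)*Q are [0.025, 0.05, 0.075, 0.1]; the first three p-values fall below
# their thresholds and the last does not, so benjamini_hochberg returns
# array([True, True, True, False]).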
class Colors(object):
import numpy as np
import matplotlib.colors as clrs
import matplotlib.cm as cmx
Set1 = cm = pylab.get_cmap('Set1')
cNorm = clrs.Normalize(vmin=min(range(25)), vmax=max(range(25))) #8 colors
set1ScalarMap = cmx.ScalarMappable(norm=cNorm, cmap=Set1)
redColor = set1ScalarMap.to_rgba(0)
blueColor = set1ScalarMap.to_rgba(3)
greenColor = set1ScalarMap.to_rgba(6)
purpleColor = set1ScalarMap.to_rgba(9)
orangeColor = set1ScalarMap.to_rgba(12)
yellowColor = set1ScalarMap.to_rgba(15)
brownColor = set1ScalarMap.to_rgba(18)
pinkColor = set1ScalarMap.to_rgba(21)
greyColor = set1ScalarMap.to_rgba(25)
#get nice colors
def __init__(self):
pass
def plot(self):
for i in np.arange(0, 25, 3):
pylab.plot((i, i+1),(0,i+2), color=self.set1ScalarMap.to_rgba(i), linewidth=3)
class TwoWayGeneComparison(object):
def __init__(self, genes1, genes2, labels, pCut = 0.001, sampleNames = ("Sample1", "Sample2")):
""" Run a two-sample RPKM experiment. Give control sample first, it will go on the x-axis """
import numpy as np
import scipy.stats as stats
assert len(genes1) == len(genes2) == len(labels)
self.sampleNames = sampleNames
self.genes1 = genes1
self.genes2 = genes2
self.pCut = pCut
self.upGenes = set()
self.dnGenes = set()
self.expressedGenes = set([labels[i] for i, t in enumerate(np.all(np.c_[genes1, genes2] > 1, axis=1)) if t])
self.log2Ratio = np.log2(genes2 / genes1)
self.meanLog2Ratio = np.mean(self.log2Ratio)
self.stdLog2Ratio = np.std(self.log2Ratio)
self.zScores = stats.norm.pdf(self.log2Ratio, self.meanLog2Ratio, self.stdLog2Ratio)  # norm.pdf takes no axis argument
for (label, zScore, r) in itertools.izip(labels, self.zScores, self.log2Ratio):
if zScore < pCut:
if r > 0:
self.upGenes.add(label)
elif r < 0:
self.dnGenes.add(label)
else:
raise ValueError
def plot(self):
f = pylab.figure(figsize=(8,4))
co = [] #colors container
for zScore, r in itertools.izip(self.zScores, self.log2Ratio):
if zScore < self.pCut:
if r > 0:
co.append(Colors().greenColor)
elif r < 0:
co.append(Colors().redColor)
else:
raise Exception
else:
co.append(Colors().blueColor)
#print "Probability this is from a normal distribution: %.3e" %stats.normaltest(self.log2Ratio)[1]
ax = f.add_subplot(121)
pylab.axvline(self.meanLog2Ratio, color=Colors().redColor)
pylab.axvspan(self.meanLog2Ratio-(2*self.stdLog2Ratio),
self.meanLog2Ratio+(2*self.stdLog2Ratio), color=Colors().blueColor, alpha=0.2)
his = pylab.hist(self.log2Ratio, bins=50, color=Colors().blueColor)
pylab.xlabel("log2 Ratio %s/%s" %(self.sampleNames[1], self.sampleNames[0]))
pylab.ylabel("Frequency")
ax = f.add_subplot(122, aspect='equal')
pylab.scatter(self.genes1, self.genes2, c=co, alpha=0.5)
pylab.ylabel("%s RPKM" %self.sampleNames[1])
pylab.xlabel("%s RPKM" %self.sampleNames[0])
pylab.yscale('log')
pylab.xscale('log')
pylab.tight_layout()
def gstats(self):
print "I used a p-value cutoff of %e" %self.pCut
print "There are", len(self.upGenes), "up-regulated genes in %s vs %s" %(self.sampleNames[1], self.sampleNames[0])
print "There are", len(self.dnGenes), "down-regulated genes in %s vs %s" %(self.sampleNames[1], self.sampleNames[0])
print "There are", len(self.expressedGenes), "expressed genes in both %s and %s" %self.sampleNames
class TwoWayGeneComparison_local(object):
def __init__(self, genes1, genes2, pCut = 0.001, local_fraction = 0.1, bonferroni = True, FDR=None):
""" Run a two-sample RPKM experiment. Give control sample first, it will go on the x-axis
genes1 and genes2 are pandas Series with identical indices
pCut - P value cutoff
local_fraction - by default the closest 10% of genes are used for local z-score calculation
bonferroni - p-values are adjusted for MHT with bonferroni correction
BH - benjamini-hochberg FDR filtering
"""
sampleNames = (genes1.name, genes2.name)
self.sampleNames = sampleNames
genes1 = genes1.replace(0, np.nan).dropna()
genes2 = genes2.replace(0, np.nan).dropna()
labels = genes1.index.intersection(genes2.index)
genes1 = genes1.ix[labels]
genes2 = genes2.ix[labels]
self.genes1 = genes1
self.genes2 = genes2
self.nGenes = len(labels)
if bonferroni:
correction = self.nGenes
else:
correction = 1
localCount = int(math.ceil(self.nGenes * local_fraction))
self.pCut = pCut
self.upGenes = set()
self.dnGenes = set()
self.expressedGenes = set([labels[i] for i, t in enumerate(np.any(np.c_[genes1, genes2] > 1, axis=1)) if t])
self.log2Ratio = np.log2(genes2 / genes1)
self.average_expression = (genes2 + genes1)/2.
self.ranks = np.argsort(np.argsort(self.average_expression))
self.pValues = pd.Series(index = labels)
self.localMean = pd.Series(index = labels)
self.localStd = pd.Series(index = labels)
self.localZ = pd.Series(index = labels)
for g, r in itertools.izip(self.ranks.index, self.ranks):
if r < localCount:
start = 0
stop = localCount
elif r > self.nGenes - localCount:
start = self.nGenes - localCount
stop = self.nGenes
else:
start = r - int(math.floor(localCount/2.))
stop = r + int(math.ceil(localCount/2.))
localGenes = self.ranks[self.ranks.between(start, stop)].index
self.localMean.ix[g] = np.mean(self.log2Ratio.ix[localGenes])
self.localStd.ix[g] = np.std(self.log2Ratio.ix[localGenes])
self.pValues.ix[g] = stats.norm.pdf(self.log2Ratio.ix[g],
self.localMean.ix[g],
self.localStd.ix[g]) * correction
self.localZ.ix[g] = (self.log2Ratio.ix[g]- self.localMean.ix[g])/self.localStd.ix[g]
data = pd.DataFrame(index = labels)
data["rank"] = self.ranks
data["log2Ratio"] = self.log2Ratio
data["localMean"] = self.localMean
data["localStd"] = self.localStd
data["pValue"] = self.pValues
if FDR is None:
data["isSig"] = self.pValues < pCut
else:
data["isSig"] = benjamini_hochberg(self.pValues, FDR=FDR)
data["meanExpression"] = self.average_expression
data["localZ"] = self.localZ
data[sampleNames[0]] = genes1
data[sampleNames[1]] = genes2
self.data = data
for label, (pVal, logratio, isSig) in data.get(["pValue", "log2Ratio", "isSig"]).iterrows():
if (pVal < pCut) and isSig:
if logratio > 0:
self.upGenes.add(label)
elif logratio < 0:
self.dnGenes.add(label)
else:
raise ValueError
def plot(self, ax=None):
co = [] #colors container
for label, (pVal, logratio, isSig) in self.data.get(["pValue", "log2Ratio", "isSig"]).iterrows():
if (pVal < self.pCut) and isSig:
if logratio > 0:
co.append(Colors().redColor)
elif logratio < 0:
co.append(Colors().greenColor)
else:
raise Exception
else:
co.append(Colors().blueColor)
#print "Probability this is from a normal distribution: %.3e" %stats.normaltest(self.log2Ratio)[1]
#ax = f.add_subplot(121)
#pylab.axvline(self.meanLog2Ratio, color=Colors().redColor)
#pylab.axvspan(self.meanLog2Ratio-(2*self.stdLog2Ratio),
# self.meanLog2Ratio+(2*self.stdLog2Ratio), color=Colors().blueColor, alpha=0.2)
#his = pylab.hist(self.log2Ratio, bins=50, color=Colors().blueColor)
#pylab.xlabel("log2 Ratio %s/%s" %(self.sampleNames[1], self.sampleNames[0]))
#pylab.ylabel("Frequency")
standalone = ax is None  # remember whether the caller supplied an axes object
if standalone:
ax = pylab.gca()
ax.set_aspect('equal')
minVal=np.min(np.c_[self.genes1, self.genes2])
ax.scatter(self.genes1, self.genes2, c=co, alpha=0.7, edgecolor='none')
ax.set_ylabel("%s RPKM" %self.sampleNames[1])
ax.set_xlabel("%s RPKM" %self.sampleNames[0])
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlim(xmin=max(minVal, 0.1))
ax.set_ylim(ymin=max(minVal, 0.1))
if standalone:
pylab.tight_layout()
def gstats(self):
print "I used a p-value cutoff of %e" %self.pCut
print "There are", len(self.upGenes), "up-regulated genes in %s vs %s" %(self.sampleNames[1], self.sampleNames[0])
print "There are", len(self.dnGenes), "down-regulated genes in %s vs %s" %(self.sampleNames[1], self.sampleNames[0])
print "There are", len(self.expressedGenes), "expressed genes in both %s and %s" %self.sampleNames
r"""OS routines for Mac, DOS, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, dos, os2, mac, or ce, e.g. unlink, stat, etc.
- os.path is one of the modules posixpath, ntpath, macpath, or dospath
- os.name is 'posix', 'nt', 'dos', 'os2', 'mac', 'ce' or 'riscos'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys
_names = sys.builtin_module_names
altsep = None
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
if 'posix' in _names:
name = 'posix'
linesep = '\n'
curdir = '.'; pardir = '..'; sep = '/'; pathsep = ':'
defpath = ':/bin:/usr/bin'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath
path = posixpath
del posixpath
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
curdir = '.'; pardir = '..'; sep = '\\'; pathsep = ';'
defpath = '.;C:\\bin'
from nt import *
for i in ['_exit']:
try:
exec "from nt import " + i
except ImportError:
pass
import ntpath
path = ntpath
del ntpath
import nt
__all__.extend(_get_exports_list(nt))
del nt
elif 'dos' in _names:
name = 'dos'
linesep = '\r\n'
curdir = '.'; pardir = '..'; sep = '\\'; pathsep = ';'
defpath = '.;C:\\bin'
from dos import *
try:
from dos import _exit
except ImportError:
pass
import dospath
path = dospath
del dospath
import dos
__all__.extend(_get_exports_list(dos))
del dos
elif 'os2' in _names:
name = 'os2'
linesep = '\r\n'
curdir = '.'; pardir = '..'; sep = '\\'; pathsep = ';'
defpath = '.;C:\\bin'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
import ntpath
path = ntpath
del ntpath
import os2
__all__.extend(_get_exports_list(os2))
del os2
elif 'mac' in _names:
name = 'mac'
linesep = '\r'
curdir = ':'; pardir = '::'; sep = ':'; pathsep = '\n'
defpath = ':'
from mac import *
try:
from mac import _exit
except ImportError:
pass
import macpath
path = macpath
del macpath
import mac
__all__.extend(_get_exports_list(mac))
del mac
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
curdir = '.'; pardir = '..'; sep = '\\'; pathsep = ';'
defpath = '\\Windows'
from ce import *
for i in ['_exit']:
try:
exec "from ce import " + i
except ImportError:
pass
# We can use the standard Windows path.
import ntpath
path = ntpath
del ntpath
import ce
__all__.extend(_get_exports_list(ce))
del ce
elif 'riscos' in _names:
name = 'riscos'
linesep = '\n'
curdir = '@'; pardir = '^'; sep = '.'; pathsep = ','
defpath = '<Run$Dir>'
from riscos import *
try:
from riscos import _exit
except ImportError:
pass
import riscospath
path = riscospath
del riscospath
import riscos
__all__.extend(_get_exports_list(riscos))
del riscos
else:
raise ImportError, 'no os specific module found'
__all__.append("path")
del _names
sys.modules['os.path'] = path
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0777):
"""makedirs(path [, mode=0777]) -> None
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. This is
recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
makedirs(head, mode)
mkdir(name, mode)
def removedirs(name):
"""removedirs(path) -> None
Super-rmdir; remove a leaf directory and empty all intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new) -> None
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
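# Example (illustrative): makedirs('a/b/c') creates 'a', then 'a/b', then
# 'a/b/c'; removedirs('a/b/c') removes 'a/b/c' and then prunes 'a/b' and 'a'
# as long as each is left empty; renames('a/b/old.txt', 'x/y/new.txt') creates
# 'x/y' if needed, renames the file, and finally prunes the now-empty 'a/b'
# and 'a'.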
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execv(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
_notfound = None
def _execvpe(file, args, env=None):
if env is not None:
func = execve
argrest = (args, env)
else:
func = execv
argrest = (args,)
env = environ
global _notfound
head, tail = path.split(file)
if head:
apply(func, (file,) + argrest)
return
if env.has_key('PATH'):
envpath = env['PATH']
else:
envpath = defpath
PATH = envpath.split(pathsep)
if not _notfound:
if sys.platform[:4] == 'beos':
# Process handling (fork, wait) under BeOS (up to 5.0)
# doesn't interoperate reliably with the thread interlocking
# that happens during an import. The actual error we need
# is the same on BeOS for posix.open() et al., ENOENT.
try: unlink('/_#.# ## #.#')
except error, _notfound: pass
else:
import tempfile
t = tempfile.mktemp()
# Exec a file that is guaranteed not to exist
try: execv(t, ('blah',))
except error, _notfound: pass
exc, arg = error, _notfound
for dir in PATH:
fullname = path.join(dir, file)
try:
apply(func, (fullname,) + argrest)
except error, (errno, msg):
if errno != arg[0]:
exc, arg = error, (errno, msg)
raise exc, arg
# Change environ to automatically call putenv() if it exists
try:
# This will fail if there's no putenv
putenv
except NameError:
pass
else:
import UserDict
if name == "riscos":
# On RISC OS, all env access goes through getenv and putenv
from riscosenviron import _Environ
elif name in ('os2', 'nt', 'dos'): # Where Env Var Names Must Be UPPERCASE
# But we store them as upper case
class _Environ(UserDict.UserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
data = self.data
for k, v in environ.items():
data[k.upper()] = v
def __setitem__(self, key, item):
putenv(key, item)
self.data[key.upper()] = item
def __getitem__(self, key):
return self.data[key.upper()]
def __delitem__(self, key):
del self.data[key.upper()]
def has_key(self, key):
return self.data.has_key(key.upper())
def get(self, key, failobj=None):
return self.data.get(key.upper(), failobj)
def update(self, dict):
for k, v in dict.items():
self[k] = v
else: # Where Env Var Names Can Be Mixed Case
class _Environ(UserDict.UserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
self.data = environ
def __setitem__(self, key, item):
putenv(key, item)
self.data[key] = item
def update(self, dict):
for k, v in dict.items():
self[k] = v
environ = _Environ(environ)
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default."""
return environ.get(key, default)
__all__.append("getenv")
def _exists(name):
try:
eval(name)
return 1
except NameError:
return 0
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise error, "Not stopped, signaled or exited???"
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
# Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnlp","spawnlpe","spawnv", "spawnve","spawnvp",
"spawnvpe","spawnl","spawnle",])
# Supply popen2 etc. (for Unix)
if _exists("fork"):
if not _exists("popen2"):
def popen2(cmd, mode="t", bufsize=-1):
import popen2
stdout, stdin = popen2.popen2(cmd, bufsize)
return stdin, stdout
__all__.append("popen2")
if not _exists("popen3"):
def popen3(cmd, mode="t", bufsize=-1):
import popen2
stdout, stdin, stderr = popen2.popen3(cmd, bufsize)
return stdin, stdout, stderr
__all__.append("popen3")
if not _exists("popen4"):
def popen4(cmd, mode="t", bufsize=-1):
import popen2
stdout, stdin = popen2.popen4(cmd, bufsize)
return stdin, stdout
__all__.append("popen4")
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run ALBERT on SQuAD 1.1 and SQuAD 2.0 using sentence piece tokenization.
The file is forked from:
https://github.com/google-research/ALBERT/blob/master/run_squad_sp.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import REDACTED
from absl import logging
import numpy as np
import tensorflow.compat.v2 as tf
from REDACTED.tf2_bert.bert import tokenization
class SquadExample(object):
"""A single training/test example for simple sequence classification.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
paragraph_text,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False):
self.qas_id = qas_id
self.question_text = question_text
self.paragraph_text = paragraph_text
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
s += ", question_text: %s" % (
tokenization.printable_text(self.question_text))
s += ", paragraph_text: [%s]" % (" ".join(self.paragraph_text))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.end_position:
s += ", end_position: %d" % (self.end_position)
if self.is_impossible:
s += ", is_impossible: %r" % (self.is_impossible)
return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tok_start_to_orig_index,
tok_end_to_orig_index,
token_is_max_context,
tokens,
input_ids,
input_mask,
segment_ids,
paragraph_len,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tok_start_to_orig_index = tok_start_to_orig_index
self.tok_end_to_orig_index = tok_end_to_orig_index
self.token_is_max_context = token_is_max_context
self.tokens = tokens
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.paragraph_len = paragraph_len
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def read_squad_examples(input_file, is_training, version_2_with_negative):
"""Read a SQuAD json file into a list of SquadExample."""
del version_2_with_negative
with tf.io.gfile.GFile(input_file, "r") as reader:
input_data = json.load(reader)["data"]
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
orig_answer_text = None
is_impossible = False
if is_training:
is_impossible = qa.get("is_impossible", False)
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
start_position = answer["answer_start"]
else:
start_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
paragraph_text=paragraph_text,
orig_answer_text=orig_answer_text,
start_position=start_position,
is_impossible=is_impossible)
examples.append(example)
return examples
def _convert_index(index, pos, m=None, is_start=True):
"""Converts index."""
if index[pos] is not None:
return index[pos]
n = len(index)
rear = pos
while rear < n - 1 and index[rear] is None:
rear += 1
front = pos
while front > 0 and index[front] is None:
front -= 1
assert index[front] is not None or index[rear] is not None
if index[front] is None:
if index[rear] >= 1:
if is_start:
return 0
else:
return index[rear] - 1
return index[rear]
if index[rear] is None:
if m is not None and index[front] < m - 1:
if is_start:
return index[front] + 1
else:
return m - 1
return index[front]
if is_start:
if index[rear] > index[front] + 1:
return index[front] + 1
else:
return index[rear]
else:
if index[rear] > index[front] + 1:
return index[rear] - 1
else:
return index[front]
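# Illustrative trace of the fallback logic above (hypothetical mapping):
#   _convert_index([0, None, None, 5], 1, is_start=True)  returns 1
#     (index[1] is None; the nearest mapped neighbours are index[0]=0 and
#      index[3]=5, and a start position resolves to index[front] + 1)
#   _convert_index([0, None, None, 5], 1, is_start=False) returns 4
#     (an end position resolves to index[rear] - 1 instead)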
def convert_examples_to_features(examples,
tokenizer,
max_seq_length,
doc_stride,
max_query_length,
is_training,
output_fn,
do_lower_case,
batch_size=None):
"""Loads a data file into a list of `InputBatch`s."""
cnt_pos, cnt_neg = 0, 0
base_id = 1000000000
unique_id = base_id
max_n, max_m = 1024, 1024
f = np.zeros((max_n, max_m), dtype=np.float32)
for (example_index, example) in enumerate(examples):
if example_index % 100 == 0:
logging.info("Converting %d/%d pos %d neg %d", example_index,
len(examples), cnt_pos, cnt_neg)
query_tokens = tokenization.encode_ids(
tokenizer.sp_model,
tokenization.preprocess_text(
example.question_text, lower=do_lower_case))
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
paragraph_text = example.paragraph_text
para_tokens = tokenization.encode_pieces(
tokenizer.sp_model,
tokenization.preprocess_text(
example.paragraph_text, lower=do_lower_case))
chartok_to_tok_index = []
tok_start_to_chartok_index = []
tok_end_to_chartok_index = []
char_cnt = 0
for i, token in enumerate(para_tokens):
new_token = token.replace(tokenization.SPIECE_UNDERLINE, " ")
chartok_to_tok_index.extend([i] * len(new_token))
tok_start_to_chartok_index.append(char_cnt)
char_cnt += len(new_token)
tok_end_to_chartok_index.append(char_cnt - 1)
tok_cat_text = "".join(para_tokens).replace(tokenization.SPIECE_UNDERLINE,
" ")
n, m = len(paragraph_text), len(tok_cat_text)
if n > max_n or m > max_m:
max_n = max(n, max_n)
max_m = max(m, max_m)
f = np.zeros((max_n, max_m), dtype=np.float32)
g = {}
# pylint: disable=cell-var-from-loop
def _lcs_match(max_dist, n=n, m=m):
"""Longest-common-substring algorithm."""
f.fill(0)
g.clear()
      ### longest common subsequence
# f[i, j] = max(f[i - 1, j], f[i, j - 1], f[i - 1, j - 1] + match(i, j))
for i in range(n):
# unlike standard LCS, this is specifically optimized for the setting
# because the mismatch between sentence pieces and original text will
# be small
for j in range(i - max_dist, i + max_dist):
if j >= m or j < 0:
continue
if i > 0:
g[(i, j)] = 0
f[i, j] = f[i - 1, j]
if j > 0 and f[i, j - 1] > f[i, j]:
g[(i, j)] = 1
f[i, j] = f[i, j - 1]
f_prev = f[i - 1, j - 1] if i > 0 and j > 0 else 0
if (tokenization.preprocess_text(
paragraph_text[i], lower=do_lower_case,
remove_space=False) == tok_cat_text[j] and f_prev + 1 > f[i, j]):
g[(i, j)] = 2
f[i, j] = f_prev + 1
# pylint: enable=cell-var-from-loop
max_dist = abs(n - m) + 5
for _ in range(2):
_lcs_match(max_dist)
if f[n - 1, m - 1] > 0.8 * n:
break
max_dist *= 2
orig_to_chartok_index = [None] * n
chartok_to_orig_index = [None] * m
i, j = n - 1, m - 1
while i >= 0 and j >= 0:
if (i, j) not in g:
break
if g[(i, j)] == 2:
orig_to_chartok_index[i] = j
chartok_to_orig_index[j] = i
i, j = i - 1, j - 1
elif g[(i, j)] == 1:
j = j - 1
else:
i = i - 1
if (all(v is None for v in orig_to_chartok_index) or
f[n - 1, m - 1] < 0.8 * n):
logging.info("MISMATCH DETECTED!")
continue
tok_start_to_orig_index = []
tok_end_to_orig_index = []
for i in range(len(para_tokens)):
start_chartok_pos = tok_start_to_chartok_index[i]
end_chartok_pos = tok_end_to_chartok_index[i]
start_orig_pos = _convert_index(
chartok_to_orig_index, start_chartok_pos, n, is_start=True)
end_orig_pos = _convert_index(
chartok_to_orig_index, end_chartok_pos, n, is_start=False)
tok_start_to_orig_index.append(start_orig_pos)
tok_end_to_orig_index.append(end_orig_pos)
if not is_training:
tok_start_position = tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = 0
tok_end_position = 0
if is_training and not example.is_impossible:
start_position = example.start_position
end_position = start_position + len(example.orig_answer_text) - 1
start_chartok_pos = _convert_index(
orig_to_chartok_index, start_position, is_start=True)
tok_start_position = chartok_to_tok_index[start_chartok_pos]
end_chartok_pos = _convert_index(
orig_to_chartok_index, end_position, is_start=False)
tok_end_position = chartok_to_tok_index[end_chartok_pos]
assert tok_start_position <= tok_end_position
def _piece_to_id(x):
return tokenizer.sp_model.PieceToId(x)
all_doc_tokens = list(map(_piece_to_id, para_tokens))
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
    # of up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_is_max_context = {}
segment_ids = []
cur_tok_start_to_orig_index = []
cur_tok_end_to_orig_index = []
tokens.append(tokenizer.sp_model.PieceToId("[CLS]"))
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append(tokenizer.sp_model.PieceToId("[SEP]"))
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
cur_tok_start_to_orig_index.append(
tok_start_to_orig_index[split_token_index])
cur_tok_end_to_orig_index.append(
tok_end_to_orig_index[split_token_index])
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append(tokenizer.sp_model.PieceToId("[SEP]"))
segment_ids.append(1)
paragraph_len = len(tokens)
input_ids = tokens
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
span_is_impossible = example.is_impossible
start_position = None
end_position = None
if is_training and not span_is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
# continue
start_position = 0
end_position = 0
span_is_impossible = True
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and span_is_impossible:
start_position = 0
end_position = 0
if example_index < 20:
logging.info("*** Example ***")
logging.info("unique_id: %s", (unique_id))
logging.info("example_index: %s", (example_index))
logging.info("doc_span_index: %s", (doc_span_index))
logging.info("tok_start_to_orig_index: %s",
" ".join([str(x) for x in cur_tok_start_to_orig_index]))
logging.info("tok_end_to_orig_index: %s",
" ".join([str(x) for x in cur_tok_end_to_orig_index]))
logging.info(
"token_is_max_context: %s", " ".join(
["%d:%s" % (x, y) for (x, y) in token_is_max_context.items()]))
logging.info(
"input_pieces: %s",
" ".join([tokenizer.sp_model.IdToPiece(x) for x in tokens]))
logging.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logging.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logging.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
if is_training and span_is_impossible:
logging.info("impossible example span")
if is_training and not span_is_impossible:
pieces = [
tokenizer.sp_model.IdToPiece(token)
for token in tokens[start_position:(end_position + 1)]
]
answer_text = tokenizer.sp_model.DecodePieces(pieces)
logging.info("start_position: %d", (start_position))
logging.info("end_position: %d", (end_position))
logging.info("answer: %s", (tokenization.printable_text(answer_text)))
      # With multiprocessing, example_index is only the index within the
      # current process, so we store example_index=None for training features
      # to prevent it from being misused later. The current code does not use
      # example_index for training data.
if is_training:
feat_example_index = None
else:
feat_example_index = example_index
feature = InputFeatures(
unique_id=unique_id,
example_index=feat_example_index,
doc_span_index=doc_span_index,
tok_start_to_orig_index=cur_tok_start_to_orig_index,
tok_end_to_orig_index=cur_tok_end_to_orig_index,
token_is_max_context=token_is_max_context,
tokens=[tokenizer.sp_model.IdToPiece(x) for x in tokens],
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
paragraph_len=paragraph_len,
start_position=start_position,
end_position=end_position,
is_impossible=span_is_impossible)
# Run callback
if is_training:
output_fn(feature)
else:
output_fn(feature, is_padding=False)
unique_id += 1
if span_is_impossible:
cnt_neg += 1
else:
cnt_pos += 1
if not is_training and feature:
assert batch_size
num_padding = 0
num_examples = unique_id - base_id
if unique_id % batch_size != 0:
num_padding = batch_size - (num_examples % batch_size)
dummy_feature = copy.deepcopy(feature)
for _ in range(num_padding):
dummy_feature.unique_id = unique_id
# Run callback
      output_fn(dummy_feature, is_padding=True)
unique_id += 1
logging.info("Total number of instances: %d = pos %d neg %d",
cnt_pos + cnt_neg, cnt_pos, cnt_neg)
return unique_id - base_id
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
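# Worked example (illustrative numbers, following the docstring above): for the
# token 'bought' in span B ("to the store and bought"), left context = 4,
# right context = 0, length = 5, so score = min(4, 0) + 0.01 * 5 = 0.05.
# In span C ("and bought a gallon of"), left = 1, right = 3, so
# score = min(1, 3) + 0.01 * 5 = 1.05. Span C wins, so _check_is_max_context
# returns True only when cur_span_index points at span C.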
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
def write_predictions(all_examples,
all_features,
all_results,
n_best_size,
max_answer_length,
do_lower_case,
output_prediction_file,
output_nbest_file,
output_null_log_odds_file,
version_2_with_negative=False,
null_score_diff_threshold=0.0,
verbose=False):
"""Write final predictions to the json file and log-odds of null if needed."""
del do_lower_case, verbose
logging.info("Writing predictions to: %s", (output_prediction_file))
logging.info("Writing nbest to: %s", (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
    min_null_feature_index = 0  # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
if version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
doc_offset = feature.tokens.index("[SEP]") + 1
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index - doc_offset >= len(feature.tok_start_to_orig_index):
continue
if end_index - doc_offset >= len(feature.tok_end_to_orig_index):
continue
# if start_index not in feature.tok_start_to_orig_index:
# continue
# if end_index not in feature.tok_end_to_orig_index:
# continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index - doc_offset,
end_index=end_index - doc_offset,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=-1,
end_index=-1,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index >= 0: # this is a non-null prediction
tok_start_to_orig_index = feature.tok_start_to_orig_index
tok_end_to_orig_index = feature.tok_end_to_orig_index
start_orig_pos = tok_start_to_orig_index[pred.start_index]
end_orig_pos = tok_end_to_orig_index[pred.end_index]
paragraph_text = example.paragraph_text
final_text = paragraph_text[start_orig_pos:end_orig_pos + 1].strip()
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
    # if we didn't include the empty option in the n-best, include it
if version_2_with_negative:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="", start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if not version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
assert best_non_null_entry is not None
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
with tf.io.gfile.GFile(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with tf.io.gfile.GFile(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
with tf.io.gfile.GFile(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
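# Quick sanity check (illustrative values, not part of the original module):
#   _compute_softmax([1.0, 2.0]) ~= [0.2689, 0.7311]
# because exp(1 - 2) / (exp(1 - 2) + exp(0)) = 0.3679 / 1.3679 ~= 0.2689.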
class FeatureWriter(object):
"""Writes InputFeature to TF example file."""
def __init__(self, filename, is_training):
self.filename = filename
self.is_training = is_training
self.num_features = 0
self._writer = tf.io.TFRecordWriter(filename)
def process_feature(self, feature):
"""Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
self.num_features += 1
def create_int_feature(values):
feature = tf.train.Feature(
int64_list=tf.train.Int64List(value=list(values)))
return feature
features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([feature.unique_id])
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
if self.is_training:
features["start_positions"] = create_int_feature([feature.start_position])
features["end_positions"] = create_int_feature([feature.end_position])
impossible = 0
if feature.is_impossible:
impossible = 1
features["is_impossible"] = create_int_feature([impossible])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
self._writer.write(tf_example.SerializeToString())
def close(self):
self._writer.close()
def generate_tf_record_from_json_file(input_file_path,
sp_model_file,
output_path,
max_seq_length=384,
do_lower_case=True,
max_query_length=64,
doc_stride=128,
version_2_with_negative=False):
"""Generates and saves training data into a tf record file."""
train_examples = read_squad_examples(
input_file=input_file_path,
is_training=True,
version_2_with_negative=version_2_with_negative)
tokenizer = tokenization.FullSentencePieceTokenizer(
sp_model_file=sp_model_file)
train_writer = FeatureWriter(filename=output_path, is_training=True)
number_of_examples = convert_examples_to_features(
examples=train_examples,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_training=True,
output_fn=train_writer.process_feature,
do_lower_case=do_lower_case)
train_writer.close()
meta_data = {
"task_type": "bert_squad",
"train_data_size": number_of_examples,
"max_seq_length": max_seq_length,
"max_query_length": max_query_length,
"doc_stride": doc_stride,
"version_2_with_negative": version_2_with_negative,
}
return meta_data
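# A minimal usage sketch (hypothetical file paths, not part of the original
# module): generate training TFRecords for SQuAD v1.1 with default settings.
#
#   meta_data = generate_tf_record_from_json_file(
#       input_file_path="/tmp/train-v1.1.json",
#       sp_model_file="/tmp/30k-clean.model",
#       output_path="/tmp/squad_train.tf_record")
#   print(meta_data["train_data_size"])  # number of features written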
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs SpecCPU2006.
From SpecCPU2006's documentation:
The SPEC CPU2006 benchmark is SPEC's industry-standardized, CPU-intensive
benchmark suite, stressing a system's processor, memory subsystem and compiler.
SpecCPU2006 homepage: http://www.spec.org/cpu2006/
"""
import logging
import posixpath
import re
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
FLAGS = flags.FLAGS
flags.DEFINE_enum('benchmark_subset', 'int', ['int', 'fp', 'all'],
'specify a subset of benchmarks to run: int, fp, all')
flags.DEFINE_string('runspec_config', 'linux64-x64-gcc47.cfg',
'name of the cpu2006 configuration to use (runspec --config'
' argument)')
flags.DEFINE_integer('runspec_iterations', 3,
'number of benchmark iterations to execute - default 3 '
'(runspec --iterations argument)')
flags.DEFINE_string('runspec_define', '',
'optional comma separated list of preprocessor macros: '
'SYMBOL[=VALUE] - e.g. numa,smt,sse=SSE4.2 (runspec '
'--define arguments)')
flags.DEFINE_boolean('runspec_enable_32bit', default=False,
help='setting this flag will result in installation of '
'multilib packages to enable use of 32-bit cpu2006 '
'binaries (useful when running on memory constrained '
'instance types where 64-bit execution may be problematic '
' - i.e. < 1.5-2GB/core)')
flags.DEFINE_boolean('runspec_keep_partial_results', False,
'speccpu will report an aggregate score even if some of '
'the component tests failed with a "NR" status. If this '
'flag is set to true, save the available results and '
'mark metadata with partial=true. If unset, partial '
'failures are treated as errors.')
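# Illustrative invocation (assuming the standard pkb.py entry point; only the
# flag names defined above are taken from this module):
#   ./pkb.py --benchmarks=speccpu2006 --benchmark_subset=fp \
#       --runspec_iterations=1 --runspec_define=smt,sse=SSE4.2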
BENCHMARK_INFO = {'name': 'speccpu2006',
'description': 'Run Spec CPU2006',
'scratch_disk': True,
'num_machines': 1}
SPECCPU2006_TAR = 'cpu2006v1.2.tgz'
SPECCPU2006_DIR = 'cpu2006'
def GetInfo():
return BENCHMARK_INFO
def CheckPrerequisites():
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
data.ResourcePath(SPECCPU2006_TAR)
def Prepare(benchmark_spec):
"""Install SpecCPU2006 on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
vm = vms[0]
logging.info('prepare SpecCPU2006 on %s', vm)
vm.Install('wget')
vm.Install('build_tools')
vm.Install('fortran')
  if FLAGS.runspec_enable_32bit:
vm.Install('multilib')
vm.Install('numactl')
try:
local_tar_file_path = data.ResourcePath(SPECCPU2006_TAR)
except data.ResourceNotFound as e:
logging.error('Please provide %s under perfkitbenchmarker/data directory '
'before running SpecCPU2006 benchmark.', SPECCPU2006_TAR)
raise errors.Benchmarks.PrepareException(str(e))
vm.tar_file_path = posixpath.join(vm.GetScratchDir(), SPECCPU2006_TAR)
vm.spec_dir = posixpath.join(vm.GetScratchDir(), SPECCPU2006_DIR)
vm.RemoteCommand('chmod 777 %s' % vm.GetScratchDir())
vm.PushFile(local_tar_file_path, vm.GetScratchDir())
vm.RemoteCommand('cd %s && tar xvfz %s' % (vm.GetScratchDir(),
SPECCPU2006_TAR))
def ExtractScore(stdout, vm, keep_partial_results):
"""Exact the Spec (int|fp) score from stdout.
Args:
stdout: stdout from running RemoteCommand.
vm: The vm instance where Spec CPU2006 was run.
keep_partial_results: A boolean indicating whether partial results should
be extracted in the event that not all benchmarks were successfully
run. See the "runspec_keep_partial_results" flag for more info.
Sample input for SPECint:
...
...
=============================================
400.perlbench 9770 417 23.4 *
401.bzip2 9650 565 17.1 *
403.gcc 8050 364 22.1 *
429.mcf 9120 364 25.1 *
445.gobmk 10490 499 21.0 *
456.hmmer 9330 491 19.0 *
458.sjeng 12100 588 20.6 *
462.libquantum 20720 468 44.2 *
464.h264ref 22130 700 31.6 *
471.omnetpp 6250 349 17.9 *
473.astar 7020 482 14.6 *
483.xalancbmk 6900 248 27.8 *
Est. SPECint(R)_base2006 22.7
Sample input for SPECfp:
...
...
=============================================
410.bwaves 13590 717 19.0 *
416.gamess 19580 923 21.2 *
433.milc 9180 480 19.1 *
434.zeusmp 9100 600 15.2 *
435.gromacs 7140 605 11.8 *
436.cactusADM 11950 1289 9.27 *
437.leslie3d 9400 859 10.9 *
444.namd 8020 504 15.9 *
447.dealII 11440 409 28.0 *
450.soplex 8340 272 30.6 *
453.povray 5320 231 23.0 *
454.calculix 8250 993 8.31 *
459.GemsFDTD 10610 775 13.7 *
465.tonto 9840 565 17.4 *
470.lbm 13740 365 37.7 *
481.wrf 11170 788 14.2 *
482.sphinx3 19490 668 29.2 *
Est. SPECfp(R)_base2006 17.5
Returns:
A list of sample.Sample objects.
"""
results = []
re_begin_section = re.compile('^={1,}')
re_end_section = re.compile(r'Est. (SPEC.*_base2006)\s*(\S*)')
result_section = []
in_result_section = False
# Extract the summary section
for line in stdout.splitlines():
if in_result_section:
result_section.append(line)
# search for begin of result section
match = re.search(re_begin_section, line)
if match:
assert not in_result_section
in_result_section = True
continue
# search for end of result section
match = re.search(re_end_section, line)
if match:
assert in_result_section
spec_name = str(match.group(1))
try:
spec_score = float(match.group(2))
except ValueError:
# Partial results may get reported as '--' instead of a number.
spec_score = None
in_result_section = False
# remove the final SPEC(int|fp) score, which has only 2 columns.
result_section.pop()
metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus}
missing_results = []
for benchmark in result_section:
# Skip over failed runs, but count them since they make the overall
# result invalid.
if 'NR' in benchmark:
logging.warning('SpecCPU2006 missing result: %s', benchmark)
missing_results.append(str(benchmark.split()[0]))
continue
# name, ref_time, time, score, misc
name, _, _, score, _ = benchmark.split()
results.append(sample.Sample(str(name), float(score), '', metadata))
if spec_score is None:
missing_results.append(spec_name)
if missing_results:
if keep_partial_results:
metadata['partial'] = 'true'
metadata['missing_results'] = ','.join(missing_results)
else:
raise errors.Benchmarks.RunError(
'speccpu2006: results missing, see log: ' + ','.join(missing_results))
if spec_score is not None:
results.append(sample.Sample(spec_name, spec_score, '', metadata))
return results
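# For example (using the SPECint sample output in the docstring above), the row
#   '400.perlbench    9770        417       23.4 *'
# splits into (name, ref_time, time, score, misc) and becomes
# sample.Sample('400.perlbench', 23.4, '', metadata), while the trailing
# 'Est. SPECint(R)_base2006   22.7' line yields the aggregate sample.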
def ParseOutput(vm):
"""Parses the output from Spec CPU2006.
Args:
vm: The vm instance where Spec CPU2006 was run.
Returns:
A list of samples to be published (in the same format as Run() returns).
"""
results = []
log_files = []
# FIXME(liquncheng): Only reference runs generate SPEC scores. The log
# id is hardcoded as 001, which might change with different runspec
# parameters. Spec CPU 2006 will generate different logs for build, test
# run, training run and ref run.
if FLAGS.benchmark_subset in ('int', 'all'):
log_files.append('CINT2006.001.ref.txt')
if FLAGS.benchmark_subset in ('fp', 'all'):
log_files.append('CFP2006.001.ref.txt')
for log in log_files:
stdout, _ = vm.RemoteCommand('cat %s/result/%s' % (vm.spec_dir, log),
should_log=True)
results.extend(ExtractScore(stdout, vm, FLAGS.runspec_keep_partial_results))
return results
def Run(benchmark_spec):
"""Run SpecCPU2006 on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
vms = benchmark_spec.vms
vm = vms[0]
logging.info('SpecCPU2006 running on %s', vm)
num_cpus = vm.num_cpus
iterations = ' --iterations=' + repr(FLAGS.runspec_iterations) if \
FLAGS.runspec_iterations != 3 else ''
defines = ' --define ' + ' --define '.join(FLAGS.runspec_define.split(','))\
if FLAGS.runspec_define != '' else ''
cmd = ('cd %s; . ./shrc; ./bin/relocate; . ./shrc; rm -rf result; '
'runspec --config=%s --tune=base '
'--size=ref --noreportable --rate %s%s%s %s'
% (vm.spec_dir, FLAGS.runspec_config, num_cpus, iterations,
defines, FLAGS.benchmark_subset))
vm.RobustRemoteCommand(cmd)
logging.info('SpecCPU2006 Results:')
return ParseOutput(vm)
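# With the default flags and an 8-vCPU VM, the command composed above would
# look roughly like (illustrative):
#   cd <spec_dir>; . ./shrc; ./bin/relocate; . ./shrc; rm -rf result; \
#   runspec --config=linux64-x64-gcc47.cfg --tune=base --size=ref \
#   --noreportable --rate 8 int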
def Cleanup(benchmark_spec):
"""Cleanup SpecCPU2006 on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
vm = vms[0]
vm.RemoteCommand('rm -rf %s' % vm.spec_dir)
vm.RemoteCommand('rm -f %s' % vm.tar_file_path)
from django.test import TestCase
from .models import User, CollectionSet, Credential, Collection, Seed, Group, Harvest, HarvestStat, Warc, \
default_uuid, Export
from .utils import collection_path as get_collection_path, collection_set_path as get_collection_set_path
import pytz
from datetime import datetime, date
from tzlocal import get_localzone
import os
import shutil
class CollectionTest(TestCase):
def setUp(self):
user = User.objects.create_superuser(username="test_user", email="test_user@test.com",
password="test_password")
group = Group.objects.create(name="test_group")
self.collection_set = CollectionSet.objects.create(group=group, name="test_collection_set")
self.credential = Credential.objects.create(user=user, platform="test_platform",
token="{}")
self.collection = Collection.objects.create(collection_set=self.collection_set,
name="test_collection",
harvest_type=Collection.TWITTER_USER_TIMELINE,
credential=self.credential)
self.collection_path = get_collection_path(self.collection)
os.makedirs(self.collection_path)
# Seed
        Seed.objects.create(collection=self.collection, token='{"token": "token1"}', is_active=True)
# Harvest
historical_collection = self.collection.history.all()[0]
historical_credential = historical_collection.credential.history.all()[0]
harvest1 = Harvest.objects.create(collection=self.collection,
historical_collection=historical_collection,
historical_credential=historical_credential)
Harvest.objects.create(collection=self.collection,
parent_harvest=harvest1)
# Harvest stats
HarvestStat.objects.create(harvest=harvest1, item="tweets", count=5, harvest_date=date(2016, 5, 20))
HarvestStat.objects.create(harvest=harvest1, item="tweets", count=7, harvest_date=date(2016, 5, 21))
# Warcs
Warc.objects.create(harvest=harvest1, warc_id=default_uuid(), path="/data/warc1.warc.gz", sha1="warc1sha",
bytes=10, date_created=datetime.now(get_localzone()))
def tearDown(self):
if os.path.exists(self.collection_path):
shutil.rmtree(self.collection_path)
def test_required_seed_count(self):
collection = Collection.objects.create(collection_set=self.collection_set,
name="test_collection",
harvest_type=Collection.TWITTER_USER_TIMELINE,
credential=self.credential)
self.assertIsNone(collection.required_seed_count())
collection.harvest_type = Collection.TWITTER_SAMPLE
self.assertEqual(0, collection.required_seed_count())
def test_active_seed_count(self):
collection = Collection.objects.create(collection_set=self.collection_set,
name="test_collection",
harvest_type=Collection.TWITTER_SEARCH,
credential=self.credential)
self.assertEqual(0, collection.active_seed_count())
Seed.objects.create(collection=collection, token='{}', uid="seed1", is_active=True)
self.assertEqual(1, collection.active_seed_count())
Seed.objects.create(collection=collection, token='{}', uid="seed2", is_active=False)
self.assertEqual(1, collection.active_seed_count())
def test_is_streaming(self):
collection = Collection.objects.create(collection_set=self.collection_set,
name="test_collection",
harvest_type=Collection.TWITTER_SEARCH,
credential=self.credential)
self.assertFalse(collection.is_streaming())
collection.harvest_type = Collection.TWITTER_FILTER
self.assertTrue(collection.is_streaming())
def test_last_harvest(self):
collection = Collection.objects.create(collection_set=self.collection_set,
name="test_collection",
harvest_type=Collection.TWITTER_SEARCH,
credential=self.credential)
self.assertIsNone(collection.last_harvest())
historical_collection = collection.history.all()[0]
historical_credential = historical_collection.credential.history.all()[0]
# Add a harvest
harvest1 = Harvest.objects.create(collection=collection,
historical_collection=historical_collection,
historical_credential=historical_credential)
self.assertEqual(harvest1, collection.last_harvest())
# Add a second harvest
harvest2 = Harvest.objects.create(collection=collection,
historical_collection=historical_collection,
historical_credential=historical_credential)
self.assertEqual(harvest2, collection.last_harvest())
# Add a web harvest
harvest3 = Harvest.objects.create(harvest_type="web", collection=collection,
historical_collection=historical_collection,
historical_credential=historical_credential)
self.assertEqual(harvest2, collection.last_harvest())
self.assertEqual(harvest3, collection.last_harvest(include_web_harvests=True))
# Add a skipped harvest
harvest4 = Harvest.objects.create(status=Harvest.SKIPPED, collection=collection,
historical_collection=historical_collection,
historical_credential=historical_credential)
self.assertEqual(harvest2, collection.last_harvest())
self.assertEqual(harvest4, collection.last_harvest(include_skipped=True))
def test_stats(self):
collection1 = Collection.objects.create(collection_set=self.collection_set,
name="test_collection",
harvest_type=Collection.TWITTER_USER_TIMELINE,
credential=self.credential)
historical_collection1 = collection1.history.all()[0]
historical_credential1 = historical_collection1.credential.history.all()[0]
harvest1 = Harvest.objects.create(collection=collection1,
historical_collection=historical_collection1,
historical_credential=historical_credential1)
day1 = date(2016, 5, 20)
day2 = date(2016, 5, 21)
HarvestStat.objects.create(harvest=harvest1, item="tweets", count=5, harvest_date=day1)
HarvestStat.objects.create(harvest=harvest1, item="users", count=6, harvest_date=day1)
HarvestStat.objects.create(harvest=harvest1, item="tweets", count=7, harvest_date=day2)
harvest2 = Harvest.objects.create(collection=collection1,
historical_collection=historical_collection1,
historical_credential=historical_credential1)
HarvestStat.objects.create(harvest=harvest2, item="tweets", count=5, harvest_date=day2)
harvest3 = Harvest.objects.create(parent_harvest=harvest1,
collection=collection1)
HarvestStat.objects.create(harvest=harvest3, item="web resources", count=25, harvest_date=day2)
# Add some extraneous stats.
collection2 = Collection.objects.create(collection_set=self.collection_set,
name="test_collection2",
harvest_type=Collection.TWITTER_USER_TIMELINE,
credential=self.credential)
historical_collection2 = collection2.history.all()[0]
historical_credential2 = historical_collection2.credential.history.all()[0]
harvest4 = Harvest.objects.create(collection=collection2,
historical_collection=historical_collection2,
historical_credential=historical_credential2)
HarvestStat.objects.create(harvest=harvest4, item="tweets", count=7, harvest_date=day1)
stats = collection1.stats()
self.assertEqual(17, stats["tweets"])
self.assertEqual(6, stats["users"])
self.assertEqual(25, stats["web resources"])
def test_warc_totals(self):
collection1 = Collection.objects.create(collection_set=self.collection_set,
name="test_collection",
harvest_type=Collection.TWITTER_USER_TIMELINE,
credential=self.credential)
historical_collection1 = collection1.history.all()[0]
historical_credential1 = historical_collection1.credential.history.all()[0]
Harvest.objects.create(collection=collection1,
historical_collection=historical_collection1,
historical_credential=historical_credential1,
warcs_count=1, warcs_bytes=10)
Harvest.objects.create(collection=collection1,
historical_collection=historical_collection1,
historical_credential=historical_credential1,
warcs_count=2, warcs_bytes=20)
self.assertEqual(3, collection1.warcs_count())
self.assertEqual(30, collection1.warcs_bytes())
def test_delete(self):
self.assertEqual(1, CollectionSet.objects.count())
self.assertEqual(1, Collection.objects.count())
self.assertEqual(1, Seed.objects.count())
self.assertEqual(2, Harvest.objects.count())
self.assertEqual(2, HarvestStat.objects.count())
self.assertEqual(1, Warc.objects.count())
self.assertTrue(os.path.exists(self.collection_path))
self.collection.delete()
self.assertEqual(1, CollectionSet.objects.count())
self.assertEqual(0, Collection.objects.count())
# Verify that deletes cascade
self.assertEqual(0, Seed.objects.count())
self.assertEqual(0, Harvest.objects.count())
self.assertEqual(0, HarvestStat.objects.count())
self.assertEqual(0, Warc.objects.count())
# Verify that collection deleted
self.assertFalse(os.path.exists(self.collection_path))
class CollectionSetTest(TestCase):
def setUp(self):
user = User.objects.create_superuser(username="test_user", email="test_user@test.com",
password="test_password")
group = Group.objects.create(name="test_group")
self.collection_set = CollectionSet.objects.create(group=group, name="test_collection_set")
self.collection_set_path = get_collection_set_path(self.collection_set)
os.makedirs(self.collection_set_path)
self.credential = Credential.objects.create(user=user, platform="test_platform",
token="{}")
collection1 = Collection.objects.create(collection_set=self.collection_set,
name="test_collection",
harvest_type=Collection.TWITTER_USER_TIMELINE,
credential=self.credential)
datetime1 = datetime(2016, 5, 18, 17, 31, tzinfo=pytz.utc)
day1 = date(2016, 5, 18)
self.day2 = date(2016, 5, 19)
historical_collection1 = collection1.history.all()[0]
historical_credential1 = historical_collection1.credential.history.all()[0]
harvest1 = Harvest.objects.create(collection=collection1,
historical_collection=historical_collection1,
historical_credential=historical_credential1,
date_requested=datetime1,
warcs_count=1, warcs_bytes=10)
HarvestStat.objects.create(harvest=harvest1, item="tweets", count=5, harvest_date=day1)
HarvestStat.objects.create(harvest=harvest1, item="users", count=6, harvest_date=day1)
HarvestStat.objects.create(harvest=harvest1, item="tweets", count=7, harvest_date=self.day2)
# Same day
harvest2 = Harvest.objects.create(collection=collection1,
historical_collection=historical_collection1,
historical_credential=historical_credential1,
date_requested=datetime1,
warcs_count=2, warcs_bytes=20)
HarvestStat.objects.create(harvest=harvest2, item="tweets", count=5, harvest_date=day1)
collection2 = Collection.objects.create(collection_set=self.collection_set,
name="test_collection2",
harvest_type=Collection.TWITTER_USER_TIMELINE,
credential=self.credential)
historical_collection2 = collection2.history.all()[0]
historical_credential2 = historical_collection2.credential.history.all()[0]
# Different day
harvest3 = Harvest.objects.create(collection=collection2,
historical_collection=historical_collection2,
historical_credential=historical_credential2,
date_requested=datetime1,
warcs_count=3, warcs_bytes=30)
HarvestStat.objects.create(harvest=harvest3, item="tweets", count=7, harvest_date=self.day2)
def tearDown(self):
if os.path.exists(self.collection_set_path):
shutil.rmtree(self.collection_set_path)
def test_stats(self):
stats = self.collection_set.stats()
self.assertEqual(24, stats["tweets"])
self.assertEqual(6, stats["users"])
def test_warc_totals(self):
self.assertEqual(6, self.collection_set.warcs_count())
self.assertEqual(60, self.collection_set.warcs_bytes())
def test_stats_item(self):
self.assertListEqual([(date(2016, 5, 16), 0),
(date(2016, 5, 17), 0),
(date(2016, 5, 18), 10),
(date(2016, 5, 19), 14)],
self.collection_set.item_stats("tweets", end_date=self.day2, days=4))
self.assertListEqual([(date(2016, 5, 16), 0),
(date(2016, 5, 17), 0),
(date(2016, 5, 18), 6),
(date(2016, 5, 19), 0)],
self.collection_set.item_stats("users", end_date=self.day2, days=4))
def test_stats_items(self):
self.assertListEqual(['tweets', 'users'], self.collection_set.stats_items())
def test_delete(self):
self.assertEqual(1, CollectionSet.objects.count())
self.assertEqual(2, Collection.objects.count())
self.assertTrue(os.path.exists(self.collection_set_path))
self.collection_set.delete()
self.assertEqual(0, CollectionSet.objects.count())
# Verify that deletes cascade
self.assertEqual(0, Collection.objects.count())
# Verify that collection set path deleted
self.assertFalse(os.path.exists(self.collection_set_path))
def test_isactive(self):
self.assertTrue(self.collection_set.is_active())
for collection in self.collection_set.collections.all():
collection.is_active = False
collection.save()
self.assertFalse(self.collection_set.is_active())
class HarvestTest(TestCase):
def setUp(self):
user = User.objects.create_superuser(username="test_user", email="test_user@test.com",
password="test_password")
group = Group.objects.create(name="test_group")
collection_set = CollectionSet.objects.create(group=group, name="test_collection_set")
credential = Credential.objects.create(user=user, platform="test_platform",
token="{}")
collection1 = Collection.objects.create(collection_set=collection_set,
name="test_collection",
harvest_type=Collection.TWITTER_USER_TIMELINE,
credential=credential)
datetime1 = datetime(2016, 5, 18, 17, 31, tzinfo=pytz.utc)
day1 = date(2016, 5, 18)
day2 = date(2016, 5, 19)
historical_collection1 = collection1.history.all()[0]
historical_credential1 = historical_collection1.credential.history.all()[0]
self.harvest1 = Harvest.objects.create(collection=collection1,
historical_collection=historical_collection1,
historical_credential=historical_credential1,
date_requested=datetime1)
HarvestStat.objects.create(harvest=self.harvest1, item="tweets", count=5, harvest_date=day1)
HarvestStat.objects.create(harvest=self.harvest1, item="users", count=6, harvest_date=day1)
HarvestStat.objects.create(harvest=self.harvest1, item="tweets", count=7, harvest_date=day2)
def test_stats(self):
stats = self.harvest1.stats()
self.assertEqual(12, stats["tweets"])
self.assertEqual(6, stats["users"])
class WarcTest(TestCase):
def setUp(self):
user = User.objects.create_superuser(username="test_user", email="test_user@test.com",
password="test_password")
group = Group.objects.create(name="test_group")
collection_set = CollectionSet.objects.create(group=group, name="test_collection_set")
credential = Credential.objects.create(user=user, platform="test_platform",
token="{}")
collection = Collection.objects.create(collection_set=collection_set,
name="test_collection",
harvest_type=Collection.TWITTER_USER_TIMELINE,
credential=credential)
# Harvest
historical_collection = collection.history.all()[0]
historical_credential = historical_collection.credential.history.all()[0]
harvest1 = Harvest.objects.create(collection=collection,
historical_collection=historical_collection,
historical_credential=historical_credential)
self.collection_path = get_collection_path(collection)
os.makedirs(os.path.join(self.collection_path, "2016/11/03"))
self.warc_filepath = os.path.join(self.collection_path, "2016/11/03/test.warc.gz")
with open(self.warc_filepath, "w") as f:
f.write("test")
# Warcs
self.warc = Warc.objects.create(harvest=harvest1, warc_id=default_uuid(), path=self.warc_filepath,
sha1="warc1sha",
bytes=10, date_created=datetime.now(get_localzone()))
def tearDown(self):
if os.path.exists(self.collection_path):
shutil.rmtree(self.collection_path)
def test_delete(self):
self.assertEqual(1, Warc.objects.count())
self.assertTrue(os.path.exists(self.warc_filepath))
self.warc.delete()
self.assertEqual(0, Warc.objects.count())
self.assertTrue(os.path.exists(self.collection_path))
self.assertFalse(os.path.exists(os.path.join(self.collection_path, "2016")))
self.assertFalse(os.path.exists(self.warc_filepath))
class ExportTest(TestCase):
def setUp(self):
user = User.objects.create_superuser(username="test_user", email="test_user@test.com",
password="test_password")
group = Group.objects.create(name="test_group")
collection_set = CollectionSet.objects.create(group=group, name="test_collection_set")
credential = Credential.objects.create(user=user, platform="test_platform",
token="{}")
collection = Collection.objects.create(collection_set=collection_set,
name="test_collection",
harvest_type=Collection.TWITTER_USER_TIMELINE,
credential=credential)
self.export = Export.objects.create(user=user,
collection=collection,
export_type=collection.harvest_type)
os.makedirs(self.export.path)
with open(os.path.join(self.export.path, "test.csv"), "w") as f:
f.write("test")
def test_delete(self):
self.assertEqual(1, Export.objects.count())
self.assertTrue(os.path.exists(self.export.path))
self.export.delete()
self.assertEqual(0, Export.objects.count())
self.assertFalse(os.path.exists(self.export.path))
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nets.inception_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nets import inception
slim = tf.contrib.slim
class InceptionV3Test(tf.test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v3(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 299, 299
inputs = tf.random_uniform((batch_size, height, width, 3))
final_endpoint, end_points = inception.inception_v3_base(inputs)
self.assertTrue(final_endpoint.op.name.startswith(
'InceptionV3/Mixed_7c'))
self.assertListEqual(final_endpoint.get_shape().as_list(),
[batch_size, 8, 8, 2048])
expected_endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3',
'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 299, 299
endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3',
'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_v3_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'InceptionV3/' + endpoint))
self.assertItemsEqual(endpoints[:index+1], end_points)
def testBuildAndCheckAllEndPointsUptoMixed7c(self):
batch_size = 5
height, width = 299, 299
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v3_base(
inputs, final_endpoint='Mixed_7c')
endpoints_shapes = {'Conv2d_1a_3x3': [batch_size, 149, 149, 32],
'Conv2d_2a_3x3': [batch_size, 147, 147, 32],
'Conv2d_2b_3x3': [batch_size, 147, 147, 64],
'MaxPool_3a_3x3': [batch_size, 73, 73, 64],
'Conv2d_3b_1x1': [batch_size, 73, 73, 80],
'Conv2d_4a_3x3': [batch_size, 71, 71, 192],
'MaxPool_5a_3x3': [batch_size, 35, 35, 192],
'Mixed_5b': [batch_size, 35, 35, 256],
'Mixed_5c': [batch_size, 35, 35, 288],
'Mixed_5d': [batch_size, 35, 35, 288],
'Mixed_6a': [batch_size, 17, 17, 768],
'Mixed_6b': [batch_size, 17, 17, 768],
'Mixed_6c': [batch_size, 17, 17, 768],
'Mixed_6d': [batch_size, 17, 17, 768],
'Mixed_6e': [batch_size, 17, 17, 768],
'Mixed_7a': [batch_size, 8, 8, 1280],
'Mixed_7b': [batch_size, 8, 8, 2048],
'Mixed_7c': [batch_size, 8, 8, 2048]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 299, 299
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope(inception.inception_v3_arg_scope()):
inception.inception_v3_base(inputs)
total_params, _ = slim.model_analyzer.analyze_vars(
slim.get_model_variables())
self.assertAlmostEqual(21802784, total_params)
def testBuildEndPoints(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v3(inputs, num_classes)
self.assertTrue('Logits' in end_points)
logits = end_points['Logits']
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('AuxLogits' in end_points)
aux_logits = end_points['AuxLogits']
self.assertListEqual(aux_logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Mixed_7c' in end_points)
pre_pool = end_points['Mixed_7c']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 8, 8, 2048])
self.assertTrue('PreLogits' in end_points)
pre_logits = end_points['PreLogits']
self.assertListEqual(pre_logits.get_shape().as_list(),
[batch_size, 1, 1, 2048])
def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v3(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')]
_, end_points_with_multiplier = inception.inception_v3(
inputs, num_classes, scope='depth_multiplied_net',
depth_multiplier=0.5)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(0.5 * original_depth, new_depth)
def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v3(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')]
_, end_points_with_multiplier = inception.inception_v3(
inputs, num_classes, scope='depth_multiplied_net',
depth_multiplier=2.0)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(2.0 * original_depth, new_depth)
def testRaiseValueErrorWithInvalidDepthMultiplier(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
with self.assertRaises(ValueError):
_ = inception.inception_v3(inputs, num_classes, depth_multiplier=-0.1)
with self.assertRaises(ValueError):
_ = inception.inception_v3(inputs, num_classes, depth_multiplier=0.0)
def testHalfSizeImages(self):
batch_size = 5
height, width = 150, 150
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v3(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7c']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 3, 3, 2048])
def testUnknownImageShape(self):
tf.reset_default_graph()
batch_size = 2
height, width = 299, 299
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v3(inputs, num_classes)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7c']
feed_dict = {inputs: input_np}
tf.initialize_all_variables().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 8, 2048])
  def testUnknownBatchSize(self):
batch_size = 1
height, width = 299, 299
num_classes = 1000
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_v3(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random_uniform((batch_size, height, width, 3))
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 299, 299
num_classes = 1000
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v3(eval_inputs, num_classes,
is_training=False)
predictions = tf.argmax(logits, 1)
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
inception.inception_v3(train_inputs, num_classes)
eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v3(eval_inputs, num_classes,
is_training=False, reuse=True)
predictions = tf.argmax(logits, 1)
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = tf.random_uniform([1, 299, 299, 3])
logits, _ = inception.inception_v3(images,
num_classes=num_classes,
spatial_squeeze=False)
with self.test_session() as sess:
tf.initialize_all_variables().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
if __name__ == '__main__':
tf.test.main()
# minorCodes.py -- generated by makeminors.py
def OMNIORBMinorCode(c):
return 0x41540000 | c
def OMGMinorCode(c):
return 0x4f4d0000 | c
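# Note (not in the generated file): the 0x41540000 prefix appears to be
# omniORB's CORBA vendor minor code ID (ASCII "AT"), while 0x4f4d0000 is the
# OMG-standard VMCID (ASCII "OM"). Worked example:
#   OMNIORBMinorCode(98) == 0x41540000 | 98 == 0x41540062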
UNKNOWN_UserException = OMGMinorCode(1)
UNKNOWN_SystemException = OMGMinorCode(2)
UNKNOWN_PythonException = OMNIORBMinorCode(98)
UNKNOWN_OmniThreadException = OMNIORBMinorCode(123)
BAD_PARAM_ValueFactoryFailure = OMGMinorCode(1)
BAD_PARAM_RIDAlreadyDefinedInIfR = OMGMinorCode(2)
BAD_PARAM_NameAlreadyUsedInIfR = OMGMinorCode(3)
BAD_PARAM_TargetIsInvalidContainer = OMGMinorCode(4)
BAD_PARAM_InheritedNameClash = OMGMinorCode(5)
BAD_PARAM_IncorrectAbstractIntfType = OMGMinorCode(6)
BAD_PARAM_BadSchemeName = OMGMinorCode(7)
BAD_PARAM_BadAddress = OMGMinorCode(8)
BAD_PARAM_BadSchemeSpecificPart = OMGMinorCode(9)
BAD_PARAM_BadURIOther = OMGMinorCode(10)
BAD_PARAM_NonAbstractBase = OMGMinorCode(11)
BAD_PARAM_SupportsTooManyConcreteIntfs = OMGMinorCode(12)
BAD_PARAM_IncompleteTypeCode = OMGMinorCode(13)
BAD_PARAM_InvalidObjectId = OMGMinorCode(14)
BAD_PARAM_InvalidName = OMGMinorCode(15)
BAD_PARAM_InvalidRepositoryId = OMGMinorCode(16)
BAD_PARAM_InvalidMemberName = OMGMinorCode(17)
BAD_PARAM_DuplicateLabelValue = OMGMinorCode(18)
BAD_PARAM_IncompatibleDiscriminatorType = OMGMinorCode(19)
BAD_PARAM_IllegitimateDiscriminatorType = OMGMinorCode(20)
BAD_PARAM_NotAnException = OMGMinorCode(21)
BAD_PARAM_UnlistedUserException = OMGMinorCode(22)
BAD_PARAM_WCharTCSNotKnown = OMGMinorCode(23)
BAD_PARAM_ServiceContextNotInRange = OMGMinorCode(24)
BAD_PARAM_EnumValueOutOfRange = OMGMinorCode(25)
BAD_PARAM_InvalidServiceCtxtId = OMGMinorCode(26)
BAD_PARAM_RegisterNilObject = OMGMinorCode(27)
BAD_PARAM_InvalidComponentId = OMGMinorCode(28)
BAD_PARAM_InvalidProfileId = OMGMinorCode(29)
BAD_PARAM_DuplicatePolicyType = OMGMinorCode(30)
BAD_PARAM_PollableAlreadyInPollableSet = OMGMinorCode(43)
BAD_PARAM_InvalidCompressionLevel = OMGMinorCode(44)
BAD_PARAM_IndexOutOfRange = OMNIORBMinorCode(21)
BAD_PARAM_InvalidUnionDiscValue = OMNIORBMinorCode(27)
BAD_PARAM_InvalidInitialSize = OMNIORBMinorCode(29)
BAD_PARAM_InvalidServant = OMNIORBMinorCode(35)
BAD_PARAM_IsPseudoObject = OMNIORBMinorCode(37)
BAD_PARAM_InvalidObjectRef = OMNIORBMinorCode(43)
BAD_PARAM_WCharOutOfRange = OMNIORBMinorCode(46)
BAD_PARAM_InternalInvariant = OMNIORBMinorCode(49)
BAD_PARAM_NullStringUnexpected = OMNIORBMinorCode(57)
BAD_PARAM_InvalidPOAName = OMNIORBMinorCode(59)
BAD_PARAM_LocalObjectExpected = OMNIORBMinorCode(60)
BAD_PARAM_InvalidSystemId = OMNIORBMinorCode(61)
BAD_PARAM_InvalidNVList = OMNIORBMinorCode(64)
BAD_PARAM_InvalidTypeCode = OMNIORBMinorCode(70)
BAD_PARAM_AnyDoesNotContainAString = OMNIORBMinorCode(72)
BAD_PARAM_EmptyContextPattern = OMNIORBMinorCode(73)
BAD_PARAM_InvalidContextName = OMNIORBMinorCode(74)
BAD_PARAM_InvalidContext = OMNIORBMinorCode(77)
BAD_PARAM_InvalidDynAny = OMNIORBMinorCode(78)
BAD_PARAM_InvalidException = OMNIORBMinorCode(80)
BAD_PARAM_InvalidExceptionList = OMNIORBMinorCode(81)
BAD_PARAM_InvalidEnvironment = OMNIORBMinorCode(82)
BAD_PARAM_InvalidAny = OMNIORBMinorCode(83)
BAD_PARAM_InvalidNamedValue = OMNIORBMinorCode(84)
BAD_PARAM_InvalidRequest = OMNIORBMinorCode(85)
BAD_PARAM_InvalidContextList = OMNIORBMinorCode(87)
BAD_PARAM_WrongPythonType = OMNIORBMinorCode(88)
BAD_PARAM_IncompletePythonType = OMNIORBMinorCode(92)
BAD_PARAM_PythonValueOutOfRange = OMNIORBMinorCode(95)
BAD_PARAM_EmbeddedNullInPythonString = OMNIORBMinorCode(96)
BAD_PARAM_StringIsTooLong = OMNIORBMinorCode(100)
BAD_PARAM_SequenceIsTooLong = OMNIORBMinorCode(101)
BAD_PARAM_WrongUnionMemberSelected = OMNIORBMinorCode(107)
BAD_PARAM_InvalidFixedPointLimits = OMNIORBMinorCode(109)
BAD_PARAM_AttemptToMarshalAbstractValue = OMNIORBMinorCode(115)
BAD_PARAM_InvalidPollerType = OMNIORBMinorCode(127)
NO_MEMORY_BadAlloc = OMNIORBMinorCode(121)
IMP_LIMIT_NoUsableProfile = OMGMinorCode(1)
COMM_FAILURE_MarshalArguments = OMNIORBMinorCode(3)
COMM_FAILURE_UnMarshalArguments = OMNIORBMinorCode(4)
COMM_FAILURE_MarshalResults = OMNIORBMinorCode(5)
COMM_FAILURE_UnMarshalResults = OMNIORBMinorCode(6)
COMM_FAILURE_WaitingForReply = OMNIORBMinorCode(7)
INV_OBJREF_WCharNotSupported = OMGMinorCode(1)
INV_OBJREF_CodesetComponentRequired = OMGMinorCode(2)
INV_OBJREF_TryToInvokePseudoRemotely = OMNIORBMinorCode(23)
INV_OBJREF_InvokeOnNilObjRef = OMNIORBMinorCode(48)
INV_OBJREF_CorruptedObjRef = OMNIORBMinorCode(54)
INV_OBJREF_InterfaceMisMatch = OMNIORBMinorCode(55)
INV_OBJREF_NoPythonTypeForPseudoObj = OMNIORBMinorCode(104)
INV_OBJREF_ShortcutServantDeactivated = OMNIORBMinorCode(117)
INV_OBJREF_IncorrectReplyHandler = OMNIORBMinorCode(125)
INV_OBJREF_InvalidPseudoObject = OMNIORBMinorCode(129)
INV_OBJREF_UnsuitableGIOPVersion = OMNIORBMinorCode(130)
MARSHAL_NoValueFactory = OMGMinorCode(1)
MARSHAL_ServerRequestWrongOrder = OMGMinorCode(2)
MARSHAL_ServerRequestNVList = OMGMinorCode(3)
MARSHAL_LocalObject = OMGMinorCode(4)
MARSHAL_WCharSentByGIOP10Client = OMGMinorCode(5)
MARSHAL_WCharSentByGIOP10Server = OMGMinorCode(6)
MARSHAL_InvalidVariableLenComponentSize = OMNIORBMinorCode(9)
MARSHAL_PassEndOfMessage = OMNIORBMinorCode(10)
MARSHAL_MessageSizeExceedLimit = OMNIORBMinorCode(69)
MARSHAL_MessageSizeExceedLimitOnClient = OMNIORBMinorCode(11)
MARSHAL_MessageSizeExceedLimitOnServer = OMNIORBMinorCode(12)
MARSHAL_SequenceIsTooLong = OMNIORBMinorCode(18)
MARSHAL_StringIsTooLong = OMNIORBMinorCode(19)
MARSHAL_WStringIsTooLong = OMNIORBMinorCode(20)
MARSHAL_StringNotEndWithNull = OMNIORBMinorCode(22)
MARSHAL_InvalidEnumValue = OMNIORBMinorCode(26)
MARSHAL_AttemptToWriteToReadOnlyBuf = OMNIORBMinorCode(28)
MARSHAL_InvalidFixedValue = OMNIORBMinorCode(42)
MARSHAL_InvalidWCharSize = OMNIORBMinorCode(47)
MARSHAL_InvalidIOR = OMNIORBMinorCode(52)
MARSHAL_ExceptionInDSINotPropagated = OMNIORBMinorCode(67)
MARSHAL_InvalidContextList = OMNIORBMinorCode(79)
MARSHAL_InvalidIndirection = OMNIORBMinorCode(90)
MARSHAL_InvalidTypeCodeKind = OMNIORBMinorCode(91)
MARSHAL_MessageTooLong = OMNIORBMinorCode(93)
MARSHAL_CannotReserveOutputSpace = OMNIORBMinorCode(110)
MARSHAL_InvalidChunkedEncoding = OMNIORBMinorCode(111)
MARSHAL_InvalidValueTag = OMNIORBMinorCode(112)
MARSHAL_NoRepoIdInValueType = OMNIORBMinorCode(113)
MARSHAL_TypeIsNotAValueType = OMNIORBMinorCode(114)
MARSHAL_InvalidCompressedData = OMNIORBMinorCode(128)
INITIALIZE_TransportError = OMNIORBMinorCode(13)
INITIALIZE_InvalidORBInitArgs = OMNIORBMinorCode(30)
INITIALIZE_FailedBOAInit = OMNIORBMinorCode(31)
INITIALIZE_FailedPOAInit = OMNIORBMinorCode(32)
INITIALIZE_FailedORBInit = OMNIORBMinorCode(44)
INITIALIZE_FailedLoadLibrary = OMNIORBMinorCode(45)
INITIALIZE_ConfigFileError = OMNIORBMinorCode(50)
INITIALIZE_NotOmniThread = OMNIORBMinorCode(56)
INITIALIZE_CannotOpenLogFile = OMNIORBMinorCode(118)
INITIALIZE_EndpointPublishFailure = OMNIORBMinorCode(119)
NO_IMPLEMENT_NoValueImpl = OMGMinorCode(1)
NO_IMPLEMENT_IncompatibleVersion = OMGMinorCode(2)
NO_IMPLEMENT_NoUsableProfile = OMGMinorCode(3)
NO_IMPLEMENT_DIIOnLocalObject = OMGMinorCode(4)
NO_IMPLEMENT_Unsupported = OMNIORBMinorCode(36)
NO_IMPLEMENT_NoPythonMethod = OMNIORBMinorCode(99)
BAD_TYPECODE_Incomplete = OMGMinorCode(1)
BAD_TYPECODE_IllegitimateMember = OMGMinorCode(2)
BAD_TYPECODE_TypeCodeIsNil = OMNIORBMinorCode(71)
BAD_TYPECODE_InvalidOperation = OMNIORBMinorCode(89)
BAD_TYPECODE_UnknownKind = OMNIORBMinorCode(94)
BAD_TYPECODE_UnresolvedRecursiveTC = OMNIORBMinorCode(105)
BAD_TYPECODE_InvalidIndirection = OMNIORBMinorCode(108)
BAD_TYPECODE_NotEquivalent = OMNIORBMinorCode(116)
BAD_OPERATION_UnRecognisedOperationName = OMNIORBMinorCode(38)
BAD_OPERATION_WrongPollerOperation = OMNIORBMinorCode(126)
BAD_INV_ORDER_DependencyPreventsDestruction = OMGMinorCode(1)
BAD_INV_ORDER_ObjectIndestructible = OMGMinorCode(2)
BAD_INV_ORDER_WouldDeadLock = OMGMinorCode(3)
BAD_INV_ORDER_ORBHasShutdown = OMGMinorCode(4)
BAD_INV_ORDER_RequestUsedMoreThanOnce = OMGMinorCode(5)
BAD_INV_ORDER_ServantManagerAlreadySet = OMGMinorCode(6)
BAD_INV_ORDER_ArgumentsCalledOutOfOrder = OMGMinorCode(7)
BAD_INV_ORDER_CtxCalledOutOfOrder = OMGMinorCode(8)
BAD_INV_ORDER_SetResultCalledOutOfOrder = OMGMinorCode(9)
BAD_INV_ORDER_RequestAlreadySent = OMGMinorCode(10)
BAD_INV_ORDER_RequestNotSentYet = OMGMinorCode(11)
BAD_INV_ORDER_ResultAlreadyReceived = OMGMinorCode(12)
BAD_INV_ORDER_RequestIsSynchronous = OMGMinorCode(13)
BAD_INV_ORDER_InvalidPortableInterceptorCall = OMGMinorCode(14)
BAD_INV_ORDER_ServiceContextIdAlreadyExists = OMGMinorCode(15)
BAD_INV_ORDER_PolicyFactoryTypeAlreadyExists = OMGMinorCode(16)
BAD_INV_ORDER_POACreationDuringDestruction = OMGMinorCode(17)
BAD_INV_ORDER_NoPollerResponseYet = OMGMinorCode(22)
BAD_INV_ORDER_CodeSetNotKnownYet = OMNIORBMinorCode(24)
BAD_INV_ORDER_ArgumentsNotCalled = OMNIORBMinorCode(65)
BAD_INV_ORDER_ErrorInDynamicImplementation = OMNIORBMinorCode(66)
BAD_INV_ORDER_RequestConfiguredOutOfOrder = OMNIORBMinorCode(86)
BAD_INV_ORDER_DynAnyNotInitialised = OMNIORBMinorCode(103)
BAD_INV_ORDER_ORBNotInitialised = OMNIORBMinorCode(120)
TRANSIENT_POANoResource = OMGMinorCode(1)
TRANSIENT_NoUsableProfile = OMGMinorCode(2)
TRANSIENT_RequestCancelled = OMGMinorCode(3)
TRANSIENT_POADestroyed = OMGMinorCode(4)
TRANSIENT_FailedOnForwarded = OMNIORBMinorCode(1)
TRANSIENT_ConnectFailed = OMNIORBMinorCode(2)
TRANSIENT_CallTimedout = OMNIORBMinorCode(8)
TRANSIENT_BiDirConnIsGone = OMNIORBMinorCode(14)
TRANSIENT_BiDirConnUsedWithNoPOA = OMNIORBMinorCode(16)
TRANSIENT_ConnectionClosed = OMNIORBMinorCode(17)
TRANSIENT_ObjDeactivatedWhileHolding = OMNIORBMinorCode(62)
TRANSIENT_PythonExceptionInORB = OMNIORBMinorCode(106)
OBJ_ADAPTER_POAUnknownAdapter = OMGMinorCode(1)
OBJ_ADAPTER_NoServant = OMGMinorCode(2)
OBJ_ADAPTER_NoDefaultServant = OMGMinorCode(3)
OBJ_ADAPTER_NoServantManager = OMGMinorCode(4)
OBJ_ADAPTER_WrongIncarnatePolicy = OMGMinorCode(5)
OBJ_ADAPTER_BiDirNotAllowed = OMNIORBMinorCode(15)
OBJ_ADAPTER_BOANotInitialised = OMNIORBMinorCode(33)
OBJ_ADAPTER_POANotInitialised = OMNIORBMinorCode(53)
OBJ_ADAPTER_ServantAlreadyActive = OMNIORBMinorCode(63)
OBJ_ADAPTER_IncompatibleServant = OMNIORBMinorCode(97)
DATA_CONVERSION_CannotMapChar = OMGMinorCode(1)
DATA_CONVERSION_RangeError = OMNIORBMinorCode(40)
DATA_CONVERSION_BadInput = OMNIORBMinorCode(41)
OBJECT_NOT_EXIST_NoMatch = OMGMinorCode(1)
OBJECT_NOT_EXIST_IncarnateFailed = OMGMinorCode(2)
OBJECT_NOT_EXIST_PollerAlreadyDeliveredReply = OMGMinorCode(5)
OBJECT_NOT_EXIST_BOANotInitialised = OMNIORBMinorCode(34)
OBJECT_NOT_EXIST_POANotInitialised = OMNIORBMinorCode(58)
OBJECT_NOT_EXIST_DynAnyDestroyed = OMNIORBMinorCode(102)
INV_POLICY_CannotReconcileIORWithOverride = OMGMinorCode(1)
INV_POLICY_InvalidPolicyType = OMGMinorCode(2)
INV_POLICY_NoPolicyFactoryForPolicyType = OMGMinorCode(3)
INTF_REPOS_NotAvailable = OMNIORBMinorCode(39)
INTF_REPOS_PrimaryInterfaceReturnedZero = OMNIORBMinorCode(68)
BAD_CONTEXT_NoMatchingProperty = OMNIORBMinorCode(75)
BAD_CONTEXT_StartingScopeNotFound = OMNIORBMinorCode(76)
TIMEOUT_NoPollerResponseInTime = OMGMinorCode(1)
TIMEOUT_CallTimedOutOnClient = OMNIORBMinorCode(8)
TIMEOUT_CallTimedOutOnServer = OMNIORBMinorCode(124)
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import a TF v1-style SavedModel when executing eagerly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.eager import context
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import function_deserialization
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import signature_serialization
from tensorflow.python.saved_model.pywrap_saved_model import metrics
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import nest
# API label for SavedModel metrics.
_LOAD_V1_V2_LABEL = "load_v1_in_v2"
class _Initializer(tracking.CapturableResource):
"""Represents an initialization operation restored from a SavedModel.
  Without this object, re-export of imported 1.x SavedModels would omit the
original SavedModel's initialization procedure.
Created when `tf.saved_model.load` loads a TF 1.x-style SavedModel with an
initialization op. This object holds a function that runs the
initialization. It does not require any manual user intervention;
`tf.saved_model.save` will see this object and automatically add it to the
exported SavedModel, and `tf.saved_model.load` runs the initialization
function automatically.
"""
def __init__(self, init_fn, asset_paths):
super(_Initializer, self).__init__()
self._asset_paths = asset_paths
self._init_fn = init_fn
def _create_resource(self):
return array_ops.placeholder(
dtype=dtypes.resource, shape=[], name="unused_resource")
def _initialize(self):
return self._init_fn(*[path.asset_path for path in self._asset_paths])
class _EagerSavedModelLoader(loader_impl.SavedModelLoader):
"""Loads a SavedModel without using Sessions."""
def get_meta_graph_def_from_tags(self, tags):
"""Override to support implicit one-MetaGraph loading with tags=None."""
if tags is None:
if len(self._saved_model.meta_graphs) != 1:
tag_sets = [mg.meta_info_def.tags
for mg in self._saved_model.meta_graphs]
raise ValueError(
"Importing a SavedModel with `tf.saved_model.load` requires a "
"`tags=` argument if there is more than one MetaGraph. Got "
f"`tags=None`, but there are {len(self._saved_model.meta_graphs)} "
f"MetaGraphs in the SavedModel with tag sets: {tag_sets}. Pass a "
"`tags=` argument to load this SavedModel.")
return self._saved_model.meta_graphs[0]
return super(_EagerSavedModelLoader, self).get_meta_graph_def_from_tags(
tags)
def load_graph(self, returns, meta_graph_def):
"""Called from wrap_function to import `meta_graph_def`."""
# pylint: disable=protected-access
saver, _ = tf_saver._import_meta_graph_with_return_elements(
meta_graph_def)
# pylint: enable=protected-access
returns[0] = saver
def _extract_saver_restore(self, wrapped, saver):
if saver is None:
return None
saver_def = saver.saver_def
filename_tensor = wrapped.graph.as_graph_element(
saver_def.filename_tensor_name)
# We both feed and fetch filename_tensor so we have an operation to use to
# feed into variable initializers (only relevant for v1 graph building).
return wrapped.prune(
feeds=[filename_tensor],
fetches=[filename_tensor,
wrapped.graph.as_graph_element(saver_def.restore_op_name)])
def restore_variables(self, wrapped, restore_from_saver):
"""Restores variables from the checkpoint."""
if restore_from_saver is not None:
initializer, _ = restore_from_saver(
constant_op.constant(self._variables_path))
if not ops.executing_eagerly_outside_functions():
# Add the initialization operation to the "saved_model_initializers"
# collection in case we don't have any lifted variables to attach it to.
ops.add_to_collection("saved_model_initializers", initializer)
one_unlifted = False
for variable in wrapped.graph.get_collection_ref(
ops.GraphKeys.GLOBAL_VARIABLES):
if variable.graph is wrapped.graph:
one_unlifted = True
# pylint: disable=protected-access
variable._initializer_op = initializer
# pylint: enable=protected-access
if one_unlifted:
          logging.warning(
              "Some variables could not be lifted out of a loaded function. "
              "Please run "
              "`sess.run(tf.get_collection(\"saved_model_initializers\"))` to "
              "restore these variables.")
def _extract_signatures(self, wrapped, meta_graph_def):
"""Creates ConcreteFunctions for signatures in `meta_graph_def`."""
signature_functions = {}
for signature_key, signature_def in meta_graph_def.signature_def.items():
if signature_def.inputs:
input_items = sorted(
signature_def.inputs.items(), key=lambda item: item[1].name)
original_input_names, input_specs = zip(*input_items)
else:
original_input_names = []
input_specs = []
# TODO(allenl): Support optional arguments
feeds = [
wrap_function._get_element_from_tensor_info(input_spec, wrapped.graph) # pylint: disable=protected-access
for input_spec in input_specs
]
input_names = []
input_tensors = []
for original_input_name, feed in zip(original_input_names, feeds):
if isinstance(feed, sparse_tensor.SparseTensor):
          # We have to give explicit names for SparseTensor arguments, because
          # these are not present in the TensorInfo.
indices_name = "%s_indices" % original_input_name
values_name = "%s_values" % original_input_name
dense_shape_name = "%s_dense_shape" % original_input_name
input_names.extend([indices_name, values_name, dense_shape_name])
input_tensors.extend([feed.indices, feed.values, feed.dense_shape])
elif isinstance(feed, composite_tensor.CompositeTensor):
component_tensors = nest.flatten(feed, expand_composites=True)
input_names.extend("%s_component_%d" % (original_input_name, n)
for n in range(len(component_tensors)))
input_tensors.extend(component_tensors)
else:
input_names.append(original_input_name)
input_tensors.append(feed)
fetches = {name: out for name, out in signature_def.outputs.items()}
try:
signature_fn = wrapped.prune(feeds=feeds, fetches=fetches)
except lift_to_graph.UnliftableError as ex:
# Mutate the exception to add a bit more detail.
args = ex.args
if not args:
message = ""
else:
message = args[0]
message = (
("A SavedModel signature needs an input for each placeholder the "
"signature's outputs use. An output for signature '{}' depends on "
"a placeholder which is not an input (i.e. the placeholder is not "
"fed a value).\n\n").format(signature_key)
+ message)
ex.args = (message,) + args[1:]
raise
# pylint: disable=protected-access
signature_fn._arg_keywords = input_names
signature_fn._func_graph.structured_input_signature = (
(),
func_graph.convert_structure_to_signature(
dict(zip(input_names, input_tensors))))
if len(input_names) == 1:
# Allowing positional arguments does not create any ambiguity if there's
# only one.
signature_fn._num_positional_args = 1
else:
signature_fn._num_positional_args = 0
# pylint: enable=protected-access
signature_functions[signature_key] = signature_fn
return signature_functions
def load(self, tags):
"""Creates an object from the MetaGraph identified by `tags`."""
meta_graph_def = self.get_meta_graph_def_from_tags(tags)
load_shared_name_suffix = "_load_{}".format(ops.uid())
functions = function_deserialization.load_function_def_library(
meta_graph_def.graph_def.library,
load_shared_name_suffix=load_shared_name_suffix)
# Replace existing functions in the MetaGraphDef with renamed functions so
# we don't have duplicates or name collisions.
meta_graph_def.graph_def.library.Clear()
for function in functions.values():
meta_graph_def.graph_def.library.function.add().CopyFrom(
function.function_def)
# We've renamed functions and shared names. We need the same operation on
# the GraphDef itself for consistency.
for node_def in meta_graph_def.graph_def.node:
function_deserialization.fix_node_def(node_def, functions,
load_shared_name_suffix)
load_graph_returns = [None]
wrapped = wrap_function.wrap_function(
functools.partial(self.load_graph, load_graph_returns, meta_graph_def),
signature=[])
saver, = load_graph_returns
restore_from_saver = self._extract_saver_restore(wrapped, saver)
self.restore_variables(wrapped, restore_from_saver)
with wrapped.graph.as_default():
init_op = loader_impl.get_init_op(
meta_graph_def) or monitored_session.Scaffold.default_local_init_op()
# Add a dummy Tensor we know we can fetch to add control dependencies to.
init_anchor = constant_op.constant(0., name="dummy_fetch")
root = tracking.AutoTrackable()
if restore_from_saver is not None:
root.restore = (
lambda path: restore_from_saver(constant_op.constant(path)))
asset_feed_tensors = []
asset_paths = []
for tensor_name, value in loader_impl.get_asset_tensors(
self._export_dir, meta_graph_def).items():
asset_feed_tensors.append(wrapped.graph.as_graph_element(tensor_name))
asset_paths.append(tracking.Asset(value))
init_fn = wrapped.prune(
feeds=asset_feed_tensors,
fetches=[init_anchor, wrapped.graph.as_graph_element(init_op)])
initializer = _Initializer(init_fn, asset_paths)
# pylint: disable=protected-access
local_init_op, _ = initializer._initialize()
# pylint: enable=protected-access
with ops.init_scope():
if not context.executing_eagerly():
ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, local_init_op)
for variable in wrapped.graph.get_collection_ref(
ops.GraphKeys.LOCAL_VARIABLES):
# pylint: disable=protected-access
variable._initializer_op = local_init_op
# pylint: enable=protected-access
root.initializer = initializer
root.asset_paths = asset_paths
signature_functions = self._extract_signatures(wrapped, meta_graph_def)
root.signatures = signature_serialization.create_signature_map(
signature_functions)
root.variables = list(wrapped.graph.variables)
root.tensorflow_version = (
meta_graph_def.meta_info_def.tensorflow_version)
root.tensorflow_git_version = (
meta_graph_def.meta_info_def.tensorflow_git_version)
root.graph = wrapped.graph
root.prune = wrapped.prune
return root
def load(export_dir, tags):
"""Load a v1-style SavedModel as an object."""
metrics.IncrementReadApi(_LOAD_V1_V2_LABEL)
loader = _EagerSavedModelLoader(export_dir)
result = loader.load(tags=tags)
metrics.IncrementRead(write_version="1")
return result
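# A minimal usage sketch (the export path, signature key and input shape below
# are assumptions, not part of this module). `tf.saved_model.load` dispatches
# to the loader above when the SavedModel on disk was written by a TF 1.x
# exporter:
#
#   import tensorflow as tf
#   imported = tf.saved_model.load("/tmp/v1_saved_model", tags=["serve"])
#   infer = imported.signatures["serving_default"]
#   outputs = infer(tf.constant([[1.0, 2.0]]))  # single-input signature assumed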
| |
from picshell.engine.core.Inst import Inst
from picshell.engine.core.InstModel import InstModel
# 16F877/876 Formater
class Format:
spAdrReg = {
0x00:'INDF',
0x01:'TMR0',
0x02:'PCL',
0x03:'STATUS',
0x04:'FSR',
0x05:'PORTA',
0x06:'PORTB',
0x07:'PORTC',
0x08:'PORTD',
0x09:'PORTE',
0x0A:'PCLATH',
0x0B:'INTCON',
0x0C:'PIR1',
0x0D:'PIR2',
0x0E:'TMR1L',
0x0F:'TMR1H',
0x10:'T1CON',
0x11:'TMR2',
0x12:'T2CON',
0x13:'SSPBUF',
0x14:'SSPCON',
0x15:'CCPR1L',
0x16:'CCPR1H',
0x17:'CCP1CON',
0x18:'RCSTA',
0x19:'TXREG',
0x1A:'RCREG',
0x1B:'CCPR2L',
0x1C:'CCPR2H',
0x1D:'CCP2CON',
0x1E:'ADRESH',
0x1F:'ADCON0',
0x81:'OPTION_REG',
0x85:'TRISA',
0x86:'TRISB',
0x87:'TRISC',
0x88:'TRISD',
0x89:'TRISE',
0x8C:'PIE1',
0x8D:'PIE2',
0x8E:'PCON',
0x91:'SSPCON2',
0x92:'PR2',
0x93:'SSPADD',
0x94:'SSPSTAT',
0x98:'TXSTA',
0x99:'SPBRG',
0x9E:'ADRESL',
0x9F:'ADCON1'}
spReg = {'INDF':0x00,
'TMR0':0x01,
'PCL':0x02,
'STATUS':0x03,
'FSR':0x04,
'PORTA':0x05,
'PORTB':0x06,
'PORTC':0x07,
'PORTD':0x08,
'PORTE':0x09,
'PCLATH':0x0A,
'INTCON':0x0B,
'PIR1':0x0C,
'PIR2':0x0D,
'TMR1L':0x0E,
'TMR1H':0x0F,
'T1CON':0x10,
'TMR2':0x11,
'T2CON':0x12,
'SSPBUF':0x13,
'SSPCON':0x14,
'CCPR1L':0x15,
'CCPR1H':0x16,
'CCP1CON':0x17,
'RCSTA':0x18,
'TXREG':0x19,
'RCREG':0x1A,
'CCPR2L':0x1B,
'CCPR2H':0x1C,
'CCP2CON':0x1D,
'ADRESH':0x1E,
'ADCON0':0x1F,
'OPTION_REG':0x81,
'TRISA':0x85,
'TRISB':0x86,
'TRISC':0x87,
'TRISD':0x88,
'TRISE':0x89,
'PIE1':0x8C,
'PIE2':0x8D,
'PCON':0x8E,
'SSPCON2':0x91,
'PR2':0x92,
'SSPADD':0x93,
'SSPSTAT':0x94,
'TXSTA':0x98,
'SPBRG':0x99,
'ADRESL':0x9E,
'ADCON1':0x9F}
def dumpInstructionListTillAddress(self,instructionList,address):
for i in range(0,address+1):
inst = instructionList[i]
self.dumpInstruction(inst)
@staticmethod
def dumpInstruction(inst):
print(Format.formatInstruction(inst, 0, False))
@staticmethod
def formatInstruction(inst,level,hex):
adresse = ""
if (hex):
adresse = "0x%04X" % inst.adresse
else:
adresse = "%d" % inst.adresse
length = len(adresse)
for i in range (6,length, -1):
adresse = " " + adresse
res = adresse + " "
res = res + Format.formatInstructionWithoutAddress(inst,level,hex)
return res
@staticmethod
def formatInstructionWithoutAddress(inst,level,hex):
res = ""
for i in range (0,level):
res = res + "| "
res = res + inst.model.mnemonic
res += " "
l = len(res)
for i in range (10,l,-1):
res += " "
if (inst.model.hasData):
if (hex):
if (inst.value > 0xFF):
res = res +"0x%X" % inst.value
else:
res = res +"0x%02X" % inst.value
else:
res = res +"%d" % inst.value
if (InstModel.TYPE_BIT == inst.model.type):
res = res +", %d" % inst.bit
if (InstModel.TYPE_A_BIT == inst.model.type):
res = res +", %d" % inst.bit
if (inst.model.mnemonic != "NOP"):
if (Inst.DEST_F == inst.dest):
res += ", f"
elif (Inst.DEST_W == inst.dest):
res += ", w"
if (InstModel.TYPE_LITERAL_FF_12 == inst.model.type):
ff = inst.dest - Inst.DEST_FSR_0
res += ", %d" % ff
if (Inst.ACCESS_RAM == inst.access):
res += ", ACCESS"
elif (Inst.ACCESS_BSR == inst.access):
res += ", BANKED"
        # end of data formatting
l = len (res)
for i in range(30,l,-1):
res = res + " "
return res
@staticmethod
def bin(n):
res = ''
        while n != 0: n, res = n >> 1, str(n & 1) + res
bin = res
# pad with leading 0
for j in range (8,len(res),-1):
bin = "0" + bin
return bin.replace("L","")
@staticmethod
def binf(n):
res1 = Format.bin(n)
res =""
if len(res1) != 8 :
for i in range(0,len(res1)):
res += res1[i]
else:
for i in range(0,len(res1)):
res += res1[i]
if ((i % 4)==3) and i<(len(res1)-1) :
res += "_"
return res
@staticmethod
def bin10(n):
res = ''
        while n != 0: n, res = n >> 1, str(n & 1) + res
bin = res
# pad with leading 0
for j in range (10,len(res),-1):
bin = "0" + bin
return bin.replace("L","")
    # Format n as 8 binary digits with the given bit highlighted in brackets.
@staticmethod
def binfx(n,bit):
res1 = Format.bin(n)
res =""
bit = 7-bit
for i in range(0,len(res1)):
if i == int(bit):
res+="["
res += res1[i]
if i == bit:
res+="]"
return res
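    # Worked examples (values follow from the methods above):
    #   Format.bin(0xA5)      -> '10100101'
    #   Format.binf(0xA5)     -> '1010_0101'
    #   Format.binfx(0xA5, 2) -> '10100[1]01'   (bit 2, counted from the LSB)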
@staticmethod
def toNumber(str,varAdrMapping=None):
if str == None :
return 0
if str =="" :
return 0
if (str.upper().startswith("0X")):
return int(str,16)
elif (str.upper().startswith("B")):
return int(str[1:],2)
else:
try:
num = int(str)
except :
num = str
#is this a token like PORTA ?
if str.upper() in Format.spReg:
num = Format.spReg[str.upper()]
elif varAdrMapping !=None:
varKey = "v_"+str
if varKey in varAdrMapping :
num = varAdrMapping[varKey]
#print "WARNING : toNumber failed on "+str+" so 0 will be used."
return num
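# Worked examples for toNumber (the varAdrMapping entry is hypothetical;
# register names come from the spReg table above):
#   Format.toNumber("0x1F")  -> 31
#   Format.toNumber("b1010") -> 10
#   Format.toNumber("PORTA") -> 5
#   Format.toNumber("count", {"v_count": 0x20}) -> 32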
| |
from django import forms
from django.conf import settings
from django.db import models
from django.db.models.fields import related
from django.utils import translation as translation_utils
from django.utils.translation.trans_real import to_language
from .hold import add_translation, make_key, save_translations
from .models import (Translation, PurifiedTranslation, LinkifiedTranslation,
NoLinksTranslation, NoLinksNoMarkupTranslation)
from .utils import to_language as amo_to_language
from .widgets import TransInput, TransTextarea
class TranslatedField(models.ForeignKey):
"""
A foreign key to the translations table.
If require_locale=False, the fallback join will not use a locale. Instead,
we will look for 1) a translation in the current locale and 2) fallback
with any translation matching the foreign key.
"""
to = Translation
def __init__(self, **kwargs):
# to_field: The field on the related object that the relation is to.
# Django wants to default to translations.autoid, but we need id.
options = dict(null=True, to_field='id', unique=True, blank=True,
on_delete=models.SET_NULL)
kwargs.update(options)
self.short = kwargs.pop('short', True)
self.require_locale = kwargs.pop('require_locale', True)
super(TranslatedField, self).__init__(self.to, **kwargs)
@property
def db_column(self):
# Django wants to call the db_column ('%s_id' % self.name), but our
# translations foreign keys aren't set up that way.
return self._db_column if hasattr(self, '_db_column') else self.name
@db_column.setter
def db_column(self, value):
# Django sets db_column=None to initialize it. I don't think anyone
# would set the db_column otherwise.
if value is not None:
self._db_column = value
def contribute_to_class(self, cls, name):
"""Add this Translation to ``cls._meta.translated_fields``."""
super(TranslatedField, self).contribute_to_class(cls, name)
# Add self to the list of translated fields.
if hasattr(cls._meta, 'translated_fields'):
cls._meta.translated_fields.append(self)
else:
cls._meta.translated_fields = [self]
# Set up a unique related name. The + means it's hidden.
self.rel.related_name = '%s_%s_set+' % (cls.__name__, name)
# Replace the normal descriptor with our custom descriptor.
setattr(cls, self.name, TranslationDescriptor(self))
def formfield(self, **kw):
widget = TransInput if self.short else TransTextarea
defaults = {'form_class': TransField, 'widget': widget}
defaults.update(kw)
return super(TranslatedField, self).formfield(**defaults)
def validate(self, value, model_instance):
# Skip ForeignKey.validate since that expects only one Translation when
# doing .get(id=id)
return models.Field.validate(self, value, model_instance)
class PurifiedField(TranslatedField):
to = PurifiedTranslation
class LinkifiedField(TranslatedField):
to = LinkifiedTranslation
class NoLinksField(TranslatedField):
to = NoLinksTranslation
class NoLinksNoMarkupField(TranslatedField):
to = NoLinksNoMarkupTranslation
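# A minimal usage sketch (the model below is hypothetical, not part of this
# module): declare translated columns on a model and connect save_signal
# (defined at the end of this module) so queued Translations are written
# alongside the instance.
#
#   class Addon(models.Model):
#       name = TranslatedField(short=True)
#       description = PurifiedField(short=False)
#
#   models.signals.pre_save.connect(save_signal, sender=Addon,
#                                   dispatch_uid='addon_translations')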
def switch(obj, new_model):
"""Switch between Translation and Purified/Linkified Translations."""
fields = [(f.name, getattr(obj, f.name)) for f in new_model._meta.fields]
return new_model(**dict(fields))
def save_on_signal(obj, trans):
"""Connect signals so the translation gets saved during obj.save()."""
signal = models.signals.pre_save
def cb(sender, instance, **kw):
if instance is obj:
is_new = trans.autoid is None
trans.save(force_insert=is_new, force_update=not is_new)
signal.disconnect(cb)
signal.connect(cb, sender=obj.__class__, weak=False)
class TranslationDescriptor(related.ReverseSingleRelatedObjectDescriptor):
"""
Descriptor that handles creating and updating Translations given strings.
"""
def __init__(self, field):
super(TranslationDescriptor, self).__init__(field)
self.model = field.rel.to
def __get__(self, instance, instance_type=None):
if instance is None:
return self
        # If Django doesn't find the value in the cache (which would only
# happen if the field was set or accessed already), it does a db query
# to follow the foreign key. We expect translations to be set by
# queryset transforms, so doing a query is the wrong thing here.
try:
return getattr(instance, self.field.get_cache_name())
except AttributeError:
return None
def __set__(self, instance, value):
lang = translation_utils.get_language()
if isinstance(value, basestring):
value = self.translation_from_string(instance, lang, value)
elif hasattr(value, 'items'):
value = self.translation_from_dict(instance, lang, value)
# Don't let this be set to None, because Django will then blank out the
# foreign key for this object. That's incorrect for translations.
if value is not None:
# We always get these back from the database as Translations, but
# we may want them to be a more specific Purified/Linkified child
# class.
if not isinstance(value, self.model):
value = switch(value, self.model)
super(TranslationDescriptor, self).__set__(instance, value)
elif getattr(instance, self.field.attname, None) is None:
super(TranslationDescriptor, self).__set__(instance, None)
def translation_from_string(self, instance, lang, string):
"""Create, save, and return a Translation from a string."""
try:
trans = getattr(instance, self.field.name)
trans_id = getattr(instance, self.field.attname)
if trans is None and trans_id is not None:
# This locale doesn't have a translation set, but there are
# translations in another locale, so we have an id already.
translation = self.model.new(string, lang, id=trans_id)
elif to_language(trans.locale) == lang.lower():
# Replace the translation in the current language.
trans.localized_string = string
translation = trans
else:
# We already have a translation in a different language.
translation = self.model.new(string, lang, id=trans.id)
except AttributeError:
# Create a brand new translation.
translation = self.model.new(string, lang)
# A new translation has been created and it might need to be saved.
        # This adds the translation to the queue of translations that need
        # to be saved for this instance.
add_translation(make_key(instance), translation)
return translation
def translation_from_dict(self, instance, lang, dict_):
"""
Create Translations from a {'locale': 'string'} mapping.
If one of the locales matches lang, that Translation will be returned.
"""
rv = None
for locale, string in dict_.items():
loc = amo_to_language(locale)
if loc not in settings.AMO_LANGUAGES + settings.HIDDEN_LANGUAGES:
continue
# The Translation is created and saved in here.
trans = self.translation_from_string(instance, locale, string)
# Set the Translation on the object because translation_from_string
# doesn't expect Translations to be created but not attached.
self.__set__(instance, trans)
# If we're setting the current locale, set it to the object so
# callers see the expected effect.
if to_language(locale) == lang:
rv = trans
return rv
class _TransField(object):
def __init__(self, *args, **kwargs):
self.default_locale = settings.LANGUAGE_CODE
for k in ('queryset', 'to_field_name'):
if k in kwargs:
del kwargs[k]
self.widget = kwargs.pop('widget', TransInput)
super(_TransField, self).__init__(*args, **kwargs)
def clean(self, value):
errors = LocaleList()
value = dict((k, v.strip() if v else v) for (k, v) in value.items())
        # Make sure the default locale is always present; the required check
        # below will then run against it (and fail if it was missing).
if self.default_locale.lower() not in value:
value[self.default_locale.lower()] = None
# Now, loop through them and validate them separately.
for locale, val in value.items():
try:
# Only the default locale can be required; all non-default
# fields are automatically optional.
if self.default_locale.lower() == locale:
super(_TransField, self).validate(val)
super(_TransField, self).run_validators(val)
except forms.ValidationError, e:
errors.extend(e.messages, locale)
if errors:
raise LocaleValidationError(errors)
return value
def _has_changed(self, initial, data):
# This used to be called on the field's widget and always returned False!
return False
class LocaleValidationError(forms.ValidationError):
def __init__(self, messages, code=None, params=None):
self.msgs = messages
@property
def messages(self):
return self.msgs
class TransField(_TransField, forms.CharField):
"""
A CharField subclass that can deal with multiple locales.
Most validators are run over the data for each locale. The required
validator is only run on the default_locale, which is hooked up to the
instance with TranslationFormMixin.
"""
@staticmethod
def adapt(cls, opts={}):
"""Get a new TransField that subclasses cls instead of CharField."""
return type('Trans%s' % cls.__name__, (_TransField, cls), opts)
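    # For example, a hypothetical adapted form field (not defined in this module):
    #   TransEmailField = TransField.adapt(forms.EmailField)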
# Dict-backed but list-like, so locale-tagged errors still work with Django's
# error handling.
class LocaleList(dict):
"""
List-like objects that maps list elements to a locale.
>>> LocaleList([1, 2], 'en')
[1, 2]
['en', 'en']
This is useful for validation error lists where we want to associate an
error with a locale.
"""
def __init__(self, seq=None, locale=None):
self.seq, self.locales = [], []
if seq:
assert seq and locale
self.extend(seq, locale)
def __iter__(self):
return iter(self.zip())
def extend(self, seq, locale):
self.seq.extend(seq)
self.locales.extend([locale] * len(seq))
def __nonzero__(self):
return bool(self.seq)
def __contains__(self, item):
return item in self.seq
def zip(self):
return zip(self.locales, self.seq)
def save_signal(sender, instance, **kw):
"""
Use this signal on a model to iterate through all the translations added
to the hold queue and save them all. Hook this up to the pre_save signal
on the model.
"""
if not kw.get('raw'):
save_translations(make_key(instance))
| |
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import netaddr
from neutron_lib import context as nctx
from neutron_lib.db import api as db_api
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
from sqlalchemy.orm import session as se
from webob import exc
from neutron.db import models_v2
from neutron.objects import ports as port_obj
from neutron.tests.unit.plugins.ml2 import test_plugin
class TestRevisionPlugin(test_plugin.Ml2PluginV2TestCase):
l3_plugin = ('neutron.tests.unit.extensions.test_extraroute.'
'TestExtraRouteL3NatServicePlugin')
_extension_drivers = ['qos']
def get_additional_service_plugins(self):
p = super(TestRevisionPlugin, self).get_additional_service_plugins()
p.update({'revision_plugin_name': 'revisions',
'qos_plugin_name': 'qos',
'tag_name': 'tag'})
return p
def setUp(self):
cfg.CONF.set_override('extension_drivers',
self._extension_drivers,
group='ml2')
super(TestRevisionPlugin, self).setUp()
self.cp = directory.get_plugin()
self.l3p = directory.get_plugin(constants.L3)
self._ctx = nctx.get_admin_context()
@property
def ctx(self):
# TODO(kevinbenton): return ctx without expire_all after switch to
# enginefacade complete. We expire_all here because the switch to
# the new engine facade is resulting in changes being spread over
# other sessions so we can end up getting stale reads in the parent
# session if objects remain in the identity map.
if not self._ctx.session.is_active:
self._ctx.session.expire_all()
return self._ctx
def test_handle_expired_object(self):
rp = directory.get_plugin('revision_plugin')
with self.port():
with self.ctx.session.begin():
ipal_objs = port_obj.IPAllocation.get_objects(self.ctx)
if not ipal_objs:
raise Exception("No IP allocations available.")
ipal_obj = ipal_objs[0]
# load port into our session
port = self.ctx.session.query(models_v2.Port).one()
# simulate concurrent delete in another session
other_ctx = nctx.get_admin_context()
other_ctx.session.delete(
other_ctx.session.query(models_v2.Port).first()
)
# expire the port so the revision bumping code will trigger a
# lookup on its attributes and encounter an ObjectDeletedError
self.ctx.session.expire(port)
rp._bump_related_revisions(self.ctx.session, ipal_obj)
def test_port_name_update_revises(self):
with self.port() as port:
rev = port['port']['revision_number']
new = {'port': {'name': 'seaweed'}}
response = self._update('ports', port['port']['id'], new)
new_rev = response['port']['revision_number']
self.assertGreater(new_rev, rev)
def test_constrained_port_update(self):
with self.port() as port:
rev = port['port']['revision_number']
new = {'port': {'name': 'nigiri'}}
for val in (rev - 1, rev + 1):
# make sure off-by ones are rejected
self._update('ports', port['port']['id'], new,
headers={'If-Match': 'revision_number=%s' % val},
expected_code=exc.HTTPPreconditionFailed.code)
after_attempt = self._show('ports', port['port']['id'])
self.assertEqual(rev, after_attempt['port']['revision_number'])
self.assertEqual(port['port']['name'],
after_attempt['port']['name'])
# correct revision should work
self._update('ports', port['port']['id'], new,
headers={'If-Match': 'revision_number=%s' % rev})
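        # For reference, the constraint exercised above corresponds to a request
        # like the following (the endpoint is illustrative only; the header
        # format comes straight from the test):
        #
        #   PUT /v2.0/ports/<port-id>
        #   If-Match: revision_number=<current revision>
        #   {"port": {"name": "nigiri"}}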
def test_constrained_port_delete(self):
with self.port() as port:
rev = port['port']['revision_number']
for val in (rev - 1, rev + 1):
# make sure off-by ones are rejected
self._delete('ports', port['port']['id'],
headers={'If-Match': 'revision_number=%s' % val},
expected_code=exc.HTTPPreconditionFailed.code)
# correct revision should work
self._delete('ports', port['port']['id'],
headers={'If-Match': 'revision_number=%s' % rev})
def test_constrained_port_update_handles_db_retries(self):
        # ensure the If-Match constraint handling survives a retriable failure
        # to commit caused by a race with another update
with self.port() as port:
rev = port['port']['revision_number']
new = {'port': {'name': 'nigiri'}}
def concurrent_increment(s):
db_api.sqla_remove(se.Session, 'before_commit',
concurrent_increment)
# slip in a concurrent update that will bump the revision
plugin = directory.get_plugin()
plugin.update_port(nctx.get_admin_context(),
port['port']['id'], new)
raise db_exc.DBDeadlock()
db_api.sqla_listen(se.Session, 'before_commit',
concurrent_increment)
self._update('ports', port['port']['id'], new,
headers={'If-Match': 'revision_number=%s' % rev},
expected_code=exc.HTTPPreconditionFailed.code)
def test_port_ip_update_revises(self):
with self.port() as port:
rev = port['port']['revision_number']
new = {'port': {'fixed_ips': port['port']['fixed_ips']}}
# ensure adding an IP allocation updates the port
next_ip = str(netaddr.IPAddress(
new['port']['fixed_ips'][0]['ip_address']) + 1)
new['port']['fixed_ips'].append({'ip_address': next_ip})
response = self._update('ports', port['port']['id'], new)
self.assertEqual(2, len(response['port']['fixed_ips']))
new_rev = response['port']['revision_number']
self.assertGreater(new_rev, rev)
# ensure deleting an IP allocation updates the port
rev = new_rev
new['port']['fixed_ips'].pop()
response = self._update('ports', port['port']['id'], new)
self.assertEqual(1, len(response['port']['fixed_ips']))
new_rev = response['port']['revision_number']
self.assertGreater(new_rev, rev)
def test_security_group_rule_ops_bump_security_group(self):
s = {'security_group': {'tenant_id': 'some_tenant', 'name': '',
'description': 's'}}
sg = self.cp.create_security_group(self.ctx, s)
s['security_group']['name'] = 'hello'
updated = self.cp.update_security_group(self.ctx, sg['id'], s)
self.assertGreater(updated['revision_number'], sg['revision_number'])
# ensure rule changes bump parent SG
r = {'security_group_rule': {'tenant_id': 'some_tenant',
'port_range_min': 80, 'protocol': 6,
'port_range_max': 90,
'remote_ip_prefix': '0.0.0.0/0',
'ethertype': 'IPv4',
'remote_group_id': None,
'direction': 'ingress',
'security_group_id': sg['id']}}
rule = self.cp.create_security_group_rule(self.ctx, r)
sg = updated
updated = self.cp.get_security_group(self.ctx, sg['id'])
self.assertGreater(updated['revision_number'], sg['revision_number'])
self.cp.delete_security_group_rule(self.ctx, rule['id'])
sg = updated
updated = self.cp.get_security_group(self.ctx, sg['id'])
self.assertGreater(updated['revision_number'], sg['revision_number'])
def test_router_interface_ops_bump_router(self):
r = {'router': {'name': 'myrouter', 'tenant_id': 'some_tenant',
'admin_state_up': True}}
router = self.l3p.create_router(self.ctx, r)
r['router']['name'] = 'yourrouter'
updated = self.l3p.update_router(self.ctx, router['id'], r)
self.assertGreater(updated['revision_number'],
router['revision_number'])
# add an intf and make sure it bumps rev
with self.subnet(tenant_id='some_tenant', cidr='10.0.1.0/24') as s:
interface_info = {'subnet_id': s['subnet']['id']}
self.l3p.add_router_interface(self.ctx, router['id'],
interface_info)
router = updated
updated = self.l3p.get_router(self.ctx, router['id'])
self.assertGreater(updated['revision_number'],
router['revision_number'])
# Add a route and make sure it bumps revision number
router = updated
body = {'router': {'routes': [{'destination': '192.168.2.0/24',
'nexthop': '10.0.1.3'}]}}
self.l3p.update_router(self.ctx, router['id'], body)
updated = self.l3p.get_router(self.ctx, router['id'])
self.assertGreater(updated['revision_number'],
router['revision_number'])
router = updated
body['router']['routes'] = []
self.l3p.update_router(self.ctx, router['id'], body)
updated = self.l3p.get_router(self.ctx, router['id'])
self.assertGreater(updated['revision_number'],
router['revision_number'])
self.l3p.remove_router_interface(self.ctx, router['id'],
interface_info)
router = updated
updated = self.l3p.get_router(self.ctx, router['id'])
self.assertGreater(updated['revision_number'],
router['revision_number'])
def test_qos_policy_bump_port_revision(self):
with self.port() as port:
rev = port['port']['revision_number']
qos_plugin = directory.get_plugin('QOS')
qos_policy = {'policy': {'id': uuidutils.generate_uuid(),
'name': "policy1",
'project_id': uuidutils.generate_uuid()}}
qos_obj = qos_plugin.create_policy(self.ctx, qos_policy)
data = {'port': {'qos_policy_id': qos_obj['id']}}
response = self._update('ports', port['port']['id'], data)
new_rev = response['port']['revision_number']
self.assertGreater(new_rev, rev)
def test_qos_policy_bump_network_revision(self):
with self.network() as network:
rev = network['network']['revision_number']
qos_plugin = directory.get_plugin('QOS')
qos_policy = {'policy': {'id': uuidutils.generate_uuid(),
'name': "policy1",
'project_id': uuidutils.generate_uuid()}}
qos_obj = qos_plugin.create_policy(self.ctx, qos_policy)
data = {'network': {'qos_policy_id': qos_obj['id']}}
response = self._update('networks', network['network']['id'], data)
new_rev = response['network']['revision_number']
self.assertGreater(new_rev, rev)
def test_net_tag_bumps_net_revision(self):
with self.network() as network:
rev = network['network']['revision_number']
tag_plugin = directory.get_plugin('TAG')
tag_plugin.update_tag(self.ctx, 'networks',
network['network']['id'], 'mytag')
updated = directory.get_plugin().get_network(
self.ctx, network['network']['id'])
self.assertGreater(updated['revision_number'], rev)
tag_plugin.delete_tag(self.ctx, 'networks',
network['network']['id'], 'mytag')
rev = updated['revision_number']
updated = directory.get_plugin().get_network(
self.ctx, network['network']['id'])
self.assertGreater(updated['revision_number'], rev)
| |
import re
import logging
from collections import OrderedDict
from builtins import str
import vertica_python.errors as errors
import vertica_python.vertica.messages as messages
from vertica_python.vertica.column import Column
logger = logging.getLogger('vertica')
class Cursor(object):
def __init__(self, connection, cursor_type=None, unicode_error='strict'):
self.connection = connection
self.cursor_type = cursor_type
self.unicode_error = unicode_error
self._closed = False
self._message = None
self.error = None
#
# dbApi properties
#
self.description = None
self.rowcount = -1
self.arraysize = 1
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.close()
#
# dbApi methods
#
def callproc(self, procname, parameters=None):
raise errors.NotSupportedError('Cursor.callproc() is not implemented')
def close(self):
self._closed = True
def execute(self, operation, parameters=None):
if self.closed():
raise errors.Error('Cursor is closed')
self.flush_to_query_ready()
if parameters:
            # optional requirement: psycopg2 is only needed when query parameters are used
from psycopg2.extensions import adapt
if isinstance(parameters, dict):
for key in parameters:
param = parameters[key]
                    # adapt() expects bytes for text values, so encode unicode strings first
if isinstance(param, str):
v = adapt(param.encode('utf8')).getquoted()
else:
v = adapt(param).getquoted()
# Using a regex with word boundary to correctly handle params with similar names
# such as :s and :start
match_str = u':%s\\b' % str(key)
operation = re.sub(match_str, v.decode('utf-8'), operation, flags=re.UNICODE)
elif isinstance(parameters, tuple):
tlist = []
for p in parameters:
if isinstance(p, str):
tlist.append(adapt(p.encode('utf8')).getquoted())
else:
tlist.append(adapt(p).getquoted())
operation = operation % tuple(tlist)
else:
raise errors.Error("Argument 'parameters' must be dict or tuple")
self.rowcount = -1
self.connection.write(messages.Query(operation))
# read messages until we hit an Error, DataRow or ReadyForQuery
while True:
message = self.connection.read_message()
# save the message because there's no way to undo the read
self._message = message
if isinstance(message, messages.ErrorResponse):
raise errors.QueryError.from_error_response(message, operation)
elif isinstance(message, messages.RowDescription):
self.description = list(map(lambda fd: Column(fd, self.unicode_error), message.fields))
elif isinstance(message, messages.DataRow):
break
elif isinstance(message, messages.ReadyForQuery):
break
else:
self.connection.process_message(message)
def fetchone(self):
if isinstance(self._message, messages.DataRow):
if self.rowcount == -1:
self.rowcount = 1
else:
self.rowcount += 1
row = self.row_formatter(self._message)
# fetch next message
self._message = self.connection.read_message()
return row
elif isinstance(self._message, messages.ReadyForQuery):
return None
elif isinstance(self._message, messages.CommandComplete):
return None
else:
self.connection.process_message(self._message)
def iterate(self):
row = self.fetchone()
while row:
yield row
row = self.fetchone()
def fetchmany(self, size=None):
if not size:
size = self.arraysize
results = []
while True:
row = self.fetchone()
if not row:
break
results.append(row)
if len(results) >= size:
break
return results
def fetchall(self):
return list(self.iterate())
def nextset(self):
        # skip any remaining data for the current set, if any exists
self.flush_to_command_complete()
if self._message is None:
return None
elif isinstance(self._message, messages.CommandComplete):
# there might be another set, read next message to find out
self._message = self.connection.read_message()
if isinstance(self._message, messages.RowDescription):
# next row will be either a DataRow or CommandComplete
self._message = self.connection.read_message()
return True
elif isinstance(self._message, messages.ReadyForQuery):
return None
else:
raise errors.Error('Unexpected nextset() state after CommandComplete: ' + str(self._message))
elif isinstance(self._message, messages.ReadyForQuery):
# no more sets left to be read
return None
else:
raise errors.Error('Unexpected nextset() state: ' + str(self._message))
def setinputsizes(self):
pass
def setoutputsize(self, size, column=None):
pass
#
# Non dbApi methods
#
def flush_to_query_ready(self):
        # unless the last message is None or ReadyForQuery, read messages until ReadyForQuery arrives
if(self._message is None
or isinstance(self._message, messages.ReadyForQuery)):
return
while True:
message = self.connection.read_message()
if isinstance(message, messages.ReadyForQuery):
self.connection.transaction_status = message.transaction_status
self._message = message
break
def flush_to_command_complete(self):
        # unless the last message is None, ReadyForQuery or CommandComplete, read until CommandComplete arrives
if(self._message is None
or isinstance(self._message, messages.ReadyForQuery)
or isinstance(self._message, messages.CommandComplete)):
return
while True:
message = self.connection.read_message()
if isinstance(message, messages.CommandComplete):
self._message = message
break
# example:
#
# with open("/tmp/file.csv", "rb") as fs:
# cursor.copy("COPY table(field1,field2) FROM STDIN DELIMITER ',' ENCLOSED BY '\"'", fs, buffer_size=65536)
#
def copy(self, sql, data, **kwargs):
if self.closed():
raise errors.Error('Cursor is closed')
self.flush_to_query_ready()
self.connection.write(messages.Query(sql))
while True:
message = self.connection.read_message()
if isinstance(message, messages.ErrorResponse):
raise errors.QueryError.from_error_response(message, sql)
self.connection.process_message(message=message)
if isinstance(message, messages.ReadyForQuery):
break
elif isinstance(message, messages.CopyInResponse):
                # send the COPY payload to the server
if not hasattr(data, "read"):
self.connection.write(messages.CopyData(data))
else:
# treat data as stream
self.connection.write(messages.CopyStream(data, **kwargs))
self.connection.write(messages.CopyDone())
if self.error is not None:
raise self.error
#
# Internal
#
def closed(self):
return self._closed or self.connection.closed()
def row_formatter(self, row_data):
if self.cursor_type is None:
return self.format_row_as_array(row_data)
elif self.cursor_type in (list, 'list'):
return self.format_row_as_array(row_data)
elif self.cursor_type in (dict, 'dict'):
return self.format_row_as_dict(row_data)
else:
raise Exception('Unrecognized cursor_type: %r' % self.cursor_type)
def format_row_as_dict(self, row_data):
return OrderedDict(
(self.description[idx].name, self.description[idx].convert(value))
for idx, value in enumerate(row_data.values)
)
def format_row_as_array(self, row_data):
return [self.description[idx].convert(value)
for idx, value in enumerate(row_data.values)]
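# Example usage (connection settings below are assumptions, not part of this
# module; named :params are substituted via psycopg2's adapt(), see execute()
# above):
#
#   import vertica_python
#   conn = vertica_python.connect(host='localhost', port=5433, user='dbadmin',
#                                 database='vmart', password='')
#   cur = conn.cursor('dict')
#   cur.execute("SELECT node_name FROM nodes WHERE node_name = :name",
#               {'name': 'node0001'})
#   for row in cur.iterate():
#       print(row)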
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class MonitorsOperations(object):
"""MonitorsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~microsoft_datadog_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_api_keys(
self,
resource_group_name, # type: str
monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DatadogApiKeyListResponse"]
"""List the api keys for a given monitor resource.
List the api keys for a given monitor resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param monitor_name: Monitor resource name.
:type monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DatadogApiKeyListResponse or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~microsoft_datadog_client.models.DatadogApiKeyListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DatadogApiKeyListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_api_keys.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DatadogApiKeyListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_api_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}/listApiKeys'} # type: ignore
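    # Example usage (a hedged sketch, not part of the generated operations class):
    # `client` is assumed to be a MicrosoftDatadogClient constructed elsewhere with a
    # credential and subscription id; "my-rg" and "my-monitor" are placeholder names.
    #
    #     for api_key in client.monitors.list_api_keys("my-rg", "my-monitor"):
    #         print(api_key.key)
    #
    # The returned ItemPaged follows next_link pages transparently, so the loop sees a
    # flat stream of DatadogApiKey items.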
def get_default_key(
self,
resource_group_name, # type: str
monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.DatadogApiKey"
"""Get the default api key.
Get the default api key.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param monitor_name: Monitor resource name.
:type monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DatadogApiKey, or the result of cls(response)
:rtype: ~microsoft_datadog_client.models.DatadogApiKey
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DatadogApiKey"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = self.get_default_key.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DatadogApiKey', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_default_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}/getDefaultKey'} # type: ignore
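    # Example (hedged sketch): the `cls` keyword documented above receives the raw
    # pipeline response, the deserialized model, and the response headers, so it can
    # be used to keep the raw response alongside the model. `client` is assumed to
    # exist as in the example above.
    #
    #     def keep_raw(pipeline_response, deserialized, response_headers):
    #         return pipeline_response, deserialized
    #
    #     raw, default_key = client.monitors.get_default_key("my-rg", "my-monitor", cls=keep_raw)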
def set_default_key(
self,
resource_group_name, # type: str
monitor_name, # type: str
body=None, # type: Optional["_models.DatadogApiKey"]
**kwargs # type: Any
):
# type: (...) -> None
"""Set the default api key.
Set the default api key.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param monitor_name: Monitor resource name.
:type monitor_name: str
:param body:
:type body: ~microsoft_datadog_client.models.DatadogApiKey
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.set_default_key.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if body is not None:
body_content = self._serialize.body(body, 'DatadogApiKey')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
set_default_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}/setDefaultKey'} # type: ignore
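    # Example (hedged sketch): setting the default key posts an optional DatadogApiKey
    # body. The import path and key value below are assumptions for illustration only.
    #
    #     from azure.mgmt.datadog.models import DatadogApiKey  # assumed import path
    #     client.monitors.set_default_key(
    #         "my-rg", "my-monitor",
    #         body=DatadogApiKey(key="<datadog-api-key>"))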
def list_hosts(
self,
resource_group_name, # type: str
monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DatadogHostListResponse"]
"""List the hosts for a given monitor resource.
List the hosts for a given monitor resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param monitor_name: Monitor resource name.
:type monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DatadogHostListResponse or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~microsoft_datadog_client.models.DatadogHostListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DatadogHostListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_hosts.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DatadogHostListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_hosts.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}/listHosts'} # type: ignore
def list_linked_resources(
self,
resource_group_name, # type: str
monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.LinkedResourceListResponse"]
"""List all Azure resources associated to the same Datadog organization as the target resource.
List all Azure resources associated to the same Datadog organization as the target resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param monitor_name: Monitor resource name.
:type monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LinkedResourceListResponse or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~microsoft_datadog_client.models.LinkedResourceListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LinkedResourceListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_linked_resources.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LinkedResourceListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_linked_resources.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}/listLinkedResources'} # type: ignore
def list_monitored_resources(
self,
resource_group_name, # type: str
monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.MonitoredResourceListResponse"]
"""List the resources currently being monitored by the Datadog monitor resource.
List the resources currently being monitored by the Datadog monitor resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param monitor_name: Monitor resource name.
:type monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either MonitoredResourceListResponse or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~microsoft_datadog_client.models.MonitoredResourceListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MonitoredResourceListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_monitored_resources.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('MonitoredResourceListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_monitored_resources.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}/listMonitoredResources'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DatadogMonitorResourceListResponse"]
"""List all monitors under the specified subscription.
List all monitors under the specified subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DatadogMonitorResourceListResponse or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~microsoft_datadog_client.models.DatadogMonitorResourceListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DatadogMonitorResourceListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DatadogMonitorResourceListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Datadog/monitors'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DatadogMonitorResourceListResponse"]
"""List all monitors under the specified resource group.
List all monitors under the specified resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DatadogMonitorResourceListResponse or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~microsoft_datadog_client.models.DatadogMonitorResourceListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DatadogMonitorResourceListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DatadogMonitorResourceListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors'} # type: ignore
def get(
self,
resource_group_name, # type: str
monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.DatadogMonitorResource"
"""Get the properties of a specific monitor resource.
Get the properties of a specific monitor resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param monitor_name: Monitor resource name.
:type monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DatadogMonitorResource, or the result of cls(response)
:rtype: ~microsoft_datadog_client.models.DatadogMonitorResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DatadogMonitorResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DatadogMonitorResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}'} # type: ignore
def _create_initial(
self,
resource_group_name, # type: str
monitor_name, # type: str
body=None, # type: Optional["_models.DatadogMonitorResource"]
**kwargs # type: Any
):
# type: (...) -> "_models.DatadogMonitorResource"
cls = kwargs.pop('cls', None) # type: ClsType["_models.DatadogMonitorResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if body is not None:
body_content = self._serialize.body(body, 'DatadogMonitorResource')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DatadogMonitorResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DatadogMonitorResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}'} # type: ignore
def begin_create(
self,
resource_group_name, # type: str
monitor_name, # type: str
body=None, # type: Optional["_models.DatadogMonitorResource"]
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.DatadogMonitorResource"]
"""Create a monitor resource.
Create a monitor resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param monitor_name: Monitor resource name.
:type monitor_name: str
:param body:
:type body: ~microsoft_datadog_client.models.DatadogMonitorResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DatadogMonitorResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~microsoft_datadog_client.models.DatadogMonitorResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DatadogMonitorResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_initial(
resource_group_name=resource_group_name,
monitor_name=monitor_name,
body=body,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DatadogMonitorResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}'} # type: ignore
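    # Example (hedged sketch): begin_create returns an LROPoller, so the initial call
    # comes back quickly and .result() blocks until ARM reports a terminal state. The
    # payload below is a minimal illustration; required fields and the import path are
    # assumptions outside this file.
    #
    #     from azure.mgmt.datadog.models import DatadogMonitorResource  # assumed
    #     poller = client.monitors.begin_create(
    #         "my-rg", "my-monitor",
    #         body=DatadogMonitorResource(location="westus2"))
    #     monitor = poller.result()  # blocks; polled via ARMPolling by default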
def _update_initial(
self,
resource_group_name, # type: str
monitor_name, # type: str
body=None, # type: Optional["_models.DatadogMonitorResourceUpdateParameters"]
**kwargs # type: Any
):
# type: (...) -> "_models.DatadogMonitorResource"
cls = kwargs.pop('cls', None) # type: ClsType["_models.DatadogMonitorResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if body is not None:
body_content = self._serialize.body(body, 'DatadogMonitorResourceUpdateParameters')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DatadogMonitorResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DatadogMonitorResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}'} # type: ignore
def begin_update(
self,
resource_group_name, # type: str
monitor_name, # type: str
body=None, # type: Optional["_models.DatadogMonitorResourceUpdateParameters"]
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.DatadogMonitorResource"]
"""Update a monitor resource.
Update a monitor resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param monitor_name: Monitor resource name.
:type monitor_name: str
:param body:
:type body: ~microsoft_datadog_client.models.DatadogMonitorResourceUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DatadogMonitorResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~microsoft_datadog_client.models.DatadogMonitorResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DatadogMonitorResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
monitor_name=monitor_name,
body=body,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DatadogMonitorResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Delete a monitor resource.
Delete a monitor resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param monitor_name: Monitor resource name.
:type monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
monitor_name=monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}'} # type: ignore
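    # Example (hedged sketch): the `continuation_token` keyword documented above lets a
    # long-running delete be resumed in another process. Durable storage of the token
    # is up to the caller; everything here is illustrative.
    #
    #     poller = client.monitors.begin_delete("my-rg", "my-monitor")
    #     token = poller.continuation_token()  # persist somewhere durable
    #     # ... later, possibly in a new process ...
    #     resumed = client.monitors.begin_delete(
    #         "my-rg", "my-monitor", continuation_token=token)
    #     resumed.wait()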
def refresh_set_password_link(
self,
resource_group_name, # type: str
monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.DatadogSetPasswordLink"
"""Refresh the set password link and return a latest one.
Refresh the set password link and return a latest one.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param monitor_name: Monitor resource name.
:type monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DatadogSetPasswordLink, or the result of cls(response)
:rtype: ~microsoft_datadog_client.models.DatadogSetPasswordLink
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DatadogSetPasswordLink"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = self.refresh_set_password_link.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DatadogSetPasswordLink', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
refresh_set_password_link.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}/refreshSetPasswordLink'} # type: ignore
| |
# Copyright (c) 2017 Ansible by Red Hat
# All Rights Reserved.
import sys
import time
import json
import random
from django.utils import timezone
from django.core.management.base import BaseCommand
from awx.main.models import (
UnifiedJob,
Job,
AdHocCommand,
ProjectUpdate,
InventoryUpdate,
SystemJob
)
from awx.main.consumers import emit_channel_notification
from awx.api.serializers import (
JobEventWebSocketSerializer,
AdHocCommandEventWebSocketSerializer,
ProjectUpdateEventWebSocketSerializer,
InventoryUpdateEventWebSocketSerializer,
SystemJobEventWebSocketSerializer
)
class JobStatusLifeCycle():
def emit_job_status(self, job, status):
# {"status": "successful", "project_id": 13, "unified_job_id": 659, "group_name": "jobs"}
job.websocket_emit_status(status)
def determine_job_event_finish_status_index(self, job_event_count, random_seed):
if random_seed == 0:
return job_event_count - 1
random.seed(random_seed)
job_event_index = random.randint(0, job_event_count - 1)
return job_event_index
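    # Illustrative note (hedged): with random_seed == 0 the final job status is emitted
    # after the last event; any other seed picks a deterministic index in
    # [0, job_event_count - 1]. For example, with job_event_count=10 and random_seed=3:
    #
    #     random.seed(3)
    #     random.randint(0, 9)  # same index every run for the same seed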
class ReplayJobEvents(JobStatusLifeCycle):
recording_start = None
replay_start = None
def now(self):
return timezone.now()
def start(self, first_event_created):
self.recording_start = first_event_created
self.replay_start = self.now()
def lateness(self, now, created):
time_passed = now - self.recording_start
job_event_time = created - self.replay_start
return (time_passed - job_event_time).total_seconds()
def get_job(self, job_id):
try:
unified_job = UnifiedJob.objects.get(id=job_id)
except UnifiedJob.DoesNotExist:
print("UnifiedJob {} not found.".format(job_id))
sys.exit(1)
return unified_job.get_real_instance()
def sleep(self, seconds):
time.sleep(seconds)
def replay_elapsed(self):
return (self.now() - self.replay_start)
def recording_elapsed(self, created):
return (created - self.recording_start)
def replay_offset(self, created, speed):
return self.replay_elapsed().total_seconds() - (self.recording_elapsed(created).total_seconds() * (1.0 / speed))
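    # Worked example (hedged): replay_offset is wall-clock time since the replay
    # started minus the recorded elapsed time scaled by 1/speed. With
    # replay_elapsed = 7.0s, recording_elapsed = 10.0s and speed = 2.0:
    #
    #     offset = 7.0 - (10.0 * (1.0 / 2.0))  # = 2.0 seconds
    #
    # run() below uses this value to decide how long to sleep before emitting the
    # next recorded event.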
def get_job_events(self, job):
if type(job) is Job:
job_events = job.job_events.order_by('created')
elif type(job) is AdHocCommand:
job_events = job.ad_hoc_command_events.order_by('created')
elif type(job) is ProjectUpdate:
job_events = job.project_update_events.order_by('created')
elif type(job) is InventoryUpdate:
job_events = job.inventory_update_events.order_by('created')
        elif type(job) is SystemJob:
            job_events = job.system_job_events.order_by('created')
        else:
            raise RuntimeError("Job is of type {} and replay is not yet supported.".format(type(job)))
        count = job_events.count()
if count == 0:
raise RuntimeError("No events for job id {}".format(job.id))
return job_events, count
def get_serializer(self, job):
if type(job) is Job:
return JobEventWebSocketSerializer
elif type(job) is AdHocCommand:
return AdHocCommandEventWebSocketSerializer
elif type(job) is ProjectUpdate:
return ProjectUpdateEventWebSocketSerializer
elif type(job) is InventoryUpdate:
return InventoryUpdateEventWebSocketSerializer
elif type(job) is SystemJob:
return SystemJobEventWebSocketSerializer
        else:
            raise RuntimeError("Job is of type {} and replay is not yet supported.".format(type(job)))
def run(self, job_id, speed=1.0, verbosity=0, skip_range=[], random_seed=0, final_status_delay=0, debug=False):
stats = {
'events_ontime': {
'total': 0,
'percentage': 0,
},
'events_late': {
'total': 0,
'percentage': 0,
'lateness_total': 0,
'lateness_average': 0,
},
'events_total': 0,
'events_distance_total': 0,
'events_distance_average': 0,
'recording_start': 0,
'recording_end': 0,
'recording_duration': 0,
'replay_start': 0,
'replay_end': 0,
'replay_duration': 0,
}
try:
job = self.get_job(job_id)
job_events, job_event_count = self.get_job_events(job)
serializer = self.get_serializer(job)
except RuntimeError as e:
print("{}".format(e.message))
sys.exit(1)
je_previous = None
self.emit_job_status(job, 'pending')
self.emit_job_status(job, 'waiting')
self.emit_job_status(job, 'running')
finish_status_index = self.determine_job_event_finish_status_index(job_event_count, random_seed)
for n, je_current in enumerate(job_events):
if je_current.counter in skip_range:
continue
if debug:
                input("{} of {}:".format(n, job_event_count))
if not je_previous:
stats['recording_start'] = je_current.created
self.start(je_current.created)
stats['replay_start'] = self.replay_start
je_previous = je_current
je_serialized = serializer(je_current).data
emit_channel_notification('{}-{}'.format(je_serialized['group_name'], job.id), je_serialized)
replay_offset = self.replay_offset(je_previous.created, speed)
recording_diff = (je_current.created - je_previous.created).total_seconds() * (1.0 / speed)
stats['events_distance_total'] += recording_diff
if verbosity >= 3:
print("recording: next job in {} seconds".format(recording_diff))
if replay_offset >= 0:
replay_diff = recording_diff - replay_offset
if replay_diff > 0:
stats['events_ontime']['total'] += 1
if verbosity >= 3:
print("\treplay: sleep for {} seconds".format(replay_diff))
self.sleep(replay_diff)
else:
stats['events_late']['total'] += 1
stats['events_late']['lateness_total'] += (replay_diff * -1)
if verbosity >= 3:
print("\treplay: too far behind to sleep {} seconds".format(replay_diff))
else:
replay_offset = self.replay_offset(je_current.created, speed)
stats['events_late']['lateness_total'] += (replay_offset * -1)
stats['events_late']['total'] += 1
if verbosity >= 3:
print("\treplay: behind by {} seconds".format(replay_offset))
stats['events_total'] += 1
je_previous = je_current
if n == finish_status_index:
if final_status_delay != 0:
self.sleep(final_status_delay)
self.emit_job_status(job, job.status)
if stats['events_total'] > 2:
stats['replay_end'] = self.now()
stats['replay_duration'] = (stats['replay_end'] - stats['replay_start']).total_seconds()
stats['replay_start'] = stats['replay_start'].isoformat()
stats['replay_end'] = stats['replay_end'].isoformat()
stats['recording_end'] = je_current.created
stats['recording_duration'] = (stats['recording_end'] - stats['recording_start']).total_seconds()
stats['recording_start'] = stats['recording_start'].isoformat()
stats['recording_end'] = stats['recording_end'].isoformat()
stats['events_ontime']['percentage'] = (stats['events_ontime']['total'] / float(stats['events_total'])) * 100.00
stats['events_late']['percentage'] = (stats['events_late']['total'] / float(stats['events_total'])) * 100.00
stats['events_distance_average'] = stats['events_distance_total'] / stats['events_total']
            if stats['events_late']['total']:
                stats['events_late']['lateness_average'] = stats['events_late']['lateness_total'] / stats['events_late']['total']
else:
stats = {'events_total': stats['events_total']}
if verbosity >= 2:
print(json.dumps(stats, indent=4, sort_keys=True))
class Command(BaseCommand):
    help = 'Replay job events over websockets, ordered by their created date.'
def _parse_slice_range(self, slice_arg):
slice_arg = tuple([int(n) for n in slice_arg.split(':')])
slice_obj = slice(*slice_arg)
start = slice_obj.start or 0
stop = slice_obj.stop or -1
step = slice_obj.step or 1
return range(start, stop, step)
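    # Example (hedged): --skip-range is parsed with slice semantics, so '100:200:2'
    # skips every other event counter from 100 through 198, while the default
    # '0:-1:1' produces an empty range (nothing skipped):
    #
    #     self._parse_slice_range('100:200:2')  # -> range(100, 200, 2)
    #     self._parse_slice_range('0:-1:1')     # -> range(0, -1) -> empty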
def add_arguments(self, parser):
parser.add_argument('--job_id', dest='job_id', type=int, metavar='j',
help='Id of the job to replay (job or adhoc)')
parser.add_argument('--speed', dest='speed', type=float, metavar='s',
help='Speedup factor.')
parser.add_argument('--skip-range', dest='skip_range', type=str, metavar='k',
default='0:-1:1', help='Range of events to skip')
        parser.add_argument('--random-seed', dest='random_seed', type=int, metavar='r',
                            default=0, help='Random number generator seed used to pick the job_event index at which to emit the final job status')
parser.add_argument('--final-status-delay', dest='final_status_delay', type=float, metavar='f',
default=0, help='Delay between event and final status emit')
        parser.add_argument('--debug', dest='debug', action='store_true',
                            default=False, help='Enable step mode to control emission of job events one at a time.')
def handle(self, *args, **options):
job_id = options.get('job_id')
speed = options.get('speed') or 1
verbosity = options.get('verbosity') or 0
random_seed = options.get('random_seed')
final_status_delay = options.get('final_status_delay')
debug = options.get('debug')
skip = self._parse_slice_range(options.get('skip_range'))
replayer = ReplayJobEvents()
replayer.run(job_id, speed=speed, verbosity=verbosity, skip_range=skip, random_seed=random_seed,
final_status_delay=final_status_delay, debug=debug)
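# Example invocation (hedged sketch): as an AWX management command this module is run
# through Django's management entry point inside an AWX environment; the job id is a
# placeholder.
#
#     awx-manage replay_job_events --job_id 42 --speed 2.0 --verbosity 3
#
# --skip-range accepts slice syntax (see _parse_slice_range above) and --debug pauses
# for input before each event is emitted.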
| |
#
# Test script for the curses module
#
# This script doesn't actually display anything very coherent, but it
# does call (nearly) every method and function.
#
# Functions not tested: {def,reset}_{shell,prog}_mode, getch(), getstr(),
# init_color()
# Only called, not tested: getmouse(), ungetmouse()
#
import os
import string
import sys
import tempfile
import unittest
from test.support import requires, import_module, verbose
# Optionally test curses module. This currently requires that the
# 'curses' resource be given on the regrtest command line using the -u
# option. If not available, nothing after this line will be executed.
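# For example, on a CPython checkout a typical invocation looks like this
# (illustrative; the interpreter path depends on the build):
#
#     ./python -m test -u curses test_curses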
import inspect
requires('curses')
# If either of these don't exist, skip the tests.
curses = import_module('curses')
import_module('curses.panel')
import_module('curses.ascii')
import_module('curses.textpad')
def requires_curses_func(name):
return unittest.skipUnless(hasattr(curses, name),
'requires curses.%s' % name)
term = os.environ.get('TERM')
# If newterm was supported we could use it instead of initscr and not exit
@unittest.skipIf(not term or term == 'unknown',
"$TERM=%r, calling initscr() may cause exit" % term)
@unittest.skipIf(sys.platform == "cygwin",
"cygwin's curses mostly just hangs")
class TestCurses(unittest.TestCase):
@classmethod
def setUpClass(cls):
if not sys.__stdout__.isatty():
            # Temporarily skip tests on non-tty
raise unittest.SkipTest('sys.__stdout__ is not a tty')
cls.tmp = tempfile.TemporaryFile()
fd = cls.tmp.fileno()
else:
cls.tmp = None
fd = sys.__stdout__.fileno()
# testing setupterm() inside initscr/endwin
# causes terminal breakage
curses.setupterm(fd=fd)
@classmethod
def tearDownClass(cls):
if cls.tmp:
cls.tmp.close()
del cls.tmp
def setUp(self):
if verbose:
# just to make the test output a little more readable
print()
self.stdscr = curses.initscr()
curses.savetty()
def tearDown(self):
curses.resetty()
curses.endwin()
def test_window_funcs(self):
"Test the methods of windows"
stdscr = self.stdscr
win = curses.newwin(10,10)
win = curses.newwin(5,5, 5,5)
win2 = curses.newwin(15,15, 5,5)
for meth in [stdscr.addch, stdscr.addstr]:
for args in [('a'), ('a', curses.A_BOLD),
(4,4, 'a'), (5,5, 'a', curses.A_BOLD)]:
with self.subTest(meth=meth.__qualname__, args=args):
meth(*args)
for meth in [stdscr.box, stdscr.clear, stdscr.clrtobot,
stdscr.clrtoeol, stdscr.cursyncup, stdscr.delch,
stdscr.deleteln, stdscr.erase, stdscr.getbegyx,
stdscr.getbkgd, stdscr.getkey, stdscr.getmaxyx,
stdscr.getparyx, stdscr.getyx, stdscr.inch,
stdscr.insertln, stdscr.instr, stdscr.is_wintouched,
win.noutrefresh, stdscr.redrawwin, stdscr.refresh,
stdscr.standout, stdscr.standend, stdscr.syncdown,
stdscr.syncup, stdscr.touchwin, stdscr.untouchwin]:
with self.subTest(meth=meth.__qualname__):
meth()
stdscr.addnstr('1234', 3)
stdscr.addnstr('1234', 3, curses.A_BOLD)
stdscr.addnstr(4,4, '1234', 3)
stdscr.addnstr(5,5, '1234', 3, curses.A_BOLD)
stdscr.attron(curses.A_BOLD)
stdscr.attroff(curses.A_BOLD)
stdscr.attrset(curses.A_BOLD)
stdscr.bkgd(' ')
stdscr.bkgd(' ', curses.A_REVERSE)
stdscr.bkgdset(' ')
stdscr.bkgdset(' ', curses.A_REVERSE)
win.border(65, 66, 67, 68,
69, 70, 71, 72)
win.border('|', '!', '-', '_',
'+', '\\', '#', '/')
with self.assertRaises(TypeError,
msg="Expected win.border() to raise TypeError"):
win.border(65, 66, 67, 68,
69, [], 71, 72)
stdscr.clearok(1)
win4 = stdscr.derwin(2,2)
win4 = stdscr.derwin(1,1, 5,5)
win4.mvderwin(9,9)
stdscr.echochar('a')
stdscr.echochar('a', curses.A_BOLD)
stdscr.hline('-', 5)
stdscr.hline('-', 5, curses.A_BOLD)
stdscr.hline(1,1,'-', 5)
stdscr.hline(1,1,'-', 5, curses.A_BOLD)
stdscr.idcok(1)
stdscr.idlok(1)
stdscr.immedok(1)
stdscr.insch('c')
stdscr.insdelln(1)
stdscr.insnstr('abc', 3)
stdscr.insnstr('abc', 3, curses.A_BOLD)
stdscr.insnstr(5, 5, 'abc', 3)
stdscr.insnstr(5, 5, 'abc', 3, curses.A_BOLD)
stdscr.insstr('def')
stdscr.insstr('def', curses.A_BOLD)
stdscr.insstr(5, 5, 'def')
stdscr.insstr(5, 5, 'def', curses.A_BOLD)
stdscr.is_linetouched(0)
stdscr.keypad(1)
stdscr.leaveok(1)
stdscr.move(3,3)
win.mvwin(2,2)
stdscr.nodelay(1)
stdscr.notimeout(1)
win2.overlay(win)
win2.overwrite(win)
win2.overlay(win, 1, 2, 2, 1, 3, 3)
win2.overwrite(win, 1, 2, 2, 1, 3, 3)
stdscr.redrawln(1,2)
stdscr.scrollok(1)
stdscr.scroll()
stdscr.scroll(2)
stdscr.scroll(-3)
stdscr.move(12, 2)
stdscr.setscrreg(10,15)
win3 = stdscr.subwin(10,10)
win3 = stdscr.subwin(10,10, 5,5)
stdscr.syncok(1)
stdscr.timeout(5)
stdscr.touchline(5,5)
stdscr.touchline(5,5,0)
stdscr.vline('a', 3)
stdscr.vline('a', 3, curses.A_STANDOUT)
stdscr.chgat(5, 2, 3, curses.A_BLINK)
stdscr.chgat(3, curses.A_BOLD)
stdscr.chgat(5, 8, curses.A_UNDERLINE)
stdscr.chgat(curses.A_BLINK)
stdscr.refresh()
stdscr.vline(1,1, 'a', 3)
stdscr.vline(1,1, 'a', 3, curses.A_STANDOUT)
if hasattr(curses, 'resize'):
stdscr.resize()
if hasattr(curses, 'enclose'):
stdscr.enclose()
self.assertRaises(ValueError, stdscr.getstr, -400)
self.assertRaises(ValueError, stdscr.getstr, 2, 3, -400)
self.assertRaises(ValueError, stdscr.instr, -2)
self.assertRaises(ValueError, stdscr.instr, 2, 3, -2)
def test_module_funcs(self):
"Test module-level functions"
for func in [curses.baudrate, curses.beep, curses.can_change_color,
curses.cbreak, curses.def_prog_mode, curses.doupdate,
curses.filter, curses.flash, curses.flushinp,
curses.has_colors, curses.has_ic, curses.has_il,
curses.isendwin, curses.killchar, curses.longname,
curses.nocbreak, curses.noecho, curses.nonl,
curses.noqiflush, curses.noraw,
curses.reset_prog_mode, curses.termattrs,
curses.termname, curses.erasechar, curses.getsyx]:
with self.subTest(func=func.__qualname__):
func()
# Functions that actually need arguments
if curses.tigetstr("cnorm"):
curses.curs_set(1)
curses.delay_output(1)
curses.echo() ; curses.echo(1)
with tempfile.TemporaryFile() as f:
self.stdscr.putwin(f)
f.seek(0)
curses.getwin(f)
curses.halfdelay(1)
curses.intrflush(1)
curses.meta(1)
curses.napms(100)
curses.newpad(50,50)
win = curses.newwin(5,5)
win = curses.newwin(5,5, 1,1)
curses.nl() ; curses.nl(1)
curses.putp(b'abc')
curses.qiflush()
curses.raw() ; curses.raw(1)
curses.setsyx(5,5)
curses.tigetflag('hc')
curses.tigetnum('co')
curses.tigetstr('cr')
curses.tparm(b'cr')
curses.typeahead(sys.__stdin__.fileno())
curses.unctrl('a')
curses.ungetch('a')
curses.use_env(1)
# Functions only available on a few platforms
def test_colors_funcs(self):
if not curses.has_colors():
self.skipTest('requires colors support')
curses.start_color()
curses.init_pair(2, 1,1)
curses.color_content(1)
curses.color_pair(2)
curses.pair_content(curses.COLOR_PAIRS - 1)
curses.pair_number(0)
if hasattr(curses, 'use_default_colors'):
curses.use_default_colors()
@requires_curses_func('keyname')
def test_keyname(self):
curses.keyname(13)
@requires_curses_func('has_key')
def test_has_key(self):
curses.has_key(13)
@requires_curses_func('getmouse')
def test_getmouse(self):
(availmask, oldmask) = curses.mousemask(curses.BUTTON1_PRESSED)
if availmask == 0:
self.skipTest('mouse stuff not available')
curses.mouseinterval(10)
# just verify these don't cause errors
curses.ungetmouse(0, 0, 0, 0, curses.BUTTON1_PRESSED)
m = curses.getmouse()
def test_userptr_without_set(self):
w = curses.newwin(10, 10)
p = curses.panel.new_panel(w)
# try to access userptr() before calling set_userptr() -- segfaults
with self.assertRaises(curses.panel.error,
msg='userptr should fail since not set'):
p.userptr()
def test_userptr_memory_leak(self):
w = curses.newwin(10, 10)
p = curses.panel.new_panel(w)
obj = object()
nrefs = sys.getrefcount(obj)
for i in range(100):
p.set_userptr(obj)
p.set_userptr(None)
self.assertEqual(sys.getrefcount(obj), nrefs,
"set_userptr leaked references")
def test_userptr_segfault(self):
panel = curses.panel.new_panel(self.stdscr)
class A:
def __del__(self):
panel.set_userptr(None)
panel.set_userptr(A())
panel.set_userptr(None)
def test_new_curses_panel(self):
panel = curses.panel.new_panel(self.stdscr)
self.assertRaises(TypeError, type(panel))
@requires_curses_func('is_term_resized')
def test_is_term_resized(self):
curses.is_term_resized(*self.stdscr.getmaxyx())
@requires_curses_func('resize_term')
def test_resize_term(self):
curses.resize_term(*self.stdscr.getmaxyx())
@requires_curses_func('resizeterm')
def test_resizeterm(self):
stdscr = self.stdscr
lines, cols = curses.LINES, curses.COLS
new_lines = lines - 1
new_cols = cols + 1
curses.resizeterm(new_lines, new_cols)
self.assertEqual(curses.LINES, new_lines)
self.assertEqual(curses.COLS, new_cols)
def test_issue6243(self):
curses.ungetch(1025)
self.stdscr.getkey()
@requires_curses_func('unget_wch')
def test_unget_wch(self):
stdscr = self.stdscr
encoding = stdscr.encoding
for ch in ('a', '\xe9', '\u20ac', '\U0010FFFF'):
try:
ch.encode(encoding)
except UnicodeEncodeError:
continue
try:
curses.unget_wch(ch)
except Exception as err:
self.fail("unget_wch(%a) failed with encoding %s: %s"
% (ch, stdscr.encoding, err))
read = stdscr.get_wch()
self.assertEqual(read, ch)
code = ord(ch)
curses.unget_wch(code)
read = stdscr.get_wch()
self.assertEqual(read, ch)
def test_issue10570(self):
b = curses.tparm(curses.tigetstr("cup"), 5, 3)
self.assertIs(type(b), bytes)
def test_encoding(self):
stdscr = self.stdscr
import codecs
encoding = stdscr.encoding
codecs.lookup(encoding)
with self.assertRaises(TypeError):
stdscr.encoding = 10
stdscr.encoding = encoding
with self.assertRaises(TypeError):
del stdscr.encoding
def test_issue21088(self):
stdscr = self.stdscr
#
# http://bugs.python.org/issue21088
#
# the bug:
# when converting curses.window.addch to Argument Clinic
# the first two parameters were switched.
# if someday we can represent the signature of addch
# we will need to rewrite this test.
try:
signature = inspect.signature(stdscr.addch)
self.assertFalse(signature)
except ValueError:
# not generating a signature is fine.
pass
# So. No signature for addch.
# But Argument Clinic gave us a human-readable equivalent
# as the first line of the docstring. So we parse that,
# and ensure that the parameters appear in the correct order.
# Since this is parsing output from Argument Clinic, we can
# be reasonably certain the generated parsing code will be
# correct too.
human_readable_signature = stdscr.addch.__doc__.split("\n")[0]
self.assertIn("[y, x,]", human_readable_signature)
def test_issue13051(self):
stdscr = self.stdscr
box = curses.textpad.Textbox(stdscr, insert_mode=True)
lines, cols = stdscr.getmaxyx()
stdscr.resize(lines-2, cols-2)
# this may cause infinite recursion, leading to a RuntimeError
box._insert_printable_char('a')
class MiscTests(unittest.TestCase):
@requires_curses_func('update_lines_cols')
def test_update_lines_cols(self):
# this doesn't actually test that LINES and COLS are updated,
# because we can't automate changing them. See Issue #4254 for
# a manual test script. We can only test that the function
# can be called.
curses.update_lines_cols()
class TestAscii(unittest.TestCase):
def test_controlnames(self):
for name in curses.ascii.controlnames:
self.assertTrue(hasattr(curses.ascii, name), name)
def test_ctypes(self):
def check(func, expected):
with self.subTest(ch=c, func=func):
self.assertEqual(func(i), expected)
self.assertEqual(func(c), expected)
for i in range(256):
c = chr(i)
b = bytes([i])
check(curses.ascii.isalnum, b.isalnum())
check(curses.ascii.isalpha, b.isalpha())
check(curses.ascii.isdigit, b.isdigit())
check(curses.ascii.islower, b.islower())
check(curses.ascii.isspace, b.isspace())
check(curses.ascii.isupper, b.isupper())
check(curses.ascii.isascii, i < 128)
check(curses.ascii.ismeta, i >= 128)
check(curses.ascii.isctrl, i < 32)
check(curses.ascii.iscntrl, i < 32 or i == 127)
check(curses.ascii.isblank, c in ' \t')
check(curses.ascii.isgraph, 32 < i <= 126)
check(curses.ascii.isprint, 32 <= i <= 126)
check(curses.ascii.ispunct, c in string.punctuation)
check(curses.ascii.isxdigit, c in string.hexdigits)
for i in (-2, -1, 256, sys.maxunicode, sys.maxunicode+1):
self.assertFalse(curses.ascii.isalnum(i))
self.assertFalse(curses.ascii.isalpha(i))
self.assertFalse(curses.ascii.isdigit(i))
self.assertFalse(curses.ascii.islower(i))
self.assertFalse(curses.ascii.isspace(i))
self.assertFalse(curses.ascii.isupper(i))
self.assertFalse(curses.ascii.isascii(i))
self.assertFalse(curses.ascii.isctrl(i))
self.assertFalse(curses.ascii.iscntrl(i))
self.assertFalse(curses.ascii.isblank(i))
self.assertFalse(curses.ascii.isgraph(i))
self.assertFalse(curses.ascii.isprint(i))
self.assertFalse(curses.ascii.ispunct(i))
self.assertFalse(curses.ascii.isxdigit(i))
self.assertFalse(curses.ascii.ismeta(-1))
def test_ascii(self):
ascii = curses.ascii.ascii
self.assertEqual(ascii('\xc1'), 'A')
self.assertEqual(ascii('A'), 'A')
self.assertEqual(ascii(ord('\xc1')), ord('A'))
def test_ctrl(self):
ctrl = curses.ascii.ctrl
self.assertEqual(ctrl('J'), '\n')
self.assertEqual(ctrl('\n'), '\n')
self.assertEqual(ctrl('@'), '\0')
self.assertEqual(ctrl(ord('J')), ord('\n'))
def test_alt(self):
alt = curses.ascii.alt
self.assertEqual(alt('\n'), '\x8a')
self.assertEqual(alt('A'), '\xc1')
self.assertEqual(alt(ord('A')), 0xc1)
def test_unctrl(self):
unctrl = curses.ascii.unctrl
self.assertEqual(unctrl('a'), 'a')
self.assertEqual(unctrl('A'), 'A')
self.assertEqual(unctrl(';'), ';')
self.assertEqual(unctrl(' '), ' ')
self.assertEqual(unctrl('\x7f'), '^?')
self.assertEqual(unctrl('\n'), '^J')
self.assertEqual(unctrl('\0'), '^@')
self.assertEqual(unctrl(ord('A')), 'A')
self.assertEqual(unctrl(ord('\n')), '^J')
# Meta-bit characters
self.assertEqual(unctrl('\x8a'), '!^J')
self.assertEqual(unctrl('\xc1'), '!A')
self.assertEqual(unctrl(ord('\x8a')), '!^J')
self.assertEqual(unctrl(ord('\xc1')), '!A')
if __name__ == '__main__':
unittest.main()
| |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from nose.plugins.attrib import attr
from nose.tools import assert_raises, raises
import numpy as np
from numpy.random import RandomState
from ..core import (corr,
two_sample,
two_sample_shift,
two_sample_conf_int,
one_sample)
def test_corr():
prng = RandomState(42)
x = prng.randint(5, size=10)
y = x
res1 = corr(x, y, seed=prng)
res2 = corr(x, y)
np.testing.assert_equal(len(res1), 5)
np.testing.assert_equal(len(res2), 5)
np.testing.assert_equal(res1[0], res2[0])
np.testing.assert_equal(res1[1], res2[1])
#np.testing.assert_equal(res1[2], res2[2])
#np.testing.assert_equal(res1[3], res2[3])
y = prng.randint(5, size=10)
res1 = corr(x, y, seed=prng)
res2 = corr(x, y)
np.testing.assert_equal(len(res1), 5)
np.testing.assert_equal(len(res2), 5)
np.testing.assert_equal(res1[0], res2[0])
#np.testing.assert_equal(res1[1], res2[1])
#np.testing.assert_equal(res1[2], res2[2])
#np.testing.assert_equal(res1[3], res2[3])
@attr('slow')
def test_two_sample():
prng = RandomState(42)
# Normal-normal, different means examples
x = prng.normal(1, size=20)
y = prng.normal(4, size=20)
res = two_sample(x, y, seed=42)
expected = (1.0, -2.90532344604777)
np.testing.assert_almost_equal(res, expected)
# This one has keep_dist = True
y = prng.normal(1.4, size=20)
res = two_sample(x, y, seed=42)
res2 = two_sample(x, y, seed=42, keep_dist=True)
expected = (0.96975, -0.54460818906623765)
np.testing.assert_approx_equal(res[0], expected[0], 2)
np.testing.assert_equal(res[1], expected[1])
np.testing.assert_approx_equal(res2[0], expected[0], 2)
np.testing.assert_equal(res2[1], expected[1])
# Normal-normal, same means
y = prng.normal(1, size=20)
res = two_sample(x, y, seed=42)
expected = (0.66505000000000003, -0.13990200413154097)
np.testing.assert_approx_equal(res[0], expected[0], 2)
np.testing.assert_equal(res[1], expected[1])
# Check the permutation distribution
res = two_sample(x, y, seed=42, keep_dist=True)
expected_pv = 0.66505000000000003
expected_ts = -0.13990200413154097
exp_dist_firstfive = [0.08939649,
-0.26323896,
0.15428355,
-0.0294264,
0.03318078]
np.testing.assert_approx_equal(res[0], expected_pv, 2)
np.testing.assert_equal(res[1], expected_ts)
np.testing.assert_equal(len(res[2]), 100000)
np.testing.assert_almost_equal(res[2][:5], exp_dist_firstfive)
# Define a custom test statistic: two-sample Kolmogorov-Smirnov distance
f = lambda u, v: np.max(
[abs(sum(u <= val) / len(u) - sum(v <= val) / len(v))
for val in np.concatenate([u, v])])
res = two_sample(x, y, seed=42, stat=f, reps=100)
expected = (0.68, 0.20000000000000007)
np.testing.assert_equal(res[0], expected[0])
np.testing.assert_equal(res[1], expected[1])
def test_two_sample_shift():
prng = RandomState(42)
# Normal-normal, different means examples
x = prng.normal(1, size=20)
y = prng.normal(4, size=20)
f = lambda u: u - 3
finv = lambda u: u + 3
f_err = lambda u: 2 * u
f_err_inv = lambda u: u / 2
expected_ts = -2.9053234460477784
# Test null with shift other than zero
res = two_sample_shift(x, y, seed=42, shift=2)
np.testing.assert_equal(res[0], 1)
np.testing.assert_equal(res[1], expected_ts)
res2 = two_sample_shift(x, y, seed=42, shift=2, keep_dist=True)
np.testing.assert_equal(res2[0], 1)
np.testing.assert_equal(res2[1], expected_ts)
np.testing.assert_almost_equal(res2[2][:3], np.array(
[1.55886506, 0.87281296, 1.13611123]))
res = two_sample_shift(x, y, seed=42, shift=2, alternative="less")
np.testing.assert_equal(res[0], 0)
np.testing.assert_equal(res[1], expected_ts)
# Test null with shift -3
res = two_sample_shift(x, y, seed=42, shift=(f, finv))
np.testing.assert_equal(res[0], 0.38074999999999998)
np.testing.assert_equal(res[1], expected_ts)
res = two_sample_shift(x, y, seed=42, shift=(f, finv), alternative="less")
np.testing.assert_almost_equal(res[0], 0.61925)
np.testing.assert_equal(res[1], expected_ts)
# Test null with multiplicative shift
res = two_sample_shift(x, y, seed=42,
shift=(f_err, f_err_inv), alternative="two-sided")
np.testing.assert_equal(res[0], 0)
np.testing.assert_equal(res[1], expected_ts)
# Define a custom test statistic: difference of sample maxima
f = lambda u, v: np.max(u) - np.max(v)
res = two_sample(x, y, seed=42, stat=f, reps=100)
expected = (1, -3.2730653690015465)
np.testing.assert_equal(res[0], expected[0])
np.testing.assert_equal(res[1], expected[1])
@raises(ValueError)
def test_two_sample_bad_shift():
# Break it with a bad shift
x = np.array(range(5))
y = np.array(range(1, 6))
shift = lambda u: u + 3
two_sample_shift(x, y, seed=5, shift=shift)
@attr('slow')
def test_two_sample_conf_int():
prng = RandomState(42)
# Shift is -1
x = np.array(range(5))
y = np.array(range(1, 6))
res = two_sample_conf_int(x, y, seed=prng)
expected_ci = (-3.5, 1.012957978810817)
np.testing.assert_almost_equal(res, expected_ci)
res = two_sample_conf_int(x, y, seed=prng, alternative="upper")
expected_ci = (-5, 1)
np.testing.assert_almost_equal(res, expected_ci)
res = two_sample_conf_int(x, y, seed=prng, alternative="lower")
expected_ci = (-3, 5)
np.testing.assert_almost_equal(res, expected_ci)
# Specify shift with a function pair
shift = (lambda u, d: u + d, lambda u, d: u - d)
res = two_sample_conf_int(x, y, seed=5, shift=shift)
np.testing.assert_almost_equal(res, (-3.5, 1))
# Specify shift with a multiplicative pair
shift = (lambda u, d: u * d, lambda u, d: u / d)
res = two_sample_conf_int(x, y, seed=5, shift=shift)
np.testing.assert_almost_equal(res, (-1, -1))
@raises(AssertionError)
def test_two_sample_conf_int_bad_shift():
# Break it with a bad shift
x = np.array(range(5))
y = np.array(range(1, 6))
shift = (lambda u, d: -d * u, lambda u, d: -u / d)
two_sample_conf_int(x, y, seed=5, shift=shift)
def test_one_sample():
prng = RandomState(42)
x = np.array(range(5))
y = x - 1
# case 1: one sample only
res = one_sample(x, seed=42, reps=100)
np.testing.assert_almost_equal(res[0], 0.05999999)
np.testing.assert_equal(res[1], 2)
# case 2: paired sample
res = one_sample(x, y, seed=42, reps=100)
np.testing.assert_equal(res[0], 0.02)
np.testing.assert_equal(res[1], 1)
# case 3: break it - supply x and y, but not paired
y = np.append(y, 10)
assert_raises(ValueError, one_sample, x, y)
# case 4: say keep_dist=True
res = one_sample(x, seed=42, reps=100, keep_dist=True)
np.testing.assert_almost_equal(res[0], 0.05999999)
np.testing.assert_equal(res[1], 2)
np.testing.assert_equal(min(res[2]), -2)
np.testing.assert_equal(max(res[2]), 2)
np.testing.assert_equal(np.median(res[2]), 0)
# case 5: use t as test statistic
y = x + prng.normal(size=5)
res = one_sample(x, y, seed=42, reps=100, stat="t", alternative="less")
np.testing.assert_almost_equal(res[0], 0.05)
np.testing.assert_almost_equal(res[1], -1.4491883)
| |
import logging
from django.contrib.gis.geos import GEOSGeometry
from django.utils import translation
from django.utils.translation import gettext as _
from django.contrib.gis.geos import LineString
from django.conf import settings
from django.db import connection
import pygal
from pygal.style import LightSolarizedStyle
logger = logging.getLogger(__name__)
class AltimetryHelper:
@classmethod
def elevation_profile(cls, geometry3d, precision=None, offset=0):
"""Extract elevation profile from a 3D geometry.
:precision: geometry sampling in meters
"""
precision = precision or settings.ALTIMETRIC_PROFILE_PRECISION
if geometry3d.geom_type == 'Point':
return [[0, geometry3d.x, geometry3d.y, geometry3d.z]]
if geometry3d.geom_type == 'MultiLineString':
profile = []
for subcoords in geometry3d.coords:
subline = LineString(subcoords, srid=geometry3d.srid)
offset += subline.length
subprofile = AltimetryHelper.elevation_profile(subline, precision, offset)
profile.extend(subprofile)
return profile
# Add measure to 2D version of geometry3d
# Get distance from origin for each vertex
sql = """
WITH line2d AS (SELECT ST_Force2D('%(ewkt)s'::geometry) AS geom),
line_measure AS (SELECT ST_Addmeasure(geom, 0, ST_length(geom)) AS geom FROM line2d),
points2dm AS (SELECT (ST_DumpPoints(geom)).geom AS point FROM line_measure)
SELECT (%(offset)s + ST_M(point)) FROM points2dm;
""" % {'offset': offset, 'ewkt': geometry3d.ewkt}
cursor = connection.cursor()
cursor.execute(sql)
pointsm = cursor.fetchall()
# Join (offset+distance, x, y, z) together
geom3dapi = geometry3d.transform(settings.API_SRID, clone=True)
assert len(pointsm) == len(geom3dapi.coords), 'Cannot map distance to xyz'
dxyz = [pointsm[i] + v for i, v in enumerate(geom3dapi.coords)]
return dxyz
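# Minimal usage sketch (hedged; assumes a configured Django/PostGIS project and a
# hypothetical 3D LineString expressed in settings.SRID):
#   line = LineString((0, 0, 10), (50, 0, 18), (100, 0, 25), srid=settings.SRID)
#   profile = AltimetryHelper.elevation_profile(line)
#   # profile is a list of (distance_from_origin, x, y, z) tuples, with x/y
#   # reprojected to settings.API_SRID.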
@classmethod
def altimetry_limits(cls, profile):
elevations = [int(v[3]) for v in profile]
min_elevation = int(min(elevations))
max_elevation = int(max(elevations))
# Note: round(x, 100) rounds to 100 decimal places and is a no-op on integers,
# so the limits are simply the measured range widened by a 100 m margin.
floor_elevation = min_elevation - 100
ceil_elevation = max_elevation + 100
if ceil_elevation < floor_elevation + settings.ALTIMETRIC_PROFILE_MIN_YSCALE:
ceil_elevation = floor_elevation + settings.ALTIMETRIC_PROFILE_MIN_YSCALE
return ceil_elevation, floor_elevation
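# Illustration of the limits above: elevations ranging from 432 m to 587 m give
# (687, 332); if that span is narrower than ALTIMETRIC_PROFILE_MIN_YSCALE, the
# ceiling is raised to floor_elevation + ALTIMETRIC_PROFILE_MIN_YSCALE.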
@classmethod
def profile_svg(cls, profile, language):
"""
Plot the altimetric graph in SVG using PyGal.
Most of the work here goes into preparing nice label scales.
"""
ceil_elevation, floor_elevation = cls.altimetry_limits(profile)
config = dict(show_legend=False,
print_values=False,
show_dots=False,
zero=floor_elevation,
value_formatter=lambda v: '%d' % v,
margin=settings.ALTIMETRIC_PROFILE_FONTSIZE,
width=settings.ALTIMETRIC_PROFILE_WIDTH,
height=settings.ALTIMETRIC_PROFILE_HEIGHT,
title_font_size=settings.ALTIMETRIC_PROFILE_FONTSIZE,
label_font_size=0.8 * settings.ALTIMETRIC_PROFILE_FONTSIZE,
major_label_font_size=settings.ALTIMETRIC_PROFILE_FONTSIZE,
js=[])
style = LightSolarizedStyle
style.background = settings.ALTIMETRIC_PROFILE_BACKGROUND
style.colors = (settings.ALTIMETRIC_PROFILE_COLOR,)
style.font_family = settings.ALTIMETRIC_PROFILE_FONT
line_chart = pygal.XY(fill=True, style=style, **config)
if language:
translation.activate(language)
line_chart.x_title = _("Distance (m)")
line_chart.y_title = _("Altitude (m)")
line_chart.show_minor_x_labels = False
line_chart.x_labels_major_count = 5
line_chart.show_minor_y_labels = False
line_chart.truncate_label = 50
line_chart.range = [floor_elevation, ceil_elevation]
line_chart.no_data_text = _("Altimetry data not available")
translation.deactivate()
line_chart.add('', [(int(v[0]), int(v[3])) for v in profile])
return line_chart.render()
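# Typical call chain (hedged sketch; pygal's render() returns the SVG document):
#   profile = AltimetryHelper.elevation_profile(line)
#   svg = AltimetryHelper.profile_svg(profile, language='en')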
@classmethod
def _nice_extent(cls, geom):
xmin, ymin, xmax, ymax = geom.extent
amplitude = max(xmax - xmin, ymax - ymin)
geom_buffer = geom.envelope.buffer(amplitude * settings.ALTIMETRIC_AREA_MARGIN)
xmin, ymin, xmax, ymax = geom_buffer.extent
width = xmax - xmin
height = ymax - ymin
xcenter = xmin + width / 2.0
ycenter = ymin + height / 2.0
min_ratio = 1 / 1.618 # golden ratio
if width > height:
height = max(width * min_ratio, height)
else:
width = max(height * min_ratio, width)
xmin, ymin, xmax, ymax = (int(xcenter - width / 2.0),
int(ycenter - height / 2.0),
int(xcenter + width / 2.0),
int(ycenter + height / 2.0))
return (xmin, ymin, xmax, ymax)
@classmethod
def elevation_area(cls, geom):
xmin, ymin, xmax, ymax = cls._nice_extent(geom)
width = xmax - xmin
height = ymax - ymin
precision = settings.ALTIMETRIC_PROFILE_PRECISION
max_resolution = settings.ALTIMETRIC_AREA_MAX_RESOLUTION
if width / precision > max_resolution:
precision = int(width / max_resolution)
if height / precision > max_resolution:
precision = int(height / max_resolution)
if height < precision or width < precision:
precision = min([height, width])
sql = """
-- Author: Celian Garcia
WITH columns AS (
SELECT generate_series({xmin}::int, {xmax}::int, {precision}) AS x
),
lines AS (
SELECT generate_series({ymin}::int, {ymax}::int, {precision}) AS y
),
resolution AS (
SELECT x, y
FROM (SELECT COUNT(x) AS x FROM columns) AS col,
(SELECT COUNT(y) AS y FROM lines) AS lin
),
points2d AS (
SELECT row_number() OVER () AS id,
ST_SetSRID(ST_MakePoint(x, y), {srid}) AS geom,
ST_Transform(ST_SetSRID(ST_MakePoint(x, y), {srid}), 4326) AS geomll
FROM lines, columns
),
draped AS (
SELECT id, ST_Value(altimetry_dem.rast, p.geom)::int AS altitude
FROM altimetry_dem, points2d AS p
WHERE ST_Intersects(altimetry_dem.rast, p.geom)
),
all_draped AS (
SELECT geomll, geom, altitude
FROM points2d LEFT JOIN draped ON (points2d.id = draped.id)
ORDER BY points2d.id
),
extent_latlng AS (
SELECT ST_Envelope(ST_Union(geom)) AS extent,
MIN(altitude) AS min_z,
MAX(altitude) AS max_z,
AVG(altitude) AS center_z
FROM all_draped
)
SELECT extent,
ST_transform(extent, 4326),
center_z,
min_z,
max_z,
resolution.x AS resolution_w,
resolution.y AS resolution_h,
altitude
FROM extent_latlng, resolution, all_draped;
""".format(xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax,
srid=settings.SRID, precision=precision)
cursor = connection.cursor()
cursor.execute(sql)
result = cursor.fetchall()
first = result[0]
envelop_native, envelop, center_z, min_z, max_z, resolution_w, resolution_h, a = first
envelop = GEOSGeometry(envelop, srid=4326)
envelop_native = GEOSGeometry(envelop_native, srid=settings.SRID)
if center_z is None:
logger.warning("No DEM present")
return {}
altitudes = []
row = []
for i, record in enumerate(result):
if i > 0 and i % resolution_w == 0:
altitudes.append(row)
row = []
elevation = (record[7] or 0.0) - min_z
row.append(elevation)
altitudes.append(row)
area = {
'center': {
'x': envelop_native.centroid.x,
'y': envelop_native.centroid.y,
'lat': envelop.centroid.y,
'lng': envelop.centroid.x,
'z': int(center_z)
},
'resolution': {
'x': resolution_w,
'y': resolution_h,
'step': precision
},
'size': {
'x': envelop_native.coords[0][2][0] - envelop_native.coords[0][0][0],
'y': envelop_native.coords[0][2][1] - envelop_native.coords[0][0][1],
'lat': envelop.coords[0][2][0] - envelop.coords[0][0][0],
'lng': envelop.coords[0][2][1] - envelop.coords[0][0][1]
},
'extent': {
'altitudes': {
'min': min_z,
'max': max_z
},
'southwest': {'lat': envelop.coords[0][0][1],
'lng': envelop.coords[0][0][0],
'x': envelop_native.coords[0][0][0],
'y': envelop_native.coords[0][0][1]},
'northwest': {'lat': envelop.coords[0][1][1],
'lng': envelop.coords[0][1][0],
'x': envelop_native.coords[0][1][0],
'y': envelop_native.coords[0][1][1]},
'northeast': {'lat': envelop.coords[0][2][1],
'lng': envelop.coords[0][2][0],
'x': envelop_native.coords[0][2][0],
'y': envelop_native.coords[0][2][1]},
'southeast': {'lat': envelop.coords[0][3][1],
'lng': envelop.coords[0][3][0],
'x': envelop_native.coords[0][3][0],
'y': envelop_native.coords[0][3][1]}
},
'altitudes': altitudes
}
return area
| |
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Functions for type handling and type conversion (Blink/C++ <-> V8/JS).
Extends IdlType and IdlUnionType with V8-specific properties, methods, and
class methods.
Spec:
http://www.w3.org/TR/WebIDL/#es-type-mapping
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import posixpath
from idl_types import IdlTypeBase, IdlType, IdlUnionType, IdlArrayOrSequenceType, IdlNullableType
import v8_attributes # for IdlType.constructor_type_name
from v8_globals import includes
################################################################################
# V8-specific handling of IDL types
################################################################################
NON_WRAPPER_TYPES = frozenset([
'Dictionary',
'EventHandler',
'EventListener',
'NodeFilter',
'SerializedScriptValue',
])
TYPED_ARRAY_TYPES = frozenset([
'Float32Array',
'Float64Array',
'Int8Array',
'Int16Array',
'Int32Array',
'Uint8Array',
'Uint8ClampedArray',
'Uint16Array',
'Uint32Array',
])
ARRAY_BUFFER_AND_VIEW_TYPES = TYPED_ARRAY_TYPES.union(frozenset([
'ArrayBuffer',
'ArrayBufferView',
'DataView',
'SharedArrayBuffer',
]))
IdlType.is_array_buffer_or_view = property(
lambda self: self.base_type in ARRAY_BUFFER_AND_VIEW_TYPES)
IdlType.is_typed_array = property(
lambda self: self.base_type in TYPED_ARRAY_TYPES)
IdlType.is_wrapper_type = property(
lambda self: (self.is_interface_type and
not self.is_callback_interface and
self.base_type not in NON_WRAPPER_TYPES))
################################################################################
# C++ types
################################################################################
CPP_TYPE_SAME_AS_IDL_TYPE = set([
'double',
'float',
'long long',
'unsigned long long',
])
CPP_INT_TYPES = set([
'byte',
'long',
'short',
])
CPP_UNSIGNED_TYPES = set([
'octet',
'unsigned int',
'unsigned long',
'unsigned short',
])
CPP_SPECIAL_CONVERSION_RULES = {
'Date': 'double',
'Dictionary': 'Dictionary',
'EventHandler': 'EventListener*',
'NodeFilter': 'RefPtrWillBeRawPtr<NodeFilter>',
'Promise': 'ScriptPromise',
'ScriptValue': 'ScriptValue',
# FIXME: Eliminate custom bindings for XPathNSResolver http://crbug.com/345529
'XPathNSResolver': 'RawPtr<XPathNSResolver>',
'boolean': 'bool',
'unrestricted double': 'double',
'unrestricted float': 'float',
}
def cpp_type(idl_type, extended_attributes=None, raw_type=False, used_as_rvalue_type=False, used_as_variadic_argument=False, used_in_cpp_sequence=False):
"""Returns C++ type corresponding to IDL type.
|idl_type| is an IdlType instance; the return value is a string.
|extended_attributes| is a dict of IDL extended attributes that can affect the
chosen C++ type (e.g. string handling and FlexibleArrayBufferView).
Args:
idl_type:
IdlType
raw_type:
bool, True if idl_type's raw/primitive C++ type should be returned.
used_as_rvalue_type:
bool, True if the C++ type is used as an argument or the return
type of a method.
used_as_variadic_argument:
bool, True if the C++ type is used as a variadic argument of a method.
used_in_cpp_sequence:
bool, True if the C++ type is used as an element of a container.
Containers can be an array, a sequence or a dictionary.
"""
def string_mode():
if extended_attributes.get('TreatNullAs') == 'EmptyString':
return 'TreatNullAsEmptyString'
if idl_type.is_nullable or extended_attributes.get('TreatNullAs') == 'NullString':
if extended_attributes.get('TreatUndefinedAs') == 'NullString':
return 'TreatNullAndUndefinedAsNullString'
return 'TreatNullAsNullString'
return ''
extended_attributes = extended_attributes or {}
idl_type = idl_type.preprocessed_type
# Array or sequence types
if used_as_variadic_argument:
native_array_element_type = idl_type
else:
native_array_element_type = idl_type.native_array_element_type
if native_array_element_type:
vector_type = cpp_ptr_type('Vector', 'HeapVector', native_array_element_type.gc_type)
vector_template_type = cpp_template_type(vector_type, native_array_element_type.cpp_type_args(used_in_cpp_sequence=True))
if used_as_rvalue_type:
return 'const %s&' % vector_template_type
return vector_template_type
# Simple types
base_idl_type = idl_type.base_type
if base_idl_type in CPP_TYPE_SAME_AS_IDL_TYPE:
return base_idl_type
if base_idl_type in CPP_INT_TYPES:
return 'int'
if base_idl_type in CPP_UNSIGNED_TYPES:
return 'unsigned'
if base_idl_type in CPP_SPECIAL_CONVERSION_RULES:
return CPP_SPECIAL_CONVERSION_RULES[base_idl_type]
if base_idl_type in NON_WRAPPER_TYPES:
return ('PassRefPtr<%s>' if used_as_rvalue_type else 'RefPtr<%s>') % base_idl_type
if idl_type.is_string_type:
if not raw_type:
return 'String'
return 'V8StringResource<%s>' % string_mode()
if idl_type.base_type == 'ArrayBufferView' and 'FlexibleArrayBufferView' in extended_attributes:
return 'FlexibleArrayBufferView'
if idl_type.base_type in TYPED_ARRAY_TYPES and 'FlexibleArrayBufferView' in extended_attributes:
return 'Flexible' + idl_type.base_type + 'View'
if idl_type.is_interface_type:
implemented_as_class = idl_type.implemented_as
if raw_type or (used_as_rvalue_type and idl_type.is_garbage_collected):
return implemented_as_class + '*'
new_type = 'Member' if used_in_cpp_sequence else 'RawPtr'
ptr_type = cpp_ptr_type(('PassRefPtr' if used_as_rvalue_type else 'RefPtr'), new_type, idl_type.gc_type)
return cpp_template_type(ptr_type, implemented_as_class)
if idl_type.is_dictionary:
return base_idl_type
if idl_type.is_union_type:
# Avoid "AOrNullOrB" for cpp type of (A? or B) because we generate
# V8AOrBOrNull to handle null for (A? or B), (A or B?) and (A or B)?
def member_cpp_name(idl_type):
if idl_type.is_nullable:
return idl_type.inner_type.name
return idl_type.name
idl_type_name = "Or".join(member_cpp_name(member)
for member in idl_type.member_types)
return 'const %s&' % idl_type_name if used_as_rvalue_type else idl_type_name
# Default, assume native type is a pointer with same type name as idl type
return base_idl_type + '*'
def cpp_type_initializer(idl_type):
"""Returns a string containing a C++ initialization statement for the
corresponding type.
|idl_type| argument is of type IdlType.
"""
base_idl_type = idl_type.base_type
if idl_type.native_array_element_type:
return ''
if idl_type.is_numeric_type:
return ' = 0'
if base_idl_type == 'boolean':
return ' = false'
if (base_idl_type in NON_WRAPPER_TYPES or
base_idl_type in CPP_SPECIAL_CONVERSION_RULES or
base_idl_type == 'any' or
idl_type.is_string_type or
idl_type.is_enum):
return ''
return ' = nullptr'
# Allow access as idl_type.cpp_type if no arguments
IdlTypeBase.cpp_type = property(cpp_type)
IdlTypeBase.cpp_type_initializer = property(cpp_type_initializer)
IdlTypeBase.cpp_type_args = cpp_type
IdlUnionType.cpp_type_initializer = ''
IdlArrayOrSequenceType.native_array_element_type = property(
lambda self: self.element_type)
def cpp_template_type(template, inner_type):
"""Returns C++ template specialized to type."""
format_string = '{template}<{inner_type}>'
return format_string.format(template=template, inner_type=inner_type)
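# For example, cpp_template_type('RefPtr', 'NodeFilter') -> 'RefPtr<NodeFilter>'.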
def cpp_ptr_type(old_type, new_type, gc_type):
if gc_type == 'GarbageCollectedObject':
return new_type
if gc_type == 'WillBeGarbageCollectedObject':
if old_type == 'Vector':
return 'WillBe' + new_type
return old_type + 'WillBe' + new_type
return old_type
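# Illustrative mappings implied by the rules above:
#   cpp_ptr_type('RefPtr', 'Member', 'GarbageCollectedObject')           -> 'Member'
#   cpp_ptr_type('RefPtr', 'Member', 'WillBeGarbageCollectedObject')     -> 'RefPtrWillBeMember'
#   cpp_ptr_type('Vector', 'HeapVector', 'WillBeGarbageCollectedObject') -> 'WillBeHeapVector'
#   cpp_ptr_type('RefPtr', 'Member', 'RefCountedObject')                 -> 'RefPtr'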
def v8_type(interface_name):
return 'V8' + interface_name
# [ImplementedAs]
# This handles [ImplementedAs] on interface types, not [ImplementedAs] in the
# interface being generated. e.g., given:
# Foo.idl: interface Foo {attribute Bar bar};
# Bar.idl: [ImplementedAs=Zork] interface Bar {};
# when generating bindings for Foo, the [ImplementedAs] on Bar is needed.
# This data is external to Foo.idl, and hence computed as global information in
# compute_interfaces_info.py to avoid having to parse IDLs of all used interfaces.
IdlType.implemented_as_interfaces = {}
def implemented_as(idl_type):
base_idl_type = idl_type.base_type
if base_idl_type in IdlType.implemented_as_interfaces:
return IdlType.implemented_as_interfaces[base_idl_type]
return base_idl_type
IdlType.implemented_as = property(implemented_as)
IdlType.set_implemented_as_interfaces = classmethod(
lambda cls, new_implemented_as_interfaces:
cls.implemented_as_interfaces.update(new_implemented_as_interfaces))
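# Example, using the hypothetical Foo/Bar/Zork case above:
#   IdlType.set_implemented_as_interfaces({'Bar': 'Zork'})
#   IdlType('Bar').implemented_as  # -> 'Zork'
#   IdlType('Foo').implemented_as  # -> 'Foo' (falls back to the IDL name)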
# [GarbageCollected]
IdlType.garbage_collected_types = set()
IdlType.is_garbage_collected = property(
lambda self: self.base_type in IdlType.garbage_collected_types)
IdlType.set_garbage_collected_types = classmethod(
lambda cls, new_garbage_collected_types:
cls.garbage_collected_types.update(new_garbage_collected_types))
# [WillBeGarbageCollected]
IdlType.will_be_garbage_collected_types = set()
IdlType.is_will_be_garbage_collected = property(
lambda self: self.base_type in IdlType.will_be_garbage_collected_types)
IdlType.set_will_be_garbage_collected_types = classmethod(
lambda cls, new_will_be_garbage_collected_types:
cls.will_be_garbage_collected_types.update(new_will_be_garbage_collected_types))
def gc_type(idl_type):
if idl_type.is_garbage_collected or idl_type.is_dictionary or idl_type.is_union_type:
return 'GarbageCollectedObject'
if idl_type.is_will_be_garbage_collected:
return 'WillBeGarbageCollectedObject'
return 'RefCountedObject'
IdlTypeBase.gc_type = property(gc_type)
def is_traceable(idl_type):
return (idl_type.is_garbage_collected
or idl_type.is_will_be_garbage_collected
or idl_type.is_dictionary)
IdlTypeBase.is_traceable = property(is_traceable)
IdlUnionType.is_traceable = property(lambda self: True)
IdlArrayOrSequenceType.is_traceable = property(
lambda self: self.element_type.is_traceable)
################################################################################
# Includes
################################################################################
def includes_for_cpp_class(class_name, relative_dir_posix):
return set([posixpath.join('bindings', relative_dir_posix, class_name + '.h')])
INCLUDES_FOR_TYPE = {
'object': set(),
'ArrayBufferView': set(['bindings/core/v8/V8ArrayBufferView.h',
'core/dom/FlexibleArrayBufferView.h']),
'Dictionary': set(['bindings/core/v8/Dictionary.h']),
'EventHandler': set(['bindings/core/v8/V8AbstractEventListener.h',
'bindings/core/v8/V8EventListenerList.h']),
'EventListener': set(['bindings/core/v8/BindingSecurity.h',
'bindings/core/v8/V8EventListenerList.h',
'core/frame/LocalDOMWindow.h']),
'HTMLCollection': set(['bindings/core/v8/V8HTMLCollection.h',
'core/dom/ClassCollection.h',
'core/dom/TagCollection.h',
'core/html/HTMLCollection.h',
'core/html/HTMLDataListOptionsCollection.h',
'core/html/HTMLFormControlsCollection.h',
'core/html/HTMLTableRowsCollection.h']),
'NodeList': set(['bindings/core/v8/V8NodeList.h',
'core/dom/NameNodeList.h',
'core/dom/NodeList.h',
'core/dom/StaticNodeList.h',
'core/html/LabelsNodeList.h']),
'Promise': set(['bindings/core/v8/ScriptPromise.h']),
'SerializedScriptValue': set(['bindings/core/v8/SerializedScriptValue.h',
'bindings/core/v8/SerializedScriptValueFactory.h']),
'ScriptValue': set(['bindings/core/v8/ScriptValue.h']),
}
def includes_for_type(idl_type, extended_attributes=None):
idl_type = idl_type.preprocessed_type
extended_attributes = extended_attributes or {}
# Simple types
base_idl_type = idl_type.base_type
if base_idl_type in INCLUDES_FOR_TYPE:
return INCLUDES_FOR_TYPE[base_idl_type]
if idl_type.base_type in TYPED_ARRAY_TYPES:
return INCLUDES_FOR_TYPE['ArrayBufferView'].union(
set(['bindings/%s/v8/V8%s.h' % (component_dir[base_idl_type], base_idl_type)])
)
if idl_type.is_basic_type:
return set()
if base_idl_type.endswith('ConstructorConstructor'):
# FIXME: rename to NamedConstructor
# FIXME: replace with a [NamedConstructorAttribute] extended attribute
# Ending with 'ConstructorConstructor' indicates a named constructor,
# and these do not have header files, as they are part of the generated
# bindings for the interface
return set()
if base_idl_type.endswith('Constructor'):
# FIXME: replace with a [ConstructorAttribute] extended attribute
base_idl_type = idl_type.constructor_type_name
if base_idl_type not in component_dir:
return set()
return set(['bindings/%s/v8/V8%s.h' % (component_dir[base_idl_type],
base_idl_type)])
IdlType.includes_for_type = includes_for_type
def includes_for_union_type(idl_type, extended_attributes=None):
return set.union(*[member_type.includes_for_type(extended_attributes)
for member_type in idl_type.member_types])
IdlUnionType.includes_for_type = includes_for_union_type
def includes_for_array_or_sequence_type(idl_type, extended_attributes=None):
return idl_type.element_type.includes_for_type(extended_attributes)
IdlArrayOrSequenceType.includes_for_type = includes_for_array_or_sequence_type
def add_includes_for_type(idl_type, extended_attributes=None):
includes.update(idl_type.includes_for_type(extended_attributes))
IdlTypeBase.add_includes_for_type = add_includes_for_type
def includes_for_interface(interface_name):
return IdlType(interface_name).includes_for_type()
def add_includes_for_interface(interface_name):
includes.update(includes_for_interface(interface_name))
def impl_should_use_nullable_container(idl_type):
return not(idl_type.cpp_type_has_null_value)
IdlTypeBase.impl_should_use_nullable_container = property(
impl_should_use_nullable_container)
def impl_includes_for_type(idl_type, interfaces_info):
includes_for_type = set()
if idl_type.impl_should_use_nullable_container:
includes_for_type.add('bindings/core/v8/Nullable.h')
idl_type = idl_type.preprocessed_type
native_array_element_type = idl_type.native_array_element_type
if native_array_element_type:
includes_for_type.update(impl_includes_for_type(
native_array_element_type, interfaces_info))
includes_for_type.add('wtf/Vector.h')
base_idl_type = idl_type.base_type
if idl_type.is_string_type:
includes_for_type.add('wtf/text/WTFString.h')
if base_idl_type in interfaces_info:
interface_info = interfaces_info[idl_type.base_type]
if interface_info['include_path']:
includes_for_type.add(interface_info['include_path'])
if base_idl_type in INCLUDES_FOR_TYPE:
includes_for_type.update(INCLUDES_FOR_TYPE[base_idl_type])
if idl_type.is_typed_array:
return set(['core/dom/DOMTypedArray.h'])
return includes_for_type
def impl_includes_for_type_union(idl_type, interfaces_info):
includes_for_type = set()
for member_type in idl_type.member_types:
includes_for_type.update(member_type.impl_includes_for_type(interfaces_info))
return includes_for_type
IdlTypeBase.impl_includes_for_type = impl_includes_for_type
IdlUnionType.impl_includes_for_type = impl_includes_for_type_union
component_dir = {}
def set_component_dirs(new_component_dirs):
component_dir.update(new_component_dirs)
################################################################################
# V8 -> C++
################################################################################
V8_VALUE_TO_CPP_VALUE = {
# Basic
'Date': 'toCoreDate({isolate}, {v8_value})',
'DOMString': '{v8_value}',
'ByteString': 'toByteString({isolate}, {arguments})',
'USVString': 'toUSVString({isolate}, {arguments})',
'boolean': 'toBoolean({isolate}, {arguments})',
'float': 'toRestrictedFloat({isolate}, {arguments})',
'unrestricted float': 'toFloat({isolate}, {arguments})',
'double': 'toRestrictedDouble({isolate}, {arguments})',
'unrestricted double': 'toDouble({isolate}, {arguments})',
'byte': 'toInt8({isolate}, {arguments})',
'octet': 'toUInt8({isolate}, {arguments})',
'short': 'toInt16({isolate}, {arguments})',
'unsigned short': 'toUInt16({isolate}, {arguments})',
'long': 'toInt32({isolate}, {arguments})',
'unsigned long': 'toUInt32({isolate}, {arguments})',
'long long': 'toInt64({isolate}, {arguments})',
'unsigned long long': 'toUInt64({isolate}, {arguments})',
# Interface types
'Dictionary': 'Dictionary({v8_value}, {isolate}, exceptionState)',
'EventTarget': 'toEventTarget({isolate}, {v8_value})',
'FlexibleArrayBufferView': 'toFlexibleArrayBufferView({isolate}, {v8_value}, {variable_name}, allocateFlexibleArrayBufferViewStorage({v8_value}))',
'NodeFilter': 'toNodeFilter({v8_value}, info.Holder(), ScriptState::current({isolate}))',
'Promise': 'ScriptPromise::cast(ScriptState::current({isolate}), {v8_value})',
'SerializedScriptValue': 'SerializedScriptValueFactory::instance().create({isolate}, {v8_value}, nullptr, nullptr, nullptr, exceptionState)',
'ScriptValue': 'ScriptValue(ScriptState::current({isolate}), {v8_value})',
'Window': 'toDOMWindow({isolate}, {v8_value})',
'XPathNSResolver': 'toXPathNSResolver(ScriptState::current({isolate}), {v8_value})',
}
def v8_conversion_needs_exception_state(idl_type):
return (idl_type.is_numeric_type or
idl_type.is_enum or
idl_type.is_dictionary or
idl_type.name in ('Boolean', 'ByteString', 'Dictionary', 'USVString', 'SerializedScriptValue'))
IdlType.v8_conversion_needs_exception_state = property(v8_conversion_needs_exception_state)
IdlArrayOrSequenceType.v8_conversion_needs_exception_state = True
IdlUnionType.v8_conversion_needs_exception_state = True
TRIVIAL_CONVERSIONS = frozenset([
'any',
'boolean',
'Date',
'Dictionary',
'NodeFilter',
'XPathNSResolver',
'Promise'
])
def v8_conversion_is_trivial(idl_type):
# The conversion is a simple expression that returns the converted value and
# cannot raise an exception.
return (idl_type.base_type in TRIVIAL_CONVERSIONS or
idl_type.is_wrapper_type)
IdlType.v8_conversion_is_trivial = property(v8_conversion_is_trivial)
def v8_value_to_cpp_value(idl_type, extended_attributes, v8_value, variable_name, index, isolate, restricted_float=False):
if idl_type.name == 'void':
return ''
# Array or sequence types
native_array_element_type = idl_type.native_array_element_type
if native_array_element_type:
return v8_value_to_cpp_value_array_or_sequence(native_array_element_type, v8_value, index, isolate)
# Simple types
idl_type = idl_type.preprocessed_type
base_idl_type = idl_type.as_union_type.name if idl_type.is_union_type else idl_type.base_type
if 'FlexibleArrayBufferView' in extended_attributes:
if base_idl_type not in TYPED_ARRAY_TYPES.union(set(['ArrayBufferView'])):
raise "Unrecognized base type for extended attribute 'FlexibleArrayBufferView': %s" % (idl_type.base_type)
base_idl_type = 'FlexibleArrayBufferView'
if idl_type.is_integer_type:
configuration = 'NormalConversion'
if 'EnforceRange' in extended_attributes:
configuration = 'EnforceRange'
elif 'Clamp' in extended_attributes:
configuration = 'Clamp'
arguments = ', '.join([v8_value, configuration, 'exceptionState'])
elif idl_type.v8_conversion_needs_exception_state:
arguments = ', '.join([v8_value, 'exceptionState'])
else:
arguments = v8_value
if base_idl_type in V8_VALUE_TO_CPP_VALUE:
cpp_expression_format = V8_VALUE_TO_CPP_VALUE[base_idl_type]
elif idl_type.is_array_buffer_or_view:
cpp_expression_format = (
'{v8_value}->Is{idl_type}() ? '
'V8{idl_type}::toImpl(v8::Local<v8::{idl_type}>::Cast({v8_value})) : 0')
elif idl_type.is_union_type:
nullable = 'UnionTypeConversionMode::Nullable' if idl_type.includes_nullable_type else 'UnionTypeConversionMode::NotNullable'
cpp_expression_format = 'V8{idl_type}::toImpl({isolate}, {v8_value}, {variable_name}, %s, exceptionState)' % nullable
elif idl_type.use_output_parameter_for_result:
cpp_expression_format = 'V8{idl_type}::toImpl({isolate}, {v8_value}, {variable_name}, exceptionState)'
else:
cpp_expression_format = (
'V8{idl_type}::toImplWithTypeCheck({isolate}, {v8_value})')
return cpp_expression_format.format(arguments=arguments, idl_type=base_idl_type, v8_value=v8_value, variable_name=variable_name, isolate=isolate)
def v8_value_to_cpp_value_array_or_sequence(native_array_element_type, v8_value, index, isolate='info.GetIsolate()'):
# Index is None for setters, index (starting at 0) for method arguments,
# and is used to provide a human-readable exception message
if index is None:
index = 0 # special case, meaning "setter"
else:
index += 1 # human-readable index
if (native_array_element_type.is_interface_type and
native_array_element_type.name != 'Dictionary'):
this_cpp_type = None
ref_ptr_type = cpp_ptr_type('RefPtr', 'Member', native_array_element_type.gc_type)
expression_format = '(to{ref_ptr_type}NativeArray<{native_array_element_type}, V8{native_array_element_type}>({v8_value}, {index}, {isolate}, exceptionState))'
else:
ref_ptr_type = None
this_cpp_type = native_array_element_type.cpp_type
if native_array_element_type.is_dictionary or native_array_element_type.is_union_type:
vector_type = 'HeapVector'
else:
vector_type = 'Vector'
expression_format = 'toImplArray<%s<{cpp_type}>>({v8_value}, {index}, {isolate}, exceptionState)' % vector_type
expression = expression_format.format(native_array_element_type=native_array_element_type.name, cpp_type=this_cpp_type, index=index, ref_ptr_type=ref_ptr_type, v8_value=v8_value, isolate=isolate)
return expression
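# For instance, a sequence<long> method argument at index 0 yields an expression
# along the lines of:
#   toImplArray<Vector<int>>(v8Value, 1, info.GetIsolate(), exceptionState)
# (hedged: the exact v8_value/isolate strings depend on the caller).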
# FIXME: this function should be refactored, as this takes too many flags.
def v8_value_to_local_cpp_value(idl_type, extended_attributes, v8_value, variable_name, index=None, declare_variable=True, isolate='info.GetIsolate()', bailout_return_value=None, use_exception_state=False, restricted_float=False):
"""Returns an expression that converts a V8 value to a C++ value and stores it as a local value."""
this_cpp_type = idl_type.cpp_type_args(extended_attributes=extended_attributes, raw_type=True)
idl_type = idl_type.preprocessed_type
cpp_value = v8_value_to_cpp_value(idl_type, extended_attributes, v8_value, variable_name, index, isolate, restricted_float=restricted_float)
# Optional expression that returns a value to be assigned to the local variable.
assign_expression = None
# Optional void expression executed unconditionally.
set_expression = None
# Optional expression that returns true if the conversion fails.
check_expression = None
# Optional expression used as the return value when returning. Only
# meaningful if 'check_expression' is not None.
return_expression = bailout_return_value
if idl_type.is_string_type or idl_type.v8_conversion_needs_exception_state:
# Types for which conversion can fail and that need error handling.
if use_exception_state:
check_expression = 'exceptionState.hadException()'
else:
check_expression = 'exceptionState.throwIfNeeded()'
if idl_type.is_dictionary or idl_type.is_union_type:
set_expression = cpp_value
else:
assign_expression = cpp_value
# Note: 'not idl_type.v8_conversion_needs_exception_state' implies
# 'idl_type.is_string_type', but there are types for which both are
# true (ByteString and USVString), so using idl_type.is_string_type
# as the condition here would be wrong.
if not idl_type.v8_conversion_needs_exception_state:
if use_exception_state:
check_expression = '!%s.prepare(exceptionState)' % variable_name
else:
check_expression = '!%s.prepare()' % variable_name
elif not idl_type.v8_conversion_is_trivial:
return {
'error_message': 'no V8 -> C++ conversion for IDL type: %s' % idl_type.name
}
elif 'FlexibleArrayBufferView' in extended_attributes:
if idl_type.base_type not in TYPED_ARRAY_TYPES.union(set(['ArrayBufferView'])):
raise "Unrecognized base type for extended attribute 'FlexibleArrayBufferView': %s" % (idl_type.base_type)
set_expression = cpp_value
else:
assign_expression = cpp_value
# Types that don't need error handling, and simply assign a value to the
# local variable.
return {
'assign_expression': assign_expression,
'check_expression': check_expression,
'cpp_type': this_cpp_type,
'cpp_name': variable_name,
'declare_variable': declare_variable,
'return_expression': bailout_return_value,
'set_expression': set_expression,
}
IdlTypeBase.v8_value_to_local_cpp_value = v8_value_to_local_cpp_value
def use_output_parameter_for_result(idl_type):
"""True when methods/getters which return the given idl_type should
take the output argument.
"""
return idl_type.is_dictionary or idl_type.is_union_type
IdlTypeBase.use_output_parameter_for_result = property(use_output_parameter_for_result)
################################################################################
# C++ -> V8
################################################################################
def preprocess_idl_type(idl_type):
if idl_type.is_nullable:
return IdlNullableType(idl_type.inner_type.preprocessed_type)
if idl_type.is_enum:
# Enumerations are internally DOMStrings
return IdlType('DOMString')
if idl_type.base_type in ['any', 'object'] or idl_type.is_callback_function:
return IdlType('ScriptValue')
return idl_type
IdlTypeBase.preprocessed_type = property(preprocess_idl_type)
def preprocess_idl_type_and_value(idl_type, cpp_value, extended_attributes):
"""Returns IDL type and value, with preliminary type conversions applied."""
idl_type = idl_type.preprocessed_type
if idl_type.name == 'Promise':
idl_type = IdlType('ScriptValue')
if idl_type.base_type in ['long long', 'unsigned long long']:
# long long and unsigned long long are not representable in ECMAScript;
# we represent them as doubles.
is_nullable = idl_type.is_nullable
idl_type = IdlType('double')
if is_nullable:
idl_type = IdlNullableType(idl_type)
cpp_value = 'static_cast<double>(%s)' % cpp_value
# HTML5 says that unsigned reflected attributes should be in the range
# [0, 2^31). When a value isn't in this range, a default value (or 0)
# should be returned instead.
extended_attributes = extended_attributes or {}
if ('Reflect' in extended_attributes and
idl_type.base_type in ['unsigned long', 'unsigned short']):
cpp_value = cpp_value.replace('getUnsignedIntegralAttribute',
'getIntegralAttribute')
cpp_value = 'std::max(0, static_cast<int>(%s))' % cpp_value
return idl_type, cpp_value
def v8_conversion_type(idl_type, extended_attributes):
"""Returns V8 conversion type, adding any additional includes.
The V8 conversion type is used to select the C++ -> V8 conversion function
or v8SetReturnValue* function; it can be an idl_type, a cpp_type, or a
separate name for the type of conversion (e.g., 'DOMWrapper').
"""
extended_attributes = extended_attributes or {}
# Nullable dictionaries need to be handled differently than either
# non-nullable dictionaries or unions.
if idl_type.is_dictionary and idl_type.is_nullable:
return 'NullableDictionary'
if idl_type.is_dictionary or idl_type.is_union_type:
return 'DictionaryOrUnion'
# Array or sequence types
native_array_element_type = idl_type.native_array_element_type
if native_array_element_type:
return 'array'
# Simple types
base_idl_type = idl_type.base_type
# Basic types, without additional includes
if base_idl_type in CPP_INT_TYPES:
return 'int'
if base_idl_type in CPP_UNSIGNED_TYPES:
return 'unsigned'
if idl_type.is_string_type:
if idl_type.is_nullable:
return 'StringOrNull'
if 'TreatReturnedNullStringAs' not in extended_attributes:
return base_idl_type
treat_returned_null_string_as = extended_attributes['TreatReturnedNullStringAs']
if treat_returned_null_string_as == 'Null':
return 'StringOrNull'
if treat_returned_null_string_as == 'Undefined':
return 'StringOrUndefined'
raise ValueError('Unrecognized TreatReturnedNullStringAs value: "%s"' % treat_returned_null_string_as)
if idl_type.is_basic_type or base_idl_type == 'ScriptValue':
return base_idl_type
# Generic dictionary type
if base_idl_type == 'Dictionary':
return 'Dictionary'
# Data type with potential additional includes
if base_idl_type in V8_SET_RETURN_VALUE: # Special v8SetReturnValue treatment
return base_idl_type
# Pointer type
return 'DOMWrapper'
IdlTypeBase.v8_conversion_type = v8_conversion_type
V8_SET_RETURN_VALUE = {
'boolean': 'v8SetReturnValueBool(info, {cpp_value})',
'int': 'v8SetReturnValueInt(info, {cpp_value})',
'unsigned': 'v8SetReturnValueUnsigned(info, {cpp_value})',
'DOMString': 'v8SetReturnValueString(info, {cpp_value}, info.GetIsolate())',
'ByteString': 'v8SetReturnValueString(info, {cpp_value}, info.GetIsolate())',
'USVString': 'v8SetReturnValueString(info, {cpp_value}, info.GetIsolate())',
# [TreatReturnedNullStringAs]
'StringOrNull': 'v8SetReturnValueStringOrNull(info, {cpp_value}, info.GetIsolate())',
'StringOrUndefined': 'v8SetReturnValueStringOrUndefined(info, {cpp_value}, info.GetIsolate())',
'void': '',
# No special v8SetReturnValue* function (set value directly)
'float': 'v8SetReturnValue(info, {cpp_value})',
'unrestricted float': 'v8SetReturnValue(info, {cpp_value})',
'double': 'v8SetReturnValue(info, {cpp_value})',
'unrestricted double': 'v8SetReturnValue(info, {cpp_value})',
# No special v8SetReturnValue* function, but instead convert value to V8
# and then use general v8SetReturnValue.
'array': 'v8SetReturnValue(info, {cpp_value})',
'Date': 'v8SetReturnValue(info, {cpp_value})',
'EventHandler': 'v8SetReturnValue(info, {cpp_value})',
'ScriptValue': 'v8SetReturnValue(info, {cpp_value})',
'SerializedScriptValue': 'v8SetReturnValue(info, {cpp_value})',
# DOMWrapper
'DOMWrapperForMainWorld': 'v8SetReturnValueForMainWorld(info, WTF::getPtr({cpp_value}))',
'DOMWrapperFast': 'v8SetReturnValueFast(info, WTF::getPtr({cpp_value}), {script_wrappable})',
'DOMWrapperDefault': 'v8SetReturnValue(info, {cpp_value})',
# Note that static attributes and operations do not check whether |this| is
# an instance of the interface nor |this|'s creation context is the same as
# the current context. So we must always use the current context as the
# creation context of the DOM wrapper for the return value.
'DOMWrapperStatic': 'v8SetReturnValue(info, {cpp_value}, info.GetIsolate()->GetCurrentContext()->Global())',
# Generic dictionary type
'Dictionary': 'v8SetReturnValue(info, {cpp_value})',
'DictionaryStatic': '#error not implemented yet',
# Nullable dictionaries
'NullableDictionary': 'v8SetReturnValue(info, result.get())',
'NullableDictionaryStatic': '#error not implemented yet',
# Union types or dictionaries
'DictionaryOrUnion': 'v8SetReturnValue(info, result)',
'DictionaryOrUnionStatic': '#error not implemented yet',
}
def v8_set_return_value(idl_type, cpp_value, extended_attributes=None, script_wrappable='', release=False, for_main_world=False, is_static=False):
"""Returns a statement that converts a C++ value to a V8 value and sets it as a return value.
"""
def dom_wrapper_conversion_type():
if is_static:
return 'DOMWrapperStatic'
if not script_wrappable:
return 'DOMWrapperDefault'
if for_main_world:
return 'DOMWrapperForMainWorld'
return 'DOMWrapperFast'
idl_type, cpp_value = preprocess_idl_type_and_value(idl_type, cpp_value, extended_attributes)
this_v8_conversion_type = idl_type.v8_conversion_type(extended_attributes)
# SetReturn-specific overrides
if this_v8_conversion_type in ['Date', 'EventHandler', 'ScriptValue', 'SerializedScriptValue', 'array']:
# Convert value to V8 and then use general v8SetReturnValue
cpp_value = idl_type.cpp_value_to_v8_value(cpp_value, extended_attributes=extended_attributes)
if this_v8_conversion_type == 'DOMWrapper':
this_v8_conversion_type = dom_wrapper_conversion_type()
if is_static and this_v8_conversion_type in ('Dictionary', 'NullableDictionary', 'DictionaryOrUnion'):
this_v8_conversion_type += 'Static'
format_string = V8_SET_RETURN_VALUE[this_v8_conversion_type]
# FIXME: oilpan: Remove .release() once we remove all RefPtrs from generated code.
if release:
cpp_value = '%s.release()' % cpp_value
statement = format_string.format(cpp_value=cpp_value, script_wrappable=script_wrappable)
return statement
IdlTypeBase.v8_set_return_value = v8_set_return_value
IdlType.release = property(lambda self: self.is_interface_type)
IdlUnionType.release = False
CPP_VALUE_TO_V8_VALUE = {
# Built-in types
'Date': 'v8DateOrNaN({isolate}, {cpp_value})',
'DOMString': 'v8String({isolate}, {cpp_value})',
'ByteString': 'v8String({isolate}, {cpp_value})',
'USVString': 'v8String({isolate}, {cpp_value})',
'boolean': 'v8Boolean({cpp_value}, {isolate})',
'int': 'v8::Integer::New({isolate}, {cpp_value})',
'unsigned': 'v8::Integer::NewFromUnsigned({isolate}, {cpp_value})',
'float': 'v8::Number::New({isolate}, {cpp_value})',
'unrestricted float': 'v8::Number::New({isolate}, {cpp_value})',
'double': 'v8::Number::New({isolate}, {cpp_value})',
'unrestricted double': 'v8::Number::New({isolate}, {cpp_value})',
'void': 'v8Undefined()',
# [TreatReturnedNullStringAs]
'StringOrNull': '{cpp_value}.isNull() ? v8::Local<v8::Value>(v8::Null({isolate})) : v8String({isolate}, {cpp_value})',
'StringOrUndefined': '{cpp_value}.isNull() ? v8Undefined() : v8String({isolate}, {cpp_value})',
# Special cases
'Dictionary': '{cpp_value}.v8Value()',
'EventHandler': '{cpp_value} ? v8::Local<v8::Value>(V8AbstractEventListener::cast({cpp_value})->getListenerObject(impl->executionContext())) : v8::Local<v8::Value>(v8::Null({isolate}))',
'ScriptValue': '{cpp_value}.v8Value()',
'SerializedScriptValue': '{cpp_value} ? {cpp_value}->deserialize() : v8::Local<v8::Value>(v8::Null({isolate}))',
# General
'array': 'toV8({cpp_value}, {creation_context}, {isolate})',
'DOMWrapper': 'toV8({cpp_value}, {creation_context}, {isolate})',
# Passing nullable dictionaries isn't a pattern currently used
# anywhere in the web platform, and more work would be needed in
# the code generator to distinguish between passing null, and
# passing an object which happened to not contain any of the
# dictionary's defined attributes. For now, don't define
# NullableDictionary here, which will cause an exception to be
# thrown during code generation if an argument to a method is a
# nullable dictionary type.
#
# Union types or dictionaries
'DictionaryOrUnion': 'toV8({cpp_value}, {creation_context}, {isolate})',
}
def cpp_value_to_v8_value(idl_type, cpp_value, isolate='info.GetIsolate()', creation_context='info.Holder()', extended_attributes=None):
"""Returns an expression that converts a C++ value to a V8 value."""
# the isolate parameter is needed for callback interfaces
idl_type, cpp_value = preprocess_idl_type_and_value(idl_type, cpp_value, extended_attributes)
this_v8_conversion_type = idl_type.v8_conversion_type(extended_attributes)
format_string = CPP_VALUE_TO_V8_VALUE[this_v8_conversion_type]
statement = format_string.format(cpp_value=cpp_value, isolate=isolate, creation_context=creation_context)
return statement
IdlTypeBase.cpp_value_to_v8_value = cpp_value_to_v8_value
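# Illustrative sketch: for an IDL type whose v8_conversion_type() is
# 'DOMString', the table above yields
#   idl_type.cpp_value_to_v8_value('impl->name()')
#       -> 'v8String(info.GetIsolate(), impl->name())'
# with the default isolate argument substituted in.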
def literal_cpp_value(idl_type, idl_literal):
"""Converts an expression that is a valid C++ literal for this type."""
# FIXME: add validation that idl_type and idl_literal are compatible
if idl_type.base_type in ('any', 'object') and idl_literal.is_null:
return 'ScriptValue()'
literal_value = str(idl_literal)
if idl_type.base_type in CPP_UNSIGNED_TYPES:
return literal_value + 'u'
return literal_value
def union_literal_cpp_value(idl_type, idl_literal):
if idl_literal.is_null:
return idl_type.name + '()'
elif idl_literal.idl_type == 'DOMString':
member_type = idl_type.string_member_type
elif idl_literal.idl_type in ('integer', 'float'):
member_type = idl_type.numeric_member_type
elif idl_literal.idl_type == 'boolean':
member_type = idl_type.boolean_member_type
else:
raise ValueError('Unsupported literal type: ' + idl_literal.idl_type)
return '%s::from%s(%s)' % (idl_type.name, member_type.name,
member_type.literal_cpp_value(idl_literal))
def array_or_sequence_literal_cpp_value(idl_type, idl_literal):
# Only support empty arrays.
if idl_literal.value == '[]':
return cpp_type(idl_type) + '()'
raise ValueError('Unsupported literal type: ' + idl_literal.idl_type)
IdlType.literal_cpp_value = literal_cpp_value
IdlUnionType.literal_cpp_value = union_literal_cpp_value
IdlArrayOrSequenceType.literal_cpp_value = array_or_sequence_literal_cpp_value
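# Illustrative sketches of the literal conversions above (type names assumed
# for the example): an unsigned IDL type with the literal 42 yields '42u';
# an array or sequence type with the empty literal '[]' yields the
# default-constructed C++ type, e.g. 'Vector<String>()'; a union literal is
# routed through the matching member type, producing something like
# 'StringOrDouble::fromString(...)'.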
################################################################################
# Utility properties for nullable types
################################################################################
def cpp_type_has_null_value(idl_type):
# - String types (String/AtomicString) represent null as a null string,
# i.e. one for which String::isNull() returns true.
# - Enum types, as they are implemented as Strings.
# - Interface types (raw pointer or RefPtr/PassRefPtr) represent null as
# a null pointer.
# - Union types, as their container classes can represent a null value.
# - 'object' and 'any' types. We use ScriptValue for the object type.
return (idl_type.is_string_type or idl_type.is_interface_type or
idl_type.is_enum or idl_type.is_union_type
or idl_type.base_type == 'object' or idl_type.base_type == 'any'
or idl_type.is_callback_function or idl_type.is_callback_interface)
IdlTypeBase.cpp_type_has_null_value = property(cpp_type_has_null_value)
def is_implicit_nullable(idl_type):
# Nullable type where the corresponding C++ type supports a null value.
return idl_type.is_nullable and idl_type.cpp_type_has_null_value
def is_explicit_nullable(idl_type):
# Nullable type that isn't implicitly nullable (see above). For such types,
# we use Nullable<T> or similar explicit ways to represent a null value.
return idl_type.is_nullable and not idl_type.is_implicit_nullable
IdlTypeBase.is_implicit_nullable = property(is_implicit_nullable)
IdlUnionType.is_implicit_nullable = False
IdlTypeBase.is_explicit_nullable = property(is_explicit_nullable)
def number_of_nullable_member_types_union(idl_type):
# http://heycam.github.io/webidl/#dfn-number-of-nullable-member-types
count = 0
for member in idl_type.member_types:
if member.is_nullable:
count += 1
member = member.inner_type
if member.is_union_type:
count += number_of_nullable_member_types_union(member)
return count
IdlUnionType.number_of_nullable_member_types = property(
number_of_nullable_member_types_union)
def includes_nullable_type_union(idl_type):
# http://heycam.github.io/webidl/#dfn-includes-a-nullable-type
return idl_type.number_of_nullable_member_types == 1
IdlTypeBase.includes_nullable_type = False
IdlNullableType.includes_nullable_type = True
IdlUnionType.includes_nullable_type = property(includes_nullable_type_union)
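# Illustrative examples of the distinction above: a nullable interface type
# such as 'Node?' is implicitly nullable, since a null pointer already encodes
# null; a nullable numeric type such as 'double?' has no spare C++ value for
# null, so it is explicitly nullable and the generated code wraps it in
# Nullable<double> (or a similar explicit representation).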
| |
import os
from collections import namedtuple
from uuid import uuid4
from email.mime.nonmultipart import MIMENonMultipart
from django.core.exceptions import ValidationError
from django.core.mail import EmailMessage, EmailMultiAlternatives
from django.db import models
from django.utils.encoding import smart_str
from django.utils.translation import pgettext_lazy, gettext_lazy as _
from django.utils import timezone
from jsonfield import JSONField
from post_office import cache
from post_office.fields import CommaSeparatedEmailField
from .connections import connections
from .settings import context_field_class, get_log_level, get_template_engine, get_override_recipients
from .validators import validate_email_with_name, validate_template_syntax
PRIORITY = namedtuple('PRIORITY', 'low medium high now')._make(range(4))
STATUS = namedtuple('STATUS', 'sent failed queued requeued')._make(range(4))
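# A minimal sketch of what the namedtuple trick above produces:
#   PRIORITY.low == 0, PRIORITY.medium == 1, PRIORITY.high == 2, PRIORITY.now == 3
#   STATUS.sent == 0, STATUS.failed == 1, STATUS.queued == 2, STATUS.requeued == 3
# so the small integers stored on the models map back to readable names.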
class Email(models.Model):
"""
A model to hold email information.
"""
PRIORITY_CHOICES = [(PRIORITY.low, _("low")), (PRIORITY.medium, _("medium")),
(PRIORITY.high, _("high")), (PRIORITY.now, _("now"))]
STATUS_CHOICES = [(STATUS.sent, _("sent")), (STATUS.failed, _("failed")),
(STATUS.queued, _("queued")), (STATUS.requeued, _("requeued"))]
from_email = models.CharField(_("Email From"), max_length=254,
validators=[validate_email_with_name])
to = CommaSeparatedEmailField(_("Email To"))
cc = CommaSeparatedEmailField(_("Cc"))
bcc = CommaSeparatedEmailField(_("Bcc"))
subject = models.CharField(_("Subject"), max_length=989, blank=True)
message = models.TextField(_("Message"), blank=True)
html_message = models.TextField(_("HTML Message"), blank=True)
"""
Emails with 'queued' status will get processed by the ``send_queued`` command.
The status field will then be set to ``failed`` or ``sent`` depending on
whether delivery succeeded.
"""
status = models.PositiveSmallIntegerField(
_("Status"),
choices=STATUS_CHOICES, db_index=True,
blank=True, null=True)
priority = models.PositiveSmallIntegerField(_("Priority"),
choices=PRIORITY_CHOICES,
blank=True, null=True)
created = models.DateTimeField(auto_now_add=True, db_index=True)
last_updated = models.DateTimeField(db_index=True, auto_now=True)
scheduled_time = models.DateTimeField(_("The scheduled sending time"),
blank=True, null=True, db_index=True)
expires_at = models.DateTimeField(_("Email won't be sent after this timestamp"),
blank=True, null=True)
number_of_retries = models.PositiveIntegerField(null=True, blank=True)
headers = JSONField(_('Headers'), blank=True, null=True)
template = models.ForeignKey('post_office.EmailTemplate', blank=True,
null=True, verbose_name=_("Email template"),
on_delete=models.CASCADE)
context = context_field_class(_('Context'), blank=True, null=True)
backend_alias = models.CharField(_("Backend alias"), blank=True, default='',
max_length=64)
class Meta:
app_label = 'post_office'
verbose_name = pgettext_lazy("Email address", "Email")
verbose_name_plural = pgettext_lazy("Email addresses", "Emails")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cached_email_message = None
def __str__(self):
return '%s' % self.to
def email_message(self):
"""
Returns Django EmailMessage object for sending.
"""
if self._cached_email_message:
return self._cached_email_message
return self.prepare_email_message()
def prepare_email_message(self):
"""
Returns a django ``EmailMessage`` or ``EmailMultiAlternatives`` object,
depending on whether html_message is empty.
"""
if get_override_recipients():
self.to = get_override_recipients()
if self.template is not None:
engine = get_template_engine()
subject = engine.from_string(self.template.subject).render(self.context)
plaintext_message = engine.from_string(self.template.content).render(self.context)
multipart_template = engine.from_string(self.template.html_content)
html_message = multipart_template.render(self.context)
else:
subject = smart_str(self.subject)
plaintext_message = self.message
multipart_template = None
html_message = self.html_message
connection = connections[self.backend_alias or 'default']
if isinstance(self.headers, dict) or self.expires_at:
headers = dict(self.headers or {})
if self.expires_at:
headers.update({'Expires': self.expires_at.strftime("%a, %-d %b %H:%M:%S %z")})
else:
headers = None
if html_message:
if plaintext_message:
msg = EmailMultiAlternatives(
subject=subject, body=plaintext_message, from_email=self.from_email,
to=self.to, bcc=self.bcc, cc=self.cc,
headers=headers, connection=connection)
msg.attach_alternative(html_message, "text/html")
else:
msg = EmailMultiAlternatives(
subject=subject, body=html_message, from_email=self.from_email,
to=self.to, bcc=self.bcc, cc=self.cc,
headers=headers, connection=connection)
msg.content_subtype = 'html'
if hasattr(multipart_template, 'attach_related'):
multipart_template.attach_related(msg)
else:
msg = EmailMessage(
subject=subject, body=plaintext_message, from_email=self.from_email,
to=self.to, bcc=self.bcc, cc=self.cc,
headers=headers, connection=connection)
for attachment in self.attachments.all():
if attachment.headers:
mime_part = MIMENonMultipart(*attachment.mimetype.split('/'))
mime_part.set_payload(attachment.file.read())
for key, val in attachment.headers.items():
try:
mime_part.replace_header(key, val)
except KeyError:
mime_part.add_header(key, val)
msg.attach(mime_part)
else:
msg.attach(attachment.name, attachment.file.read(), mimetype=attachment.mimetype or None)
attachment.file.close()
self._cached_email_message = msg
return msg
def dispatch(self, log_level=None,
disconnect_after_delivery=True, commit=True):
"""
Sends the email and logs the result.
"""
try:
self.email_message().send()
status = STATUS.sent
message = ''
exception_type = ''
except Exception as e:
status = STATUS.failed
message = str(e)
exception_type = type(e).__name__
# If run in a bulk sending mode, reraise and let the outer
# layer handle the exception
if not commit:
raise
if commit:
self.status = status
self.save(update_fields=['status'])
if log_level is None:
log_level = get_log_level()
# If log level is 0, log nothing, 1 logs only sending failures
# and 2 means log both successes and failures
if log_level == 1:
if status == STATUS.failed:
self.logs.create(status=status, message=message,
exception_type=exception_type)
elif log_level == 2:
self.logs.create(status=status, message=message,
exception_type=exception_type)
return status
def clean(self):
if self.scheduled_time and self.expires_at and self.scheduled_time > self.expires_at:
raise ValidationError(_("The scheduled time may not be later than the expires time."))
def save(self, *args, **kwargs):
self.full_clean()
return super().save(*args, **kwargs)
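# A minimal usage sketch (addresses and field values assumed for the example;
# in practice emails are usually created via post_office's mail.send helper):
#
#   email = Email.objects.create(
#       from_email='sender@example.com',
#       to=['recipient@example.com'],
#       subject='Hello',
#       message='Plain text body',
#       html_message='<p>HTML body</p>',
#       priority=PRIORITY.medium,
#       status=STATUS.queued,
#   )
#   email.dispatch()  # builds the EmailMessage, sends it and logs the result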
class Log(models.Model):
"""
A model to record email sending activities.
"""
STATUS_CHOICES = [(STATUS.sent, _("sent")), (STATUS.failed, _("failed"))]
email = models.ForeignKey(Email, editable=False, related_name='logs',
verbose_name=_('Email address'), on_delete=models.CASCADE)
date = models.DateTimeField(auto_now_add=True)
status = models.PositiveSmallIntegerField(_('Status'), choices=STATUS_CHOICES)
exception_type = models.CharField(_('Exception type'), max_length=255, blank=True)
message = models.TextField(_('Message'))
class Meta:
app_label = 'post_office'
verbose_name = _("Log")
verbose_name_plural = _("Logs")
def __str__(self):
return str(self.date)
class EmailTemplateManager(models.Manager):
def get_by_natural_key(self, name, language, default_template):
return self.get(name=name, language=language, default_template=default_template)
class EmailTemplate(models.Model):
"""
Model to hold template information from the database.
"""
name = models.CharField(_('Name'), max_length=255, help_text=_("e.g: 'welcome_email'"))
description = models.TextField(_('Description'), blank=True,
help_text=_("Description of this template."))
created = models.DateTimeField(auto_now_add=True)
last_updated = models.DateTimeField(auto_now=True)
subject = models.CharField(max_length=255, blank=True,
verbose_name=_("Subject"), validators=[validate_template_syntax])
content = models.TextField(blank=True,
verbose_name=_("Content"), validators=[validate_template_syntax])
html_content = models.TextField(blank=True,
verbose_name=_("HTML content"), validators=[validate_template_syntax])
language = models.CharField(max_length=12,
verbose_name=_("Language"),
help_text=_("Render template in alternative language"),
default='', blank=True)
default_template = models.ForeignKey('self', related_name='translated_templates',
null=True, default=None, verbose_name=_('Default template'), on_delete=models.CASCADE)
objects = EmailTemplateManager()
class Meta:
app_label = 'post_office'
unique_together = ('name', 'language', 'default_template')
verbose_name = _("Email Template")
verbose_name_plural = _("Email Templates")
ordering = ['name']
def __str__(self):
return '%s %s' % (self.name, self.language)
def natural_key(self):
return (self.name, self.language, self.default_template)
def save(self, *args, **kwargs):
# If template is a translation, use default template's name
if self.default_template and not self.name:
self.name = self.default_template.name
template = super().save(*args, **kwargs)
cache.delete(self.name)
return template
def get_upload_path(instance, filename):
"""Overriding to store the original filename"""
if not instance.name:
instance.name = filename # set original filename
date = timezone.now().date()
filename = '{name}.{ext}'.format(name=uuid4().hex,
ext=filename.split('.')[-1])
return os.path.join('post_office_attachments', str(date.year),
str(date.month), str(date.day), filename)
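# For example, a file uploaded as 'report.pdf' on 2024-05-01 would be stored
# at something like 'post_office_attachments/2024/5/1/<uuid4 hex>.pdf', while
# Attachment.name keeps the original 'report.pdf' (dates and names here are
# illustrative only).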
class Attachment(models.Model):
"""
A model describing an email attachment.
"""
file = models.FileField(_('File'), upload_to=get_upload_path)
name = models.CharField(_('Name'), max_length=255, help_text=_("The original filename"))
emails = models.ManyToManyField(Email, related_name='attachments',
verbose_name=_('Email addresses'))
mimetype = models.CharField(max_length=255, default='', blank=True)
headers = JSONField(_('Headers'), blank=True, null=True)
class Meta:
app_label = 'post_office'
verbose_name = _("Attachment")
verbose_name_plural = _("Attachments")
def __str__(self):
return self.name
| |
import json
from django.core import exceptions, serializers
from django.forms import Form
from . import PostgresSQLTestCase
from .models import HStoreModel
try:
from django.contrib.postgres import forms
from django.contrib.postgres.fields import HStoreField
from django.contrib.postgres.validators import KeysValidator
except ImportError:
pass
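# The tests below assume an HStoreModel roughly along these lines (the real
# definition lives in .models; this sketch is for orientation only):
#
#   class HStoreModel(models.Model):
#       field = HStoreField(null=True)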
class SimpleTests(PostgresSQLTestCase):
apps = ['django.contrib.postgres']
def test_save_load_success(self):
value = {'a': 'b'}
instance = HStoreModel(field=value)
instance.save()
reloaded = HStoreModel.objects.get()
self.assertEqual(reloaded.field, value)
def test_null(self):
instance = HStoreModel(field=None)
instance.save()
reloaded = HStoreModel.objects.get()
self.assertEqual(reloaded.field, None)
def test_value_null(self):
value = {'a': None}
instance = HStoreModel(field=value)
instance.save()
reloaded = HStoreModel.objects.get()
self.assertEqual(reloaded.field, value)
class TestQuerying(PostgresSQLTestCase):
def setUp(self):
self.objs = [
HStoreModel.objects.create(field={'a': 'b'}),
HStoreModel.objects.create(field={'a': 'b', 'c': 'd'}),
HStoreModel.objects.create(field={'c': 'd'}),
HStoreModel.objects.create(field={}),
HStoreModel.objects.create(field=None),
]
def test_exact(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__exact={'a': 'b'}),
self.objs[:1]
)
def test_contained_by(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__contained_by={'a': 'b', 'c': 'd'}),
self.objs[:4]
)
def test_contains(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__contains={'a': 'b'}),
self.objs[:2]
)
def test_has_key(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__has_key='c'),
self.objs[1:3]
)
def test_has_keys(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__has_keys=['a', 'c']),
self.objs[1:2]
)
def test_key_transform(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a='b'),
self.objs[:2]
)
def test_keys(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__keys=['a']),
self.objs[:1]
)
def test_values(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__values=['b']),
self.objs[:1]
)
def test_field_chaining(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a__contains='b'),
self.objs[:2]
)
def test_keys_contains(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__keys__contains=['a']),
self.objs[:2]
)
def test_values_overlap(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__values__overlap=['b', 'd']),
self.objs[:3]
)
def test_key_isnull(self):
obj = HStoreModel.objects.create(field={'a': None})
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a__isnull=True),
self.objs[2:5] + [obj]
)
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a__isnull=False),
self.objs[:2]
)
class TestSerialization(PostgresSQLTestCase):
test_data = '[{"fields": {"field": "{\\"a\\": \\"b\\"}"}, "model": "postgres_tests.hstoremodel", "pk": null}]'
def test_dumping(self):
instance = HStoreModel(field={'a': 'b'})
data = serializers.serialize('json', [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, {'a': 'b'})
class TestValidation(PostgresSQLTestCase):
def test_not_a_string(self):
field = HStoreField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean({'a': 1}, None)
self.assertEqual(cm.exception.code, 'not_a_string')
self.assertEqual(cm.exception.message % cm.exception.params, 'The value of "a" is not a string.')
class TestFormField(PostgresSQLTestCase):
def test_valid(self):
field = forms.HStoreField()
value = field.clean('{"a": "b"}')
self.assertEqual(value, {'a': 'b'})
def test_invalid_json(self):
field = forms.HStoreField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('{"a": "b"')
self.assertEqual(cm.exception.messages[0], 'Could not load JSON data.')
self.assertEqual(cm.exception.code, 'invalid_json')
def test_not_string_values(self):
field = forms.HStoreField()
value = field.clean('{"a": 1}')
self.assertEqual(value, {'a': '1'})
def test_empty(self):
field = forms.HStoreField(required=False)
value = field.clean('')
self.assertEqual(value, {})
def test_model_field_formfield(self):
model_field = HStoreField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, forms.HStoreField)
def test_empty_field_has_not_changed(self):
class HStoreFormTest(Form):
f1 = HStoreField()
form_w_hstore = HStoreFormTest()
self.assertFalse(form_w_hstore.has_changed())
class TestValidator(PostgresSQLTestCase):
def test_simple_valid(self):
validator = KeysValidator(keys=['a', 'b'])
validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
def test_missing_keys(self):
validator = KeysValidator(keys=['a', 'b'])
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Some keys were missing: b')
self.assertEqual(cm.exception.code, 'missing_keys')
def test_strict_valid(self):
validator = KeysValidator(keys=['a', 'b'], strict=True)
validator({'a': 'foo', 'b': 'bar'})
def test_extra_keys(self):
validator = KeysValidator(keys=['a', 'b'], strict=True)
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Some unknown keys were provided: c')
self.assertEqual(cm.exception.code, 'extra_keys')
def test_custom_messages(self):
messages = {
'missing_keys': 'Foobar',
}
validator = KeysValidator(keys=['a', 'b'], strict=True, messages=messages)
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Foobar')
self.assertEqual(cm.exception.code, 'missing_keys')
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Some unknown keys were provided: c')
self.assertEqual(cm.exception.code, 'extra_keys')
def test_deconstruct(self):
messages = {
'missing_keys': 'Foobar',
}
validator = KeysValidator(keys=['a', 'b'], strict=True, messages=messages)
path, args, kwargs = validator.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.validators.KeysValidator')
self.assertEqual(args, ())
self.assertEqual(kwargs, {'keys': ['a', 'b'], 'strict': True, 'messages': messages})
| |
import os
import stat
import time
import shutil
import datetime
from operator import itemgetter, attrgetter
from dulwich.objects import Commit, Blob, Tree
from dulwich.repo import Repo
from vacuous.backends.base import BaseBackend
from vacuous.exceptions import FileDoesNotExist, BranchDoesNotExist, BranchDoesAlreadyExist, CommitDoesNotExist
from vacuous.constants import WRITE, RENAME, DELETE
from vacuous.backends.dulwich.utils import clean_path, iter_blob_paths, tree_diff, get_by_path, is_same_object
from vacuous.backends.dulwich.commit import DulwichCommit
class Backend(BaseBackend):
default_branch = 'master'
null_revision = 'null'
file_mode = 0100644
directory_mode = 040755
@property
def repo(self):
if not hasattr(self, '_repo'):
self._repo = Repo(self.path)
return self._repo
def _get_commit(self, revision=None, branch=None):
repo = self.repo
if not revision:
try:
revision = repo.refs['refs/heads/%s' % branch]
except KeyError:
raise BranchDoesNotExist(self, branch)
elif isinstance(revision, DulwichCommit):
revision = revision.revision
try:
commit = repo[revision]
if not isinstance(commit, Commit):
raise CommitDoesNotExist(self, revision)
return commit
except KeyError:
raise CommitDoesNotExist(self, revision)
def _collect(self, tree, path, cache=None):
result = [(None, None, tree)]
bits = filter(None, path.split(os.path.sep))
repo = self.repo
for i, bit in enumerate(bits):
found = False
for mode, name, hexsha in tree.items():
if name == bit:
found = True
if cache and hexsha in cache:
tree = cache[hexsha]
else:
tree = repo[hexsha]
result.append((mode, name, tree))
break
if not found:
result += [(self.directory_mode, bit, Tree()) for bit in bits[i:]]
break
return result
def _link(self, seq):
cache = {}
for i in xrange(len(seq) - 1, -1, -1):
mode, name, obj = seq[i]
cache[obj.id] = obj
if i > 0:
seq[i - 1][2][name] = (mode, obj.id)
return cache
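# How _collect and _link cooperate (an illustrative walk-through; the path is
# assumed for the example): _collect(root, 'docs/guide') returns the chain
# [(None, None, root), (mode, 'docs', docs_tree), (mode, 'guide', guide_tree)],
# appending empty Tree() objects for any path components that do not exist
# yet. Once a blob has been attached to the innermost tree, _link() walks the
# chain from leaf to root, re-registering each child under its parent so the
# parent entries point at the children's new SHAs, and returns a cache of the
# touched objects keyed by object id.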
### repo ###
def init_repo(self):
if os.path.exists(self.path):
return
os.mkdir(self.path)
self._repo = Repo.init_bare(self.path)
def delete_repo(self):
shutil.rmtree(self.path)
### branches ###
def has_branch(self, name):
return 'refs/heads/%s' % name in self.repo.refs
def create_branch(self, name, revision=None):
if self.has_branch(name):
raise BranchDoesAlreadyExist(self, name)
self.repo.refs['refs/heads/%s' % name] = self._get_commit(revision, 'master').id
def delete_branch(self, name):
try:
del self.repo.refs['refs/heads/%s' % name]
except KeyError:
raise BranchDoesNotExist(self, name)
def rename_branch(self, old_name, new_name):
if old_name == new_name:
return
if not self.has_branch(old_name):
raise BranchDoesNotExist(self, old_name)
if self.has_branch(new_name):
raise BranchDoesAlreadyExist(self, new_name)
self.create_branch(new_name, 'refs/heads/%s' % old_name)
self.delete_branch(old_name)
### api ###
def revision(self, revision=None, branch='master'):
return DulwichCommit(self, self._get_commit(revision, branch))
def _walk(self, path, tree):
repo = self.repo
blobs, subtrees = [], []
for mode, name, hexsha in tree.items():
if stat.S_ISREG(mode):
blobs.append(name)
elif stat.S_ISDIR(mode):
subtrees.append(name)
yield (path, subtrees, blobs)
for name in subtrees:
mode, hexsha = tree[name]
for t in self._walk(os.path.join(path, name), repo[hexsha]):
yield t
def walk(self, path, revision=None, branch='master'):
root = self.repo[self._get_commit(revision, branch).tree]
return self._walk(path, root)
def history(self, path=None, revision=None, branch='master', since_revision=None, since=None, sort=True):
if revision == self.null_revision:
return []
if path is not None:
path = clean_path(path)
if since_revision:
ctime = self.revision(since_revision).commit_time
if since:
since = max(ctime, since)
else:
since = ctime
if revision or branch:
pending = set([self._get_commit(revision, branch).id])
else:
pending = set(self._repo.get_refs().values())
visited = set()
result = []
repo = self.repo
while pending:
commit_id = pending.pop()
commit = self.revision(commit_id)
if commit_id in visited:
continue
visited.add(commit_id)
if since and since > commit.commit_time:
continue
if commit_id != since_revision:
pending.update(commit._commit.parents)
if path:
tree = repo[commit._commit.tree]
found = False
parents = commit._commit.parents
for parent in parents:
parent_tree = repo[repo[parent].tree]
if not is_same_object(repo, tree, parent_tree, path):
found = True
break
if not parents and get_by_path(repo, tree, path):
found = True
if not found:
continue
result.append(commit)
if sort:
result.sort(key=attrgetter('commit_time'), reverse=True)
return result
def do_read(self, path, revision=None, branch='master'):
path = clean_path(path)
repo = self.repo
if revision == self.null_revision:
raise FileDoesNotExist(self, "'%s' does not exist at revision null" % path)
c = self._get_commit(revision, branch)
obj = get_by_path(repo, repo[c.tree], path)
if not obj:
raise FileDoesNotExist(self, "'%s' does not exist" % path)
if not isinstance(obj, Blob):
raise FileDoesNotExist(self, "'%s' is not a regular file" % path)
data = obj.as_pretty_string()
return data
def do_commit(self, message='', author=None, committer=None, branch='master', parent=None):
if isinstance(message, unicode):
message = message.encode('utf-8')
repo = self.repo
try:
parent = self._get_commit(parent, branch)
root = repo[parent.tree]
except BranchDoesNotExist:
if branch == 'master': # initial commit
root = Tree()
else:
raise
cache = {}
paths = set()
objects = set()
for path, (action, data) in self.changes.iteritems():
path = clean_path(path)
paths.add(path)
dirname, filename = os.path.split(path)
trees = self._collect(root, dirname, cache)
if action == WRITE:
blob = Blob.from_string(data)
trees[-1][2][filename] = (self.file_mode, blob.id)
cache[blob.id] = blob
elif action == DELETE:
del trees[-1][2][filename]
elif action == RENAME:
old = self._collect(root, data, cache)
mode, name, obj = old[-1]
del old[-2][2][name]
trees[-1][2][filename] = (mode, obj.id)
cache.update(self._link(old[:-1]))
paths.add(data)
cache.update(self._link(trees))
else:
objects.add(root)
# collect all objects that have to be committed
for path in paths:
objects.update([obj for mode, name, obj in self._collect(root, path, cache)])
# create the commit
c = Commit()
if parent:
c.parents = [parent.id]
c.tree = root.id
c.committer = committer or self.committer
c.author = author or c.committer
t = time.localtime()
c.commit_time = c.author_time = int(time.mktime(t))
c.commit_timezone = c.author_timezone = t.tm_isdst * 3600 - time.timezone
c.encoding = "UTF-8"
c.message = message
objects.add(c)
# write everything to disk
for obj in objects:
repo.object_store.add_object(obj)
repo.refs['refs/heads/%s' % branch] = c.id
return DulwichCommit(self, c)
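# A minimal usage sketch (paths and payloads assumed for the example;
# self.changes is populated by the BaseBackend write/rename/delete API):
#
#   backend = Backend(...)  # constructed with a repository path via BaseBackend
#   backend.init_repo()
#   backend.changes = {'docs/readme.txt': (WRITE, 'hello world\n')}
#   commit = backend.do_commit(message='initial import',
#                              committer='Alice <alice@example.com>')
#   print backend.do_read('docs/readme.txt', branch='master')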
| |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
import torch
from torch.nn import functional as F
from ..utils import cat
from ..utils import cat_bbox
from ..utils import nonzero
from ..utils import smooth_l1_loss
from .matcher import Matcher
from .target_preparator import TargetPreparator
class FastRCNNTargetPreparator(TargetPreparator):
"""
This class returns labels and regression targets for Fast R-CNN
"""
def index_target(self, target, index):
target = target.copy_with_fields("labels")
return target[index]
def prepare_labels(self, matched_targets_per_image, anchors_per_image):
matched_idxs = matched_targets_per_image.get_field("matched_idxs")
labels_per_image = matched_targets_per_image.get_field("labels")
labels_per_image = labels_per_image.to(dtype=torch.int64)
# Label background (below the low threshold)
bg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
labels_per_image[bg_inds] = 0
# Label ignore proposals (between low and high thresholds)
ignore_inds = matched_idxs == Matcher.BETWEEN_THRESHOLDS
labels_per_image[ignore_inds] = -1 # -1 is ignored by sampler
return labels_per_image
class FastRCNNLossComputation(object):
"""
Computes the loss for Faster R-CNN.
Also supports FPN
"""
def __init__(self, target_preparator, fg_bg_sampler):
"""
Arguments:
target_preparator: an instance of TargetPreparator
fg_bg_sampler: an instance of BalancedPositiveNegativeSampler
"""
self.target_preparator = target_preparator
self.fg_bg_sampler = fg_bg_sampler
def subsample(self, anchors, targets):
"""
This method performs the positive/negative sampling, and returns
the sampled anchors.
Note: this function keeps a state.
Arguments:
anchors (list of list of BoxList)
targets (list of BoxList)
"""
labels, regression_targets = self.target_preparator(anchors, targets)
sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
# flip anchors to be images -> feature map levels
if anchors:
device = anchors[0][0].bbox.device
anchors = list(zip(*anchors))
levels = [
torch.tensor(
[i for i, n in enumerate(anchor) for _ in range(n.bbox.shape[0])], device=device
)
for anchor in anchors
]
num_levels = len(anchors[0])
num_images = len(anchors)
# concatenate all anchors for the same image
anchors = [cat_bbox(anchors_per_image) for anchors_per_image in anchors]
# add corresponding label information to the bounding boxes
# this can be used with `keep_only_positive_boxes` in order to
# restrict the set of boxes to be used during other steps (Mask R-CNN
# for example)
for labels_per_image, anchors_per_image in zip(labels, anchors):
anchors_per_image.add_field("labels", labels_per_image)
sampled_inds = []
sampled_image_levels = []
# distribute sampled anchors, which were obtained on all feature maps
# concatenated via the fg_bg_sampler, back into individual feature map levels
for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
zip(sampled_pos_inds, sampled_neg_inds)
):
img_sampled_inds = pos_inds_img | neg_inds_img
anchors_per_image = anchors[img_idx][img_sampled_inds]
sampled_levels = levels[img_idx].index_select(0, nonzero(img_sampled_inds)[0])
# TODO replace with bincount because indices in the same level
# are packed together
anchors_per_level_per_image = []
sampled_image_level_temp = []
for level in range(num_levels):
level_idx = nonzero(sampled_levels == level)[0]
anchors_per_level_per_image.append(anchors_per_image[level_idx])
sampled_image_level_temp.append(torch.full_like(level_idx, img_idx))
anchors[img_idx] = anchors_per_level_per_image
sampled_inds.append(img_sampled_inds)
sampled_image_levels.append(sampled_image_level_temp)
# flip back to original format feature map level -> images
anchors = list(zip(*anchors))
labels = torch.cat(labels, dim=0)
regression_targets = torch.cat(regression_targets, dim=0)
self._labels = labels
self._regression_targets = regression_targets
self._sampled_pos_inds = sampled_pos_inds
self._sampled_neg_inds = sampled_neg_inds
self._sampled_inds = sampled_inds
# find permutation that brings the concatenated representation in the order
# that first joins the images for the same level, and then concatenates the
# levels into the representation obtained by concatenating first the feature maps
# and then the images
sampled_image_levels = list(zip(*sampled_image_levels))
sampled_image_levels = cat([cat(l, dim=0) for l in sampled_image_levels], dim=0)
permute_inds = cat(
[
nonzero(sampled_image_levels == img_idx)[0]
for img_idx in range(num_images)
],
dim=0,
)
self._permute_inds = permute_inds
return anchors
def __call__(self, class_logits, box_regression):
"""
Computes the loss for Faster R-CNN.
This requires that the subsample method has been called beforehand.
Arguments:
class_logits (list of tensor)
box_regression (list of tensor)
"""
class_logits = cat(class_logits, dim=0)
box_regression = cat(box_regression, dim=0)
device = class_logits.device
if not hasattr(self, "_labels"):
raise RuntimeError("subsample needs to be called before")
labels = self._labels
regression_targets = self._regression_targets
sampled_pos_inds = torch.cat(self._sampled_pos_inds, dim=0)
sampled_neg_inds = torch.cat(self._sampled_neg_inds, dim=0)
sampled_inds = torch.cat(self._sampled_inds, dim=0)
permute_inds = self._permute_inds
assert len(permute_inds) == len(class_logits)
class_logits = class_logits[permute_inds]
box_regression = box_regression[permute_inds]
# delete cached elements
for attr in [
"_labels",
"_regression_targets",
"_sampled_pos_inds",
"_sampled_neg_inds",
"_sampled_inds",
"_permute_inds",
]:
delattr(self, attr)
# get indices of the positive examples in the subsampled space
markers = torch.arange(sampled_inds.sum(), device=device)
marked_sampled_inds = torch.zeros(
sampled_inds.shape[0], dtype=torch.int64, device=device
)
marked_sampled_inds[sampled_inds] = markers
sampled_pos_inds_subset = marked_sampled_inds[sampled_pos_inds]
sampled_pos_inds = nonzero(sampled_pos_inds)[0]
sampled_neg_inds = nonzero(sampled_neg_inds)[0]
sampled_inds = nonzero(sampled_inds)[0]
classification_loss = F.cross_entropy(class_logits, labels[sampled_inds])
# get indices that correspond to the regression targets for
# the corresponding ground truth labels, to be used with
# advanced indexing
labels_pos = labels[sampled_pos_inds]
map_inds = 4 * labels_pos[:, None] + torch.tensor([0, 1, 2, 3], device=device)
box_loss = smooth_l1_loss(
box_regression[sampled_pos_inds_subset[:, None], map_inds],
regression_targets[sampled_pos_inds],
size_average=False,
beta=1,
) / (sampled_inds.numel())
return classification_loss, box_loss
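# A small numeric illustration of the advanced indexing used above (values
# assumed): box_regression holds 4 regression values per class per box, laid
# out as [c0_x, c0_y, c0_w, c0_h, c1_x, ...]. For a positive box whose ground
# truth label is 3, map_inds = 4 * 3 + [0, 1, 2, 3] = [12, 13, 14, 15], so
# box_regression[row, map_inds] selects exactly the four values predicted for
# class 3.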
# FIXME merge this with FastRCNNLossComputation
class FastRCNNOHEMLossComputation(object):
"""
This class computes the Fast R-CNN loss
in an OHEM (online hard example mining) manner.
"""
def __init__(self, target_preparator, fg_bg_sampler):
self.target_preparator = target_preparator
self.fg_bg_sampler = fg_bg_sampler
def __call__(self, anchors, class_logits, box_regression, targets):
assert len(anchors) == 1, "only single feature map supported"
assert len(class_logits) == 1, "only single feature map supported"
anchors = anchors[0]
class_logits = class_logits[0]
box_regression = box_regression[0]
# TODO test if this works for multi-feature maps
# assert len(anchors) == len(class_logits)
# class_logits = cat(class_logits, dim=0)
# box_regression = cat(box_regression, dim=0)
device = class_logits.device
labels, regression_targets = self.target_preparator(anchors, targets)
sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
sampled_pos_inds = nonzero(torch.cat(sampled_pos_inds, dim=0))[0]
sampled_neg_inds = nonzero(torch.cat(sampled_neg_inds, dim=0))[0]
sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)
labels = torch.cat(labels, dim=0)
regression_targets = torch.cat(regression_targets, dim=0)
classification_loss = F.cross_entropy(
class_logits[sampled_inds], labels[sampled_inds]
)
# FIXME workaround because can't unsqueeze empty tensor in PyTorch
# when there are no positive labels
if len(sampled_pos_inds) == 0:
box_loss = torch.tensor(0., device=device, requires_grad=True)
return classification_loss, box_loss
# get indices that correspond to the regression targets for
# the corresponding ground truth labels, to be used with
# advanced indexing
labels_pos = labels[sampled_pos_inds]
map_inds = 4 * labels_pos[:, None] + torch.tensor([0, 1, 2, 3], device=device)
box_loss = smooth_l1_loss(
box_regression[sampled_pos_inds[:, None], map_inds],
regression_targets[sampled_pos_inds],
size_average=False,
beta=1,
) / (sampled_inds.numel())
return classification_loss, box_loss
| |
# -*- coding: utf-8 -*-
# !/usr/bin/env python
#
# Copyright (c) 2017 Intel Corp.
#
"""
This module creates the interactive command line parser and executes commands.
"""
from IPython.core.magic import (Magics, magics_class, line_magic)
from IPython import get_ipython
from IPython.core.magic_arguments import (argument, magic_arguments,
parse_argstring)
from IPython.core.completerlib import quick_completer
from IPython.terminal.prompts import Prompts, Token
from .command_invoker import CommandInvoker
class CustomPrompt(Prompts):
"""Custom Prompt, this prompt changes with the nodes being set"""
def in_prompt_tokens(self, cli=None):
line_number = self.shell.execution_count - CtrlCommands.curLine
return [(Token.Prompt, CtrlCommands.NODES + ' ['),
(Token.PromptNum, str(line_number)),
(Token.Prompt, ']: ')]
class CtrlPrompt(Prompts):
"""Custom Prompt, makes the default prompt Ctrl[i]"""
def in_prompt_tokens(self, cli=None):
line_number = CtrlCommands.ctrlLine + (self.shell.execution_count - CtrlCommands.curLine)
return[(Token.Prompt, 'Ctrl ['),
(Token.PromptNum, str(line_number)),
(Token.Prompt, ']: ')]
Command_menu = '\nActSys specific commands you can use are:\n' \
'\tpower Power on/off/cycle\n' \
'\tresource Add or remove resource from resource pool\n' \
'\tprocess Process list/kill on a node in a cluster\n' \
'\tget Get powercap/freq value of a node\n' \
'\tset Set powercap/freq value of a node\n' \
'\tservice Check, start or stop services specified in the configuration file\n' \
'\tprovision Adding, setting and removing provisioning options for devices\n' \
'\tdiag Launching diagnostic tests on devices\n' \
'\tbios Update or get version of bios on specified nodes/group\n' \
'\tsensor Get specified sensor value on specified nodes/group\n' \
'\tjob Launch, check, retrieve or cancel job\n' \
'\nInteractive cli commands to select/clear nodes are:\n' \
'\tselect Select node or group\n' \
'\tclear_select Clear selection. Provide device name for each command\n' \
'For help on these commands type <command_name>?. For example to get\n' \
'help on power type "power?" and enter it. Type "menu" to see the command list.\n'
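# An illustrative interactive session (node names assumed for the example):
#
#   Ctrl [1]: select node[01-04]
#   node[01-04] [2]: power on
#   node[01-04] [3]: resource check
#   node[01-04] [4]: clear_select
#
# Once a selection is made the prompt switches to the selected node regex and
# commands default to those nodes unless -d/--device is given.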
@magics_class
class CtrlCommands(Magics):
"""All the Ctrl Commands"""
NODES = None
curLine = 0
ctrlLine = 0
ctrlPrompt = True
def __init__(self, shell):
# Constructor for the CtrlCommands magics class; sets up completers and the command invoker.
super(CtrlCommands, self).__init__(shell)
self.add_completer_options()
self.ctrl_command_invoker = CommandInvoker()
self.device_name = None
CtrlCommands.curLine = 0
CtrlCommands.ctrlLine = 0
CtrlCommands.ctrlPrompt = True
@line_magic
@magic_arguments()
@argument('node_regex', help='Set nodes to be used for all commands. '
'Use "clear_select" to unset')
def select(self, args):
"""Set node regex"""
parse_args = parse_argstring(CtrlCommands.select, args)
if self.check_valid_devices(parse_args.node_regex, self):
CtrlCommands.NODES = parse_args.node_regex
else:
return
ipy = get_ipython()
if CtrlCommands.ctrlPrompt:
CtrlCommands.ctrlLine = ipy.execution_count
CtrlCommands.ctrlPrompt = False
CtrlCommands.curLine = ipy.execution_count
ipy.prompts = CustomPrompt(ipy)
@staticmethod
@line_magic
def clear_select(self):
"""Return to default prompt"""
CtrlCommands.NODES = None
CtrlCommands.ctrlPrompt = True
ipy = get_ipython()
CtrlCommands.curLine = ipy.execution_count
ipy.prompts = CtrlPrompt(ipy)
@line_magic
@magic_arguments()
@argument('subcommand', help='on, off, cycle, bios, efi, hdd, pxe, cdrom, removable',
choices=('on', 'off', 'cycle', 'bios', 'efi', 'hdd', 'pxe', 'cdrom', 'removable'))
@argument('-o', '--outlet', help='Power off with pdu outlet')
@argument('-d', '--device', help='Device name. Required if nodename is not set')
@argument('-f', '--force', help='This option will allow the user to force the Power On/Off/Reboot')
def power(self, args):
"""Power management commands """
parse_args = parse_argstring(CtrlCommands.power, args)
command_result = self.ctrl_command_invoker.common_cmd_invoker(
self.get_device(parse_args), parse_args.subcommand, force=parse_args.force, outlet=parse_args.outlet)
self.handle_command_result(self, command_result)
@line_magic
@magic_arguments()
@argument('action', help='add or remove',
choices=('add', 'remove', 'check'))
@argument('-d', '--device', help='Device name. Required if nodename is not set')
def resource(self, args):
"""Resource management commands"""
parse_args = parse_argstring(CtrlCommands.resource, args)
if parse_args.action == 'add':
command_result = self.ctrl_command_invoker.resource_add(
self.get_device(parse_args))
elif parse_args.action == 'remove':
command_result = self.ctrl_command_invoker.resource_remove(
self.get_device(parse_args))
elif parse_args.action == 'check':
command_result = self.ctrl_command_invoker.resource_check(
self.get_device(parse_args))
return self.handle_command_result(self, command_result)
@line_magic
@magic_arguments()
@argument('action', help='status, start, stop', choices=('status', 'start', 'stop'))
@argument('-d', '--device', help='Device name. Required if nodename is not set')
def service(self, args):
"""Service management commands """
parse_args = parse_argstring(CtrlCommands.service, args)
if parse_args.action == 'status':
command_result = self.ctrl_command_invoker.service_status(
self.get_device(parse_args))
elif parse_args.action == 'start':
command_result = self.ctrl_command_invoker.service_on(
self.get_device(parse_args))
elif parse_args.action == 'stop':
command_result = self.ctrl_command_invoker.service_off(
self.get_device(parse_args))
self.handle_command_result(self, command_result)
@line_magic
@magic_arguments()
@argument('action', help='add, delete, set', choices=('add', 'delete', 'set'))
@argument('-d', '--device', help='Device name. Required if nodename is not set')
@argument('-ip', '--ip_address', help='IP address')
@argument('-hw', '--hw_address', help='hw address')
@argument('-n', '--net_interface', help='net interface')
@argument('-i', '--image', help='image')
@argument('-b', '--bootstrap', help='bootstrap')
@argument('-f', '--file', help='file')
@argument('-k', '--kernel_args', help='kernel args')
def provision(self, args):
"""Provision management commands """
pa = parse_argstring(CtrlCommands.provision, args)
if pa.action == 'add':
command_result = self.ctrl_command_invoker.provision_add(
self.get_device(pa))
elif pa.action == 'delete':
command_result = self.ctrl_command_invoker.provision_delete(
self.get_device(pa))
elif pa.action == 'set':
command_result = self.ctrl_command_invoker.provision_set(
self.get_device(pa), ip_address=pa.ip_address, hw_address=pa.hw_address, net_interface=pa.net_interface,
image=pa.image, bootstrap=pa.bootstrap, files=pa.file, kernel_args=pa.kernel_args)
self.handle_command_result(self, command_result)
@line_magic
@magic_arguments()
@argument('action', help='inband or oob', choices=('inband', 'oob'))
@argument('-d', '--device', help='Device name. Required if nodename is not set')
@argument('-t', '--test', help='Test name')
@argument('-i', '--image', help='Image')
def diag(self, args):
"""Diag management commands """
parse_args = parse_argstring(CtrlCommands.diag, args)
if parse_args.action == 'inband':
command_result = self.ctrl_command_invoker.diagnostics_inband(self.get_device(parse_args),
test=parse_args.test, image=parse_args.image)
elif parse_args.action == 'oob':
command_result = self.ctrl_command_invoker.diagnostics_oob(
self.get_device(parse_args), test=parse_args.test)
self.handle_command_result(self, command_result)
@line_magic
@magic_arguments()
@argument('subcommand', help='update or get-version', choices=('update', 'get-version'))
@argument('-d', '--device', help='Device name. Required if nodename is not set')
@argument('-i', '--image', help='Specify the bios image')
def bios(self, args):
"""Bios management commands """
parse_args = parse_argstring(CtrlCommands.bios, args)
if parse_args.subcommand == 'update':
command_result = self.ctrl_command_invoker.bios_update(self.get_device(parse_args), parse_args.image)
elif parse_args.subcommand == 'get-version':
command_result = self.ctrl_command_invoker.bios_version(self.get_device(parse_args))
self.handle_command_result(self, command_result)
@line_magic
@magic_arguments()
@argument('subcommand', help='get or get over time', choices=('get', 'get_over_time'))
@argument('sensor', help='Sensor name required')
@argument('-d', '--device', help='Device name. Required if nodename is not set')
@argument('-r', '--sample_rate', help='Samples per second')
@argument('-t', '--time', help='Sampling Duration (seconds)')
def sensor(self, args):
"""Sensor management commands """
parse_args = parse_argstring(CtrlCommands.sensor, args)
s_name = parse_args.sensor
d_name = self.get_device(parse_args)
if parse_args.subcommand == 'get':
command_result = self.ctrl_command_invoker.oob_sensor_get(d_name, s_name)
elif parse_args.subcommand == 'get_over_time':
command_result = self.ctrl_command_invoker.oob_sensor_get_over_time(d_name, s_name, parse_args.sample_rate, parse_args.time)
self.handle_command_result(self, command_result)
@line_magic
@magic_arguments()
@argument('subcommand', help='launch, check, retrieve, cancel', choices=('launch', 'check', 'retrieve', 'cancel'))
@argument('-e', '--script', help='Job script')
@argument('-j', '--job_id', help='Job ID required')
@argument('-nc', '--node_count', help='node count')
@argument('-n', '--node', help='node')
@argument('-o', '--output_file', help='output file')
@argument('-s', '--state', help='state')
def job(self, args):
"""Job management commands"""
pa = parse_argstring(CtrlCommands.job, args)
if pa.subcommand == 'launch':
command_result = self.ctrl_command_invoker.job_launch(pa.script, pa.node_count, pa.node, pa.output_file)
elif pa.subcommand == 'check':
command_result = self.ctrl_command_invoker.job_check(pa.job_id, pa.state)
elif pa.subcommand == 'cancel':
command_result = self.ctrl_command_invoker.job_cancel(pa.job_id)
self.handle_command_result(self, command_result)
@line_magic
@magic_arguments()
@argument('option', help='Get powercap/freq value of node', choices=('powercap', 'freq'))
@argument('-d', '--device', help='Device name. Required if nodename is not set')
def get(self, args):
"""Function to call appropriate get sub-command"""
parse_args = parse_argstring(CtrlCommands.get, args)
if parse_args.option == 'powercap':
print("Command not implemented: Get Powercap Command Called")
else:
print("Command not implemented: Get Freq Command Called")
@line_magic
@magic_arguments()
@argument('option', help='Set powercap/freq value of node', choices=('powercap', 'freq'))
@argument('-d', '--device', help='Device name. Required if nodename is not set')
def set(self, args):
"""Function to call appropriate set sub-command"""
parse_args = parse_argstring(CtrlCommands.set, args)
if parse_args.option == 'powercap':
print("Command not implemented: Set Powercap Command Called")
else:
print("Command not implemented: Set Freq Command Called")
@line_magic
@magic_arguments()
@argument('subcommand', help='Process list/kill in a node', choices=('list', 'kill'))
@argument('-d', '--device', help='Device name. Required if nodename is not set')
def process(self, args):
"""Function to call appropriate process sub-command"""
parse_args = parse_argstring(CtrlCommands.process, args)
if parse_args.subcommand == 'list':
print("Command not implemented: Process List Command called")
else:
print("Command not implemented: Process Kill Command Called")
@line_magic
def menu(self, args):
print('\x1b[2J\x1b[H\n' \
'*************************************************************\n' \
'ActSys via IPython Shell')
print(Command_menu)
@staticmethod
def get_device(parse_args):
"""Gets the device name"""
if parse_args.device:
return parse_args.device
else:
return CtrlCommands.NODES
@staticmethod
def handle_command_result(self, command_result):
"""Handles the command result"""
if isinstance(command_result, list):
num_device = 0
num_failed_device = 0
num_failures = 0
for com_result in command_result:
count = len(self.ctrl_command_invoker.datastore.
expand_device_list(com_result.device_name))
num_device += count
if com_result.return_code != 0:
num_failed_device += count
num_failures += 1
print(com_result)
else:
print(com_result)
print(("Result: {}/{} devices were successful".
format(num_device - num_failed_device, num_device)))
else:
print (command_result)
@staticmethod
def check_valid_devices(device_regex, self):
valid = True
try:
device_list = self.ctrl_command_invoker.datastore.expand_device_list(device_regex)
except self.ctrl_command_invoker.datastore.DeviceListParseError:
print("Error parsing device list")
return False
if not device_list:
print("No valid devices to run this command on.")
return False
for device_name in device_list:
if not self.ctrl_command_invoker.device_exists_in_config(device_name):
print(("{} device does not exist in config.".format(device_name)))
valid = False
return valid
def add_completer_options(self):
"""Sets the tab completion for the options of each command"""
self.complete_command_option('power', ['on', 'off', 'cycle', 'bios', 'efi', 'hdd', 'pxe', 'cdrom', 'removable'])
self.complete_command_option('resource', ['remove', 'add', 'check'])
self.complete_command_option('service', ['status', 'start', 'stop'])
self.complete_command_option('provision', ['add', 'delete', 'set'])
self.complete_command_option('diag', ['inband', 'oob'])
self.complete_command_option('bios', ['update', 'get-version'])
self.complete_command_option('sensor', ['get', 'get_over_time'])
self.complete_command_option('job', ['launch', 'check', 'retrieve', 'cancel'])
self.complete_command_option('get', ['powercap', 'freq'])
self.complete_command_option('set', ['powercap', 'freq'])
self.complete_command_option('process', ['list', 'kill'])
@staticmethod
def complete_command_option(command, options):
"""Sets tab completetion for the commands"""
quick_completer(command, options)
quick_completer('%'+command, options)
try:
IPYTHON_ID = get_ipython()
MAGICS = CtrlCommands(IPYTHON_ID)
IPYTHON_ID.register_magics(MAGICS)
except AttributeError:
print ("Unable to get the IPython shell identifier")
| |
"""Task to generate a report on duplicated articles in the index"""
from portality.tasks.redis_huey import long_running
from portality.app_email import email_archive
from portality.background import BackgroundTask, BackgroundApi
import os
import shutil
import json
import csv
from datetime import datetime
from portality import models
from portality.lib import dates
from portality.core import app, es_connection
from portality.bll.doaj import DOAJ
from portality.bll import exceptions
class ArticleDuplicateReportBackgroundTask(BackgroundTask):
__action__ = "article_duplicate_report"
# Keep a cache of ISSNs to owners
owner_cache = {}
def run(self):
job = self.background_job
params = job.params
# Set up the files we need to run this task - a dir to place the report, and a place to write the article csv
outdir = self.get_param(params, "outdir", "article_duplicates_" + dates.today())
job.add_audit_message("Saving reports to " + outdir)
if not os.path.exists(outdir):
os.makedirs(outdir)
# Location for our interim CSV file of articles
tmpdir = self.get_param(params, "tmpdir", 'tmp_article_duplicate_report')
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
tmp_csvname = self.get_param(params, "article_csv", False)
tmp_csvpath, total = self._make_csv_dump(tmpdir, tmp_csvname)
# Initialise our reports
global_reportfile = 'duplicate_articles_global_' + dates.today() + '.csv'
global_reportpath = os.path.join(outdir, global_reportfile)
f = open(global_reportpath, "w", encoding="utf-8")
global_report = csv.writer(f)
header = ["article_id", "article_created", "article_doi", "article_fulltext", "article_owner", "article_issns", "article_in_doaj", "n_matches", "match_type", "match_id", "match_created", "match_doi", "match_fulltext", "match_owner", "match_issns", "match_in_doaj", "owners_match", "titles_match", "article_title", "match_title"]
global_report.writerow(header)
noids_reportfile = 'noids_' + dates.today() + '.csv'
noids_reportpath = os.path.join(outdir, noids_reportfile)
g = open(noids_reportpath, "w", encoding="utf-8")
noids_report = csv.writer(g)
header = ["article_id", "article_created", "article_owner", "article_issns", "article_in_doaj"]
noids_report.writerow(header)
# Record the sets of duplicated articles
global_matches = []
a_count = 0
articleService = DOAJ.articleService()
# Read back in the article csv file we created earlier
with open(tmp_csvpath, 'r', encoding='utf-8') as t:
article_reader = csv.reader(t)
start = datetime.now()
estimated_finish = ""
for a in article_reader:
if a_count > 1 and a_count % 100 == 0:
n = datetime.now()
diff = (n - start).total_seconds()
expected_total = ((diff / a_count) * total)
estimated_finish = dates.format(dates.after(start, expected_total))
a_count += 1
article = models.Article(_source={'id': a[0], 'created_date': a[1], 'bibjson': {'identifier': json.loads(a[2]), 'link': json.loads(a[3]), 'title': a[4]}, 'admin': {'in_doaj': json.loads(a[5])}})
# Get the global duplicates
try:
global_duplicates = articleService.discover_duplicates(article, results_per_match_type=10000, include_article = False)
except exceptions.DuplicateArticleException:
# this means the article did not have any ids that could be used for deduplication
owner = self._lookup_owner(article)
noids_report.writerow([article.id, article.created_date, owner, ','.join(article.bibjson().issns()), article.is_in_doaj()])
continue
dupcount = 0
if global_duplicates:
# Look up an article's owner
owner = self._lookup_owner(article)
# Deduplicate the DOI and fulltext duplicate lists
s = set([article.id] + [d.id for d in global_duplicates.get('doi', []) + global_duplicates.get('fulltext', [])])
# remove article's own id from global_duplicates
dupcount = len(s)-1
if s not in global_matches:
self._write_rows_from_duplicates(article, owner, global_duplicates, global_report)
global_matches.append(s)
app.logger.debug('{0}/{1} {2} {3} {4} {5}'.format(a_count, total, article.id, dupcount, len(global_matches), estimated_finish))
job.add_audit_message('{0} articles processed for duplicates. {1} global duplicate sets found.'.format(a_count, len(global_matches)))
f.close()
g.close()
# Delete the transient temporary files.
shutil.rmtree(tmpdir)
# Email the reports if that parameter has been set.
send_email = self.get_param(params, "email", False)
if send_email:
archive_name = "article_duplicates_" + dates.today()
email_archive(outdir, archive_name)
job.add_audit_message("email alert sent")
else:
job.add_audit_message("no email alert sent")
@classmethod
def _make_csv_dump(self, tmpdir, filename):
# Connection to the ES index
conn = es_connection
if not filename:
filename = 'tmp_articles_' + dates.today() + '.csv'
filename = os.path.join(tmpdir, filename)
with open(filename, 'w', encoding='utf-8') as t:
count = self._create_article_csv(conn, t)
return filename, count
@classmethod
def _lookup_owner(self, article):
# Look up an article's owner
journal = article.get_journal()
owner = None
if journal:
owner = journal.owner
for issn in journal.bibjson().issns():
if issn not in self.owner_cache:
self.owner_cache[issn] = owner
return owner
@staticmethod
def _create_article_csv(connection, file_object):
""" Create a CSV file with the minimum information we require to find and report duplicates. """
csv_writer = csv.writer(file_object, quoting=csv.QUOTE_ALL)
# Scroll through all articles, newest to oldest
scroll_query = {
"_source": [
"id",
"created_date",
"bibjson.identifier",
"bibjson.link",
"bibjson.title",
"admin.in_doaj"
],
"query": {
"match_all": {}
},
"sort": [
{"last_updated": {"order": "desc"}}
]
}
count = 0
for a in models.Article.iterate(q=scroll_query, page_size=1000, keepalive='1m'):
row = [
a['id'],
a['created_date'],
json.dumps(a['bibjson']['identifier']),
json.dumps(a['bibjson'].get('link', [])),
a['bibjson'].get('title', ''),
json.dumps(a.get('admin', {}).get('in_doaj', ''))
]
csv_writer.writerow(row)
count += 1
return count
def _summarise_article(self, article, owner=None):
a_doi = article.bibjson().get_identifiers('doi')
a_fulltext = article.bibjson().get_urls('fulltext')
o = owner
if o is None:
for i in article.bibjson().issns():
o = self.owner_cache.get(i, None)
if o is not None:
break
return {
'created': article.created_date,
'doi': a_doi[0] if len(a_doi) > 0 else '',
'fulltext': a_fulltext[0] if len(a_fulltext) > 0 else '',
'owner': o if o is not None else '',
'issns': ','.join(article.bibjson().issns()),
'title': article.bibjson().title,
'in_doaj': article.is_in_doaj()
}
def _write_rows_from_duplicates(self, article, owner, duplicates, report):
dups = {}
for d in duplicates.get('doi', []):
dups[d.id] = self._summarise_article(d, owner=owner)
dups[d.id]['match_type'] = 'doi'
for d in duplicates.get('fulltext', []):
if d.id in dups:
dups[d.id]['match_type'] = 'doi+fulltext'
else:
dups[d.id] = self._summarise_article(d, owner=owner)
dups[d.id]['match_type'] = 'fulltext'
# write rows to report
a_summary = self._summarise_article(article, owner)
for k, v in dups.items():
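            # One row per duplicate: the source article's summary (id, created,
            # doi, fulltext, owner, issns, in_doaj, number of duplicates), then
            # the matching article's summary plus owner/title comparison flags.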
row = [article.id,
a_summary['created'],
a_summary['doi'],
a_summary['fulltext'],
a_summary['owner'],
a_summary['issns'],
a_summary['in_doaj'],
str(len(dups)),
v['match_type'],
k,
v['created'],
v['doi'],
v['fulltext'],
v['owner'],
v['issns'],
v['in_doaj'],
str(a_summary['owner'] == v['owner']),
str(a_summary['title'] == v['title']),
a_summary['title'] if a_summary['title'] != v['title'] else '',
v['title'] if a_summary['title'] != v['title'] else '']
report.writerow(row)
def cleanup(self):
"""
Cleanup after a successful OR failed run of the task
:return:
"""
pass
@classmethod
def prepare(cls, username, **kwargs):
"""
Take an arbitrary set of keyword arguments and return an instance of a BackgroundJob,
or fail with a suitable exception
:param kwargs: arbitrary keyword arguments pertaining to this task type
:return: a BackgroundJob instance representing this task
"""
# First prepare a job record
job = models.BackgroundJob()
job.user = username
job.action = cls.__action__
params = {}
cls.set_param(params, "outdir", kwargs.get("outdir", "article_duplicates_" + dates.today()))
cls.set_param(params, "email", kwargs.get("email", False))
cls.set_param(params, "tmpdir", kwargs.get("tmpdir", "tmp_article_duplicates_" + dates.today()))
cls.set_param(params, "article_csv", kwargs.get("article_csv", False))
job.params = params
return job
@classmethod
def submit(cls, background_job):
"""
Submit the specified BackgroundJob to the background queue
:param background_job: the BackgroundJob instance
:return:
"""
background_job.save()
article_duplicate_report.schedule(args=(background_job.id,), delay=10)
'''
@long_running.periodic_task(schedule("article_duplicate_report"))
def scheduled_article_cleanup_sync():
user = app.config.get("SYSTEM_USERNAME")
job = ArticleDuplicateReportBackgroundTask.prepare(user)
ArticleDuplicateReportBackgroundTask.submit(job)
'''
@long_running.task()
def article_duplicate_report(job_id):
job = models.BackgroundJob.pull(job_id)
task = ArticleDuplicateReportBackgroundTask(job)
BackgroundApi.execute(task)
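# Hedged usage sketch (not part of the original module): how this task is
# typically driven from a script. The keyword arguments mirror the defaults
# set in prepare(); the username lookup assumes the same SYSTEM_USERNAME
# config key used by the commented-out scheduler above.
def _example_run_duplicate_report():
    user = app.config.get("SYSTEM_USERNAME")
    job = ArticleDuplicateReportBackgroundTask.prepare(
        user,
        outdir="article_duplicates_" + dates.today(),
        email=False)
    ArticleDuplicateReportBackgroundTask.submit(job)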
| |
#!/usr/bin/env python
import datetime
from jsk_arc2017_common.msg import Content
from jsk_arc2017_common.msg import ContentArray
from jsk_arc2017_common.srv import UpdateJSON
from jsk_arc2017_common.srv import UpdateJSONResponse
import json
import os
import os.path as osp
import rospy
import shutil
from std_msgs.msg import String
from std_srvs.srv import Trigger
from std_srvs.srv import TriggerResponse
import threading
class JSONSaver(threading.Thread):
def __init__(self):
super(JSONSaver, self).__init__(target=self._run_services)
json_dir = rospy.get_param('~json_dir', None)
output_dir = rospy.get_param('~output_dir', None)
if json_dir is None:
rospy.logerr('must set json dir path to ~json_dir')
return
if output_dir is None:
rospy.logerr('must set output dir path to ~output_dir')
return
now = datetime.datetime.now()
output_dir = osp.join(output_dir, now.strftime('%Y%m%d_%H%M%S'))
if not osp.exists(output_dir):
os.makedirs(output_dir)
location_path = osp.join(json_dir, 'item_location_file.json')
self.output_json_path = osp.join(
output_dir, 'item_location_file.json')
if osp.exists(location_path):
shutil.copy(location_path, self.output_json_path)
with open(location_path) as location_f:
data = json.load(location_f)
        else:
            rospy.logerr(
                'item_location_file.json does not exist: {}'.format(
                    location_path))
            return
self.bin_contents = {}
for bin_ in data['bins']:
self.bin_contents[bin_['bin_id']] = bin_['contents']
self.tote_contents = data['tote']['contents']
self.cardboard_contents = {}
self.cardboard_ids = {}
# this is for pick task
# order file is only used in pick task
order_path = osp.join(json_dir, 'order_file.json')
if osp.exists(order_path):
output_order_path = osp.join(output_dir, 'order_file.json')
shutil.copy(order_path, output_order_path)
order_path = osp.join(json_dir, 'order_file.json')
with open(order_path) as order_f:
orders = json.load(order_f)['orders']
for order in orders:
size_id = order['size_id']
if len(order['contents']) == 2:
cardboard_id = 'A'
elif len(order['contents']) == 3:
cardboard_id = 'B'
else: # len(order['contents']) == 5
cardboard_id = 'C'
self.cardboard_ids[cardboard_id] = size_id
cardboard_contents = {}
for box in data['boxes']:
size_id = box['size_id']
cardboard_contents[size_id] = box['contents']
for key in 'ABC':
size_id = self.cardboard_ids[key]
self.cardboard_contents[key] = cardboard_contents[size_id]
# publish stamped json_dir
self.pub = rospy.Publisher('~output/json_dir', String, queue_size=1)
self.pub_bin = rospy.Publisher(
'~output/bin_contents',
ContentArray,
queue_size=1)
rate = rospy.get_param('~rate', 1)
self.timer_pub = rospy.Timer(rospy.Duration(1. / rate), self._cb_pub)
self.lock = threading.Lock()
self.daemon = True
def _cb_pub(self, event):
self.pub.publish(String(data=osp.dirname(self.output_json_path)))
contents_msg = ContentArray()
contents = []
for bin_ in 'ABC':
msg = Content()
msg.bin = bin_
msg.items = self.bin_contents[bin_]
contents.append(msg)
contents_msg.header.stamp = rospy.Time.now()
contents_msg.contents = contents
self.pub_bin.publish(contents_msg)
def _run_services(self):
self.services = []
self.services.append(rospy.Service(
'~update_json', UpdateJSON, self._update))
self.services.append(rospy.Service(
'~save_json', Trigger, self._save))
def _update(self, req):
is_updated = self._update_location(req)
is_saved = self._save_json()
is_updated = is_saved and is_updated
return UpdateJSONResponse(updated=is_updated)
def _save(self, req):
is_saved = self._save_json()
return TriggerResponse(success=is_saved)
def _save_json(self):
separators = (',', ': ')
self.lock.acquire()
is_saved = True
boxes = []
if len(self.cardboard_contents.keys()) > 0:
for key in 'ABC':
boxes.append({
'size_id': self.cardboard_ids[key],
'contents': self.cardboard_contents[key]
})
location = {
'bins': [
{
'bin_id': 'A',
'contents': self.bin_contents['A']
},
{
'bin_id': 'B',
'contents': self.bin_contents['B']
},
{
'bin_id': 'C',
'contents': self.bin_contents['C']
},
],
'boxes': boxes,
'tote': {
'contents': self.tote_contents,
}
}
try:
with open(self.output_json_path, 'w+') as f:
json.dump(
location, f, sort_keys=True,
indent=4, separators=separators)
except Exception:
rospy.logerr('could not save json in {}'
.format(self.output_json_path))
is_saved = False
self.lock.release()
return is_saved
def _update_location(self, req):
is_updated = True
self.lock.acquire()
item = req.item
src = req.src
dst = req.dst
if src[:3] == 'bin':
src = src[4]
try:
self.bin_contents[src].remove(item)
except Exception:
rospy.logerr('{0} does not exist in bin {1}'.format(item, src))
self.lock.release()
return False
elif src[:9] == 'cardboard':
src = src[10]
try:
self.cardboard_contents[src].remove(item)
except Exception:
                rospy.logerr('{0} does not exist in cardboard {1}'.format(item, src))
self.lock.release()
return False
elif src == 'tote':
try:
self.tote_contents.remove(item)
except Exception:
rospy.logerr('{} does not exist in tote'.format(item))
self.lock.release()
return False
else:
            rospy.logerr('Invalid src request {}'.format(src))
is_updated = False
if dst[:3] == 'bin':
dst = dst[4]
self.bin_contents[dst].append(item)
elif dst[:9] == 'cardboard':
dst = dst[10]
self.cardboard_contents[dst].append(item)
elif dst == 'tote':
self.tote_contents.append(item)
else:
            rospy.logerr('Invalid dst request {}'.format(dst))
is_updated = False
self.lock.release()
return is_updated
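# Hedged usage sketch (not part of the original node): a minimal client that
# moves one item from bin A to the tote through ~update_json and then forces a
# write with ~save_json. Service names assume the node below is launched with
# its default name "json_saver"; the item name is only an example.
def _example_client():
    rospy.wait_for_service('/json_saver/update_json')
    update = rospy.ServiceProxy('/json_saver/update_json', UpdateJSON)
    resp = update(item='expo_eraser', src='bin_A', dst='tote')
    rospy.loginfo('location updated: {}'.format(resp.updated))
    save = rospy.ServiceProxy('/json_saver/save_json', Trigger)
    rospy.loginfo('json saved: {}'.format(save().success))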
if __name__ == '__main__':
rospy.init_node('json_saver')
json_saver = JSONSaver()
json_saver.start()
rospy.spin()
| |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import re
import time
import unittest
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from nose.plugins.skip import SkipTest
from nose.tools import assert_true, assert_false, assert_equal
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import grant_access, add_to_group
from desktop.models import Document
from hadoop import cluster
from hadoop.conf import YARN_CLUSTERS
from hadoop.yarn import resource_manager_api, mapreduce_api, history_server_api
from liboozie.oozie_api_tests import OozieServerProvider
from oozie.models import Workflow
from jobbrowser import models, views
from jobbrowser.conf import SHARE_JOBS
from jobbrowser.models import can_view_job, can_modify_job, Job, LinkJobLogs
LOG = logging.getLogger(__name__)
_INITIALIZED = False
class TestBrowser():
def test_dots_to_camel_case(self):
assert_equal("fooBar", models.dots_to_camel_case("foo.bar"))
assert_equal("fooBarBaz", models.dots_to_camel_case("foo.bar.baz"))
assert_equal("foo", models.dots_to_camel_case("foo"))
assert_equal("foo.", models.dots_to_camel_case("foo."))
def test_get_path(self):
assert_equal("/foo/bar", models.get_path("hdfs://host/foo/bar"))
def test_format_counter_name(self):
assert_equal("Foo Bar", views.format_counter_name("fooBar"))
assert_equal("Foo Bar Baz", views.format_counter_name("fooBarBaz"))
assert_equal("Foo", views.format_counter_name("foo"))
assert_equal("Foo.", views.format_counter_name("foo."))
assert_equal("A Bbb Ccc", views.format_counter_name("A_BBB_CCC"))
def get_hadoop_job_id(oozie_api, oozie_jobid, action_index=1, timeout=60, step=5):
hadoop_job_id = None
start = time.time()
while not hadoop_job_id and time.time() - start < timeout:
time.sleep(step)
hadoop_job_id = oozie_api.get_job(oozie_jobid).actions[action_index].externalId
if not hadoop_job_id:
logs = OozieServerProvider.oozie.get_job_log(oozie_jobid)
msg = "[%d] %s took more than %d to create a job: %s" % (time.time(), oozie_jobid, timeout, logs)
LOG.info(msg)
raise Exception(msg)
return hadoop_job_id
class TestJobBrowserWithHadoop(unittest.TestCase, OozieServerProvider):
requires_hadoop = True
user_count = 0
@classmethod
def setup_class(cls):
OozieServerProvider.setup_class()
def setUp(self):
"""
To clean: creating test1, test2, test3...users
"""
TestJobBrowserWithHadoop.user_count += 1
self.username = 'test' + str(TestJobBrowserWithHadoop.user_count)
self.home_dir = '/user/%s' % self.username
self.cluster.fs.do_as_user(self.username, self.cluster.fs.create_home_dir, self.home_dir)
self.client = make_logged_in_client(username=self.username, is_superuser=False, groupname='test')
self.user = User.objects.get(username=self.username)
grant_access(self.username, 'test', 'jobsub')
grant_access(self.username, 'test', 'jobbrowser')
grant_access(self.username, 'test', 'oozie')
add_to_group(self.username)
self.prev_user = self.cluster.fs.user
self.cluster.fs.setuser(self.username)
self.install_examples()
self.design = self.create_design()
raise SkipTest
# Run the sleep example, since it doesn't require user home directory
design_id = self.design.id
response = self.client.post(reverse('oozie:submit_workflow',
args=[design_id]),
data={u'form-MAX_NUM_FORMS': [u''],
u'form-INITIAL_FORMS': [u'1'],
u'form-0-name': [u'REDUCER_SLEEP_TIME'],
u'form-0-value': [u'1'],
u'form-TOTAL_FORMS': [u'1']},
follow=True)
oozie_jobid = response.context['oozie_workflow'].id
OozieServerProvider.wait_until_completion(oozie_jobid, timeout=120, step=1)
self.hadoop_job_id = get_hadoop_job_id(self.oozie, oozie_jobid, 1)
self.hadoop_job_id_short = views.get_shorter_id(self.hadoop_job_id)
def tearDown(self):
try:
Document.objects.all().delete()
Workflow.objects.all().delete()
# Remove user home directories.
self.cluster.fs.do_as_superuser(self.cluster.fs.rmtree, self.home_dir)
except:
pass
self.cluster.fs.setuser(self.prev_user)
def create_design(self):
if not Document.objects.available_docs(Workflow, self.user).filter(name='sleep_job').exists():
response = self.client.post(reverse('jobsub.views.new_design',
kwargs={'node_type': 'mapreduce'}),
data={'name': 'sleep_job',
'description': '',
'node_type': 'mapreduce',
'jar_path': '/user/hue/oozie/workspaces/lib/hadoop-examples.jar',
'prepares': '[]',
'files': '[]',
'archives': '[]',
'job_properties': '[{\"name\":\"mapred.reduce.tasks\",\"value\":\"1\"},{\"name\":\"mapred.mapper.class\",\"value\":\"org.apache.hadoop.examples.SleepJob\"},{\"name\":\"mapred.reducer.class\",\"value\":\"org.apache.hadoop.examples.SleepJob\"},{\"name\":\"mapred.mapoutput.key.class\",\"value\":\"org.apache.hadoop.io.IntWritable\"},{\"name\":\"mapred.mapoutput.value.class\",\"value\":\"org.apache.hadoop.io.NullWritable\"},{\"name\":\"mapred.output.format.class\",\"value\":\"org.apache.hadoop.mapred.lib.NullOutputFormat\"},{\"name\":\"mapred.input.format.class\",\"value\":\"org.apache.hadoop.examples.SleepJob$SleepInputFormat\"},{\"name\":\"mapred.partitioner.class\",\"value\":\"org.apache.hadoop.examples.SleepJob\"},{\"name\":\"mapred.speculative.execution\",\"value\":\"false\"},{\"name\":\"sleep.job.map.sleep.time\",\"value\":\"0\"},{\"name\":\"sleep.job.reduce.sleep.time\",\"value\":\"${REDUCER_SLEEP_TIME}\"}]'
},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert_equal(response.status_code, 200)
return Document.objects.available_docs(Workflow, self.user).get(name='sleep_job').content_object
def install_examples(self):
global _INITIALIZED
if _INITIALIZED:
return
self.client.post(reverse('oozie:install_examples'))
self.cluster.fs.do_as_user(self.username, self.cluster.fs.create_home_dir, self.home_dir)
self.cluster.fs.do_as_superuser(self.cluster.fs.chmod, self.home_dir, 0777, True)
_INITIALIZED = True
def test_uncommon_views(self):
"""
These views exist, but tend not to be ever called, because they're not in the normal UI.
"""
raise SkipTest
self.client.get("/jobbrowser/clusterstatus")
self.client.get("/jobbrowser/queues")
self.client.get("/jobbrowser/jobbrowser")
def test_failed_jobs(self):
"""
Test jobs with genuine failure, not just killed
"""
# Create design that will fail because the script file isn't there
INPUT_DIR = self.home_dir + '/input'
OUTPUT_DIR = self.home_dir + '/output'
try:
self.cluster.fs.mkdir(self.home_dir + "/jt-test_failed_jobs")
self.cluster.fs.mkdir(INPUT_DIR)
self.cluster.fs.rmtree(OUTPUT_DIR)
except:
pass
response = self.client.post(reverse('jobsub.views.new_design', kwargs={'node_type': 'mapreduce'}), {
'name': ['test_failed_jobs-1'],
'description': ['description test_failed_jobs-1'],
'args': '',
'jar_path': '/user/hue/oozie/workspaces/lib/hadoop-examples.jar',
'prepares': '[]',
'archives': '[]',
'files': '[]',
'job_properties': ['[{"name":"mapred.input.dir","value":"%s"},\
{"name":"mapred.output.dir","value":"%s"},\
{"name":"mapred.mapper.class","value":"org.apache.hadoop.mapred.lib.dne"},\
{"name":"mapred.combiner.class","value":"org.apache.hadoop.mapred.lib.dne"},\
{"name":"mapred.reducer.class","value":"org.apache.hadoop.mapred.lib.dne"}]' % (INPUT_DIR, OUTPUT_DIR)]
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest', follow=True)
# Submit the job
design_dict = json.loads(response.content)
design_id = int(design_dict['id'])
response = self.client.post(reverse('oozie:submit_workflow',
args=[design_id]),
data={u'form-MAX_NUM_FORMS': [u''],
u'form-INITIAL_FORMS': [u'1'],
u'form-0-name': [u'REDUCER_SLEEP_TIME'],
u'form-0-value': [u'1'],
u'form-TOTAL_FORMS': [u'1']},
follow=True)
oozie_jobid = response.context['oozie_workflow'].id
job = OozieServerProvider.wait_until_completion(oozie_jobid, timeout=120, step=1)
self.hadoop_job_id = get_hadoop_job_id(self.oozie, oozie_jobid, 1)
self.hadoop_job_id_short = views.get_shorter_id(self.hadoop_job_id)
# Select only killed jobs (should be absent)
# Taking advantage of the fact new jobs are at the top of the list!
response = self.client.get('/jobbrowser/jobs/?format=json&state=killed')
assert_false(self.hadoop_job_id_short in response.content)
# Select only failed jobs (should be present)
# Map job should succeed. Reduce job should fail.
response = self.client.get('/jobbrowser/jobs/?format=json&state=failed')
assert_true(self.hadoop_job_id_short in response.content)
raise SkipTest # Not compatible with MR2
# The single job view should have the failed task table
response = self.client.get('/jobbrowser/jobs/%s' % (self.hadoop_job_id,))
html = response.content.lower()
assert_true('failed task' in html, html)
# The map task should say success (empty input)
map_task_id = self.hadoop_job_id.replace('job', 'task') + '_m_000000'
response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' % (self.hadoop_job_id, map_task_id))
assert_true('succeed' in response.content)
assert_true('failed' not in response.content)
# The reduce task should say failed
reduce_task_id = self.hadoop_job_id.replace('job', 'task') + '_r_000000'
response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' % (self.hadoop_job_id, reduce_task_id))
assert_true('succeed' not in response.content)
assert_true('failed' in response.content)
# Selecting by failed state should include the failed map
response = self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=failed' % (self.hadoop_job_id,))
assert_true('r_000000' in response.content)
assert_true('m_000000' not in response.content)
def test_jobs_page(self):
# All jobs page and fetch job ID
# Taking advantage of the fact new jobs are at the top of the list!
response = self.client.get('/jobbrowser/jobs/?format=json')
assert_true(self.hadoop_job_id_short in response.content, response.content)
# Make sure job succeeded
response = self.client.get('/jobbrowser/jobs/?format=json&state=completed')
assert_true(self.hadoop_job_id_short in response.content)
response = self.client.get('/jobbrowser/jobs/?format=json&state=failed')
assert_false(self.hadoop_job_id_short in response.content)
response = self.client.get('/jobbrowser/jobs/?format=json&state=running')
assert_false(self.hadoop_job_id_short in response.content)
response = self.client.get('/jobbrowser/jobs/?format=json&state=killed')
assert_false(self.hadoop_job_id_short in response.content)
def test_tasks_page(self):
raise SkipTest
# Test tracker page
early_task_id = self.hadoop_job_id.replace('job', 'task') + '_m_000000'
response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' % (self.hadoop_job_id, early_task_id))
tracker_url = re.search('<a href="(/jobbrowser/trackers/.+?)"', response.content).group(1)
response = self.client.get(tracker_url)
assert_true('Tracker at' in response.content)
def test_job_permissions(self):
# Login as ourself
finish = SHARE_JOBS.set_for_testing(True)
try:
response = self.client.get('/jobbrowser/jobs/?format=json&user=')
assert_true(self.hadoop_job_id_short in response.content)
finally:
finish()
finish = SHARE_JOBS.set_for_testing(False)
try:
response = self.client.get('/jobbrowser/jobs/?format=json&user=')
assert_true(self.hadoop_job_id_short in response.content)
finally:
finish()
# Login as someone else
client_not_me = make_logged_in_client(username='not_me', is_superuser=False, groupname='test')
grant_access("not_me", "test", "jobbrowser")
finish = SHARE_JOBS.set_for_testing(True)
try:
response = client_not_me.get('/jobbrowser/jobs/?format=json&user=')
assert_true(self.hadoop_job_id_short in response.content)
finally:
finish()
finish = SHARE_JOBS.set_for_testing(False)
try:
response = client_not_me.get('/jobbrowser/jobs/?format=json&user=')
assert_false(self.hadoop_job_id_short in response.content)
finally:
finish()
def test_job_counter(self):
raise SkipTest
# Single job page
response = self.client.get('/jobbrowser/jobs/%s' % self.hadoop_job_id)
# Check some counters for single job.
counters = response.context['job'].counters
counters_file_bytes_written = counters['org.apache.hadoop.mapreduce.FileSystemCounter']['counters']['FILE_BYTES_WRITTEN']
assert_true(counters_file_bytes_written['map'] > 0)
assert_true(counters_file_bytes_written['reduce'] > 0)
def test_task_page(self):
raise SkipTest
response = self.client.get('/jobbrowser/jobs/%s/tasks' % (self.hadoop_job_id,))
assert_true(len(response.context['page'].object_list), 4)
# Select by tasktype
response = self.client.get('/jobbrowser/jobs/%s/tasks?tasktype=reduce' % (self.hadoop_job_id,))
assert_true(len(response.context['page'].object_list), 1)
# Select by taskstate
response = self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=succeeded' % (self.hadoop_job_id,))
assert_true(len(response.context['page'].object_list), 4)
# Select by text
response = self.client.get('/jobbrowser/jobs/%s/tasks?tasktext=clean' % (self.hadoop_job_id,))
assert_true(len(response.context['page'].object_list), 1)
def test_job_single_logs_page(self):
raise SkipTest
response = self.client.get('/jobbrowser/jobs/%s/single_logs' % (self.hadoop_job_id))
assert_true('syslog' in response.content, response.content)
assert_true('<div class="tab-pane active" id="logsSysLog">' in response.content or
'<div class="tab-pane active" id="logsStdErr">' in response.content or # Depending on Hadoop
'<div class="tab-pane active" id="logsStdOut">' in response.content, # For jenkins
response.content)
class TestMapReduce1NoHadoop:
def test_acls_job(self):
job = MockMr1Job()
assert_true(can_view_job('test', job))
assert_true(can_modify_job('test', job))
assert_false(can_view_job('test2', job))
assert_false(can_modify_job('test2', job))
class MockMr1Job(Job):
def __init__(self):
self.is_mr2 = False
self._full_job_conf = {
'mapreduce.cluster.acls.enabled': True,
'mapreduce.job.acl-modify-job': 'test',
'mapreduce.job.acl-view-job': 'test'
}
class TestMapReduce2NoHadoop:
def setUp(self):
# Beware: Monkey patching
    if not hasattr(resource_manager_api, 'old_get_resource_manager'):
      resource_manager_api.old_get_resource_manager = resource_manager_api.get_resource_manager
    if not hasattr(mapreduce_api, 'old_get_mapreduce_api'):
      mapreduce_api.old_get_mapreduce_api = mapreduce_api.get_mapreduce_api
    if not hasattr(history_server_api, 'old_get_history_server_api'):
      history_server_api.old_get_history_server_api = history_server_api.get_history_server_api
resource_manager_api.get_resource_manager = lambda: MockResourceManagerApi()
mapreduce_api.get_mapreduce_api = lambda: MockMapreduceApi()
history_server_api.get_history_server_api = lambda: HistoryServerApi()
self.c = make_logged_in_client(is_superuser=False)
grant_access("test", "test", "jobbrowser")
self.user = User.objects.get(username='test')
self.c2 = make_logged_in_client(is_superuser=False, username="test2")
grant_access("test2", "test2", "jobbrowser")
self.user2 = User.objects.get(username='test2')
self.finish = [
YARN_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True),
SHARE_JOBS.set_for_testing(False)
]
assert_true(cluster.is_yarn())
def tearDown(self):
resource_manager_api.get_resource_manager = getattr(resource_manager_api, 'old_get_resource_manager')
mapreduce_api.get_mapreduce_api = getattr(mapreduce_api, 'old_get_mapreduce_api')
history_server_api.get_history_server_api = getattr(history_server_api, 'old_get_history_server_api')
for f in self.finish:
f()
def test_jobs(self):
response = self.c.get('/jobbrowser/?format=json')
response_content = json.loads(response.content)
assert_equal(len(response_content['jobs']), 4)
response = self.c.get('/jobbrowser/jobs/?format=json&text=W=MapReduce-copy2')
response_content = json.loads(response.content)
assert_equal(len(response_content['jobs']), 1)
def test_running_job(self):
response = self.c.get('/jobbrowser/jobs/application_1356251510842_0054')
assert_true('job_1356251510842_0054' in response.content)
assert_true('RUNNING' in response.content)
response = self.c.get('/jobbrowser/jobs/job_1356251510842_0054')
assert_true('job_1356251510842_0054' in response.content)
assert_true('RUNNING' in response.content)
def test_finished_job(self):
response = self.c.get('/jobbrowser/jobs/application_1356251510842_0009')
assert_equal(response.context['job'].jobId, 'job_1356251510842_0009')
response = self.c.get('/jobbrowser/jobs/job_1356251510842_0009')
assert_equal(response.context['job'].jobId, 'job_1356251510842_0009')
def test_spark_job(self):
response = self.c.get('/jobbrowser/jobs/application_1428442704693_0006')
assert_equal(response.context['job'].jobId, 'application_1428442704693_0006')
def test_yarn_job(self):
response = self.c.get('/jobbrowser/jobs/application_1428442704693_0007')
assert_equal(response.context['job'].jobId, 'application_1428442704693_0007')
def job_not_assigned(self):
response = self.c.get('/jobbrowser/jobs/job_1356251510842_0009/job_not_assigned//my_url')
assert_equal(response.context['jobid'], 'job_1356251510842_0009')
assert_equal(response.context['path'], '/my_url')
response = self.c.get('/jobbrowser/jobs/job_1356251510842_0009/job_not_assigned//my_url?format=json')
result = json.loads(response.content)
assert_equal(result['status'], 0)
def test_acls_job(self):
response = self.c.get('/jobbrowser/jobs/job_1356251510842_0054') # Check in perm decorator
assert_true(can_view_job('test', response.context['job']))
assert_true(can_modify_job('test', response.context['job']))
response2 = self.c2.get('/jobbrowser/jobs/job_1356251510842_0054')
    assert_true("don't have permission to access job" in response2.content, response2.content)
assert_false(can_view_job('test2', response.context['job']))
assert_false(can_modify_job('test2', response.context['job']))
def test_kill_job(self):
job_id = 'application_1356251510842_0054'
try:
response = self.c.post('/jobbrowser/jobs/%s/kill?format=json' % job_id)
assert_equal(json.loads(response.content), {"status": 0})
finally:
MockResourceManagerApi.APPS[job_id]['state'] = 'RUNNING'
class MockResourceManagerApi:
APPS = {
'application_1356251510842_0054': {
u'finishedTime': 1356961070119,
u'name': u'oozie:launcher:T=map-reduce:W=MapReduce-copy:A=Sleep:ID=0000004-121223003201296-oozie-oozi-W',
u'amContainerLogs': u'http://localhost:8042/node/containerlogs/container_1356251510842_0054_01_000001/romain',
u'clusterId': 1356251510842,
u'trackingUrl': u'http://localhost:8088/proxy/application_1356251510842_0054/jobhistory/job/job_1356251510842_0054',
u'amHostHttpAddress': u'localhost:8042',
u'startedTime': 1356961057225,
u'queue': u'default',
u'state': u'RUNNING',
u'elapsedTime': 12894,
u'finalStatus': u'UNDEFINED',
u'diagnostics': u'',
u'progress': 100.0,
u'trackingUI': u'History',
u'id': u'application_1356251510842_0054',
u'user': u'test',
# For when the job is KILLED
u'startTime': 1356961057226,
u'finishTime': 1356961057226,
u'applicationType': 'MAPREDUCE'
},
'application_1356251510842_0009': {
u'finishedTime': 1356467118570,
u'name': u'oozie:action:T=map-reduce:W=MapReduce-copy2:A=Sleep:ID=0000002-121223003201296-oozie-oozi-W',
u'amContainerLogs': u'http://localhost:8042/node/containerlogs/container_1356251510842_0009_01_000001/romain',
u'clusterId': 1356251510842,
u'trackingUrl': u'http://localhost:8088/proxy/application_1356251510842_0009/jobhistory/job/job_1356251510842_0009',
u'amHostHttpAddress': u'localhost:8042',
u'startedTime': 1356467081121,
u'queue': u'default',
u'state': u'FINISHED',
u'elapsedTime': 37449,
u'finalStatus': u'SUCCEEDED',
u'diagnostics': u'',
u'progress': 100.0,
u'trackingUI': u'History',
u'id': u'application_1356251510842_0009',
u'user': u'test',
u'applicationType': 'MAPREDUCE'
},
'application_1428442704693_0006': {
u'allocatedMB': 4096,
u'allocatedVCores': 3,
u'amContainerLogs': u'http://localhost:8042/node/containerlogs/container_1428442704693_0006_01_000001/erickt',
u'amHostHttpAddress': u'localhost:8042',
u'applicationTags': u'',
u'applicationType': u'SPARK',
u'clusterId': 1428442704693,
u'diagnostics': u'',
u'elapsedTime': 529040,
u'finalStatus': u'UNDEFINED',
u'finishedTime': 0,
u'id': u'application_1428442704693_0006',
u'memorySeconds': 2138468,
u'name': u'Spark shell',
u'numAMContainerPreempted': 0,
u'numNonAMContainerPreempted': 0,
u'preemptedResourceMB': 0,
u'preemptedResourceVCores': 0,
u'progress': 10.0,
u'queue': u'root.erickt',
u'runningContainers': 3,
u'startedTime': 1428443335161,
u'state': u'RUNNING',
u'trackingUI': u'ApplicationMaster',
u'trackingUrl': u'http://localhost:8088/proxy/application_1428442704693_0006/',
u'user': u'test',
u'vcoreSeconds': 1567,
},
'application_1428442704693_0007': {
u'allocatedMB': -1,
u'allocatedVCores': -1,
u'applicationTags': u'',
u'applicationType': u'YARN',
u'clusterId': 1428442704693,
u'diagnostics': u'',
u'elapsedTime': 4056,
u'finalStatus': u'SUCCEEDED',
u'finishedTime': 1428454945371,
u'id': u'application_1428442704693_0007',
u'memorySeconds': 2290,
u'name': u'UnmanagedAM',
u'numAMContainerPreempted': 0,
u'numNonAMContainerPreempted': 0,
u'preemptedResourceMB': 0,
u'preemptedResourceVCores': 0,
u'progress': 100.0,
u'queue': u'root.erickt',
u'runningContainers': -1,
u'startedTime': 1428454941315,
u'state': u'FINISHED',
u'trackingUI': u'History',
u'trackingUrl': u'http://N/A',
u'user': u'test',
u'vcoreSeconds': 1,
},
}
def __init__(self, oozie_url=None): pass
def apps(self, **kwargs):
return {
'apps': {
'app': [
# RUNNING
MockResourceManagerApi.APPS['application_1356251510842_0054'],
# FINISHED
MockResourceManagerApi.APPS['application_1356251510842_0009'],
# SPARK
MockResourceManagerApi.APPS['application_1428442704693_0006'],
# YARN
MockResourceManagerApi.APPS['application_1428442704693_0007'],
]
}
}
def app(self, job_id):
return {
u'app': MockResourceManagerApi.APPS[job_id]
}
class MockMapreduce2Api(object):
"""
MockMapreduceApi and HistoryServerApi are very similar and inherit from it.
"""
def __init__(self, oozie_url=None): pass
def tasks(self, job_id):
return {
u'tasks': {
u'task': [{
u'finishTime': 1357153330271, u'successfulAttempt': u'attempt_1356251510842_0062_m_000000_0', u'elapsedTime': 1901, u'state': u'SUCCEEDED',
u'startTime': 1357153328370, u'progress': 100.0, u'type': u'MAP', u'id': u'task_1356251510842_0062_m_000000'},
{
u'finishTime': 0, u'successfulAttempt': u'', u'elapsedTime': 0, u'state': u'SCHEDULED', u'startTime': 1357153326322, u'progress': 0.0,
u'type': u'REDUCE', u'id': u'task_1356251510842_0062_r_000000'}
]
}
}
def conf(self, job_id):
return {
"conf" : {
"path" : "hdfs://host.domain.com:9000/user/user1/.staging/job_1326232085508_0004/job.xml",
"property" : [
{
"name" : "dfs.datanode.data.dir",
"value" : "/home/hadoop/hdfs/data",
}, {
"name" : "mapreduce.job.acl-modify-job",
"value" : "test",
}, {
"name" : "mapreduce.job.acl-view-job",
"value" : "test",
}
]
}
}
def job_attempts(self, job_id):
return {
"jobAttempts" : {
"jobAttempt" : [
{
"nodeId" : "host.domain.com:8041",
"nodeHttpAddress" : "host.domain.com:8042",
"startTime" : 1326238773493,
"id" : 1,
"logsLink" : "http://host.domain.com:8042/node/containerlogs/container_1326232085508_0004_01_000001",
"containerId" : "container_1326232085508_0004_01_000001"
}
]
}
}
def task_attempts(self, job_id, task_id):
return {
"taskAttempts" : {
"taskAttempt" : [
{
"elapsedMergeTime" : 47,
"shuffleFinishTime" : 1326238780052,
"assignedContainerId" : "container_1326232085508_0004_01_000003",
"progress" : 100,
"elapsedTime" : 0,
"state" : "RUNNING",
"elapsedShuffleTime" : 2592,
"mergeFinishTime" : 1326238780099,
"rack" : "/98.139.92.0",
"elapsedReduceTime" : 0,
"nodeHttpAddress" : "host.domain.com:8042",
"type" : "REDUCE",
"startTime" : 1326238777460,
"id" : "attempt_1326232085508_4_4_r_0_0",
"finishTime" : 0
}
]
}
}
def counters(self, job_id):
return {
"jobCounters" : {
"id" : "job_1326232085508_4_4",
"counterGroup" : [
{
"counterGroupName" : "org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter",
"counter" : [
{
"reduceCounterValue" : 0,
"mapCounterValue" : 0,
"totalCounterValue" : 0,
"name" : "BYTES_READ"
}
]
},
{
"counterGroupName" : "org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter",
"counter" : [
{
"reduceCounterValue" : 0,
"mapCounterValue" : 0,
"totalCounterValue" : 0,
"name" : "BYTES_WRITTEN"
}
]
}
]
}
}
def kill(self, job_id):
job_id = job_id.replace('job', 'application')
MockResourceManagerApi.APPS[job_id]['state'] = 'KILLED'
return {}
class MockMapreduceApi(MockMapreduce2Api):
def job(self, user, job_id):
if '1356251510842_0009' not in job_id:
job = {
u'job': {
u'reducesCompleted': 0, u'mapsRunning': 1, u'id': u'job_1356251510842_0054', u'successfulReduceAttempts': 0, u'successfulMapAttempts': 0,
u'uberized': False, u'reducesTotal': 1, u'elapsedTime': 3426, u'mapsPending': 0, u'state': u'RUNNING', u'failedReduceAttempts': 0,
u'mapsCompleted': 0, u'killedMapAttempts': 0, u'killedReduceAttempts': 0, u'runningReduceAttempts': 0, u'failedMapAttempts': 0, u'mapsTotal': 1,
u'user': u'test', u'startTime': 1357152972886, u'reducesPending': 1, u'reduceProgress': 0.0, u'finishTime': 0,
u'name': u'select avg(salary) from sample_07(Stage-1)', u'reducesRunning': 0, u'newMapAttempts': 0, u'diagnostics': u'', u'mapProgress': 0.0,
u'runningMapAttempts': 1, u'newReduceAttempts': 1,
# Does not seems to exist in API, we actually skip it in case.
"acls" : [{
"value" : "test",
"name" : "mapreduce.job.acl-modify-job"
}, {
"value" : "test",
"name" : "mapreduce.job.acl-view-job"
}
],
}
}
job['job']['id'] = job_id
return job
class HistoryServerApi(MockMapreduce2Api):
def __init__(self, oozie_url=None): pass
def job(self, user, job_id):
if '1356251510842_0054' == job_id:
return {
u'job': {
u'reducesCompleted': 1, u'avgMapTime': 1798, u'avgMergeTime': 1479, u'id': job_id,
u'successfulReduceAttempts': 1, u'successfulMapAttempts': 2, u'uberized': False, u'reducesTotal': 1,
u'state': u'KILLED', u'failedReduceAttempts': 0, u'mapsCompleted': 2,
u'killedMapAttempts': 0, u'diagnostics': u'', u'mapsTotal': 2, u'user': u'test',
u'startTime': 1357151916268, u'avgReduceTime': 137,
u'finishTime': 1357151923925, u'name': u'oozie:action:T=map-reduce:W=MapReduce-copy:A=Sleep:ID=0000004-121223003201296-oozie-oozi-W',
u'avgShuffleTime': 1421, u'queue': u'default', u'killedReduceAttempts': 0, u'failedMapAttempts': 0
}
}
else:
return {
u'job': {
u'reducesCompleted': 1, u'avgMapTime': 1798, u'avgMergeTime': 1479, u'id': u'job_1356251510842_0009',
u'successfulReduceAttempts': 1, u'successfulMapAttempts': 2, u'uberized': False, u'reducesTotal': 1,
u'state': u'SUCCEEDED', u'failedReduceAttempts': 0, u'mapsCompleted': 2,
u'killedMapAttempts': 0, u'diagnostics': u'', u'mapsTotal': 2, u'user': u'test',
u'startTime': 1357151916268, u'avgReduceTime': 137,
u'finishTime': 1357151923925, u'name': u'oozie:action:T=map-reduce:W=MapReduce-copy:A=Sleep:ID=0000004-121223003201296-oozie-oozi-W',
u'avgShuffleTime': 1421, u'queue': u'default', u'killedReduceAttempts': 0, u'failedMapAttempts': 0
}
}
def test_make_log_links():
"""
Unit test for models.LinkJobLogs._make_links
"""
# FileBrowser
assert_equal(
"""<a href="/filebrowser/view/user/romain/tmp" target="_blank">hdfs://localhost:8020/user/romain/tmp</a> <dir>""",
LinkJobLogs._make_links('hdfs://localhost:8020/user/romain/tmp <dir>')
)
assert_equal(
"""<a href="/filebrowser/view/user/romain/tmp" target="_blank">hdfs://localhost:8020/user/romain/tmp</a><dir>""",
LinkJobLogs._make_links('hdfs://localhost:8020/user/romain/tmp<dir>')
)
assert_equal(
"""output: <a href="/filebrowser/view/user/romain/tmp" target="_blank">/user/romain/tmp</a> <dir>""",
LinkJobLogs._make_links('output: /user/romain/tmp <dir>')
)
assert_equal(
'Successfully read 3760 records (112648 bytes) from: "<a href="/filebrowser/view/user/hue/pig/examples/data/midsummer.txt" target="_blank">/user/hue/pig/examples/data/midsummer.txt</a>"',
LinkJobLogs._make_links('Successfully read 3760 records (112648 bytes) from: "/user/hue/pig/examples/data/midsummer.txt"')
)
assert_equal(
'data,upper_case MAP_ONLY <a href="/filebrowser/view/user/romain/out/fffff" target="_blank">hdfs://localhost:8020/user/romain/out/fffff</a>,',
LinkJobLogs._make_links('data,upper_case MAP_ONLY hdfs://localhost:8020/user/romain/out/fffff,')
)
assert_equal(
'MAP_ONLY <a href="/filebrowser/view/user/romain/out/fffff" target="_blank">hdfs://localhost:8020/user/romain/out/fffff</a>\n2013',
LinkJobLogs._make_links('MAP_ONLY hdfs://localhost:8020/user/romain/out/fffff\n2013')
)
assert_equal(
' <a href="/filebrowser/view/jobs.tsv" target="_blank">/jobs.tsv</a> ',
LinkJobLogs._make_links(' /jobs.tsv ')
)
assert_equal(
'<a href="/filebrowser/view/user/romain/job_pos_2012.tsv" target="_blank">hdfs://localhost:8020/user/romain/job_pos_2012.tsv</a>',
LinkJobLogs._make_links('hdfs://localhost:8020/user/romain/job_pos_2012.tsv')
)
# JobBrowser
assert_equal(
"""<a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>""",
LinkJobLogs._make_links('job_201306261521_0058')
)
assert_equal(
"""Hadoop Job IDs executed by Pig: <a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>""",
LinkJobLogs._make_links('Hadoop Job IDs executed by Pig: job_201306261521_0058')
)
assert_equal(
"""MapReduceLauncher - HadoopJobId: <a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>""",
LinkJobLogs._make_links('MapReduceLauncher - HadoopJobId: job_201306261521_0058')
)
assert_equal(
"""- More information at: http://localhost:50030/jobdetails.jsp?jobid=<a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>""",
LinkJobLogs._make_links('- More information at: http://localhost:50030/jobdetails.jsp?jobid=job_201306261521_0058')
)
assert_equal(
""" Logging error messages to: job_201307091553_0028/attempt_201307091553_002""",
LinkJobLogs._make_links(' Logging error messages to: job_201307091553_0028/attempt_201307091553_002')
)
assert_equal(
""" pig-job_201307091553_0028.log""",
LinkJobLogs._make_links(' pig-job_201307091553_0028.log')
)
| |
from typing import Dict, Union, List, Optional
import ray
from ray._raylet import ObjectRef
from ray._raylet import PlacementGroupID
from ray._private.utils import hex_to_binary
from ray.util.annotations import PublicAPI, DeveloperAPI
from ray.ray_constants import to_memory_units
from ray._private.client_mode_hook import client_mode_should_convert
from ray._private.client_mode_hook import client_mode_wrap
bundle_reservation_check = None
BUNDLE_RESOURCE_LABEL = "bundle"
# We need to import this method to use for ready API.
# But ray.remote is only available in runtime, and
# if we define this method inside ready method, this function is
# exported whenever ready is called, which can impact performance,
# https://github.com/ray-project/ray/issues/6240.
def _export_bundle_reservation_check_method_if_needed():
global bundle_reservation_check
if bundle_reservation_check:
return
@ray.remote(num_cpus=0)
def bundle_reservation_check_func(placement_group):
return placement_group
bundle_reservation_check = bundle_reservation_check_func
@PublicAPI
class PlacementGroup:
"""A handle to a placement group."""
@staticmethod
def empty() -> "PlacementGroup":
return PlacementGroup(PlacementGroupID.nil())
def __init__(self, id: PlacementGroupID, bundle_cache: Optional[List[Dict]] = None):
self.id = id
self.bundle_cache = bundle_cache
@property
def is_empty(self):
return self.id.is_nil()
def ready(self) -> ObjectRef:
"""Returns an ObjectRef to check ready status.
This API runs a small dummy task to wait for placement group creation.
        It is compatible with ray.get and ray.wait.
Example:
>>> pg = placement_group([{"CPU": 1}])
ray.get(pg.ready())
>>> pg = placement_group([{"CPU": 1}])
ray.wait([pg.ready()], timeout=0)
"""
self._fill_bundle_cache_if_needed()
_export_bundle_reservation_check_method_if_needed()
assert len(self.bundle_cache) != 0, (
"ready() cannot be called on placement group object with a "
"bundle length == 0, current bundle length: "
f"{len(self.bundle_cache)}"
)
return bundle_reservation_check.options(
placement_group=self, resources={BUNDLE_RESOURCE_LABEL: 0.001}
).remote(self)
def wait(self, timeout_seconds: Union[float, int]) -> bool:
"""Wait for the placement group to be ready within the specified time.
Args:
timeout_seconds(float|int): Timeout in seconds.
Return:
True if the placement group is created. False otherwise.
"""
return _call_placement_group_ready(self.id, timeout_seconds)
@property
def bundle_specs(self) -> List[Dict]:
"""List[Dict]: Return bundles belonging to this placement group."""
self._fill_bundle_cache_if_needed()
return self.bundle_cache
@property
def bundle_count(self) -> int:
self._fill_bundle_cache_if_needed()
return len(self.bundle_cache)
def _fill_bundle_cache_if_needed(self) -> None:
if not self.bundle_cache:
self.bundle_cache = _get_bundle_cache(self.id)
@client_mode_wrap
def _call_placement_group_ready(pg_id: PlacementGroupID, timeout_seconds: int) -> bool:
worker = ray.worker.global_worker
worker.check_connected()
return worker.core_worker.wait_placement_group_ready(pg_id, timeout_seconds)
@client_mode_wrap
def _get_bundle_cache(pg_id: PlacementGroupID) -> List[Dict]:
worker = ray.worker.global_worker
worker.check_connected()
return list(ray.state.state.placement_group_table(pg_id)["bundles"].values())
@PublicAPI
@client_mode_wrap
def placement_group(
bundles: List[Dict[str, float]],
strategy: str = "PACK",
name: str = "",
lifetime=None,
) -> PlacementGroup:
"""Asynchronously creates a PlacementGroup.
Args:
bundles(List[Dict]): A list of bundles which
            represent the resource requirements.
strategy(str): The strategy to create the placement group.
- "PACK": Packs Bundles into as few nodes as possible.
- "SPREAD": Places Bundles across distinct nodes as even as possible.
- "STRICT_PACK": Packs Bundles into one node. The group is
not allowed to span multiple nodes.
- "STRICT_SPREAD": Packs Bundles across distinct nodes.
name(str): The name of the placement group.
        lifetime(str): Either `None`, in which case the placement group
            fate-shares with its creator and is deleted once its creator
            is dead, or "detached", which means the placement group will
            live as a global object independent of the creator.
Raises:
ValueError if bundle type is not a list.
ValueError if empty bundle or empty resource bundles are given.
ValueError if the wrong lifetime arguments are given.
Return:
PlacementGroup: Placement group object.
"""
worker = ray.worker.global_worker
worker.check_connected()
if not isinstance(bundles, list):
raise ValueError("The type of bundles must be list, got {}".format(bundles))
# Validate bundles
for bundle in bundles:
if len(bundle) == 0 or all(
resource_value == 0 for resource_value in bundle.values()
):
raise ValueError(
"Bundles cannot be an empty dictionary or "
f"resources with only 0 values. Bundles: {bundles}"
)
if "memory" in bundle.keys() and bundle["memory"] > 0:
# Make sure the memory resource can be
# transformed to memory unit.
to_memory_units(bundle["memory"], True)
if lifetime is None:
detached = False
elif lifetime == "detached":
detached = True
else:
raise ValueError(
"placement group `lifetime` argument must be either" " `None` or 'detached'"
)
placement_group_id = worker.core_worker.create_placement_group(
name, bundles, strategy, detached
)
return PlacementGroup(placement_group_id)
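# Hedged usage sketch (not part of the module): reserve two one-CPU bundles
# spread across nodes and block until the reservation is fulfilled. Assumes
# ray.init() has already been called against a cluster with enough resources.
def _example_placement_group_usage():
    pg = placement_group([{"CPU": 1}, {"CPU": 1}], strategy="SPREAD")
    ray.get(pg.ready())  # resolves once both bundles have been reserved
    return pg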
@PublicAPI
@client_mode_wrap
def remove_placement_group(placement_group: PlacementGroup) -> None:
"""Asynchronously remove placement group.
Args:
placement_group (PlacementGroup): The placement group to delete.
"""
assert placement_group is not None
worker = ray.worker.global_worker
worker.check_connected()
worker.core_worker.remove_placement_group(placement_group.id)
@PublicAPI
@client_mode_wrap
def get_placement_group(placement_group_name: str) -> PlacementGroup:
"""Get a placement group object with a global name.
Returns:
None if can't find a placement group with the given name.
The placement group object otherwise.
"""
if not placement_group_name:
raise ValueError("Please supply a non-empty value to get_placement_group")
worker = ray.worker.global_worker
worker.check_connected()
placement_group_info = ray.state.state.get_placement_group_by_name(
placement_group_name, worker.namespace
)
if placement_group_info is None:
raise ValueError(f"Failed to look up actor with name: {placement_group_name}")
else:
return PlacementGroup(
PlacementGroupID(hex_to_binary(placement_group_info["placement_group_id"]))
)
@DeveloperAPI
@client_mode_wrap
def placement_group_table(placement_group: PlacementGroup = None) -> dict:
"""Get the state of the placement group from GCS.
Args:
placement_group (PlacementGroup): placement group to see
states.
"""
worker = ray.worker.global_worker
worker.check_connected()
placement_group_id = placement_group.id if (placement_group is not None) else None
return ray.state.state.placement_group_table(placement_group_id)
@PublicAPI
def get_current_placement_group() -> Optional[PlacementGroup]:
"""Get the current placement group which a task or actor is using.
It returns None if there's no current placement group for the worker.
For example, if you call this method in your driver, it returns None
(because drivers never belong to any placement group).
Examples:
>>> @ray.remote
>>> def f():
>>> # This will return the placement group the task f belongs to.
>>> # It means this pg will be identical to the pg created below.
>>> pg = get_current_placement_group()
>>> pg = placement_group([{"CPU": 2}])
>>> f.options(placement_group=pg).remote()
>>> # New script.
>>> ray.init()
>>> # New script doesn't belong to any placement group,
>>> # so it returns None.
>>> assert get_current_placement_group() is None
Return:
PlacementGroup: Placement group object.
None if the current task or actor wasn't
created with any placement group.
"""
if client_mode_should_convert(auto_init=True):
# Client mode is only a driver.
return None
worker = ray.worker.global_worker
worker.check_connected()
pg_id = worker.placement_group_id
if pg_id.is_nil():
return None
return PlacementGroup(pg_id)
def check_placement_group_index(
placement_group: PlacementGroup, bundle_index: int
) -> None:
assert placement_group is not None
if placement_group.id.is_nil():
if bundle_index != -1:
raise ValueError(
"If placement group is not set, "
"the value of bundle index must be -1."
)
elif bundle_index >= placement_group.bundle_count or bundle_index < -1:
raise ValueError(
f"placement group bundle index {bundle_index} "
f"is invalid. Valid placement group indexes: "
f"0-{placement_group.bundle_count}"
)
def _validate_resource_shape(
placement_group, resources, placement_resources, task_or_actor_repr
):
def valid_resource_shape(resources, bundle_specs):
"""
If the resource shape cannot fit into every
bundle spec, return False
"""
for bundle in bundle_specs:
fit_in_bundle = True
for resource, requested_val in resources.items():
# Skip "bundle" resource as it is automatically added
# to all nodes with bundles by the placement group.
if resource == BUNDLE_RESOURCE_LABEL:
continue
if bundle.get(resource, 0) < requested_val:
fit_in_bundle = False
break
if fit_in_bundle:
# If resource request fits in any bundle, it is valid.
return True
return False
bundles = placement_group.bundle_specs
resources_valid = valid_resource_shape(resources, bundles)
placement_resources_valid = valid_resource_shape(placement_resources, bundles)
if not resources_valid:
raise ValueError(
f"Cannot schedule {task_or_actor_repr} with "
"the placement group because the resource request "
f"{resources} cannot fit into any bundles for "
f"the placement group, {bundles}."
)
if not placement_resources_valid:
# Happens for the default actor case.
# placement_resources is not an exposed concept to users,
# so we should write more specialized error messages.
raise ValueError(
f"Cannot schedule {task_or_actor_repr} with "
"the placement group because the actor requires "
f"{placement_resources.get('CPU', 0)} CPU for "
"creation, but it cannot "
f"fit into any bundles for the placement group, "
f"{bundles}. Consider "
"creating a placement group with CPU resources."
)
def configure_placement_group_based_on_context(
placement_group_capture_child_tasks: bool,
bundle_index: int,
resources: Dict,
placement_resources: Dict,
task_or_actor_repr: str,
placement_group: Union[PlacementGroup, str, None] = "default",
) -> PlacementGroup:
"""Configure the placement group based on the given context.
Based on the given context, this API returns the placement group instance
for task/actor scheduling.
Params:
placement_group_capture_child_tasks: Whether or not the
placement group needs to be captured from the global
context.
bundle_index: The bundle index for tasks/actor scheduling.
resources: The scheduling resources.
placement_resources: The scheduling placement resources for
actors.
task_or_actor_repr: The repr of task or actor
function/class descriptor.
placement_group: The placement group instance.
- "default": Default placement group argument. Currently,
                the default behavior is to capture the parent task's
placement group if placement_group_capture_child_tasks
is set.
- None: means placement group is explicitly not configured.
- Placement group instance: In this case, do nothing.
Returns:
Placement group instance based on the given context.
Raises:
ValueError: If the bundle index is invalid for the placement group
or the requested resources shape doesn't fit to any
bundles.
"""
# Validate inputs.
assert placement_group_capture_child_tasks is not None
assert resources is not None
# Validate and get the PlacementGroup instance.
# Placement group could be None, default, or placement group.
# Default behavior is "do not capture child tasks".
if placement_group != "default":
if not placement_group:
placement_group = PlacementGroup.empty()
elif placement_group == "default":
if placement_group_capture_child_tasks:
placement_group = get_current_placement_group()
else:
placement_group = PlacementGroup.empty()
if not placement_group:
placement_group = PlacementGroup.empty()
assert isinstance(placement_group, PlacementGroup)
# Validate the index.
check_placement_group_index(placement_group, bundle_index)
# Validate the shape.
if not placement_group.is_empty:
_validate_resource_shape(
placement_group, resources, placement_resources, task_or_actor_repr
)
return placement_group
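# Hedged sketch (not part of the module): the three forms the placement_group
# argument can take when scheduling a task, as handled above. "default" defers
# to the capture flag, a falsy value opts out, and an explicit PlacementGroup
# is validated and used as-is. The resource dicts are illustrative.
def _example_configure(pg_argument):
    return configure_placement_group_based_on_context(
        placement_group_capture_child_tasks=False,
        bundle_index=-1,
        resources={"CPU": 1},
        placement_resources={"CPU": 1},
        task_or_actor_repr="example_task",
        placement_group=pg_argument,  # "default", None, or a PlacementGroup
    )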
| |
"""
Allows isometric viewing of a 3D data cube.
Click or click-drag in any data window to set the slice to view.
"""
# Outstanding TODOs:
# - need to add line inspectors to side and bottom plots, and synchronize
# with center plot
# - need to set the various image plots to use the same colormap instance,
# and that colormap's range needs to be set to min/max of the entire cube
# - refactor create_window() so there is less code duplication
# - try to eliminate the use of model.xs, ys, zs in favor of bounds tuples
from numpy import amin, amax, zeros, fromfile, transpose, uint8
# Standard library imports
import os, sys, shutil, warnings
# Major library imports
from numpy import arange, linspace, nanmin, nanmax, newaxis, pi, sin, cos
# Enthought library imports
from chaco.api import ArrayPlotData, Plot, GridPlotContainer, \
BaseTool, DataRange1D
from chaco.default_colormaps import *
from chaco.tools.api import LineInspector, ZoomTool
from enable.example_support import DemoFrame, demo_main
from enable.api import Window
from traits.api import Any, Array, Bool, Callable, CFloat, CInt, \
Event, Float, HasTraits, Int, Trait, on_trait_change
# Will hold the path that the user chooses to download to. Will be an empty
# string if the user decides to download to the current directory.
dl_path = ''
# Determines if the script should ask the user if they would like to remove the
# downloaded data. This defaults to False, because data deletion is
# irreversible, and in the worst case, the user will have to remove it
# manually themselves.
run_cleanup = False
class Model(HasTraits):
npts_x = CInt(256)
npts_y = CInt(256)
npts_z = CInt(109)
min_x = CFloat(-2*pi)
max_x = CFloat(2*pi)
min_y = CFloat(-2*pi)
max_y = CFloat(2*pi)
min_z = CFloat(-pi)
max_z = CFloat(pi)
xs = Array
ys = Array
vals = Array
minval = Float
maxval = Float
model_changed = Event
def __init__(self, *args, **kwargs):
super(Model, self).__init__(*args, **kwargs)
self.compute_model()
@on_trait_change("npts_+", "min_+", "max_+")
def compute_model(self):
def vfunc(x, y, z):
return sin(x*z) * cos(y)*sin(z) + sin(0.5*z)
# Create the axes
self.xs = linspace(self.min_x, self.max_x, self.npts_x)
self.ys = linspace(self.min_y, self.max_y, self.npts_y)
self.zs = linspace(self.min_z, self.max_z, self.npts_z)
# Generate a cube of values by using newaxis to span new dimensions
self.vals = vfunc(self.xs[:, newaxis, newaxis],
self.ys[newaxis, :, newaxis],
self.zs[newaxis, newaxis, :])
self.minval = nanmin(self.vals)
self.maxval = nanmax(self.vals)
self.model_changed = True
class BrainModel(Model):
def __init__(self, *args, **kwargs):
download_data()
super(BrainModel, self).__init__(*args, **kwargs)
def compute_model(self):
global dl_path
mrbrain_path = os.path.join(dl_path, 'voldata', 'MRbrain.')
nx = 256
ny = 256
nz = 109
full_arr = zeros((nx, ny, nz), dtype='f')
for i in range(1, 110):
arr = fromfile(mrbrain_path + str(i), dtype='>u2')
arr.shape = (256,256)
full_arr[:,:,i-1] = arr
self.vals = full_arr
# Create the axes
self.xs = arange(nx)
self.ys = arange(ny)
self.zs = arange(nz)
# Generate a cube of values by using newaxis to span new dimensions
self.minval = nanmin(self.vals)
self.maxval = nanmax(self.vals)
self.model_changed = True
class ImageIndexTool(BaseTool):
""" A tool to set the slice of a cube based on the user's mouse movements
or clicks.
"""
# This callback will be called with the index into self.component's
# index and value:
# callback(tool, x_index, y_index)
# where *tool* is a reference to this tool instance. The callback
# can then use tool.token.
callback = Any()
# This callback (if it exists) will be called with the integer number
# of mousewheel clicks
wheel_cb = Any()
# This token can be used by the callback to decide how to process
# the event.
token = Any()
# Whether or not to update the slice info; we enter select mode when
# the left mouse button is pressed and exit it when the mouse button
# is released
# FIXME: This is not used right now.
select_mode = Bool(False)
def normal_left_down(self, event):
self._update_slices(event)
def normal_right_down(self, event):
self._update_slices(event)
def normal_mouse_move(self, event):
if event.left_down or event.right_down:
self._update_slices(event)
def _update_slices(self, event):
plot = self.component
ndx = plot.map_index((event.x, event.y),
threshold=5.0, index_only=True)
if ndx:
self.callback(self, *ndx)
def normal_mouse_wheel(self, event):
if self.wheel_cb is not None:
self.wheel_cb(self, event.mouse_wheel)
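# Hedged wiring sketch (not part of the original demo): how ImageIndexTool is
# attached to a Chaco image plot, mirroring _add_plot_tools() below. The plot
# argument and the printed message are placeholders.
def _example_attach_index_tool(img_plot):
    def on_index(tool, x_index, y_index):
        print "picked slice on plane %s: (%d, %d)" % (tool.token, x_index, y_index)
    img_plot.tools.append(
        ImageIndexTool(img_plot, token="xy", callback=on_index))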
class PlotFrame(DemoFrame):
# These are the indices into the cube that each of the image plot views
# will show; the default values are non-zero just to make it a little
# interesting.
slice_x = 10
slice_y = 10
slice_z = 10
num_levels = Int(15)
colormap = Any
colorcube = Any
#---------------------------------------------------------------------------
# Private Traits
#---------------------------------------------------------------------------
_cmap = Trait(jet, Callable)
def _index_callback(self, tool, x_index, y_index):
plane = tool.token
if plane == "xy":
self.slice_x = x_index
self.slice_y = y_index
elif plane == "yz":
# transposed because the plot is oriented vertically
self.slice_z = x_index
self.slice_y = y_index
elif plane == "xz":
self.slice_x = x_index
self.slice_z = y_index
else:
warnings.warn("Unrecognized plane for _index_callback: %s" % plane)
self._update_images()
self.center.invalidate_and_redraw()
self.right.invalidate_and_redraw()
self.bottom.invalidate_and_redraw()
return
def _wheel_callback(self, tool, wheelamt):
plane_slice_dict = {"xy": ("slice_z", 2),
"yz": ("slice_x", 0),
"xz": ("slice_y", 1)}
attr, shape_ndx = plane_slice_dict[tool.token]
val = getattr(self, attr)
max = self.model.vals.shape[shape_ndx]
if val + wheelamt > max:
setattr(self, attr, max-1)
elif val + wheelamt < 0:
setattr(self, attr, 0)
else:
setattr(self, attr, val + wheelamt)
self._update_images()
self.center.invalidate_and_redraw()
self.right.invalidate_and_redraw()
self.bottom.invalidate_and_redraw()
return
def _create_window(self):
# Create the model
#try:
# self.model = model = BrainModel()
# cmap = bone
#except SystemExit:
# sys.exit()
#except:
# print "Unable to load BrainModel, using generated data cube."
self.model = model = Model()
cmap = jet
self._update_model(cmap)
datacube = self.colorcube
# Create the plot
self.plotdata = ArrayPlotData()
self._update_images()
# Center Plot
centerplot = Plot(self.plotdata, padding=0)
imgplot = centerplot.img_plot("xy",
xbounds=(model.xs[0], model.xs[-1]),
ybounds=(model.ys[0], model.ys[-1]),
colormap=cmap)[0]
self._add_plot_tools(imgplot, "xy")
self.center = imgplot
# Right Plot
rightplot = Plot(self.plotdata, width=150, resizable="v", padding=0)
rightplot.value_range = centerplot.value_range
imgplot = rightplot.img_plot("yz",
xbounds=(model.zs[0], model.zs[-1]),
ybounds=(model.ys[0], model.ys[-1]),
colormap=cmap)[0]
self._add_plot_tools(imgplot, "yz")
self.right = imgplot
# Bottom Plot
bottomplot = Plot(self.plotdata, height=150, resizable="h", padding=0)
bottomplot.index_range = centerplot.index_range
imgplot = bottomplot.img_plot("xz",
xbounds=(model.xs[0], model.xs[-1]),
ybounds=(model.zs[0], model.zs[-1]),
colormap=cmap)[0]
self._add_plot_tools(imgplot, "xz")
self.bottom = imgplot
# Create Container and add all Plots
container = GridPlotContainer(padding=20, fill_padding=True,
bgcolor="white", use_backbuffer=True,
shape=(2,2), spacing=(12,12))
container.add(centerplot)
container.add(rightplot)
container.add(bottomplot)
self.container = container
return Window(self, -1, component=container)
def _add_plot_tools(self, imgplot, token):
""" Add LineInspectors, ImageIndexTool, and ZoomTool to the image plots. """
imgplot.overlays.append(ZoomTool(component=imgplot, tool_mode="box",
enable_wheel=False, always_on=False))
imgplot.overlays.append(LineInspector(imgplot, axis="index_y", color="white",
inspect_mode="indexed", write_metadata=True, is_listener=True))
imgplot.overlays.append(LineInspector(imgplot, axis="index_x", color="white",
inspect_mode="indexed", write_metadata=True, is_listener=True))
imgplot.tools.append(ImageIndexTool(imgplot, token=token,
callback=self._index_callback, wheel_cb=self._wheel_callback))
def _update_model(self, cmap):
range = DataRange1D(low=amin(self.model.vals),
high=amax(self.model.vals))
self.colormap = cmap(range)
self.colorcube = (self.colormap.map_screen(self.model.vals) * 255).astype(uint8)
def _update_images(self):
""" Updates the image data in self.plotdata to correspond to the
slices given.
"""
cube = self.colorcube
pd = self.plotdata
# These are transposed because img_plot() expects its data to be in
# row-major order
pd.set_data("xy", transpose(cube[:, :, self.slice_z], (1,0,2)))
pd.set_data("xz", transpose(cube[:, self.slice_y, :], (1,0,2)))
pd.set_data("yz", cube[self.slice_x, :, :])
def download_data():
global dl_path, run_cleanup
print 'Please enter the location of the "voldata" subdirectory containing'
print 'the data files for this demo, or enter a path to download to (7.8MB).'
print 'Press <ENTER> to download to the current directory.'
dl_path = raw_input('Path: ').strip().rstrip("/").rstrip("\\")
if not dl_path.endswith("voldata"):
voldata_path = os.path.join(dl_path, 'voldata')
else:
voldata_path = dl_path
tar_path = os.path.join(dl_path, 'MRbrain.tar.gz')
data_good = True
try:
for i in range(1,110):
if not os.path.isfile(os.path.join(voldata_path, "MRbrain.%d" % i)):
data_good = False
break
else:
data_good = True
except:
data_good = False
if not data_good:
import urllib
import tarfile
if len(dl_path) > 0 and not os.path.exists(dl_path):
print 'The given path does not exist.'
run_cleanup = False
sys.exit()
if not os.path.isabs(dl_path):
print 'Downloading to: ' + os.path.join(os.getcwd(), dl_path)
else:
print 'Downloading to: ' + dl_path
try:
# download and extract the file
print "Downloading data, Please Wait (7.8MB)"
opener = urllib.urlopen('http://www-graphics.stanford.edu/data/voldata/MRbrain.tar.gz')
except:
print 'Download error. Opening backup data.'
run_cleanup = False
raise
try:
open(tar_path, 'wb').write(opener.read())
except:
print 'Cannot write to the destination directory specified. ' \
'Opening backup data.'
run_cleanup = False
raise
tar_file = tarfile.open(tar_path)
try:
os.mkdir(voldata_path)
except:
pass
tar_file.extractall(voldata_path)
tar_file.close()
os.unlink(tar_path)
else:
print 'Previously downloaded data detected.'
def cleanup_data():
global dl_path
answer = raw_input('Remove downloaded files? [Y/N]: ')
if answer.lower() == 'y':
try:
shutil.rmtree(os.path.join(dl_path, 'voldata'))
except:
pass
if __name__ == "__main__":
# Save demo so that it doesn't get garbage collected when run within
# existing event loop (i.e. from ipython).
demo = demo_main(PlotFrame, size=(800,700), title="Cube analyzer")
if run_cleanup:
cleanup_data()
| |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'AAT.ui'
#
# Created: Wed Sep 09 12:06:25 2015
# by: PyQt4 UI code generator 4.9.6
#
# WARNING! All changes made in this file will be lost!
'''
* Copyright (C) 2015 Francisco Javier <https://mx.linkedin.com/in/fcojavierpena>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
'''
import os
import threading
import warnings
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import QFileDialog
import goslate
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import StaticPool
from Export import ExportData
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(QtGui.QWidget):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(500, 273)
MainWindow.setFixedSize(500, 273)
MainWindow.setStyleSheet(_fromUtf8("QWidget#centralwidget{background-color: qlineargradient(spread:pad, x1:1, y1:1, x2:1, y2:0, stop:0 rgba(186, 186, 186, 255), stop:0.781095 rgba(235, 235, 235, 255));}\n"
"\n"
"QToolButton, QToolButton:pressed{\n"
"\n"
"background-color:transparent;\n"
"border:none;\n"
"color: rgb(156, 156, 156);\n"
"\n"
"}\n"
"\n"
"QToolButton:checked, QToolButton:pressed{\n"
"\n"
"background-color:rgb(219,218,206);\n"
"border: 1px solid rgb(255, 255, 255);\n"
"\n"
"}\n"
"\n"
"QToolButton:hover{\n"
"\n"
"background-color:rgb(89,209,171);\n"
"\n"
"}\n"
"\n"
"\n"
"QToolButton:checked:hover{\n"
"\n"
"background-color:rgb(219,218,206);\n"
"\n"
"}\n"
"\n"
" QPushButton {\n"
"font: 75 14pt \"Segoe UI Light\";\n"
" background-color: rgb(0, 150, 136);\n"
" color: rgb(255, 255, 255);\n"
" border-width: 2px;\n"
" border-radius: 10px;\n"
" border-color: rgb(0, 150, 136);\n"
" font: bold 16px;\n"
" min-width: 10em;\n"
" padding: 6px;\n"
" }\n"
" QPushButton:pressed {\n"
" background-color: rgb(77,182, 172);\n"
" }\n"
"\n"
"QLineEdit {\n"
"\n"
" border-style: solid;\n"
" border: 2px solid gray;\n"
" border-radius: 8px;\n"
" \n"
" color: rgb(159, 159, 159);\n"
" font: 75 14pt \"Segoe UI Light\";\n"
" border-width: 2px;\n"
" border-radius: 10px;\n"
" min-width: 10em;\n"
" padding: 6px;\n"
"\n"
" }\n"
"\n"
"QLabel{\n"
"font: 63 15pt \"Segoe UI Light\";\n"
"color: rgb(156, 156, 156);\n"
"}\n"
"\n"
"QGroupBox{\n"
"color: rgb(156, 156, 156);\n"
"}\n"
"\n"
"QProgressBar {\n"
" border: 2px solid grey;\n"
" border-radius: 5px;\n"
" text-align: center;\n"
" }\n"
"\n"
" QProgressBar::chunk {\n"
" background-color: #05B8CC;\n"
"background-color: rgb(0, 150, 136);\n"
" width: 20px;\n"
" }\n"
"QCheckBox{\n"
"font: 63 15pt \"Segoe UI Light\";\n"
"color: rgb(0, 150, 136);\n"
"}\n"
"\n"
"\n"
"/* QComboBox STYLE */\n"
"\n"
"QComboBox {\n"
" border: 1px solid gray;\n"
" border-radius: 3px;\n"
" padding: 1px 18px 1px 3px;\n"
" min-width: 5em;\n"
" height: 30px;\n"
" font: 63 15pt \"Segoe UI Light\";\n"
"}\n"
"\n"
"QComboBox:editable {\n"
" background: white;\n"
"}\n"
"\n"
"QComboBox:!editable, QComboBox::drop-down:editable {\n"
" background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
" stop: 0 #E1E1E1, stop: 0.4 #DDDDDD,\n"
" stop: 0.5 #D8D8D8, stop: 1.0 #D3D3D3);\n"
"}\n"
"\n"
"/* QComboBox gets the \"on\" state when the popup is open */\n"
"QComboBox:!editable:on, QComboBox::drop-down:editable:on {\n"
" background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
" stop: 0 #D3D3D3, stop: 0.4 #D8D8D8,\n"
" stop: 0.5 #DDDDDD, stop: 1.0 #E1E1E1);\n"
"}\n"
"\n"
"QComboBox:on { /* shift the text when the popup opens */\n"
" padding-top: 3px;\n"
" padding-left: 4px;\n"
"}\n"
"\n"
"QComboBox::drop-down {\n"
" subcontrol-origin: padding;\n"
" subcontrol-position: top right;\n"
" width: 15px;\n"
"\n"
" border-left-width: 1px;\n"
" border-left-color: darkgray;\n"
" border-left-style: solid; /* just a single line */\n"
" border-top-right-radius: 3px; /* same radius as the QComboBox */\n"
" border-bottom-right-radius: 3px;\n"
"}\n"
"\n"
"\n"
"QComboBox::down-arrow:on { /* shift the arrow when popup is open */\n"
" top: 1px;\n"
" left: 1px;\n"
"}"))
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.btImportRes = QtGui.QPushButton(self.centralwidget)
self.btImportRes.setGeometry(QtCore.QRect(260, 120, 222, 51))
self.btImportRes.setStyleSheet(_fromUtf8(" QPushButton:pressed {\n"
" background-color: rgb(0,151, 167);\n"
" }\n"
" QPushButton{\n"
"background-color: rgb(1, 87, 155);\n"
"}\n"
""))
self.btImportRes.setObjectName(_fromUtf8("btImportRes"))
self.lbTo = QtGui.QLabel(self.centralwidget)
self.lbTo.setGeometry(QtCore.QRect(30, 130, 131, 21))
self.lbTo.setObjectName(_fromUtf8("lbTo"))
self.btExportFile = QtGui.QPushButton(self.centralwidget)
self.btExportFile.setGeometry(QtCore.QRect(260, 190, 222, 51))
self.btExportFile.setStyleSheet(_fromUtf8("\n"
" QPushButton:pressed {\n"
" background-color: rgb(0,150, 136);\n"
" }\n"
" QPushButton{\n"
"background-color: rgb(0, 191, 165);\n"
"}\n"
"\n"
""))
self.btExportFile.setObjectName(_fromUtf8("btExportFile"))
self.lbProcess = QtGui.QLabel(self.centralwidget)
self.lbProcess.setGeometry(QtCore.QRect(10, 10, 481, 91))
self.lbProcess.setStyleSheet(_fromUtf8(" border: 2px solid #B2DFDB;"))
self.lbProcess.setTextFormat(QtCore.Qt.AutoText)
self.lbProcess.setAlignment(QtCore.Qt.AlignCenter)
self.lbProcess.setWordWrap(True)
self.lbProcess.setObjectName(_fromUtf8("lbProcess"))
self.comboTo = QtGui.QComboBox(self.centralwidget)
self.comboTo.setGeometry(QtCore.QRect(20, 160, 221, 51))
self.comboTo.setObjectName(_fromUtf8("comboTo"))
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Android App Translate", None))
self.btImportRes.setText(_translate("MainWindow", "Import string resource", None))
self.lbTo.setText(_translate("MainWindow", "Translate to", None))
self.btExportFile.setText(_translate("MainWindow", "Export", None))
self.lbProcess.setText(_translate("MainWindow", "Android Application Translate", None))
#QtCore.QObject.connect(self.comboFrom, QtCore.SIGNAL("currentIndexChanged(int)"), self.onItemFromSelected)
QtCore.QObject.connect(self.comboTo, QtCore.SIGNAL("currentIndexChanged(int)"), self.onItemToSelected)
QtCore.QObject.connect(self.btExportFile, QtCore.SIGNAL("clicked()"), self.exportFile)
QtCore.QObject.connect(self.btImportRes, QtCore.SIGNAL("clicked()"), self.selectResourceString)
dataLanguages = {}
fromKey = ""
toKey = ""
def onItemToSelected(self, item):
self.toKey = ""
selectedLanguage = self.comboTo.currentText()
self.toKey = self.dataLanguages[str(selectedLanguage)]
def onItemFromSelected(self, item):
self.fromKey = ""
selectedLanguage = self.comboFrom.currentText()
self.fromKey = self.dataLanguages[str(selectedLanguage)]
def selectResourceString(self):
in_path = QtGui.QFileDialog.getOpenFileName(self, u'Select string.xml resource', '')
fileName, fileExtension = os.path.splitext(str(in_path))
if fileExtension != '':
if str(fileExtension) != '.xml':
QtGui.QMessageBox.critical(self, u'System', u' Wrong file, the file should contain the XML extension.',QtGui.QMessageBox.Ok)
else:
self.lbProcess.setText(in_path)
self.btExportFile.setEnabled(False)
e = ImportData()
t = threading.Thread(target=e.importFile, args=(self, in_path, self.toKey), name='ServiceImport')
t.start()
def exportFile(self):
self.importEvo = True
engine = create_engine('sqlite:///data.sqlite',connect_args={'check_same_thread':True}, poolclass=StaticPool)
session = sessionmaker()
session.configure(bind=engine)
s = session()
language = ""
try:
ln = s.query(DataAccess.DataString.language_translation).first()
language = ln[0]
if language == '':
language = self.toKey
except Exception as e:
language = self.toKey
fileName = QFileDialog.getSaveFileName(self, 'Save as','strings-%s'%(language), selectedFilter='*.xml')
if fileName:
e = ExportData()
t = threading.Thread(target=e.exportToXMLFileString, args=(self, fileName, self.toKey), name='ServiceExportToXML')
t.start()
def loadLanguage(self):
gs = goslate.Goslate()
print gs.get_languages()
data = gs.get_languages()
count = 0
for string in data:
key = json.dumps(string)
key = key.split('"')
key = key[1]
language = gs.get_languages()[key]
self.dataLanguages[language] = key
#print count, language
#self.comboFrom.setItemText(count, _translate("MainWindow", key, None))
#self.comboFrom.addItem(_fromUtf8(language))
#self.comboTo.setItemText(count, _translate("MainWindow", key, None))
self.comboTo.addItem(_fromUtf8(language))
count += 1
import json
import DataAccess
from Import import ImportData
from xml.dom import minidom
import wx
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
sits = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(sits)
ui.loadLanguage()
sits.show()
sys.exit(app.exec_())
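# A short sketch (illustration only) of what loadLanguage builds above:
# goslate's get_languages() is assumed to return a dict mapping language codes
# to display names, and the combo box needs the reverse mapping (name -> code),
# which is a plain dict inversion.
def _invert_languages_sketch():
    languages = {"es": "Spanish", "de": "German"}   # stand-in for gs.get_languages()
    data_languages = {}
    for code, name in languages.items():
        data_languages[name] = code
    assert data_languages["Spanish"] == "es"
    return data_languages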
| |
from __future__ import print_function
import argparse
import random
import mxnet as mx
import numpy as np
from mxnet import gluon, init
from mxnet import autograd as ag
from mxnet.gluon import nn
from mxnet.gluon.data.vision import transforms
from gluoncv.model_zoo import get_model
from gluoncv.data import transforms as gcv_transforms
# Training settings
parser = argparse.ArgumentParser(description="CIFAR-10 Example")
parser.add_argument(
"--model",
required=True,
type=str,
default="resnet50_v1b",
help="name of the pretrained model from gluoncv model zoo"
"(default: resnet50_v1b).")
parser.add_argument(
"--batch_size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)")
parser.add_argument(
"--epochs",
type=int,
default=1,
metavar="N",
help="number of epochs to train (default: 1)")
parser.add_argument(
"--num_gpus",
default=0,
type=int,
help="number of gpus to use, 0 indicates cpu only (default: 0)")
parser.add_argument(
"--num_workers",
default=4,
type=int,
help="number of preprocessing workers (default: 4)")
parser.add_argument(
"--classes",
type=int,
default=10,
metavar="N",
help="number of outputs (default: 10)")
parser.add_argument(
"--lr",
default=0.001,
type=float,
help="initial learning rate (default: 0.001)")
parser.add_argument(
"--momentum",
default=0.9,
type=float,
help="initial momentum (default: 0.9)")
parser.add_argument(
"--wd", default=1e-4, type=float, help="weight decay (default: 1e-4)")
parser.add_argument(
"--expname", type=str, default="cifar10exp", help="experiments location")
parser.add_argument(
"--num_samples",
type=int,
default=20,
metavar="N",
help="number of samples (default: 20)")
parser.add_argument(
"--scheduler",
type=str,
default="fifo",
help="FIFO or AsyncHyperBandScheduler.")
parser.add_argument(
"--seed",
type=int,
default=1,
metavar="S",
help="random seed (default: 1)")
parser.add_argument(
"--smoke_test", action="store_true", help="Finish quickly for testing")
args = parser.parse_args()
def train_cifar10(args, config, reporter):
vars(args).update(config)
np.random.seed(args.seed)
random.seed(args.seed)
mx.random.seed(args.seed)
# Set Hyper-params
batch_size = args.batch_size * max(args.num_gpus, 1)
ctx = [mx.gpu(i)
for i in range(args.num_gpus)] if args.num_gpus > 0 else [mx.cpu()]
# Define DataLoader
transform_train = transforms.Compose([
gcv_transforms.RandomCrop(32, pad=4),
transforms.RandomFlipLeftRight(),
transforms.ToTensor(),
transforms.Normalize([0.4914, 0.4822, 0.4465],
[0.2023, 0.1994, 0.2010])
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.4914, 0.4822, 0.4465],
[0.2023, 0.1994, 0.2010])
])
train_data = gluon.data.DataLoader(
gluon.data.vision.CIFAR10(train=True).transform_first(transform_train),
batch_size=batch_size,
shuffle=True,
last_batch="discard",
num_workers=args.num_workers)
test_data = gluon.data.DataLoader(
gluon.data.vision.CIFAR10(train=False).transform_first(transform_test),
batch_size=batch_size,
shuffle=False,
num_workers=args.num_workers)
# Load model architecture and Initialize the net with pretrained model
finetune_net = get_model(args.model, pretrained=True)
with finetune_net.name_scope():
finetune_net.fc = nn.Dense(args.classes)
finetune_net.fc.initialize(init.Xavier(), ctx=ctx)
finetune_net.collect_params().reset_ctx(ctx)
finetune_net.hybridize()
# Define trainer
trainer = gluon.Trainer(finetune_net.collect_params(), "sgd", {
"learning_rate": args.lr,
"momentum": args.momentum,
"wd": args.wd
})
L = gluon.loss.SoftmaxCrossEntropyLoss()
metric = mx.metric.Accuracy()
def train(epoch):
for i, batch in enumerate(train_data):
data = gluon.utils.split_and_load(
batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
label = gluon.utils.split_and_load(
batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
with ag.record():
outputs = [finetune_net(X) for X in data]
loss = [L(yhat, y) for yhat, y in zip(outputs, label)]
for l in loss:
l.backward()
trainer.step(batch_size)
mx.nd.waitall()
def test():
test_loss = 0
for i, batch in enumerate(test_data):
data = gluon.utils.split_and_load(
batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
label = gluon.utils.split_and_load(
batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
outputs = [finetune_net(X) for X in data]
loss = [L(yhat, y) for yhat, y in zip(outputs, label)]
test_loss += sum(l.mean().asscalar() for l in loss) / len(loss)
metric.update(label, outputs)
_, test_acc = metric.get()
test_loss /= len(test_data)
reporter(mean_loss=test_loss, mean_accuracy=test_acc)
for epoch in range(1, args.epochs + 1):
train(epoch)
test()
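# A minimal sketch (illustration only, independent of MXNet/Ray) of the
# `vars(args).update(config)` idiom used at the top of train_cifar10: vars()
# returns the namespace's attribute dict, so updating it overwrites the CLI
# defaults with the hyperparameters sampled by the scheduler.
def _merge_config_sketch():
    import argparse
    ns = argparse.Namespace(lr=0.001, momentum=0.9)
    sampled = {"lr": 0.01}            # e.g. a value drawn by the tuner
    vars(ns).update(sampled)
    assert ns.lr == 0.01 and ns.momentum == 0.9
    return ns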
if __name__ == "__main__":
args = parser.parse_args()
import ray
from ray import tune
from ray.tune.schedulers import AsyncHyperBandScheduler, FIFOScheduler
ray.init()
if args.scheduler == "fifo":
sched = FIFOScheduler()
elif args.scheduler == "asynchyperband":
sched = AsyncHyperBandScheduler(
time_attr="training_iteration",
metric="mean_loss",
mode="min",
max_t=400,
grace_period=60)
else:
raise NotImplementedError
tune.register_trainable(
"TRAIN_FN",
lambda config, reporter: train_cifar10(args, config, reporter))
tune.run(
"TRAIN_FN",
name=args.expname,
verbose=2,
scheduler=sched,
**{
"stop": {
"mean_accuracy": 0.98,
"training_iteration": 1 if args.smoke_test else args.epochs
},
"resources_per_trial": {
"cpu": int(args.num_workers),
"gpu": int(args.num_gpus)
},
"num_samples": 1 if args.smoke_test else args.num_samples,
"config": {
"lr": tune.sample_from(
lambda spec: np.power(10.0, np.random.uniform(-4, -1))),
"momentum": tune.sample_from(
lambda spec: np.random.uniform(0.85, 0.95)),
}
})
| |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements various transmuter classes.
Transmuters are essentially classes that generate TransformedStructures from
various data sources. They enable the high-throughput generation of new
structures and input files.
It also includes the helper function batch_write_vasp_input, which generates an
entire directory of vasp input files for running.
"""
__author__ = "Shyue Ping Ong, Will Richards"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 4, 2012"
import os
import re
from multiprocessing import Pool
from pymatgen.alchemy.materials import TransformedStructure
from pymatgen.io.vasp.sets import MPRelaxSet
class StandardTransmuter:
"""
An example of a Transmuter object, which performs a sequence of
transformations on many structures to generate TransformedStructures.
.. attribute: transformed_structures
List of all transformed structures.
"""
def __init__(
self,
transformed_structures,
transformations=None,
extend_collection=0,
ncores=None,
):
"""
Initializes a transmuter from an initial list of
:class:`pymatgen.alchemy.materials.TransformedStructure`.
Args:
transformed_structures ([TransformedStructure]): Input transformed
structures
transformations ([Transformations]): New transformations to be
applied to all structures.
extend_collection (int): Whether to use more than one output
structure from one-to-many transformations. extend_collection
can be an int, which determines the maximum branching for each
transformation.
ncores (int): Number of cores to use for applying transformations.
Uses multiprocessing.Pool. Default is None, which implies
serial.
"""
self.transformed_structures = transformed_structures
self.ncores = ncores
if transformations is not None:
for trans in transformations:
self.append_transformation(trans, extend_collection=extend_collection)
def __getitem__(self, index):
return self.transformed_structures[index]
def __getattr__(self, name):
return [getattr(x, name) for x in self.transformed_structures]
def undo_last_change(self):
"""
Undo the last transformation in the TransformedStructure.
Raises:
IndexError if already at the oldest change.
"""
for x in self.transformed_structures:
x.undo_last_change()
def redo_next_change(self):
"""
Redo the last undone transformation in the TransformedStructure.
Raises:
IndexError if already at the latest change.
"""
for x in self.transformed_structures:
x.redo_next_change()
def __len__(self):
return len(self.transformed_structures)
def append_transformation(self, transformation, extend_collection=False, clear_redo=True):
"""
Appends a transformation to all TransformedStructures.
Args:
transformation: Transformation to append
extend_collection: Whether to use more than one output structure
from one-to-many transformations. extend_collection can be a
number, which determines the maximum branching for each
transformation.
clear_redo (bool): Whether to clear the redo list. By default,
this is True, meaning any appends clears the history of
undoing. However, when using append_transformation to do a
redo, the redo list should not be cleared to allow multiple
redos.
Returns:
            List of booleans corresponding to the initial transformed
            structures; each boolean describes whether the transformation
            altered the structure.
"""
if self.ncores and transformation.use_multiprocessing:
with Pool(self.ncores) as p:
# need to condense arguments into single tuple to use map
z = map(
lambda x: (x, transformation, extend_collection, clear_redo),
self.transformed_structures,
)
new_tstructs = p.map(_apply_transformation, z, 1)
self.transformed_structures = []
for ts in new_tstructs:
self.transformed_structures.extend(ts)
else:
new_structures = []
for x in self.transformed_structures:
new = x.append_transformation(transformation, extend_collection, clear_redo=clear_redo)
if new is not None:
new_structures.extend(new)
self.transformed_structures.extend(new_structures)
def extend_transformations(self, transformations):
"""
Extends a sequence of transformations to the TransformedStructure.
Args:
transformations: Sequence of Transformations
"""
for t in transformations:
self.append_transformation(t)
def apply_filter(self, structure_filter):
"""
Applies a structure_filter to the list of TransformedStructures
in the transmuter.
Args:
structure_filter: StructureFilter to apply.
"""
def test_transformed_structure(ts):
return structure_filter.test(ts.final_structure)
self.transformed_structures = list(filter(test_transformed_structure, self.transformed_structures))
for ts in self.transformed_structures:
ts.append_filter(structure_filter)
def write_vasp_input(self, **kwargs):
r"""
Batch write vasp input for a sequence of transformed structures to
output_dir, following the format output_dir/{formula}_{number}.
Args:
\\*\\*kwargs: All kwargs supported by batch_write_vasp_input.
"""
batch_write_vasp_input(self.transformed_structures, **kwargs)
def set_parameter(self, key, value):
"""
Add parameters to the transmuter. Additional parameters are stored in
the as_dict() output.
Args:
key: The key for the parameter.
value: The value for the parameter.
"""
for x in self.transformed_structures:
x.other_parameters[key] = value
def add_tags(self, tags):
"""
Add tags for the structures generated by the transmuter.
Args:
tags: A sequence of tags. Note that this should be a sequence of
strings, e.g., ["My awesome structures", "Project X"].
"""
self.set_parameter("tags", tags)
def __str__(self):
output = ["Current structures", "------------"]
for x in self.transformed_structures:
output.append(str(x.final_structure))
return "\n".join(output)
def append_transformed_structures(self, tstructs_or_transmuter):
"""
        Method is overloaded to accept either a list of transformed structures
        or a transmuter, in which case it appends the second transmuter's
        structures.
Args:
tstructs_or_transmuter: A list of transformed structures or a
transmuter.
"""
if isinstance(tstructs_or_transmuter, self.__class__):
self.transformed_structures.extend(tstructs_or_transmuter.transformed_structures)
else:
for ts in tstructs_or_transmuter:
assert isinstance(ts, TransformedStructure)
self.transformed_structures.extend(tstructs_or_transmuter)
@staticmethod
def from_structures(structures, transformations=None, extend_collection=0):
"""
Alternative constructor from structures rather than
TransformedStructures.
Args:
structures: Sequence of structures
transformations: New transformations to be applied to all
structures
extend_collection: Whether to use more than one output structure
from one-to-many transformations. extend_collection can be a
number, which determines the maximum branching for each
transformation.
Returns:
StandardTransmuter
"""
tstruct = [TransformedStructure(s, []) for s in structures]
return StandardTransmuter(tstruct, transformations, extend_collection)
class CifTransmuter(StandardTransmuter):
"""
Generates a Transmuter from a cif string, possibly containing multiple
structures.
"""
def __init__(self, cif_string, transformations=None, primitive=True, extend_collection=False):
"""
Generates a Transmuter from a cif string, possibly
containing multiple structures.
Args:
cif_string: A string containing a cif or a series of cifs
transformations: New transformations to be applied to all
structures
primitive: Whether to generate the primitive cell from the cif.
extend_collection: Whether to use more than one output structure
from one-to-many transformations. extend_collection can be a
number, which determines the maximum branching for each
transformation.
"""
transformed_structures = []
lines = cif_string.split("\n")
structure_data = []
read_data = False
for line in lines:
if re.match(r"^\s*data", line):
structure_data.append([])
read_data = True
if read_data:
structure_data[-1].append(line)
for data in structure_data:
tstruct = TransformedStructure.from_cif_string("\n".join(data), [], primitive)
transformed_structures.append(tstruct)
super().__init__(transformed_structures, transformations, extend_collection)
@staticmethod
def from_filenames(filenames, transformations=None, primitive=True, extend_collection=False):
"""
Generates a TransformedStructureCollection from a cif, possibly
containing multiple structures.
Args:
filenames: List of strings of the cif files
transformations: New transformations to be applied to all
structures
primitive: Same meaning as in __init__.
extend_collection: Same meaning as in __init__.
"""
allcifs = []
for fname in filenames:
with open(fname) as f:
allcifs.append(f.read())
return CifTransmuter(
"\n".join(allcifs),
transformations,
primitive=primitive,
extend_collection=extend_collection,
)
class PoscarTransmuter(StandardTransmuter):
"""
Generates a transmuter from a sequence of POSCARs.
"""
def __init__(self, poscar_string, transformations=None, extend_collection=False):
"""
Args:
            poscar_string: A POSCAR string.
transformations: New transformations to be applied to all
structures.
extend_collection: Whether to use more than one output structure
from one-to-many transformations.
"""
tstruct = TransformedStructure.from_poscar_string(poscar_string, [])
super().__init__([tstruct], transformations, extend_collection=extend_collection)
@staticmethod
def from_filenames(poscar_filenames, transformations=None, extend_collection=False):
"""
        Convenience constructor to generate a POSCAR transmuter from a list of
        POSCAR filenames.
Args:
poscar_filenames: List of POSCAR filenames
transformations: New transformations to be applied to all
structures.
extend_collection:
Same meaning as in __init__.
"""
tstructs = []
for filename in poscar_filenames:
with open(filename) as f:
tstructs.append(TransformedStructure.from_poscar_string(f.read(), []))
return StandardTransmuter(tstructs, transformations, extend_collection=extend_collection)
def batch_write_vasp_input(
transformed_structures,
vasp_input_set=MPRelaxSet,
output_dir=".",
create_directory=True,
subfolder=None,
include_cif=False,
**kwargs,
):
"""
Batch write vasp input for a sequence of transformed structures to
output_dir, following the format output_dir/{group}/{formula}_{number}.
Args:
transformed_structures: Sequence of TransformedStructures.
        vasp_input_set: pymatgen.io.vasp.sets.VaspInputSet used to create
            vasp input files from structures.
output_dir: Directory to output files
create_directory (bool): Create the directory if not present.
Defaults to True.
subfolder: Function to create subdirectory name from
transformed_structure.
e.g., lambda x: x.other_parameters["tags"][0] to use the first
tag.
        include_cif (bool): Boolean indicating whether to output a CIF as
well. CIF files are generally better supported in visualization
programs.
"""
for i, s in enumerate(transformed_structures):
formula = re.sub(r"\s+", "", s.final_structure.formula)
if subfolder is not None:
subdir = subfolder(s)
dirname = os.path.join(output_dir, subdir, f"{formula}_{i}")
else:
dirname = os.path.join(output_dir, f"{formula}_{i}")
s.write_vasp_input(vasp_input_set, dirname, create_directory=create_directory, **kwargs)
if include_cif:
from pymatgen.io.cif import CifWriter
writer = CifWriter(s.final_structure)
writer.write_file(os.path.join(dirname, f"{formula}.cif"))
def _apply_transformation(inputs):
"""
Helper method for multiprocessing of apply_transformation. Must not be
in the class so that it can be pickled.
Args:
inputs: Tuple containing the transformed structure, the transformation
to be applied, a boolean indicating whether to extend the
            collection, and a boolean indicating whether to clear the redo list.
Returns:
List of output structures (the modified initial structure, plus
any new structures created by a one-to-many transformation)
"""
ts, transformation, extend_collection, clear_redo = inputs
new = ts.append_transformation(transformation, extend_collection, clear_redo=clear_redo)
o = [ts]
if new:
o.extend(new)
return o
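# A short usage sketch of the transmuter API defined above (illustration only).
# The import paths for Structure/Lattice and SubstitutionTransformation are
# assumptions about the pymatgen package layout; from_structures and
# transformed_structures are the interfaces defined in this module.
def _transmuter_usage_sketch():
    from pymatgen.core import Lattice, Structure
    from pymatgen.transformations.standard_transformations import (
        SubstitutionTransformation,
    )
    # A toy rock-salt-like structure used purely for illustration.
    structure = Structure(
        Lattice.cubic(4.2), ["Li", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]]
    )
    transmuter = StandardTransmuter.from_structures(
        [structure], transformations=[SubstitutionTransformation({"Li": "Na"})]
    )
    # Each input structure now carries its transformation history.
    return transmuter.transformed_structures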
| |
import os
import bottle
import _dynStruct
def check_block_id(block_id):
if not block_id or block_id == "None":
return None
try:
block_id = int(block_id)
except:
return False
if block_id < 0 or block_id >= len(_dynStruct.l_block):
return False
return block_id
def check_struct_id(id_struct):
if not id_struct or id_struct == "None":
return None
if not _dynStruct.Struct.get_by_id(id_struct):
return False
return id_struct
def check_id_member_from_access(id_member):
if not id_member or id_member == "None":
return None
if not _dynStruct.Struct.get_by_id(id_member):
return False
return id_member
@bottle.route("/block")
def block_view():
block_id = check_block_id(bottle.request.query.id)
if block_id or block_id == 0:
return bottle.template("block_view", block=_dynStruct.l_block[block_id])
else:
return bottle.template("error", msg="Bad block id")
@bottle.route("/block_search")
def block_search():
id_struct = check_struct_id(bottle.request.query.id_struct)
if id_struct == False:
return bottle.template("error", msg="Bad struct id")
else:
return bottle.template("block_search", id_struct=id_struct)
@bottle.route("/block_get")
def block_get():
id_struct = check_struct_id(bottle.request.query.id_struct)
return _dynStruct.block_json(id_struct, bottle.request.query)
@bottle.route("/access_search")
def access_search():
id_block = check_block_id(bottle.request.query.id_block)
id_member = check_id_member_from_access(bottle.request.query.id_member)
if id_block != 0 and id_block == False:
return bottle.template("error", msg="Bad block id")
elif id_member != 0 and id_member == False:
return bottle.template("error", msg="Bad struct id")
else:
return bottle.template("access_search", id_block=id_block, id_member=id_member)
@bottle.route("/access_get")
def access_get():
id_block = check_block_id(bottle.request.query.id_block)
id_member = check_id_member_from_access(bottle.request.query.id_member)
return _dynStruct.access_json(id_block, id_member, bottle.request.query)
@bottle.route("/struct")
def struct_view():
struct = _dynStruct.Struct.get_by_id(bottle.request.query.id)
if not struct:
return bottle.template("error", msg="Bad struct id")
return bottle.template("struct_view", struct=struct, edit=False)
@bottle.route("/struct_edit")
def struct_edit():
struct = _dynStruct.Struct.get_by_id(bottle.request.query.id)
if not struct:
return bottle.template("error", msg="Bad struct id")
return bottle.template("struct_view", struct=struct, edit=True)
@bottle.route("/struct_do_edit", method='POST')
def struct_do_edit():
struct = _dynStruct.Struct.get_by_id(bottle.request.query.id)
if not struct:
return bottle.template("error", msg="Bad struct id")
struct.name = bottle.request.forms.name
_dynStruct.save_modif()
bottle.redirect("/struct?id=%d" % (struct.id))
@bottle.route("/struct_remove")
def struct_remove():
struct = _dynStruct.Struct.get_by_id(bottle.request.query.id)
if not struct:
return bottle.template("error", msg="Bad struct id")
struct.remove_all_block()
_dynStruct.l_struct.remove(struct)
_dynStruct.save_modif()
bottle.redirect("/struct_search")
@bottle.route("/struct_create")
def struct_create():
return bottle.template("struct_create")
@bottle.route("/struct_do_create", method='POST')
def struct_do_create():
size = bottle.request.forms.size
try:
size = int(size)
except ValueError:
return bottle.template("error", msg="Size is not in integer")
if size <= 0:
return bottle.template("error", msg="Size have to be positive")
new_struct = _dynStruct.Struct(None)
new_struct.id = _dynStruct.l_struct[-1].id + 1
new_struct.name = bottle.request.forms.name
new_struct.size = size
new_struct.add_pad()
_dynStruct.l_struct.append(new_struct)
_dynStruct.save_modif()
bottle.redirect("/struct?id=%d" % (new_struct.id))
@bottle.route("/struct_search")
def struct_search():
return bottle.template("struct_search")
@bottle.route("/struct_get")
def struct_get():
return _dynStruct.struct_json()
@bottle.route("/struct_edit_instance")
def struct_edit_instance():
struct = _dynStruct.Struct.get_by_id(bottle.request.query.id_struct)
if not struct:
return bottle.template("error", msg="Bad struct id")
return bottle.template("edit_block_list", id_struct=struct.id, struct_name=struct.name)
@bottle.route("/member_get")
def member_get():
id_struct = bottle.request.query.id_struct
return _dynStruct.member_json(_dynStruct.Struct.get_by_id(id_struct), id_struct)
def member_template(query, edit):
id_struct = check_struct_id(query.id_struct)
id_member = query.id_member
if id_member != 0 and not id_member:
return bottle.template("error", msg="member id missing")
if id_struct != 0 and not id_struct:
return bottle.template("error", msg="Bad struct id")
member = _dynStruct.Struct.get_member_by_id(id_struct, int(id_member))
if not member:
return bottle.template("error", msg="bad member id")
return bottle.template("member_view",
id_member="%s.%d" % (id_struct, member.offset),
member=member,
name_member=_dynStruct.Struct.make_member_name(id_struct, member.offset),
edit=edit)
@bottle.route("/member")
def member_view():
return(member_template(bottle.request.query, False))
@bottle.route("/member_edit")
def member_edit():
return(member_template(bottle.request.query, True))
@bottle.route("/member_do_edit", method='POST')
def member_do_edit():
id_struct = check_struct_id(bottle.request.query.id_struct)
id_member = bottle.request.query.id_member
if id_member != 0 and not id_member:
return bottle.template("error", msg="member id missing")
if not id_struct:
return bottle.template("error", msg="Bad struct id")
struct = _dynStruct.Struct.get_by_id(id_struct)
if not struct:
return bottle.template("error", msg="bad struct id")
member = struct.get_member(int(id_member))
next_member = struct.get_member(int(id_member) + member.size)
try:
member.edit(bottle.request.forms, next_member, struct.size)
except ValueError as err:
return bottle.template("error", msg=str(err))
# if next member is padding remove it, add_pad will set a new padding with
# correct size + offset if needed
if next_member and next_member.is_padding:
struct.members.remove(next_member)
struct.add_pad()
_dynStruct.save_modif()
bottle.redirect("/member?id_struct=%s&id_member=%s" % (id_struct, id_member))
@bottle.route("/member_remove")
def member_remove():
id_struct = check_struct_id(bottle.request.query.id_struct)
id_member = bottle.request.query.id_member
if id_member != 0 and not id_member:
return bottle.template("error", msg="member id missing")
if id_struct != 0 and not id_struct:
return bottle.template("error", msg="Bad struct id")
id_struct = str(id_struct)
struct = _dynStruct.Struct.get_by_id(id_struct)
if not struct:
return bottle.template("error", msg="bad struct id")
member = struct.get_member(int(id_member))
next_member = struct.get_member(int(id_member) + member.size)
prev_member = None
if struct.members.index(member) > 0:
prev_member = struct.members[struct.members.index(member) - 1]
struct.members.remove(member)
    # remove any adjacent padding; add_pad will then create a single new padding
    # covering the old padding plus the removed member
if next_member and next_member.is_padding:
struct.members.remove(next_member)
if prev_member and prev_member.is_padding:
struct.members.remove(prev_member)
struct.add_pad()
_dynStruct.save_modif()
bottle.redirect("/struct?id=%s" % (id_struct))
@bottle.route("/member_create")
def member_create():
id_struct = check_struct_id(bottle.request.query.id_struct)
id_member = bottle.request.query.id_member
if not id_member:
return bottle.template("error", msg="member id missing")
if not id_struct:
return bottle.template("error", msg="Bad struct id")
member = _dynStruct.Struct.get_by_id("%s.%s" % (id_struct, id_member))
if not member:
return bottle.template("error", msg="Bad member id")
if not member.is_padding:
return bottle.template("error", msg="To add a member, member_id have to point to a padding member")
return bottle.template("member_create", id_struct=id_struct, member=member)
@bottle.route("/member_do_create", method='POST')
def member_do_create():
id_struct = check_struct_id(bottle.request.query.id_struct)
id_member = bottle.request.query.id_member
if not id_member:
return bottle.template("error", msg="member id missing")
if not id_struct:
return bottle.template("error", msg="Bad struct id")
struct = _dynStruct.Struct.get_by_id(id_struct)
if not struct:
return bottle.template("error", msg="bad struct id")
member = struct.get_member(int(id_member))
if not member:
return bottle.template("error", msg="Bad member id")
if not member.is_padding:
return bottle.template("error", msg="To add a member, member_id have to point to a padding member")
try:
struct.add_member_from_web_ui(member, bottle.request.forms)
except ValueError as err:
struct.add_pad()
return bottle.template("error", msg=str(err))
struct.members.remove(member)
struct.add_pad()
_dynStruct.save_modif()
if '.' in id_struct:
bottle.redirect("/member?id_struct=%s&id_member=%s" % (id_struct[:id_struct.rfind('.')], id_struct[id_struct.rfind('.') + 1:]))
else:
bottle.redirect("/struct?id=%s" % (id_struct))
@bottle.route("/header.h")
def dl_header():
bottle.response.content_type = 'text/x-c'
return _dynStruct.get_header(_dynStruct.l_struct)
@bottle.route("/static/<filename:path>")
def serve_static(filename):
return bottle.static_file(filename, root=os.path.join(os.path.dirname(__file__), "static"))
@bottle.route("/remove_struct")
def remove_struct_from_block():
id_block = check_block_id(bottle.request.query.id_block)
if id_block != 0 and not id_block:
return bottle.template("error", msg="Bad block id")
_dynStruct.l_block[id_block].struct.remove_block(_dynStruct.l_block[id_block])
_dynStruct.save_modif()
bottle.redirect("/block?id=%d" % (id_block))
@bottle.route("/add_to_struct")
def add_to_struct_from_block():
id_block = check_block_id(bottle.request.query.id_block)
if id_block != 0 and not id_block:
return bottle.template("error", msg="Bad block id")
return bottle.template("struct_select", id_block=id_block)
@bottle.route("/do_add_to_struct")
def do_add_to_struct_from_block():
id_block = check_block_id(bottle.request.query.id_block)
id_struct = check_struct_id(bottle.request.query.id_struct)
if id_block != 0 and not id_block:
return bottle.template("error", msg="Bad block id")
if not id_struct:
return bottle.template("error", msg="Bad struct id")
block = _dynStruct.l_block[id_block]
if block.struct:
return bottle.template("error", msg="Block already linked")
struct = _dynStruct.Struct.get_by_id(id_struct)
struct.add_block(block)
_dynStruct.save_modif()
return bottle.template("block_view", block=block)
@bottle.route("/struct_select_get")
def get_list_compat_struct():
id_block = check_block_id(bottle.request.query.id_block)
return _dynStruct.struct_select_json(id_block)
@bottle.route("/struct_instance_get")
def struct_instance_get():
id_struct = check_struct_id(bottle.request.query.id_struct)
if not id_struct:
return bottle.template("error", msg="Bad struct id")
struct = _dynStruct.Struct.get_by_id(id_struct)
if not struct:
return bottle.template("error", msg="bad struct id")
instance = True if bottle.request.query.instance == "true" else False
return(_dynStruct.struct_instances_json(struct, instance))
@bottle.route("/struct_instance_do_edit", method='POST')
def struct_instance_do_edit():
id_struct = check_struct_id(bottle.request.query.id)
if not id_struct:
return bottle.template("error", msg="Bad struct id")
struct = _dynStruct.Struct.get_by_id(id_struct)
if not struct:
return bottle.template("error", msg="bad struct id")
if bottle.request.forms.add != '':
for add in bottle.request.forms.add.split(','):
struct.add_block(_dynStruct.l_block[int(add)])
if bottle.request.forms.remove != '':
for remove in bottle.request.forms.remove.split(','):
struct.remove_block(_dynStruct.l_block[int(remove)])
_dynStruct.save_modif()
@bottle.route("/struct_do_detect")
def struct_do_detect():
id_struct = check_struct_id(bottle.request.query.id_struct)
if not id_struct:
return bottle.template("error", msg="Bad struct id")
struct = _dynStruct.Struct.get_by_id(id_struct)
struct.detect(_dynStruct.l_block)
_dynStruct.save_modif()
bottle.redirect("/struct?id=%s" % (id_struct))
@bottle.route("/do_recovery")
def do_recovery():
block_id = check_block_id(bottle.request.query.id_block)
if not block_id and block_id != 0:
return bottle.template("error", msg="Bad block id")
block = _dynStruct.l_block[block_id]
if not block.struct:
new_struct = _dynStruct.Struct(block)
new_struct.clean_struct()
try:
            new_struct.id = _dynStruct.l_struct[-1].id + 1
except IndexError:
new_struct.id = 1
new_struct.set_default_name()
_dynStruct.l_struct.append(new_struct)
_dynStruct.save_modif()
else:
return bottle.template("error", msg="Block already linked")
bottle.redirect("/block?id=%d" % (block_id))
@bottle.route("/quit")
def quit():
return bottle.template("quit")
@bottle.route("/do_quit")
def do_quit():
os._exit(0)
@bottle.route("/")
def index():
bottle.redirect("/block_search")
def start_webui(addr, port):
bottle.TEMPLATE_PATH.insert(0, os.path.dirname(__file__) + "/views")
print("Starting web server at http://%s:%s" % (addr, port))
bottle.run(host=addr, port=port, quiet=True)
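# A tiny stand-alone sketch (illustration only) of the member-id convention
# used by the routes above: a member is addressed as "<struct_id>.<offset>",
# composed with "%s.%d" and split again on the last dot (see member_template
# and member_do_create).
def _member_id_sketch():
    struct_id, offset = "3", 16
    member_id = "%s.%d" % (struct_id, offset)          # -> "3.16"
    parent = member_id[:member_id.rfind('.')]
    off = member_id[member_id.rfind('.') + 1:]
    assert (parent, int(off)) == (struct_id, offset)
    return member_id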
| |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Trains a GAN to generate synthetic images of MNIST digits.
A Generative Adversarial Network (GAN) is a generative model that learns the
probability distribution of training examples and generates similar samples. It
simultaneously trains two models: a generative model G that learns the data
distribution, and a discriminative model D that estimates the probability that a
sample came from the training data [1].
To learn the generator's distribution p_g over the data x, we define a
multilayer perceptron G(z; theta_g), where the prior on the input noise z~p_z(z) is a Gaussian
distribution. We also define a second multilayer perceptron D(x; theta_d)
that outputs a single scalar. D(x) represents the probability that x came
from the training data rather than p_g.
We train D to maximize the probability of assigning the correct label to both
training examples and samples from G. We simultaneously train G to maximize the
probability of D making a mistake. This framework corresponds to a minimax
two-player game with value function V(G, D):
```none
V(G, D) = E_{x~p_data(x)}[log(D(x))] + E_{z~p_z(z)}[log(1-D(G(z)))]
```
This optimization problem is bilevel: it requires a minimum with respect to the
generative parameters theta_g and a maximum with respect to the discriminative
parameters theta_d. In practice, the algorithm proceeds by
iterating gradient updates on each network. The goal of training is to reach
the equilibrium where the generator produces samples that are indistinguishable
by the discriminator.
#### References
[1]: Ian J. Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu,
David Warde-Farley, Sherjil Ozair, Aaron Courville, Yoshua Bengio.
Generative Adversarial Nets. In _Neural Information Processing
Systems Conference_, 2014.
https://arxiv.org/pdf/1406.2661.pdf
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from trainer.tfgfile_wrapper import tfgfile_wrapper
import os
# Dependency imports
from absl import flags
flags.DEFINE_string(name="job-dir", default="/tmp", help="AI Platform Training passes this to the training script.")
from matplotlib.backends import backend_agg
from matplotlib import figure
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.contrib.learn.python.learn.datasets import mnist
tfd = tfp.distributions
IMAGE_SHAPE = [28, 28, 1]
tf.flags.DEFINE_float('learning_rate',
default=1e-4,
help='Initial learning rate.')
tf.flags.DEFINE_integer('max_steps',
default=100000,
help='Number of training steps to run.')
tf.flags.DEFINE_integer('batch_size',
default=128,
help='Batch size.')
tf.flags.DEFINE_integer('hidden_size',
default=128,
help='Hidden layer size.')
flags.DEFINE_integer('viz_steps',
default=1000,
help='Frequency at which save generated images.')
flags.DEFINE_string('data_dir',
default='/tmp/data',
help='Directory where data is stored (if using real data)')
flags.DEFINE_string(
'model_dir',
default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'),
'generative_adversarial_network/'),
help="Directory to put the model's fit.")
flags.DEFINE_bool('fake_data',
default=None,
help='If true, uses fake data. Defaults to real data.')
FLAGS = flags.FLAGS
def build_input_pipeline(train_images, batch_size):
"""Build an iterator over training batches."""
training_dataset = tf.data.Dataset.from_tensor_slices(train_images)
training_batches = training_dataset.shuffle(
50000, reshuffle_each_iteration=True).repeat().batch(batch_size)
training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches)
images = training_iterator.get_next()
return images
def build_fake_data(size):
"""Generate fake images of MNIST digits."""
# Generate random noise from a Gaussian distribution.
fake_images = np.random.normal(size=size)
return fake_images
@tfgfile_wrapper
def plot_generated_images(images, fname):
"""Save a synthetic image as a PNG file.
Args:
images: samples of synthetic images generated by the generative network.
fname: Python `str`, filename to save the plot to.
"""
fig = figure.Figure(figsize=(4, 4))
canvas = backend_agg.FigureCanvasAgg(fig)
for i, image in enumerate(images):
ax = fig.add_subplot(4, 4, i + 1)
ax.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.imshow(image.reshape(IMAGE_SHAPE[:-1]), cmap='Greys_r')
fig.tight_layout()
fig.subplots_adjust(wspace=0.05, hspace=0.05)
canvas.print_figure(fname, format='png')
def main(argv):
del argv # unused
if tf.io.gfile.exists(FLAGS.model_dir):
tf.compat.v1.logging.warning(
'Warning: deleting old log directory at {}'.format(FLAGS.model_dir))
tf.io.gfile.rmtree(FLAGS.model_dir)
tf.io.gfile.makedirs(FLAGS.model_dir)
# Collapse the image data dimension for use with a fully-connected layer.
image_size = np.prod(IMAGE_SHAPE, dtype=np.int32)
if FLAGS.fake_data:
train_images = build_fake_data([10, image_size])
else:
mnist_data = mnist.read_data_sets(FLAGS.data_dir, reshape=image_size)
train_images = mnist_data.train.images
images = build_input_pipeline(train_images, FLAGS.batch_size)
# Build a Generative network. We use the Flipout Monte Carlo estimator
# for the fully-connected layers: this enables lower variance stochastic
# gradients than naive reparameterization.
with tf.compat.v1.name_scope('Generator'):
random_noise = tf.placeholder(tf.float64, shape=[None, FLAGS.hidden_size])
generative_net = tf.keras.Sequential([
tfp.layers.DenseFlipout(FLAGS.hidden_size, activation=tf.nn.relu),
tfp.layers.DenseFlipout(image_size, activation=tf.sigmoid)
])
sythetic_image = generative_net(random_noise)
# Build a Discriminative network. Define the model as a Bernoulli
# distribution parameterized by logits from a fully-connected layer.
with tf.compat.v1.name_scope('Discriminator'):
discriminative_net = tf.keras.Sequential([
tfp.layers.DenseFlipout(FLAGS.hidden_size, activation=tf.nn.relu),
tfp.layers.DenseFlipout(1)
])
logits_real = discriminative_net(images)
logits_fake = discriminative_net(sythetic_image)
labels_distribution_real = tfd.Bernoulli(logits=logits_real)
labels_distribution_fake = tfd.Bernoulli(logits=logits_fake)
  # Compute the model loss for the discriminator and generator, averaged over
# the batch size.
loss_real = -tf.reduce_mean(
input_tensor=labels_distribution_real.log_prob(
tf.ones_like(logits_real)))
loss_fake = -tf.reduce_mean(
input_tensor=labels_distribution_fake.log_prob(
tf.zeros_like(logits_fake)))
loss_discriminator = loss_real + loss_fake
loss_generator = -tf.reduce_mean(
input_tensor=labels_distribution_fake.log_prob(
tf.ones_like(logits_fake)))
with tf.compat.v1.name_scope('train'):
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
train_op_discriminator = optimizer.minimize(
loss_discriminator,
var_list=tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope='Discriminator'))
train_op_generator = optimizer.minimize(
loss_generator,
var_list=tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope='Generator'))
with tf.compat.v1.Session() as sess:
sess.run(tf.global_variables_initializer())
for step in range(FLAGS.max_steps + 1):
# Iterate gradient updates on each network.
_, loss_value_d = sess.run([train_op_discriminator, loss_discriminator],
feed_dict={random_noise: build_fake_data(
[FLAGS.batch_size, FLAGS.hidden_size])})
_, loss_value_g = sess.run([train_op_generator, loss_generator],
feed_dict={random_noise: build_fake_data(
[FLAGS.batch_size, FLAGS.hidden_size])})
      # Visualize some synthetic images produced by the generative network.
if step % FLAGS.viz_steps == 0:
images = sess.run(sythetic_image,
feed_dict={random_noise: build_fake_data(
[16, FLAGS.hidden_size])})
plot_generated_images(images, fname=os.path.join(
FLAGS.model_dir,
'step{:06d}_images.png'.format(step)))
print('Step: {:>3d} Loss_discriminator: {:.3f} '
'Loss_generator: {:.3f}'.format(step, loss_value_d, loss_value_g))
if __name__ == '__main__':
tf.compat.v1.app.run()
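# A numerical sketch (illustration only, plain numpy) of the loss terms built
# above with Bernoulli log-probabilities.  For logits l, the Bernoulli log_prob
# of label 1 is log(sigmoid(l)) and of label 0 is log(1 - sigmoid(l)) =
# log(sigmoid(-l)), so loss_discriminator and loss_generator reduce to the
# usual GAN cross-entropy terms from the docstring's value function V(G, D).
def _gan_loss_sketch():
  import numpy as np
  def log_sigmoid(l):
    return -np.logaddexp(0.0, -l)            # log(sigmoid(l)), numerically stable
  logits_real = np.array([2.0, 1.5, -0.3])   # D(x) logits on real images
  logits_fake = np.array([-1.0, 0.2, 0.5])   # D(G(z)) logits on generated images
  loss_d = -np.mean(log_sigmoid(logits_real)) - np.mean(log_sigmoid(-logits_fake))
  loss_g = -np.mean(log_sigmoid(logits_fake))
  return loss_d, loss_g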
| |
#!/usr/bin/env python2.7
# -*- mode: python; coding: utf-8; -*-
"""Module for generating lexicon using Velikovich's method (2010).
"""
##################################################################
# Imports
from __future__ import unicode_literals, print_function
from datetime import datetime
from lasagne.init import HeUniform, Orthogonal
from sklearn.model_selection import train_test_split
from theano import tensor as TT
import codecs
import numpy as np
import re
import sys
import theano
from common import ENCODING, EPSILON, FMAX, FMIN, MAX_EPOCHS, MIN_EPOCHS, \
NONMATCH_RE, NEGATIVE_IDX, NEUTRAL_IDX, POSITIVE_IDX, \
floatX, sgd_updates_adadelta
from common import POSITIVE as POSITIVE_LBL
from common import NEGATIVE as NEGATIVE_LBL
from germanet import normalize
##################################################################
# Constants
SPACE_RE = re.compile(r"\s+")
ORTHOGONAL = Orthogonal()
HE_UNIFORM = HeUniform()
##################################################################
# Methods
def digitize_trainset(w2i, a_pos, a_neg, a_neut, a_pos_re, a_neg_re):
"""Method for generating sentiment lexicons using Velikovich's approach.
@param a_N - number of terms to extract
@param a_emb_fname - files of the original corpus
@param a_pos - initial set of positive terms to be expanded
@param a_neg - initial set of negative terms to be expanded
@param a_neut - initial set of neutral terms to be expanded
@param a_pos_re - regular expression for matching positive terms
@param a_neg_re - regular expression for matching negative terms
@return list of terms sorted according to their polarities
"""
X = []
Y = []
def add_seeds(seeds, label):
for iterm in seeds:
iterm = normalize(iterm)
if iterm in w2i:
X.append(w2i[iterm])
Y.append(label)
add_seeds(a_pos, POSITIVE_IDX)
add_seeds(a_neg, NEGATIVE_IDX)
add_seeds(a_neut, NEUTRAL_IDX)
for iterm, idx in w2i.iteritems():
if a_pos_re.match(iterm):
X.append(idx)
Y.append(POSITIVE_IDX)
elif a_neg_re.match(iterm):
X.append(idx)
Y.append(NEGATIVE_IDX)
return (np.asarray(X, dtype="int32"),
np.asarray(Y, dtype="int32"))
def read_embeddings(fname, encoding):
"""Read embeddings from file and populate an mebedding matrix.
Args:
fname (str): name of the embedding file
encoding (str): file encoding
Returns:
      3-tuple(dict, theano.shared, int): word-to-index mapping, embedding
        matrix, and vector dimensionality
"""
i = 0
w2i = {}
EMBS = None
with codecs.open(fname, 'r', encoding=encoding, errors="replace") as ifile:
for iline in ifile:
iline = iline.strip()
if EMBS is None:
nterms, ndim = [int(t) for t in SPACE_RE.split(iline)]
EMBS = floatX(np.empty((nterms, ndim)))
continue
fields = SPACE_RE.split(iline)
try:
EMBS[i] = np.array([float(f) for f in fields[-ndim:]])
w2i[' '.join(fields[:-ndim])] = i
except:
print("Invalid line format: {!r}".format(iline),
file=sys.stderr)
continue
i += 1
EMBS = theano.shared(value=EMBS, name="EMBS")
return (w2i, EMBS, ndim)
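# A minimal sketch of the text format read_embeddings() expects, assuming a
# word2vec-style file with a "<n_terms> <n_dims>" header followed by one
# whitespace-separated "<term> <v_1> ... <v_ndim>" line per term (the terms
# and numbers below are purely illustrative):
#
#   3 4
#   gut 0.11 -0.02 0.37 0.08
#   schlecht -0.21 0.05 -0.33 0.14
#   haus 0.01 0.00 -0.07 0.12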
def init_nnet(W, n_classes, vec_dim):
"""Initialize neural network.
Args:
W (theano.shared): embedding matrix
n_classes: number of classes to be predicted
vec_dim: dimensionality of the embeddings
"""
w_idx = TT.iscalar(name="w_idx")
y_gold = TT.iscalar(name="y_gold")
embs = W[w_idx]
Theta = theano.shared(value=ORTHOGONAL.sample((n_classes, vec_dim)),
name="Theta")
beta = theano.shared(value=HE_UNIFORM.sample((1, n_classes)), name="beta")
y_probs = TT.nnet.softmax(TT.dot(Theta, embs.T).flatten() + beta).flatten()
params = [Theta]
cost = -TT.mean(TT.log(y_probs[y_gold]))
updates = sgd_updates_adadelta(params, cost)
train = theano.function([w_idx, y_gold], cost, updates=updates)
y_pred = TT.argmax(y_probs)
y_score = y_probs[y_pred]
predict = theano.function([w_idx], (y_pred, y_score))
acc = TT.eq(y_gold, y_pred)
validate = theano.function([w_idx, y_gold], acc)
return (train, validate, predict, params)
def tang(a_N, a_emb_fname, a_pos, a_neg, a_neut,
a_pos_re=NONMATCH_RE, a_neg_re=NONMATCH_RE,
a_encoding=ENCODING):
"""Method for generating sentiment lexicons using Velikovich's approach.
@param a_N - number of terms to extract
@param a_emb_fname - files of the original corpus
@param a_pos - initial set of positive terms to be expanded
@param a_neg - initial set of negative terms to be expanded
@param a_neut - initial set of neutral terms to be expanded
@param a_pos_re - regular expression for matching positive terms
@param a_neg_re - regular expression for matching negative terms
@param a_neg_re - regular expression for matching negative terms
@param a_encoding - encoding of the vector file
@return list of terms sorted according to their polarities
"""
w2i, EMBS, ndim = read_embeddings(a_emb_fname, a_encoding)
X, Y = digitize_trainset(w2i, a_pos, a_neg, a_neut,
a_pos_re, a_neg_re)
train, validate, predict, params = init_nnet(EMBS,
len(set(Y)), ndim)
best_params = []
best_acc = acc = -1
N = len(Y)
train_idcs, devtest_idcs = train_test_split(np.arange(N),
test_size=0.1)
devtest_N = float(len(devtest_idcs))
devtest_X = X[devtest_idcs]
devtest_Y = Y[devtest_idcs]
# train
epoch_i = 0
prev_cost = 0
while epoch_i < MAX_EPOCHS:
np.random.shuffle(train_idcs)
cost = 0.
start_time = datetime.utcnow()
for idx in train_idcs:
x_i, y_i = X[idx], Y[idx]
cost += train(x_i, y_i)
acc = 0.
for x_i, y_i in zip(devtest_X, devtest_Y):
acc += validate(x_i, y_i)
acc /= devtest_N
if acc >= best_acc:
best_params = [p.get_value() for p in params]
best_acc = acc
sfx = " *"
else:
sfx = ''
end_time = datetime.utcnow()
tdelta = (end_time - start_time).total_seconds()
print("Iteration #{:d} ({:.2f} sec): cost = {:.2f}, "
"accuracy = {:.2%};{:s}".format(epoch_i, tdelta,
cost, acc, sfx),
file=sys.stderr)
if abs(prev_cost - cost) < EPSILON and epoch_i > MIN_EPOCHS:
break
else:
prev_cost = cost
epoch_i += 1
if best_params:
for p, val in zip(params, best_params):
p.set_value(val)
# apply trained classifier to unseen data
ret = []
for w, w_idx in w2i.iteritems():
if normalize(w) in a_pos or a_pos_re.match(w):
pol_cls = POSITIVE_LBL
pol_score = FMAX
elif normalize(w) in a_neg or a_neg_re.match(w):
pol_cls = NEGATIVE_LBL
pol_score = FMIN
else:
pol_idx, pol_score = predict(w_idx)
pol_score = pol_score.item(0)
if pol_idx == POSITIVE_IDX:
pol_cls = POSITIVE_LBL
elif pol_idx == NEGATIVE_IDX:
pol_cls = NEGATIVE_LBL
else:
continue
ret.append((w, pol_cls, pol_score))
ret.sort(key=lambda el: abs(el[-1]), reverse=True)
return ret
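# Hypothetical usage sketch of tang() (the embedding file name and the seed
# terms below are made up for illustration):
#
#   lexicon = tang(-1, "embeddings.vec", {"gut"}, {"schlecht"}, {"haus"})
#   for term, label, score in lexicon[:10]:
#       print(term, label, score)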
| |
# -*- coding:utf-8 -*-
__author__ = 'Randolph'
import os
import sys
import time
import logging
sys.path.append('../')
logging.getLogger('tensorflow').disabled = True
import numpy as np
import tensorflow as tf
from tensorboard.plugins import projector
from text_ann import TextANN
from utils import checkmate as cm
from utils import data_helpers as dh
from utils import param_parser as parser
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
args = parser.parameter_parser()
OPTION = dh._option(pattern=0)
logger = dh.logger_fn("tflog", "logs/{0}-{1}.log".format('Train' if OPTION == 'T' else 'Restore', time.asctime()))
def create_input_data(data: dict):
return zip(data['f_pad_seqs'], data['b_pad_seqs'], data['onehot_labels'])
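# Each element yielded by create_input_data() is a (front_sequence,
# back_sequence, onehot_label) triple; a hypothetical instance (token ids and
# label are made up) could look like ([12, 5, 0, 0], [7, 3, 0, 0], [0, 1]).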
def train_ann():
"""Training ANN model."""
# Print parameters used for the model
dh.tab_printer(args, logger)
# Load word2vec model
word2idx, embedding_matrix = dh.load_word2vec_matrix(args.word2vec_file)
# Load sentences, labels, and training parameters
logger.info("Loading data...")
logger.info("Data processing...")
train_data = dh.load_data_and_labels(args, args.train_file, word2idx)
val_data = dh.load_data_and_labels(args, args.validation_file, word2idx)
# Build a graph and ann object
with tf.Graph().as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=args.allow_soft_placement,
log_device_placement=args.log_device_placement)
session_conf.gpu_options.allow_growth = args.gpu_options_allow_growth
sess = tf.Session(config=session_conf)
with sess.as_default():
ann = TextANN(
sequence_length=args.pad_seq_len,
vocab_size=len(word2idx),
embedding_type=args.embedding_type,
embedding_size=args.embedding_dim,
fc_hidden_size=args.fc_dim,
num_classes=args.num_classes,
l2_reg_lambda=args.l2_lambda,
pretrained_embedding=embedding_matrix)
# Define training procedure
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
learning_rate = tf.train.exponential_decay(learning_rate=args.learning_rate,
global_step=ann.global_step,
decay_steps=args.decay_steps,
decay_rate=args.decay_rate,
staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads, vars = zip(*optimizer.compute_gradients(ann.loss))
grads, _ = tf.clip_by_global_norm(grads, clip_norm=args.norm_ratio)
train_op = optimizer.apply_gradients(zip(grads, vars), global_step=ann.global_step, name="train_op")
# Keep track of gradient values and sparsity (optional)
grad_summaries = []
for g, v in zip(grads, vars):
if g is not None:
grad_hist_summary = tf.summary.histogram("{0}/grad/hist".format(v.name), g)
sparsity_summary = tf.summary.scalar("{0}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
grad_summaries.append(grad_hist_summary)
grad_summaries.append(sparsity_summary)
grad_summaries_merged = tf.summary.merge(grad_summaries)
# Output directory for models and summaries
out_dir = dh.get_out_dir(OPTION, logger)
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
best_checkpoint_dir = os.path.abspath(os.path.join(out_dir, "bestcheckpoints"))
# Summaries for loss
loss_summary = tf.summary.scalar("loss", ann.loss)
# Train summaries
train_summary_op = tf.summary.merge([loss_summary, grad_summaries_merged])
train_summary_dir = os.path.join(out_dir, "summaries", "train")
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
# Validation summaries
validation_summary_op = tf.summary.merge([loss_summary])
validation_summary_dir = os.path.join(out_dir, "summaries", "validation")
validation_summary_writer = tf.summary.FileWriter(validation_summary_dir, sess.graph)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=args.num_checkpoints)
best_saver = cm.BestCheckpointSaver(save_dir=best_checkpoint_dir, num_to_keep=3, maximize=True)
if OPTION == 'R':
# Load ann model
logger.info("Loading model...")
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
logger.info(checkpoint_file)
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{0}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
if OPTION == 'T':
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
# Embedding visualization config
config = projector.ProjectorConfig()
embedding_conf = config.embeddings.add()
embedding_conf.tensor_name = "embedding"
embedding_conf.metadata_path = args.metadata_file
projector.visualize_embeddings(train_summary_writer, config)
projector.visualize_embeddings(validation_summary_writer, config)
# Save the embedding visualization
saver.save(sess, os.path.join(out_dir, "embedding", "embedding.ckpt"))
current_step = sess.run(ann.global_step)
def train_step(batch_data):
"""A single training step."""
x_f, x_b, y_onehot = zip(*batch_data)
feed_dict = {
ann.input_x_front: x_f,
ann.input_x_behind: x_b,
ann.input_y: y_onehot,
ann.dropout_keep_prob: args.dropout_rate,
ann.is_training: True
}
_, step, summaries, loss = sess.run(
[train_op, ann.global_step, train_summary_op, ann.loss], feed_dict)
logger.info("step {0}: loss {1:g}".format(step, loss))
train_summary_writer.add_summary(summaries, step)
def validation_step(val_loader, writer=None):
"""Evaluates model on a validation set."""
batches_validation = dh.batch_iter(list(create_input_data(val_loader)), args.batch_size, 1)
eval_counter, eval_loss = 0, 0.0
true_labels = []
predicted_scores = []
predicted_labels = []
for batch_validation in batches_validation:
x_f, x_b, y_onehot = zip(*batch_validation)
feed_dict = {
ann.input_x_front: x_f,
ann.input_x_behind: x_b,
ann.input_y: y_onehot,
ann.dropout_keep_prob: 1.0,
ann.is_training: False
}
step, summaries, predictions, cur_loss = sess.run(
[ann.global_step, validation_summary_op, ann.topKPreds, ann.loss], feed_dict)
# Prepare for calculating metrics
for i in y_onehot:
true_labels.append(np.argmax(i))
for j in predictions[0]:
predicted_scores.append(j[0])
for k in predictions[1]:
predicted_labels.append(k[0])
eval_loss = eval_loss + cur_loss
eval_counter = eval_counter + 1
if writer:
writer.add_summary(summaries, step)
eval_loss = float(eval_loss / eval_counter)
# Calculate Precision & Recall & F1
eval_acc = accuracy_score(y_true=np.array(true_labels), y_pred=np.array(predicted_labels))
eval_pre = precision_score(y_true=np.array(true_labels),
y_pred=np.array(predicted_labels), average='micro')
eval_rec = recall_score(y_true=np.array(true_labels),
y_pred=np.array(predicted_labels), average='micro')
eval_F1 = f1_score(y_true=np.array(true_labels),
y_pred=np.array(predicted_labels), average='micro')
# Calculate the average AUC
eval_auc = roc_auc_score(y_true=np.array(true_labels),
y_score=np.array(predicted_scores), average='micro')
return eval_loss, eval_acc, eval_pre, eval_rec, eval_F1, eval_auc
# Generate batches
batches_train = dh.batch_iter(list(create_input_data(train_data)), args.batch_size, args.epochs)
num_batches_per_epoch = int((len(train_data['f_pad_seqs']) - 1) / args.batch_size) + 1
# Training loop. For each batch...
for batch_train in batches_train:
train_step(batch_train)
current_step = tf.train.global_step(sess, ann.global_step)
if current_step % args.evaluate_steps == 0:
logger.info("\nEvaluation:")
eval_loss, eval_acc, eval_pre, eval_rec, eval_F1, eval_auc = \
validation_step(val_data, writer=validation_summary_writer)
logger.info("All Validation set: Loss {0:g} | Acc {1:g} | Precision {2:g} | "
"Recall {3:g} | F1 {4:g} | AUC {5:g}"
.format(eval_loss, eval_acc, eval_pre, eval_rec, eval_F1, eval_auc))
best_saver.handle(eval_acc, sess, current_step)
if current_step % args.checkpoint_steps == 0:
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
path = saver.save(sess, checkpoint_prefix, global_step=current_step)
logger.info("Saved model checkpoint to {0}\n".format(path))
if current_step % num_batches_per_epoch == 0:
current_epoch = current_step // num_batches_per_epoch
logger.info("Epoch {0} has finished!".format(current_epoch))
logger.info("All Done.")
if __name__ == '__main__':
train_ann()
| |
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core methods for checking EcmaScript files for common style guide violations.
"""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'jacobr@google.com (Jacob Richman)')
import re
import gflags as flags
from closure_linter import checkerbase
from closure_linter import ecmametadatapass
from closure_linter import error_check
from closure_linter import errorrules
from closure_linter import errors
from closure_linter import indentation
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import statetracker
from closure_linter import tokenutil
from closure_linter.common import error
from closure_linter.common import position
FLAGS = flags.FLAGS
flags.DEFINE_list('custom_jsdoc_tags', '', 'Extra jsdoc tags to allow')
# TODO(user): When flipping this to True, remove logic from unit tests
# that overrides this flag.
flags.DEFINE_boolean('dot_on_next_line', False, 'Require dots to be '
                     'placed on the next line for wrapped expressions')
# TODO(robbyw): Check for extra parens on return statements
# TODO(robbyw): Check for 0px in strings
# TODO(robbyw): Ensure inline jsDoc is in {}
# TODO(robbyw): Check for valid JS types in parameter docs
# Shorthand
Context = ecmametadatapass.EcmaContext
Error = error.Error
Modes = javascripttokenizer.JavaScriptModes
Position = position.Position
Rule = error_check.Rule
Type = javascripttokens.JavaScriptTokenType
class EcmaScriptLintRules(checkerbase.LintRulesBase):
"""EmcaScript lint style checking rules.
Can be used to find common style errors in JavaScript, ActionScript and other
Ecma like scripting languages. Style checkers for Ecma scripting languages
should inherit from this style checker.
Please do not add any state to EcmaScriptLintRules or to any subclasses.
All state should be added to the StateTracker subclass used for a particular
language.
"""
  # Initialized in the constructor so that the flags have been parsed first.
max_line_length = -1
# Static constants.
MISSING_PARAMETER_SPACE = re.compile(r',\S')
EXTRA_SPACE = re.compile(r'(\(\s|\s\))')
ENDS_WITH_SPACE = re.compile(r'\s$')
ILLEGAL_TAB = re.compile(r'\t')
# Regex used to split up complex types to check for invalid use of ? and |.
TYPE_SPLIT = re.compile(r'[,<>()]')
# Regex for form of author lines after the @author tag.
AUTHOR_SPEC = re.compile(r'(\s*)[^\s]+@[^(\s]+(\s*)\(.+\)')
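  # AUTHOR_SPEC matches the text that follows an @author tag, for example
  # " robbyw@google.com (Robert Walker)"; group(1) captures the spaces before
  # the address and group(2) the spaces before the parenthesized name.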
# Acceptable tokens to remove for line too long testing.
LONG_LINE_IGNORE = frozenset(
['*', '//', '@see'] +
['@%s' % tag for tag in statetracker.DocFlag.HAS_TYPE])
JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED = frozenset([
'@fileoverview', '@param', '@return', '@returns'])
def __init__(self):
"""Initialize this lint rule object."""
checkerbase.LintRulesBase.__init__(self)
if EcmaScriptLintRules.max_line_length == -1:
EcmaScriptLintRules.max_line_length = errorrules.GetMaxLineLength()
def Initialize(self, checker, limited_doc_checks, is_html):
"""Initialize this lint rule object before parsing a new file."""
checkerbase.LintRulesBase.Initialize(self, checker, limited_doc_checks,
is_html)
self._indentation = indentation.IndentationRules()
def HandleMissingParameterDoc(self, token, param_name):
"""Handle errors associated with a parameter missing a @param tag."""
raise TypeError('Abstract method HandleMissingParameterDoc not implemented')
def _CheckLineLength(self, last_token, state):
"""Checks whether the line is too long.
Args:
last_token: The last token in the line.
state: parser_state object that indicates the current state in the page
"""
    # Start from the last token so that we have the flag object attached to
    # any DOC_FLAG tokens.
line_number = last_token.line_number
token = last_token
# Build a representation of the string where spaces indicate potential
# line-break locations.
line = []
while token and token.line_number == line_number:
if state.IsTypeToken(token):
line.insert(0, 'x' * len(token.string))
elif token.type in (Type.IDENTIFIER, Type.OPERATOR):
# Dots are acceptable places to wrap (may be tokenized as identifiers).
line.insert(0, token.string.replace('.', ' '))
else:
line.insert(0, token.string)
token = token.previous
line = ''.join(line)
line = line.rstrip('\n\r\f')
try:
length = len(unicode(line, 'utf-8'))
except (LookupError, UnicodeDecodeError):
# Unknown encoding. The line length may be wrong, as was originally the
# case for utf-8 (see bug 1735846). For now just accept the default
# length, but as we find problems we can either add test for other
# possible encodings or return without an error to protect against
# false positives at the cost of more false negatives.
length = len(line)
if length > EcmaScriptLintRules.max_line_length:
# If the line matches one of the exceptions, then it's ok.
for long_line_regexp in self.GetLongLineExceptions():
if long_line_regexp.match(last_token.line):
return
# If the line consists of only one "word", or multiple words but all
# except one are ignoreable, then it's ok.
parts = set(line.split())
# We allow two "words" (type and name) when the line contains @param
max_parts = 1
if '@param' in parts:
max_parts = 2
# Custom tags like @requires may have url like descriptions, so ignore
# the tag, similar to how we handle @see.
custom_tags = set(['@%s' % f for f in FLAGS.custom_jsdoc_tags])
if (len(parts.difference(self.LONG_LINE_IGNORE | custom_tags))
> max_parts):
self._HandleError(
errors.LINE_TOO_LONG,
'Line too long (%d characters).' % len(line), last_token)
def _CheckJsDocType(self, token, js_type):
"""Checks the given type for style errors.
Args:
token: The DOC_FLAG token for the flag whose type to check.
js_type: The flag's typeannotation.TypeAnnotation instance.
"""
if not js_type: return
if js_type.type_group and len(js_type.sub_types) == 2:
identifiers = [t.identifier for t in js_type.sub_types]
if 'null' in identifiers:
        # Don't warn if the identifier is a template type (e.g. {TYPE|null}).
if not identifiers[0].isupper() and not identifiers[1].isupper():
self._HandleError(
errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL,
'Prefer "?Type" to "Type|null": "%s"' % js_type, token)
# TODO(user): We should report an error for wrong usage of '?' and '|'
# e.g. {?number|string|null} etc.
for sub_type in js_type.IterTypes():
self._CheckJsDocType(token, sub_type)
def _CheckForMissingSpaceBeforeToken(self, token):
"""Checks for a missing space at the beginning of a token.
Reports a MISSING_SPACE error if the token does not begin with a space or
the previous token doesn't end with a space and the previous token is on the
same line as the token.
Args:
token: The token being checked
"""
# TODO(user): Check if too many spaces?
if (len(token.string) == len(token.string.lstrip()) and
token.previous and token.line_number == token.previous.line_number and
len(token.previous.string) - len(token.previous.string.rstrip()) == 0):
self._HandleError(
errors.MISSING_SPACE,
'Missing space before "%s"' % token.string,
token,
position=Position.AtBeginning())
def _CheckOperator(self, token):
"""Checks an operator for spacing and line style.
Args:
token: The operator token.
"""
last_code = token.metadata.last_code
if not self._ExpectSpaceBeforeOperator(token):
if (token.previous and token.previous.type == Type.WHITESPACE and
last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER) and
last_code.line_number == token.line_number):
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before "%s"' % token.string,
token.previous, position=Position.All(token.previous.string))
elif (token.previous and
not token.previous.IsComment() and
not tokenutil.IsDot(token) and
token.previous.type in Type.EXPRESSION_ENDER_TYPES):
self._HandleError(errors.MISSING_SPACE,
'Missing space before "%s"' % token.string, token,
position=Position.AtBeginning())
# Check wrapping of operators.
next_code = tokenutil.GetNextCodeToken(token)
is_dot = tokenutil.IsDot(token)
wrapped_before = last_code and last_code.line_number != token.line_number
wrapped_after = next_code and next_code.line_number != token.line_number
if FLAGS.dot_on_next_line and is_dot and wrapped_after:
self._HandleError(
errors.LINE_ENDS_WITH_DOT,
'"." must go on the following line',
token)
if (not is_dot and wrapped_before and
not token.metadata.IsUnaryOperator()):
self._HandleError(
errors.LINE_STARTS_WITH_OPERATOR,
'Binary operator must go on previous line "%s"' % token.string,
token)
def _IsLabel(self, token):
# A ':' token is considered part of a label if it occurs in a case
# statement, a plain label, or an object literal, i.e. is not part of a
# ternary.
return (token.string == ':' and
token.metadata.context.type in (Context.LITERAL_ELEMENT,
Context.CASE_BLOCK,
Context.STATEMENT))
def _ExpectSpaceBeforeOperator(self, token):
"""Returns whether a space should appear before the given operator token.
Args:
token: The operator token.
Returns:
Whether there should be a space before the token.
"""
if token.string == ',' or token.metadata.IsUnaryPostOperator():
return False
if tokenutil.IsDot(token):
return False
# Colons should appear in labels, object literals, the case of a switch
# statement, and ternary operator. Only want a space in the case of the
# ternary operator.
if self._IsLabel(token):
return False
if token.metadata.IsUnaryOperator() and token.IsFirstInLine():
return False
return True
def CheckToken(self, token, state):
"""Checks a token, given the current parser_state, for warnings and errors.
Args:
token: The current token under consideration
state: parser_state object that indicates the current state in the page
"""
# Store some convenience variables
first_in_line = token.IsFirstInLine()
last_in_line = token.IsLastInLine()
last_non_space_token = state.GetLastNonSpaceToken()
token_type = token.type
# Process the line change.
if not self._is_html and error_check.ShouldCheck(Rule.INDENTATION):
# TODO(robbyw): Support checking indentation in HTML files.
indentation_errors = self._indentation.CheckToken(token, state)
for indentation_error in indentation_errors:
self._HandleError(*indentation_error)
if last_in_line:
self._CheckLineLength(token, state)
if token_type == Type.PARAMETERS:
# Find missing spaces in parameter lists.
if self.MISSING_PARAMETER_SPACE.search(token.string):
fix_data = ', '.join([s.strip() for s in token.string.split(',')])
self._HandleError(errors.MISSING_SPACE, 'Missing space after ","',
token, position=None, fix_data=fix_data.strip())
# Find extra spaces at the beginning of parameter lists. Make sure
# we aren't at the beginning of a continuing multi-line list.
if not first_in_line:
space_count = len(token.string) - len(token.string.lstrip())
if space_count:
self._HandleError(errors.EXTRA_SPACE, 'Extra space after "("',
token, position=Position(0, space_count))
elif (token_type == Type.START_BLOCK and
token.metadata.context.type == Context.BLOCK):
self._CheckForMissingSpaceBeforeToken(token)
elif token_type == Type.END_BLOCK:
last_code = token.metadata.last_code
if state.InFunction() and state.IsFunctionClose():
if state.InTopLevelFunction():
          # A semicolon should not be included at the end of a function
          # declaration.
if not state.InAssignedFunction():
if not last_in_line and token.next.type == Type.SEMICOLON:
self._HandleError(
errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
'Illegal semicolon after function declaration',
token.next, position=Position.All(token.next.string))
# A semicolon should be included at the end of a function expression
# that is not immediately called or used by a dot operator.
if (state.InAssignedFunction() and token.next
and token.next.type != Type.SEMICOLON):
next_token = tokenutil.GetNextCodeToken(token)
is_immediately_used = (next_token.type == Type.START_PAREN or
tokenutil.IsDot(next_token))
if not is_immediately_used:
self._HandleError(
errors.MISSING_SEMICOLON_AFTER_FUNCTION,
'Missing semicolon after function assigned to a variable',
token, position=Position.AtEnd(token.string))
if state.InInterfaceMethod() and last_code.type != Type.START_BLOCK:
self._HandleError(errors.INTERFACE_METHOD_CANNOT_HAVE_CODE,
'Interface methods cannot contain code', last_code)
elif (state.IsBlockClose() and
token.next and token.next.type == Type.SEMICOLON):
if (last_code.metadata.context.parent.type != Context.OBJECT_LITERAL
and last_code.metadata.context.type != Context.OBJECT_LITERAL):
self._HandleError(
errors.REDUNDANT_SEMICOLON,
'No semicolon is required to end a code block',
token.next, position=Position.All(token.next.string))
elif token_type == Type.SEMICOLON:
if token.previous and token.previous.type == Type.WHITESPACE:
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before ";"',
token.previous, position=Position.All(token.previous.string))
if token.next and token.next.line_number == token.line_number:
if token.metadata.context.type != Context.FOR_GROUP_BLOCK:
# TODO(robbyw): Error about no multi-statement lines.
pass
elif token.next.type not in (
Type.WHITESPACE, Type.SEMICOLON, Type.END_PAREN):
self._HandleError(
errors.MISSING_SPACE,
'Missing space after ";" in for statement',
token.next,
position=Position.AtBeginning())
last_code = token.metadata.last_code
if last_code and last_code.type == Type.SEMICOLON:
        # Allow a single double semicolon in for loops for cases like:
# for (;;) { }.
# NOTE(user): This is not a perfect check, and will not throw an error
# for cases like: for (var i = 0;; i < n; i++) {}, but then your code
# probably won't work either.
for_token = tokenutil.CustomSearch(
last_code,
lambda token: token.type == Type.KEYWORD and token.string == 'for',
end_func=lambda token: token.type == Type.SEMICOLON,
distance=None,
reverse=True)
if not for_token:
self._HandleError(errors.REDUNDANT_SEMICOLON, 'Redundant semicolon',
token, position=Position.All(token.string))
elif token_type == Type.START_PAREN:
# Ensure that opening parentheses have a space before any keyword
# that is not being invoked like a member function.
if (token.previous and token.previous.type == Type.KEYWORD and
(not token.previous.metadata or
not token.previous.metadata.last_code or
not token.previous.metadata.last_code.string or
token.previous.metadata.last_code.string[-1:] != '.')):
self._HandleError(errors.MISSING_SPACE, 'Missing space before "("',
token, position=Position.AtBeginning())
elif token.previous and token.previous.type == Type.WHITESPACE:
before_space = token.previous.previous
# Ensure that there is no extra space before a function invocation,
# even if the function being invoked happens to be a keyword.
if (before_space and before_space.line_number == token.line_number and
before_space.type == Type.IDENTIFIER or
(before_space.type == Type.KEYWORD and before_space.metadata and
before_space.metadata.last_code and
before_space.metadata.last_code.string and
before_space.metadata.last_code.string[-1:] == '.')):
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before "("',
token.previous, position=Position.All(token.previous.string))
elif token_type == Type.START_BRACKET:
self._HandleStartBracket(token, last_non_space_token)
elif token_type in (Type.END_PAREN, Type.END_BRACKET):
# Ensure there is no space before closing parentheses, except when
# it's in a for statement with an omitted section, or when it's at the
# beginning of a line.
if (token.previous and token.previous.type == Type.WHITESPACE and
not token.previous.IsFirstInLine() and
not (last_non_space_token and last_non_space_token.line_number ==
token.line_number and
last_non_space_token.type == Type.SEMICOLON)):
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before "%s"' %
token.string, token.previous,
position=Position.All(token.previous.string))
elif token_type == Type.WHITESPACE:
if self.ILLEGAL_TAB.search(token.string):
if token.IsFirstInLine():
if token.next:
self._HandleError(
errors.ILLEGAL_TAB,
'Illegal tab in whitespace before "%s"' % token.next.string,
token, position=Position.All(token.string))
else:
self._HandleError(
errors.ILLEGAL_TAB,
'Illegal tab in whitespace',
token, position=Position.All(token.string))
else:
self._HandleError(
errors.ILLEGAL_TAB,
'Illegal tab in whitespace after "%s"' % token.previous.string,
token, position=Position.All(token.string))
# Check whitespace length if it's not the first token of the line and
# if it's not immediately before a comment.
if last_in_line:
# Check for extra whitespace at the end of a line.
self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',
token, position=Position.All(token.string))
elif not first_in_line and not token.next.IsComment():
if token.length > 1:
self._HandleError(
errors.EXTRA_SPACE, 'Extra space after "%s"' %
token.previous.string, token,
position=Position(1, len(token.string) - 1))
elif token_type == Type.OPERATOR:
self._CheckOperator(token)
elif token_type == Type.DOC_FLAG:
flag = token.attached_object
if flag.flag_type == 'bug':
# TODO(robbyw): Check for exactly 1 space on the left.
string = token.next.string.lstrip()
string = string.split(' ', 1)[0]
if not string.isdigit():
self._HandleError(errors.NO_BUG_NUMBER_AFTER_BUG_TAG,
'@bug should be followed by a bug number', token)
elif flag.flag_type == 'suppress':
if flag.type is None:
# A syntactically invalid suppress tag will get tokenized as a normal
# flag, indicating an error.
self._HandleError(
errors.INCORRECT_SUPPRESS_SYNTAX,
'Invalid suppress syntax: should be @suppress {errortype}. '
'Spaces matter.', token)
else:
for suppress_type in flag.jstype.IterIdentifiers():
if suppress_type not in state.GetDocFlag().SUPPRESS_TYPES:
self._HandleError(
errors.INVALID_SUPPRESS_TYPE,
'Invalid suppression type: %s' % suppress_type, token)
elif (error_check.ShouldCheck(Rule.WELL_FORMED_AUTHOR) and
flag.flag_type == 'author'):
# TODO(user): In non strict mode check the author tag for as much as
# it exists, though the full form checked below isn't required.
string = token.next.string
result = self.AUTHOR_SPEC.match(string)
if not result:
self._HandleError(errors.INVALID_AUTHOR_TAG_DESCRIPTION,
'Author tag line should be of the form: '
'@author foo@somewhere.com (Your Name)',
token.next)
else:
# Check spacing between email address and name. Do this before
# checking earlier spacing so positions are easier to calculate for
# autofixing.
num_spaces = len(result.group(2))
if num_spaces < 1:
self._HandleError(errors.MISSING_SPACE,
'Missing space after email address',
token.next, position=Position(result.start(2), 0))
elif num_spaces > 1:
self._HandleError(
errors.EXTRA_SPACE, 'Extra space after email address',
token.next,
position=Position(result.start(2) + 1, num_spaces - 1))
          # Check for extra spaces before the email address. There can't be
          # too few: without at least one space the @author tag wouldn't have
          # matched.
num_spaces = len(result.group(1))
if num_spaces > 1:
self._HandleError(errors.EXTRA_SPACE,
'Extra space before email address',
token.next, position=Position(1, num_spaces - 1))
elif (flag.flag_type in state.GetDocFlag().HAS_DESCRIPTION and
not self._limited_doc_checks):
if flag.flag_type == 'param':
if flag.name is None:
self._HandleError(errors.MISSING_JSDOC_PARAM_NAME,
'Missing name in @param tag', token)
if not flag.description or flag.description is None:
flag_name = token.type
if 'name' in token.values:
flag_name = '@' + token.values['name']
if flag_name not in self.JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED:
self._HandleError(
errors.MISSING_JSDOC_TAG_DESCRIPTION,
'Missing description in %s tag' % flag_name, token)
else:
self._CheckForMissingSpaceBeforeToken(flag.description_start_token)
if flag.HasType():
if flag.type_start_token is not None:
self._CheckForMissingSpaceBeforeToken(
token.attached_object.type_start_token)
if flag.jstype and not flag.jstype.IsEmpty():
self._CheckJsDocType(token, flag.jstype)
if error_check.ShouldCheck(Rule.BRACES_AROUND_TYPE) and (
flag.type_start_token.type != Type.DOC_START_BRACE or
flag.type_end_token.type != Type.DOC_END_BRACE):
self._HandleError(
errors.MISSING_BRACES_AROUND_TYPE,
'Type must always be surrounded by curly braces.', token)
if token_type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and
token.values['name'] not in FLAGS.custom_jsdoc_tags):
self._HandleError(
errors.INVALID_JSDOC_TAG,
'Invalid JsDoc tag: %s' % token.values['name'], token)
if (error_check.ShouldCheck(Rule.NO_BRACES_AROUND_INHERIT_DOC) and
token.values['name'] == 'inheritDoc' and
token_type == Type.DOC_INLINE_FLAG):
self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,
'Unnecessary braces around @inheritDoc',
token)
elif token_type == Type.SIMPLE_LVALUE:
identifier = token.values['identifier']
if ((not state.InFunction() or state.InConstructor()) and
state.InTopLevel() and not state.InObjectLiteralDescendant()):
jsdoc = state.GetDocComment()
if not state.HasDocComment(identifier):
# Only test for documentation on identifiers with .s in them to
# avoid checking things like simple variables. We don't require
# documenting assignments to .prototype itself (bug 1880803).
if (not state.InConstructor() and
identifier.find('.') != -1 and not
identifier.endswith('.prototype') and not
self._limited_doc_checks):
comment = state.GetLastComment()
if not (comment and comment.lower().count('jsdoc inherited')):
self._HandleError(
errors.MISSING_MEMBER_DOCUMENTATION,
"No docs found for member '%s'" % identifier,
token)
elif jsdoc and (not state.InConstructor() or
identifier.startswith('this.')):
# We are at the top level and the function/member is documented.
if identifier.endswith('_') and not identifier.endswith('__'):
# Can have a private class which inherits documentation from a
# public superclass.
#
          # @inheritDoc is deprecated in favor of using @override, and they
          # are handled the same way below.
if (jsdoc.HasFlag('override') and not jsdoc.HasFlag('constructor')
and ('accessControls' not in jsdoc.suppressions)):
self._HandleError(
errors.INVALID_OVERRIDE_PRIVATE,
'%s should not override a private member.' % identifier,
jsdoc.GetFlag('override').flag_token)
if (jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor')
and ('accessControls' not in jsdoc.suppressions)):
self._HandleError(
errors.INVALID_INHERIT_DOC_PRIVATE,
'%s should not inherit from a private member.' % identifier,
jsdoc.GetFlag('inheritDoc').flag_token)
if (not jsdoc.HasFlag('private') and
('underscore' not in jsdoc.suppressions) and not
((jsdoc.HasFlag('inheritDoc') or jsdoc.HasFlag('override')) and
('accessControls' in jsdoc.suppressions))):
self._HandleError(
errors.MISSING_PRIVATE,
'Member "%s" must have @private JsDoc.' %
identifier, token)
if jsdoc.HasFlag('private') and 'underscore' in jsdoc.suppressions:
self._HandleError(
errors.UNNECESSARY_SUPPRESS,
'@suppress {underscore} is not necessary with @private',
jsdoc.suppressions['underscore'])
elif (jsdoc.HasFlag('private') and
not self.InExplicitlyTypedLanguage()):
# It is convention to hide public fields in some ECMA
# implementations from documentation using the @private tag.
self._HandleError(
errors.EXTRA_PRIVATE,
'Member "%s" must not have @private JsDoc' %
identifier, token)
# These flags are only legal on localizable message definitions;
# such variables always begin with the prefix MSG_.
for f in ('desc', 'hidden', 'meaning'):
if (jsdoc.HasFlag(f)
and not identifier.startswith('MSG_')
and identifier.find('.MSG_') == -1):
self._HandleError(
errors.INVALID_USE_OF_DESC_TAG,
'Member "%s" should not have @%s JsDoc' % (identifier, f),
token)
      # Check for illegally assigning live objects as prototype property values.
index = identifier.find('.prototype.')
# Ignore anything with additional .s after the prototype.
if index != -1 and identifier.find('.', index + 11) == -1:
equal_operator = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
next_code = tokenutil.SearchExcept(equal_operator, Type.NON_CODE_TYPES)
if next_code and (
next_code.type in (Type.START_BRACKET, Type.START_BLOCK) or
next_code.IsOperator('new')):
self._HandleError(
errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE,
'Member %s cannot have a non-primitive value' % identifier,
token)
elif token_type == Type.END_PARAMETERS:
# Find extra space at the end of parameter lists. We check the token
# prior to the current one when it is a closing paren.
if (token.previous and token.previous.type == Type.PARAMETERS
and self.ENDS_WITH_SPACE.search(token.previous.string)):
self._HandleError(errors.EXTRA_SPACE, 'Extra space before ")"',
token.previous)
jsdoc = state.GetDocComment()
if state.GetFunction().is_interface:
if token.previous and token.previous.type == Type.PARAMETERS:
self._HandleError(
errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS,
'Interface constructor cannot have parameters',
token.previous)
elif (state.InTopLevel() and jsdoc and not jsdoc.HasFlag('see')
and not jsdoc.InheritsDocumentation()
and not state.InObjectLiteralDescendant() and not
jsdoc.IsInvalidated()):
distance, edit = jsdoc.CompareParameters(state.GetParams())
if distance:
params_iter = iter(state.GetParams())
docs_iter = iter(jsdoc.ordered_params)
for op in edit:
if op == 'I':
# Insertion.
# Parsing doc comments is the same for all languages
# but some languages care about parameters that don't have
# doc comments and some languages don't care.
              # Languages that don't allow variables to be typed such as
# JavaScript care but languages such as ActionScript or Java
# that allow variables to be typed don't care.
if not self._limited_doc_checks:
self.HandleMissingParameterDoc(token, params_iter.next())
elif op == 'D':
# Deletion
self._HandleError(errors.EXTRA_PARAMETER_DOCUMENTATION,
'Found docs for non-existing parameter: "%s"' %
docs_iter.next(), token)
elif op == 'S':
# Substitution
if not self._limited_doc_checks:
self._HandleError(
errors.WRONG_PARAMETER_DOCUMENTATION,
'Parameter mismatch: got "%s", expected "%s"' %
(params_iter.next(), docs_iter.next()), token)
else:
# Equality - just advance the iterators
params_iter.next()
docs_iter.next()
elif token_type == Type.STRING_TEXT:
# If this is the first token after the start of the string, but it's at
# the end of a line, we know we have a multi-line string.
if token.previous.type in (
Type.SINGLE_QUOTE_STRING_START,
Type.DOUBLE_QUOTE_STRING_START) and last_in_line:
self._HandleError(errors.MULTI_LINE_STRING,
'Multi-line strings are not allowed', token)
# This check is orthogonal to the ones above, and repeats some types, so
# it is a plain if and not an elif.
if token.type in Type.COMMENT_TYPES:
if self.ILLEGAL_TAB.search(token.string):
self._HandleError(errors.ILLEGAL_TAB,
'Illegal tab in comment "%s"' % token.string, token)
trimmed = token.string.rstrip()
if last_in_line and token.string != trimmed:
# Check for extra whitespace at the end of a line.
self._HandleError(
errors.EXTRA_SPACE, 'Extra space at end of line', token,
position=Position(len(trimmed), len(token.string) - len(trimmed)))
# This check is also orthogonal since it is based on metadata.
if token.metadata.is_implied_semicolon:
self._HandleError(errors.MISSING_SEMICOLON,
'Missing semicolon at end of line', token)
def _HandleStartBracket(self, token, last_non_space_token):
"""Handles a token that is an open bracket.
Args:
token: The token to handle.
last_non_space_token: The last token that was not a space.
"""
if (not token.IsFirstInLine() and token.previous.type == Type.WHITESPACE and
last_non_space_token and
last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before "["',
token.previous, position=Position.All(token.previous.string))
# If the [ token is the first token in a line we shouldn't complain
# about a missing space before [. This is because some Ecma script
# languages allow syntax like:
# [Annotation]
# class MyClass {...}
# So we don't want to blindly warn about missing spaces before [.
    # In the future, when rules for computing exactly how many spaces
# lines should be indented are added, then we can return errors for
# [ tokens that are improperly indented.
# For example:
# var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =
# [a,b,c];
# should trigger a proper indentation warning message as [ is not indented
# by four spaces.
elif (not token.IsFirstInLine() and token.previous and
token.previous.type not in (
[Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +
Type.EXPRESSION_ENDER_TYPES)):
self._HandleError(errors.MISSING_SPACE, 'Missing space before "["',
token, position=Position.AtBeginning())
def Finalize(self, state):
"""Perform all checks that need to occur after all lines are processed.
Args:
state: State of the parser after parsing all tokens
Raises:
TypeError: If not overridden.
"""
last_non_space_token = state.GetLastNonSpaceToken()
# Check last line for ending with newline.
if state.GetLastLine() and not (
state.GetLastLine().isspace() or
state.GetLastLine().rstrip('\n\r\f') != state.GetLastLine()):
self._HandleError(
errors.FILE_MISSING_NEWLINE,
'File does not end with new line. (%s)' % state.GetLastLine(),
last_non_space_token)
try:
self._indentation.Finalize()
    except Exception as e:
self._HandleError(
errors.FILE_DOES_NOT_PARSE,
str(e),
last_non_space_token)
def GetLongLineExceptions(self):
"""Gets a list of regexps for lines which can be longer than the limit.
Returns:
A list of regexps, used as matches (rather than searches).
"""
return []
def InExplicitlyTypedLanguage(self):
"""Returns whether this ecma implementation is explicitly typed."""
return False
| |
import subprocess
import re
parameters = [
["disks" , [ "8", "7", "6", "5", "4", "3", "2", "1" ] ] # 0
, [ "raid" , [ "raid0", "raid1", "raid5", "raid6", "raid00", "raid10", "raid50", "raid60" ] ] # 1
, [ "strip-size", [ "64", "128", "256", "512", "1024" ] ] # 2
, [ "read-policy", [ "normal", "ahead" ] ] # 3
, [ "write-policy", [ "write-back", "write-thru" ] ] # 4
, [ "io-policy", [ "cached", "direct" ] ] # 5
#, [ "ext4", "xfs", "btrfs" ] # fs
#, [ "ubuntu14.04", "centos7", "debian7.5", "opensuse13.1", "fedora20" ] # OS
, [ "swap-size", [ "0", ".125", ".5", "2", "4", "8", "16", "32", "64", "128" ] ] # 6 swap (GB)
, [ "disk-size", [ "8", "16", "32", "64", "128", "256", "512", "1024", "2048", "3072" ] ] # 7 HD size (GB)
, [ "memory-size", [ "1024", "2048", "3072", "4096", "8192", "16384" ] ] # 8 RAM
, [ "num-cpus", [ "1", "2", "3", "4", "5", "6", "7", "8" ] ] # 9 CPUs
, [ "scheduler", [ "deadline", "noop", "cfq" ] ] # 10 disk scheduler - https://wiki.archlinux.org/index.php/Solid_State_Drives#I.2FO_Scheduler
# http://erikugel.wordpress.com/2011/04/14/the-quest-for-the-fastest-linux-filesystem/
, [ "block-size", [ "1024", "2048", "4096" ] ] # 11 fs block size
, [ "ext4-stride", [ "1", "2", "4", "8", "16", "32", "64", "128", "256", "512", "1024" ] ] # 12 fs stride (ext4)
, [ "ext4-stripe-width", [ "8", "16", "32", "64", "128", "256", "512", "1024" ] ] # 13 fs stripe width (ext4) "recommended" lowest is 16 because smallest stripe/largest block size = 64/4
# sunit/swidth (xfs)
, [ "ext4-journal-mode", [ "journal_data", "journal_data_ordered", "journal_data_writeback" ] ] # 14 journal mode
, [ "ext4-barrier", [ "barrier", "no_barrier" ] ] # 15 barrier=0
# partition alignment
, [ "ext4-atime", [ "noatime", "strictatime", "relatime" ] ] # 16 noatime/strictatime/relatime
, [ "ext4-diratime", [ "nodiratime", "diratime" ] ] # 17 nodiratime
, [ "ext4-64-bit", [ "64bit", "no_64bit" ] ] # 18
, [ "ext4-dir-index", [ "dir_index", "no_dir_index" ] ] # 19 directory indexing
, [ "ext4-dir-nlink", [ "dir_nlink", "no_dir_nlink" ] ] # 20
, [ "ext4-extent", [ "extent", "no_extent" ] ] # 21
, [ "ext4-extra-isize", [ "extra_isize", "no_extra_isize" ] ] # 22
, [ "ext4-ext-attr", [ "ext_attr", "no_ext_attr" ] ] # 23
, [ "ext4-filetype", [ "filetype", "no_filetype" ] ] # 24
, [ "ext4-flex-bg", [ "flex_bg", "no_flex_bg" ] ] # 25
, [ "ext4-flex-bg-num-groups", [ "2", "4", "8", "16", "32", "64", "128", "256", "512" ] ] # 26 Number of groups used for flex_bg
#, [ "has_journal", "no_has_journal" ]
, [ "ext4-huge-file", [ "huge_file", "no_huge_file" ] ] # 27
, [ "ext4-sparse-super2", [ "sparse_super2", "no_sparse_super2" ] ] # 28
, [ "ext4-mmp", [ "mmp", "no_mmp" ] ] # 29
# don't test quota, it seems to be buggy
# , [ "quota", "no_quota" ]
# , [ "both", "usr", "grp" ] # extended option: quota type, only applicable if quota is enabled
, [ "ext4-resize-inode", [ "resize_inode", "no_resize_inode" ] ] # 30
, [ "ext4-sparse-super", [ "sparse_super", "no_sparse_super" ] ] # 31
, [ "ext4-uninit-bg", [ "uninit_bg", "no_uninit_bg" ] ] # 32
, [ "ext4-inode-size", [ "128", "256", "512", "1024", "2048", "4096" ] ] # 33, inode_size
, [ "ext4-inode-ratio", [ "16384", "65536", "262144", "1048576", "4194304", "16777216" ] ] # 34 inode_ratio
, [ "ext4-num-backup-sb", [ "0", "1", "2" ] ] # 35 extended option: num_backup_sb
, [ "ext4-packed-meta-blocks", [ "packed_meta_blocks", "no_packed_meta_blocks" ] ] # 36 extended option: packed_meta_blocks (only applicable if flex_bg option is enabled)
, [ "ext4-acl", [ "acl", "noacl" ] ] # 37 mount option: acl
, [ "ext4-inode-allocator", [ "oldalloc", "orlov", "unspecified" ] ] # 38
, [ "ext4-user-xattr", [ "user_xattr", "nouser_xattr" ] ] # 39
, [ "ext4-journal-commit-interval", [ "1", "2", "3", "5", "10", "20", "40", "80" ] ] # 40 journal commit interval
, [ "ext4-journal-checksum-async-commit", [ "no_journal_checksum", "journal_checksum", "journal_async_commit" ] ] # 41
#, [ "0", "4", "8", "16", "32", "64", "128", "512" ] # inode_readahead (default 32) - not functional on 14.04
, [ "ext4-delalloc", [ "nodelalloc", "delalloc" ] ] # 42 nodealloc - http://www.phoronix.com/scan.php?page=article&item=ext4_linux35_tuning&num=1
, [ "ext4-max-batch-time", [ "0", "1000", "1900", "3800", "7500", "15000", "30000", "60000", "120000", "240000" ] ] # 43 max_batch_time
, [ "ext4-min-batch-time", [ "0", "1000", "1900", "3800", "7500", "15000", "30000", "60000", "120000", "240000" ] ] # 44 min_batch_time
, [ "ext4-journal-ioprio", [ "0", "1", "2", "3", "4", "5", "6", "7" ] ] # 45 journal_ioprio
, [ "ext4-auto-da-alloc", [ "auto_da_alloc", "noauto_da_alloc" ] ] # 46
, [ "ext4-discard", [ "discard", "nodiscard" ] ] # 47
, [ "ext4-dioread-lock", [ "dioread_lock", "dioread_nolock" ] ] # 48
, [ "ext4-i-version", [ "i_version", "noi_version" ] ] # 49
, [ "kernel-vm-dirty-ratio", [ "1", "2", "3", "4", "5", "7", "10", "15", "20", "30" ] ] # 50 vm_dirty_ratio - https://wiki.archlinux.org/index.php/Sysctl#Virtual_memory
, [ "kernel-vm-dirty-background-ratio", [ "1", "2", "3", "4", "5", "7", "10", "15", "20", "30" ] ] # 51 vm_dirty_background_ratio - https://wiki.archlinux.org/index.php/Sysctl#Virtual_memory
, [ "kernel-vm-swappiness", [ "0", "1", "3", "10", "30", "50", "80", "90", "95", "99" ] ] # 52 vm_swappiness
, [ "kernel-read-ahead", [ "0", "8", "24", "128", "512", "2048", "8192", "32768", "65536", "131072" ] ] # 53 read ahead - https://raid.wiki.kernel.org/index.php/Performance#RAID-5 must be a multiple of 8
, [ "kernel-fs-read-ahead", [ "0", "8", "24", "128", "512", "2048", "8192", "32768", "65536", "131072" ] ] # 54 filesystem read ahead - https://raid.wiki.kernel.org/index.php/Performance#RAID-5 must be a multiple of 8
, [ "kernel-dev-ncq", [ "1", "2", "4", "8", "12", "16", "20", "24", "28", "32" ] ] # 55 ncq - https://raid.wiki.kernel.org/index.php/Performance#RAID-5 32 is max on my system
, [ "ext4-bh", [ "bh", "nobh" ] ] # 56 http://blog.loxal.net/2008/01/tuning-ext3-for-performance-without.html
, [ "kernel-vm-vfs-cache-pressure", [ "1", "3", "10", "33", "100", "333", "1000" ] ] # 57 vm.vfs_cache_pressure
, [ "kernel-vm-dirty-expire-centisecs", [ "100", "300", "1000", "3000", "10000", "30000", "100000" ] ] # 58 vm.dirty_expire_centisecs (3000 default)
, [ "kernel-vm-dirty-writeback-centisecs", [ "0", "30", "125", "250", "500", "1000", "2000", "4000", "10000", "30000", "100000" ] ] # 59 vm.dirty_writeback_centisecs (default 500, 0 disables)
, [ "kernel-vm-extfrag-threshold", [ "-1", "0", "100", "200", "300", "400", "500", "600", "700", "800", "900", "1000" ] ] # 60 vm.extfrag_threshold
, [ "kernel-vm-hugepages-treas-as-movable", [ "0", "1" ] ] # 61 vm.hugepages_treat_as_movable
, [ "kernel-vm-laptop-mode", [ "0", "1", "3", "10", "33", "100", "333", "1000" ] ] # 62 vm.laptop_mode
, [ "kernel-vm-overcommit-memory", [ "0", "1", "2" ] ] # 63 vm.overcommit_memory
, [ "kernel-vm-overcommit-ratio", [ "32", "64", "128" ] ] # 64 vm.overcommit_ratio
, [ "kernel-vm-percpu-pagelist-fraction", [ "0", "8", "16", "32", "64", "128" ] ] # 65 vm.percpu_pagelist_fraction
, [ "kernel-vm-zone-reclaim-mode", [ "0", "1", "2", "3", "4", "5", "6", "7" ] ] # 66 vm.zone_reclaim_mode
]
param_dict = dict(parameters)
def cmd_line_for(param, value):
param_idx = [x[0] for x in parameters].index(param)
return str(param_idx + 1) + chr(ord('a') + parameters[param_idx][1].index(value))
def create_two_var_cmd_line(param1, param2, test_f):
result = ""
for val1 in param_dict[param1]:
for val2 in param_dict[param2]:
if not test_f(val1, val2):
result += " -w" + cmd_line_for(param1, val1) + cmd_line_for(param2, val2)
return result
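# Worked example of jenny's "-w" exclusion encoding (indices taken from the
# `parameters` table above): cmd_line_for("disks", "7") == "1b" and
# cmd_line_for("raid", "raid1") == "2b", so a (disks, raid) pair rejected by a
# test function such as is_raid_valid_combination below is appended to the
# command line as " -w1b2b".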
def create_three_var_cmd_line(param1, param2, param3, test_f):
result = ""
for val1 in param_dict[param1]:
for val2 in param_dict[param2]:
for val3 in param_dict[param3]:
if not test_f(val1, val2, val3):
result += " -w" + cmd_line_for(param1, val1) + cmd_line_for(param2, val2) + cmd_line_for(param3, val3)
return result
def create_four_var_cmd_line(param1, param2, param3, param4, test_f):
result = ""
for val1 in param_dict[param1]:
for val2 in param_dict[param2]:
for val3 in param_dict[param3]:
for val4 in param_dict[param4]:
if not test_f(val1, val2, val3, val4):
result += " -w" + cmd_line_for(param1, val1) + cmd_line_for(param2, val2) + cmd_line_for(param3, val3) + cmd_line_for(param4, val4)
return result
invalid = ""
def is_raid_valid_combination( disks, raid ):
"""
Should return True if combination is valid and False otherwise.
Test row that is passed here can be incomplete.
To prevent search for unnecessary items filtering function
is executed with found subset of data to validate it.
"""
# check raid level compatibility with number of drives
# [ "raid0", "raid1", "raid5", "raid6", "raid00", "raid10", "raid50", "raid60" ]
if raid == "raid0":
return True
elif raid == "raid1":
return disks in ["2", "4", "6", "8"]
elif raid == "raid5":
return disks in ["3", "4", "5", "6", "7", "8"]
elif raid == "raid6":
return disks in ["3", "4", "5", "6", "7", "8"]
elif raid == "raid00":
return disks in ["2", "4", "6", "8"]
elif raid == "raid10":
return disks in ["4", "8"]
elif raid == "raid50":
return disks in ["6", "8"]
elif raid == "raid60":
return disks in ["6", "8"]
return True
invalid += create_two_var_cmd_line('disks', 'raid', is_raid_valid_combination)
def is_dioread_valid_combination(block_size, dioread):
# EXT4-fs (sdc1): can't mount with dioread_nolock if block size != PAGE_SIZE
if (dioread == 'dioread_nolock'):
return block_size == '4096'
return True
invalid += create_two_var_cmd_line('block-size', 'ext4-dioread-lock', is_dioread_valid_combination)
def is_extent_valid_combination(p64bit, extent):
if p64bit == "64bit":
return extent == "extent"
return True
invalid += create_two_var_cmd_line('ext4-64-bit', 'ext4-extent', is_extent_valid_combination)
def is_inode_size_valid_combination(block_size, inode_size):
return int(inode_size) <= int(block_size)
invalid += create_two_var_cmd_line('block-size', 'ext4-inode-size', is_inode_size_valid_combination)
def is_resize_inode_valid_combination(resize_inode, sparse_super):
return not (resize_inode == "resize_inode" and sparse_super == "no_sparse_super")
invalid += create_two_var_cmd_line('ext4-resize-inode', 'ext4-sparse-super', is_resize_inode_valid_combination)
def is_stripe_width_valid_combination(stripe_width, stride):
return (int(stripe_width) % int(stride)) == 0
invalid += create_two_var_cmd_line('ext4-stripe-width', 'ext4-stride', is_stripe_width_valid_combination)
def is_inode_ratio_valid_combination(block_size, inode_ratio):
return int(block_size) < int(inode_ratio)
invalid += create_two_var_cmd_line('block-size', 'ext4-inode-ratio', is_inode_ratio_valid_combination)
def is_disk_space_valid_combination(disks, raid_level, swap_size, disk_size):
raw_size = 465.25 * float(disks)
def raid_level_multiplier():
if raid_level == "raid0" or raid_level == "raid00":
return 1.0
elif raid_level == "raid1" or raid_level == "raid10":
return 0.5
elif raid_level == "raid5":
return (float(disks) - 1) / float(disks)
elif raid_level == "raid6" or raid_level == "raid50":
return (float(disks) - 2) / float(disks)
elif raid_level == "raid60":
return (float(disks) - 4) / float(disks)
usable_size = raw_size * raid_level_multiplier()
# 3% VMFS overhead
usable_size *= 0.97
return (float(swap_size) + float(disk_size)) <= usable_size
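# Worked example of the capacity check above: 4 disks in raid5 give
# 4 * 465.25 GB = 1861 GB raw, * 3/4 = 1395.75 GB, * 0.97 (VMFS overhead)
# ~= 1353.9 GB usable, so swap-size=64 with disk-size=1024 is allowed while
# swap-size=128 with disk-size=2048 is excluded.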
invalid += create_four_var_cmd_line('disks', 'raid', 'swap-size', 'disk-size', is_disk_space_valid_combination)
def is_inode_ratio_and_disk_size_valid_combination(inode_ratio, disk_size):
num_inodes = (long(disk_size) * 1024 * 1024 * 1024) / long(inode_ratio)
return num_inodes >= 16384 and num_inodes < 2**32
invalid += create_two_var_cmd_line('ext4-inode-ratio', 'disk-size', is_inode_ratio_and_disk_size_valid_combination)
def is_packed_meta_blocks_flex_bg_valid_combination(packed_meta_blocks, flex_bg):
    # doc for packed_meta_blocks: this option requires that the flex_bg file system feature be enabled in order for it to have an effect
if packed_meta_blocks == "packed_meta_blocks" and flex_bg == "no_flex_bg":
return False
return True
invalid += create_two_var_cmd_line('ext4-packed-meta-blocks', 'ext4-flex-bg', is_packed_meta_blocks_flex_bg_valid_combination)
def is_packed_meta_blocks_uninit_bg_valid_combination(packed_meta_blocks, uninit_bg):
if packed_meta_blocks == "packed_meta_blocks" and uninit_bg == "uninit_bg":
return False
return True
invalid += create_two_var_cmd_line('ext4-packed-meta-blocks', 'ext4-uninit-bg', is_packed_meta_blocks_uninit_bg_valid_combination)
# This combination causes invalid number of inode errors
def is_block_size_disk_size_valid_combination(block_size, disk_size):
return not(int(block_size) == 1024 and int(disk_size) >= 1024)
invalid += create_two_var_cmd_line('block-size', 'disk-size', is_block_size_disk_size_valid_combination)
def write_experiments_header():
with open("experiments.csv", "w") as experiments_csv:
experiments_csv.write(",".join([x[0] for x in parameters]) + "\n")
def run_jenny(n_tuples):
cmd = "./jenny -n" + str(n_tuples) + " " + " ".join([str(len(param[1])) for param in parameters]) + invalid
print "executing: " + cmd
output = subprocess.check_output(cmd, shell=True)
#print output
    parsed = [line.strip(" ").split(" ") for line in output.split("\n")]
    # Drop the empty entry produced by the trailing newline in jenny's output.
    if [''] in parsed:
        parsed.remove([''])
with open("experiments.csv", "a") as experiments_csv:
for experiment in parsed:
print experiment
if experiment[0] == 'Could':
print "!"
else:
experiments_csv.write(",".join([parameters[int(re.search("[0-9]+", row_item).group(0)) - 1][1][ord(re.search("[a-z]+", row_item).group(0)) - ord('a')] for row_item in experiment]) + "\n")
write_experiments_header()
run_jenny(1)
run_jenny(2)
# 3-tuples is too much
#run_jenny(3)
| |
#!/usr/bin/env python
'''Kong 0.5.0 Migration Script
Usage: python migration.py --config=/path/to/kong/config [--purge]
Run this script first to migrate Kong to the 0.5.0 schema. Once successful, reload Kong
and run this script again with the --purge option.
Arguments:
-c, --config path to your Kong configuration file
Flags:
--purge if already migrated, purge the old values
-h print help
'''
import getopt, sys, os.path, logging, json
log = logging.getLogger()
log.setLevel("INFO")
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("[%(levelname)s]: %(message)s"))
log.addHandler(handler)
try:
import yaml
from cassandra.cluster import Cluster
from cassandra import ConsistencyLevel, InvalidRequest
from cassandra.query import SimpleStatement
except ImportError as err:
log.error(err)
log.info("""This script requires cassandra-driver and PyYAML:
$ pip install cassandra-driver pyyaml""")
sys.exit(1)
session = None
class ArgumentException(Exception):
pass
def usage():
"""
    Print usage information about this script.
"""
    sys.exit(__doc__)
def shutdown_exit(exit_code):
"""
Shutdown the Cassandra session and exit the script.
"""
session.shutdown()
sys.exit(exit_code)
def load_cassandra_config(kong_config):
"""
    Return the host and port of the first contact point, plus the keyspace, from the Kong configuration.
    :param kong_config: parsed Kong configuration
    :return: (host, port, keyspace) tuple
"""
cass_properties = kong_config["databases_available"]["cassandra"]["properties"]
host, port = cass_properties["contact_points"][0].split(":")
keyspace = cass_properties["keyspace"]
return (host, port, keyspace)
def migrate_schema_migrations_table(session):
"""
Migrate the schema_migrations table whose values changed between < 0.5.0 and 0.5.0
:param session: opened cassandra session
"""
log.info("Migrating schema_migrations table...")
query = SimpleStatement("INSERT INTO schema_migrations(id, migrations) VALUES(%s, %s)", consistency_level=ConsistencyLevel.ALL)
session.execute(query, ["core", ['2015-01-12-175310_skeleton', '2015-01-12-175310_init_schema']])
session.execute(query, ["basic-auth", ['2015-08-03-132400_init_basicauth']])
session.execute(query, ["key-auth", ['2015-07-31-172400_init_keyauth']])
session.execute(query, ["rate-limiting", ['2015-08-03-132400_init_ratelimiting']])
session.execute(query, ["oauth2", ['2015-08-03-132400_init_oauth2', '2015-08-24-215800_cascade_delete_index']])
log.info("schema_migrations table migrated")
def migrate_plugins_configurations(session):
"""
Migrate all rows in the `plugins_configurations` table to `plugins`, applying:
- renaming of plugins if name changed
- conversion of old rate-limiting schema if old schema detected
:param session: opened cassandra session
"""
log.info("Migrating plugins...")
new_names = {
"keyauth": "key-auth",
"basicauth": "basic-auth",
"ratelimiting": "rate-limiting",
"tcplog": "tcp-log",
"udplog": "udp-log",
"filelog": "file-log",
"httplog": "http-log",
"request_transformer": "request-transformer",
"response_transfomer": "response-transfomer",
"requestsizelimiting": "request-size-limiting",
"ip_restriction": "ip-restriction"
}
session.execute("""
create table if not exists plugins(
id uuid,
api_id uuid,
consumer_id uuid,
name text,
config text,
enabled boolean,
created_at timestamp,
primary key (id, name))""")
session.execute("create index if not exists on plugins(name)")
session.execute("create index if not exists on plugins(api_id)")
session.execute("create index if not exists on plugins(consumer_id)")
for plugin in session.execute("SELECT * FROM plugins_configurations"):
# New plugins names
plugin_name = plugin.name
if plugin.name in new_names:
plugin_name = new_names[plugin.name]
# rate-limiting config
plugin_conf = plugin.value
if plugin_name == "rate-limiting":
conf = json.loads(plugin.value)
if "limit" in conf:
plugin_conf = {}
plugin_conf[conf["period"]] = conf["limit"]
plugin_conf = json.dumps(plugin_conf)
insert_query = SimpleStatement("""
INSERT INTO plugins(id, api_id, consumer_id, name, config, enabled, created_at)
VALUES(%s, %s, %s, %s, %s, %s, %s)""", consistency_level=ConsistencyLevel.ALL)
session.execute(insert_query, [plugin.id, plugin.api_id, plugin.consumer_id, plugin_name, plugin_conf, plugin.enabled, plugin.created_at])
log.info("Plugins migrated")
def migrate_rename_apis_properties(session):
"""
Create new columns for the `apis` column family and insert the equivalent values in it
:param session: opened cassandra session
"""
log.info("Renaming some properties for APIs...")
session.execute("ALTER TABLE apis ADD inbound_dns text")
session.execute("ALTER TABLE apis ADD upstream_url text")
session.execute("CREATE INDEX IF NOT EXISTS ON apis(inbound_dns)")
select_query = SimpleStatement("SELECT * FROM apis", consistency_level=ConsistencyLevel.ALL)
for api in session.execute(select_query):
session.execute("UPDATE apis SET inbound_dns = %s, upstream_url = %s WHERE id = %s", [api.public_dns, api.target_url, api.id])
log.info("APIs properties renamed")
def purge(session):
session.execute("ALTER TABLE apis DROP public_dns")
session.execute("ALTER TABLE apis DROP target_url")
session.execute("DROP TABLE plugins_configurations")
session.execute("DELETE FROM schema_migrations WHERE id = 'migrations'")
def migrate(session):
migrate_schema_migrations_table(session)
migrate_plugins_configurations(session)
migrate_rename_apis_properties(session)
def parse_arguments(argv):
"""
Parse the scripts arguments.
:param argv: scripts arguments
:return: parsed kong configuration
"""
config_path = ""
purge = False
opts, args = getopt.getopt(argv, "hc:", ["config=", "purge"])
for opt, arg in opts:
if opt == "-h":
usage()
elif opt in ("-c", "--config"):
config_path = arg
elif opt == "--purge":
purge = True
if config_path == "":
raise ArgumentException("No Kong configuration given")
elif not os.path.isfile(config_path):
raise ArgumentException("No configuration file at path %s" % config_path)
log.info("Using Kong configuration file at: %s" % os.path.abspath(config_path))
with open(config_path, "r") as stream:
config = yaml.load(stream)
return (config, purge)
def main(argv):
try:
kong_config, purge_cmd = parse_arguments(argv)
host, port, keyspace = load_cassandra_config(kong_config)
cluster = Cluster([host], protocol_version=2, port=port)
global session
session = cluster.connect(keyspace)
# Find out where the schema is at
rows = session.execute("SELECT * FROM schema_migrations")
is_migrated = len(rows) > 1 and any(mig.id == "core" for mig in rows)
is_0_4_2 = len(rows) == 1 and rows[0].migrations[-1] == "2015-08-10-813213_0.4.2"
is_purged = len(session.execute("SELECT * FROM system.schema_columnfamilies WHERE keyspace_name = %s AND columnfamily_name = 'plugins_configurations'", [keyspace])) == 0
if not is_0_4_2 and not is_migrated:
log.error("Please migrate your cluster to Kong 0.4.2 before running this script.")
shutdown_exit(1)
if purge_cmd:
if not is_purged and is_migrated:
purge(session)
log.info("Cassandra purged from <0.5.0 data")
elif not is_purged and not is_migrated:
log.info("Cassandra not previously migrated. Run this script in migration mode before.")
shutdown_exit(1)
else:
log.info("Cassandra already purged and migrated")
elif not is_migrated:
migrate(session)
log.info("Cassandra migrated to Kong 0.5.0.")
else:
log.info("Cassandra already migrated to Kong 0.5.0")
shutdown_exit(0)
except getopt.GetoptError as err:
log.error(err)
usage()
except ArgumentException as err:
log.error("Bad argument: %s " % err)
usage()
except yaml.YAMLError as err:
log.error("Cannot parse given configuration file: %s" % err)
sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
| |
# eightpuzzle.py
# --------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
import search
import random
# Module Classes
class EightPuzzleState:
"""
The Eight Puzzle is described in the course textbook on
page 64.
This class defines the mechanics of the puzzle itself. The
task of recasting this puzzle as a search problem is left to
the EightPuzzleSearchProblem class.
"""
def __init__( self, numbers ):
"""
Constructs a new eight puzzle from an ordering of numbers.
numbers: a list of integers from 0 to 8 representing an
instance of the eight puzzle. 0 represents the blank
space. Thus, the list
[1, 0, 2, 3, 4, 5, 6, 7, 8]
represents the eight puzzle:
-------------
| 1 | | 2 |
-------------
| 3 | 4 | 5 |
-------------
| 6 | 7 | 8 |
-------------
The configuration of the puzzle is stored in a 2-dimensional
list (a list of lists) 'cells'.
"""
self.cells = []
numbers = numbers[:] # Make a copy so as not to cause side-effects.
numbers.reverse()
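# Popping from the end of the reversed list yields the numbers in their
# original order, filling the grid row by row.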
for row in range( 3 ):
self.cells.append( [] )
for col in range( 3 ):
self.cells[row].append( numbers.pop() )
if self.cells[row][col] == 0:
self.blankLocation = row, col
def isGoal( self ):
"""
Checks to see if the puzzle is in its goal state.
-------------
| | 1 | 2 |
-------------
| 3 | 4 | 5 |
-------------
| 6 | 7 | 8 |
-------------
>>> EightPuzzleState([0, 1, 2, 3, 4, 5, 6, 7, 8]).isGoal()
True
>>> EightPuzzleState([1, 0, 2, 3, 4, 5, 6, 7, 8]).isGoal()
False
"""
current = 0
for row in range( 3 ):
for col in range( 3 ):
if current != self.cells[row][col]:
return False
current += 1
return True
def legalMoves( self ):
"""
Returns a list of legal moves from the current state.
Moves consist of moving the blank space up, down, left or right.
These are encoded as 'up', 'down', 'left' and 'right' respectively.
>>> EightPuzzleState([0, 1, 2, 3, 4, 5, 6, 7, 8]).legalMoves()
['down', 'right']
"""
moves = []
row, col = self.blankLocation
if(row != 0):
moves.append('up')
if(row != 2):
moves.append('down')
if(col != 0):
moves.append('left')
if(col != 2):
moves.append('right')
return moves
def result(self, move):
"""
Returns a new eightPuzzle with the current state and blankLocation
updated based on the provided move.
The move should be a string drawn from a list returned by legalMoves.
Illegal moves will raise an exception, which may be an array bounds
exception.
NOTE: This function *does not* change the current object. Instead,
it returns a new object.
"""
row, col = self.blankLocation
if(move == 'up'):
newrow = row - 1
newcol = col
elif(move == 'down'):
newrow = row + 1
newcol = col
elif(move == 'left'):
newrow = row
newcol = col - 1
elif(move == 'right'):
newrow = row
newcol = col + 1
else:
raise Exception("Illegal Move")
# Create a copy of the current eightPuzzle
newPuzzle = EightPuzzleState([0, 0, 0, 0, 0, 0, 0, 0, 0])
newPuzzle.cells = [values[:] for values in self.cells]
# And update it to reflect the move
newPuzzle.cells[row][col] = self.cells[newrow][newcol]
newPuzzle.cells[newrow][newcol] = self.cells[row][col]
newPuzzle.blankLocation = newrow, newcol
return newPuzzle
# Utilities for comparison and display
def __eq__(self, other):
"""
Overloads '==' such that two eightPuzzles with the same configuration
are equal.
>>> EightPuzzleState([0, 1, 2, 3, 4, 5, 6, 7, 8]) == \
EightPuzzleState([1, 0, 2, 3, 4, 5, 6, 7, 8]).result('left')
True
"""
for row in range( 3 ):
if self.cells[row] != other.cells[row]:
return False
return True
def __hash__(self):
return hash(str(self.cells))
def __getAsciiString(self):
"""
Returns a display string for the puzzle
"""
lines = []
horizontalLine = ('-' * (13))
lines.append(horizontalLine)
for row in self.cells:
rowLine = '|'
for col in row:
if col == 0:
col = ' '
rowLine = rowLine + ' ' + col.__str__() + ' |'
lines.append(rowLine)
lines.append(horizontalLine)
return '\n'.join(lines)
def __str__(self):
return self.__getAsciiString()
# TODO: Implement the methods in this class
class EightPuzzleSearchProblem(search.SearchProblem):
"""
Implementation of a SearchProblem for the Eight Puzzle domain
Each state is represented by an instance of an eightPuzzle.
"""
def __init__(self,puzzle):
"Creates a new EightPuzzleSearchProblem which stores search information."
self.puzzle = puzzle
def getStartState(self):
return self.puzzle
def isGoalState(self,state):
return state.isGoal()
def getSuccessors(self,state):
"""
Returns a list of (successor, action, stepCost) triples where each
successor is the state reached by moving the blank left, right, up, or
down from the original state, and the cost is 1.0 for each move.
"""
succ = []
for a in state.legalMoves():
succ.append((state.result(a), a, 1))
return succ
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions. The sequence must
be composed of legal moves
"""
return len(actions)
EIGHT_PUZZLE_DATA = [[1, 0, 2, 3, 4, 5, 6, 7, 8],
[1, 7, 8, 2, 3, 4, 5, 6, 0],
[4, 3, 2, 7, 0, 5, 1, 6, 8],
[5, 1, 3, 4, 0, 2, 6, 7, 8],
[1, 2, 5, 7, 6, 8, 0, 4, 3],
[0, 3, 1, 6, 8, 2, 7, 5, 4]]
def loadEightPuzzle(puzzleNumber):
"""
puzzleNumber: The number of the eight puzzle to load.
Returns an eight puzzle object generated from one of the
provided puzzles in EIGHT_PUZZLE_DATA.
puzzleNumber can range from 0 to 5.
>>> print loadEightPuzzle(0)
-------------
| 1 | | 2 |
-------------
| 3 | 4 | 5 |
-------------
| 6 | 7 | 8 |
-------------
"""
return EightPuzzleState(EIGHT_PUZZLE_DATA[puzzleNumber])
def createRandomEightPuzzle(moves=100):
"""
moves: number of random moves to apply
Creates a random eight puzzle by applying
a series of 'moves' random moves to a solved
puzzle.
"""
puzzle = EightPuzzleState([0,1,2,3,4,5,6,7,8])
for i in range(moves):
# Execute a random legal move
puzzle = puzzle.result(random.sample(puzzle.legalMoves(), 1)[0])
return puzzle
if __name__ == '__main__':
puzzle = createRandomEightPuzzle(25)
print('A random puzzle:')
print(puzzle)
problem = EightPuzzleSearchProblem(puzzle)
path = search.breadthFirstSearch(problem)
print(('BFS found a path of %d moves: %s' % (len(path), str(path))))
curr = puzzle
i = 1
for a in path:
curr = curr.result(a)
print(('After %d move%s: %s' % (i, ("", "s")[i>1], a)))
print(curr)
input("Press return for the next state...") # wait for key stroke
i += 1
| |
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# tcpconnlat Trace TCP active connection latency (connect).
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: tcpconnlat [-h] [-t] [-p PID] [-4 | -6]
#
# This uses dynamic tracing of kernel functions, and will need to be updated
# to match kernel changes.
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 19-Feb-2016 Brendan Gregg Created this.
# 15-Mar-2021 Suresh Kumar Added LPORT option
from __future__ import print_function
from bcc import BPF
from socket import inet_ntop, AF_INET, AF_INET6
from struct import pack
import argparse
# arg validation
def positive_float(val):
try:
ival = float(val)
except ValueError:
raise argparse.ArgumentTypeError("must be a float")
if ival < 0:
raise argparse.ArgumentTypeError("must be positive")
return ival
# arguments
examples = """examples:
./tcpconnlat # trace all TCP connect()s
./tcpconnlat 1 # trace connection latency slower than 1 ms
./tcpconnlat 0.1 # trace connection latency slower than 100 us
./tcpconnlat -t # include timestamps
./tcpconnlat -p 181 # only trace PID 181
./tcpconnlat -L # include LPORT while printing outputs
./tcpconnlat -4 # trace IPv4 family only
./tcpconnlat -6 # trace IPv6 family only
"""
parser = argparse.ArgumentParser(
description="Trace TCP connects and show connection latency",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-t", "--timestamp", action="store_true",
help="include timestamp on output")
parser.add_argument("-p", "--pid",
help="trace this PID only")
parser.add_argument("-L", "--lport", action="store_true",
help="include LPORT on output")
group = parser.add_mutually_exclusive_group()
group.add_argument("-4", "--ipv4", action="store_true",
help="trace IPv4 family only")
group.add_argument("-6", "--ipv6", action="store_true",
help="trace IPv6 family only")
parser.add_argument("duration_ms", nargs="?", default=0,
type=positive_float,
help="minimum duration to trace (ms)")
parser.add_argument("-v", "--verbose", action="store_true",
help="print the BPF program for debugging purposes")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
if args.duration_ms:
# support fractions but round to nearest microsecond
duration_us = int(args.duration_ms * 1000)
else:
duration_us = 0 # default is show all
debug = 0
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <bcc/proto.h>
struct info_t {
u64 ts;
u32 pid;
char task[TASK_COMM_LEN];
};
BPF_HASH(start, struct sock *, struct info_t);
// separate data structs for ipv4 and ipv6
struct ipv4_data_t {
u64 ts_us;
u32 pid;
u32 saddr;
u32 daddr;
u64 ip;
u16 lport;
u16 dport;
u64 delta_us;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv4_events);
struct ipv6_data_t {
u64 ts_us;
u32 pid;
unsigned __int128 saddr;
unsigned __int128 daddr;
u64 ip;
u16 lport;
u16 dport;
u64 delta_us;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv6_events);
int trace_connect(struct pt_regs *ctx, struct sock *sk)
{
u32 pid = bpf_get_current_pid_tgid() >> 32;
FILTER
struct info_t info = {.pid = pid};
info.ts = bpf_ktime_get_ns();
bpf_get_current_comm(&info.task, sizeof(info.task));
start.update(&sk, &info);
return 0;
};
// See tcp_v4_do_rcv() and tcp_v6_do_rcv(). So TCP_ESTABLISHED and TCP_LISTEN
// are fast path and processed elsewhere, and leftovers are processed by
// tcp_rcv_state_process(). We can trace this for handshake completion.
// This should all be switched to static tracepoints when available.
int trace_tcp_rcv_state_process(struct pt_regs *ctx, struct sock *skp)
{
// will be in TCP_SYN_SENT for handshake
if (skp->__sk_common.skc_state != TCP_SYN_SENT)
return 0;
// check start and calculate delta
struct info_t *infop = start.lookup(&skp);
if (infop == 0) {
return 0; // missed entry or filtered
}
u64 ts = infop->ts;
u64 now = bpf_ktime_get_ns();
u64 delta_us = (now - ts) / 1000ul;
#ifdef MIN_LATENCY
if ( delta_us < DURATION_US ) {
return 0; // connect latency is below latency filter minimum
}
#endif
// pull in details
u16 family = 0, lport = 0, dport = 0;
family = skp->__sk_common.skc_family;
lport = skp->__sk_common.skc_num;
dport = skp->__sk_common.skc_dport;
// emit to appropriate data path
if (family == AF_INET) {
struct ipv4_data_t data4 = {.pid = infop->pid, .ip = 4};
data4.ts_us = now / 1000;
data4.saddr = skp->__sk_common.skc_rcv_saddr;
data4.daddr = skp->__sk_common.skc_daddr;
data4.lport = lport;
data4.dport = ntohs(dport);
data4.delta_us = delta_us;
__builtin_memcpy(&data4.task, infop->task, sizeof(data4.task));
ipv4_events.perf_submit(ctx, &data4, sizeof(data4));
} else /* AF_INET6 */ {
struct ipv6_data_t data6 = {.pid = infop->pid, .ip = 6};
data6.ts_us = now / 1000;
bpf_probe_read_kernel(&data6.saddr, sizeof(data6.saddr),
skp->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
bpf_probe_read_kernel(&data6.daddr, sizeof(data6.daddr),
skp->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
data6.lport = lport;
data6.dport = ntohs(dport);
data6.delta_us = delta_us;
__builtin_memcpy(&data6.task, infop->task, sizeof(data6.task));
ipv6_events.perf_submit(ctx, &data6, sizeof(data6));
}
start.delete(&skp);
return 0;
}
"""
if duration_us > 0:
bpf_text = "#define MIN_LATENCY\n" + bpf_text
bpf_text = bpf_text.replace('DURATION_US', str(duration_us))
# code substitutions
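# e.g. "-p 181" turns the FILTER placeholder into: if (pid != 181) { return 0; }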
if args.pid:
bpf_text = bpf_text.replace('FILTER',
'if (pid != %s) { return 0; }' % args.pid)
else:
bpf_text = bpf_text.replace('FILTER', '')
if debug or args.verbose or args.ebpf:
print(bpf_text)
if args.ebpf:
exit()
# initialize BPF
b = BPF(text=bpf_text)
if args.ipv4:
b.attach_kprobe(event="tcp_v4_connect", fn_name="trace_connect")
elif args.ipv6:
b.attach_kprobe(event="tcp_v6_connect", fn_name="trace_connect")
else:
b.attach_kprobe(event="tcp_v4_connect", fn_name="trace_connect")
b.attach_kprobe(event="tcp_v6_connect", fn_name="trace_connect")
b.attach_kprobe(event="tcp_rcv_state_process",
fn_name="trace_tcp_rcv_state_process")
# process event
start_ts = 0
def print_ipv4_event(cpu, data, size):
event = b["ipv4_events"].event(data)
global start_ts
if args.timestamp:
if start_ts == 0:
start_ts = event.ts_us
print("%-9.3f" % ((float(event.ts_us) - start_ts) / 1000000), end="")
if args.lport:
print("%-6d %-12.12s %-2d %-16s %-6d %-16s %-5d %.2f" % (event.pid,
event.task.decode('utf-8', 'replace'), event.ip,
inet_ntop(AF_INET, pack("I", event.saddr)), event.lport,
inet_ntop(AF_INET, pack("I", event.daddr)), event.dport,
float(event.delta_us) / 1000))
else:
print("%-6d %-12.12s %-2d %-16s %-16s %-5d %.2f" % (event.pid,
event.task.decode('utf-8', 'replace'), event.ip,
inet_ntop(AF_INET, pack("I", event.saddr)),
inet_ntop(AF_INET, pack("I", event.daddr)), event.dport,
float(event.delta_us) / 1000))
def print_ipv6_event(cpu, data, size):
event = b["ipv6_events"].event(data)
global start_ts
if args.timestamp:
if start_ts == 0:
start_ts = event.ts_us
print("%-9.3f" % ((float(event.ts_us) - start_ts) / 1000000), end="")
if args.lport:
print("%-6d %-12.12s %-2d %-16s %-6d %-16s %-5d %.2f" % (event.pid,
event.task.decode('utf-8', 'replace'), event.ip,
inet_ntop(AF_INET6, event.saddr), event.lport,
inet_ntop(AF_INET6, event.daddr),
event.dport, float(event.delta_us) / 1000))
else:
print("%-6d %-12.12s %-2d %-16s %-16s %-5d %.2f" % (event.pid,
event.task.decode('utf-8', 'replace'), event.ip,
inet_ntop(AF_INET6, event.saddr), inet_ntop(AF_INET6, event.daddr),
event.dport, float(event.delta_us) / 1000))
# header
if args.timestamp:
print("%-9s" % ("TIME(s)"), end="")
if args.lport:
print("%-6s %-12s %-2s %-16s %-6s %-16s %-5s %s" % ("PID", "COMM",
"IP", "SADDR", "LPORT", "DADDR", "DPORT", "LAT(ms)"))
else:
print("%-6s %-12s %-2s %-16s %-16s %-5s %s" % ("PID", "COMM", "IP",
"SADDR", "DADDR", "DPORT", "LAT(ms)"))
# read events
if args.ipv4:
b["ipv4_events"].open_perf_buffer(print_ipv4_event)
elif args.ipv6:
b["ipv6_events"].open_perf_buffer(print_ipv6_event)
else:
b["ipv4_events"].open_perf_buffer(print_ipv4_event)
b["ipv6_events"].open_perf_buffer(print_ipv6_event)
while 1:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
| |
CDMA_PREFIXES = {}
MOBILE_CDMA_PREFIXES = {}
GSM_PREFIXES = {
# Indosat
"814": "Matrix",
"815": "Matrix / Mentari",
"816": "Matrix / Mentari",
"855": "Matrix",
"858": "Mentari",
"856": "IM3",
"857": "IM3",
# Telkomsel (Halo, Simpati, LOOP, AS)
"811": "Telkomsel",
"812": "Telkomsel",
"813": "Telkomsel",
"821": "Telkomsel",
"822": "Telkomsel",
"854": "Telkomsel",
"823": "Telkomsel",
"851": "Telkomsel",
"852": "Telkomsel",
"853": "Telkomsel",
# PT XL Axiata
"817": "XL",
"818": "XL",
"819": "XL",
"859": "XL",
"877": "XL",
"878": "XL",
"879": "XL",
# AXIS, acquired by XL
"838": "XL",
"833": "XL",
"832": "XL",
"831": "XL",
# PT Hutchison CP Telecommunications
"899": "3",
"898": "3",
"897": "3",
"896": "3",
"895": "3",
"894": "3",
# Smartfren
"881": "Smartfren",
"882": "Smartfren",
"883": "Smartfren",
"884": "Smartfren",
"885": "Smartfren",
"886": "Smartfren",
"887": "Smartfren",
"888": "Smartfren",
"889": "Smartfren",
}
AREA_CODE = {
"21": "Jakarta, Tangerang",
"252": "Lebak",
"253": "Pandeglang",
"254": "Cilegon, Serang",
"22": "Bandung, Cimahi, Sumedang(Jatinagor)",
"231": "Cirebon",
"232": "Kuningan",
"233": "Majalengka",
"234": "Indramayu",
"251": "Bogor",
"260": "Subang",
"261": "Sumedang",
"262": "Garut",
"263": "cianjur",
"264": "Purwakarta",
"265": "Banjar, Ciamis, Tasikmalaya",
"266": "Sukabumi",
"267": "Karawang",
'24': "Semarang",
'271': "Solo, Srage, Karanganyar, Sukoharjo, Boyolali",
'272': "Klaten",
'273': "Wonogiri",
'274': "Yogyakarta",
'275': "Purworejo",
'276': "Boyolali",
'280': "Cilacap",
'281': "Banyumas, Purbalingga",
'282': "Cilacap",
'283': "Tegal, Brebes",
'284': "Pemalang",
'285': "Pekalongan, Batang",
'286': "Banjarnegara, Wonosobo",
'287': "Kebumen",
'289': "Bumiayu",
'291': "Kudus",
'292': "Grobogan",
'293': "Magelang, Temanggung",
'294': "Kendal, Batang(Grinsing)",
'295': "Pati, Rembang",
'296': "Blora",
'297': "Karimun Jawa",
'298': "Salatiga, Semarang, Boyolali",
'31': "Surabaya, Gersik, Sidoarjo, Bangkalan",
'321': "Mojokerto, Jombang",
'322': "Lamongan",
'323': "Sampang",
'324': "Pamekasan",
'325': "Bawean",
'326': "Pulau Masalembu",
'327': "Kangean",
'328': "Sumenep",
'331': "Jember",
'332': "Bondowoso",
'333': "Banyuwangi",
'334': "Lumajang",
'335': "Probolinggo",
'338': "Situbundo",
'341': "Malang, Batu",
'342': "Blitar",
'343': "Pasuruan",
'351': "Madiun, Magetan, Ngawi",
'352': "Ponorogo",
'353': "Bojonegoro",
'354': "Kediri",
'355': "Tulungagung, Trenggalek",
'356': "Tuban",
'357': "Pacitan",
'358': "Nganjuk",
'361': "Denpasar, Badung, Gianyar, Tabanan",
'362': "Buleleng",
'363': "Karangasem",
'365': "Jembrana",
'366': "Klungkung, Bangli",
'368': "Tabanan (Baturiti, Bedugul)",
'370': "Mataram, Lombok Barat, Lombok Pusat",
'371': "Sumbawa",
'372': "Sumbawa Barat",
'373': "Dompu",
'374': "Bima",
'376': "Lombok Timur",
'379': "Pulau Alor",
'380': "Kupang",
'381': "Ende",
'382': "Sikka",
'383': "Flores Timur",
'384': "Ngada",
'385': "Manggarai",
'386': "Manggarai Barat",
'387': "Sumba",
'388': "North Central Timor, South Central Timor",
'389': "Belu",
'401': "Kendari, Konawe",
'402': "Buton",
'403': "Muna",
'404': "Wakatobi",
'405': "Kolaka",
'408': "Konawe",
'409': "Morowali",
'410': "Pangkajene",
'411': "Makassar, Maros, Gowa",
'413': "Bantaeng, Bulukumba",
'414': "Pulau Selayar",
'417': "Malino",
'418': "Takalar",
'419': "Jeneponto",
'420': "Enrekang",
'421': "Pare Pare, Pinrang, Sidenreng Rappang",
'422': "Majene",
'423': "Tana Toraja",
'426': "Mamuju",
'427': "Barru",
'428': "Polewali",
'430': "Minahasa Selatan",
'431': "Manado, Tomohon, Minahasa, Minahasa Utara",
'432': "Pulau Sangihe",
'434': "Bolaang Mongondow",
'435': "Gorontalo",
'438': "Bitung",
'443': "Gorontalo - Pohuwato",
'450': "Parigi Moutong",
'451': "Palu",
'452': "Poso",
'453': "Toli-Toli",
'457': "Donggala",
'458': "Poso(Tentena)",
'461': "Banggai",
'462': "Pulau Banggai",
'464': "Tojo Una-una",
'471': "Luwu",
'473': "Luwu Utara",
'474': "Luwu Timur",
'481': "Bone",
'482': "Sinjai",
'484': "Soppeng",
'485': "Wajo",
'511': "Banjarmasin, Banjar, Banjarbaru, Barito Kuala",
'512': "Tanah Laut",
'513': "Kapuas, Pulang Pisau",
'517': "Hulu sungai selatan, Hulu sungai tengah, Tapin",
'518': "Kotabaru, Tanah Bumbu",
'519': "Barito Utara",
'526': "Tabalong, Balangan, Barito Selatan, Barito Timur",
'527': "Hulu Sungai Utara",
'528': "Murang Raya",
'531': "Kotawaringin Timur",
'532': "Kotawaringin Barat, Sukamara",
'534': "Ketapang",
'535': "Kayong Utara",
'536': "Palangka raya, Katingan",
'537': "Gunung Mas",
'539': "Seruyan, Kotawaringin timur",
'541': "Samarinda, Kutai Kartanegara",
'542': "Balikpapan, Penajam Paser Utara",
'543': "Paser",
'545': "Kutai Barat",
'548': "Bontang",
'549': "Kutai Timur",
'551': "Tarakan, Pulau Bunyu",
'552': "Bulungan",
'553': "Malinau",
'554': "Beray",
'556': "Nunukan",
'561': "Pontianak",
'562': "Sambas, Bengkayang, Singkawang",
'563': "Landak",
'564': "Sangau, Sekadau",
'565': "Sintang",
'567': "Kapuas Hulu",
'568': "Melawi",
'61': "Medan, Binjai, Deli Serdang, Serdang Bedagai(Perbaungan, Pantai Cermin), Langkat",
'620': "Langkat(Pangkalan Brandan",
'621': "Tebingtinggi, Serdang Bedagai",
'622': "Pematang Siantar, Simalungun, Batubara, Serdang bedagai",
'623': "Asahan, Tanjung Balai, Labuhan Batu(Labuhan Ruku)",
'624': "Labuhan Batu",
'625': "Parapat, Samosir",
'626': "Samosir",
'627': "Dairi, Pakpak Bharat, Subulussalam",
'628': "Karo, Deil Serdang (Bandar Baru, Sibolangit)",
'629': "Aceh Barat Daya",
'630': "Nias Selatan",
'631': "Tapanuli pusat, Sibolga",
'632': "Toba Samosir",
'633': "Tapanuli Utara, Humbang Hasundutan",
'634': "Tapanuli Selatan, Padang Sidempuan",
'635': "Tapanuli Selatan",
'636': "Mandailing Natal",
'639': "Nias",
'641': "Langsa, Aceh Timur, Aceh Tamiang",
'642': "Gayo Lues",
'643': "Aceh Pusat",
'644': "Bireuen",
'645': "Lhokseumawe, Aceh Utara",
'646': "Aceh Timur",
'650': "Siemulue",
'651': "Banda Aceh, Aceh Besar, Aceh Jaya(Lamno)",
'652': "Sabang",
'653': "Pidie",
'654': "Aceh Jaya",
'655': "Aceh Barat",
'656': "Aceh Selatan",
'657': "Aceh Selatan",
'658': "Singkil",
'659': "Aceh Barat Daya",
'702': "Empat Lawang",
'711': "Palembang, Ogan Ilir, Banyuasin",
'712': "Ogan Komering Ilir",
'713': "Prabumulih, Muara Enim",
'714': "Musi Banyuasin",
'715': "Bangka (Belinyu)",
'716': "Bangka Barat",
'717': "Bangka, Pangkal Pinang",
'718': "Bangka pusat, Bangka selatan",
'719': "Belitung",
'721': "Bandar Lampung, Lampung Selatan",
'722': "Tanggamus",
'723': "Way Kanan",
'724': "Lampung Utara",
'725': "Lampung pusat, Lampung Timur, Metro",
'726': "Tulang Bawang",
'727': "Lampung Selatan",
'728': "Lampung Barat",
'729': "Tanggamus",
'730': "Lahat, Pagar Alam",
'731': "Lahat",
'732': "Rejang Lebong, Kepahiang",
'733': "Lubuk Linggau, Musi Rawas",
'734': "Muara Enim",
'735': "Ogan Komering Ulu",
'736': "Bengkulu, Seluma",
'737': "Bengkulu Utara, Muko-muko",
'739': "Bengkulu Selatan, Kaur",
'741': "Jambi, Muaro Jambi",
'742': "Tanjung Jabung Barat",
'743': "Batanghari",
'744': "Tebo",
'745': "Sarolangun",
'746': "Merangin",
'747': "Bungo, Tebo(Rimbo Bujang)",
'748': "Kerinci",
'751': "Padang, Padang Pariaman, Pariaman, Pesisir Selatan",
'752': "Agam, Tanah Datar, Limapuluh Koto, Bukittinggi, Padang Panjang, Payakumbuh",
'753': "Pasaman, Pasaman Barat",
'754': "Sawahlunto, Sijunjung, Dharmasraya",
'755': "Solok, Solok Selatan",
'756': "Pesisir Selatan",
'757': "Pesisir Selatan",
'759': "Pulau Mentawai",
'760': "Kuantan Singingi",
'761': "Pekanbaru, Pelalawan, Siak, Kampar",
'762': "Kampar, rokan Hulu",
'763': "Bengkalis",
'764': "Siak",
'765': "Dumai, Bengkalis(Duri), Rokan Hulu(Bagan Batu)",
'766': "Bengkalis",
'767': "Rokan Hulu",
'768': "Indragiri Hilir",
'769': "Indragiri Hulu",
'771': "Tanjungpinang, Bintan",
'772': "Pulan Anambas",
'773': "Pulau Natuna",
'776': "Lingga",
'777': "Great Karimun",
'778': "Batam",
'779': "Kundur",
'901': "Timika",
'902': "Agats",
'910': "Banda Naira",
'911': "Ambon",
'913': "Namlea",
'914': "Masohi",
'916': "Tual",
'917': "Dobo",
'918': "Saumlaki",
'920': "Weda",
'921': "Ternate",
'924': "Tobelo",
'929': "Tidore",
'951': "Sorong",
'956': "Kaimana, Fak Fak",
'963': "Serui",
'966': "Sarmi",
'967': "Jayapura",
'969': "Wamena",
'971': "Merauke",
'975': "Tanah Merah (Boven Digoel)",
'979': "Tembagapura",
'981': "Biak",
'983': "Botawa / Serui",
'984': "Nabire",
'986': "Manokwari",
}
| |
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from viz.models import PollingStation, Election, Party, RegionalElectoralDistrict, State, District, Municipality, List
import json
import datetime
import os
class Command(BaseCommand):
help = 'Import basic data into database'
def add_arguments(self, parser):
"""
Add extra arguments to command.
"""
parser.add_argument(
'path',
nargs='?',
help='Specify a different location from where to take the setup data'
)
def handle(self, *args, **options):
"""
Main entry point of the command.
"""
if options['path']:
setup_path = options['path']
else:
setup_path = os.path.dirname(os.path.realpath(__name__)) + '/data/base/'
config = {
'party_location': 'austria',
'log_detail' : 'low'
}
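# log_detail controls verbosity: 'low' prints only the per-table summaries,
# 'middle' also prints warnings (e.g. missing parties or districts), and
# 'high' additionally logs every created/updated entry.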
# import elections
elections = self.open_jsonfile(setup_path + 'elections.json')
self.import_elections(elections, config)
# import regional electoral districts
reds = self.open_jsonfile(setup_path + 'regional-electoral-districts_20170101.json')
self.import_reds(reds, config)
# import parties
parties = self.open_jsonfile(setup_path + 'parties.json')
self.import_parties(parties, config)
# import lists
lists = self.open_jsonfile(setup_path + 'lists.json')
self.import_lists(lists, config)
# import states and districts
states_districts = self.open_jsonfile(setup_path + 'states-to-districts_20170101.json')
self.import_states_districts(states_districts, config)
# import municipalities
municipalities = self.open_jsonfile(setup_path + 'municipalities_20170101_2.json')
muns2reds = self.open_jsonfile(setup_path + 'municipality2red_20170101.json')
self.import_municipalities(municipalities, muns2reds, config)
def open_file(self, filename):
"""
Open file.
"""
try:
with open(filename, encoding='utf-8') as data_file:
return data_file.read()
except IOError:
print('Error: can\'t find file or read data')
def open_jsonfile(self, filename):
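"""
Open a JSON file (tolerating a UTF-8 BOM) and return the parsed data,
or None if the file is not valid JSON.
"""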
try:
data = json.loads(open(filename, encoding='utf-8-sig').read())
return data
except ValueError:
print('Error: File is not valid JSON.')
def import_elections(self, elections, config):
"""
Import elections data into database.
"""
num_entries_created = 0
num_entries_updated = 0
for key, value in elections.items():
ts = datetime.datetime.strptime(value['election_day'], '%Y-%m-%d')
ts = timezone.make_aware(ts, timezone.get_current_timezone())
e = Election.objects.update_or_create(
short_name = value['short_name'],
short_name_text = value['short_name_text'],
full_name = value['full_name'],
election_type = value['election_type'],
wikidata_id = value['wikidata_id'],
administrative_level = value['administrative_level'],
election_day = ts,
status = value['status']
)
if e[1] == True:
if config['log_detail'] == 'high':
print('New election entry "'+value['short_name']+'" created.')
num_entries_created += 1
else:
if config['log_detail'] == 'high':
print('Election entry "'+value['short_name']+'" updated.')
num_entries_updated += 1
print('Election table imported: '+ 'new entries: '+str(num_entries_created)+', updated entries: '+str(num_entries_updated))
def import_reds(self, reds, config):
"""
Import regional electoral districts data into database.
"""
num_entries_created = 0
num_entries_updated = 0
for key, value in reds.items():
red = RegionalElectoralDistrict.objects.update_or_create(
short_code=str(key),
name = value
)
if red[1] == True:
if config['log_detail'] == 'high':
print('New regional electoral district entry "'+value+'" created.')
num_entries_created += 1
else:
if config['log_detail'] == 'high':
print('Regional electoral district entry "'+value+'" updated.')
num_entries_updated += 1
print('Regionalelectoraldistrict table imported: '+ 'new entries: '+str(num_entries_created)+', updated entries: '+str(num_entries_updated))
def import_parties(self, parties, config):
"""
Import parties data into database.
"""
num_entries_created = 0
num_entries_updated = 0
for key, value in parties.items():
p = Party.objects.update_or_create(
short_name = value['short_name'],
short_name_text = value['short_name_text'],
full_name = value['full_name'],
family = value['family'],
wikidata_id = value['wikidata_id'],
website = value['website'],
location = config['party_location']
)
if p[1] == True:
if config['log_detail'] == 'high':
print('New party entry "'+value['short_name']+'" created.')
num_entries_created += 1
else:
if config['log_detail'] == 'high':
print('Party entry "'+value['short_name']+'" updated.')
num_entries_updated += 1
print('Party table imported: '+ 'new entries: '+str(num_entries_created)+', updated entries: '+str(num_entries_updated))
def import_lists(self, lists, config):
"""
Import lists data into database.
"""
num_entries_created = 0
num_entries_updated = 0
num_lists_notfound = 0
for key, ele_list in lists.items():
for lst in ele_list:
try:
p = Party.objects.get(short_name = lst['party'])
l = List.objects.update_or_create(
short_name = lst['short_name'],
short_name_text = lst['short_name_text'],
full_name = lst['full_name'],
party = p
)
except Exception as e:
if config['log_detail'] == 'middle' or config['log_detail'] == 'high':
print('Warning: Party not found.')
num_lists_notfound += 1
l = List.objects.update_or_create(
short_name = lst['short_name'],
short_name_text = lst['short_name_text'],
full_name = lst['full_name']
)
if l[1] == True:
if config['log_detail'] == 'high':
print('New list entry "'+lst['short_name']+'" created.')
num_entries_created += 1
else:
if config['log_detail'] == 'high':
print('List entry "'+lst['short_name']+'" updated.')
num_entries_updated += 1
print('List table imported: '+ 'new entries: '+str(num_entries_created)+', updated entries: '+str(num_entries_updated)+', lists not found: '+str(num_lists_notfound))
def import_states_districts(self, states_districts, config):
"""
Import states and districts into database.
"""
d_num_entries_created = 0
d_num_entries_updated = 0
s_num_entries_created = 0
s_num_entries_updated = 0
for s_key, s_val in states_districts.items():
s = State.objects.update_or_create(
short_code = str(s_key),
name = s_val['name']
)
if s[1] == True:
if config['log_detail'] == 'high':
print('New state entry "'+str(s_key)+'" created.')
s_num_entries_created += 1
else:
if config['log_detail'] == 'high':
print('State entry "'+str(s_key)+'" updated.')
s_num_entries_updated += 1
for key, value in s_val['districts'].items():
d = District.objects.update_or_create(
short_code=str(key),
name = value,
state=s[0]
)
if d[1] == True:
if config['log_detail'] == 'high':
print('New district entry "'+str(key)+'" created.')
d_num_entries_created += 1
else:
if config['log_detail'] == 'high':
print('District entry "'+str(key)+'" updated.')
d_num_entries_updated += 1
print('State table imported: '+ 'new entries: '+str(s_num_entries_created)+', updated entries: '+str(s_num_entries_updated))
print('District table imported: '+ 'new entries: '+str(d_num_entries_created)+', updated entries: '+str(d_num_entries_updated))
def import_municipalities(self, municipalities, muns2reds, config):
"""
Import municipalities as polling stations into database.
"""
ps_num_entries_created = 0
ps_num_entries_updated = 0
m_num_entries_created = 0
m_num_entries_updated = 0
for mun in municipalities:
# Defined up front so a failed lookup below leaves these as None instead of
# raising a NameError or silently reusing the previous municipality's values.
red = None
d = None
try:
red = RegionalElectoralDistrict.objects.get(short_code=muns2reds[mun['municipality_code']])
except Exception as e:
if config['log_detail'] == 'middle' or config['log_detail'] == 'high':
print('Warning: regionalelectoraldistrict not found.')
try:
d = District.objects.get(name=mun['district'])
except Exception as e:
if config['log_detail'] == 'middle' or config['log_detail'] == 'high':
print('Warning: district not found.')
m = Municipality.objects.update_or_create(
code = str(mun['municipality_code']),
kennzahl = str(mun['municipality_kennzahl']),
name = mun['name'],
regional_electoral_district = red,
district = d
)
if m[1] == True:
if config['log_detail'] == 'high':
print('New municipality entry "'+str(mun['name'])+'" created.')
m_num_entries_created += 1
else:
if config['log_detail'] == 'high':
print('Municipality entry "'+str(mun['name'])+'" updated.')
m_num_entries_updated += 1
ps = PollingStation.objects.update_or_create(
name = mun['name'],
type = 'municipality',
municipality = m[0]
)
if ps[1] == True:
if config['log_detail'] == 'high':
print('New pollingstation entry "'+str(mun['name'])+'" created.')
ps_num_entries_created += 1
else:
if config['log_detail'] == 'high':
print('Pollingstation entry "'+str(mun['name'])+'" updated.')
ps_num_entries_updated += 1
print('Municipality table imported: '+ 'new entries: '+str(m_num_entries_created)+', updated entries: '+str(m_num_entries_updated))
print('Pollingstation table imported: '+ 'new entries: '+str(ps_num_entries_created)+', updated entries: '+str(ps_num_entries_updated))
| |
from collections import Counter
from datetime import datetime
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from django.template.loader import render_to_string
from django.utils import timezone
from bulk_update.helper import bulk_update
from pontoon.administration.files import update_from_repository
from pontoon.administration.vcs import commit_to_vcs, CommitToRepositoryException
from pontoon.base.models import (
ChangedEntityLocale,
Entity,
Locale,
Project,
Resource,
Translation,
update_stats
)
from pontoon.base.utils import match_attr
from pontoon.base.vcs_models import VCSProject
class Command(BaseCommand):
args = '<project_slug project_slug ...>'
help = 'Synchronize database and remote repositories.'
def add_arguments(self, parser):
parser.add_argument(
'--no-commit',
action='store_true',
dest='no_commit',
default=False,
help='Do not commit changes to VCS'
)
parser.add_argument(
'--no-pull',
action='store_true',
dest='no_pull',
default=False,
help='Do not pull new commits from VCS'
)
def log(self, msg, *args, **kwargs):
"""Log a message to the console."""
self.stdout.write(msg.format(*args, **kwargs))
def info(self, msg, *args, **kwargs):
"""Log a message to the console if --verbosity=1 or more."""
if self.verbosity >= 1:
self.log(msg, *args, **kwargs)
def debug(self, msg, *args, **kwargs):
"""Log a message to the console if --verbosity=2."""
if self.verbosity == 2:
self.log(msg, *args, **kwargs)
def handle(self, *args, **options):
self.verbosity = options['verbosity']
self.no_commit = options['no_commit']
self.no_pull = options['no_pull']
self.log('SYNC PROJECTS: start')
projects = Project.objects.filter(disabled=False)
if args:
projects = projects.filter(slug__in=args)
if len(projects) < 1:
raise CommandError('No matching projects found.')
for project in projects:
if not project.can_commit:
self.log(u'Skipping project {0}, cannot commit to repository.'
.format(project.name))
else:
self.handle_project(project)
self.log('SYNC PROJECTS: done')
# Once we've synced, we can delete all translations scheduled
# for deletion.
Translation.deleted_objects.all().delete()
def handle_project(self, db_project):
# Pull changes from VCS and update what we know about the files.
if not self.no_pull:
update_from_repository(db_project)
vcs_project = VCSProject(db_project)
self.update_resources(db_project, vcs_project)
# Collect all entities across VCS and the database and get their
# keys so we can match up matching entities.
vcs_entities = self.get_vcs_entities(vcs_project)
db_entities = self.get_db_entities(db_project)
entity_keys = set().union(db_entities.keys(), vcs_entities.keys())
changeset = ChangeSet(db_project, vcs_project)
for key in entity_keys:
db_entity = db_entities.get(key, None)
vcs_entity = vcs_entities.get(key, None)
self.handle_entity(changeset, db_project, key, db_entity, vcs_entity)
# Apply the changeset to the files, commit them, and update stats
# entries in the DB.
changeset.execute()
if not self.no_commit:
self.commit_changes(db_project, vcs_project, changeset)
self.update_stats(db_project, vcs_project, changeset)
# Clear out the list of changed locales for entity in this
# project now that we've finished syncing.
(ChangedEntityLocale.objects
.filter(entity__resource__project=db_project)
.delete())
self.log(u'Synced project {0}', db_project.slug)
def handle_entity(self, changeset, db_project, key, db_entity, vcs_entity):
"""
Determine what needs to be synced between the database and VCS versions
of a single entity and log what needs to be changed in the changeset.
"""
if vcs_entity is None:
if db_entity is None:
# This should never happen; abort hard.
raise CommandError('No entities found for key {0}'.format(key))
else:
# VCS no longer has the entity, remove it from Pontoon.
changeset.obsolete_db_entity(db_entity)
elif db_entity is None:
# New VCS entities are added to Pontoon.
changeset.create_db_entity(vcs_entity)
else:
for locale in db_project.locales.all():
if not vcs_entity.has_translation_for(locale.code):
# VCS lacks an entity for this locale, so we can't
# pull updates nor edit it. Skip it!
continue
if db_entity.has_changed(locale):
# Pontoon changes overwrite whatever VCS has.
changeset.update_vcs_entity(locale.code, db_entity, vcs_entity)
else:
# If Pontoon has nothing or has not changed, and the VCS
# still has the entity, update Pontoon with whatever may
# have changed.
changeset.update_db_entity(locale.code, db_entity, vcs_entity)
def update_resources(self, db_project, vcs_project):
"""Update the database on what resource files exist in VCS."""
relative_paths = vcs_project.resources.keys()
db_project.resource_set.exclude(path__in=relative_paths).delete()
for relative_path, vcs_resource in vcs_project.resources.items():
resource, created = db_project.resource_set.get_or_create(path=relative_path)
resource.format = Resource.get_path_format(relative_path)
resource.entity_count = len(vcs_resource.entities)
resource.save()
def update_stats(self, db_project, vcs_project, changeset):
"""Update the Stats entries in the database."""
for resource in db_project.resource_set.all():
for locale in db_project.locales.all():
# We only want to create/update the stats object if the resource
# exists in the current locale, UNLESS the file is asymmetric.
vcs_resource = vcs_project.resources[resource.path]
resource_exists = vcs_resource.files.get(locale) is not None
if resource_exists or resource.is_asymmetric:
update_stats(resource, locale)
def get_vcs_entities(self, vcs_project):
return {self.entity_key(entity): entity for entity in vcs_project.entities}
def get_db_entities(self, db_project):
entities = (Entity.objects
.select_related('resource')
.prefetch_related('changed_locales')
.filter(resource__project=db_project, obsolete=False))
return {self.entity_key(entity): entity for entity in entities}
def entity_key(self, entity):
"""
Generate a key for the given entity that is unique within the
project.
"""
key = entity.key or entity.string
return ':'.join([entity.resource.path, key])
def commit_changes(self, db_project, vcs_project, changeset):
"""Commit the changes we've made back to the VCS."""
for locale in db_project.locales.all():
authors = changeset.commit_authors_per_locale.get(locale.code, [])
# Use the top translator for this batch as commit author, or
# the fake Pontoon user if there are no authors.
if len(authors) > 0:
commit_author = Counter(authors).most_common(1)[0][0]
else:
commit_author = User(first_name="Pontoon", email="pontoon@mozilla.com")
commit_message = render_to_string('commit_message.jinja', {
'locale': locale,
'project': db_project,
'authors': authors
})
try:
result = commit_to_vcs(
db_project.repository_type,
vcs_project.locale_directory_path(locale.code),
commit_message,
commit_author,
db_project.repository_url
)
except CommitToRepositoryException as err:
result = {'message': unicode(err)}
if result is not None:
self.log(
u'Committing project {project.name} for {locale.name} '
u'({locale.code}) failed: {reason}',
project=db_project,
locale=locale,
reason=result['message']
)
class ChangeSet(object):
"""
Stores a set of changes to be made to the database and the
translations stored in VCS. Once all the necessary changes have been
stored, execute all the changes at once efficiently.
"""
def __init__(self, db_project, vcs_project):
self.db_project = db_project
self.vcs_project = vcs_project
self.executed = False
self.changes = {
'update_vcs': [],
'update_db': [],
'obsolete_db': [],
'create_db': []
}
self.entities_to_update = []
self.translations_to_update = []
self.translations_to_create = []
self.commit_authors_per_locale = {}
def update_vcs_entity(self, locale_code, db_entity, vcs_entity):
"""
Replace the translations in VCS with the translations from the
database.
"""
self.changes['update_vcs'].append((locale_code, db_entity, vcs_entity))
def create_db_entity(self, vcs_entity):
"""Create a new entity in the database."""
self.changes['create_db'].append(vcs_entity)
def update_db_entity(self, locale_code, db_entity, vcs_entity):
"""Update the database with translations from VCS."""
self.changes['update_db'].append((locale_code, db_entity, vcs_entity))
def obsolete_db_entity(self, db_entity):
"""Mark the given entity as obsolete."""
self.changes['obsolete_db'].append(db_entity.pk)
def execute(self):
"""
Execute the changes stored in this changeset. Execute can only
be called once per changeset; subsequent calls raise a
RuntimeError, even if the changes failed.
"""
if self.executed:
raise RuntimeError('execute() can only be called once per changeset.')
else:
self.executed = True
# Store locales and resources for FK relationships.
self.locales = {l.code: l for l in Locale.objects.all()}
self.resources = {r.path: r for r in self.db_project.resource_set.all()}
# Perform the changes and fill the lists for bulk creation and
# updating.
self.execute_update_vcs()
self.execute_create_db()
self.execute_update_db()
self.execute_obsolete_db()
# Apply the built-up changes to the DB
if len(self.entities_to_update) > 0:
bulk_update(self.entities_to_update, update_fields=[
'resource',
'string',
'string_plural',
'key',
'comment',
'order',
'source'
])
Translation.objects.bulk_create(self.translations_to_create)
if len(self.translations_to_update) > 0:
bulk_update(self.translations_to_update, update_fields=[
'entity',
'locale',
'string',
'plural_form',
'approved',
'approved_user_id',
'approved_date',
'fuzzy',
'extra'
])
def execute_update_vcs(self):
resources = self.vcs_project.resources
changed_resources = set()
for locale_code, db_entity, vcs_entity in self.changes['update_vcs']:
changed_resources.add(resources[db_entity.resource.path])
vcs_translation = vcs_entity.translations[locale_code]
db_translations = (db_entity.translation_set
.filter(approved=True, locale__code=locale_code))
# The VCS translation is fuzzy if any approved DB translation is fuzzy.
vcs_translation.fuzzy = any(t.fuzzy for t in db_translations)
if len(db_translations) > 0:
last_translation = max(
db_translations,
key=lambda t: t.date or timezone.make_aware(datetime.min)
)
vcs_translation.last_updated = last_translation.date
vcs_translation.last_translator = last_translation.user
# Replace existing translations with ones from the database.
vcs_translation.strings = {
db.plural_form: db.string for db in db_translations
}
# Track which translators were involved.
self.commit_authors_per_locale[locale_code] = [t.user for t in db_translations if t.user]
for resource in changed_resources:
resource.save()
def get_entity_updates(self, vcs_entity):
"""
Return a dict of the properties and values necessary to create
or update a database entity from a VCS entity.
"""
return {
'resource': self.resources[vcs_entity.resource.path],
'string': vcs_entity.string,
'string_plural': vcs_entity.string_plural,
'key': vcs_entity.key,
'comment': '\n'.join(vcs_entity.comments),
'order': vcs_entity.order,
'source': vcs_entity.source
}
def execute_create_db(self):
for vcs_entity in self.changes['create_db']:
entity = Entity(**self.get_entity_updates(vcs_entity))
entity.save() # We can't use bulk_create since we need a PK
for locale_code, vcs_translation in vcs_entity.translations.items():
for plural_form, string in vcs_translation.strings.items():
self.translations_to_create.append(Translation(
entity=entity,
locale=self.locales[locale_code],
string=string,
plural_form=plural_form,
approved=not vcs_translation.fuzzy,
approved_date=timezone.now() if not vcs_translation.fuzzy else None,
fuzzy=vcs_translation.fuzzy
))
def execute_update_db(self):
for locale_code, db_entity, vcs_entity in self.changes['update_db']:
for field, value in self.get_entity_updates(vcs_entity).items():
setattr(db_entity, field, value)
if db_entity.is_dirty(check_relationship=True):
self.entities_to_update.append(db_entity)
# Update translations for the entity.
vcs_translation = vcs_entity.translations[locale_code]
db_translations = db_entity.translation_set.filter(locale__code=locale_code)
approved_translations = []
for plural_form, string in vcs_translation.strings.items():
# Check if we need to modify an existing translation or
# create a new one.
db_translation = match_attr(db_translations,
plural_form=plural_form,
string=string)
if db_translation:
if not db_translation.approved:
db_translation.approved = True
db_translation.approved_date = timezone.now()
db_translation.fuzzy = vcs_translation.fuzzy
db_translation.extra = vcs_translation.extra
if db_translation.is_dirty():
self.translations_to_update.append(db_translation)
if not db_translation.fuzzy:
approved_translations.append(db_translation)
else:
self.translations_to_create.append(Translation(
entity=db_entity,
locale=self.locales[locale_code],
string=string,
plural_form=plural_form,
approved=not vcs_translation.fuzzy,
approved_date=timezone.now() if not vcs_translation.fuzzy else None,
fuzzy=vcs_translation.fuzzy,
extra=vcs_translation.extra
))
# Any existing translations that were not approved get unapproved.
for translation in db_translations:
if translation not in approved_translations:
translation.approved = False
translation.approved_user = None
translation.approved_date = None
if translation.is_dirty():
self.translations_to_update.append(translation)
def execute_obsolete_db(self):
(Entity.objects
.filter(pk__in=self.changes['obsolete_db'])
.update(obsolete=True))
| |
import ConfigParser
import datetime
import json
import os
import re
import time
import uuid
import paramiko
import pasteraw
from pygerrit import rest
from pygerrit.rest import auth
from publish import publish
_COMMENT = """
Master Results (sha: {master_sha})
Token creation
Requests per second: {master_create_rps}
Time per request: {master_create_tpr}
Token validation
Requests per second: {master_validate_rps}
Time per request: {master_validate_tpr}
Patch Results:
Token creation
Requests per second: {patch_create_rps}
Time per request: {patch_create_tpr}
Token validation
Requests per second: {patch_validate_rps}
Time per request: {patch_validate_tpr}
"""
class ContainerError(Exception):
def __init__(self, err_code, error, output):
print 'Error code: %r' % err_code
print 'Error: %r' % error
print 'Output: %r' % output
class PerformanceManager(object):
def __init__(self, host, user):
self.client = paramiko.client.SSHClient()
self.client.load_system_host_keys()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.client.connect(host, username=user)
self.paste_client = pasteraw.Client()
self.container_ip = None
def get_container_by_name(self, name):
command = 'lxc list --format json ' + name
return self.client.exec_command(command)
def delete_container_by_name(self, name):
command = 'lxc delete --force ' + name
return self.client.exec_command(command)
def launch_container(self):
container_name = 'keystone-' + uuid.uuid4().hex
command = 'lxc launch ubuntu1604 ' + container_name
stdin, stdout, stderr = self.client.exec_command(command)
return container_name, stdin, stdout, stderr
def set_container_ip_by_name(self, name):
while not self.container_ip:
stdin, stdout, stderr = self.get_container_by_name(name)
container = json.loads(stdout.read())
if container and container[0]['state']:
addresses = (
container[0]['state']['network']['eth0']['addresses']
)
for address in addresses:
if address['family'] == 'inet':
if 'address' in address:
self.container_ip = address['address']
# NOTE(lbragstad): Setting this and leaving the
# function can result in connection refused errors
# because even though we have an IP address,
# networking might not be up inside the container.
# Let's give it a few seconds before returning.
time.sleep(5)
else:
time.sleep(2)
def _build_container_command(self, command):
values = {'ip': self.container_ip, 'command': command}
full_command = "ssh root@%(ip)s '%(command)s'" % values
return full_command
def install_container(self):
print 'installing container'
# Bootstrap and install keystone on the container.
com = ('git clone https://github.com/lbragstad/keystone-performance; '
'cd keystone-performance; bash run_everything.sh')
command = self._build_container_command(com)
stdin, stdout, stderr = self.client.exec_command(command)
exit_status = stdout.channel.recv_exit_status()
if exit_status != 0:
raise ContainerError(exit_status, stderr.read(), stdout.read())
def benchmark_master(self):
com = ('git ls-remote --heads https://github.com/openstack/keystone |'
'grep master | cut -f1')
command = self._build_container_command(com)
stdin, stdout, stderr = self.client.exec_command(command)
master_sha = stdout.read().rstrip()[:10]
print 'benchmarking master (sha: %s)' % master_sha
com = ('git ls-remote --heads '
'https://github.com/openstack/openstack-ansible-os_keystone|'
'grep master | cut -f1')
command = self._build_container_command(com)
stdin, stdout, stderr = self.client.exec_command(command)
osa_sha = stdout.read().rstrip()[:10]
com = 'cd keystone-performance; bash benchmark.sh'
command = self._build_container_command(com)
stdin, stdout, stderr = self.client.exec_command(command)
exit_status = stdout.channel.recv_exit_status()
if exit_status != 0:
raise ContainerError(exit_status, stderr.read(), stdout.read())
master_results = stdout.read()
return (master_sha, osa_sha, master_results)
def benchmark_change(self, ref):
# Check out the patch to test from Gerrit.
com = ('cd keystone-performance; '
'ansible-playbook -i inventory_localhost --sudo '
'-e "ref=%s" checkout_change.yml') % ref
command = self._build_container_command(com)
stdin, stdout, stderr = self.client.exec_command(command)
exit_status = stdout.channel.recv_exit_status()
if exit_status != 0:
raise ContainerError(exit_status, stderr.read(), stdout.read())
# Benchmark the change under review.
print 'benchmarking change'
com = 'cd keystone-performance; bash benchmark.sh'
command = self._build_container_command(com)
stdin, stdout, stderr = self.client.exec_command(command)
exit_status = stdout.channel.recv_exit_status()
if exit_status != 0:
raise ContainerError(exit_status, stderr.read(), stdout.read())
change_results = stdout.read()
return change_results
def _sorted_ls(path):
"""Sort the contents of a directory by last modified date.
:param path: directory path
:returns: a list of files sorted by their last modified time
"""
def _get_modified_time(f):
return os.stat(os.path.join(path, f)).st_mtime
return list(sorted(os.listdir(path), key=_get_modified_time))
def get_next_change_file():
"""Return a filepath that contains data about the next change to test.
:returns: the absolute path of a file
"""
path = '/tmp/perf/'
changes_to_test = _sorted_ls(path)
if changes_to_test:
return os.path.join(path, changes_to_test[0])
def get_requests_per_second(result_data):
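"""Yield {section_name: value} dicts parsed from the benchmark output.

A section starts at a "Benchmarking <name>..." line; its value is taken
from the following "Requests per second:" line.
"""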
tm = re.compile(r'(\d+\.\d+)')
section_name = None
section_value = None
for line in result_data.split('\n'):
if line.startswith('Benchmarking ') and line.endswith('...'):
if section_value:
yield {section_name: section_value}
section_name = line[13:-3]
section_value = None
elif line.startswith('Requests per second:'):
section_value = tm.search(line).group()
if section_name:
yield {section_name: section_value}
def get_time_per_request(result_data):
tm = re.compile(r'(\d+\.\d+)')
section_name = None
section_value = None
for line in result_data.split('\n'):
if line.startswith('Benchmarking ') and line.endswith('...'):
if section_value:
yield {section_name: section_value}
section_name = line[13:-3]
section_value = None
elif line.startswith('Time per request:'):
section_value = tm.search(line).group()
if section_name:
yield {section_name: section_value}
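# Illustrative sketch (not part of the original script): the two generators
# above scrape Apache-Bench-style output produced by benchmark.sh. Given a
# hypothetical result blob like the one below, each generator yields one
# {section_name: value} dict per "Benchmarking <name>..." section.
#
#   sample = (
#       'Benchmarking token creation...\n'
#       'Requests per second:    123.45 [#/sec] (mean)\n'
#       'Time per request:       8.10 [ms] (mean)\n'
#       'Benchmarking token validation...\n'
#       'Requests per second:    234.56 [#/sec] (mean)\n'
#       'Time per request:       4.26 [ms] (mean)\n'
#   )
#   for entry in get_requests_per_second(sample):
#       print entry  # {'token creation': '123.45'}, then {'token validation': '234.56'}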
if __name__ == '__main__':
config_parser = ConfigParser.ConfigParser()
config_parser.read('performance.conf')
gerrit_user = config_parser.get('global', 'gerrit_user')
gerrit_password = config_parser.get('global', 'gerrit_password')
perf_user = config_parser.get('scheduler', 'performance_username')
perf_host = config_parser.get('scheduler', 'performance_host_ip')
results_dir = config_parser.get('scheduler', 'results_directory')
try:
next_change_path = get_next_change_file()
while True:
if next_change_path:
with open(next_change_path, 'r') as f:
event = json.loads(f.read())
if event['type'] == 'change-merged':
change_ref = None
print 'performance testing merged change %s' % (
event['change']['url']
)
else:
change_ref = event['patchSet']['ref']
print 'performance testing %s at patch set %s' % (
event['change']['url'], event['patchSet']['number']
)
                # Establish a connection with the host running the performance tests.
pm = PerformanceManager(perf_host, perf_user)
# Launch a new container and wait for it to be assigned an IP.
container_name, stdin, stdout, stderr = pm.launch_container()
pm.set_container_ip_by_name(container_name)
# SSH into the container, install keystone, and benchmark
try:
pm.install_container()
master_sha, osa_sha, master_results = pm.benchmark_master()
change_results = None
if change_ref:
change_results = pm.benchmark_change(change_ref)
except ContainerError as e:
print 'something bad happened...'
print 'cleaning up and trying again'
# FIXME(lbragstad): I'm not sure what the best way to
# handle this is without just infinite looping and
# hammering a system (maybe that is the best way?).
pm.delete_container_by_name(container_name)
continue
master_rps = get_requests_per_second(master_results)
for value in master_rps:
if 'token validation' in value:
master_validate_rps = value['token validation']
elif 'token creation' in value:
master_create_rps = value['token creation']
master_tpr = get_time_per_request(master_results)
for value in master_tpr:
if 'token validation' in value:
master_validate_tpr = value['token validation']
elif 'token creation' in value:
master_create_tpr = value['token creation']
timestamp = int(time.time())
date = datetime.datetime.fromtimestamp(timestamp).strftime(
'%Y%m%d%H%M%S'
)
results = dict(
sha=master_sha,
osa_sha=osa_sha,
timestamp=timestamp,
token_creation=dict(
requests_per_second=master_create_rps,
time_per_request=master_create_tpr
),
token_validation=dict(
requests_per_second=master_validate_rps,
time_per_request=master_validate_tpr
)
)
results_directory = os.path.join(
results_dir, master_sha, date
)
os.makedirs(results_directory)
summary_file = os.path.join(
results_directory,
'summary.json'
)
with open(summary_file, 'w') as f:
f.write(json.dumps(results))
results_file = os.path.join(
results_directory,
'benchmark-results.txt'
)
with open(results_file, 'w') as f:
f.write(master_results)
if change_results:
patch_rps = get_requests_per_second(change_results)
for value in patch_rps:
if 'token validation' in value:
patch_validate_rps = value['token validation']
elif 'token creation' in value:
patch_create_rps = value['token creation']
patch_tpr = get_time_per_request(change_results)
for value in patch_tpr:
if 'token validation' in value:
patch_validate_tpr = value['token validation']
elif 'token creation' in value:
patch_create_tpr = value['token creation']
# Leave a comment on the review.
msg = _COMMENT.format(
master_sha=master_sha,
master_create_rps=master_create_rps,
master_create_tpr=master_create_tpr,
master_validate_rps=master_validate_rps,
master_validate_tpr=master_validate_tpr,
patch_create_rps=patch_create_rps,
patch_create_tpr=patch_create_tpr,
patch_validate_rps=patch_validate_rps,
patch_validate_tpr=patch_validate_tpr
)
gerrit_auth = auth.HTTPDigestAuth(
gerrit_user, gerrit_password
)
gerrit_client = rest.GerritRestAPI(
'https://review.openstack.org/', auth=gerrit_auth
)
change_id = event['change']['id']
rev = event['patchSet']['revision']
review = rest.GerritReview(message=msg)
gerrit_client.review(change_id, rev, review)
print 'commented on %s' % event['change']['url']
# remove container
pm.delete_container_by_name(container_name)
os.remove(next_change_path)
print 'cleaned up container... ready to test next change'
publish.main(summary_file)
else:
time.sleep(1)
next_change_path = get_next_change_file()
except KeyboardInterrupt:
exit()
| |
# -*- coding: utf-8 -*-
###############################################################################
#
# InsertRecords
# Inserts records into your Zoho CRM account.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class InsertRecords(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the InsertRecords Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(InsertRecords, self).__init__(temboo_session, '/Library/Zoho/CRM/InsertRecords')
def new_input_set(self):
return InsertRecordsInputSet()
def _make_result_set(self, result, path):
return InsertRecordsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return InsertRecordsChoreographyExecution(session, exec_id, path)
class InsertRecordsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the InsertRecords
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AnnualRevenue(self, value):
"""
Set the value of the AnnualRevenue input for this Choreo. ((optional, string) Corresponds to the Annual Revenue field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('AnnualRevenue', value)
def set_AuthenticationToken(self, value):
"""
Set the value of the AuthenticationToken input for this Choreo. ((required, string) A valid authentication token. Permanent authentication tokens can be generated by the GenerateAuthToken Choreo.)
"""
super(InsertRecordsInputSet, self)._set_input('AuthenticationToken', value)
def set_CampaignSource(self, value):
"""
Set the value of the CampaignSource input for this Choreo. ((optional, string) Corresponds to the Campaign Source field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('CampaignSource', value)
def set_City(self, value):
"""
Set the value of the City input for this Choreo. ((optional, string) Corresponds to the City field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('City', value)
def set_Company(self, value):
"""
Set the value of the Company input for this Choreo. ((optional, string) Corresponds to the Company field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('Company', value)
def set_Country(self, value):
"""
Set the value of the Country input for this Choreo. ((optional, string) Corresponds to the Country field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('Country', value)
def set_Description(self, value):
"""
Set the value of the Description input for this Choreo. ((optional, string) Corresponds to the Description field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('Description', value)
def set_EmailOptOut(self, value):
"""
Set the value of the EmailOptOut input for this Choreo. ((optional, boolean) Corresponds to the Email Opt Out field in Zoho. Defaults to 0 for false.)
"""
super(InsertRecordsInputSet, self)._set_input('EmailOptOut', value)
def set_Email(self, value):
"""
Set the value of the Email input for this Choreo. ((optional, string) Corresponds to the Email field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('Email', value)
def set_Fax(self, value):
"""
Set the value of the Fax input for this Choreo. ((optional, string) Corresponds to the Fax field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('Fax', value)
def set_FirstName(self, value):
"""
Set the value of the FirstName input for this Choreo. ((optional, string) Corresponds to the First Name field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('FirstName', value)
def set_Industry(self, value):
"""
Set the value of the Industry input for this Choreo. ((optional, string) Corresponds to the Industry field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('Industry', value)
def set_LastName(self, value):
"""
Set the value of the LastName input for this Choreo. ((required, string) Corresponds to the Last Name field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('LastName', value)
def set_LeadOwner(self, value):
"""
Set the value of the LeadOwner input for this Choreo. ((optional, string) Corresponds to the Lead Owner field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('LeadOwner', value)
def set_LeadSource(self, value):
"""
Set the value of the LeadSource input for this Choreo. ((optional, string) Corresponds to the Lead Source field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('LeadSource', value)
def set_LeadStatus(self, value):
"""
Set the value of the LeadStatus input for this Choreo. ((optional, string) Corresponds to the Lead Status field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('LeadStatus', value)
def set_Mobile(self, value):
"""
Set the value of the Mobile input for this Choreo. ((optional, string) Corresponds to the Mobile field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('Mobile', value)
def set_Module(self, value):
"""
Set the value of the Module input for this Choreo. ((optional, string) The Zoho module you want to access. Defaults to 'Leads'.)
"""
super(InsertRecordsInputSet, self)._set_input('Module', value)
def set_NumOfEmployees(self, value):
"""
Set the value of the NumOfEmployees input for this Choreo. ((optional, string) Corresponds to the Num Of Employees field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('NumOfEmployees', value)
def set_Phone(self, value):
"""
Set the value of the Phone input for this Choreo. ((optional, string) Corresponds to the Phone field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('Phone', value)
def set_Rating(self, value):
"""
Set the value of the Rating input for this Choreo. ((optional, string) Corresponds to the Rating field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('Rating', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid formats are: json and xml (the default).)
"""
super(InsertRecordsInputSet, self)._set_input('ResponseFormat', value)
def set_Salutation(self, value):
"""
Set the value of the Salutation input for this Choreo. ((optional, string) Corresponds to the Salutation field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('Salutation', value)
def set_SkypeID(self, value):
"""
Set the value of the SkypeID input for this Choreo. ((optional, string) Corresponds to the Skype ID field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('SkypeID', value)
def set_State(self, value):
"""
Set the value of the State input for this Choreo. ((optional, string) Corresponds to the State field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('State', value)
def set_Street(self, value):
"""
Set the value of the Street input for this Choreo. ((optional, string) Corresponds to the Street field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('Street', value)
def set_Title(self, value):
"""
Set the value of the Title input for this Choreo. ((optional, string) Corresponds to the Title field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('Title', value)
def set_Website(self, value):
"""
Set the value of the Website input for this Choreo. ((optional, string) Corresponds to the Website field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('Website', value)
def set_ZipCode(self, value):
"""
Set the value of the ZipCode input for this Choreo. ((optional, integer) Corresponds to the Zip Code field in Zoho)
"""
super(InsertRecordsInputSet, self)._set_input('ZipCode', value)
class InsertRecordsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the InsertRecords Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Zoho. Format corresponds to the ResponseFormat input. Defaults to xml.)
"""
return self._output.get('Response', None)
class InsertRecordsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return InsertRecordsResultSet(response, path)
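# Illustrative sketch (assumption, not part of the generated code): typical use
# of this Choreo with the Temboo Python SDK. The session constructor arguments
# and the execute_with_results() entry point are assumed from the standard SDK;
# the credentials and field values below are placeholders.
#
#   from temboo.core.session import TembooSession
#
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = InsertRecords(session)
#   inputs = choreo.new_input_set()
#   inputs.set_AuthenticationToken('ZOHO_AUTH_TOKEN')
#   inputs.set_LastName('Doe')
#   inputs.set_ResponseFormat('json')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())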
| |
#!/usr/bin/env python3
'''
Copyright 2016 Sofus Rose
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys, os, time
import multiprocessing as mp
import numpy as np
#Matplotlib is optional.
MOD_MATPLOTLIB = False
try:
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
MOD_MATPLOTLIB = True
except ImportError:
print("Matplotlib not installed. Graphs won't be drawn")
class Files :
"""
The Files object is an immutable sequence of files, which supports writing simultaneously to all the files.
"""
def __init__(self, *files) :
seq=[]
for f in files:
if isinstance(f, Files): seq += f.files
elif 'write' in dir(f): seq.append(f)
else: raise TypeError('Wrong Input Type: ' + repr(f))
self.files = tuple(seq) #Immutable tuple of file-like objects,
def write(self, inStr, exclInd=[]) :
"""
        Writes inStr to all file-like objects stored within the Files object. You may exclude certain entries with a sequence of indices.
"""
for f in enumerate(self.files) :
if f[0] in exclInd: continue
f[1].write(inStr)
def __add__(self, o, commut=False) :
"""
Implements merging with Files objects and appending file-like objects. Returns new Files object.
"""
if isinstance(o, Files) :
this, other = self.files, o.files
elif 'write' in dir(o) :
this, other = self.files, [o]
else :
return None
if commut: this, other = other, this
return Files(*this, *other) #this and other must be unpackable.
def __radd__(self, o) :
"""
Commutative addition.
"""
return self.__add__(o, commut=True) #Use the add operator. It's commutative!!
def __bool__(self) :
"""
False if empty.
"""
return bool(self.files)
def __getitem__(self, index) :
"""
Supports slicing and indexing.
"""
if isinstance(index, slice) :
            return Files(*self.files[index])
else :
return self.files[index]
def __len__(self) :
"""
Number of files in the Files object.
"""
return len(self.files)
def __repr__(self) :
return 'Files(' + ', '.join("'{}'".format(n.name) for n in self.files) + ')'
def __iter__(self) :
"""
Iterates through the file-like objects.
"""
return iter(self.files)
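# Illustrative sketch (not part of the original module): writing one string to
# several sinks at once and growing a Files object with '+'.
#
#   import io
#   buf = io.StringIO()
#   sinks = Files(sys.stdout, buf)
#   sinks.write('hello\n')                    # written to stdout and to buf
#   sinks = sinks + open('extra.log', 'w')    # '+' returns a new, larger Files
#   print(len(sinks))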
class ColLib :
"""
    Simple hashmap of ANSI color codes. Make sure to activate colors in ~/.bashrc or enable ansi.sys!
"""
cols = { 'HEADER' : '\033[97m',
'OKBLUE' : '\033[94m',
'OKGREEN' : '\033[92m',
'WARNING' : '\033[93m',
'FAIL' : '\033[91m',
'CRIT' : '\033[31m',
'DEBUG' : '\033[35m',
'ENDC' : '\033[0m',
'BOLD' : '\033[1m',
'ITALIC' : '\033[3m',
'UNDERLINE' : '\033[4m'
}
debug = { 'info' : ('[INFO]', 'OKGREEN'),
'error' : ('[ERROR]', 'FAIL'),
'crit' : ('[CRIT]', 'CRIT'),
'warn' : ('[WARNING]', 'WARNING'),
'debug' : ('[DEBUG]', 'DEBUG'),
'run' : ('[RUN]', 'OKBLUE')
}
def colString(color, string) :
"""
Returns a colored string.
"""
        return '{}{}{}'.format(ColLib.cols[color], string, ColLib.cols['ENDC'])
    def dbgString(signal, string) :
        """
        Returns the string prefixed with the colored tag of the given debug signal.
        """
        return '{} {}'.format(ColLib.colString(ColLib.debug[signal][1], ColLib.debug[signal][0]), string)
def printCol(color, colored, *output, **settings) :
"""
Simple print clone where the first printed parameter is colored.
"""
        print(ColLib.cols[color] + str(colored) + ColLib.cols['ENDC'], *output, **settings)
def printDbg(signal, *output, **settings) :
"""
Pass in simple debug signals to print the corresponding entry.
"""
        ColLib.printCol(ColLib.debug[signal][1], ColLib.debug[signal][0], *output, **settings)
class Log(ColLib) :
"""
    Logging object, an instance of which is passed throughout afarm. It has, and changes, state, as the sole exception to the
    'no globals' paradigm. You may pass in any file-like object; the only criterion is that it has a 'write' method. stdout is
    used by default.
"""
def __init__(self, *file, verb=3, useCol=True, startTime=None) :
if not file: file = [sys.stdout]
if startTime is None: startTime = time.perf_counter()
self.verb = verb #From 0 to 3. 0: CRITICAL 1: ERRORS 2: WARNINGS 3: DEBUG. Info all.
self.file = Files(*file)
self.log = [] #Log list. Format: (verb, time in ms, debug_text, text)
self.sTimes = dict() #Dict of start times for various runs.
self._useCol = useCol #Whether or not to use colored output.
self._attrLock = mp.Lock() #The log access lock.
self._startTime = startTime #Global instance time. begins when the instance is created.
def getLogTime(self) :
"""
Gets the current logging time in seconds, from the time of instantiation of the Log object.
"""
return time.perf_counter() - self._startTime
def startTime(self, run) :
"""
Starts the timer for the specified run. Can use any immutable object to mark the run.
"""
self.sTimes[run] = self.getLogTime()
def getTime(self, run) :
"""
Gets the time since startTime for the specified run (an immutable object).
"""
if run in self.sTimes :
return self.getLogTime() - self.sTimes[run]
else :
raise ValueError('Run wasn\'t found!!')
@staticmethod
def bench(f, args=[], kwargs={}, trials=15, graph=False) :
def t(): l = Log(); l.startTime(0); f(*args, **kwargs); return l.getTime(0)
data = np.array([t() for i in range(trials)])
anyl = { 'mean' : np.mean(data),
'median' : np.median(data),
'std_dev' : np.std(data),
'vari' : np.std(data) ** 2,
'total' : sum(data)
}
if graph: Log.graphBench(anyl)
return anyl
@staticmethod
def graphBench(anyl) :
if MOD_MATPLOTLIB :
fig = plt.figure()
x = np.linspace(-3 * anyl['std_dev'] + anyl['mean'], 3 * anyl['std_dev'] + anyl['mean'], 100)
plt.plot(x, mlab.normpdf(x, anyl['mean'], anyl['std_dev']))
plt.axvline(x = anyl['mean'], color='red', linestyle = "--")
plt.text( anyl['mean'] - 0.2 * anyl['std_dev'], 0, 'mean',
horizontalalignment = 'left', verticalalignment='bottom',
rotation = 90, fontsize=10, fontstyle='italic'
)
plt.xlabel('Time (Seconds)', fontsize=15)
plt.ylabel('Distribution', fontsize=11)
plt.show()
    def compItem(self, state, time, *text, noCol=False) :
"""
Returns a displayable log item as a string, formatted with or without color.
"""
decor = { 'info' : '',
'error' : '',
'crit' : ColLib.cols['BOLD'],
'warn' : '',
'debug' : ColLib.cols['BOLD'],
'run' : ColLib.cols['BOLD']
}[state]
timeCol = { 'info' : ColLib.cols['HEADER'],
'error' : ColLib.cols['WARNING'],
'crit' : ColLib.cols['FAIL'],
'warn' : ColLib.cols['HEADER'],
'debug' : ColLib.cols['DEBUG'] + ColLib.cols['BOLD'],
'run' : ColLib.cols['OKGREEN']
}[state]
        if self._useCol and not noCol :
return '{3}{5}{0}{4[ENDC]}\t{6}{1:.10f}{4[ENDC]}: {2}'.format( ColLib.debug[state][0],
time,
''.join(text),
ColLib.cols[ColLib.debug[state][1]],
ColLib.cols,
decor,
timeCol
)
else :
return '{0} {1:.10f}: {2}'.format(ColLib.debug[state][0], time, ''.join(text))
def write(self, *text, verb=2, state='info') :
"""
Adds an entry to the log file, as well as to the internal structure.
Possible state values:
*'info': To give information.
*'error': When things go wrong.
*'crit': When things go very wrong.
*'warn': To let the user know that something weird is up.
*'run': To report on an intensive process.
*'debug': For debugging purposes. Keep it at verbosity 3.
Possible verbosity values, and suggested usage:
*0: User-oriented, general info about important happenings.
*1: Helpful info about what is running/happening, even to the user.
*2: Deeper info about the programs functionality, for fixing problems.
*3: Developer oriented debugging.
"""
text = [str(t).strip() for t in text if str(t).strip()]
if not text: return #Empty write's are no good.
curTime = self.getLogTime()
with self._attrLock :
self.log.append( { 'verb' : verb,
'time' : curTime,
'state' : state,
'text' : ' '.join(str(t) for t in text)
}
)
if self.verb >= verb :
with self._attrLock :
print(self.compItem(state, curTime, ' '.join(text)), file=self.file)
def read(self, verb=None) :
"""
Reads the internal logging data structure, optionally overriding verbosity.
"""
if not verb: verb = self.verb
with self._attrLock :
return '\n'.join([self.compItem(l['state'], l['time'], l['text']) for l in self.log if verb >= l['verb']])
def reset(self, startTime=None) :
return Log(self.file, verb=self.verb, useCol=self._useCol, startTime=startTime)
def getFiles(self) :
"""
Get the list of files to dump Log output to.
"""
return self.file
def setFiles(self, *files) :
"""
Set a new list of files to dump Log output to.
"""
with self._attrLock :
self.file = Files(*files)
def addFiles(self, *files) :
"""
Add a list of files to dump Log output to.
"""
with self._attrLock :
self.file += Files(*files)
def setVerb(self, newVerb) :
"""
Call to set verbosity.
"""
with self._attrLock :
self.verb = newVerb
self.write('Verbosity set to', str(newVerb) + '.', verb=0, state='info')
def setCol(self, newUseCol) :
"""
Call to change color output.
"""
with self._attrLock :
self._useCol = newUseCol
self.write('Color Output set to', self._useCol, verb=0, state='info')
def __call__(self, verb, state, *text) :
"""
Identical to Log.write(), except it requires the verbosity level to be specified.
"""
self.write(verb=verb, state=state, *text)
def __repr__(self) :
return ( 'Log(' +
(', '.join("'{}'".format(f.name) for f in self.file.files) + ', ' if self.file else '') +
'verb={}, useCol={}, startTime={:.3f})'.format(self.verb, self._useCol, self._startTime)
)
def __str__(self) :
return self.read()
def __add__(self, o, commut=False) :
"""
Merges a Log object with another Log object, a Files object, or a file-like object.
        *For Log object addition, the minimum startTime attribute is used to initialize the merged startTime.
*The Files objects of both Log objects are merged.
"""
if isinstance(o, Log) :
            l = self.reset(min(self.getLogTime(), o.getLogTime())) #Min of self and other log time.
l.log = self.log + o.log
l.log.sort(key=lambda item: item['time']) #Make sure to sort the internal log by time.
l.addFiles(o.file)
return l
elif isinstance(o, Files) :
l = self.reset(self.getLogTime())
l.log = self.log
l.setFiles(self.getFiles(), *o.files)
return l
elif 'write' in dir(o) :
l = self.reset(self.getLogTime())
l.file = o + self.file if commut else self.file + o
return l
else :
return None
def __radd__(self, o) :
return self.__add__(o, commut=True) #Use the add operator. It's commutative!!
def __bool__(self) :
"""
False if log is empty.
"""
return bool(self.log)
def __getitem__(self, i) :
"""
Supports slicing and indexing, from recent (0) to oldest (end).
"""
if isinstance(i, slice) :
l = self.reset(self.getLogTime())
for ind, itm in enumerate(self.log) :
                if ind in range(i.start if i.start else 0, i.stop if i.stop is not None else len(self.log), i.step if i.step else 1): l.log.append(itm)
return list(l)
else :
l = self.log[::-1][i]
return self.compItem(l['state'], l['time'], l['text'], noCol = not self._useCol)
def __len__(self) :
"""
Amount of items in the log.
"""
return len(self.log)
def __iter__(self) :
"""
Iterator never colors output.
"""
return iter(self.compItem(l['state'], l['time'], l['text'], noCol = True) for l in self.log)
class LogFile() :
"""
Similar to a normal file, except it splits into several files. On the frontend, however, it acts as if it were a single file.
*Writes to 'path'.log.
*When maxLen is exceeded, lines are pushed into 'path'.0.log, then 'path'.1.log, etc. .
"""
    def __init__(self, path, maxLen=1000, trunc=False) :
"""
Constructor accepts a path (extension will be rewritten to '.log'), a maximum length, and will optionally truncate
any previous logfiles.
"""
self.path = os.path.splitext(path)[0] + '.log'
self.bPath = os.path.splitext(self.path)[0]
self.maxLen = maxLen
self.name = '<{0}.log, {0}.0...n.log>'.format(self.bPath)
self.lines = 0
self.fileNum = 0
#If the logfile already exists, it's read + rewritten using the current maxLen.
if os.path.exists(self.path) :
if trunc: self.truncate(); return
inLines = open(self.path, 'r').readlines()[::-1]
os.remove(os.path.abspath(self.path)) #Remove the old path.log.
i = 0
while os.path.exists('{0}.{1}.log'.format(self.bPath, i)) :
inLines += open('{0}.{1}.log'.format(self.bPath, i), 'r').readlines()[::-1]
os.remove(os.path.abspath('{0}.{1}.log'.format(self.bPath, i)))
i += 1
self.write(''.join(reversed(inLines)))
def write(self, *inStr) :
apnd = list(filter(bool, ''.join(inStr).strip().split('\n')))
if not apnd: return #Nothing to append = don't even try!
if not os.path.exists(self.path): open(self.path, 'w').close() #Make sure path.log exists.
#Empty apnd line by line.
while len(apnd) > 0 :
toWrite = self.maxLen * (self.fileNum + 1) - self.lines #Lines needed to fill up path.log
if toWrite == 0 : #Time to make new files.
#Rename upwards. path.n.log -> path.(n+1).log, etc. . path.log becomes path.0.log.
for i in reversed(range(self.fileNum)) :
os.rename('{0}.{1}.log'.format(self.bPath, i), '{0}.{1}.log'.format(self.bPath, i+1))
os.rename('{0}.log'.format(self.bPath), '{0}.0.log'.format(self.bPath))
#Make new path.log.
open(self.path, 'w').close() #Just create the file.
#Number of files just increased.
self.fileNum += 1
else : #Fill up path.log.
print(apnd.pop(0), file=open(self.path, 'a'))
                #Number of written lines just increased.
self.lines += 1
def read(self) :
collec = []
for i in reversed(range(self.fileNum)) :
collec += open('{0}.{1}.log'.format(self.bPath, i), 'r').readlines()
collec += open(self.path, 'r').readlines()
return ''.join(collec)
def truncate(self) :
"""
Deletes all associated files + resets the instance.
"""
i = 0
os.remove(os.path.abspath(self.path)) #Remove the old path.log.
while os.path.exists('{0}.{1}.log'.format(self.bPath, i)) : #Remove all log files.
os.remove(os.path.abspath('{0}.{1}.log'.format(self.bPath, i)))
i += 1
self.lines = 0
self.fileNum = 0
def readlines(self) :
return self.read().split('\n')
def isatty(self) :
"""
Always returns false, as a LogFile is never associated with a tty.
"""
return False
def __iter__(self) :
return (line for line in self.readlines())
def __repr__(self) :
return 'LogFile({0}, maxLen={1})'.format(self.path, self.maxLen)
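# Illustrative sketch (not part of the original module): LogFile rotation.
# With maxLen=2, writing a third line pushes the first two lines into
# 'demo.0.log' while 'demo.log' keeps only the newest line.
#
#   lf = LogFile('demo', maxLen=2, trunc=True)
#   lf.write('one\ntwo\nthree\n')
#   print(lf.read())   # 'one', 'two' (from demo.0.log) followed by 'three'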
def coolTest() :
l = Log(Files(LogFile('first', 10, True), LogFile('second', 20, True)), LogFile('third', 30, True), sys.stdout)
l(0, 'info', 'Big Failure Oh NO!')
l(0, 'error', 'Big Failure Oh NO!')
l(0, 'crit', 'Big Failure Oh NO!')
l(0, 'warn', 'Big Failure Oh NO!')
l(0, 'debug', 'Big Failure Oh NO!')
l(0, 'run', 'Big Failure Oh NO!')
print('We got ourselves a log file here kids.')
for i in range(50) :
l(0, 'run', 'This is the', i, 'run today!')
print(l.getLogTime())
def logFileTest() :
l = LogFile('hi.log', maxLen = 3, trunc=True)
print('\n', repr(l), sep='')
print('hi', 'world', file=l, sep='\n')
print('\n', repr(l), sep='')
print('you', 'are', 'cool', 'friend', 'hi', file=l, sep='\n')
print('hello\nmotherfucker\nits\narnold\nyour\nold\nfriend\nrunbaby!!\nlittlebitch :)', file=l)
print('\n', repr(l), sep='')
print('Reading Log Files:', repr(l.read()))
print(l.name, '\n\n\n')
print(l.read().split('\n'))
print(len([x for x in l.read().split('\n') if x]))
l = LogFile('hi.log', 10)
print(l.read().split('\n'))
print(len([x for x in l.read().split('\n') if x]))
l = Log(LogFile('hi', 100, False))
print(repr(l))
for x in range(10) :
l(0, x)
#~ l.setCol(False)
l(0, 'info', 'Big Failure Oh NO!')
l(0, 'error', 'Big Failure Oh NO!')
l(0, 'crit', 'Big Failure Oh NO!')
l(0, 'warn', 'Big Failure Oh NO!')
l(0, 'debug', 'Big Failure Oh NO!')
l(0, 'run', 'Big Failure Oh NO!')
l = LogFile('hi', 500, False)
print('hihi', 'you', 'can\'t', 'beat', 'the', 'trunc', file=l, sep='\n')
def logTest() :
l = Log(open('hi.txt', 'w'))
#~ l.setCol(False)
print(repr(l))
print('1', file=l)
l(1, '2')
a = l.reset()
#~ l.setCol(True)
l.addFiles(sys.stderr)
print('\n', repr(l), sep='')
print('\n', repr(a), sep='')
a(2, '3')
a.write('4')
a.addFiles(sys.stdout)
print('\n a + l ', repr(a + l), sep='')
print(a + l)
print('\n', repr(a + sys.stderr), sep='')
print('\n', repr(sys.stderr + a), sep='')
print('\n', (l + a), sep='')
print('\n', (l + a)[0:3], sep='')
for item in (l + a) :
print(item)
print("\nLength of l + a: ", len(l + a))
print("\nl + a: ", repr(Log() + Files(sys.stdout)))
print('\n', l.read(verb=0), sep='')
if __name__ == "__main__" :
#~ unitTest()
#~ logFileTest()
coolTest()
| |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapreduce execution context.
Mapreduce context provides handler code with information about
current mapreduce execution and organizes utility data flow
from handlers such as counters, log messages, mutation pools.
"""
__all__ = [
"get",
"Context",
"Counters",
"EntityList",
"ItemList",
"MutationPool",
"COUNTER_MAPPER_CALLS",
"COUNTER_MAPPER_WALLTIME_MS",
"DATASTORE_DEADLINE",
"MAX_ENTITY_COUNT",
"MAX_POOL_SIZE",
]
import threading
from google.appengine.api import datastore
from google.appengine.ext import db
# Maximum pool size in bytes. Pool will be flushed when it reaches this amount.
# We use 900,000 bytes, which is slightly less than the maximum allowed RPC size
# of 1M, to leave some space cushion.
MAX_POOL_SIZE = 900 * 1000
# Maximum number of items. Pool will be flushed when it reaches this amount.
MAX_ENTITY_COUNT = 500
# Deadline in seconds for mutation pool datastore operations.
DATASTORE_DEADLINE = 15
# The name of the counter which counts all mapper calls.
COUNTER_MAPPER_CALLS = "mapper-calls"
# Total walltime in msec given to mapper process. This is not just mapper
# handler function, but includes all i/o overhead.
COUNTER_MAPPER_WALLTIME_MS = "mapper-walltime-ms"
def _normalize_entity(value):
"""Return an entity from an entity or model instance."""
# TODO(user): Consider using datastore.NormalizeAndTypeCheck.
if getattr(value, "_populate_internal_entity", None):
return value._populate_internal_entity()
return value
def _normalize_key(value):
"""Return a key from an entity, model instance, key, or key string."""
if getattr(value, "key", None):
return value.key()
elif isinstance(value, basestring):
return datastore.Key(value)
else:
return value
class ItemList(object):
"""Holds list of arbitrary items, and their total size.
Properties:
items: list of objects.
length: length of item list.
size: aggregate item size in bytes.
"""
def __init__(self):
"""Constructor."""
self.items = []
self.length = 0
self.size = 0
def append(self, item, item_size):
"""Add new item to the list.
Args:
item: an item to add to the list.
item_size: item size in bytes as int.
"""
self.items.append(item)
self.length += 1
self.size += item_size
def clear(self):
"""Clear item list."""
self.items = []
self.length = 0
self.size = 0
@property
def entities(self):
"""Return items. For backwards compatability."""
return self.items
# For backwards compatibility.
EntityList = ItemList
# TODO(user): mutation pool has no error handling at all. Add some.
class MutationPool(object):
"""Mutation pool accumulates datastore changes to perform them in batch.
Properties:
puts: ItemList of entities to put to datastore.
deletes: ItemList of keys to delete from datastore.
max_pool_size: maximum single list pool size. List changes will be flushed
when this size is reached.
"""
def __init__(self,
max_pool_size=MAX_POOL_SIZE,
max_entity_count=MAX_ENTITY_COUNT,
mapreduce_spec=None):
"""Constructor.
Args:
      max_pool_size: maximum pool size in bytes before flushing it to db.
max_entity_count: maximum number of entities before flushing it to db.
      mapreduce_spec: An optional instance of MapreduceSpec.
"""
self.max_pool_size = max_pool_size
self.max_entity_count = max_entity_count
params = mapreduce_spec.params if mapreduce_spec is not None else {}
self.force_writes = bool(params.get("force_ops_writes", False))
self.puts = ItemList()
self.deletes = ItemList()
def put(self, entity):
"""Registers entity to put to datastore.
Args:
entity: an entity or model instance to put.
"""
actual_entity = _normalize_entity(entity)
entity_size = len(actual_entity._ToPb().Encode())
if (self.puts.length >= self.max_entity_count or
(self.puts.size + entity_size) > self.max_pool_size):
self.__flush_puts()
self.puts.append(actual_entity, entity_size)
def delete(self, entity):
"""Registers entity to delete from datastore.
Args:
entity: an entity, model instance, or key to delete.
"""
# This is not very nice: we're calling two protected methods here...
key = _normalize_key(entity)
key_size = len(key._ToPb().Encode())
if (self.deletes.length >= self.max_entity_count or
(self.deletes.size + key_size) > self.max_pool_size):
self.__flush_deletes()
self.deletes.append(key, key_size)
# TODO(user): some kind of error handling/retries is needed here.
def flush(self):
"""Flush(apply) all changed to datastore."""
self.__flush_puts()
self.__flush_deletes()
def __flush_puts(self):
"""Flush all puts to datastore."""
if self.puts.length:
datastore.Put(self.puts.items, config=self.__create_config())
self.puts.clear()
def __flush_deletes(self):
"""Flush all deletes to datastore."""
if self.deletes.length:
datastore.Delete(self.deletes.items, config=self.__create_config())
self.deletes.clear()
def __create_config(self):
"""Creates datastore Config.
Returns:
A datastore_rpc.Configuration instance.
"""
return datastore.CreateConfig(deadline=DATASTORE_DEADLINE,
force_writes=self.force_writes)
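# Illustrative sketch (not part of the original module): using the pool
# directly, assuming 'entity' is a datastore entity or db model instance.
# put() and delete() batch the operations and flush automatically once the
# size/count limits are hit; flush() applies whatever is left.
#
#   pool = MutationPool(max_pool_size=MAX_POOL_SIZE,
#                       max_entity_count=MAX_ENTITY_COUNT)
#   pool.put(entity)
#   pool.delete(entity.key())
#   pool.flush()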
# This doesn't do much yet. In future it will play nicely with checkpoint/error
# handling system.
class Counters(object):
"""Regulates access to counters."""
def __init__(self, shard_state):
"""Constructor.
Args:
shard_state: current mapreduce shard state as model.ShardState.
"""
self._shard_state = shard_state
def increment(self, counter_name, delta=1):
"""Increment counter value.
Args:
counter_name: name of the counter as string.
delta: increment delta as int.
"""
self._shard_state.counters_map.increment(counter_name, delta)
def flush(self):
"""Flush unsaved counter values."""
pass
class Context(object):
"""MapReduce execution context.
Properties:
mapreduce_spec: current mapreduce specification as model.MapreduceSpec.
shard_state: current shard state as model.ShardState.
mutation_pool: current mutation pool as MutationPool.
counters: counters object as Counters.
"""
# Current context instance
_local = threading.local()
def __init__(self, mapreduce_spec, shard_state, task_retry_count=0):
"""Constructor.
Args:
mapreduce_spec: mapreduce specification as model.MapreduceSpec.
shard_state: shard state as model.ShardState.
"""
self.mapreduce_spec = mapreduce_spec
self.shard_state = shard_state
self.task_retry_count = task_retry_count
if self.mapreduce_spec:
self.mapreduce_id = self.mapreduce_spec.mapreduce_id
else:
# Only in tests
self.mapreduce_id = None
if self.shard_state:
self.shard_id = self.shard_state.get_shard_id()
else:
# Only in tests
self.shard_id = None
self.mutation_pool = MutationPool(
max_pool_size=(MAX_POOL_SIZE/(2**self.task_retry_count)),
max_entity_count=(MAX_ENTITY_COUNT/(2**self.task_retry_count)),
mapreduce_spec=mapreduce_spec)
self.counters = Counters(shard_state)
self._pools = {}
self.register_pool("mutation_pool", self.mutation_pool)
self.register_pool("counters", self.counters)
def flush(self):
"""Flush all information recorded in context."""
for pool in self._pools.values():
pool.flush()
# TODO(user): Add convenience method for mapper params.
# TODO(user): Add fatal error logging method here. Will log the message
# and set the shard state to failure result status, which the controller
# callback should pick up and force all shards to terminate.
def register_pool(self, key, pool):
"""Register an arbitrary pool to be flushed together with this context.
Args:
key: pool key as string.
pool: a pool instance. Pool should implement flush(self) method.
"""
self._pools[key] = pool
def get_pool(self, key):
"""Obtains an instance of registered pool.
Args:
key: pool key as string.
Returns:
an instance of the pool registered earlier, or None.
"""
return self._pools.get(key, None)
@classmethod
def _set(cls, context):
"""Set current context instance.
Args:
context: new context as Context or None.
"""
cls._local._context_instance = context
def get():
"""Get current context instance.
Returns:
current context as Context.
"""
if not hasattr(Context._local, '_context_instance') :
return None
return Context._local._context_instance
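# Illustrative sketch (assumption, not part of the original module): inside a
# running mapper handler the current Context is obtained with get() and used
# for counters and registered pools. 'my_entity_mapper' is a hypothetical
# handler name.
#
#   def my_entity_mapper(entity):
#     ctx = get()
#     if ctx:
#       ctx.counters.increment("entities-seen")
#       ctx.get_pool("mutation_pool").put(entity)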
| |
#
# @file TestInternalConsistencyChecks.py
# @brief Tests the internal consistency validation.
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestInternalConsistencyChecks.cpp
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestInternalConsistencyChecks(unittest.TestCase):
def test_internal_consistency_check_20306(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
fd = m.createFunctionDefinition()
fd.setId("fd")
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 20306 )
ast = libsbml.parseFormula("lambda(x, 2*x)")
fd.setMath(ast)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_20307(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
fd = m.createFunctionDefinition()
ast = libsbml.parseFormula("lambda(x, 2*x)")
fd.setMath(ast)
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 20307 )
fd.setId("fd")
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_20419(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
ud = m.createUnitDefinition()
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 20419 )
ud.setId("ud")
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_20421(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
ud = m.createUnitDefinition()
ud.setId("ud")
u = ud.createUnit()
errors = d.checkInternalConsistency()
self.assert_( errors == 4 )
self.assert_( d.getError(0).getErrorId() == 20421 )
self.assert_( d.getError(1).getErrorId() == 20421 )
self.assert_( d.getError(2).getErrorId() == 20421 )
self.assert_( d.getError(3).getErrorId() == 20421 )
u.setKind(libsbml.UNIT_KIND_MOLE)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 3 )
self.assert_( d.getError(0).getErrorId() == 20421 )
self.assert_( d.getError(1).getErrorId() == 20421 )
self.assert_( d.getError(2).getErrorId() == 20421 )
u.setExponent(1.0)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 2 )
self.assert_( d.getError(0).getErrorId() == 20421 )
self.assert_( d.getError(1).getErrorId() == 20421 )
u.setScale(0)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 20421 )
u.setMultiplier(1.0)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_20517(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
c = m.createCompartment()
errors = d.checkInternalConsistency()
self.assert_( errors == 2 )
self.assert_( d.getError(0).getErrorId() == 20517 )
self.assert_( d.getError(1).getErrorId() == 20517 )
c.setId("c")
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 20517 )
c.setConstant(True)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_20623(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
c = m.createCompartment()
c.setId("c")
c.setConstant(True)
s = m.createSpecies()
errors = d.checkInternalConsistency()
self.assert_( errors == 5 )
self.assert_( d.getError(0).getErrorId() == 20623 )
self.assert_( d.getError(1).getErrorId() == 20614 )
self.assert_( d.getError(2).getErrorId() == 20623 )
self.assert_( d.getError(3).getErrorId() == 20623 )
self.assert_( d.getError(4).getErrorId() == 20623 )
s.setId("s")
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 4 )
self.assert_( d.getError(0).getErrorId() == 20614 )
self.assert_( d.getError(1).getErrorId() == 20623 )
self.assert_( d.getError(2).getErrorId() == 20623 )
self.assert_( d.getError(3).getErrorId() == 20623 )
s.setCompartment("c")
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 3 )
self.assert_( d.getError(0).getErrorId() == 20623 )
self.assert_( d.getError(1).getErrorId() == 20623 )
self.assert_( d.getError(2).getErrorId() == 20623 )
s.setHasOnlySubstanceUnits(True)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 2 )
self.assert_( d.getError(0).getErrorId() == 20623 )
self.assert_( d.getError(1).getErrorId() == 20623 )
s.setBoundaryCondition(True)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 20623 )
s.setConstant(True)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_20706(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
p = m.createParameter()
errors = d.checkInternalConsistency()
self.assert_( errors == 2 )
self.assert_( d.getError(0).getErrorId() == 20706 )
self.assert_( d.getError(1).getErrorId() == 20706 )
p.setId("c")
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 20706 )
p.setConstant(True)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_20804(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
ia = m.createInitialAssignment()
ia.setSymbol("fd")
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 20804 )
ast = libsbml.parseFormula("lambda(x, 2*x)")
ia.setMath(ast)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_20805(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
ia = m.createInitialAssignment()
ast = libsbml.parseFormula("lambda(x, 2*x)")
ia.setMath(ast)
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 20805 )
ia.setSymbol("fd")
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_20907_alg(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
r = m.createAlgebraicRule()
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 20907 )
ast = libsbml.parseFormula("lambda(x, 2*x)")
r.setMath(ast)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_20907_assign(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
r = m.createAssignmentRule()
r.setVariable("fd")
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 20907 )
ast = libsbml.parseFormula("lambda(x, 2*x)")
r.setMath(ast)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_20907_rate(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
r = m.createRateRule()
r.setVariable("fd")
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 20907 )
ast = libsbml.parseFormula("lambda(x, 2*x)")
r.setMath(ast)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_20908(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
r = m.createAssignmentRule()
ast = libsbml.parseFormula("lambda(x, 2*x)")
r.setMath(ast)
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 20908 )
r.setVariable("fd")
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_20909(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
r = m.createRateRule()
ast = libsbml.parseFormula("lambda(x, 2*x)")
r.setMath(ast)
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 20909 )
r.setVariable("fd")
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_21007(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
r = m.createConstraint()
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 21007 )
ast = libsbml.parseFormula("lambda(x, 2*x)")
r.setMath(ast)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_21101(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
r = m.createReaction()
r.setId("r")
r.setReversible(True)
r.setFast(False)
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 21101 )
sr = r.createReactant()
sr.setSpecies("s")
sr.setConstant(True)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_21110(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
r = m.createReaction()
sr = r.createProduct()
sr.setSpecies("s")
sr.setConstant(True)
errors = d.checkInternalConsistency()
self.assert_( errors == 3 )
self.assert_( d.getError(0).getErrorId() == 21110 )
self.assert_( d.getError(1).getErrorId() == 21110 )
self.assert_( d.getError(2).getErrorId() == 21110 )
r.setId("r")
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 2 )
self.assert_( d.getError(0).getErrorId() == 21110 )
self.assert_( d.getError(1).getErrorId() == 21110 )
r.setReversible(True)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 21110 )
r.setFast(False)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_21116(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
r = m.createReaction()
r.setId("r")
r.setReversible(True)
r.setFast(False)
sr = r.createReactant()
errors = d.checkInternalConsistency()
self.assert_( errors == 2 )
self.assert_( d.getError(0).getErrorId() == 21116 )
self.assert_( d.getError(1).getErrorId() == 21116 )
sr.setSpecies("s")
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 21116 )
sr.setConstant(True)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_21117(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
r = m.createReaction()
r.setId("r")
r.setReversible(True)
r.setFast(False)
sr = r.createReactant()
sr.setSpecies("s")
sr.setConstant(True)
msr = r.createModifier()
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 21117 )
msr.setSpecies("s")
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_21130(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
r = m.createReaction()
r.setId("r")
r.setReversible(True)
r.setFast(False)
sr = r.createReactant()
sr.setSpecies("s")
sr.setConstant(True)
kl = r.createKineticLaw()
lp = kl.createLocalParameter()
lp.setId("s")
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 21130 )
ast = libsbml.parseFormula("2*x")
kl.setMath(ast)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_21172(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
r = m.createReaction()
r.setId("r")
r.setReversible(True)
r.setFast(False)
sr = r.createReactant()
sr.setSpecies("s")
sr.setConstant(True)
kl = r.createKineticLaw()
ast = libsbml.parseFormula("2*x")
kl.setMath(ast)
lp = kl.createLocalParameter()
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 21172 )
lp.setId("pp")
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_21201(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
r = m.createEvent()
r.setUseValuesFromTriggerTime(True)
ea = r.createEventAssignment()
ea.setVariable("s")
ast = libsbml.parseFormula("2*x")
ea.setMath(ast)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 21201 )
t = r.createTrigger()
t.setPersistent(True)
t.setInitialValue(False)
t.setMath(ast)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_21203(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
r = m.createEvent()
r.setUseValuesFromTriggerTime(True)
ast = libsbml.parseFormula("2*x")
t = r.createTrigger()
t.setMath(ast)
t.setPersistent(True)
t.setInitialValue(False)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
#self.assert_( d.getError(0).getErrorId() == 21203 )
ea = r.createEventAssignment()
ea.setVariable("ea")
ea.setMath(ast)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_21209(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
r = m.createEvent()
r.setUseValuesFromTriggerTime(True)
ea = r.createEventAssignment()
ea.setVariable("s")
ast = libsbml.parseFormula("2*x")
ea.setMath(ast)
t = r.createTrigger()
t.setPersistent(True)
t.setInitialValue(False)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 21209 )
t.setMath(ast)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_21210(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
r = m.createEvent()
r.setUseValuesFromTriggerTime(True)
ast = libsbml.parseFormula("2*x")
t = r.createTrigger()
t.setMath(ast)
t.setPersistent(True)
t.setInitialValue(False)
ea = r.createEventAssignment()
ea.setVariable("ea")
ea.setMath(ast)
delay = r.createDelay()
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 21210 )
delay.setMath(ast)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_21213(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
r = m.createEvent()
r.setUseValuesFromTriggerTime(True)
ea = r.createEventAssignment()
ea.setVariable("s")
ast = libsbml.parseFormula("2*x")
t = r.createTrigger()
t.setPersistent(True)
t.setInitialValue(False)
t.setMath(ast)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 21213 )
ea.setMath(ast)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_21214(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
r = m.createEvent()
r.setUseValuesFromTriggerTime(True)
ea = r.createEventAssignment()
ast = libsbml.parseFormula("2*x")
ea.setMath(ast)
t = r.createTrigger()
t.setPersistent(True)
t.setInitialValue(False)
t.setMath(ast)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 21214 )
ea.setVariable("s")
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_21225(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
r = m.createEvent()
ea = r.createEventAssignment()
ea.setVariable("s")
ast = libsbml.parseFormula("2*x")
ea.setMath(ast)
t = r.createTrigger()
t.setPersistent(True)
t.setInitialValue(False)
t.setMath(ast)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 21225 )
r.setUseValuesFromTriggerTime(True)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_21226(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
r = m.createEvent()
r.setUseValuesFromTriggerTime(True)
ea = r.createEventAssignment()
ea.setVariable("s")
ast = libsbml.parseFormula("2*x")
ea.setMath(ast)
t = r.createTrigger()
t.setMath(ast)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 2 )
self.assert_( d.getError(0).getErrorId() == 21226 )
self.assert_( d.getError(1).getErrorId() == 21226 )
t.setPersistent(True)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 21226 )
t.setInitialValue(False)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_21231(self):
d = libsbml.SBMLDocument(3,1)
m = d.createModel()
r = m.createEvent()
r.setUseValuesFromTriggerTime(True)
ast = libsbml.parseFormula("2*x")
t = r.createTrigger()
t.setMath(ast)
t.setPersistent(True)
t.setInitialValue(False)
ea = r.createEventAssignment()
ea.setVariable("ea")
ea.setMath(ast)
prior = r.createPriority()
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 21231 )
prior.setMath(ast)
d.getErrorLog().clearLog()
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99901(self):
d = libsbml.SBMLDocument(2,4)
c = libsbml.Compartment(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
dim = 2
c.setSpatialDimensions(dim)
c.setId("c")
m.addCompartment(c)
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 10103 )
d = None
pass
def test_internal_consistency_check_99902(self):
d = libsbml.SBMLDocument(2,4)
c = libsbml.Compartment(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c.setCompartmentType("hh")
c.setId("c")
m.addCompartment(c)
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 10103 )
d = None
pass
def test_internal_consistency_check_99903(self):
d = libsbml.SBMLDocument(2,4)
c = libsbml.Compartment(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c.setConstant(True)
c.setId("c")
m.addCompartment(c)
r = m.createAssignmentRule()
r.setVariable("c")
r.setFormula("2*3")
errors = d.checkInternalConsistency()
self.assert_( errors == 3 )
d = None
pass
def test_internal_consistency_check_99903_localparam(self):
d = libsbml.SBMLDocument(2,4)
p = libsbml.Parameter(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("cc")
r = m.createReaction()
r.setId("r")
sr = r.createReactant()
sr.setSpecies("s")
kl = r.createKineticLaw()
kl.setFormula("2")
p.setId("p")
p.setConstant(False)
kl.addParameter(p)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99903_param(self):
d = libsbml.SBMLDocument(2,4)
p = libsbml.Parameter(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("cc")
p.setConstant(True)
p.setId("c")
m.addParameter(p)
r = m.createAssignmentRule()
r.setVariable("c")
r.setFormula("2*3")
errors = d.checkInternalConsistency()
self.assert_( errors == 2 )
d = None
pass
def test_internal_consistency_check_99904(self):
d = libsbml.SBMLDocument(2,4)
c = libsbml.Compartment(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c.setId("c")
c.setMetaId("mmm")
m.addCompartment(c)
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 10103 )
d = None
pass
def test_internal_consistency_check_99904_kl(self):
d = libsbml.SBMLDocument(2,4)
kl = libsbml.KineticLaw(2,4)
m = d.createModel()
d.setLevelAndVersion(1,2,False)
c = m.createCompartment()
c.setId("cc")
r = m.createReaction()
r.setId("r")
sr = r.createReactant()
sr.setSpecies("s")
kl.setFormula("2")
kl.setMetaId("mmm")
r.setKineticLaw(kl)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99904_model(self):
d = libsbml.SBMLDocument(2,4)
d.setLevelAndVersion(1,2,False)
m = libsbml.Model(2,4)
c = m.createCompartment()
c.setId("cc")
m.setMetaId("mmm")
d.setModel(m)
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 20201 )
d = None
pass
def test_internal_consistency_check_99904_param(self):
d = libsbml.SBMLDocument(2,4)
p = libsbml.Parameter(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("cc")
p.setId("p")
p.setMetaId("mmm")
m.addParameter(p)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99904_react(self):
d = libsbml.SBMLDocument(2,4)
r = libsbml.Reaction(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("cc")
r.setId("r")
r.setMetaId("mmm")
m.addReaction(r)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99904_rule_alg(self):
d = libsbml.SBMLDocument(2,4)
r = libsbml.AlgebraicRule(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("cc")
r.setMetaId("mmm")
r.setFormula("2")
m.addRule(r)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99904_rule_assign(self):
d = libsbml.SBMLDocument(2,4)
r = libsbml.AssignmentRule(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("cc")
c.setConstant(False)
r.setVariable("cc")
r.setFormula("2")
r.setMetaId("mmm")
m.addRule(r)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99904_rule_rate(self):
d = libsbml.SBMLDocument(2,4)
r = libsbml.RateRule(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("cc")
c.setConstant(False)
r.setVariable("cc")
r.setFormula("2")
r.setMetaId("mmm")
m.addRule(r)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99904_species(self):
d = libsbml.SBMLDocument(2,4)
s = libsbml.Species(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("c")
s.setCompartment("c")
s.setId("s")
s.setMetaId("mmm")
m.addSpecies(s)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99904_speciesRef(self):
d = libsbml.SBMLDocument(2,4)
sr = libsbml.SpeciesReference(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("c")
s = m.createSpecies()
s.setId("s")
s.setCompartment("c")
r = m.createReaction()
r.setId("r")
sr.setSpecies("s")
sr.setMetaId("mmm")
r.addProduct(sr)
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 21101 )
d = None
pass
def test_internal_consistency_check_99904_unit(self):
d = libsbml.SBMLDocument(2,4)
u = libsbml.Unit(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("cc")
ud = m.createUnitDefinition()
ud.setId("ud")
u.setMetaId("mmm")
u.setKind(libsbml.UNIT_KIND_MOLE)
ud.addUnit(u)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99904_unitdef(self):
d = libsbml.SBMLDocument(2,4)
u = libsbml.UnitDefinition(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("cc")
u.setId("ud")
u.setMetaId("mmm")
u.createUnit()
m.addUnitDefinition(u)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99905(self):
d = libsbml.SBMLDocument(2,4)
c = libsbml.Compartment(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c.setId("c")
c.setSBOTerm(2)
m.addCompartment(c)
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 10103 )
d = None
pass
def test_internal_consistency_check_99905_ct(self):
d = libsbml.SBMLDocument(2,4)
ct = libsbml.CompartmentType(2,4)
m = d.createModel()
d.setLevelAndVersion(2,2,False)
ct.setId("ct")
ct.setSBOTerm(5)
m.addCompartmentType(ct)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99905_delay(self):
d = libsbml.SBMLDocument(2,4)
delay = libsbml.Delay(2,4)
e = libsbml.Event(2,4)
m = d.createModel()
d.setLevelAndVersion(2,2,False)
delay.setSBOTerm(5)
e.setDelay(delay)
m.addEvent(e)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99905_species(self):
d = libsbml.SBMLDocument(2,4)
s = libsbml.Species(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("c")
s.setId("s")
s.setCompartment("c")
s.setSBOTerm(2)
m.addSpecies(s)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99905_st(self):
d = libsbml.SBMLDocument(2,4)
ct = libsbml.SpeciesType(2,4)
m = d.createModel()
d.setLevelAndVersion(2,2,False)
ct.setId("st")
ct.setSBOTerm(5)
m.addSpeciesType(ct)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99905_stoichmath(self):
d = libsbml.SBMLDocument(2,4)
sm = libsbml.StoichiometryMath(2,4)
m = d.createModel()
d.setLevelAndVersion(2,2,False)
s = m.createSpecies()
s.setId("s")
c = m.createCompartment()
c.setId("c")
s.setCompartment("c")
r = m.createReaction()
r.setId("r")
sr = r.createProduct()
sr.setSpecies("s")
sm.setSBOTerm(5)
sr.setStoichiometryMath(sm)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99905_trigger(self):
d = libsbml.SBMLDocument(2,4)
trigger = libsbml.Trigger(2,4)
e = libsbml.Event(2,4)
m = d.createModel()
d.setLevelAndVersion(2,2,False)
trigger.setSBOTerm(5)
e.setTrigger(trigger)
m.addEvent(e)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99905_unit(self):
d = libsbml.SBMLDocument(2,4)
u = libsbml.Unit(2,4)
d.setLevelAndVersion(2,2,False)
m = d.createModel()
ud = m.createUnitDefinition()
ud.setId("ud")
u.setKind(libsbml.UNIT_KIND_MOLE)
u.setSBOTerm(9)
ud.addUnit(u)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99905_unitdef(self):
d = libsbml.SBMLDocument(2,4)
u = libsbml.UnitDefinition(2,4)
d.setLevelAndVersion(2,2,False)
m = d.createModel()
u.setId("ud")
u.setSBOTerm(9)
u.createUnit()
m.addUnitDefinition(u)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99906(self):
d = libsbml.SBMLDocument(2,4)
c = libsbml.Compartment(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c.setId("c")
c.setUnits("mole")
m.addCompartment(c)
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 10103 )
d = None
pass
def test_internal_consistency_check_99907(self):
d = libsbml.SBMLDocument(2,4)
c = libsbml.Compartment(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c.setId("c")
c.unsetVolume()
m.addCompartment(c)
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 10103 )
d = None
pass
def test_internal_consistency_check_99908(self):
d = libsbml.SBMLDocument(2,4)
ct = libsbml.CompartmentType(2,4)
m = d.createModel()
d.setLevelAndVersion(2,1,False)
ct.setId("ct")
m.addCompartmentType(ct)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99909(self):
d = libsbml.SBMLDocument(2,4)
ct = libsbml.Constraint(2,4)
m = d.createModel()
d.setLevelAndVersion(2,1,False)
m.addConstraint(ct)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99910(self):
d = libsbml.SBMLDocument(2,4)
e = libsbml.Event(2,4)
m = d.createModel()
d.setLevelAndVersion(1,2,False)
c = m.createCompartment()
c.setId("cc")
c.setConstant(False)
m.addEvent(e)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99911_ea(self):
d = libsbml.SBMLDocument(2,4)
d.setLevelAndVersion(2,1,False)
m = d.createModel()
c = m.createCompartment()
c.setId("c")
c.setConstant(False)
e = m.createEvent()
ast = libsbml.parseFormula("2*x")
t = e.createTrigger()
t.setMath(ast)
ea = libsbml.EventAssignment(2,4)
ea.setVariable("c")
ea.setSBOTerm(2)
ea.setMath(ast)
e.addEventAssignment(ea)
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 21203 )
d = None
pass
def test_internal_consistency_check_99911_event(self):
d = libsbml.SBMLDocument(2,4)
e = libsbml.Event(2,4)
m = d.createModel()
d.setLevelAndVersion(2,1,False)
e.setSBOTerm(2)
m.addEvent(e)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99911_fd(self):
d = libsbml.SBMLDocument(2,4)
m = d.createModel()
fd = libsbml.FunctionDefinition(2,4)
d.setLevelAndVersion(2,1,False)
fd.setId("fd")
fd.setSBOTerm(2)
m.addFunctionDefinition(fd)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99911_kl(self):
d = libsbml.SBMLDocument(2,4)
d.setLevelAndVersion(2,1,False)
m = d.createModel()
r = m.createReaction()
r.setId("r")
sr = r.createReactant()
sr.setSpecies("s")
kl = libsbml.KineticLaw(2,4)
kl.setSBOTerm(2)
p = kl.createParameter()
p.setId("p")
r.setKineticLaw(kl)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99911_model(self):
d = libsbml.SBMLDocument(2,4)
d.setLevelAndVersion(2,1,False)
m = libsbml.Model(2,4)
m.setSBOTerm(2)
d.setModel(m)
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 20201 )
d = None
pass
def test_internal_consistency_check_99911_param(self):
d = libsbml.SBMLDocument(2,4)
m = d.createModel()
p = libsbml.Parameter(2,4)
d.setLevelAndVersion(2,1,False)
p.setId("p")
p.setSBOTerm(2)
m.addParameter(p)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99911_react(self):
d = libsbml.SBMLDocument(2,4)
m = d.createModel()
r = libsbml.Reaction(2,4)
d.setLevelAndVersion(2,1,False)
r.setId("r")
r.setSBOTerm(2)
m.addReaction(r)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99911_rule_alg(self):
d = libsbml.SBMLDocument(2,4)
m = d.createModel()
r = libsbml.AlgebraicRule(2,4)
d.setLevelAndVersion(2,1,False)
r.setSBOTerm(2)
m.addRule(r)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99911_rule_assign(self):
d = libsbml.SBMLDocument(2,4)
m = d.createModel()
p = m.createParameter()
p.setId("p")
p.setConstant(False)
r = libsbml.AssignmentRule(2,4)
d.setLevelAndVersion(2,1,False)
r.setVariable("p")
r.setSBOTerm(2)
m.addRule(r)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99911_rule_rate(self):
d = libsbml.SBMLDocument(2,4)
m = d.createModel()
p = m.createParameter()
p.setId("p")
p.setConstant(False)
r = libsbml.RateRule(2,4)
d.setLevelAndVersion(2,1,False)
r.setVariable("p")
r.setSBOTerm(2)
m.addRule(r)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99911_speciesRef(self):
d = libsbml.SBMLDocument(2,4)
sr = libsbml.SpeciesReference(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("c")
s = m.createSpecies()
s.setId("s")
r = m.createReaction()
r.setId("r")
s.setCompartment("c")
sr.setSpecies("s")
sr.setSBOTerm(4)
r.addReactant(sr)
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 21101 )
d = None
pass
def test_internal_consistency_check_99912(self):
d = libsbml.SBMLDocument(2,4)
fd = libsbml.FunctionDefinition(2,4)
m = d.createModel()
d.setLevelAndVersion(1,2,False)
c = m.createCompartment()
c.setId("cc")
c.setConstant(False)
m.addFunctionDefinition(fd)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99913(self):
d = libsbml.SBMLDocument(2,4)
ia = libsbml.InitialAssignment(2,4)
m = d.createModel()
d.setLevelAndVersion(1,2,False)
c = m.createCompartment()
c.setId("cc")
c.setConstant(False)
m.addInitialAssignment(ia)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99914(self):
d = libsbml.SBMLDocument(2,4)
m = d.createModel()
r = libsbml.AlgebraicRule(2,4)
d.setLevelAndVersion(2,1,False)
r.setVariable("kk")
m.addRule(r)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99915_alg(self):
d = libsbml.SBMLDocument(2,4)
m = d.createModel()
r = libsbml.AlgebraicRule(2,4)
d.setLevelAndVersion(2,1,False)
r.setUnits("kk")
m.addRule(r)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99915_assign(self):
d = libsbml.SBMLDocument(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("c")
c.setConstant(False)
r = m.createAssignmentRule()
r.setL1TypeCode(libsbml.SBML_SPECIES_CONCENTRATION_RULE)
r.setVariable("c")
r.setFormula("2")
r.setUnits("mmm")
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99915_rate(self):
d = libsbml.SBMLDocument(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("c")
c.setConstant(False)
r = m.createRateRule()
r.setL1TypeCode(libsbml.SBML_SPECIES_CONCENTRATION_RULE)
r.setFormula("2")
r.setVariable("c")
r.setUnits("mmm")
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99916_reaction(self):
d = libsbml.SBMLDocument(2,4)
s = libsbml.Species(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("c")
r = m.createReaction()
r.setId("r")
sr = r.createReactant()
s.setId("s")
s.setCompartment("c")
s.setConstant(True)
sr.setSpecies("s")
m.addSpecies(s)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99916_rule(self):
d = libsbml.SBMLDocument(2,4)
s = libsbml.Species(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("c")
s.setId("s")
s.setCompartment("c")
s.setConstant(True)
m.addSpecies(s)
r = m.createAssignmentRule()
r.setVariable("s")
r.setFormula("2")
errors = d.checkInternalConsistency()
self.assert_( errors == 2 )
d = None
pass
def test_internal_consistency_check_99917(self):
d = libsbml.SBMLDocument(2,4)
s = libsbml.Species(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("c")
s.setId("s")
s.setCompartment("c")
s.setSpatialSizeUnits("kkk")
m.addSpecies(s)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99918(self):
d = libsbml.SBMLDocument(2,4)
s = libsbml.Species(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("c")
s.setId("s")
s.setCompartment("c")
s.setSpeciesType("kkk")
m.addSpecies(s)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99919(self):
d = libsbml.SBMLDocument(2,4)
s = libsbml.Species(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("c")
s.setId("s")
s.setCompartment("c")
s.setHasOnlySubstanceUnits(True)
m.addSpecies(s)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99920(self):
d = libsbml.SBMLDocument(2,4)
sr = libsbml.SpeciesReference(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("c")
s = m.createSpecies()
s.setId("s")
r = m.createReaction()
r.setId("r")
s.setCompartment("c")
sr.setSpecies("s")
sr.setId("mmm")
r.addProduct(sr)
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 21101 )
d = None
pass
def test_internal_consistency_check_99921(self):
d = libsbml.SBMLDocument(2,4)
sr = libsbml.SpeciesReference(2,4)
d.setLevelAndVersion(2,1,False)
m = d.createModel()
c = m.createCompartment()
c.setId("c")
s = m.createSpecies()
s.setId("s")
r = m.createReaction()
r.setId("r")
s.setCompartment("c")
sr.setSpecies("s")
sr.setName("mmm")
r.addReactant(sr)
errors = d.checkInternalConsistency()
self.assert_( errors == 1 )
self.assert_( d.getError(0).getErrorId() == 21101 )
d = None
pass
def test_internal_consistency_check_99922(self):
d = libsbml.SBMLDocument(2,4)
ct = libsbml.SpeciesType(2,4)
m = d.createModel()
ct.setId("st")
d.setLevelAndVersion(2,1,False)
m.addSpeciesType(ct)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99923(self):
d = libsbml.SBMLDocument(2,4)
sm = libsbml.StoichiometryMath(2,4)
m = d.createModel()
d.setLevelAndVersion(1,2,False)
s = m.createSpecies()
s.setId("s")
c = m.createCompartment()
c.setId("c")
s.setCompartment("c")
r = m.createReaction()
r.setId("r")
sr = r.createProduct()
sr.setSpecies("s")
sr.setStoichiometryMath(sm)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99924(self):
d = libsbml.SBMLDocument(2,4)
u = libsbml.Unit(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("cc")
ud = m.createUnitDefinition()
ud.setId("ud")
u.setKind(libsbml.UNIT_KIND_MOLE)
u.setMultiplier(9)
ud.addUnit(u)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def test_internal_consistency_check_99925(self):
d = libsbml.SBMLDocument(2,4)
u = libsbml.Unit(2,4)
d.setLevelAndVersion(1,2,False)
m = d.createModel()
c = m.createCompartment()
c.setId("cc")
ud = m.createUnitDefinition()
ud.setId("ud")
u.setKind(libsbml.UNIT_KIND_MOLE)
u.setOffset(9)
ud.addUnit(u)
errors = d.checkInternalConsistency()
self.assert_( errors == 0 )
d = None
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestInternalConsistencyChecks))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
from permuta import Permutation, Permutations
from permuta.misc import flatten, binary_search, choose, exact_cover
from permstruct import X, P, N, empty, generate_all_of_length, construct_rule
from permstruct.permutation_sets import SimpleGeneratingRule, GeneratingRule, StaticPermutationSet
from itertools import product
import random, sys
from copy import deepcopy
def avoids_312_vinc(perm):
for i in range(len(perm)):
for j in range(i+1, len(perm)):
k = j + 1
if k < len(perm) and perm[j] < perm[k] < perm[i]:
return False
return True
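# A hand-traced sanity check for the predicate above (a sketch; Permutation
# comes from permuta, imported at the top of this script). The pattern is a
# vincular 3-12: j and k are adjacent positions with perm[j] < perm[k] < perm[i].
#   avoids_312_vinc(Permutation([3, 1, 2]))  # False: 3 followed by adjacent 1, 2
#   avoids_312_vinc(Permutation([2, 3, 1]))  # True: no such occurrence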
def avoids_231_vinc(perm):
for i in range(len(perm)):
j = i + 1
for k in range(j+1, len(perm)):
if perm[k] < perm[i] < perm[j]:
return False
return True
def avoids_123_vinc(perm):
for i in range(len(perm)):
for j in range(i+1, len(perm)):
k = j + 1
if k < len(perm) and perm[i] < perm[j] < perm[k]:
return False
return True
def avoids_312_covinc(perm):
for i in range(len(perm)):
for j in range(i+1, len(perm)):
for k in range(j+1, len(perm)):
if perm[j] < perm[k] < perm[i] and perm[i] == 1 + perm[k]:
return False
return True
def avoids_132_covinc(perm):
for i in range(len(perm)):
for j in range(i+1, len(perm)):
for k in range(j+1, len(perm)):
if perm[i] < perm[k] < perm[j] and perm[j] == 1 + perm[k]:
return False
return True
def avoids_231_bivinc(perm):
for i in range(0, len(perm)):
j = i + 1
for k in range(j+1, len(perm)):
if perm[k] < perm[j] and perm[i] == 1 + perm[k]:
return False
return True
# for l in range(1, 8):
# cnt = 0
# for p in Permutations(l):
# if avoids_231_vinc(p) and p.avoids([1,2,3]):
# cnt += 1
# # print(p)
# print(l, cnt)
#
# import sys
# sys.exit(0)
avoiders_len_3 = []
for p in Permutations(3):
# Bind p through a default argument so each closure keeps its own pattern
# (a plain lambda here would late-bind the loop variable p).
avoiders_len_3.append((lambda perm, p=p: perm.avoids(p), StaticPermutationSet.from_predicate(lambda x, p=p: x.avoids(p), 6, description='Av(%s)' % str(p))))
# avoiders_len_3.append((lambda perm, p=p: len(perm) >= 3 and perm.avoids(p), StaticPermutationSet.from_predicate(lambda x, p=p: x.avoids(p), 6, description='Av(%s)' % str(p))))
incr = SimpleGeneratingRule(Permutation([1,2]), [X, P], description='increasing').to_static(8, empty)
decr = SimpleGeneratingRule(Permutation([2,1]), [X, P], description='decreasing').to_static(8, empty)
incr_nonempty = SimpleGeneratingRule(Permutation([1,2]), [X, P], description='increasing nonempty').to_static(8, {1:[Permutation([1])]})
decr_nonempty = SimpleGeneratingRule(Permutation([2,1]), [X, P], description='decreasing nonempty').to_static(8, {1:[Permutation([1])]})
max_len = 6
n_range = (2, 3) # number of rows (min, max)
m_range = (2, 3) # number of columns (min, max)
max_nonempty = 4
max_ec_cnt = 4
# permProp = lambda perm: perm.avoids([1,2])
permProp = lambda perm: perm.avoids([2,3,1])
# permProp = lambda perm: perm.avoids([1,4,2,3])
# permProp = lambda perm: perm.avoids([1,3,4,2])
# permProp = lambda perm : perm.avoids([2,3,1]) and perm.avoids([1,2,3])
# permProp = avoids_312_vinc
# permProp = avoids_123_vinc
# permProp = avoids_231_bivinc
# permProp = lambda p: avoids_231_vinc(p) and p.avoids([1,2,3])
# permProp = lambda p: avoids_123_vinc(p) and avoids_312_covinc(p)
# permProp = lambda p: avoids_132_covinc(p) and avoids_123_vinc(p)
# for l in range(1, 10):
# cnt = 0
# for p in Permutations(l):
# if permProp(p):
# cnt += 1
# print(cnt)
#
# import sys
# sys.exit(0)
# Cute example
# G = GeneratingRule([
# [N, N, P],
# [X, P, N],
# ])
# G = GeneratingRule([
# [N,P,N],
# [N,P,N],
# [X,N,decr],
# ])
# G = GeneratingRule([
# [N,P,N],
# [N,P,N],
# [X,N,decr],
# ])
# G = GeneratingRule([
# [decr,N,decr],
# [N,P,N],
# ])
# +-+-+-+-+
# |1|1|2|1|
# +-+-+-+-+
# |1|o|1|1|
# +-+-+-+-+
# |1|1|o|1|
# +-+-+-+-+
# |1|1|1|X|
# +-+-+-+-+
# 1: empty permutation
# 2: Av([2, 1, 3])
# 1 1 1 3 9 31 111 409
# G = GeneratingRule([
# [P,P,P],
# [P,P,P],
# [P,P,P]
# ])
# G = GeneratingRule([
# [P,P],
# [P,P],
# ])
G = GeneratingRule([
[P,P,P,P],
[P,P,P,P],
[P,P,P,P],
[P,P,P,P],
])
# av321_132 = (lambda perm: perm.avoids(Permutation([3,2,1])) and perm.avoids(Permutation([1,3,2])),StaticPermutationSet.from_predicate(lambda x: x.avoids(p), 6, description='Av(%s,%s)' % (Permutation([3,2,1]), Permutation([1,3,2]))))
# inp_without_incr = (lambda perm: perm.avoids(p), StaticPermutationSet.from_predicate(lambda x: x.avoids(p), 6, description='input without incr'))
# # res = generate_all_of_length(10, G, {0:[()], 1:[(1,)]}, 2)
res = generate_all_of_length(8, G, {0:[()]}, 2)
# # print(res)
for l in res:
# print(res)
print(len(res[l]))
#
# import sys
# sys.exit(0)
inputs = [
(permProp, X),
(lambda perm: len(perm) == 1, P),
(lambda perm: perm == Permutation(sorted(perm)), incr),
(lambda perm: perm == Permutation(sorted(perm)[::-1]), decr),
(lambda perm: len(perm) >= 1 and perm == Permutation(sorted(perm)), incr_nonempty),
(lambda perm: len(perm) >= 1 and perm == Permutation(sorted(perm)[::-1]), decr_nonempty),
]
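# Each entry above pairs a membership predicate with the permutation set it
# describes; construct_rule below is assumed to draw on these as the allowed
# building blocks when it searches for a valid rule grid.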
# inputs += avoiders_len_3
construct_rule(permProp,
max_len,
n_range,
m_range,
max_nonempty,
max_ec_cnt,
inputs,
ignore_first=1,
allow_overlap_in_first=True)
#!/usr/bin/env python
"""Generates the pins file for the CC3200."""
from __future__ import print_function
import argparse
import sys
import csv
SUPPORTED_AFS = { 'UART': ('TX', 'RX', 'RTS', 'CTS'),
'SPI': ('CLK', 'MOSI', 'MISO', 'CS0'),
#'I2S': ('CLK', 'FS', 'DAT0', 'DAT1'),
'I2C': ('SDA', 'SCL'),
'TIM': ('PWM',),  # one-element tuple; a bare ('PWM') is just the string
'SD': ('CLK', 'CMD', 'DAT0'),
'ADC': ('CH0', 'CH1', 'CH2', 'CH3')
}
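# For orientation (inferred from how parse_af_file below splits each AF cell
# on '_'): a cell such as 'UART1_TX' is read as function 'UART', unit 1,
# type 'TX', and only the function/type pairs listed above are kept. The cell
# name itself is illustrative, not taken from a specific cc3200_af.csv.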
def parse_port_pin(name_str):
"""Parses a string and returns a (port, gpio_bit) tuple."""
if len(name_str) < 3:
raise ValueError("Expecting pin name to be at least 3 characters")
if name_str[:2] != 'GP':
raise ValueError("Expecting pin name to start with GP")
if not name_str[2:].isdigit():
raise ValueError("Expecting numeric GPIO number")
port = int(int(name_str[2:]) / 8)
gpio_bit = 1 << int(int(name_str[2:]) % 8)
return (port, gpio_bit)
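# Example, traced from the arithmetic above ('GP10' is a hypothetical name):
#   parse_port_pin('GP10') -> (1, 0x04)   # port = 10 // 8, bit = 1 << (10 % 8)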
class AF:
"""Holds the description of an alternate function"""
def __init__(self, name, idx, fn, unit, type):
self.name = name
self.idx = idx
if self.idx > 15:
self.idx = -1
self.fn = fn
self.unit = unit
self.type = type
def print(self):
print (' AF({:16s}, {:4d}, {:8s}, {:4d}, {:8s}), // {}'.format(self.name, self.idx, self.fn, self.unit, self.type, self.name))
class Pin:
"""Holds the information associated with a pin."""
def __init__(self, name, port, gpio_bit, pin_num):
self.name = name
self.port = port
self.gpio_bit = gpio_bit
self.pin_num = pin_num
self.board_pin = False
self.afs = []
def add_af(self, af):
self.afs.append(af)
def print(self):
print('// {}'.format(self.name))
if len(self.afs):
print('const pin_af_t pin_{}_af[] = {{'.format(self.name))
for af in self.afs:
af.print()
print('};')
print('pin_obj_t pin_{:4s} = PIN({:6s}, {:1d}, {:3d}, {:2d}, pin_{}_af, {});\n'.format(
self.name, self.name, self.port, self.gpio_bit, self.pin_num, self.name, len(self.afs)))
else:
print('pin_obj_t pin_{:4s} = PIN({:6s}, {:1d}, {:3d}, {:2d}, NULL, 0);\n'.format(
self.name, self.name, self.port, self.gpio_bit, self.pin_num))
def print_header(self, hdr_file):
hdr_file.write('extern pin_obj_t pin_{:s};\n'.format(self.name))
class Pins:
def __init__(self):
self.board_pins = [] # list of pin objects
def find_pin(self, port, gpio_bit):
for pin in self.board_pins:
if pin.port == port and pin.gpio_bit == gpio_bit:
return pin
def find_pin_by_num(self, pin_num):
for pin in self.board_pins:
if pin.pin_num == pin_num:
return pin
def find_pin_by_name(self, name):
for pin in self.board_pins:
if pin.name == name:
return pin
def parse_af_file(self, filename, pin_col, pinname_col, af_start_col):
with open(filename, 'r') as csvfile:
rows = csv.reader(csvfile)
for row in rows:
try:
(port_num, gpio_bit) = parse_port_pin(row[pinname_col])
except (ValueError, IndexError):  # row does not describe a GPIO pin
continue
if not row[pin_col].isdigit():
raise ValueError("Invalid pin number {:s} in row {:s}".format(row[pin_col], str(row)))
# Pin numbers must start from 0 when used with the TI API
pin_num = int(row[pin_col]) - 1
pin = Pin(row[pinname_col], port_num, gpio_bit, pin_num)
self.board_pins.append(pin)
af_idx = 0
for af in row[af_start_col:]:
af_splitted = af.split('_')
fn_name = af_splitted[0].rstrip('0123456789')
if fn_name in SUPPORTED_AFS:
type_name = af_splitted[1]
if type_name in SUPPORTED_AFS[fn_name]:
unit_idx = af_splitted[0][-1]
pin.add_af(AF(af, af_idx, fn_name, int(unit_idx), type_name))
af_idx += 1
def parse_board_file(self, filename, cpu_pin_col):
with open(filename, 'r') as csvfile:
rows = csv.reader(csvfile)
for row in rows:
# Pin numbers must start from 0 when used with the TI API
if row[cpu_pin_col].isdigit():
pin = self.find_pin_by_num(int(row[cpu_pin_col]) - 1)
else:
pin = self.find_pin_by_name(row[cpu_pin_col])
if pin:
pin.board_pin = True
def print_named(self, label, pins):
print('')
print('STATIC const mp_rom_map_elem_t pin_{:s}_pins_locals_dict_table[] = {{'.format(label))
for pin in pins:
if pin.board_pin:
print(' {{ MP_ROM_QSTR(MP_QSTR_{:6s}), MP_ROM_PTR(&pin_{:6s}) }},'.format(pin.name, pin.name))
print('};')
print('MP_DEFINE_CONST_DICT(pin_{:s}_pins_locals_dict, pin_{:s}_pins_locals_dict_table);'.format(label, label))
def print(self):
for pin in self.board_pins:
if pin.board_pin:
pin.print()
self.print_named('board', self.board_pins)
print('')
def print_header(self, hdr_filename):
with open(hdr_filename, 'wt') as hdr_file:
for pin in self.board_pins:
if pin.board_pin:
pin.print_header(hdr_file)
def print_qstr(self, qstr_filename):
with open(qstr_filename, 'wt') as qstr_file:
pin_qstr_set = set([])
af_qstr_set = set([])
for pin in self.board_pins:
if pin.board_pin:
pin_qstr_set |= set([pin.name])
for af in pin.afs:
af_qstr_set |= set([af.name])
print('// Board pins', file=qstr_file)
for qstr in sorted(pin_qstr_set):
print('Q({})'.format(qstr), file=qstr_file)
print('\n// Pin AFs', file=qstr_file)
for qstr in sorted(af_qstr_set):
print('Q({})'.format(qstr), file=qstr_file)
def main():
parser = argparse.ArgumentParser(
prog="make-pins.py",
usage="%(prog)s [options] [command]",
description="Generate board specific pin file"
)
parser.add_argument(
"-a", "--af",
dest="af_filename",
help="Specifies the alternate function file for the chip",
default="cc3200_af.csv"
)
parser.add_argument(
"-b", "--board",
dest="board_filename",
help="Specifies the board file",
)
parser.add_argument(
"-p", "--prefix",
dest="prefix_filename",
help="Specifies beginning portion of generated pins file",
default="cc3200_prefix.c"
)
parser.add_argument(
"-q", "--qstr",
dest="qstr_filename",
help="Specifies name of generated qstr header file",
default="build/pins_qstr.h"
)
parser.add_argument(
"-r", "--hdr",
dest="hdr_filename",
help="Specifies name of generated pin header file",
default="build/pins.h"
)
args = parser.parse_args(sys.argv[1:])
pins = Pins()
print('// This file was automatically generated by make-pins.py')
print('//')
if args.af_filename:
print('// --af {:s}'.format(args.af_filename))
pins.parse_af_file(args.af_filename, 0, 1, 3)
if args.board_filename:
print('// --board {:s}'.format(args.board_filename))
pins.parse_board_file(args.board_filename, 1)
if args.prefix_filename:
print('// --prefix {:s}'.format(args.prefix_filename))
print('')
with open(args.prefix_filename, 'r') as prefix_file:
print(prefix_file.read())
pins.print()
pins.print_qstr(args.qstr_filename)
pins.print_header(args.hdr_filename)
if __name__ == "__main__":
main()
"""Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
import warnings
from .base import is_classifier, clone
from .cross_validation import _check_cv
from .utils import check_arrays
from .externals.joblib import Parallel, delayed
from .metrics.scorer import get_scorer
from .grid_search import _check_scorable, _split, _fit, _score
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 10),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times into training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape = (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 10))
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = [n_unique_ticks,], dtype int
Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape = [n_ticks,]
Scores on training sets.
test_scores : array, shape = [n_ticks,]
Scores on test set.
Notes
-----
See :ref:`examples/plot_learning_curve.py <example_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = check_arrays(X, y, sparse_format='csr', allow_lists=True)
# Make a list since we will be iterating multiple times over the folds
cv = list(_check_cv(cv, X, y, classifier=is_classifier(estimator)))
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
_check_scorable(estimator, scoring=scoring)
scorer = get_scorer(scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
if is_classifier(estimator):
classes = np.unique(y)
else:
classes = None
out = parallel(delayed(_incremental_fit_estimator)(
estimator, X, y, classes, train, test, train_sizes_abs, scorer,
verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_estimator)(
estimator, X, y, train, test, n_train_samples, scorer, verbose)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
avg_over_cv = np.asarray(out).mean(axis=0).reshape(n_unique_ticks, 2)
return train_sizes_abs, avg_over_cv[:, 0], avg_over_cv[:, 1]
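# A minimal usage sketch, kept as a comment so the module still imports
# cleanly; SVC and the X/y arrays are placeholders, not part of this module:
#
#     from sklearn.svm import SVC
#     sizes, train_scores, test_scores = learning_curve(
#         SVC(kernel="linear"), X, y,
#         train_sizes=np.linspace(0.1, 1.0, 5), cv=5, n_jobs=1)
#
# train_scores and test_scores are the per-size averages over the cv folds.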
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape = (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape = [n_unique_ticks,], dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = (train_sizes_abs
* n_max_training_samples).astype(np.int)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _fit_estimator(base_estimator, X, y, train, test,
n_train_samples, scorer, verbose):
"""Train estimator on a training subset and compute scores."""
train_subset = train[:n_train_samples]
estimator = clone(base_estimator)
X_train, y_train = _split(estimator, X, y, train_subset)
X_test, y_test = _split(estimator, X, y, test, train_subset)
_fit(estimator.fit, X_train, y_train)
train_score = _score(estimator, X_train, y_train, scorer)
test_score = _score(estimator, X_test, y_test, scorer)
return train_score, test_score
def _incremental_fit_estimator(base_estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
estimator = clone(base_estimator)
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
X_train, y_train = _split(estimator, X, y, train[:n_train_samples])
X_partial_train, y_partial_train = _split(estimator, X, y,
partial_train)
X_test, y_test = _split(estimator, X, y, test, train[:n_train_samples])
_fit(estimator.partial_fit, X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
"""Config flow to configure deCONZ component."""
import asyncio
from pprint import pformat
from urllib.parse import urlparse
import async_timeout
from pydeconz.errors import RequestError, ResponseError
from pydeconz.utils import (
async_discovery,
async_get_api_key,
async_get_bridge_id,
normalize_bridge_id,
)
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.const import CONF_API_KEY, CONF_HOST, CONF_PORT
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
from .const import (
CONF_ALLOW_CLIP_SENSOR,
CONF_ALLOW_DECONZ_GROUPS,
CONF_ALLOW_NEW_DEVICES,
CONF_BRIDGE_ID,
DEFAULT_PORT,
DOMAIN,
LOGGER,
)
from .gateway import get_gateway_from_config_entry
DECONZ_MANUFACTURERURL = "http://www.dresden-elektronik.de"
CONF_SERIAL = "serial"
CONF_MANUAL_INPUT = "Manually define gateway"
@callback
def get_master_gateway(hass):
"""Return the gateway which is marked as master."""
for gateway in hass.data[DOMAIN].values():
if gateway.master:
return gateway
class DeconzFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a deCONZ config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
_hassio_discovery = None
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return DeconzOptionsFlowHandler(config_entry)
def __init__(self):
"""Initialize the deCONZ config flow."""
self.bridge_id = None
self.bridges = []
self.deconz_config = {}
async def async_step_user(self, user_input=None):
"""Handle a deCONZ config flow start.
Let user choose between discovered bridges and manual configuration.
If no bridge is found allow user to manually input configuration.
"""
if user_input is not None:
if CONF_MANUAL_INPUT == user_input[CONF_HOST]:
return await self.async_step_manual_input()
for bridge in self.bridges:
if bridge[CONF_HOST] == user_input[CONF_HOST]:
self.bridge_id = bridge[CONF_BRIDGE_ID]
self.deconz_config = {
CONF_HOST: bridge[CONF_HOST],
CONF_PORT: bridge[CONF_PORT],
}
return await self.async_step_link()
session = aiohttp_client.async_get_clientsession(self.hass)
try:
with async_timeout.timeout(10):
self.bridges = await async_discovery(session)
except (asyncio.TimeoutError, ResponseError):
self.bridges = []
LOGGER.debug("Discovered deCONZ gateways %s", pformat(self.bridges))
if self.bridges:
hosts = []
for bridge in self.bridges:
hosts.append(bridge[CONF_HOST])
hosts.append(CONF_MANUAL_INPUT)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema({vol.Optional(CONF_HOST): vol.In(hosts)}),
)
return await self.async_step_manual_input()
async def async_step_manual_input(self, user_input=None):
"""Manual configuration."""
if user_input:
self.deconz_config = user_input
return await self.async_step_link()
return self.async_show_form(
step_id="manual_input",
data_schema=vol.Schema(
{
vol.Required(CONF_HOST): str,
vol.Required(CONF_PORT, default=DEFAULT_PORT): int,
}
),
)
async def async_step_link(self, user_input=None):
"""Attempt to link with the deCONZ bridge."""
errors = {}
LOGGER.debug(
"Preparing linking with deCONZ gateway %s", pformat(self.deconz_config)
)
if user_input is not None:
session = aiohttp_client.async_get_clientsession(self.hass)
try:
with async_timeout.timeout(10):
api_key = await async_get_api_key(session, **self.deconz_config)
except (ResponseError, RequestError, asyncio.TimeoutError):
errors["base"] = "no_key"
else:
self.deconz_config[CONF_API_KEY] = api_key
return await self._create_entry()
return self.async_show_form(step_id="link", errors=errors)
async def _create_entry(self):
"""Create entry for gateway."""
if not self.bridge_id:
session = aiohttp_client.async_get_clientsession(self.hass)
try:
with async_timeout.timeout(10):
self.bridge_id = await async_get_bridge_id(
session, **self.deconz_config
)
await self.async_set_unique_id(self.bridge_id)
self._abort_if_unique_id_configured(
updates={
CONF_HOST: self.deconz_config[CONF_HOST],
CONF_PORT: self.deconz_config[CONF_PORT],
CONF_API_KEY: self.deconz_config[CONF_API_KEY],
}
)
except asyncio.TimeoutError:
return self.async_abort(reason="no_bridges")
return self.async_create_entry(title=self.bridge_id, data=self.deconz_config)
async def async_step_reauth(self, config: dict):
"""Trigger a reauthentication flow."""
self.context["title_placeholders"] = {CONF_HOST: config[CONF_HOST]}
self.deconz_config = {
CONF_HOST: config[CONF_HOST],
CONF_PORT: config[CONF_PORT],
}
return await self.async_step_link()
async def async_step_ssdp(self, discovery_info):
"""Handle a discovered deCONZ bridge."""
if (
discovery_info.get(ssdp.ATTR_UPNP_MANUFACTURER_URL)
!= DECONZ_MANUFACTURERURL
):
return self.async_abort(reason="not_deconz_bridge")
LOGGER.debug("deCONZ SSDP discovery %s", pformat(discovery_info))
self.bridge_id = normalize_bridge_id(discovery_info[ssdp.ATTR_UPNP_SERIAL])
parsed_url = urlparse(discovery_info[ssdp.ATTR_SSDP_LOCATION])
entry = await self.async_set_unique_id(self.bridge_id)
if entry and entry.source == "hassio":
return self.async_abort(reason="already_configured")
self._abort_if_unique_id_configured(
updates={CONF_HOST: parsed_url.hostname, CONF_PORT: parsed_url.port}
)
self.context["title_placeholders"] = {"host": parsed_url.hostname}
self.deconz_config = {
CONF_HOST: parsed_url.hostname,
CONF_PORT: parsed_url.port,
}
return await self.async_step_link()
async def async_step_hassio(self, discovery_info):
"""Prepare configuration for a Hass.io deCONZ bridge.
This flow is triggered by the discovery component.
"""
LOGGER.debug("deCONZ HASSIO discovery %s", pformat(discovery_info))
self.bridge_id = normalize_bridge_id(discovery_info[CONF_SERIAL])
await self.async_set_unique_id(self.bridge_id)
self._abort_if_unique_id_configured(
updates={
CONF_HOST: discovery_info[CONF_HOST],
CONF_PORT: discovery_info[CONF_PORT],
CONF_API_KEY: discovery_info[CONF_API_KEY],
}
)
self._hassio_discovery = discovery_info
return await self.async_step_hassio_confirm()
async def async_step_hassio_confirm(self, user_input=None):
"""Confirm a Hass.io discovery."""
if user_input is not None:
self.deconz_config = {
CONF_HOST: self._hassio_discovery[CONF_HOST],
CONF_PORT: self._hassio_discovery[CONF_PORT],
CONF_API_KEY: self._hassio_discovery[CONF_API_KEY],
}
return await self._create_entry()
return self.async_show_form(
step_id="hassio_confirm",
description_placeholders={"addon": self._hassio_discovery["addon"]},
)
class DeconzOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle deCONZ options."""
def __init__(self, config_entry):
"""Initialize deCONZ options flow."""
self.config_entry = config_entry
self.options = dict(config_entry.options)
self.gateway = None
async def async_step_init(self, user_input=None):
"""Manage the deCONZ options."""
self.gateway = get_gateway_from_config_entry(self.hass, self.config_entry)
return await self.async_step_deconz_devices()
async def async_step_deconz_devices(self, user_input=None):
"""Manage the deconz devices options."""
if user_input is not None:
self.options.update(user_input)
return self.async_create_entry(title="", data=self.options)
return self.async_show_form(
step_id="deconz_devices",
data_schema=vol.Schema(
{
vol.Optional(
CONF_ALLOW_CLIP_SENSOR,
default=self.gateway.option_allow_clip_sensor,
): bool,
vol.Optional(
CONF_ALLOW_DECONZ_GROUPS,
default=self.gateway.option_allow_deconz_groups,
): bool,
vol.Optional(
CONF_ALLOW_NEW_DEVICES,
default=self.gateway.option_allow_new_devices,
): bool,
}
),
)
#!/usr/bin/env python
#
# The ID3v2 specification is at http://id3.org
#
# Introduces:
# class
# self
# docstrings
# bytearray
# str.format alignment
#
# Copyright (c) 2013, pynewb
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
# * Neither the name of pynewb nor the names of its contributors may be used to endorse
# or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import argparse
import os
import os.path
import sys
import mp3_event_parser
def isprint(ch):
'''Gets whether a byte represents an ASCII printable character'''
return ch >= 32 and ch < 127
def print_bytes(stringbuf):
'''Prints bytes in hex and ASCII translation, 16 bytes per line'''
hexbuf = ""
buf = ""
for c in stringbuf:
ch = ord(c)
hexbuf = hexbuf + "{0:02x} ".format(ch)
if isprint(ch):
buf = buf + c
else:
buf = buf + '.'
if len(buf) >= 16:
print(hexbuf, ' ', buf)
hexbuf = ""
buf = ""
if len(buf) > 0:
while len(buf) < 16:
hexbuf = hexbuf + ' '
buf = buf + ' '
print(hexbuf, ' ', buf)
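# Rough sketch of the output format (hand-traced; the byte values are the
# ASCII codes of the input): print_bytes('ID3\x03\x00') prints a single
# padded line resembling
#   49 44 33 03 00 ...                               ID3..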
def print_apic_frame(frame_dict):
'''Prints data for an ID3v2.3 attached picture frame'''
print("{0:>40s} : {1}".format('Attached picture mime type', frame_dict['mime_type']))
print("{0:>40s} : {1}".format('Attached picture description', frame_dict['description_string']))
print("{0:>40s} : {1:d}".format('Attached picture data length', len(frame_dict['picture_data'])))
def print_comm_frame(frame_dict):
'''Prints data for an ID3v2.3 comment frame'''
print("{0:>40s} : {1}".format('Comment language', frame_dict['language']))
print("{0:>40s} : {1}".format('Comment description', frame_dict['descriptor_string']))
print("{0:>40s} : {1}".format('Comment text', frame_dict['comment_string']))
def print_geob_frame(frame_dict):
'''Prints data for an ID3v2.3 general encapsulated object frame'''
print("{0:>40s} : {1}".format('General encapsulated object mime type', frame_dict['mime_type']))
print("{0:>40s} : {1}".format('General encapsulated object description', frame_dict['description_string']))
print("{0:>40s} : {1}".format('General encapsulated object filename', frame_dict['filename_string']))
print("{0:>40s} : {1:d}".format('General encapsulated object data length', len(frame_dict['binary_data'])))
def print_mcdi_frame(frame_dict):
'''Prints data for an ID3v2.3 music CD identifier frame'''
print("{0:>40s} : {1:d}".format('Music CD identifier data length', len(frame_dict['identifier_data'])))
def print_priv_frame(frame_dict):
'''Prints data for an ID3v2.3 private frame'''
print("{0:>40s} : {1}".format('Private owner', frame_dict['owner_string']))
print("{0:>40s} : {1:d}".format('Private data length', len(frame_dict['private_data'])))
if frame_dict['owner_string'] == 'WM/UniqueFileIdentifier' or frame_dict['owner_string'] == 'WM/Provider':
private_len, private_string = mp3_event_parser.unpack_unicode(str(frame_dict['private_data']))
if len(private_string) > 0:
print("{0:>40s} : {1}".format('Private data', private_string))
def print_text_info_frame(frame_name, frame_dict):
'''Prints data for an ID3v2.3 text info frame'''
print("{0:>40s} : {1}".format(frame_name, frame_dict['frame_string']))
def print_uslt_frame(frame_dict):
'''Prints data for an ID3v2.3 unsynchronized lyric translation frame'''
print("{0:>40s} : {1}".format('Unsynchronized lyric translation language', frame_dict['language']))
print("{0:>40s} : {1}".format('Unsynchronized lyric translation description', frame_dict['descriptor_string']))
print("{0:>40s} : {1}".format('Unsynchronized lyric translation text', frame_dict['lyrics_string']))
class ID3v2Printer(object):
'''A handler for the ID3v2 file parser that prints the parsed pieces'''
def __init__(self, aatpath, hexdump, print_headers, frame_types):
self.aatpath = aatpath
self.hexdump = hexdump
self.print_headers = print_headers
if frame_types:
self.frame_types = frame_types.split(',')
else:
self.frame_types = None
def on_aatpath(self, artist, album, track):
if self.aatpath:
print("Artist: {0} Album: {1} Track: {2}".format(artist, album, track))
def on_id3v2_header(self, version, revision, flags, size):
if self.print_headers:
print("ID3v2 version {0:d} revision {1:d} flags {2:02x} size {3:d}".format(version, revision, flags, size))
def on_id3v2dot3_frame(self, frame_type, frame_dict):
if self.frame_types and frame_type not in self.frame_types:
return
if frame_type == 'APIC':
print_apic_frame(frame_dict)
elif frame_type == 'COMM':
print_comm_frame(frame_dict)
elif frame_type == 'GEOB':
print_geob_frame(frame_dict)
elif frame_type == 'MCDI':
print_mcdi_frame(frame_dict)
elif frame_type == 'PRIV':
print_priv_frame(frame_dict)
elif frame_type == 'TALB':
print_text_info_frame('Album/Movie/Show Title', frame_dict)
elif frame_type == 'TBPM':
print_text_info_frame('BPM (beats per minute)', frame_dict)
elif frame_type == 'TCOM':
print_text_info_frame('Composer', frame_dict)
elif frame_type == 'TCON':
print_text_info_frame('Content type', frame_dict)
elif frame_type == 'TCOP':
print_text_info_frame('Copyright message', frame_dict)
elif frame_type == 'TENC':
print_text_info_frame('Encoded by', frame_dict)
elif frame_type == 'TFLT':
print_text_info_frame('File type', frame_dict)
elif frame_type == 'TIT1':
print_text_info_frame('Content group description', frame_dict)
elif frame_type == 'TIT2':
print_text_info_frame('Title/songname/content description', frame_dict)
elif frame_type == 'TIT3':
print_text_info_frame('Subtitle/Description refinement', frame_dict)
elif frame_type == 'TLEN':
print_text_info_frame('Length', frame_dict)
elif frame_type == 'TPE1':
print_text_info_frame('Lead performer(s)/Soloist(s)', frame_dict)
elif frame_type == 'TPE2':
print_text_info_frame('Band/orchestra/accompaniment', frame_dict)
elif frame_type == 'TPE3':
print_text_info_frame('Conductor/performer refinement', frame_dict)
elif frame_type == 'TPOS':
print_text_info_frame('Part of set', frame_dict)
elif frame_type == 'TPUB':
print_text_info_frame('Publisher', frame_dict)
elif frame_type == 'TRCK':
print_text_info_frame('Track number/Position in set', frame_dict)
elif frame_type == 'TXXX':
print_text_info_frame('User defined text information frame', frame_dict)
elif frame_type == 'TYER':
print_text_info_frame('Year', frame_dict)
elif frame_type == 'USLT':
print_uslt_frame(frame_dict)
else:
print("Do not know frame type", frame_type, file=sys.stderr)
def on_id3v2dot3_frame_header(self, frame_type, frame_size, frame_flags):
if self.print_headers:
print("type: {0} size: {1:d} flags: {2:04x}".format(frame_type, frame_size, frame_flags))
def on_path(self, path):
print(path)
def on_raw_id3v2_header(self, header):
if self.hexdump:
print_bytes(header)
def on_raw_id3v2dot3_frame(self, frame_type, frame_data):
if self.hexdump:
print_bytes(frame_data)
def on_raw_id3v2dot3_frame_header(self, frame_header):
if self.hexdump:
print_bytes(frame_header)
def walk_mp3_and_parse(dirpath, aatpath, parser_handler):
'''Walks a directory tree for MP3 files and parses them'''
if not os.path.isdir(dirpath):
print(dirpath + " is not a directory", file=sys.stderr)
return
parser = None
for root, dirs, files in os.walk(dirpath):
for file in files:
if file.endswith('.mp3'):
if parser is None:
parser = mp3_event_parser.ID3v2Parser()
parser.parse_id3v2_file(os.path.join(root, file), aatpath, parser_handler)
if __name__ == '__main__':
# Entry point when run as a standalone script.
parser = argparse.ArgumentParser(description='List MP3 file information')
parser.add_argument('directories', metavar='directory', nargs='+',
help='The directories to traverse')
parser.add_argument('--aatpath', dest='aatpath', action='store_const',
const=True, default=False,
help='Derive artist/album/track from the file path')
parser.add_argument('--frame-types', dest='frame_types', action='store',
default='',
help='Frame types to print (default is all)')
parser.add_argument('--hexdump', dest='hexdump', action='store_const',
const=True, default=False,
help='Print a hex dump of frame information')
parser.add_argument('--print-headers', dest='print_headers', action='store_const',
const=True, default=False,
help='Print file and frame headers')
args = parser.parse_args()
parser_handler = ID3v2Printer(args.aatpath, args.hexdump, args.print_headers, args.frame_types)
for directory in args.directories:
walk_mp3_and_parse(os.path.expanduser(directory), args.aatpath, parser_handler)
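# A minimal usage sketch (the script filename and the music path are
# hypothetical; point it at any directories containing .mp3 files):
#
#   python list_id3v2_frames.py ~/Music --aatpath --frame-types TIT2,TPE1,TALB --print-headers
#
# --frame-types takes a comma-separated list of ID3v2.3 frame ids (see
# ID3v2Printer.__init__), and --hexdump additionally dumps the raw header and
# frame bytes via print_bytes.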
| |
from functools import partial
from pubsub import pub
from threading import Thread
from time import sleep
import wx
from wx.lib.agw.floatspin import FloatSpin
from spacq.gui.tool.box import load_csv, save_csv, Dialog, MessageDialog
from spacq.interface.units import Quantity
"""
Configuration for a ch6VoltageSource.
"""
class ch6VoltageSourceTunerDialog(Dialog):
"""
A dialog for tuning a voltage source port.
"""
def __init__(self, parent, global_store, ok_callback, port, *args, **kwargs):
Dialog.__init__(self, parent, title='Port {0} tuning'.format(port.num))
self.global_store = global_store
self.ok_callback = ok_callback
self.port = port
# Dialog.
dialog_box = wx.BoxSizer(wx.VERTICAL)
## Self-calibration.
calibration_static_box = wx.StaticBox(self, label='DAC self-calibration')
calibration_box = wx.StaticBoxSizer(calibration_static_box, wx.VERTICAL)
dialog_box.Add(calibration_box, flag=wx.EXPAND|wx.ALL, border=5)
self.calibrate_button = wx.Button(self, label='Self-calibrate')
self.Bind(wx.EVT_BUTTON, self.OnCalibrate, self.calibrate_button)
calibration_box.Add(self.calibrate_button, flag=wx.EXPAND)
## Tuning.
tuning_static_box = wx.StaticBox(self, label='Tuning')
tuning_box = wx.StaticBoxSizer(tuning_static_box, wx.VERTICAL)
dialog_box.Add(tuning_box, flag=wx.EXPAND)
### Autotune.
autotuning_static_box = wx.StaticBox(self, label='Autotuning')
autotuning_box = wx.StaticBoxSizer(autotuning_static_box, wx.VERTICAL)
tuning_box.Add(autotuning_box, flag=wx.EXPAND|wx.ALL, border=5)
autotuning_sizer = wx.FlexGridSizer(rows=3, cols=2, hgap=5)
autotuning_box.Add(autotuning_sizer, flag=wx.CENTER)
autotuning_sizer.Add(wx.StaticText(self, label='Resource name:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.resource_name_input = wx.TextCtrl(self, size=(300,-1))
autotuning_sizer.Add(self.resource_name_input)
autotuning_sizer.Add(wx.StaticText(self, label='Max:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.automax_input = FloatSpin(self, value=1, min_val=-10, max_val=10, increment=1,
digits=5)
autotuning_sizer.Add(self.automax_input)
autotuning_sizer.Add(wx.StaticText(self, label='Min:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.automin_input = FloatSpin(self, value=-1, min_val=-10, max_val=10, increment=1,
digits=5)
autotuning_sizer.Add(self.automin_input)
self.autotune_button = wx.Button(self, label='Autotune')
self.Bind(wx.EVT_BUTTON, self.OnAutotune, self.autotune_button)
autotuning_box.Add(self.autotune_button, flag=wx.EXPAND)
### Manual tune.
tuning_sizer = wx.FlexGridSizer(rows=2, cols=2, hgap=5)
tuning_box.Add(tuning_sizer, flag=wx.CENTER)
tuning_sizer.Add(wx.StaticText(self, label='Gain:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.gain_input = FloatSpin(self, value=0, min_val=-1e6, max_val=1e6, increment=1,
digits=5)
tuning_sizer.Add(self.gain_input)
tuning_sizer.Add(wx.StaticText(self, label='Offset:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.offset_input = FloatSpin(self, value=0, min_val=-1e6, max_val=1e6, increment=1,
digits=5)
tuning_sizer.Add(self.offset_input)
## End buttons.
button_box = wx.BoxSizer(wx.HORIZONTAL)
dialog_box.Add(button_box, flag=wx.CENTER|wx.ALL, border=5)
ok_button = wx.Button(self, wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.OnOk, ok_button)
button_box.Add(ok_button)
cancel_button = wx.Button(self, wx.ID_CANCEL)
button_box.Add(cancel_button)
self.SetSizerAndFit(dialog_box)
def autotune(self, resource):
gain, offset = self.port.autotune(resource, set_result=False,
min_value=self.automin_input.GetValue(),
max_value=self.automax_input.GetValue())
wx.CallAfter(self.gain_input.SetValue, gain)
wx.CallAfter(self.offset_input.SetValue, offset)
wx.CallAfter(self.autotune_button.Enable)
def self_calibrate(self):
self.port.apply_settings(calibrate=True)
sleep(self.port.calibration_delay)
wx.CallAfter(self.calibrate_button.Enable)
def SetValue(self, gain, offset):
self.gain_input.SetValue(gain)
self.offset_input.SetValue(offset)
def GetValue(self):
return (self.gain_input.GetValue(), self.offset_input.GetValue())
def OnAutotune(self, evt=None):
name = self.resource_name_input.Value
if not name:
MessageDialog(self, 'No resource provided').Show()
return
try:
resource = self.global_store.resources[name]
except KeyError:
MessageDialog(self, name, 'Missing resource').Show()
return
if not resource.readable:
MessageDialog(self, name, 'Unreadable resource').Show()
return
self.autotune_button.Disable()
thr = Thread(target=self.autotune, args=(resource,))
thr.daemon = True
thr.start()
def OnCalibrate(self, evt=None):
self.calibrate_button.Disable()
thr = Thread(target=self.self_calibrate)
thr.daemon = True
thr.start()
def OnOk(self, evt=None):
self.ok_callback(self)
self.Destroy()
class ch6VoltageSourceSettingsPanel(wx.Panel):
"""
All the settings for a voltage source.
"""
def __init__(self, parent, global_store, vsrc, *args, **kwargs):
wx.Panel.__init__(self, parent, *args, **kwargs)
self.global_store = global_store
self.vsrc = vsrc
self.port_value_inputs = []
self.port_buttons = []
# Panel.
panel_box = wx.BoxSizer(wx.VERTICAL)
## Ports.
ports_box = wx.FlexGridSizer(rows=3, cols=2)
panel_box.Add(ports_box)
for port in xrange(6):
port_static_box = wx.StaticBox(self, label='Port {0} '.format(port))
port_box = wx.StaticBoxSizer(port_static_box, wx.HORIZONTAL)
ports_box.Add(port_box, flag=wx.ALL, border=5)
spin = FloatSpin(self, value=0, min_val=-5, max_val=5, increment=1, digits=6)
self.port_value_inputs.append(spin)
port_box.Add(spin)
port_box.Add(wx.StaticText(self, label='V'))
set_button = wx.Button(self, label='Set', style=wx.BU_EXACTFIT)
set_button.Bind(wx.EVT_BUTTON, partial(self.OnSetVoltage, port))
port_box.Add(set_button)
tune_button = wx.Button(self, label='Tune...', style=wx.BU_EXACTFIT)
tune_button.Bind(wx.EVT_BUTTON, partial(self.OnTune, port))
port_box.Add(tune_button)
self.port_buttons.append((set_button, tune_button))
## All ports.
button_static_box = wx.StaticBox(self, label='All ports')
button_box = wx.StaticBoxSizer(button_static_box, wx.HORIZONTAL)
panel_box.Add(button_box, flag=wx.CENTER)
### Zero.
zero_all_button = wx.Button(self, label='Zero')
self.Bind(wx.EVT_BUTTON, self.OnZeroAll, zero_all_button)
button_box.Add(zero_all_button, flag=wx.CENTER)
### Self-calibrate.
self.calibrate_all_button = wx.Button(self, label='Self-calibrate')
self.Bind(wx.EVT_BUTTON, self.OnCalibrateAll, self.calibrate_all_button)
button_box.Add(self.calibrate_all_button, flag=wx.CENTER)
### Load tuning.
tuning_data_static_box = wx.StaticBox(self, label='Tuning data')
tuning_data_box = wx.StaticBoxSizer(tuning_data_static_box, wx.HORIZONTAL)
button_box.Add(tuning_data_box)
#### Save.
tuning_data_save_button = wx.Button(self, label='Save...')
self.Bind(wx.EVT_BUTTON, self.OnSave, tuning_data_save_button)
tuning_data_box.Add(tuning_data_save_button)
#### Load.
tuning_data_load_button = wx.Button(self, label='Load...')
self.Bind(wx.EVT_BUTTON, self.OnLoad, tuning_data_load_button)
tuning_data_box.Add(tuning_data_load_button)
self.SetSizer(panel_box)
def self_calibrate_all(self):
delay = 0 # s
for port in self.vsrc.ports:
# Use the largest delay.
if port.calibration_delay > delay:
delay = port.calibration_delay
port.apply_settings(calibrate=True)
sleep(delay)
wx.CallAfter(self.calibrate_all_button.Enable)
def zero_all(self):
for port in self.vsrc.ports:
port.voltage = Quantity(0.0, 'V')
def OnSetVoltage(self, port_num, evt=None):
try:
self.vsrc.ports[port_num].voltage = Quantity(self.port_value_inputs[port_num].GetValue(), 'V')
except ValueError as e:
MessageDialog(self, str(e), 'Invalid value').Show()
def OnTune(self, port_num, evt=None):
port = self.vsrc.ports[port_num]
def ok_callback(dlg):
port.gain, port.offset = dlg.GetValue()
dlg = ch6VoltageSourceTunerDialog(self, self.global_store, ok_callback, port)
dlg.SetValue(port.gain, port.offset)
dlg.Show()
def OnCalibrateAll(self, evt=None):
self.calibrate_all_button.Disable()
thr = Thread(target=self.self_calibrate_all)
thr.daemon = True
thr.start()
def OnZeroAll(self, evt=None):
thr = Thread(target=self.zero_all)
thr.daemon = True
thr.start()
def OnSave(self, evt=None):
values = [[port.gain, port.offset] for port in self.vsrc.ports]
try:
save_csv(self, values)
except IOError as e:
MessageDialog(self, str(e), 'Save error').Show()
return
def OnLoad(self, evt=None):
try:
result = load_csv(self)
if result is None:
return
has_header, values, _ = result
if has_header:
port_values = values[1:]
else:
port_values = values
if len(port_values) != len(self.vsrc.ports):
raise ValueError('Invalid number of ports.')
for i, port_value in enumerate(port_values):
if len(port_value) != 2:
raise ValueError('Invalid number of settings for port {0}.'.format(i))
try:
float(port_value[0])
float(port_value[1])
except (TypeError, ValueError):
raise ValueError('Not a number for port {0}.'.format(i))
except (IOError, ValueError) as e:
MessageDialog(self, str(e), 'Load error').Show()
return
for port, values in zip(self.vsrc.ports, port_values):
port.gain = float(values[0])
port.offset = float(values[1])
class ch6VoltageSourceSettingsDialog(Dialog):
"""
A wrapper for ch6VoltageSourceSettingsPanel.
"""
def __init__(self, parent, global_store, vsrc_name, *args, **kwargs):
# If the device doesn't exist, give up.
try:
vsrc = global_store.devices[vsrc_name].device
except (KeyError, AttributeError):
self.Destroy()
return
Dialog.__init__(self, parent, title='Six channel voltage source settings', *args, **kwargs)
self.vsrc_name = vsrc_name
# Dialog.
dialog_box = wx.BoxSizer(wx.VERTICAL)
## Settings panel.
self.panel = ch6VoltageSourceSettingsPanel(self, global_store, vsrc)
dialog_box.Add(self.panel)
self.SetSizerAndFit(dialog_box)
# Subscriptions.
pub.subscribe(self.msg_device, 'device.added')
pub.subscribe(self.msg_device, 'device.removed')
def msg_device(self, name, value=None):
if name == self.vsrc_name:
# Device has changed, so we can't trust it anymore.
self.Destroy()
return
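# A rough usage sketch (assumes a running wx.App, a parent frame, and a
# populated global_store; 'ch6_vsrc' is a hypothetical device name):
#
#   dlg = ch6VoltageSourceSettingsDialog(parent_frame, global_store, 'ch6_vsrc')
#   dlg.Show()
#
# The dialog destroys itself if the named device cannot be found, or later
# when that device is added or removed (see the 'device.added'/'device.removed'
# subscriptions above).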
| |
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from neutron.api.v2 import attributes as attr
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.tests.unit.api.v2 import test_base
from webob import exc
from neutron_lbaas.extensions import loadbalancer
from neutron_lbaas.extensions import loadbalancerv2
from neutron_lbaas.tests import base
_uuid = uuidutils.generate_uuid
_get_path = test_base._get_path
class LoadBalancerExtensionTestCase(base.ExtensionTestCase):
fmt = 'json'
def setUp(self):
super(LoadBalancerExtensionTestCase, self).setUp()
self._setUpExtension(
'neutron_lbaas.extensions.loadbalancer.LoadBalancerPluginBase',
constants.LOADBALANCER, loadbalancer.RESOURCE_ATTRIBUTE_MAP,
loadbalancer.Loadbalancer, 'lb', use_quota=True)
def test_vip_create(self):
vip_id = _uuid()
data = {'vip': {'name': 'vip1',
'description': 'descr_vip1',
'subnet_id': _uuid(),
'address': '127.0.0.1',
'protocol_port': 80,
'protocol': 'HTTP',
'pool_id': _uuid(),
'session_persistence': {'type': 'HTTP_COOKIE'},
'connection_limit': 100,
'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = copy.copy(data['vip'])
return_value.update({'status': "ACTIVE", 'id': vip_id})
instance = self.plugin.return_value
instance.create_vip.return_value = return_value
res = self.api.post(_get_path('lb/vips', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_vip.assert_called_with(mock.ANY,
vip=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('vip', res)
self.assertEqual(res['vip'], return_value)
def test_vip_list(self):
vip_id = _uuid()
return_value = [{'name': 'vip1',
'admin_state_up': True,
'tenant_id': _uuid(),
'id': vip_id}]
instance = self.plugin.return_value
instance.get_vips.return_value = return_value
res = self.api.get(_get_path('lb/vips', fmt=self.fmt))
instance.get_vips.assert_called_with(mock.ANY, fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_vip_update(self):
vip_id = _uuid()
update_data = {'vip': {'admin_state_up': False}}
return_value = {'name': 'vip1',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': vip_id}
instance = self.plugin.return_value
instance.update_vip.return_value = return_value
res = self.api.put(_get_path('lb/vips', id=vip_id, fmt=self.fmt),
self.serialize(update_data))
instance.update_vip.assert_called_with(mock.ANY, vip_id,
vip=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('vip', res)
self.assertEqual(res['vip'], return_value)
def test_vip_get(self):
vip_id = _uuid()
return_value = {'name': 'vip1',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': vip_id}
instance = self.plugin.return_value
instance.get_vip.return_value = return_value
res = self.api.get(_get_path('lb/vips', id=vip_id, fmt=self.fmt))
instance.get_vip.assert_called_with(mock.ANY, vip_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('vip', res)
self.assertEqual(res['vip'], return_value)
def test_vip_delete(self):
self._test_entity_delete('vip')
def test_pool_create(self):
pool_id = _uuid()
hm_id = _uuid()
data = {'pool': {'name': 'pool1',
'description': 'descr_pool1',
'subnet_id': _uuid(),
'protocol': 'HTTP',
'lb_method': 'ROUND_ROBIN',
'health_monitors': [hm_id],
'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = copy.copy(data['pool'])
return_value['provider'] = 'lbaas'
return_value.update({'status': "ACTIVE", 'id': pool_id})
instance = self.plugin.return_value
instance.create_pool.return_value = return_value
res = self.api.post(_get_path('lb/pools', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
data['pool']['provider'] = attr.ATTR_NOT_SPECIFIED
instance.create_pool.assert_called_with(mock.ANY,
pool=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('pool', res)
self.assertEqual(res['pool'], return_value)
def test_pool_list(self):
pool_id = _uuid()
return_value = [{'name': 'pool1',
'admin_state_up': True,
'tenant_id': _uuid(),
'id': pool_id}]
instance = self.plugin.return_value
instance.get_pools.return_value = return_value
res = self.api.get(_get_path('lb/pools', fmt=self.fmt))
instance.get_pools.assert_called_with(mock.ANY, fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_pool_update(self):
pool_id = _uuid()
update_data = {'pool': {'admin_state_up': False}}
return_value = {'name': 'pool1',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': pool_id}
instance = self.plugin.return_value
instance.update_pool.return_value = return_value
res = self.api.put(_get_path('lb/pools', id=pool_id, fmt=self.fmt),
self.serialize(update_data))
instance.update_pool.assert_called_with(mock.ANY, pool_id,
pool=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('pool', res)
self.assertEqual(res['pool'], return_value)
def test_pool_get(self):
pool_id = _uuid()
return_value = {'name': 'pool1',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': pool_id}
instance = self.plugin.return_value
instance.get_pool.return_value = return_value
res = self.api.get(_get_path('lb/pools', id=pool_id, fmt=self.fmt))
instance.get_pool.assert_called_with(mock.ANY, pool_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('pool', res)
self.assertEqual(res['pool'], return_value)
def test_pool_delete(self):
self._test_entity_delete('pool')
def test_pool_stats(self):
pool_id = _uuid()
stats = {'stats': 'dummy'}
instance = self.plugin.return_value
instance.stats.return_value = stats
path = _get_path('lb/pools', id=pool_id,
action="stats", fmt=self.fmt)
res = self.api.get(path)
instance.stats.assert_called_with(mock.ANY, pool_id)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('stats', res)
self.assertEqual(res['stats'], stats['stats'])
def test_member_create(self):
member_id = _uuid()
data = {'member': {'pool_id': _uuid(),
'address': '127.0.0.1',
'protocol_port': 80,
'weight': 1,
'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = copy.copy(data['member'])
return_value.update({'status': "ACTIVE", 'id': member_id})
instance = self.plugin.return_value
instance.create_member.return_value = return_value
res = self.api.post(_get_path('lb/members', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_member.assert_called_with(mock.ANY,
member=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('member', res)
self.assertEqual(res['member'], return_value)
def test_member_list(self):
member_id = _uuid()
return_value = [{'name': 'member1',
'admin_state_up': True,
'tenant_id': _uuid(),
'id': member_id}]
instance = self.plugin.return_value
instance.get_members.return_value = return_value
res = self.api.get(_get_path('lb/members', fmt=self.fmt))
instance.get_members.assert_called_with(mock.ANY, fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_member_update(self):
member_id = _uuid()
update_data = {'member': {'admin_state_up': False}}
return_value = {'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': member_id}
instance = self.plugin.return_value
instance.update_member.return_value = return_value
res = self.api.put(_get_path('lb/members', id=member_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_member.assert_called_with(mock.ANY, member_id,
member=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('member', res)
self.assertEqual(res['member'], return_value)
def test_member_get(self):
member_id = _uuid()
return_value = {'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': member_id}
instance = self.plugin.return_value
instance.get_member.return_value = return_value
res = self.api.get(_get_path('lb/members', id=member_id,
fmt=self.fmt))
instance.get_member.assert_called_with(mock.ANY, member_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('member', res)
self.assertEqual(res['member'], return_value)
def test_member_delete(self):
self._test_entity_delete('member')
def test_health_monitor_create(self):
health_monitor_id = _uuid()
data = {'health_monitor': {'type': 'HTTP',
'delay': 2,
'timeout': 1,
'max_retries': 3,
'http_method': 'GET',
'url_path': '/path',
'expected_codes': '200-300',
'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = copy.copy(data['health_monitor'])
return_value.update({'status': "ACTIVE", 'id': health_monitor_id})
instance = self.plugin.return_value
instance.create_health_monitor.return_value = return_value
res = self.api.post(_get_path('lb/health_monitors',
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_health_monitor.assert_called_with(mock.ANY,
health_monitor=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('health_monitor', res)
self.assertEqual(res['health_monitor'], return_value)
def test_health_monitor_create_with_timeout_negative(self):
data = {'health_monitor': {'type': 'HTTP',
'delay': 2,
'timeout': -1,
'max_retries': 3,
'http_method': 'GET',
'url_path': '/path',
'expected_codes': '200-300',
'admin_state_up': True,
'tenant_id': _uuid()}}
res = self.api.post(_get_path('lb/health_monitors',
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_health_monitor_list(self):
health_monitor_id = _uuid()
return_value = [{'type': 'HTTP',
'admin_state_up': True,
'tenant_id': _uuid(),
'id': health_monitor_id}]
instance = self.plugin.return_value
instance.get_health_monitors.return_value = return_value
res = self.api.get(_get_path('lb/health_monitors', fmt=self.fmt))
instance.get_health_monitors.assert_called_with(
mock.ANY, fields=mock.ANY, filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_health_monitor_update(self):
health_monitor_id = _uuid()
update_data = {'health_monitor': {'admin_state_up': False}}
return_value = {'type': 'HTTP',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': health_monitor_id}
instance = self.plugin.return_value
instance.update_health_monitor.return_value = return_value
res = self.api.put(_get_path('lb/health_monitors',
id=health_monitor_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_health_monitor.assert_called_with(
mock.ANY, health_monitor_id, health_monitor=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('health_monitor', res)
self.assertEqual(res['health_monitor'], return_value)
def test_health_monitor_get(self):
health_monitor_id = _uuid()
return_value = {'type': 'HTTP',
'admin_state_up': False,
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': health_monitor_id}
instance = self.plugin.return_value
instance.get_health_monitor.return_value = return_value
res = self.api.get(_get_path('lb/health_monitors',
id=health_monitor_id,
fmt=self.fmt))
instance.get_health_monitor.assert_called_with(
mock.ANY, health_monitor_id, fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('health_monitor', res)
self.assertEqual(res['health_monitor'], return_value)
def test_health_monitor_delete(self):
self._test_entity_delete('health_monitor')
def test_create_pool_health_monitor(self):
health_monitor_id = _uuid()
data = {'health_monitor': {'id': health_monitor_id,
'tenant_id': _uuid()}}
return_value = copy.copy(data['health_monitor'])
instance = self.plugin.return_value
instance.create_pool_health_monitor.return_value = return_value
res = self.api.post('/lb/pools/id1/health_monitors',
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_pool_health_monitor.assert_called_with(
mock.ANY, pool_id='id1', health_monitor=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('health_monitor', res)
self.assertEqual(res['health_monitor'], return_value)
def test_delete_pool_health_monitor(self):
health_monitor_id = _uuid()
res = self.api.delete('/lb/pools/id1/health_monitors/%s' %
health_monitor_id)
instance = self.plugin.return_value
instance.delete_pool_health_monitor.assert_called_with(
mock.ANY, health_monitor_id, pool_id='id1')
self.assertEqual(res.status_int, exc.HTTPNoContent.code)
class LoadBalancerExtensionV2TestCase(base.ExtensionTestCase):
fmt = 'json'
def setUp(self):
super(LoadBalancerExtensionV2TestCase, self).setUp()
self._setUpExtension(
'neutron_lbaas.extensions.loadbalancerv2.LoadBalancerPluginBaseV2',
constants.LOADBALANCERV2, loadbalancerv2.RESOURCE_ATTRIBUTE_MAP,
loadbalancerv2.Loadbalancerv2, 'lbaas', use_quota=True)
def test_loadbalancer_create(self):
lb_id = _uuid()
data = {'loadbalancer': {'name': 'lb1',
'description': 'descr_lb1',
'tenant_id': _uuid(),
'vip_subnet_id': _uuid(),
'admin_state_up': True,
'vip_address': '127.0.0.1'}}
return_value = copy.copy(data['loadbalancer'])
return_value.update({'id': lb_id})
instance = self.plugin.return_value
instance.create_loadbalancer.return_value = return_value
res = self.api.post(_get_path('lbaas/loadbalancers', fmt=self.fmt),
self.serialize(data),
content_type='application/{0}'.format(self.fmt))
data['loadbalancer'].update({'provider': attr.ATTR_NOT_SPECIFIED})
instance.create_loadbalancer.assert_called_with(mock.ANY,
loadbalancer=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('loadbalancer', res)
self.assertEqual(res['loadbalancer'], return_value)
def test_loadbalancer_list(self):
lb_id = _uuid()
return_value = [{'name': 'lb1',
'admin_state_up': True,
'tenant_id': _uuid(),
'id': lb_id}]
instance = self.plugin.return_value
instance.get_loadbalancers.return_value = return_value
res = self.api.get(_get_path('lbaas/loadbalancers', fmt=self.fmt))
instance.get_loadbalancers.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_loadbalancer_update(self):
lb_id = _uuid()
update_data = {'loadbalancer': {'admin_state_up': False}}
return_value = {'name': 'lb1',
'admin_state_up': False,
'tenant_id': _uuid(),
'id': lb_id}
instance = self.plugin.return_value
instance.update_loadbalancer.return_value = return_value
res = self.api.put(_get_path('lbaas/loadbalancers',
id=lb_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_loadbalancer.assert_called_with(
mock.ANY, lb_id, loadbalancer=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('loadbalancer', res)
self.assertEqual(res['loadbalancer'], return_value)
def test_loadbalancer_get(self):
lb_id = _uuid()
return_value = {'name': 'lb1',
'admin_state_up': False,
'tenant_id': _uuid(),
'id': lb_id}
instance = self.plugin.return_value
instance.get_loadbalancer.return_value = return_value
res = self.api.get(_get_path('lbaas/loadbalancers',
id=lb_id,
fmt=self.fmt))
instance.get_loadbalancer.assert_called_with(mock.ANY, lb_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('loadbalancer', res)
self.assertEqual(res['loadbalancer'], return_value)
def test_loadbalancer_delete(self):
self._test_entity_delete('loadbalancer')
def test_listener_create(self):
listener_id = _uuid()
data = {'listener': {'tenant_id': _uuid(),
'name': 'listen-name-1',
'description': 'listen-1-desc',
'protocol': 'HTTP',
'protocol_port': 80,
'default_tls_container_id': None,
'sni_container_ids': [],
'connection_limit': 100,
'admin_state_up': True,
'loadbalancer_id': _uuid()}}
return_value = copy.copy(data['listener'])
return_value.update({'id': listener_id})
del return_value['loadbalancer_id']
instance = self.plugin.return_value
instance.create_listener.return_value = return_value
res = self.api.post(_get_path('lbaas/listeners', fmt=self.fmt),
self.serialize(data),
content_type='application/{0}'.format(self.fmt))
instance.create_listener.assert_called_with(mock.ANY,
listener=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('listener', res)
self.assertEqual(res['listener'], return_value)
def test_listener_list(self):
listener_id = _uuid()
return_value = [{'admin_state_up': True,
'tenant_id': _uuid(),
'id': listener_id}]
instance = self.plugin.return_value
instance.get_listeners.return_value = return_value
res = self.api.get(_get_path('lbaas/listeners', fmt=self.fmt))
instance.get_listeners.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_listener_update(self):
listener_id = _uuid()
update_data = {'listener': {'admin_state_up': False}}
return_value = {'name': 'listener1',
'admin_state_up': False,
'tenant_id': _uuid(),
'id': listener_id}
instance = self.plugin.return_value
instance.update_listener.return_value = return_value
res = self.api.put(_get_path('lbaas/listeners',
id=listener_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_listener.assert_called_with(
mock.ANY, listener_id, listener=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('listener', res)
self.assertEqual(res['listener'], return_value)
def test_listener_get(self):
listener_id = _uuid()
return_value = {'name': 'listener1',
'admin_state_up': False,
'tenant_id': _uuid(),
'id': listener_id}
instance = self.plugin.return_value
instance.get_listener.return_value = return_value
res = self.api.get(_get_path('lbaas/listeners',
id=listener_id,
fmt=self.fmt))
instance.get_listener.assert_called_with(mock.ANY, listener_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('listener', res)
self.assertEqual(res['listener'], return_value)
def test_listener_delete(self):
self._test_entity_delete('listener')
def test_pool_create(self):
pool_id = _uuid()
data = {'pool': {'name': 'pool1',
'description': 'descr_pool1',
'protocol': 'HTTP',
'lb_algorithm': 'ROUND_ROBIN',
'admin_state_up': True,
'tenant_id': _uuid(),
'listener_id': _uuid(),
'session_persistence': {}}}
return_value = copy.copy(data['pool'])
return_value.update({'id': pool_id})
del return_value['listener_id']
instance = self.plugin.return_value
instance.create_pool.return_value = return_value
res = self.api.post(_get_path('lbaas/pools', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_pool.assert_called_with(mock.ANY, pool=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('pool', res)
self.assertEqual(res['pool'], return_value)
def test_pool_list(self):
pool_id = _uuid()
return_value = [{'name': 'pool1',
'admin_state_up': True,
'tenant_id': _uuid(),
'id': pool_id}]
instance = self.plugin.return_value
instance.get_pools.return_value = return_value
res = self.api.get(_get_path('lbaas/pools', fmt=self.fmt))
instance.get_pools.assert_called_with(mock.ANY, fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_pool_update(self):
pool_id = _uuid()
update_data = {'pool': {'admin_state_up': False}}
return_value = {'name': 'pool1',
'admin_state_up': False,
'tenant_id': _uuid(),
'id': pool_id}
instance = self.plugin.return_value
instance.update_pool.return_value = return_value
res = self.api.put(_get_path('lbaas/pools', id=pool_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_pool.assert_called_with(mock.ANY, pool_id,
pool=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('pool', res)
self.assertEqual(res['pool'], return_value)
def test_pool_get(self):
pool_id = _uuid()
return_value = {'name': 'pool1',
'admin_state_up': False,
'tenant_id': _uuid(),
'id': pool_id}
instance = self.plugin.return_value
instance.get_pool.return_value = return_value
res = self.api.get(_get_path('lbaas/pools', id=pool_id,
fmt=self.fmt))
instance.get_pool.assert_called_with(mock.ANY, pool_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('pool', res)
self.assertEqual(res['pool'], return_value)
def test_pool_delete(self):
self._test_entity_delete('pool')
def test_pool_member_create(self):
subnet_id = _uuid()
member_id = _uuid()
data = {'member': {'address': '10.0.0.1',
'protocol_port': 80,
'weight': 1,
'subnet_id': subnet_id,
'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = copy.copy(data['member'])
return_value.update({'id': member_id})
instance = self.plugin.return_value
instance.create_pool_member.return_value = return_value
res = self.api.post(_get_path('lbaas/pools/pid1/members',
fmt=self.fmt),
self.serialize(data),
content_type='application/%s'
% self.fmt)
instance.create_pool_member.assert_called_with(mock.ANY,
pool_id='pid1',
member=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('member', res)
self.assertEqual(res['member'], return_value)
def test_pool_member_list(self):
member_id = _uuid()
return_value = [{'name': 'member1',
'admin_state_up': True,
'tenant_id': _uuid(),
'id': member_id}]
instance = self.plugin.return_value
instance.get_pool_members.return_value = return_value
res = self.api.get(_get_path('lbaas/pools/pid1/members',
fmt=self.fmt))
instance.get_pool_members.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY,
pool_id='pid1')
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_pool_member_update(self):
member_id = _uuid()
update_data = {'member': {'admin_state_up': False}}
return_value = {'admin_state_up': False,
'tenant_id': _uuid(),
'id': member_id}
instance = self.plugin.return_value
instance.update_pool_member.return_value = return_value
res = self.api.put(_get_path('lbaas/pools/pid1/members',
id=member_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_pool_member.assert_called_with(
mock.ANY, member_id, pool_id='pid1',
member=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('member', res)
self.assertEqual(res['member'], return_value)
def test_pool_member_get(self):
member_id = _uuid()
return_value = {'admin_state_up': False,
'tenant_id': _uuid(),
'id': member_id}
instance = self.plugin.return_value
instance.get_pool_member.return_value = return_value
res = self.api.get(_get_path('lbaas/pools/pid1/members',
id=member_id, fmt=self.fmt))
instance.get_pool_member.assert_called_with(mock.ANY,
member_id,
fields=mock.ANY,
pool_id='pid1')
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('member', res)
self.assertEqual(res['member'], return_value)
def test_pool_member_delete(self):
entity_id = _uuid()
res = self.api.delete(
test_base._get_path('lbaas/pools/pid1/members',
id=entity_id, fmt=self.fmt))
delete_entity = getattr(self.plugin.return_value,
"delete_pool_member")
delete_entity.assert_called_with(mock.ANY, entity_id,
pool_id='pid1')
self.assertEqual(res.status_int, exc.HTTPNoContent.code)
def test_health_monitor_create(self):
health_monitor_id = _uuid()
data = {'healthmonitor': {'type': 'HTTP',
'delay': 2,
'timeout': 1,
'max_retries': 3,
'http_method': 'GET',
'url_path': '/path',
'expected_codes': '200-300',
'admin_state_up': True,
'tenant_id': _uuid(),
'pool_id': _uuid()}}
return_value = copy.copy(data['healthmonitor'])
return_value.update({'id': health_monitor_id})
del return_value['pool_id']
instance = self.plugin.return_value
instance.create_healthmonitor.return_value = return_value
res = self.api.post(_get_path('lbaas/healthmonitors',
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_healthmonitor.assert_called_with(
mock.ANY, healthmonitor=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('healthmonitor', res)
self.assertEqual(res['healthmonitor'], return_value)
def test_health_monitor_create_with_timeout_negative(self):
data = {'healthmonitor': {'type': 'HTTP',
'delay': 2,
'timeout': -1,
'max_retries': 3,
'http_method': 'GET',
'url_path': '/path',
'expected_codes': '200-300',
'admin_state_up': True,
'tenant_id': _uuid(),
'pool_id': _uuid()}}
res = self.api.post(_get_path('lbaas/healthmonitors',
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_health_monitor_list(self):
health_monitor_id = _uuid()
return_value = [{'type': 'HTTP',
'admin_state_up': True,
'tenant_id': _uuid(),
'id': health_monitor_id}]
instance = self.plugin.return_value
instance.get_healthmonitors.return_value = return_value
res = self.api.get(_get_path('lbaas/healthmonitors', fmt=self.fmt))
instance.get_healthmonitors.assert_called_with(
mock.ANY, fields=mock.ANY, filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_health_monitor_update(self):
health_monitor_id = _uuid()
update_data = {'healthmonitor': {'admin_state_up': False}}
return_value = {'type': 'HTTP',
'admin_state_up': False,
'tenant_id': _uuid(),
'id': health_monitor_id}
instance = self.plugin.return_value
instance.update_healthmonitor.return_value = return_value
res = self.api.put(_get_path('lbaas/healthmonitors',
id=health_monitor_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_healthmonitor.assert_called_with(
mock.ANY, health_monitor_id, healthmonitor=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('healthmonitor', res)
self.assertEqual(res['healthmonitor'], return_value)
def test_health_monitor_get(self):
health_monitor_id = _uuid()
return_value = {'type': 'HTTP',
'admin_state_up': False,
'tenant_id': _uuid(),
'id': health_monitor_id}
instance = self.plugin.return_value
instance.get_healthmonitor.return_value = return_value
res = self.api.get(_get_path('lbaas/healthmonitors',
id=health_monitor_id,
fmt=self.fmt))
instance.get_healthmonitor.assert_called_with(
mock.ANY, health_monitor_id, fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('healthmonitor', res)
self.assertEqual(res['healthmonitor'], return_value)
def test_health_monitor_delete(self):
entity_id = _uuid()
res = self.api.delete(
test_base._get_path('lbaas/healthmonitors',
id=entity_id, fmt=self.fmt))
delete_entity = getattr(self.plugin.return_value,
"delete_healthmonitor")
delete_entity.assert_called_with(mock.ANY, entity_id)
self.assertEqual(res.status_int, exc.HTTPNoContent.code)
def test_load_balancer_stats(self):
load_balancer_id = _uuid()
stats = {'stats': 'dummy'}
instance = self.plugin.return_value
instance.stats.return_value = stats
path = _get_path('lbaas/loadbalancers', id=load_balancer_id,
action="stats", fmt=self.fmt)
res = self.api.get(path)
instance.stats.assert_called_with(mock.ANY, load_balancer_id)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('stats', res)
self.assertEqual(res['stats'], stats['stats'])
def test_load_balancer_statuses(self):
load_balancer_id = _uuid()
statuses = {'statuses': {'loadbalancer': {}}}
instance = self.plugin.return_value
instance.statuses.return_value = statuses
path = _get_path('lbaas/loadbalancers', id=load_balancer_id,
action="statuses", fmt=self.fmt)
res = self.api.get(path)
instance.statuses.assert_called_with(mock.ANY, load_balancer_id)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('statuses', res)
self.assertEqual(res['statuses'], statuses['statuses'])
| |
from decimal import Decimal
from django import forms
from django.template import loader
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from l10n.utils import moneyfmt
from livesettings import config_value, config_value_safe
from payment import signals
from payment.config import labelled_gateway_choices
from payment.models import CreditCardDetail
from payment.utils import get_or_create_order
from product.models import Discount, TaxClass, Price
from product.prices import PriceAdjustmentCalc, PriceAdjustment
from product.utils import find_best_auto_discount
from satchmo_store.contact.forms import ProxyContactForm, ContactInfoForm
from satchmo_store.contact.models import Contact
from satchmo_store.shop.models import Cart, Order
from satchmo_store.shop.signals import satchmo_shipping_price_query
from satchmo_utils.dynamic import lookup_template
from satchmo_utils.views import CreditCard
from shipping.config import shipping_methods, shipping_method_by_key
from shipping.signals import shipping_choices_query
from shipping.utils import update_shipping
from signals_ahoy.signals import form_init, form_initialdata, form_presave, form_postsave, form_validate
from tax.templatetags.satchmo_tax import _get_taxprocessor
from threaded_multihost import threadlocals
import calendar
import datetime
import logging
log = logging.getLogger('payment.forms')
MONTHS = [(month,'%02d'%month) for month in range(1,13)]
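# MONTHS evaluates to [(1, '01'), (2, '02'), ..., (12, '12')]; the zero-padded
# strings are what CreditPayShipForm displays for its month_expires choices.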
def _get_cheapest_shipping(shipping_dict):
"""Use the shipping_dict as returned by _get_shipping_choices
to figure the cheapest shipping option."""
least = None
leastcost = None
for key, value in shipping_dict.items():
current = value['cost']
if leastcost is None or current < leastcost:
least = key
leastcost = current
return least
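# A minimal input/output sketch (the keys and costs are hypothetical; real
# dicts come from _get_shipping_choices and also carry 'discount'/'final'):
#
#   >>> _get_cheapest_shipping({'flat': {'cost': Decimal('5.00')},
#   ...                         'ups': {'cost': Decimal('7.50')}})
#   'flat'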
def _get_shipping_choices(request, paymentmodule, cart, contact, default_view_tax=False, order=None):
"""Iterate through legal shipping modules, building the list for display to the user.
Returns the shipping choices list, along with a dictionary of shipping choices, useful
for building javascript that operates on shipping choices.
"""
shipping_options = []
shipping_dict = {}
rendered = {}
if not order:
try:
order = Order.objects.from_request(request)
except Order.DoesNotExist:
pass
discount = None
if order:
try:
discount = Discount.objects.by_code(order.discount_code)
except Discount.DoesNotExist:
pass
if not cart.is_shippable:
methods = [shipping_method_by_key('NoShipping'),]
else:
methods = shipping_methods()
tax_shipping = config_value_safe('TAX','TAX_SHIPPING', False)
shipping_tax = None
if tax_shipping:
taxer = _get_taxprocessor(request)
shipping_tax = TaxClass.objects.get(title=config_value('TAX', 'TAX_CLASS'))
for method in methods:
method.calculate(cart, contact)
if method.valid(order=order):
template = lookup_template(paymentmodule, 'shipping/options.html')
t = loader.get_template(template)
shipcost = finalcost = method.cost()
if discount and order:
order.shipping_cost = shipcost
discount.calc(order)
shipdiscount = discount.item_discounts.get('Shipping', 0)
else:
shipdiscount = 0
# set up query to determine shipping price to show
shipprice = Price()
shipprice.price = shipcost
shipadjust = PriceAdjustmentCalc(shipprice)
if shipdiscount:
shipadjust += PriceAdjustment('discount', _('Discount'), shipdiscount)
satchmo_shipping_price_query.send(cart, adjustment=shipadjust)
shipdiscount = shipadjust.total_adjustment()
if shipdiscount:
finalcost -= shipdiscount
shipping_dict[method.id] = {'cost' : shipcost, 'discount' : shipdiscount, 'final' : finalcost}
taxed_shipping_price = None
if tax_shipping:
taxcost = taxer.by_price(shipping_tax, finalcost)
total = finalcost + taxcost
taxed_shipping_price = moneyfmt(total)
shipping_dict[method.id]['taxedcost'] = total
shipping_dict[method.id]['tax'] = taxcost
c = RequestContext(request, {
'amount': finalcost,
'description' : method.description(),
'method' : method.method(),
'expected_delivery' : method.expectedDelivery(),
'default_view_tax' : default_view_tax,
'shipping_tax': shipping_tax,
'taxed_shipping_price': taxed_shipping_price})
rendered[method.id] = t.render(c)
#now sort by price, low to high
sortme = [(value['cost'], key) for key, value in shipping_dict.items()]
sortme.sort()
shipping_options = [(key, rendered[key]) for cost, key in sortme]
shipping_choices_query.send(sender=cart, cart=cart,
paymentmodule=paymentmodule, contact=contact,
default_view_tax=default_view_tax, order=order,
shipping_options = shipping_options,
shipping_dict = shipping_dict)
return shipping_options, shipping_dict
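# A rough sketch of the returned shapes (method ids and amounts are hypothetical):
#
#   shipping_options -> [('flat-rate', '<rendered shipping/options.html>'), ...]
#   shipping_dict    -> {'flat-rate': {'cost': Decimal('5.00'),
#                                      'discount': Decimal('0.00'),
#                                      'final': Decimal('5.00')}}
#
# When TAX_SHIPPING is enabled, each entry also gains 'taxedcost' and 'tax'.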
def _find_sale(cart):
if cart.numItems > 0:
products = [item.product for item in cart.cartitem_set.all()]
sale = find_best_auto_discount(products)
else:
sale = None
return sale
class CustomChargeForm(forms.Form):
orderitem = forms.IntegerField(required=True, widget=forms.HiddenInput())
amount = forms.DecimalField(label=_('New price'), required=False)
shipping = forms.DecimalField(label=_('Shipping adjustment'), required=False)
notes = forms.CharField(_("Notes"), required=False, initial="Your custom item is ready.")
def __init__(self, *args, **kwargs):
initial = kwargs.get('initial', {})
form_initialdata.send('CustomChargeForm', form=self, initial=initial)
kwargs['initial'] = initial
super(CustomChargeForm, self).__init__(*args, **kwargs)
form_init.send(CustomChargeForm, form=self)
def clean(self, *args, **kwargs):
super(CustomChargeForm, self).clean(*args, **kwargs)
form_validate.send(CustomChargeForm, form=self)
return self.cleaned_data
class PaymentMethodForm(ProxyContactForm):
paymentmethod = forms.ChoiceField(
label=_('Payment method'),
choices=labelled_gateway_choices(),
widget=forms.RadioSelect,
required=True
)
def __init__(self, cart=None, order=None, *args, **kwargs):
super(PaymentMethodForm, self).__init__(*args, **kwargs)
self.cart = cart
# Send a signal to perform additional filtering of available payment methods.
# Receivers get the cart/order passed in so they can check the contents and
# modify the methods list if necessary.
payment_choices = labelled_gateway_choices()
signals.payment_methods_query.send(
PaymentMethodForm,
methods=payment_choices,
cart=cart,
order=order,
contact=self._contact
)
if self.fields['paymentmethod'].initial is None:
self.fields['paymentmethod'].initial = payment_choices[0][0]
if len(payment_choices) == 1:
self.fields['paymentmethod'].widget = forms.HiddenInput()
else:
self.fields['paymentmethod'].widget = forms.RadioSelect()
self.fields['paymentmethod'].choices = payment_choices
def clean(self):
# allow additional validation
form_validate.send(PaymentMethodForm, form=self)
return self.cleaned_data
class PaymentContactInfoForm(PaymentMethodForm, ContactInfoForm):
payment_required_fields = None
def __init__(self, *args, **kwargs):
super(PaymentContactInfoForm, self).__init__(*args, **kwargs)
if not self.cart:
request = threadlocals.get_current_request()
self.cart = Cart.objects.from_request(request)
self.fields['discount'] = forms.CharField(max_length=30, required=False)
self.payment_required_fields = {}
if config_value('PAYMENT', 'USE_DISCOUNTS'):
if not self.fields['discount'].initial:
sale = _find_sale(self.cart)
if sale:
self.fields['discount'].initial = sale.code
else:
self.fields['discount'].widget = forms.HiddenInput()
# Listeners of the form_init signal (below) may modify the dict of
# payment_required_fields. For example, if your CUSTOM_PAYMENT requires
# customer's city, put the following code in the listener:
#
# form.payment_required_fields['CUSTOM_PAYMENT'] = ['city']
#
form_init.send(PaymentContactInfoForm, form=self)
def save(self, request, *args, **kwargs):
form_presave.send(PaymentContactInfoForm, form=self)
contactid = super(PaymentContactInfoForm, self).save(*args, **kwargs)
contact = Contact.objects.get(pk=contactid)
cart = kwargs.get('cart', None)
if not cart:
cart = Cart.objects.from_request(request)
if not cart.customer:
cart.customer = contact
cart.save()
self.order = get_or_create_order(request, cart, contact, self.cleaned_data)
form_postsave.send(PaymentContactInfoForm, form=self)
return contactid
def clean(self):
try:
paymentmethod = self.cleaned_data['paymentmethod']
except KeyError:
self._errors['paymentmethod'] = forms.util.ErrorList([_('This field is required')])
return self.cleaned_data
required_fields = self.payment_required_fields.get(paymentmethod, [])
msg = _('Selected payment method requires this field to be filled')
for fld in required_fields:
if not self.cleaned_data.get(fld):
self._errors[fld] = forms.util.ErrorList([msg])
elif fld == 'state':
self.enforce_state = True
try:
self._check_state(self.cleaned_data['state'], self.cleaned_data['country'])
except forms.ValidationError, e:
self._errors[fld] = e.messages
super(PaymentContactInfoForm, self).clean()
return self.cleaned_data
def clean_discount(self):
""" Check if discount exists and is valid. """
if not config_value('PAYMENT', 'USE_DISCOUNTS'):
return ''
data = self.cleaned_data['discount']
if data:
try:
discount = Discount.objects.get(code=data, active=True)
except Discount.DoesNotExist:
raise forms.ValidationError(_('Invalid discount code.'))
request = threadlocals.get_current_request()
try:
contact = Contact.objects.from_request(request)
except Contact.DoesNotExist:
contact = None
valid, msg = discount.isValid(self.cart, contact=contact)
if not valid:
raise forms.ValidationError(msg)
# TODO: validate that it can work with these products
return data
class SimplePayShipForm(forms.Form):
shipping = forms.ChoiceField(widget=forms.RadioSelect(), required=False)
def __init__(self, request, paymentmodule, *args, **kwargs):
super(SimplePayShipForm, self).__init__(*args, **kwargs)
try:
order = Order.objects.from_request(request)
except Order.DoesNotExist:
order = None
self.order = order
self.orderpayment = None
self.paymentmodule = paymentmodule
try:
self.tempCart = Cart.objects.from_request(request)
if self.tempCart.numItems > 0:
products = [item.product for item in self.tempCart.cartitem_set.all()]
except Cart.DoesNotExist:
self.tempCart = None
try:
self.tempContact = Contact.objects.from_request(request)
except Contact.DoesNotExist:
self.tempContact = None
if 'default_view_tax' in kwargs:
default_view_tax = kwargs['default_view_tax']
else:
default_view_tax = config_value_safe('TAX', 'TAX_SHIPPING', False)
shipping_choices, shipping_dict = _get_shipping_choices(request, paymentmodule, self.tempCart, self.tempContact, default_view_tax=default_view_tax)
cheapshipping = _get_cheapest_shipping(shipping_dict)
self.cheapshipping = cheapshipping
discount = None
if order and order.discount_code:
try:
discount = Discount.objects.by_code(order.discount_code)
# 'discount' object could be NullDiscount instance
if discount and hasattr(discount, 'shipping') and discount.shipping == "FREECHEAP":
if cheapshipping:
shipping_choices = [opt for opt in shipping_choices if opt[0] == cheapshipping]
shipping_dict = {cheapshipping: shipping_dict[cheapshipping]}
except Discount.DoesNotExist:
pass
# possibly hide the shipping based on store config
shiphide = config_value('SHIPPING','HIDING')
# Handle a partial payment and make sure we don't show a shipping choice after one has
# already been chosen
if self.order and self.order.is_partially_paid and shipping_dict.get(self.order.shipping_model, False):
self.fields['shipping'] = forms.CharField(max_length=30, initial=self.order.shipping_model,
widget=forms.HiddenInput(attrs={'value' : shipping_choices[0][0]}))
self.shipping_hidden = True
# Possibly hide the field if there is only one choice.
elif shiphide in ('YES', 'DESCRIPTION') and len(shipping_choices) == 1:
self.fields['shipping'] = forms.CharField(max_length=30, initial=shipping_choices[0][0],
widget=forms.HiddenInput(attrs={'value' : shipping_choices[0][0]}))
if shiphide == 'DESCRIPTION':
self.shipping_hidden = False
self.shipping_description = shipping_choices[0][1]
else:
self.shipping_hidden = True
self.shipping_description = ""
elif len(shipping_choices) == 0:
self.shipping_hidden = True
else:
self.fields['shipping'].choices = shipping_choices
if config_value('SHIPPING','SELECT_CHEAPEST'):
if cheapshipping is not None:
self.fields['shipping'].initial = cheapshipping
self.shipping_hidden = False
self.shipping_dict = shipping_dict
form_init.send(SimplePayShipForm, form=self)
def clean_shipping(self):
shipping = self.cleaned_data['shipping']
if not shipping and self.tempCart.is_shippable:
raise forms.ValidationError(_('This field is required.'))
return shipping
def is_needed(self):
"""Check to see if this form is even needed
it is *not* needed if:
- we have an order
- the order balance is zero
- No shipping needs to be selected
"""
needed = True
if self.order and self.tempContact and self.tempCart:
order = self.order
if order.is_shippable and len(self.shipping_dict) == 1:
update_shipping(order, self.shipping_dict.keys()[0], self.tempContact, self.tempCart)
order.recalculate_total(save=False)
needed = not order.paid_in_full
if not needed:
log.debug('%s can skip the payment step - no info needed', order)
return needed
def save(self, request, cart, contact, payment_module, data=None):
form_presave.send(SimplePayShipForm, form=self)
if data is None:
data = self.cleaned_data
self.order = get_or_create_order(request, cart, contact, data)
if payment_module:
processor_module = payment_module.MODULE.load_module('processor')
processor = processor_module.PaymentProcessor(payment_module)
self.orderpayment = processor.create_pending_payment(order=self.order)
else:
self.orderpayment = None
form_postsave.send(SimplePayShipForm, form=self)
class CreditPayShipForm(SimplePayShipForm):
credit_type = forms.ChoiceField()
credit_number = forms.CharField(max_length=20, widget=forms.TextInput(attrs={'autocomplete':'off'}))
month_expires = forms.ChoiceField(choices=MONTHS)
year_expires = forms.ChoiceField()
ccv = forms.CharField(max_length=4, label='Sec code', widget=forms.TextInput(attrs={'autocomplete':'off'}))
def __init__(self, request, paymentmodule, *args, **kwargs):
creditchoices = paymentmodule.CREDITCHOICES.choice_values
super(CreditPayShipForm, self).__init__(request, paymentmodule, *args, **kwargs)
self.cc = None
self.fields['credit_type'].choices = creditchoices
num_years = config_value('PAYMENT', 'CC_NUM_YEARS')
year_now = datetime.date.today().year
self.fields['year_expires'].choices = [(year, year) for year in range(year_now, year_now+num_years+1)]
self.tempCart = Cart.objects.from_request(request)
initial = kwargs.get('initial', None)
if initial:
if initial.get('credit_number', None):
self.fields['credit_number'].widget = forms.PasswordInput()
if initial.get('ccv', None):
self.fields['ccv'].widget = forms.PasswordInput()
try:
self.tempContact = Contact.objects.from_request(request)
except Contact.DoesNotExist:
self.tempContact = None
def clean(self):
super(CreditPayShipForm, self).clean()
data = self.cleaned_data
if not self.is_valid():
log.debug('form not valid, no early auth')
return data
early = config_value('PAYMENT', 'AUTH_EARLY')
if early:
processor_module = self.paymentmodule.MODULE.load_module('processor')
processor = processor_module.PaymentProcessor(self.paymentmodule)
if processor.can_authorize():
log.debug('Processing early capture/release for: %s', self.order)
processor_module = self.paymentmodule.MODULE.load_module('processor')
processor = processor_module.PaymentProcessor(self.paymentmodule)
if self.order:
# we have to make a payment object and save the credit card data to
# make an auth/release.
orderpayment = processor.create_pending_payment(order=self.order,
amount=Decimal('0.01'))
op = orderpayment.capture
cc = CreditCardDetail(orderpayment=op,
expire_month=data['month_expires'],
expire_year=data['year_expires'],
credit_type=data['credit_type'])
cc.storeCC(data['credit_number'])
cc.save()
# set ccv into cache
cc.ccv = data['ccv']
self.cc = cc
results = processor.authorize_and_release(order=self.order)
if not results.success:
log.debug('Payment module error: %s', results)
raise forms.ValidationError(results.message)
else:
log.debug('Payment module capture/release success for %s', self.order)
else:
log.debug('Payment module %s cannot do credit authorizations, ignoring AUTH_EARLY setting.',
self.paymentmodule.MODULE.value)
return data
def clean_credit_number(self):
""" Check if credit card is valid. """
data = self.cleaned_data
credit_number = data['credit_number']
card = CreditCard(credit_number, data['credit_type'])
results, msg = card.verifyCardTypeandNumber()
if not results:
raise forms.ValidationError(msg)
return credit_number
def clean_month_expires(self):
return int(self.cleaned_data['month_expires'])
def clean_year_expires(self):
""" Check if credit card has expired. """
month = self.cleaned_data['month_expires']
year = int(self.cleaned_data['year_expires'])
max_day = calendar.monthrange(year, month)[1]
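        # e.g. calendar.monthrange(2024, 2)[1] == 29, so a card showing 02/2024
        # is treated as valid through 2024-02-29.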
if datetime.date.today() > datetime.date(year=year, month=month, day=max_day):
raise forms.ValidationError(_('Your card has expired.'))
return year
def clean_ccv(self):
""" Validate a proper CCV is entered. Remember it can have a leading 0 so don't convert to int and return it"""
try:
check = int(self.cleaned_data['ccv'])
return self.cleaned_data['ccv'].strip()
except ValueError:
raise forms.ValidationError(_('Invalid ccv.'))
def save(self, request, cart, contact, payment_module, data=None):
"""Save the order and the credit card information for this orderpayment"""
form_presave.send(CreditPayShipForm, form=self)
if data is None:
data = self.cleaned_data
assert(data)
super(CreditPayShipForm, self).save(request, cart, contact, payment_module, data=data)
if self.orderpayment:
op = self.orderpayment.capture
cc = CreditCardDetail(orderpayment=op,
expire_month=data['month_expires'],
expire_year=data['year_expires'],
credit_type=data['credit_type'])
cc.storeCC(data['credit_number'])
cc.save()
# set ccv into cache
cc.ccv = data['ccv']
self.cc = cc
form_postsave.send(CreditPayShipForm, form=self)
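# A minimal usage sketch (hypothetical view code; the request, cart, contact
# and payment_module objects are assumptions supplied by the caller):
#
#   form = CreditPayShipForm(request, payment_module, request.POST)
#   if form.is_valid():
#       form.save(request, cart, contact, payment_module)
#
# save() gets or creates the order, records a pending payment through the
# payment module's processor, and attaches the credit card details to it.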
| |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Tests for lit_nlp.components.minimal_targeted_counterfactuals."""
from typing import List
import unittest.mock as mock
from absl.testing import absltest
from lit_nlp.api import dataset as lit_dataset
from lit_nlp.api import model as lit_model
from lit_nlp.api import types as lit_types
from lit_nlp.components import minimal_targeted_counterfactuals
from lit_nlp.lib import caching
import numpy as np
import scipy.special as scipy_special
ANIMALS = ['unknown', 'elephant', 'ant', 'whale', 'seal']
class ClassificationTestDataset(lit_dataset.Dataset):
"""A test dataset for classification testing."""
def spec(self) -> lit_types.Spec:
return {
'size': lit_types.CategoryLabel(vocab=['small', 'medium', 'large']),
'weight': lit_types.Scalar(),
'legs': lit_types.Boolean(),
'description': lit_types.String(),
'animal': lit_types.CategoryLabel(vocab=ANIMALS),
}
@property
def examples(self) -> List[lit_types.JsonDict]:
return [
{
'size': 'small',
'weight': 0.01,
'legs': True,
'description': 'small but strong',
'animal': 'ant'
},
{
'size': 'large',
'weight': 0.8,
'legs': True,
'description': 'has a trunk',
'animal': 'elephant'
},
{
'size': 'medium',
'weight': 0.2,
'legs': False,
'description': 'makes strange sounds',
'animal': 'seal'
},
{
'size': 'large',
'weight': 2.5,
'legs': False,
'description': 'excellent water displacement',
'animal': 'whale'
},
]
class ClassificationTestModel(lit_model.Model):
"""A test model for testing tabular hot-flips on classification tasks."""
def __init__(self, dataset: lit_dataset.Dataset) -> None:
super().__init__()
self._dataset = dataset
def max_minibatch_size(self, **unused) -> int:
return 2
def input_spec(self) -> lit_types.Spec:
return {
'size': lit_types.CategoryLabel(vocab=['small', 'medium', 'large']),
'weight': lit_types.Scalar(),
'legs': lit_types.Boolean(),
'description': lit_types.String(),
}
def output_spec(self) -> lit_types.Spec:
return {
'preds':
lit_types.MulticlassPreds(
parent='animal', vocab=ANIMALS, null_idx=0)
}
def predict_minibatch(self, inputs: List[lit_types.JsonDict],
**unused) -> List[lit_types.JsonDict]:
output = []
def predict_example(ex: lit_types.JsonDict) -> lit_types.JsonDict:
"""Returns model predictions for a given example.
The method uses the animal test dataset as the ground truth. The method
compares the given example features to the dataset features for all
animals. The closer the feature values are, the higher the contribution to
the corresponding class logit is.
Args:
ex: an example to run prediction for.
Returns:
The softmax values for the animal class prediction.
"""
      # Logit values, one per class in ANIMALS.
logits = np.zeros((len(ANIMALS),))
for db_rec in self._dataset.examples:
animal_index = ANIMALS.index(db_rec['animal'])
for field_name in self._dataset.spec():
if ex[field_name] is None or db_rec[field_name] is None:
continue
if field_name == 'animal':
continue
field_spec_value = self._dataset.spec()[field_name]
if (isinstance(field_spec_value, lit_types.CategoryLabel) or
isinstance(field_spec_value, lit_types.Boolean)) and (
ex[field_name] == db_rec[field_name]):
logits[animal_index] += 1
if isinstance(field_spec_value, lit_types.Scalar):
logits[animal_index] += 1.0 - abs(ex[field_name] -
db_rec[field_name])
return scipy_special.softmax(logits)
for example in inputs:
output.append({'preds': predict_example(example)})
return output
class RegressionTestDataset(lit_dataset.Dataset):
"""A test dataset for regression testing."""
def spec(self) -> lit_types.Spec:
return {
'x_1': lit_types.Scalar(),
'x_2': lit_types.Scalar(),
'y': lit_types.Scalar(),
}
@property
def examples(self) -> List[lit_types.JsonDict]:
return [
{
'x_1': 0.0,
'x_2': 0.0,
'y': 0.0
},
{
'x_1': 0.5,
'x_2': 0.4,
'y': 1.0
},
]
class RegressionTestModel(lit_model.Model):
"""A test model for testing tabular hot-flips on regression tasks."""
def max_minibatch_size(self, **unused) -> int:
return 2
def input_spec(self) -> lit_types.Spec:
return {
'x_1': lit_types.Scalar(),
'x_2': lit_types.Scalar(),
}
def output_spec(self) -> lit_types.Spec:
return {'score': lit_types.RegressionScore(parent='y')}
def predict_minibatch(self, inputs: List[lit_types.JsonDict],
**unused) -> List[lit_types.JsonDict]:
output = []
def predict_example(ex: lit_types.JsonDict) -> lit_types.JsonDict:
x1 = ex['x_1']
x2 = ex['x_2']
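      # e.g. x_1=3, x_2=2 gives 2*9 + 2 = 20, as asserted in
      # test_test_regression_model below.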
return 2 * x1**2 + x2
for example in inputs:
output.append({'score': predict_example(example)})
return output
class ClassificationTabularMtcTest(absltest.TestCase):
"""Tests tabular hot-flips on classification tasks."""
def setUp(self):
super().setUp()
dataset = lit_dataset.IndexedDataset(
base=ClassificationTestDataset(), id_fn=caching.input_hash)
self._dataset = dataset
self._model = ClassificationTestModel(self._dataset)
self._gen = minimal_targeted_counterfactuals.TabularMTC()
self._example = {
'size': 'large',
'weight': 1.2,
'legs': False,
'description': 'big water animal',
'animal': 'whale'
}
self._config = {
'Prediction key': 'preds',
'dataset_name': 'classification_test_dataset'
}
def test_test_model(self):
"""Tests the tests model predict method."""
dataset = ClassificationTestDataset()
model = ClassificationTestModel(dataset)
preds = list(model.predict(dataset.examples))
self.assertEqual(np.argmax(preds[0]['preds']), 2)
self.assertEqual(np.argmax(preds[1]['preds']), 1)
self.assertEqual(np.argmax(preds[2]['preds']), 4)
self.assertEqual(np.argmax(preds[3]['preds']), 3)
def test_prediction_key_required(self):
"""Tests the case when the client doesn't specify the prediction key."""
self._config['Prediction key'] = ''
with self.assertRaisesRegex(ValueError,
'Please provide the prediction key'):
self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
def test_incorrect_prediction_key(self):
"""Tests the case when the client specifies a key that doesn't exist."""
self._config['Prediction key'] = 'wrong_key'
with self.assertRaisesRegex(ValueError, 'Invalid prediction key'):
self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
def test_unsupported_model(self):
"""Tests the case when the passed model is not supported."""
mocked_model = mock.MagicMock()
output_spec = {'preds': lit_types.ImageBytes}
mocked_model.output_spec = mock.MagicMock(return_value=output_spec)
with self.assertRaisesRegex(
ValueError, 'Only classification and regression models are supported'):
self._gen.generate(
example=self._example,
model=mocked_model,
dataset=self._dataset,
config=self._config)
def test_no_model(self):
"""Tests the case when no model is passed."""
with self.assertRaisesRegex(ValueError,
'Please provide a model for this generator'):
self._gen.generate(
example=self._example,
model=None,
dataset=self._dataset,
config=self._config)
def test_max_number_of_records(self):
"""Tests that a client can specify a desired number of flips to return."""
self._config['Number of examples'] = '2'
result = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
self.assertLen(result, 2)
def test_text_fields_equal_to_target(self):
"""Tests that non-scalar non-categorical features has correct value.
The values of non-scalar, non-categorical features should be the same as in
the input example.
"""
output = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
s = {o['description'] for o in output}
self.assertLen(s, 1)
self.assertIn('big water animal', s)
def test_mtc_prediction_is_argmax(self):
output = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
y_actual = output[0]['animal']
y_expected = self._predict_and_return_argmax_label(output[0])
self.assertEqual(y_actual, y_expected)
def test_output_is_counterfactuals(self):
"""Tests that the returned values are indeed counterfactuals."""
output = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
self.assertGreaterEqual(len(output), 1)
target_prediction = self._predict_and_return_argmax_label(self._example)
for cf_example in output:
cf_prediction = self._predict_and_return_argmax_label(cf_example)
self.assertNotEqual(cf_prediction, target_prediction)
def test_config_spec(self):
"""Tests that the generator returns spec with correct fields."""
spec = self._gen.config_spec()
self.assertIn('Number of examples', spec)
self.assertIn('Maximum number of columns to change', spec)
self.assertIn('Regression threshold', spec)
self.assertIn('Prediction key', spec)
def test_example_field_is_none(self):
"""Tests the case when a feature is assigned None value."""
self._example['weight'] = None
output = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
self.assertNotEmpty(output)
def _predict_and_return_argmax_label(self, example):
"""Given an example, returns the index of the top prediction."""
model_out = self._model.predict([example])
softmax = list(model_out)[0]['preds']
argmax = np.argmax(softmax)
return self._model.output_spec()['preds'].vocab[argmax]
class RegressionTabularMtcTest(absltest.TestCase):
"""Tests tabular hot-flips with regression models."""
def setUp(self):
super().setUp()
dataset = lit_dataset.IndexedDataset(
base=RegressionTestDataset(), id_fn=caching.input_hash)
self._dataset = dataset
self._model = RegressionTestModel()
self._gen = minimal_targeted_counterfactuals.TabularMTC()
self._example = {'x_1': 1.0, 'x_2': 1.0}
self._config = {
'Prediction key': 'score',
'dataset_name': 'regression_test_dataset'
}
def test_test_regression_model(self):
"""Tests the predict method of the regression model."""
model = RegressionTestModel()
example = {'x_1': 3, 'x_2': 2}
pred = list(model.predict([example]))[0]
self.assertEqual(pred['score'], 20)
def test_output_is_below_threshold_counterfactuals(self):
"""Tests the case when the target prediction is above the threshold.
If the target (reference) prediction is above the decision boundary
threshold, the predictions for all counterfactuals should be below the
threshold.
"""
threshold = 2.8
self._config['Regression threshold'] = str(threshold)
self._example = {'x_1': 1.0, 'x_2': 1.0}
output = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
target_score = self._predict_and_return_score(self._example)
self.assertGreaterEqual(target_score, threshold)
self.assertNotEmpty(output)
for cf_example in output:
cf_score = self._predict_and_return_score(cf_example)
self.assertLess(cf_score, threshold)
def test_output_is_above_threshold_counterfactuals(self):
"""Tests the case when the target prediction is below the threshold.
If the target (reference) prediction is below the decision boundary
    threshold, the predictions for all counterfactuals should be above or equal to
the threshold.
"""
threshold = 0.1
self._config['Regression threshold'] = str(threshold)
self._example = {'x_1': 0.0, 'x_2': -5.0}
output = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
target_score = self._predict_and_return_score(self._example)
self.assertLess(target_score, threshold)
self.assertNotEmpty(output)
for cf_example in output:
cf_score = self._predict_and_return_score(cf_example)
self.assertGreaterEqual(cf_score, threshold)
def test_no_counterfactuals_found(self):
"""Tests the case when there no counterfactuals in the database."""
threshold = 4.0
self._config['Regression threshold'] = str(threshold)
self._example = {'x_1': 1.0, 'x_2': 1.0}
output = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
self.assertEmpty(output)
def test_max_num_of_changed_columns(self):
"""Tests the client can set the number of features that can be changed."""
self._config['Regression threshold'] = '0.25'
self._config['Maximum number of columns to change'] = '1'
self._example = {'x_1': 0.3, 'x_2': 0.3}
output_1 = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
self._config['Maximum number of columns to change'] = '2'
output_2 = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
self.assertNotEmpty(output_1)
self.assertNotEmpty(output_2)
self.assertGreater(len(output_2), len(output_1))
def test_parent_field_updated(self):
threshold = 0.8
self._config['Regression threshold'] = str(threshold)
self._example = {'x_1': 0.0, 'x_2': 0.0}
output = self._gen.generate(
example=self._example,
model=self._model,
dataset=self._dataset,
config=self._config)
y_actual = output[0]['y']
y_expected = self._predict_and_return_score(output[0])
self.assertEqual(y_actual, y_expected)
def _predict_and_return_score(self, example):
"""Given an example, returns the regression score."""
model_out = self._model.predict([example])
return list(model_out)[0]['score']
if __name__ == '__main__':
absltest.main()
| |
# -*- coding: utf-8 -*-
#
# Copyright 2015 MarkLogic Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File History
# ------------
#
# Norman Walsh 05/07/2015 Initial development
"""
Classes for dealing with database merge blackouts
"""
class MergeBlackout:
"""
A merge blackout period. This is an abstract class.
"""
def __init__(self):
raise ValueError("Do not instantiate MergeBlackout directly")
def blackout_type(self):
"""
The blackout type.
"""
        return self._config['blackout-type']
def limit(self):
"""
The limit.
"""
return self._config['limit']
def merge_priority(self):
"""
The merge priority.
"""
return self._config['merge-priority']
@classmethod
def recurringDuration(cls, priority, limit, days, start_time, duration):
"""
Create a recurring blackout with a duration.
"""
# FIXME: validate args
return MergeBlackoutRecurringDuration(priority,limit,days,
start_time, duration)
@classmethod
def recurringStartEnd(cls, priority, limit, days, start_time, end_time):
"""
Create a recurring blackout with a start and end time.
"""
# FIXME: validate args
return MergeBlackoutRecurringStartEnd(priority,limit,days,
start_time, end_time)
@classmethod
def recurringAllDay(cls, priority, limit, days):
"""
Create a recurring blackout that lasts all day.
"""
# FIXME: validate args
return MergeBlackoutRecurringAllDay(priority,limit, days)
@classmethod
def oneTimeDuration(cls, priority, limit, start_date, start_time, duration):
"""
Create a one-time blackout with a duration.
"""
# FIXME: validate args
return MergeBlackoutOneTimeDuration(priority,limit,
start_date, start_time, duration)
@classmethod
def oneTimeStartEnd(cls, priority, limit,
start_date, start_time,
end_date, end_time):
"""
Create a one-time blackout with a start and end time.
"""
# FIXME: validate args
return MergeBlackoutOneTimeStartEnd(priority,limit,
start_date, start_time,
end_date, end_time)
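# A minimal usage sketch (the argument values are illustrative assumptions,
# not values mandated by this module):
#
#   blackout = MergeBlackout.recurringDuration("normal", None,
#                                              ["saturday", "sunday"],
#                                              "02:00:00", "PT4H")
#   blackout.merge_priority()   # -> "normal"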
class MergeBlackoutRecurringDuration(MergeBlackout):
"""
A recurring merge blackout period for a duration
"""
def __init__(self, priority, limit, days, start_time, duration):
"""
Create a recurring merge blackout period for a duration
"""
self._config = {
'blackout-type': 'recurring',
'merge-priority': priority,
'limit': limit,
'day': days,
'period': {
'start-time': start_time,
'duration': duration
}
}
    def days(self):
        """
        The days.
        """
        return self._config['day']
    def start_time(self):
        """
        The start time.
        """
        return self._config['period']['start-time']
    def duration(self):
        """
        The duration.
        """
        return self._config['period']['duration']
class MergeBlackoutRecurringStartEnd(MergeBlackout):
"""
A recurring merge blackout period with start and end times
"""
def __init__(self, priority, limit, days, start_time, end_time):
"""
Create a recurring merge blackout period with start and end times
"""
self._config = {
'blackout-type': "recurring",
'merge-priority': priority,
'limit': limit,
'day': days,
'period': {
'start-time': start_time,
'end-time': end_time
}
}
    def days(self):
        """
        The days.
        """
        return self._config['day']
    def start_time(self):
        """
        The start time.
        """
        return self._config['period']['start-time']
    def end_time(self):
        """
        The end time.
        """
        return self._config['period']['end-time']
class MergeBlackoutRecurringAllDay(MergeBlackout):
"""
A recurring merge blackout period for a whole day
"""
def __init__(self, priority, limit, days):
"""
Create a recurring merge blackout period for a whole day
"""
self._config = {
'blackout-type': "recurring",
'merge-priority': priority,
'limit': limit,
'day': days,
'period': None
}
    def days(self):
        """
        The days.
        """
        return self._config['day']
class MergeBlackoutOneTimeDuration(MergeBlackout):
"""
A one time merge blackout period with a duration
"""
def __init__(self, priority, limit, start_date, start_time, duration):
"""
Create a one time merge blackout period with a duration
"""
self._config = {
'blackout-type': "once",
'merge-priority': priority,
'limit': limit,
'period': {
'start-date': start_date,
'start-time': start_time,
'duration': duration
}
}
    def start_date(self):
"""
The start date.
"""
return self._config['period']['start-date']
    def start_time(self):
"""
The start time.
"""
return self._config['period']['start-time']
    def duration(self):
"""
The duration.
"""
return self._config['period']['duration']
class MergeBlackoutOneTimeStartEnd(MergeBlackout):
"""
A one time merge blackout period with start and end times
"""
def __init__(self, priority, limit, start_date, start_time, end_date, end_time):
"""
Create a one time merge blackout period with start and end times
"""
self._config = {
'blackout-type': "once",
'merge-priority': priority,
'limit': limit,
'period': {
'start-date': start_date,
'start-time': start_time,
'end-date': end_date,
'end-time': end_time,
}
}
    def start_date(self):
"""
The start date.
"""
return self._config['period']['start-date']
    def start_time(self):
"""
The start time.
"""
return self._config['period']['start-time']
    def end_date(self):
"""
The end date.
"""
return self._config['period']['end-date']
    def end_time(self):
"""
The end time.
"""
return self._config['period']['end-time']
| |
#===============================================================================
# Imports
#===============================================================================
import sys
import ctypes
from .wintypes import *
#===============================================================================
# Globals/Aliases
#===============================================================================
PPYTHON = PVOID
PPYTRACEFUNC = PVOID
PUSERDATA = PVOID
#===============================================================================
# Classes
#===============================================================================
class TRACE_STORE_METADATA(Structure):
_fields_ = [
('NumberOfRecords', ULARGE_INTEGER),
('RecordSize', LARGE_INTEGER),
]
PTRACE_STORE_METADATA = POINTER(TRACE_STORE_METADATA)
class _TRACE_STORE_METADATA(Union):
_fields_ = [
('Metadata', TRACE_STORE_METADATA),
('pMetadata', PTRACE_STORE_METADATA),
]
class TRACE_STORE_MEMORY_MAP(Structure):
_fields_ = [
('SlimReadWriteLock', SRWLOCK),
('MappingHandle', HANDLE),
('MappingSize', LARGE_INTEGER),
('BaseAddress', PVOID),
('ExtendAtAddress', PVOID),
('EndAddress', PVOID),
('PrevAddress', PVOID),
('NextAddress', PVOID),
]
PTRACE_STORE_MEMORY_MAP = POINTER(TRACE_STORE_MEMORY_MAP)
class TRACE_STORE(Structure):
_fields_ = [
('TraceStores', PVOID),
('FileHandle', HANDLE),
('InitialSize', LARGE_INTEGER),
('ExtensionSize', LARGE_INTEGER),
('FileInfo', FILE_STANDARD_INFO),
('CriticalSection', PCRITICAL_SECTION),
('DroppedRecords', ULONG),
('MemoryMap', TRACE_STORE_MEMORY_MAP),
('NextMemoryMap', TRACE_STORE_MEMORY_MAP),
('MetadataStore', PVOID),
('AllocateRecords', PVOID),
('', _TRACE_STORE_METADATA),
]
PTRACE_STORE = POINTER(TRACE_STORE)
class TRACE_STORES(Structure):
_fields_ = [
('Size', USHORT),
('NumberOfTraceStores', USHORT),
('Reserved', ULONG),
('Events', TRACE_STORE),
('Frames', TRACE_STORE),
('Modules', TRACE_STORE),
('Functions', TRACE_STORE),
('Exceptions', TRACE_STORE),
('Lines', TRACE_STORE),
('EventsMetadata', TRACE_STORE),
('FramesMetadata', TRACE_STORE),
('ModulesMetadata', TRACE_STORE),
('FunctionsMetadata', TRACE_STORE),
('ExceptionsMetadata', TRACE_STORE),
('LinesMetadata', TRACE_STORE),
]
PTRACE_STORES = POINTER(TRACE_STORES)
class TRACE_SESSION(Structure):
_fields_ = [
('Size', DWORD),
('SessionId', LARGE_INTEGER),
('MachineGuid', GUID),
('Sid', PVOID),
('UserName', PCWSTR),
('ComputerName', PCWSTR),
('DomainName', PCWSTR),
('SystemTime', FILETIME),
]
PTRACE_SESSION = POINTER(TRACE_SESSION)
class TRACE_CONTEXT(Structure):
_fields_ = [
('Size', ULONG),
('SequenceId', ULONG),
('TraceSession', POINTER(TRACE_SESSION)),
('TraceStores', POINTER(TRACE_STORES)),
('SystemTimerFunction', PVOID),
('UserData', PVOID),
]
PTRACE_CONTEXT = POINTER(TRACE_CONTEXT)
class PYTHON_TRACE_CONTEXT(Structure):
_fields_ = [
('Size', ULONG),
('Python', PPYTHON),
('TraceContext', PTRACE_CONTEXT),
('PythonTraceFunction', PVOID),
('UserData', PVOID),
]
PPYTHON_TRACE_CONTEXT = POINTER(PYTHON_TRACE_CONTEXT)
#===============================================================================
# Functions
#===============================================================================
def vspyprof(path=None, dll=None):
assert path or dll
if not dll:
dll = ctypes.PyDLL(path)
dll.CreateProfiler.restype = c_void_p
dll.CreateCustomProfiler.restype = c_void_p
dll.CreateCustomProfiler.argtypes = [c_void_p, ctypes.c_void_p]
dll.CloseThread.argtypes = [c_void_p]
dll.CloseProfiler.argtypes = [c_void_p]
dll.InitProfiler.argtypes = [c_void_p]
dll.InitProfiler.restype = c_void_p
#dll.SetTracing.argtypes = [c_void_p]
#dll.UnsetTracing.argtypes = [c_void_p]
#dll.IsTracing.argtypes = [c_void_p]
#dll.IsTracing.restype = c_bool
return dll
def pytrace(path=None, dll=None):
assert path or dll
dll = vspyprof(path, dll)
dll.CreateTracer.restype = PVOID
dll.CreateTracer.argtypes = [PVOID, PVOID]
dll.InitializeTraceStores.restype = BOOL
dll.InitializeTraceStores.argtypes = [
PWSTR,
PVOID,
PDWORD,
PDWORD,
]
return dll
def tracer(path=None, dll=None):
assert path or dll
if not dll:
dll = ctypes.PyDLL(path)
dll.InitializeTraceStores.restype = BOOL
dll.InitializeTraceStores.argtypes = [
PWSTR,
PVOID,
PDWORD,
PDWORD,
]
dll.InitializeTraceContext.restype = BOOL
dll.InitializeTraceContext.argtypes = [
PTRACE_CONTEXT,
PDWORD,
PTRACE_SESSION,
PTRACE_STORES,
PVOID,
]
dll.InitializeTraceSession.restype = BOOL
dll.InitializeTraceSession.argtypes = [
PTRACE_SESSION,
PDWORD
]
#dll.CallSystemTimer.restype = BOOL
#dll.CallSystemTimer.argtypes = [
# PFILETIME,
# PVOID,
#]
return dll
def python(path=None, dll=None):
assert path or dll
if not dll:
dll = ctypes.PyDLL(path)
dll.InitializePython.restype = BOOL
dll.InitializePython.argtypes = [
HMODULE,
PVOID,
PDWORD
]
return dll
def pythontracer(path=None, dll=None):
assert path or dll
if not dll:
dll = ctypes.PyDLL(path)
dll.InitializePythonTraceContext.restype = BOOL
dll.InitializePythonTraceContext.argtypes = [
PPYTHON_TRACE_CONTEXT,
PULONG,
PPYTHON,
PTRACE_CONTEXT,
PPYTRACEFUNC,
PUSERDATA
]
dll.StartTracing.restype = BOOL
dll.StartTracing.argtypes = [ PPYTHON_TRACE_CONTEXT, ]
dll.StopTracing.restype = BOOL
dll.StopTracing.argtypes = [ PPYTHON_TRACE_CONTEXT, ]
return dll
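# Hypothetical usage sketch (the DLL path is an assumption; no DLL ships with
# this module):
#
#   dll = pythontracer('PythonTracer.dll')
#   ctx = PYTHON_TRACE_CONTEXT()
#   size = ULONG(ctypes.sizeof(PYTHON_TRACE_CONTEXT))
#   # fill in the Python/TraceContext pointers, then:
#   # dll.InitializePythonTraceContext(ctypes.byref(ctx), ctypes.byref(size), ...)
#   # dll.StartTracing(ctypes.byref(ctx))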
# vim:set ts=8 sw=4 sts=4 tw=80 et :
| |
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ')
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
    ...     raise TypeError(repr(obj) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.3.3'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
'OrderedDict',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from decimal import Decimal
from decoder import JSONDecoder, JSONDecodeError
from encoder import JSONEncoder
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import ordered_dict
return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
try:
from simplejson._speedups import make_encoder
return make_encoder
except ImportError:
return None
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
use_decimal=True,
namedtuple_as_object=True,
tuple_as_array=True,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True,
namedtuple_as_object=True, tuple_as_array=True,
**kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
    If ``ensure_ascii`` is false, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If *indent* is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *use_decimal* is true (default: ``True``) then decimal.Decimal
will be natively serialized to JSON with full precision.
If *namedtuple_as_object* is true (default: ``True``),
:class:`tuple` subclasses with ``_asdict()`` methods will be encoded
as JSON objects.
If *tuple_as_array* is true (default: ``True``),
:class:`tuple` (and subclasses) will be encoded as JSON arrays.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and use_decimal
and namedtuple_as_object and tuple_as_array and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, use_decimal=use_decimal,
namedtuple_as_object=namedtuple_as_object,
tuple_as_array=tuple_as_array,
**kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True,
namedtuple_as_object=True,
tuple_as_array=True,
**kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
    If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *use_decimal* is true (default: ``True``) then decimal.Decimal
will be natively serialized to JSON with full precision.
If *namedtuple_as_object* is true (default: ``True``),
:class:`tuple` subclasses with ``_asdict()`` methods will be encoded
as JSON objects.
If *tuple_as_array* is true (default: ``True``),
:class:`tuple` (and subclasses) will be encoded as JSON arrays.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
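    # (i.e. when every argument still has its default value we can reuse the
    # module-level _default_encoder instead of building a new JSONEncoder per
    # call)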
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and use_decimal
and namedtuple_as_object and tuple_as_array and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
use_decimal=use_decimal,
namedtuple_as_object=namedtuple_as_object,
tuple_as_array=tuple_as_array,
**kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, namedtuple_as_object=True, tuple_as_array=True,
**kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
use_decimal=use_decimal, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and object_pairs_hook is None
and not use_decimal and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if object_pairs_hook is not None:
kw['object_pairs_hook'] = object_pairs_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
if use_decimal:
if parse_float is not None:
raise TypeError("use_decimal=True implies parse_float=Decimal")
kw['parse_float'] = Decimal
return cls(encoding=encoding, **kw).decode(s)
def _toggle_speedups(enabled):
import simplejson.decoder as dec
import simplejson.encoder as enc
import simplejson.scanner as scan
c_make_encoder = _import_c_make_encoder()
if enabled:
dec.scanstring = dec.c_scanstring or dec.py_scanstring
enc.c_make_encoder = c_make_encoder
enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
enc.py_encode_basestring_ascii)
scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
else:
dec.scanstring = dec.py_scanstring
enc.c_make_encoder = None
enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
scan.make_scanner = scan.py_make_scanner
dec.make_scanner = scan.make_scanner
global _default_decoder
_default_decoder = JSONDecoder(
encoding=None,
object_hook=None,
object_pairs_hook=None,
)
global _default_encoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
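# Illustrative note: _toggle_speedups(False) forces the pure-Python scanner and
# encoder (handy when debugging decode errors), while _toggle_speedups(True)
# re-enables the C extension when it is importable.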
| |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library to train Inception using multiple GPU's with synchronous updates.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from datetime import datetime
import os.path
import re
import time
import numpy as np
import tensorflow as tf
from inception import image_processing
from inception import inception_model as inception
from inception.slim import slim
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/imagenet_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 10000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_string('subset', 'train',
"""Either 'train' or 'validation'.""")
# Flags governing the hardware employed for running TensorFlow.
tf.app.flags.DEFINE_integer('num_gpus', 1,
"""How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
# Flags governing the type of training.
tf.app.flags.DEFINE_boolean('fine_tune', False,
"""If set, randomly initialize the final layer """
"""of weights in order to train the network on a """
"""new task.""")
tf.app.flags.DEFINE_string('pretrained_model_checkpoint_path', '',
"""If specified, restore this pretrained model """
"""before beginning any training.""")
# **IMPORTANT**
# Please note that this learning rate schedule is heavily dependent on the
# hardware architecture, batch size and any changes to the model architecture
# specification. Selecting a finely tuned learning rate schedule is an
# empirical process that requires some experimentation. Please see README.md
# for more guidance and discussion.
#
# With 8 Tesla K40's and a batch size = 256, the following setup achieves
# precision@1 = 73.5% after 100 hours and 100K steps (20 epochs).
# Learning rate decay factor selected from http://arxiv.org/abs/1404.5997.
tf.app.flags.DEFINE_float('initial_learning_rate', 0.1,
"""Initial learning rate.""")
tf.app.flags.DEFINE_float('num_epochs_per_decay', 30.0,
"""Epochs after which learning rate decays.""")
tf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.16,
"""Learning rate decay factor.""")
# Constants dictating the learning rate schedule.
RMSPROP_DECAY = 0.9 # Decay term for RMSProp.
RMSPROP_MOMENTUM = 0.9 # Momentum in RMSProp.
RMSPROP_EPSILON = 1.0 # Epsilon term for RMSProp.
def _tower_loss(images, labels, num_classes, scope):
"""Calculate the total loss on a single tower running the ImageNet model.
We perform 'batch splitting'. This means that we cut up a batch across
  multiple GPUs. For instance, if the batch size = 32 and num_gpus = 2,
  then each tower will operate on a batch of 16 images.
Args:
images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
FLAGS.image_size, 3].
labels: 1-D integer Tensor of [batch_size].
num_classes: number of classes
scope: unique prefix string identifying the ImageNet tower, e.g.
'tower_0'.
Returns:
Tensor of shape [] containing the total loss for a batch of data
"""
# When fine-tuning a model, we do not restore the logits but instead we
# randomly initialize the logits. The number of classes in the output of the
# logit is the number of classes in specified Dataset.
restore_logits = not FLAGS.fine_tune
# Build inference Graph.
logits = inception.inference(images, num_classes, for_training=True,
restore_logits=restore_logits,
scope=scope)
# Build the portion of the Graph calculating the losses. Note that we will
# assemble the total_loss using a custom function below.
split_batch_size = images.get_shape().as_list()[0]
inception.loss(logits, labels, batch_size=split_batch_size)
# Assemble all of the losses for the current tower only.
losses = tf.get_collection(slim.losses.LOSSES_COLLECTION, scope)
# Calculate the total loss for the current tower.
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = tf.add_n(losses + regularization_losses, name='total_loss')
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
loss_averages_op = loss_averages.apply(losses + [total_loss])
  # Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on TensorBoard.
loss_name = re.sub('%s_[0-9]*/' % inception.TOWER_NAME, '', l.op.name)
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.scalar_summary(loss_name +' (raw)', l)
tf.scalar_summary(loss_name, loss_averages.average(l))
with tf.control_dependencies([loss_averages_op]):
total_loss = tf.identity(total_loss)
return total_loss
def _average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(0, grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
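# For instance, with two towers _average_gradients turns
#   [[(g0_a, v_a), (g0_b, v_b)], [(g1_a, v_a), (g1_b, v_b)]]
# into [((g0_a + g1_a) / 2, v_a), ((g0_b + g1_b) / 2, v_b)], taking each
# variable from the first tower since the Variables are shared across towers.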
def train(dataset):
"""Train on dataset for a number of steps."""
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Create a variable to count the number of train() calls. This equals the
# number of batches processed * FLAGS.num_gpus.
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
# Calculate the learning rate schedule.
num_batches_per_epoch = (dataset.num_examples_per_epoch() /
FLAGS.batch_size)
decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay)
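    # (e.g. with the ~1.28M-image ImageNet training set and batch_size=256,
    # num_batches_per_epoch is about 5000, so decay_steps is roughly 150000)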
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,
global_step,
decay_steps,
FLAGS.learning_rate_decay_factor,
staircase=True)
# Create an optimizer that performs gradient descent.
opt = tf.train.RMSPropOptimizer(lr, RMSPROP_DECAY,
momentum=RMSPROP_MOMENTUM,
epsilon=RMSPROP_EPSILON)
# Get images and labels for ImageNet and split the batch across GPUs.
assert FLAGS.batch_size % FLAGS.num_gpus == 0, (
'Batch size must be divisible by number of GPUs')
split_batch_size = int(FLAGS.batch_size / FLAGS.num_gpus)
# Override the number of preprocessing threads to account for the increased
# number of GPU towers.
num_preprocess_threads = FLAGS.num_preprocess_threads * FLAGS.num_gpus
images, labels = image_processing.distorted_inputs(
dataset,
num_preprocess_threads=num_preprocess_threads)
input_summaries = copy.copy(tf.get_collection(tf.GraphKeys.SUMMARIES))
# Number of classes in the Dataset label set plus 1.
# Label 0 is reserved for an (unused) background class.
num_classes = dataset.num_classes() + 1
# Split the batch of images and labels for towers.
images_splits = tf.split(0, FLAGS.num_gpus, images)
labels_splits = tf.split(0, FLAGS.num_gpus, labels)
# Calculate the gradients for each model tower.
tower_grads = []
for i in xrange(FLAGS.num_gpus):
with tf.device('/gpu:%d' % i):
with tf.name_scope('%s_%d' % (inception.TOWER_NAME, i)) as scope:
# Force all Variables to reside on the CPU.
with slim.arg_scope([slim.variables.variable], device='/cpu:0'):
# Calculate the loss for one tower of the ImageNet model. This
# function constructs the entire ImageNet model but shares the
# variables across all towers.
loss = _tower_loss(images_splits[i], labels_splits[i], num_classes,
scope)
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
# Retain the summaries from the final tower.
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
# Retain the Batch Normalization updates operations only from the
# final tower. Ideally, we should grab the updates from all towers
# but these stats accumulate extremely fast so we can ignore the
# other stats from the other towers without significant detriment.
batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION,
scope)
# Calculate the gradients for the batch of data on this ImageNet
# tower.
grads = opt.compute_gradients(loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = _average_gradients(tower_grads)
    # Add summaries for the input processing and global_step.
summaries.extend(input_summaries)
# Add a summary to track the learning rate.
summaries.append(tf.scalar_summary('learning_rate', lr))
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
summaries.append(
tf.histogram_summary(var.op.name + '/gradients', grad))
# Apply the gradients to adjust the shared variables.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
summaries.append(tf.histogram_summary(var.op.name, var))
# Track the moving averages of all trainable variables.
# Note that we maintain a "double-average" of the BatchNormalization
    # global statistics. This is more complicated than it needs to be, but we employ
# this for backward-compatibility with our previous models.
variable_averages = tf.train.ExponentialMovingAverage(
inception.MOVING_AVERAGE_DECAY, global_step)
    # Another possibility is to use tf.slim.get_variables().
variables_to_average = (tf.trainable_variables() +
tf.moving_average_variables())
variables_averages_op = variable_averages.apply(variables_to_average)
    # Group all updates into a single train op.
batchnorm_updates_op = tf.group(*batchnorm_updates)
train_op = tf.group(apply_gradient_op, variables_averages_op,
batchnorm_updates_op)
# Create a saver.
saver = tf.train.Saver(tf.all_variables())
# Build the summary operation from the last tower summaries.
summary_op = tf.merge_summary(summaries)
# Build an initialization operation to run below.
init = tf.initialize_all_variables()
# Start running operations on the Graph. allow_soft_placement must be set to
# True to build towers on GPU, as some of the ops do not have GPU
# implementations.
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement))
sess.run(init)
if FLAGS.pretrained_model_checkpoint_path:
assert tf.gfile.Exists(FLAGS.pretrained_model_checkpoint_path)
variables_to_restore = tf.get_collection(
slim.variables.VARIABLES_TO_RESTORE)
restorer = tf.train.Saver(variables_to_restore)
restorer.restore(sess, FLAGS.pretrained_model_checkpoint_path)
print('%s: Pre-trained model restored from %s' %
(datetime.now(), FLAGS.pretrained_model_checkpoint_path))
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.train.SummaryWriter(
FLAGS.train_dir,
graph_def=sess.graph.as_graph_def(add_shapes=True))
for step in xrange(FLAGS.max_steps):
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
examples_per_sec = FLAGS.batch_size / float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print(format_str % (datetime.now(), step, loss_value,
examples_per_sec, duration))
if step % 100 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 500 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
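# NOTE (illustrative sketch, not part of the original script): the helper
# `_average_gradients` called above is defined elsewhere in this file. The
# standard multi-GPU implementation of that synchronization step looks
# roughly like the sketch below; treat the name and details as assumptions.
def _average_gradients_sketch(tower_grads):
  """Average per-tower gradients; this is the cross-tower synchronization."""
  average_grads = []
  for grad_and_vars in zip(*tower_grads):
    # grad_and_vars is ((grad_tower0, var), (grad_tower1, var), ...).
    grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
    grad = tf.reduce_mean(tf.concat(0, grads), 0)
    # Variables are shared across towers, so the first tower's var suffices.
    average_grads.append((grad, grad_and_vars[0][1]))
  return average_grads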
| |
# -*- coding: utf-8 -*-
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for projectq.meta._compute.py"""
import types
import weakref
import pytest
from projectq import MainEngine
from projectq.cengines import CompareEngine, DummyEngine
from projectq.meta import DirtyQubitTag, _compute
from projectq.ops import CNOT, NOT, Allocate, Deallocate, FlushGate, H, Rx, Ry
def test_compute_tag():
tag0 = _compute.ComputeTag()
tag1 = _compute.ComputeTag()
class MyTag(object):
pass
assert not tag0 == MyTag()
assert not tag0 != tag1
assert tag0 == tag1
def test_uncompute_tag():
tag0 = _compute.UncomputeTag()
tag1 = _compute.UncomputeTag()
class MyTag(object):
pass
assert not tag0 == MyTag()
assert not tag0 != tag1
assert tag0 == tag1
def test_compute_engine():
backend = DummyEngine(save_commands=True)
compute_engine = _compute.ComputeEngine()
eng = MainEngine(backend=backend, engine_list=[compute_engine])
ancilla = eng.allocate_qubit() # Ancilla
H | ancilla
Rx(0.6) | ancilla
ancilla[0].__del__()
    # Test that adding a new tag to one of the previous commands later on
    # does not add this tag to the commands saved in compute_engine, because
    # the compute engine stores a deepcopy of each command, not a reference.
assert backend.received_commands[1].gate == H
backend.received_commands[1].tags.append("TagAddedLater")
assert backend.received_commands[1].tags[-1] == "TagAddedLater"
compute_engine.end_compute()
new_qubit = eng.allocate_qubit()
Ry(0.5) | new_qubit
compute_engine.run_uncompute()
eng.flush()
assert backend.received_commands[0].gate == Allocate
assert backend.received_commands[0].tags == [_compute.ComputeTag()]
assert backend.received_commands[1].gate == H
assert backend.received_commands[1].tags == [_compute.ComputeTag(), "TagAddedLater"]
assert backend.received_commands[2].gate == Rx(0.6)
assert backend.received_commands[2].tags == [_compute.ComputeTag()]
assert backend.received_commands[3].gate == Deallocate
assert backend.received_commands[3].tags == [_compute.ComputeTag()]
assert backend.received_commands[4].gate == Allocate
assert backend.received_commands[4].tags == []
assert backend.received_commands[5].gate == Ry(0.5)
assert backend.received_commands[5].tags == []
assert backend.received_commands[6].gate == Allocate
assert backend.received_commands[6].tags == [_compute.UncomputeTag()]
assert backend.received_commands[7].gate == Rx(-0.6)
assert backend.received_commands[7].tags == [_compute.UncomputeTag()]
assert backend.received_commands[8].gate == H
assert backend.received_commands[8].tags == [_compute.UncomputeTag()]
assert backend.received_commands[9].gate == Deallocate
assert backend.received_commands[9].tags == [_compute.UncomputeTag()]
def test_uncompute_engine():
backend = DummyEngine(save_commands=True)
uncompute_engine = _compute.UncomputeEngine()
eng = MainEngine(backend=backend, engine_list=[uncompute_engine])
qubit = eng.allocate_qubit()
H | qubit
assert backend.received_commands[0].gate == Allocate
assert backend.received_commands[0].tags == [_compute.UncomputeTag()]
assert backend.received_commands[1].gate == H
assert backend.received_commands[1].tags == [_compute.UncomputeTag()]
def test_outside_qubit_deallocated_in_compute():
    # Test that an error is raised if a qubit which was not allocated
    # within the `with Compute(eng)` context is deallocated inside it
eng = MainEngine(backend=DummyEngine(), engine_list=[DummyEngine()])
qubit = eng.allocate_qubit()
with pytest.raises(_compute.QubitManagementError):
with _compute.Compute(eng):
qubit[0].__del__()
def test_deallocation_using_custom_uncompute():
# Test that qubits allocated within Compute and Uncompute
# section have all been deallocated
eng = MainEngine(backend=DummyEngine(), engine_list=[DummyEngine()])
# Allowed versions:
with _compute.Compute(eng):
ancilla = eng.allocate_qubit()
ancilla[0].__del__()
with _compute.CustomUncompute(eng):
ancilla2 = eng.allocate_qubit()
ancilla2[0].__del__()
with _compute.Compute(eng):
ancilla3 = eng.allocate_qubit()
with _compute.CustomUncompute(eng):
ancilla3[0].__del__()
def test_deallocation_using_custom_uncompute2():
# Test not allowed version:
eng = MainEngine(backend=DummyEngine(), engine_list=[DummyEngine()])
with _compute.Compute(eng):
ancilla = eng.allocate_qubit()
with pytest.raises(_compute.QubitManagementError):
with _compute.CustomUncompute(eng):
pass
H | ancilla
def test_deallocation_using_custom_uncompute3():
# Test not allowed version:
eng = MainEngine(backend=DummyEngine(), engine_list=[DummyEngine()])
with _compute.Compute(eng):
pass
with pytest.raises(_compute.QubitManagementError):
with _compute.CustomUncompute(eng):
ancilla = eng.allocate_qubit()
H | ancilla
def test_automatic_deallocation_of_qubit_in_uncompute():
# Test that automatic uncomputation deallocates qubit
# which was created during compute context.
backend = DummyEngine(save_commands=True)
eng = MainEngine(backend=backend, engine_list=[DummyEngine()])
with _compute.Compute(eng):
ancilla = eng.allocate_qubit()
assert ancilla[0].id != -1
Rx(0.6) | ancilla
# Test that ancilla qubit has been registered in MainEngine.active_qubits
assert ancilla[0] in eng.active_qubits
_compute.Uncompute(eng)
# Test that ancilla id has been set to -1
assert ancilla[0].id == -1
    # Test that ancilla is no longer in active qubits
assert not ancilla[0] in eng.active_qubits
assert backend.received_commands[1].gate == Rx(0.6)
assert backend.received_commands[2].gate == Rx(-0.6)
# Test that there are no additional deallocate gates
assert len(backend.received_commands) == 4
def test_compute_uncompute_no_additional_qubits():
# No ancilla qubit created in compute section
backend0 = DummyEngine(save_commands=True)
compare_engine0 = CompareEngine()
eng0 = MainEngine(backend=backend0, engine_list=[compare_engine0])
qubit = eng0.allocate_qubit()
with _compute.Compute(eng0):
Rx(0.5) | qubit
H | qubit
_compute.Uncompute(eng0)
eng0.flush(deallocate_qubits=True)
assert backend0.received_commands[0].gate == Allocate
assert backend0.received_commands[1].gate == Rx(0.5)
assert backend0.received_commands[2].gate == H
assert backend0.received_commands[3].gate == Rx(-0.5)
assert backend0.received_commands[4].gate == Deallocate
assert backend0.received_commands[0].tags == []
assert backend0.received_commands[1].tags == [_compute.ComputeTag()]
assert backend0.received_commands[2].tags == []
assert backend0.received_commands[3].tags == [_compute.UncomputeTag()]
assert backend0.received_commands[4].tags == []
# Same using CustomUncompute and test using CompareEngine
backend1 = DummyEngine(save_commands=True)
compare_engine1 = CompareEngine()
eng1 = MainEngine(backend=backend1, engine_list=[compare_engine1])
qubit = eng1.allocate_qubit()
with _compute.Compute(eng1):
Rx(0.5) | qubit
H | qubit
with _compute.CustomUncompute(eng1):
Rx(-0.5) | qubit
eng1.flush(deallocate_qubits=True)
assert compare_engine0 == compare_engine1
def test_compute_uncompute_with_statement():
# Allocating and deallocating qubit within Compute
backend = DummyEngine(save_commands=True)
compare_engine0 = CompareEngine()
# Allow dirty qubits
dummy_cengine = DummyEngine()
def allow_dirty_qubits(self, meta_tag):
return meta_tag == DirtyQubitTag
dummy_cengine.is_meta_tag_handler = types.MethodType(allow_dirty_qubits, dummy_cengine)
eng = MainEngine(backend=backend, engine_list=[compare_engine0, dummy_cengine])
qubit = eng.allocate_qubit()
with _compute.Compute(eng):
Rx(0.9) | qubit
ancilla = eng.allocate_qubit(dirty=True)
# ancilla2 will be deallocated in Uncompute section:
ancilla2 = eng.allocate_qubit()
# Test that ancilla is registered in MainEngine.active_qubits:
assert ancilla[0] in eng.active_qubits
H | qubit
Rx(0.5) | ancilla
CNOT | (ancilla, qubit)
Rx(0.7) | qubit
Rx(-0.5) | ancilla
ancilla[0].__del__()
H | qubit
_compute.Uncompute(eng)
eng.flush(deallocate_qubits=True)
assert len(backend.received_commands) == 22
# Test each Command has correct gate
assert backend.received_commands[0].gate == Allocate
assert backend.received_commands[1].gate == Rx(0.9)
assert backend.received_commands[2].gate == Allocate
assert backend.received_commands[3].gate == Allocate
assert backend.received_commands[4].gate == H
assert backend.received_commands[5].gate == Rx(0.5)
assert backend.received_commands[6].gate == NOT
assert backend.received_commands[7].gate == Rx(0.7)
assert backend.received_commands[8].gate == Rx(-0.5)
assert backend.received_commands[9].gate == Deallocate
assert backend.received_commands[10].gate == H
assert backend.received_commands[11].gate == Allocate
assert backend.received_commands[12].gate == Rx(0.5)
assert backend.received_commands[13].gate == Rx(-0.7)
assert backend.received_commands[14].gate == NOT
assert backend.received_commands[15].gate == Rx(-0.5)
assert backend.received_commands[16].gate == H
assert backend.received_commands[17].gate == Deallocate
assert backend.received_commands[18].gate == Deallocate
assert backend.received_commands[19].gate == Rx(-0.9)
assert backend.received_commands[20].gate == Deallocate
assert backend.received_commands[21].gate == FlushGate()
# Test that each command has correct tags
assert backend.received_commands[0].tags == []
assert backend.received_commands[1].tags == [_compute.ComputeTag()]
assert backend.received_commands[2].tags == [DirtyQubitTag(), _compute.ComputeTag()]
for cmd in backend.received_commands[3:9]:
assert cmd.tags == [_compute.ComputeTag()]
assert backend.received_commands[9].tags == [DirtyQubitTag(), _compute.ComputeTag()]
assert backend.received_commands[10].tags == []
assert backend.received_commands[11].tags == [
DirtyQubitTag(),
_compute.UncomputeTag(),
]
for cmd in backend.received_commands[12:18]:
assert cmd.tags == [_compute.UncomputeTag()]
assert backend.received_commands[18].tags == [
DirtyQubitTag(),
_compute.UncomputeTag(),
]
assert backend.received_commands[19].tags == [_compute.UncomputeTag()]
assert backend.received_commands[20].tags == []
assert backend.received_commands[21].tags == []
# Test that each command has correct qubits
# Note that ancilla qubit in compute should be
# different from ancilla qubit in uncompute section
qubit_id = backend.received_commands[0].qubits[0][0].id
ancilla_compt_id = backend.received_commands[2].qubits[0][0].id
ancilla_uncompt_id = backend.received_commands[11].qubits[0][0].id
ancilla2_id = backend.received_commands[3].qubits[0][0].id
assert backend.received_commands[1].qubits[0][0].id == qubit_id
assert backend.received_commands[4].qubits[0][0].id == qubit_id
assert backend.received_commands[5].qubits[0][0].id == ancilla_compt_id
assert backend.received_commands[6].qubits[0][0].id == qubit_id
assert backend.received_commands[6].control_qubits[0].id == ancilla_compt_id
assert backend.received_commands[7].qubits[0][0].id == qubit_id
assert backend.received_commands[8].qubits[0][0].id == ancilla_compt_id
assert backend.received_commands[9].qubits[0][0].id == ancilla_compt_id
assert backend.received_commands[10].qubits[0][0].id == qubit_id
assert backend.received_commands[12].qubits[0][0].id == ancilla_uncompt_id
assert backend.received_commands[13].qubits[0][0].id == qubit_id
assert backend.received_commands[14].qubits[0][0].id == qubit_id
assert backend.received_commands[14].control_qubits[0].id == ancilla_uncompt_id
assert backend.received_commands[15].qubits[0][0].id == ancilla_uncompt_id
assert backend.received_commands[16].qubits[0][0].id == qubit_id
assert backend.received_commands[17].qubits[0][0].id == ancilla2_id
assert backend.received_commands[18].qubits[0][0].id == ancilla_uncompt_id
assert backend.received_commands[19].qubits[0][0].id == qubit_id
assert backend.received_commands[20].qubits[0][0].id == qubit_id
    # Test that the compute and uncompute ancilla qubits have separate ids
assert ancilla_uncompt_id != ancilla_compt_id
# Do the same thing with CustomUncompute and compare using the
# CompareEngine:
backend1 = DummyEngine(save_commands=True)
compare_engine1 = CompareEngine()
# Allow dirty qubits
dummy_cengine1 = DummyEngine()
def allow_dirty_qubits(self, meta_tag):
return meta_tag == DirtyQubitTag
dummy_cengine1.is_meta_tag_handler = types.MethodType(allow_dirty_qubits, dummy_cengine1)
eng1 = MainEngine(backend=backend1, engine_list=[compare_engine1, dummy_cengine1])
qubit = eng1.allocate_qubit()
with _compute.Compute(eng1):
Rx(0.9) | qubit
ancilla = eng1.allocate_qubit(dirty=True)
# ancilla2 will be deallocated in Uncompute section:
ancilla2 = eng1.allocate_qubit()
# Test that ancilla is registered in MainEngine.active_qubits:
assert ancilla[0] in eng1.active_qubits
H | qubit
Rx(0.5) | ancilla
CNOT | (ancilla, qubit)
Rx(0.7) | qubit
Rx(-0.5) | ancilla
ancilla[0].__del__()
H | qubit
with _compute.CustomUncompute(eng1):
ancilla = eng1.allocate_qubit(dirty=True)
Rx(0.5) | ancilla
Rx(-0.7) | qubit
CNOT | (ancilla, qubit)
Rx(-0.5) | ancilla
H | qubit
assert ancilla[0] in eng1.active_qubits
ancilla2[0].__del__()
ancilla[0].__del__()
Rx(-0.9) | qubit
eng1.flush(deallocate_qubits=True)
assert compare_engine0 == compare_engine1
def test_exception_if_no_compute_but_uncompute():
eng = MainEngine(backend=DummyEngine(), engine_list=[DummyEngine()])
with pytest.raises(_compute.NoComputeSectionError):
with _compute.CustomUncompute(eng):
pass
def test_exception_if_no_compute_but_uncompute2():
eng = MainEngine(backend=DummyEngine(), engine_list=[DummyEngine()])
with pytest.raises(_compute.NoComputeSectionError):
_compute.Uncompute(eng)
def test_qubit_management_error():
eng = MainEngine(backend=DummyEngine(), engine_list=[DummyEngine()])
with _compute.Compute(eng):
ancilla = eng.allocate_qubit() # noqa: F841
eng.active_qubits = weakref.WeakSet()
with pytest.raises(_compute.QubitManagementError):
_compute.Uncompute(eng)
def test_qubit_management_error2():
eng = MainEngine(backend=DummyEngine(), engine_list=[DummyEngine()])
with _compute.Compute(eng):
ancilla = eng.allocate_qubit() # noqa: F841
local_ancilla = eng.allocate_qubit()
local_ancilla[0].__del__()
eng.active_qubits = weakref.WeakSet()
with pytest.raises(_compute.QubitManagementError):
_compute.Uncompute(eng)
def test_only_single_error_in_custom_uncompute():
eng = MainEngine(backend=DummyEngine(), engine_list=[])
with _compute.Compute(eng):
eng.allocate_qubit()
    # Test that a QubitManagementError is not raised in addition
with pytest.raises(RuntimeError):
with _compute.CustomUncompute(eng):
raise RuntimeError
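# Illustrative sketch (not part of the original test module): the public
# pattern exercised by the tests above, written against the documented
# projectq.meta API instead of the private _compute module. Names below are
# for demonstration only; the function is defined but never called.
def _example_compute_uncompute_pattern():
    from projectq import MainEngine
    from projectq.meta import Compute, Uncompute
    from projectq.ops import CNOT, H
    eng = MainEngine()
    qubit = eng.allocate_qubit()
    ancilla = eng.allocate_qubit()
    with Compute(eng):
        H | ancilla
        CNOT | (qubit, ancilla)
    # ... use the ancilla here ...
    Uncompute(eng)  # emits the inverse of the compute section automatically
    eng.flush()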
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Python library for serializing any arbitrary object graph into JSON.
It can take almost any Python object and turn the object into JSON.
Additionally, it can reconstitute the object back into Python.
>>> import jsonpickle
>>> from samples import Thing
Create an object.
>>> obj = Thing('A String')
>>> print obj.name
A String
Use jsonpickle to transform the object into a JSON string.
>>> pickled = jsonpickle.encode(obj)
>>> print pickled
{"py/object": "samples.Thing", "name": "A String", "child": null}
Use jsonpickle to recreate a Python object from a JSON string
>>> unpickled = jsonpickle.decode(pickled)
>>> str(unpickled.name)
'A String'
.. warning::
Loading a JSON string from an untrusted source represents a potential
security vulnerability. jsonpickle makes no attempt to sanitize the input.
The new object has the same type and data, but essentially is now a copy of
the original.
>>> obj == unpickled
False
>>> obj.name == unpickled.name
True
>>> type(obj) == type(unpickled)
True
If you will never need to load (regenerate the Python class from JSON), you can
pass in the keyword unpicklable=False to prevent extra information from being
added to JSON.
>>> oneway = jsonpickle.encode(obj, unpicklable=False)
>>> print oneway
{"name": "A String", "child": null}
"""
from jsonpickle.pickler import Pickler
from jsonpickle.unpickler import Unpickler
__version__ = '0.4.0'
__all__ = ('encode', 'decode')
SUPPORTED_BACKENDS = ('json',
'simplejson',
'demjson',
                      'django.utils.simplejson')
class JSONPluginMgr(object):
"""The JSONPluginMgr handles encoding and decoding.
It tries these modules in this order:
simplejson, json, demjson
simplejson is a fast and popular backend and is tried first.
json comes with python2.6 and is tried second.
demjson is the most permissive backend and is tried last.
"""
def __init__(self):
## The names of backends that have been successfully imported
self._backend_names = []
## A dictionary mapping backend names to encode/decode functions
self._encoders = {}
self._decoders = {}
## Options to pass to specific encoders
json_opts = ((), {'sort_keys': True})
self._encoder_options = {
'json': json_opts,
'simplejson': json_opts,
            'django.utils.simplejson': json_opts,
}
## The exception class that is thrown when a decoding error occurs
self._decoder_exceptions = {}
## Whether we've loaded any backends successfully
self._verified = False
        ## Try loading the core backends: simplejson, json and demjson
self.load_backend('simplejson', 'dumps', 'loads', ValueError)
self.load_backend('json', 'dumps', 'loads', ValueError)
self.load_backend('demjson', 'encode', 'decode', 'JSONDecodeError')
## Experimental support
self.load_backend('jsonlib', 'write', 'read', 'ReadError')
self.load_backend('yajl', 'dumps', 'loads', ValueError)
def _verify(self):
"""Ensures that we've loaded at least one JSON backend."""
if self._verified:
return
raise AssertionError('jsonpickle requires at least one of the '
'following:\n'
' python2.6, simplejson, or demjson')
def load_backend(self, name, encode_name, decode_name, decode_exc):
"""
Load a JSON backend by name.
This method loads a backend and sets up references to that
backend's encode/decode functions and exception classes.
:param encode_name: is the name of the backend's encode method.
The method should take an object and return a string.
:param decode_name: names the backend's method for the reverse
operation -- returning a Python object from a string.
:param decode_exc: can be either the name of the exception class
used to denote decoding errors, or it can be a direct reference
to the appropriate exception class itself. If it is a name,
then the assumption is that an exception class of that name
can be found in the backend module's namespace.
"""
try:
## Load the JSON backend
mod = __import__(name)
except ImportError:
return
try:
## Handle submodules, e.g. django.utils.simplejson
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
except AttributeError:
return
try:
## Setup the backend's encode/decode methods
self._encoders[name] = getattr(mod, encode_name)
self._decoders[name] = getattr(mod, decode_name)
except AttributeError:
self.remove_backend(name)
return
try:
if type(decode_exc) is str:
## This backend's decoder exception is part of the backend
self._decoder_exceptions[name] = getattr(mod, decode_exc)
else:
## simplejson uses the ValueError exception
self._decoder_exceptions[name] = decode_exc
except AttributeError:
self.remove_backend(name)
return
## Setup the default args and kwargs for this encoder
self._encoder_options[name] = ([], {})
## Add this backend to the list of candidate backends
self._backend_names.append(name)
## Indicate that we successfully loaded a JSON backend
self._verified = True
def remove_backend(self, name):
"""Remove all entries for a particular backend."""
self._encoders.pop(name, None)
self._decoders.pop(name, None)
self._decoder_exceptions.pop(name, None)
self._encoder_options.pop(name, None)
if name in self._backend_names:
self._backend_names.remove(name)
self._verified = bool(self._backend_names)
def encode(self, obj):
"""
Attempt to encode an object into JSON.
This tries the loaded backends in order and passes along the last
exception if no backend is able to encode the object.
"""
self._verify()
for idx, name in enumerate(self._backend_names):
try:
optargs, optkwargs = self._encoder_options[name]
encoder_kwargs = optkwargs.copy()
encoder_args = (obj,) + tuple(optargs)
return self._encoders[name](*encoder_args, **encoder_kwargs)
except Exception:
if idx == len(self._backend_names) - 1:
raise
def decode(self, string):
"""
Attempt to decode an object from a JSON string.
This tries the loaded backends in order and passes along the last
exception if no backends are able to decode the string.
"""
self._verify()
for idx, name in enumerate(self._backend_names):
try:
return self._decoders[name](string)
except self._decoder_exceptions[name], e:
if idx == len(self._backend_names) - 1:
raise e
else:
pass # and try a more forgiving encoder, e.g. demjson
def set_preferred_backend(self, name):
"""
Set the preferred json backend.
If a preferred backend is set then jsonpickle tries to use it
before any other backend.
For example::
set_preferred_backend('simplejson')
If the backend is not one of the built-in jsonpickle backends
(json/simplejson, or demjson) then you must load the backend
prior to calling set_preferred_backend.
AssertionError is raised if the backend has not been loaded.
"""
if name in self._backend_names:
self._backend_names.remove(name)
self._backend_names.insert(0, name)
else:
errmsg = 'The "%s" backend has not been loaded.' % name
raise AssertionError(errmsg)
def set_encoder_options(self, name, *args, **kwargs):
"""
Associate encoder-specific options with an encoder.
After calling set_encoder_options, any calls to jsonpickle's
encode method will pass the supplied args and kwargs along to
the appropriate backend's encode method.
For example::
set_encoder_options('simplejson', sort_keys=True, indent=4)
set_encoder_options('demjson', compactly=False)
See the appropriate encoder's documentation for details about
the supported arguments and keyword arguments.
"""
self._encoder_options[name] = (args, kwargs)
# Initialize a JSONPluginMgr
json = JSONPluginMgr()
# Export specific JSONPluginMgr methods into the jsonpickle namespace
set_preferred_backend = json.set_preferred_backend
set_encoder_options = json.set_encoder_options
load_backend = json.load_backend
remove_backend = json.remove_backend
def encode(value, unpicklable=True, max_depth=None):
"""
Return a JSON formatted representation of value, a Python object.
The keyword argument 'unpicklable' defaults to True.
If set to False, the output will not contain the information
necessary to turn the JSON data back into Python objects.
The keyword argument 'max_depth' defaults to None.
If set to a non-negative integer then jsonpickle will not recurse
deeper than 'max_depth' steps into the object. Anything deeper
than 'max_depth' is represented using a Python repr() of the object.
>>> encode('my string')
'"my string"'
>>> encode(36)
'36'
>>> encode({'foo': True})
'{"foo": true}'
>>> encode({'foo': True}, max_depth=0)
'"{\\'foo\\': True}"'
>>> encode({'foo': True}, max_depth=1)
'{"foo": "True"}'
"""
j = Pickler(unpicklable=unpicklable,
max_depth=max_depth)
return json.encode(j.flatten(value))
def decode(string):
"""
Convert a JSON string into a Python object.
>>> str(decode('"my string"'))
'my string'
>>> decode('36')
36
"""
j = Unpickler()
return j.restore(json.decode(string))
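# Illustrative usage sketch (not part of the original module): selecting a
# preferred backend and passing encoder options through the JSONPluginMgr
# defined above. Defined for demonstration only and never called; it assumes
# simplejson was importable when this module was loaded.
def _example_backend_configuration():
    set_preferred_backend('simplejson')
    set_encoder_options('simplejson', sort_keys=True, indent=4)
    return encode({'answer': 42})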
| |
import jira
import settings
import jinja2
import sys
import argparse
import logging
import datetime
from main import send_on_email, get_jira_list
ITERATION = 'startOfDay(-{}), endOfDay(-1)'
def render(scope):
"""
Fill Jinja2 template given values and return it as a string
:param scope: hash with template variables and their values
:return: rendered template as a string
"""
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader('templates'))
template = jinja_env.get_template('weekly_report.html')
return template.render(scope)
def get_weekly_issues(j, project, assignees, iteration_length):
"""
    Get all issues that were in the Testing status and assigned to a testing team member during the iteration
:param j: jira instance to request to
:param project: project short-name in jira
:param assignees: team members list
:param iteration_length: iteration length in days
:return: list of jira issues
"""
weekly_issues = j.search_issues(
(
'project = {project} AND status was Testing DURING ({period}) ' # a bit of Jira jql hacks
'AND assignee WAS IN ({assignees}) DURING ({period}) '
).format(
project=project,
assignees=get_jira_list(assignees),
period=ITERATION.format(iteration_length),
),
fields='summary',
expand='changelog',
maxResults=settings.MAX_RESULTS,
)
return weekly_issues
def get_tested_issues(j, project, assignees, iteration_length):
"""
    Get issues that were in Testing and assigned to a testing team member during the iteration, and have since left the Testing status
:param j: jira instance to request to
:param project: project short-name in jira
:param assignees: team members list
:param iteration_length: iteration length in days
:return: list of jira issues
"""
weekly_issues = j.search_issues(
(
'project = {project} AND status was Testing DURING ({period}) ' # a bit of Jira jql hacks
'AND assignee WAS IN ({assignees}) DURING ({period}) '
'AND status != Testing'
).format(
project=project,
assignees=get_jira_list(assignees),
period=ITERATION.format(iteration_length),
),
fields='summary',
expand='changelog',
maxResults=settings.MAX_RESULTS,
)
return weekly_issues
def get_state_pairs(states):
"""
    Iterate over consecutive pairs of states
    :param states: list of states
    :return: generator of (from_state, to_state) pairs
"""
for n in range(len(states) - 1):
yield states[n], states[n+1]
def check_state_history(states):
"""
    Check that every transition out of the Testing state went to 'Awaiting for deploy'
    :param states: list of issue states in chronological order
    :return: True if the history is valid, False if the issue was returned to testing
"""
for from_state, to_state in get_state_pairs(states):
if from_state != u'Testing': # skip uninteresting pairs
continue
if to_state != u'Awaiting for deploy': # check state changing was valid
return False
return True
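# Illustrative sketch (not part of the original script): how the two helpers
# above interact. A history is considered valid when every departure from
# 'Testing' went to 'Awaiting for deploy'; anything else means the issue was
# returned to testing. Defined for demonstration only, never called.
def _example_state_history_check():
    passed = check_state_history([u'Open', u'Testing', u'Awaiting for deploy'])
    returned = check_state_history([u'Open', u'Testing', u'In Progress'])
    assert passed is True and returned is False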
def get_returned_issues(weekly_issues, iteration_length):
"""
    Get only the issues that were returned to testing
    :param weekly_issues: all jira issues
    :param iteration_length: iteration length in days
    :return: only the issues that were returned to testing
"""
logger = logging.getLogger(__name__)
returned_issues = []
previous_week_start = datetime.datetime.now() - datetime.timedelta(days=iteration_length)
previous_week_end = datetime.datetime.now() - datetime.timedelta(days=1)
for issue in weekly_issues:
states_history = []
# Place all task states in the states_history array
for history in issue.changelog.histories:
for item in history.items:
if item.field == 'status':
history_date = datetime.datetime.strptime(
history.created,
'%Y-%m-%dT%H:%M:%S.000+0300'
)
if previous_week_start <= history_date <= previous_week_end:
                        logger.debug(
                            'Add %s %s to %s',
                            history_date.strftime('%Y-%m-%d'),
                            item.fromString,
                            item.toString,
                        )
states_history.append(item.toString)
else:
                        logger.debug(
                            'Skip %s %s to %s',
                            history_date.strftime('%Y-%m-%d'),
                            item.fromString,
                            item.toString,
                        )
if not check_state_history(states_history):
returned_issues.append(issue)
return returned_issues
def get_closed_bugs(j, project, assignees, iteration_length):
"""
Get bugs closed in last iteration
:param j: jira instance to request to
:param project: project short-name in jira
:param assignees: team members list
:param iteration_length: iteration length in days
:return: list of jira issues
"""
closed_bugs = j.search_issues(
(
'project = {project} '
'AND issuetype = Bug '
'AND status changed to Closed DURING({period}) '
'AND assignee WAS in ({assignees})'
).format(
project=project,
assignees=get_jira_list(assignees),
period=ITERATION.format(iteration_length),
),
maxResults=settings.MAX_RESULTS,
)
return closed_bugs
def get_testing_issues(j, project, assignees, **kwargs):
"""
:param j: jira instance to request to
:param project: project short-name in jira
:param assignees: team members list
:return:
"""
testing_issues = j.search_issues(
(
'project = {project} '
'AND status = Testing '
'AND assignee in ({assignees}) '
).format(
project=project,
assignees=get_jira_list(assignees),
),
maxResults=settings.MAX_RESULTS,
)
return testing_issues
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose',
action='store_true',
default=False,
help='be verbose')
parser.add_argument('-l', '--length',
type=int,
default=settings.ITERATION_LENGTH,
help='Iteration length in days (default={})'.format(settings.ITERATION_LENGTH))
args = parser.parse_args()
iteration_length = args.length
log_level = logging.DEBUG if args.verbose else logging.INFO
logging.basicConfig(level=log_level)
logger = logging.getLogger(__name__)
smtp_auth = settings.SMTP_USER, settings.SMTP_PASS
today = datetime.datetime.now()
iteration_start = today - datetime.timedelta(days=iteration_length)
iteration_end = today - datetime.timedelta(days=1)
report_subject = settings.WEEKLY_SUBJECT.format(
iteration_start.strftime('%d.%m'),
iteration_end.strftime('%d.%m')
)
j = jira.JIRA(
server=settings.JIRA_URL,
basic_auth=(
settings.JIRA_USER,
settings.JIRA_PASS
)
)
report_query_data = {
'j': j,
'project': settings.JIRA_PROJECT,
'assignees': settings.FUNC,
'iteration_length': iteration_length,
}
weekly_issues = get_weekly_issues(**report_query_data)
logger.info('Got {} weekly issues'.format(len(weekly_issues)))
tested_issues = get_tested_issues(**report_query_data)
logger.info('Got {} tested issues'.format(len(tested_issues)))
returned_issues = get_returned_issues(weekly_issues, iteration_length)
logger.info('Got {} returned issues'.format(len(returned_issues)))
closed_bugs = get_closed_bugs(**report_query_data)
logger.info('Got {} closed bugs'.format(len(closed_bugs)))
testing_issues = get_testing_issues(**report_query_data)
logger.info('Got {} open tasks'.format(len(testing_issues)))
scope = {
'jira': settings.JIRA_URL,
'weekly_issues': weekly_issues,
'tested_issues': tested_issues,
'returned_issues': returned_issues,
'closed_bugs': closed_bugs,
'testing_issues': testing_issues,
}
send_on_email(
render(scope),
report_subject,
settings.EMAIL_FROM,
settings.EMAIL_TO,
smtp_auth
)
return 0
if __name__ == '__main__':
sys.exit(main())
| |
import pytest
import pickle
import ray
from ray.experimental.dag import (
DAGNode,
PARENT_CLASS_NODE_KEY,
PREV_CLASS_METHOD_CALL_KEY,
)
@ray.remote
class Counter:
def __init__(self, init_value=0):
self.i = init_value
def inc(self):
self.i += 1
def get(self):
return self.i
@ray.remote
class Actor:
def __init__(self, init_value):
self.i = init_value
def inc(self, x):
self.i += x
def get(self):
return self.i
def test_serialize_warning():
node = DAGNode([], {}, {}, {})
with pytest.raises(ValueError):
pickle.dumps(node)
def test_basic_actor_dag(shared_ray_instance):
@ray.remote
def combine(x, y):
return x + y
a1 = Actor.bind(10)
res = a1.get.bind()
print(res)
assert ray.get(res.execute()) == 10
a2 = Actor.bind(10)
a1.inc.bind(2)
a1.inc.bind(4)
a2.inc.bind(6)
dag = combine.bind(a1.get.bind(), a2.get.bind())
print(dag)
assert ray.get(dag.execute()) == 32
def test_class_as_class_constructor_arg(shared_ray_instance):
@ray.remote
class OuterActor:
def __init__(self, inner_actor):
self.inner_actor = inner_actor
def inc(self, x):
self.inner_actor.inc.remote(x)
def get(self):
return ray.get(self.inner_actor.get.remote())
outer = OuterActor.bind(Actor.bind(10))
outer.inc.bind(2)
dag = outer.get.bind()
print(dag)
assert ray.get(dag.execute()) == 12
def test_class_as_function_constructor_arg(shared_ray_instance):
@ray.remote
def f(actor_handle):
return ray.get(actor_handle.get.remote())
dag = f.bind(Actor.bind(10))
print(dag)
assert ray.get(dag.execute()) == 10
def test_basic_actor_dag_constructor_options(shared_ray_instance):
a1 = Actor.bind(10)
dag = a1.get.bind()
print(dag)
assert ray.get(dag.execute()) == 10
a1 = Actor.options(name="Actor", namespace="test", max_pending_calls=10).bind(10)
dag = a1.get.bind()
print(dag)
    # Ensure the execution result is identical when .options() is used in the constructor
assert ray.get(dag.execute()) == 10
# Ensure options are passed in
assert a1.get_options().get("name") == "Actor"
assert a1.get_options().get("namespace") == "test"
assert a1.get_options().get("max_pending_calls") == 10
def test_actor_method_options(shared_ray_instance):
a1 = Actor.bind(10)
dag = a1.get.options(name="actor_method_options").bind()
print(dag)
assert ray.get(dag.execute()) == 10
assert dag.get_options().get("name") == "actor_method_options"
def test_basic_actor_dag_constructor_invalid_options(shared_ray_instance):
a1 = Actor.options(num_cpus=-1).bind(10)
invalid_dag = a1.get.bind()
with pytest.raises(ValueError, match=".*Resource quantities may not be negative.*"):
ray.get(invalid_dag.execute())
def test_actor_options_complicated(shared_ray_instance):
"""Test a more complicated setup where we apply .options() in both
constructor and method call with overlapping keys, and ensure end to end
options correctness.
"""
@ray.remote
def combine(x, y):
return x + y
a1 = Actor.options(name="a1_v0").bind(10)
res = a1.get.options(name="v1").bind()
print(res)
assert ray.get(res.execute()) == 10
assert a1.get_options().get("name") == "a1_v0"
assert res.get_options().get("name") == "v1"
a1 = Actor.options(name="a1_v1").bind(10) # Cannot
a2 = Actor.options(name="a2_v0").bind(10)
a1.inc.options(name="v1").bind(2)
a1.inc.options(name="v2").bind(4)
a2.inc.options(name="v3").bind(6)
dag = combine.options(name="v4").bind(a1.get.bind(), a2.get.bind())
print(dag)
assert ray.get(dag.execute()) == 32
test_a1 = dag.get_args()[0] # call graph for a1.get.bind()
test_a2 = dag.get_args()[1] # call graph for a2.get.bind()
assert test_a2.get_options() == {} # No .options() at outer call
# refer to a2 constructor .options() call
assert (
test_a2.get_other_args_to_resolve()[PARENT_CLASS_NODE_KEY]
.get_options()
.get("name")
== "a2_v0"
)
# refer to actor method a2.inc.options() call
assert (
test_a2.get_other_args_to_resolve()[PREV_CLASS_METHOD_CALL_KEY]
.get_options()
.get("name")
== "v3"
)
# refer to a1 constructor .options() call
assert (
test_a1.get_other_args_to_resolve()[PARENT_CLASS_NODE_KEY]
.get_options()
.get("name")
== "a1_v1"
)
# refer to latest actor method a1.inc.options() call
assert (
test_a1.get_other_args_to_resolve()[PREV_CLASS_METHOD_CALL_KEY]
.get_options()
.get("name")
== "v2"
)
# refer to first bound actor method a1.inc.options() call
assert (
test_a1.get_other_args_to_resolve()[PREV_CLASS_METHOD_CALL_KEY]
.get_other_args_to_resolve()[PREV_CLASS_METHOD_CALL_KEY]
.get_options()
.get("name")
== "v1"
)
def test_pass_actor_handle(shared_ray_instance):
@ray.remote
class Actor:
def ping(self):
return "hello"
@ray.remote
def caller(handle):
assert isinstance(handle, ray.actor.ActorHandle), handle
return ray.get(handle.ping.remote())
a1 = Actor.bind()
dag = caller.bind(a1)
print(dag)
assert ray.get(dag.execute()) == "hello"
def test_dynamic_pipeline(shared_ray_instance):
@ray.remote
class Model:
def __init__(self, arg):
self.arg = arg
def forward(self, x):
return self.arg + str(x)
@ray.remote
class ModelSelection:
def is_even(self, x):
return x % 2 == 0
@ray.remote
def pipeline(x, m1, m2, selection):
sel = selection.is_even.remote(x)
if ray.get(sel):
result = m1.forward.remote(x)
else:
result = m2.forward.remote(x)
return ray.get(result)
m1 = Model.bind("Even: ")
m2 = Model.bind("Odd: ")
selection = ModelSelection.bind()
even_input = pipeline.bind(20, m1, m2, selection)
print(even_input)
assert ray.get(even_input.execute()) == "Even: 20"
odd_input = pipeline.bind(21, m1, m2, selection)
print(odd_input)
assert ray.get(odd_input.execute()) == "Odd: 21"
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| |
"""Enemy and player ships."""
import logging
import random
from xo1 import Surface, Renderable
from xoinvader import app
from xoinvader import collision
from xoinvader.animation import AnimationManager
from xoinvader.pickup import Pickup
from xoinvader.weapon import Blaster, Laser, UM, EBlaster, Weapon
from xoinvader.utils import clamp, Point, InfiniteList
from xoinvader.collision import Collider
from xoinvader.common import Settings, get_config, _ROOT
LOG = logging.getLogger(__name__)
# pylint: disable=missing-docstring
CONFIG = get_config().ship
# Think about composition
class Ship(Renderable):
"""Base class for all ships. Contains basic ship logic."""
compound = True
def __init__(self, pos: Point):
super().__init__(pos)
self._dx = None
self._fire = False
self._weapon = None
self._weapons = InfiniteList()
self._wbay = None
self._direction = 0
self._max_hull = None
self._max_shield = None
self._hull = None
self._shield = None
self._destroy = False
self._collider = None
# first initialization
self._apply_config(CONFIG[self.type])
def _apply_config(self, config):
"""Apply values from object's configuration."""
if not config:
raise ValueError
self._dx = int(config.dx)
self._hull = int(config.hull)
self._shield = int(config.shield)
self._max_hull = int(config.max_hull)
self._max_shield = int(config.max_shield)
@property
def direction(self):
return self._direction
@direction.setter
def direction(self, value):
if value == 0:
self._direction = 0
else:
self._direction = 1 if value > 0 else -1
@property
def max_hull(self):
return self._max_hull
@property
def max_shield(self):
return self._max_shield
def get_hull_percentage(self):
"""Return hull percentage."""
return self._hull * 100.0 / self._max_hull
def get_shield_percentage(self):
"""Return shield percentage."""
return self._shield * 100.0 / self._max_shield
def get_weapon_percentage(self):
"""Return weapon load percentage."""
return self._weapon.load_percentage()
def get_renderable_objects(self):
"""CORP stub."""
return self._weapons
def move_left(self):
"""Change direction."""
self._direction = -1
def move_right(self):
"""Change direction."""
self._direction = 1
def toggle_fire(self):
"""Toggle current weapon fire mode."""
self._fire = not self._fire
def next_weapon(self):
"""Select next weapon."""
self._weapon = self._weapons.next()
def prev_weapon(self):
"""Select previous weapon."""
self._weapon = self._weapons.prev()
def add_weapon(self, weapon: Weapon, autoselect: bool = True):
"""Add new weapon and optionally select it."""
self._weapons.append(weapon)
if autoselect:
self._weapon = weapon
def update_position(self, dt: int):
"""Update ship position.
        Allows the ship to move beyond the field borders.
"""
self._pos.x += self._direction * self._dx * dt
self._direction = 0
def update(self, dt: int):
"""Update ship object's state."""
self.update_position(dt)
if self.out_of_border():
self.destroy()
for weapon in self._weapons:
weapon.update(dt)
if self._fire:
try:
                # FIXME: [fix-in-place-coercing]
                # Coercing Point in-place smells bad (needed for now because
                # animation can set a float pos on enemy ships); we may need
                # a type-enforcement mechanism to cast automatically.
self._weapon.make_shot(self._pos[int] + self._wbay)
except ValueError:
self.next_weapon()
self.refresh_shield()
def destroy(self):
"""Self-destroying routine."""
if not self._destroy:
LOG.debug("Destroying ship %s", self)
self._destroy = True
if self._destroyed_by_player:
self._maybe_drop_something()
app.current().state.collision.remove(self._collider)
app.current().state.remove(self)
def out_of_border(self) -> bool:
border = Settings.layout.field.border
pos = self._pos[int]
return (
pos.x > self._image.width + border.x
or pos.x + self._image.width < 0
or int(pos.y) > self._image.height + border.y
or int(pos.y) + self._image.height < 0
)
def take_damage(self, damage: int):
"""Calculate and apply damage to shield and hull."""
# shield absorbs all damage
if self._shield >= damage:
self._shield -= damage
return
# shield fully discharged
damage = clamp(damage - self._shield, 0, damage)
self._shield = 0
# hull takes all rest damage
self._hull = clamp(self._hull - damage, 0, self._hull)
def refresh_shield(self, amount: int = 1):
"""Refresh shield."""
if self._shield == self._max_shield:
return
self._shield = clamp(self._shield + amount, 0, self._max_shield)
def collect(self, collectible):
"""Collect bonus or power-up and apply it now or later.
:param xoinvader.Collectible collectible:
"""
if collectible.instantaneous:
collectible.apply(self)
else:
raise Exception("_collectibles not implemented")
def refill_hull(self, amount: int = 0):
"""Refill hull."""
self._hull = clamp(self._hull + amount, 0, self._max_hull)
def refill_all_weapons(self, percent: int):
"""Refill all mounted weapons with provided persentage."""
        for weapon in self._weapons:
self.refill_weapon(weapon, percent * weapon.max_ammo / 100)
def refill_weapon(self, weapon: Weapon, amount: int):
"""Refill provided or current weapon."""
if not weapon:
weapon = self._weapon
weapon.refill(amount)
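# Illustrative worked example (not part of the original module): the damage
# model implemented by Ship.take_damage above. With shield=3 and hull=10, a
# hit of 5 drains the shield to 0 and applies the remaining 2 to the hull.
# Defined for demonstration only; never called by the game.
def _example_damage_split(shield=3, hull=10, damage=5):
    if shield >= damage:
        return shield - damage, hull
    rest = clamp(damage - shield, 0, damage)
    return 0, clamp(hull - rest, 0, hull)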
class GenericXEnemy(Ship):
"""Generic X enemy class."""
def __init__(self, pos):
super(GenericXEnemy, self).__init__(pos)
self._image = Surface.from_file(_ROOT / (CONFIG[self.type]["image"]))
self._collider = Collider.simple(self)
self._fire = True
self._wbay = Point(x=self._image.width // 2, y=1)
self.add_weapon(EBlaster(self._wbay))
self._destroyed_by_player = False
self._animgr = AnimationManager()
# TODO: rethink this method
    # The emergence of such methods points to the more general problem
    # of creating and configuring GameObjects.
def add_animation(self, *args, **kwargs):
self._animgr.add(*args, **kwargs)
def _maybe_drop_something(self):
drop_chance = 0.4
if 1 - random.random() > drop_chance:
return
drop = Pickup.from_droptable(self)
if drop is not None:
app.current().state.add(drop(self.pos))
def take_damage(self, amount: int):
"""Naive wrapper to distinguish destroy by player and out of boundary."""
super().take_damage(amount)
if self._hull <= 0:
self._destroyed_by_player = True
def update(self, dt):
if self._hull <= 0:
# TODO: [scoring]
# * Parametrize scores, move them to ships.conf
# * implement better scoring mechanism
app.current().state.add_player_score(10)
self.destroy()
return
self._animgr.update(dt)
super(GenericXEnemy, self).update(dt)
@collision.register("GenericXEnemy", "BasicPlasmaCannon")
@collision.register("GenericXEnemy", "BasicLaserCharge")
@collision.register("GenericXEnemy", "BasicUnguidedMissile")
def collide(self, other, rect):
self.take_damage(other.damage)
other.destroy()
class PlayerShip(Ship):
"""PlayerShip class."""
def __init__(self, pos):
super(PlayerShip, self).__init__(pos)
self.image = Surface.from_file(_ROOT / (CONFIG[self.type]["image"]))
# FIXME: Center the ship where it's created
self._pos = Point(
x=pos.x - self._image.width // 2, y=pos.y - self._image.height
)
self._collider = Collider.simple(self)
self._fire = False
self._wbay = Point(x=self._image.width // 2, y=-1)
self._weapons = InfiniteList([
Blaster(self._wbay),
Laser(self._wbay),
UM(self._wbay),
])
self._weapon = self._weapons.current()
def update_position(self, dt):
"""Update player ship position.
        Prevents the ship from moving beyond the field border.
"""
border = Settings.layout.field.camera
right = self._pos.x + self._image.width
if right >= border.x - 1 and self._direction > 0:
self._pos.x = border.x - self._image.width
elif self._pos.x <= 1 and self._direction < 0:
self._pos.x = 1
else:
# NOTE: Converting float to int reduces ship teleportation because
# we have no pixels.
self._pos.x += int(self._direction * self._dx * dt / 1000)
self._direction = 0
def update(self, dt):
if self._hull <= 0:
app.current().trigger_state(
"GameOverState", score=app.current().state.score
)
super(PlayerShip, self).update(dt)
def get_weapon_info(self):
"""Return information about current weapon."""
return "Weapon: {w} | [{c}/{m}]".format(
w=self._weapon.type, c=self._weapon.ammo, m=self._weapon.max_ammo
)
@collision.register("PlayerShip", "EBasicPlasmaCannon")
def collide(self, other, rect):
self.take_damage(other.damage)
other.destroy()
| |
import ivy
from ivy import treegraph as tg
def taxid_count(tree):
    """Return the list of taxon ids parsed from the leaf labels of a newick string."""
records = tree.split(",")
tis = [r.split('_ti')[1].replace(")", "") for r in records]
return tis
def color_vertices(taxonomy, treegraph, tid):
"""
tid: NCBI taxon id
Color the vertices of `treegraph` that are members of taxon `tid`
"""
nxt, bck = taxonomy.hindex[taxonomy.taxid_vertex[tid]]
colored = treegraph.new_vertex_property('bool')
# `colored` is a boolean vertex property map that will flag those
# vertices in the unrooted treegraph that are in convex subtrees
# corresponding to taxon `tid`
seen = set()
lvs = set()
for v in treegraph.vertices():
if v.out_degree() == 1: # leaf
seen.add(v)
taxv = taxonomy.taxid_vertex[treegraph.vertex_taxid[v]]
if taxonomy.incertae_sedis[taxv]:
p = taxv.in_neighbours().next()
pn, pb = taxonomy.hindex[p]
if nxt >= pn and bck <= pb:
colored[v] = 1
lvs.add(v)
else:
n, b = taxonomy.hindex[taxv]
if n >= nxt and b <= bck:
colored[v] = 1
lvs.add(v)
def gather():
s = set()
for v in lvs:
for n in v.out_neighbours():
if not colored[n]: s.add(n)
return s
def check(x):
i = 0
for y in x.out_neighbours():
if not colored[y]: i += 1
return i
verts = gather()
while 1:
for x in verts:
if check(x) == 1:
lvs.add(x)
colored[x] = 1
v = gather()
if v == verts: break
verts = v
c = tg.defaultdict(list)
# `c` is a mapping of positive integer values to lists of colored
# vertices in treegraph. The integers are counts of adjacent
# vertices that are not colored. So if `taxv` corresponds to a
# convex subgraph of `treegraph`, `c` should be {1: [x]}, where x
# is the vertex point of attachment
for v in treegraph.vertices():
if colored[v]:
i = check(v)
if i: c[i].append(v)
return colored, c
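# Illustrative sketch (not part of the original script): a plain-Python
# stand-in for the gather/check fixed point inside color_vertices above.
# Starting from the colored leaves, a vertex joins the colored region once
# only one of its neighbours remains uncolored, i.e. the region can only be
# left through a single edge. The dict-based adjacency and names are
# assumptions for demonstration only; the function is never called.
def _example_grow_colored(adj, colored):
    """adj: dict mapping vertex -> set of neighbours; colored: set of start vertices."""
    while True:
        frontier = set(n for v in colored for n in adj[v] if n not in colored)
        grew = False
        for x in frontier:
            uncolored = sum(1 for y in adj[x] if y not in colored)
            if uncolored == 1:
                colored.add(x)
                grew = True
        if not grew:
            return colored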
def proc(g, line, merged, probfile, outfile):
pbtree, tl, ad, s = line.split()
print 'processing', pbtree
r = ivy.newick.parse(s) # the root node of the tree of interest
lvs = r.leaves()
rps = [] # rootpaths of leaf nodes, where each rootpath is a list
# of taxids from leaf to root
leaf_tid_counts = tg.Counter()
try:
for lf in lvs:
# assign/compute attributes of leaves
w = lf.label.split('_')
lf.gi = int(w[-2][2:])
lf.taxid = int(w[-1][2:])
leaf_tid_counts[lf.taxid] += 1
if lf.taxid not in g.taxid_vertex and lf.taxid in merged:
lf.taxid = merged[lf.taxid]
## lf.taxv = g.taxid_vertex[lf.taxid]
taxv = g.taxid_vertex[lf.taxid]
lf.taxid_next, lf.taxid_back = g.hindex[taxv]
lf.taxid_rootpath = tg.taxid_rootpath(g, lf.taxid)
for i, x in enumerate(lf.taxid_rootpath):
if x not in g.taxid_vertex and x in merged:
lf.taxid_rootpath[i] = merged[x]
rps.append(lf.taxid_rootpath)
except:
print '!!! problem assigning leaf taxids'
probfile.write('%s\n' % pbtree)
#return []
r.mrca = tg.rootpath_mrca(rps) # taxid of mrca of all tree's leaves
taxids = set()
for rp in rps:
# trim rootpaths: make them terminate with mrca
while 1:
if rp[-1] == r.mrca: break
else: rp.pop()
assert rp
taxids.update(rp)
# create a taxonomy (sub)graph of only those taxids represented in r
## taxidsubg = tg.taxid_subgraph(g, taxids)
taxidsubg = tg.taxid_new_subgraph(g, taxids)
taxidsubg.vfilt = taxidsubg.new_vertex_property('bool')
## r.taxv = taxidsubg.taxid_vertex[r.mrca]
# no need to check for convexity for singleton tip taxa
for x in [ taxidsubg.taxid_vertex[lf.taxid] for lf in lvs
if leaf_tid_counts[lf.taxid]==1 ]:
taxidsubg.vfilt[x] = 0
# an undirected graph having the same topology as r, used for
# checking convexity of taxa
treegraph = tg.gt.Graph(directed=False)
treegraph.mrca = r.mrca
print 'mrca:', g.taxid_name(r.mrca)
treegraph.vertex_taxid = tg.get_or_create_vp(treegraph, 'taxid', 'int')
## treegraph.vertex_taxv = tg.get_or_create_vp(treegraph, 'taxv', 'int')
v2lf = {}
N = len(r)
verts = treegraph.add_vertex(N)
for n in r: # for each node in r
# store its treegraph vertex
n.v = verts.next()
if not n.children:
treegraph.vertex_taxid[n.v] = n.taxid
## treegraph.vertex_taxv[n.v] = int(n.taxv)
v2lf[n.v] = n
if n.parent:
treegraph.add_edge(n.parent.v, n.v)
treegraph_leaves = [ x for x in treegraph.vertices() if x.out_degree()==1 ]
convex = {} # for storing the convex subgraphs
def traverse(taxv):
"""
`taxv` is a vertex in the taxonomy graph. This function checks whether
it is convex in `treegraph`; if yes, stores the info in
`convex`; if no, it recursively checks descendants of `taxv` for
convexity
"""
tid = taxidsubg.vertex_taxid[taxv]
print 'checking', tid, taxidsubg.vertex_name[taxv]
p, c = color_vertices(g, treegraph, tid)
if len(c)==1 and len(c[1])==1: # taxv/tid is convex
print '...success'
rv = c[1][0] # rv is the root of the convex subtree
treegraph.set_vertex_filter(p)
## lvs = [ x for x in treegraph.vertices() if x.out_degree()==1 ]
lvs = [ x for x in treegraph_leaves if p[x] ]
if len(lvs) > 2:
# we are only interested in convex subgraphs having
# more than 2 leaves
rootpaths = []
for lf in lvs:
ti = treegraph.vertex_taxid[lf]
tv = taxidsubg.taxid_vertex[ti]
if not taxidsubg.incertae_sedis[tv]:
rootpaths.append(tg.taxid_rootpath(taxidsubg, ti))
if rootpaths:
mrca = tg.rootpath_mrca(rootpaths)
print 'traverse: mrca', mrca
ancv = [taxidsubg.taxid_vertex[mrca]]
while ancv[-1] != taxv:
# STRANGE EDGE CASES HERE
try: ancv.append(ancv[-1].in_neighbours().next())
except StopIteration: pass
k = '.'.join([ str(taxidsubg.vertex_taxid[x])
for x in ancv ])
convex[k] = (rv, p)
treegraph.set_vertex_filter(None)
else:
treegraph.set_vertex_filter(None)
for n in taxv.out_neighbours():
traverse(n)
for v in taxidsubg.root.out_neighbours(): traverse(v)
## print 'done'
def make_newick(root, seen):
children = [ x for x in root.out_neighbours() if x not in seen ]
if children:
seen.update(children)
s = '(%s)' % ','.join(
[ make_newick(c, seen) for c in children ]
)
else:
s = v2lf[root].label.replace(',','').replace('(','').replace(')','')
return s
newicks = []
for k, (root, p) in convex.items():
treegraph.set_vertex_filter(p)
s = make_newick(root, set([root]))
treegraph.set_vertex_filter(None)
names = ','.join([ g.taxid_name(int(x)) for x in k.split('.') ])
tis = taxid_count(s)
if len(set(tis)) > 2:
outfile.write('%s\t%s\t%s\t%s;\n' % (pbtree, k, names, s))
print 'wrote subtree:', names
for n in r.postiter():
n.parent = None; del n.children
if __name__ == "__main__":
merged = {}
with open('ncbi/merged.dmp') as f:
for line in f:
v = line.split()
merged[int(v[0])] = int(v[2])
g = tg.load_taxonomy_graph('ncbi/ncbi.xml.gz')
probfile = open('readable.problem_subtrees','w')
outfile = open('readable.convex_subtrees','w')
problem_trees = open('problem_trees.out', 'a')
with open('filtered-trees.out') as f:
for line in f:
try:
proc(g, line, merged, probfile, outfile)
except:
id = line.split()[0]
problem_trees.write(id)
outfile.close()
probfile.close()
problem_trees.close()
| |
from __future__ import absolute_import
import json
import logging
import warnings
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from pip._vendor import six
from pip.basecommand import Command
from pip.exceptions import CommandError
from pip.index import PackageFinder
from pip.utils import (
get_installed_distributions, dist_is_editable)
from pip.utils.deprecation import RemovedInPip10Warning
from pip.cmdoptions import make_option_group, index_group
logger = logging.getLogger(__name__)
class ListCommand(Command):
"""
List installed packages, including editables.
Packages are listed in a case-insensitive sorted order.
"""
name = 'list'
usage = """
%prog [options]"""
summary = 'List installed packages.'
def __init__(self, *args, **kw):
super(ListCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-o', '--outdated',
action='store_true',
default=False,
help='List outdated packages')
cmd_opts.add_option(
'-u', '--uptodate',
action='store_true',
default=False,
help='List uptodate packages')
cmd_opts.add_option(
'-e', '--editable',
action='store_true',
default=False,
help='List editable projects.')
cmd_opts.add_option(
'-l', '--local',
action='store_true',
default=False,
help=('If in a virtualenv that has global access, do not list '
'globally-installed packages.'),
)
self.cmd_opts.add_option(
'--user',
dest='user',
action='store_true',
default=False,
help='Only output packages installed in user-site.')
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help=("Include pre-release and development versions. By default, "
"pip only finds stable versions."),
)
cmd_opts.add_option(
'--format',
action='store',
dest='list_format',
choices=('legacy', 'columns', 'freeze', 'json'),
help="Select the output format among: legacy (default), columns, "
"freeze or json.",
)
cmd_opts.add_option(
'--not-required',
action='store_true',
dest='not_required',
help="List packages that are not dependencies of "
"installed packages.",
)
index_opts = make_option_group(index_group, self.parser)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this list command.
"""
return PackageFinder(
find_links=options.find_links,
index_urls=index_urls,
allow_all_prereleases=options.pre,
trusted_hosts=options.trusted_hosts,
process_dependency_links=options.process_dependency_links,
session=session,
)
def run(self, options, args):
if options.allow_external:
warnings.warn(
"--allow-external has been deprecated and will be removed in "
"the future. Due to changes in the repository protocol, it no "
"longer has any effect.",
RemovedInPip10Warning,
)
if options.allow_all_external:
warnings.warn(
"--allow-all-external has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if options.allow_unverified:
warnings.warn(
"--allow-unverified has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if options.list_format is None:
warnings.warn(
"The default format will switch to columns in the future. "
"You can use --format=(legacy|columns) (or define a "
"format=(legacy|columns) in your pip.conf under the [list] "
"section) to disable this warning.",
RemovedInPip10Warning,
)
if options.outdated and options.uptodate:
raise CommandError(
"Options --outdated and --uptodate cannot be combined.")
packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
editables_only=options.editable,
)
if options.outdated:
packages = self.get_outdated(packages, options)
elif options.uptodate:
packages = self.get_uptodate(packages, options)
if options.not_required:
packages = self.get_not_required(packages, options)
self.output_package_listing(packages, options)
def get_outdated(self, packages, options):
return [
dist for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version > dist.parsed_version
]
def get_uptodate(self, packages, options):
return [
dist for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version == dist.parsed_version
]
def get_not_required(self, packages, options):
dep_keys = set()
for dist in packages:
dep_keys.update(requirement.key for requirement in dist.requires())
return set(pkg for pkg in packages if pkg.key not in dep_keys)
def iter_packages_latest_infos(self, packages, options):
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.debug('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
dependency_links = []
for dist in packages:
if dist.has_metadata('dependency_links.txt'):
dependency_links.extend(
dist.get_metadata_lines('dependency_links.txt'),
)
with self._build_session(options) as session:
finder = self._build_package_finder(options, index_urls, session)
finder.add_dependency_links(dependency_links)
for dist in packages:
typ = 'unknown'
all_candidates = finder.find_all_candidates(dist.key)
if not options.pre:
# Remove prereleases
all_candidates = [candidate for candidate in all_candidates
if not candidate.version.is_prerelease]
if not all_candidates:
continue
best_candidate = max(all_candidates,
key=finder._candidate_sort_key)
remote_version = best_candidate.version
if best_candidate.location.is_wheel:
typ = 'wheel'
else:
typ = 'sdist'
# This is dirty but makes the rest of the code much cleaner
dist.latest_version = remote_version
dist.latest_filetype = typ
yield dist
def output_legacy(self, dist):
if dist_is_editable(dist):
return '%s (%s, %s)' % (
dist.project_name,
dist.version,
dist.location,
)
else:
return '%s (%s)' % (dist.project_name, dist.version)
def output_legacy_latest(self, dist):
return '%s - Latest: %s [%s]' % (
self.output_legacy(dist),
dist.latest_version,
dist.latest_filetype,
)
def output_package_listing(self, packages, options):
packages = sorted(
packages,
key=lambda dist: dist.project_name.lower(),
)
if options.list_format == 'columns' and packages:
data, header = format_for_columns(packages, options)
self.output_package_listing_columns(data, header)
elif options.list_format == 'freeze':
for dist in packages:
logger.info("%s==%s", dist.project_name, dist.version)
elif options.list_format == 'json':
logger.info(format_for_json(packages, options))
else: # legacy
for dist in packages:
if options.outdated:
logger.info(self.output_legacy_latest(dist))
else:
logger.info(self.output_legacy(dist))
def output_package_listing_columns(self, data, header):
# insert the header first: we need to know the size of column names
if len(data) > 0:
data.insert(0, header)
pkg_strings, sizes = tabulate(data)
# Create and add a separator.
if len(data) > 0:
pkg_strings.insert(1, " ".join(map(lambda x: '-' * x, sizes)))
for val in pkg_strings:
logger.info(val)
def tabulate(vals):
# From pfmoore on GitHub:
# https://github.com/pypa/pip/issues/3651#issuecomment-216932564
assert len(vals) > 0
sizes = [0] * max(len(x) for x in vals)
for row in vals:
sizes = [max(s, len(str(c))) for s, c in zip_longest(sizes, row)]
result = []
for row in vals:
display = " ".join([str(c).ljust(s) if c is not None else ''
for s, c in zip_longest(sizes, row)])
result.append(display)
return result, sizes
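# Illustrative sketch of what tabulate() returns for a tiny two-row input
# (hypothetical values, not output from a real pip run):
#   tabulate([["Package", "Version"], ["pip", "9.0.1"]])
#   -> (["Package Version", "pip     9.0.1  "], [7, 7])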
def format_for_columns(pkgs, options):
"""
Convert the package data into something usable
by output_package_listing_columns.
"""
running_outdated = options.outdated
# Adjust the header for the `pip list --outdated` case.
if running_outdated:
header = ["Package", "Version", "Latest", "Type"]
else:
header = ["Package", "Version"]
data = []
if any(dist_is_editable(x) for x in pkgs):
header.append("Location")
for proj in pkgs:
# if we're working on the 'outdated' list, separate out the
# latest_version and type
row = [proj.project_name, proj.version]
if running_outdated:
row.append(proj.latest_version)
row.append(proj.latest_filetype)
if dist_is_editable(proj):
row.append(proj.location)
data.append(row)
return data, header
def format_for_json(packages, options):
data = []
for dist in packages:
info = {
'name': dist.project_name,
'version': six.text_type(dist.version),
}
if options.outdated:
info['latest_version'] = six.text_type(dist.latest_version)
info['latest_filetype'] = dist.latest_filetype
data.append(info)
return json.dumps(data)
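# Illustrative example of the JSON produced above (hypothetical package data):
#   '[{"name": "pip", "version": "9.0.1"}]'
# with "latest_version" and "latest_filetype" keys added when --outdated is used.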
| |
import unittest
from unittest import mock
from rest_framework.exceptions import AuthenticationFailed
from tethys_portal.middleware import TethysSocialAuthExceptionMiddleware, TethysAppAccessMiddleware, \
TethysMfaRequiredMiddleware
from django.core.exceptions import PermissionDenied
class TethysPortalMiddlewareTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
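    # Note on the tests below: the patched isinstance() is driven by a
    # side_effect sequence (e.g. False, True) so that successive isinstance()
    # checks inside process_exception select the exception branch each test
    # exercises.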
@mock.patch('tethys_portal.middleware.isinstance')
@mock.patch('tethys_portal.middleware.hasattr')
@mock.patch('tethys_portal.middleware.redirect')
def test_process_exception_anonymous_user(self, mock_redirect, mock_hasattr, mock_isinstance):
mock_request = mock.MagicMock()
mock_exception = mock.MagicMock()
mock_hasattr.return_value = True
mock_isinstance.return_value = True
mock_request.user.is_anonymous = True
obj = TethysSocialAuthExceptionMiddleware()
obj.process_exception(mock_request, mock_exception)
mock_redirect.assert_called_once_with('accounts:login')
@mock.patch('tethys_portal.middleware.isinstance')
@mock.patch('tethys_portal.middleware.hasattr')
@mock.patch('tethys_portal.middleware.redirect')
def test_process_exception_user(self, mock_redirect, mock_hasattr, mock_isinstance):
mock_request = mock.MagicMock()
mock_exception = mock.MagicMock()
mock_hasattr.return_value = True
mock_isinstance.return_value = True
mock_request.user.is_anonymous = False
mock_request.user.username = 'foo'
obj = TethysSocialAuthExceptionMiddleware()
obj.process_exception(mock_request, mock_exception)
mock_redirect.assert_called_once_with('user:settings')
@mock.patch('tethys_portal.middleware.pretty_output')
@mock.patch('tethys_portal.middleware.messages.success')
@mock.patch('tethys_portal.middleware.isinstance')
@mock.patch('tethys_portal.middleware.hasattr')
@mock.patch('tethys_portal.middleware.redirect')
def test_process_exception_isinstance_google(self, mock_redirect, mock_hasattr, mock_isinstance, mock_success,
mock_pretty_output):
mock_request = mock.MagicMock()
mock_request.user.is_anonymous = True
mock_exception = mock.MagicMock()
mock_exception.backend.name = 'google'
mock_hasattr.return_value = True
mock_isinstance.side_effect = False, True
obj = TethysSocialAuthExceptionMiddleware()
obj.process_exception(mock_request, mock_exception)
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('google', po_call_args[0][0][0])
call_args = mock_success.call_args_list
self.assertEqual(mock_request, call_args[0][0][0])
self.assertEqual('The Google account you tried to connect to has already been associated with another '
'account.', call_args[0][0][1])
mock_redirect.assert_called_once_with('accounts:login')
@mock.patch('tethys_portal.middleware.pretty_output')
@mock.patch('tethys_portal.middleware.messages.success')
@mock.patch('tethys_portal.middleware.isinstance')
@mock.patch('tethys_portal.middleware.hasattr')
@mock.patch('tethys_portal.middleware.redirect')
def test_process_exception_isinstance_linkedin(self, mock_redirect, mock_hasattr, mock_isinstance,
mock_success, mock_pretty_output):
mock_request = mock.MagicMock()
mock_request.user.is_anonymous = True
mock_exception = mock.MagicMock()
mock_exception.backend.name = 'linkedin'
mock_hasattr.return_value = True
mock_isinstance.side_effect = False, True
obj = TethysSocialAuthExceptionMiddleware()
obj.process_exception(mock_request, mock_exception)
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('linkedin', po_call_args[0][0][0])
call_args = mock_success.call_args_list
self.assertEqual(mock_request, call_args[0][0][0])
self.assertEqual('The LinkedIn account you tried to connect to has already been associated with another '
'account.', call_args[0][0][1])
mock_redirect.assert_called_once_with('accounts:login')
@mock.patch('tethys_portal.middleware.pretty_output')
@mock.patch('tethys_portal.middleware.messages.success')
@mock.patch('tethys_portal.middleware.isinstance')
@mock.patch('tethys_portal.middleware.hasattr')
@mock.patch('tethys_portal.middleware.redirect')
def test_process_exception_isinstance_hydroshare(self, mock_redirect, mock_hasattr, mock_isinstance,
mock_success,
mock_pretty_output):
mock_request = mock.MagicMock()
mock_request.user.is_anonymous = True
mock_exception = mock.MagicMock()
mock_exception.backend.name = 'hydroshare'
mock_hasattr.return_value = True
mock_isinstance.side_effect = False, True
obj = TethysSocialAuthExceptionMiddleware()
obj.process_exception(mock_request, mock_exception)
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('hydroshare', po_call_args[0][0][0])
call_args = mock_success.call_args_list
self.assertEqual(mock_request, call_args[0][0][0])
self.assertEqual('The HydroShare account you tried to connect to has already been associated with '
'another account.', call_args[0][0][1])
mock_redirect.assert_called_once_with('accounts:login')
@mock.patch('tethys_portal.middleware.pretty_output')
@mock.patch('tethys_portal.middleware.messages.success')
@mock.patch('tethys_portal.middleware.isinstance')
@mock.patch('tethys_portal.middleware.hasattr')
@mock.patch('tethys_portal.middleware.redirect')
def test_process_exception_isinstance_facebook(self, mock_redirect, mock_hasattr, mock_isinstance, mock_success,
mock_pretty_output):
mock_request = mock.MagicMock()
mock_request.user.is_anonymous = True
mock_exception = mock.MagicMock()
mock_exception.backend.name = 'facebook'
mock_hasattr.return_value = True
mock_isinstance.side_effect = False, True
obj = TethysSocialAuthExceptionMiddleware()
obj.process_exception(mock_request, mock_exception)
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('facebook', po_call_args[0][0][0])
call_args = mock_success.call_args_list
self.assertEqual(mock_request, call_args[0][0][0])
self.assertEqual('The Facebook account you tried to connect to has already been associated with '
'another account.', call_args[0][0][1])
mock_redirect.assert_called_once_with('accounts:login')
@mock.patch('tethys_portal.middleware.pretty_output')
@mock.patch('tethys_portal.middleware.messages.success')
@mock.patch('tethys_portal.middleware.isinstance')
@mock.patch('tethys_portal.middleware.hasattr')
@mock.patch('tethys_portal.middleware.redirect')
def test_process_exception_isinstance_social(self, mock_redirect, mock_hasattr, mock_isinstance,
mock_success,
mock_pretty_output):
mock_request = mock.MagicMock()
mock_request.user.is_anonymous = False
mock_request.user.username = 'foo'
mock_exception = mock.MagicMock()
mock_exception.backend.name = 'social'
mock_hasattr.return_value = True
mock_isinstance.side_effect = False, True
obj = TethysSocialAuthExceptionMiddleware()
obj.process_exception(mock_request, mock_exception)
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('social', po_call_args[0][0][0])
call_args = mock_success.call_args_list
self.assertEqual(mock_request, call_args[0][0][0])
self.assertEqual('The social account you tried to connect to has already been associated with '
'another account.', call_args[0][0][1])
mock_redirect.assert_called_once_with('user:settings')
@mock.patch('tethys_portal.middleware.messages.success')
@mock.patch('tethys_portal.middleware.isinstance')
@mock.patch('tethys_portal.middleware.hasattr')
@mock.patch('tethys_portal.middleware.redirect')
def test_process_exception_isinstance_exception_with_anonymous_user(self, mock_redirect, mock_hasattr,
mock_isinstance, mock_success):
mock_request = mock.MagicMock()
mock_request.user.is_anonymous = False
mock_request.user.username = 'foo'
mock_exception = mock.MagicMock()
mock_exception.backend.name = 'social'
mock_hasattr.return_value = True
mock_isinstance.side_effect = False, False, True
obj = TethysSocialAuthExceptionMiddleware()
obj.process_exception(mock_request, mock_exception)
call_args = mock_success.call_args_list
self.assertEqual(mock_request, call_args[0][0][0])
self.assertEqual('Unable to disconnect from this social account.', call_args[0][0][1])
mock_redirect.assert_called_once_with('user:settings')
@mock.patch('tethys_portal.middleware.messages.success')
@mock.patch('tethys_portal.middleware.isinstance')
@mock.patch('tethys_portal.middleware.hasattr')
@mock.patch('tethys_portal.middleware.redirect')
def test_process_exception_isinstance_exception_user(self, mock_redirect, mock_hasattr, mock_isinstance,
mock_success):
mock_request = mock.MagicMock()
mock_request.user.is_anonymous = True
mock_exception = mock.MagicMock()
mock_exception.backend.name = 'social'
mock_hasattr.return_value = True
mock_isinstance.side_effect = False, False, True
obj = TethysSocialAuthExceptionMiddleware()
obj.process_exception(mock_request, mock_exception)
call_args = mock_success.call_args_list
self.assertEqual(mock_request, call_args[0][0][0])
self.assertEqual('Unable to disconnect from this social account.', call_args[0][0][1])
mock_redirect.assert_called_once_with('accounts:login')
@mock.patch('tethys_portal.middleware.get_active_app')
def test_app_access_app_none(self, mock_app):
mock_app.return_value = None
mock_request = mock.MagicMock()
mock_request.return_value = True
obj = TethysAppAccessMiddleware(mock_request)
result = obj.__call__(mock_request)
self.assertTrue(result)
@mock.patch('tethys_portal.middleware.handler_404')
@mock.patch('tethys_portal.middleware.get_active_app')
def test_app_access_disabled(self, mock_app, mock_404):
mock_app.return_value = mock.MagicMock(enabled=False)
mock_request1 = mock.MagicMock()
obj1 = TethysAppAccessMiddleware(mock_request1)
obj1.__call__(mock_request1)
self.assertEqual(mock_404.call_args_list[0][0][2], "This app is disabled. A user with admin permissions can "
"enable this app from the app settings page.")
mock_request2 = mock.MagicMock()
mock_request2.user.is_staff = False
obj2 = TethysAppAccessMiddleware(mock_request2)
obj2.__call__(mock_request2)
self.assertEqual(mock_404.call_args_list[0][0][1], PermissionDenied)
@mock.patch('tethys_portal.middleware.user_can_access_app')
@mock.patch('tethys_portal.middleware.get_active_app')
def test_app_access_has_permission(self, mock_app, mock_has_perm):
mock_app.return_value = mock.MagicMock(enabled=True)
mock_request = mock.MagicMock()
obj = TethysAppAccessMiddleware(mock_request)
obj.__call__(mock_request)
self.assertEqual(mock_has_perm.call_args_list[0][0][1], mock_app())
@mock.patch('tethys_portal.middleware.handler_404')
@mock.patch('tethys_portal.middleware.user_can_access_app')
@mock.patch('tethys_portal.middleware.get_active_app')
def test_app_access_no_permission(self, mock_app, mock_has_perm, mock_404):
mock_app.return_value = mock.MagicMock(enabled=True)
mock_request = mock.MagicMock()
mock_has_perm.return_value = False
obj = TethysAppAccessMiddleware(mock_request)
obj.__call__(mock_request)
self.assertEqual(mock_404.call_args_list[0][0][1], PermissionDenied)
@staticmethod
def mock_request_with_user(path='/apps', with_sso=False, is_staff=False):
"""
Build a mock request with a mock user.
"""
mock_request = mock.MagicMock(path=path)
mock_request.user = mock.MagicMock(is_staff=is_staff)
mock_request.user.social_auth.count = mock.MagicMock(return_value=1 if with_sso else 0)
return mock_request
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required_all_true__normal_user(self, mock_settings, mock_has_mfa, mock_redirect):
mock_settings.MFA_REQUIRED = True
mock_settings.SSO_MFA_REQUIRED = True
mock_settings.ADMIN_MFA_REQUIRED = True
mock_get_response = mock.MagicMock()
mock_request = self.mock_request_with_user()
mock_has_mfa.return_value = False
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
# required for all users
mock_redirect.assert_called_once_with('mfa_home')
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required_all_true__sso_user(self, mock_settings, mock_has_mfa, mock_redirect):
mock_settings.MFA_REQUIRED = True
mock_settings.SSO_MFA_REQUIRED = True
mock_settings.ADMIN_MFA_REQUIRED = True
mock_get_response = mock.MagicMock()
mock_request = self.mock_request_with_user(with_sso=True)
mock_has_mfa.return_value = False
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
# required for all users
mock_redirect.assert_called_once_with('mfa_home')
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required_all_true__staff_user(self, mock_settings, mock_has_mfa, mock_redirect):
mock_settings.MFA_REQUIRED = True
mock_settings.SSO_MFA_REQUIRED = True
mock_settings.ADMIN_MFA_REQUIRED = True
mock_get_response = mock.MagicMock()
mock_request = self.mock_request_with_user(is_staff=True)
mock_has_mfa.return_value = False
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
# required for all users
mock_redirect.assert_called_once_with('mfa_home')
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required__all_false__normal_user(self, mock_settings, mock_has_mfa, mock_redirect):
mock_settings.MFA_REQUIRED = False
mock_settings.SSO_MFA_REQUIRED = False
mock_settings.ADMIN_MFA_REQUIRED = False
mock_get_response = mock.MagicMock()
mock_request = self.mock_request_with_user()
mock_has_mfa.return_value = False
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
# not required
mock_redirect.assert_not_called()
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required__all_false__sso_user(self, mock_settings, mock_has_mfa, mock_redirect):
mock_settings.MFA_REQUIRED = False
mock_settings.SSO_MFA_REQUIRED = False
mock_settings.ADMIN_MFA_REQUIRED = False
mock_get_response = mock.MagicMock()
mock_request = self.mock_request_with_user(with_sso=True)
mock_has_mfa.return_value = False
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
# not required
mock_redirect.assert_not_called()
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required__all_false__staff_user(self, mock_settings, mock_has_mfa, mock_redirect):
mock_settings.MFA_REQUIRED = False
mock_settings.SSO_MFA_REQUIRED = False
mock_settings.ADMIN_MFA_REQUIRED = False
mock_get_response = mock.MagicMock()
mock_request = self.mock_request_with_user(is_staff=True)
mock_has_mfa.return_value = False
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
# not required
mock_redirect.assert_not_called()
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required__mfa_required_false__normal_user(self, mock_settings, mock_has_mfa, mock_redirect):
mock_settings.MFA_REQUIRED = False
mock_settings.SSO_MFA_REQUIRED = True
mock_settings.ADMIN_MFA_REQUIRED = True
mock_get_response = mock.MagicMock()
mock_request = self.mock_request_with_user()
mock_has_mfa.return_value = False
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
        # not required for any user
mock_redirect.assert_not_called()
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required__mfa_required_false__sso_user(self, mock_settings, mock_has_mfa, mock_redirect):
mock_settings.MFA_REQUIRED = False
mock_settings.SSO_MFA_REQUIRED = True
mock_settings.ADMIN_MFA_REQUIRED = True
mock_get_response = mock.MagicMock()
mock_request = self.mock_request_with_user(with_sso=True)
mock_has_mfa.return_value = False
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
        # not required for any user
mock_redirect.assert_not_called()
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required__mfa_required_false__staff_user(self, mock_settings, mock_has_mfa, mock_redirect):
mock_settings.MFA_REQUIRED = False
mock_settings.SSO_MFA_REQUIRED = True
mock_settings.ADMIN_MFA_REQUIRED = True
mock_get_response = mock.MagicMock()
mock_request = self.mock_request_with_user(is_staff=True)
mock_has_mfa.return_value = False
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
        # not required for any user
mock_redirect.assert_not_called()
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required__sso_mfa_required_false__normal_user(self, mock_settings, mock_has_mfa, mock_redirect):
mock_settings.MFA_REQUIRED = True
mock_settings.SSO_MFA_REQUIRED = False
mock_settings.ADMIN_MFA_REQUIRED = True
mock_get_response = mock.MagicMock()
mock_request = self.mock_request_with_user()
mock_has_mfa.return_value = False
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
# required for non-sso users
mock_redirect.assert_called_once_with('mfa_home')
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required__sso_mfa_required_false__sso_user(self, mock_settings, mock_has_mfa, mock_redirect):
mock_settings.MFA_REQUIRED = True
mock_settings.SSO_MFA_REQUIRED = False
mock_settings.ADMIN_MFA_REQUIRED = True
mock_get_response = mock.MagicMock()
mock_request = self.mock_request_with_user(with_sso=True)
mock_has_mfa.return_value = False
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
# not required for SSO user
mock_redirect.assert_not_called()
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required__sso_mfa_required_false__staff_user(self, mock_settings, mock_has_mfa, mock_redirect):
mock_settings.MFA_REQUIRED = True
mock_settings.SSO_MFA_REQUIRED = False
mock_settings.ADMIN_MFA_REQUIRED = True
mock_get_response = mock.MagicMock()
mock_request = self.mock_request_with_user(is_staff=True)
mock_has_mfa.return_value = False
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
        # required for admin/staff users
mock_redirect.assert_called_once_with('mfa_home')
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required__admin_mfa_required_false__normal_user(self, mock_settings, mock_has_mfa, mock_redirect):
mock_settings.MFA_REQUIRED = True
mock_settings.SSO_MFA_REQUIRED = True
mock_settings.ADMIN_MFA_REQUIRED = False
mock_get_response = mock.MagicMock()
mock_request = self.mock_request_with_user()
mock_has_mfa.return_value = False
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
        # required for non-admin/staff users
mock_redirect.assert_called_once_with('mfa_home')
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required__admin_mfa_required_false__sso_user(self, mock_settings, mock_has_mfa, mock_redirect):
mock_settings.MFA_REQUIRED = True
mock_settings.SSO_MFA_REQUIRED = True
mock_settings.ADMIN_MFA_REQUIRED = False
mock_get_response = mock.MagicMock()
mock_request = self.mock_request_with_user(with_sso=True)
mock_has_mfa.return_value = False
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
# required for sso users
mock_redirect.assert_called_once_with('mfa_home')
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required__admin_mfa_required_false__admin_user(self, mock_settings, mock_has_mfa, mock_redirect):
mock_settings.MFA_REQUIRED = True
mock_settings.SSO_MFA_REQUIRED = True
mock_settings.ADMIN_MFA_REQUIRED = False
mock_get_response = mock.MagicMock()
mock_request = self.mock_request_with_user(is_staff=True)
mock_has_mfa.return_value = False
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
# not required for admin/staff user
mock_redirect.assert_not_called()
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required_excluded_paths(self, mock_settings, mock_has_mfa, mock_redirect):
mock_settings.MFA_REQUIRED = True
mock_settings.SSO_MFA_REQUIRED = True
mock_settings.ADMIN_MFA_REQUIRED = True
mock_has_mfa.return_value = False
mock_get_response = mock.MagicMock()
excluded_paths = [
'/',
'/accounts/login/',
'/accounts/logout/',
'/oauth2/foo/',
'/user/bar/',
'/captcha/jar/',
'/devices/123/',
'/mfa/add/'
]
for path in excluded_paths:
mock_request = self.mock_request_with_user(path=path)
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
            # no redirect should occur for these excluded paths
mock_redirect.assert_not_called()
@mock.patch('tethys_portal.middleware.TokenAuthentication.authenticate')
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required_all_true__valid_token__normal_user(self, mock_settings, mock_has_mfa, mock_redirect, _):
mock_settings.MFA_REQUIRED = True
mock_settings.SSO_MFA_REQUIRED = True
mock_settings.ADMIN_MFA_REQUIRED = True
mock_get_response = mock.MagicMock()
mock_request = self.mock_request_with_user()
mock_request.headers = {'Authorization': 'Token abcdefghijklmnopqrstuvwxyz'}
mock_has_mfa.return_value = False
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
# not required for valid token
mock_redirect.assert_not_called()
@mock.patch('tethys_portal.middleware.TokenAuthentication.authenticate')
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required_all_true__valid_token__sso_user(self, mock_settings, mock_has_mfa, mock_redirect, _):
mock_settings.MFA_REQUIRED = True
mock_settings.SSO_MFA_REQUIRED = True
mock_settings.ADMIN_MFA_REQUIRED = True
mock_get_response = mock.MagicMock()
mock_request = self.mock_request_with_user(with_sso=True)
mock_request.headers = {'Authorization': 'Token abcdefghijklmnopqrstuvwxyz'}
mock_has_mfa.return_value = False
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
# not required for valid token
mock_redirect.assert_not_called()
@mock.patch('tethys_portal.middleware.TokenAuthentication.authenticate')
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required_all_true__valid_token__staff_user(self, mock_settings, mock_has_mfa, mock_redirect, _):
mock_settings.MFA_REQUIRED = True
mock_settings.SSO_MFA_REQUIRED = True
mock_settings.ADMIN_MFA_REQUIRED = True
mock_get_response = mock.MagicMock()
mock_request = self.mock_request_with_user(is_staff=True)
mock_request.headers = {'Authorization': 'Token abcdefghijklmnopqrstuvwxyz'}
mock_has_mfa.return_value = False
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
# not required for valid token
mock_redirect.assert_not_called()
@mock.patch('tethys_portal.middleware.TokenAuthentication.authenticate')
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required_all_true__invalid_token__normal_user(self, mock_settings, mock_has_mfa, mock_redirect,
mock_authenticate):
mock_settings.MFA_REQUIRED = True
mock_settings.SSO_MFA_REQUIRED = True
mock_settings.ADMIN_MFA_REQUIRED = True
mock_get_response = mock.MagicMock()
mock_request = self.mock_request_with_user()
mock_request.headers = {'Authorization': 'Token abcdefghijklmnopqrstuvwxyz'}
mock_authenticate.side_effect = AuthenticationFailed
mock_has_mfa.return_value = False
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
# required for all users
mock_redirect.assert_called_once_with('mfa_home')
@mock.patch('tethys_portal.middleware.TokenAuthentication.authenticate')
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required_all_true__invalid_token__sso_user(self, mock_settings, mock_has_mfa, mock_redirect,
mock_authenticate):
mock_settings.MFA_REQUIRED = True
mock_settings.SSO_MFA_REQUIRED = True
mock_settings.ADMIN_MFA_REQUIRED = True
mock_get_response = mock.MagicMock()
mock_request = self.mock_request_with_user(with_sso=True)
mock_request.headers = {'Authorization': 'Token abcdefghijklmnopqrstuvwxyz'}
mock_authenticate.side_effect = AuthenticationFailed
mock_has_mfa.return_value = False
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
# required for all users
mock_redirect.assert_called_once_with('mfa_home')
@mock.patch('tethys_portal.middleware.TokenAuthentication.authenticate')
@mock.patch('tethys_portal.middleware.redirect')
@mock.patch('tethys_portal.middleware.has_mfa')
@mock.patch('tethys_portal.middleware.settings')
def test_mfa_required_all_true__invalid_token__staff_user(self, mock_settings, mock_has_mfa, mock_redirect,
mock_authenticate):
mock_settings.MFA_REQUIRED = True
mock_settings.SSO_MFA_REQUIRED = True
mock_settings.ADMIN_MFA_REQUIRED = True
mock_get_response = mock.MagicMock()
mock_request = self.mock_request_with_user(is_staff=True)
mock_request.headers = {'Authorization': 'Token abcdefghijklmnopqrstuvwxyz'}
mock_authenticate.side_effect = AuthenticationFailed
mock_has_mfa.return_value = False
TethysMfaRequiredMiddleware(mock_get_response)(mock_request)
# required for all users
mock_redirect.assert_called_once_with('mfa_home')
| |
# -*- coding: utf-8 -*-
"""Finds attributes of classes and functions in files"""
import ast
import os
from hal.files.models.files import Document
from hal.files.models.system import get_folder_name, is_file, \
is_folder
MODULE_SEP = "."
class ModuleFile:
"""File attributes"""
def __init__(self, path, root_package):
"""
:param path: Path of file to parse
"""
self.path = path
self.package = self._find_package(root_package)
self.tree = self._parse()
def _parse(self):
"""Parses file contents
:return: Tree hierarchy of file
"""
with open(self.path, "rt") as reader:
return ast.parse(reader.read(), filename=self.path)
def _find_package(self, root_package):
"""Finds package name of file
:param root_package: root package
:return: package name
"""
package = self.path.replace(root_package, "")
if package.endswith(".py"):
package = package[:-3]
package = package.replace(os.path.sep, MODULE_SEP)
root_package = get_folder_name(root_package)
package = root_package + package # add root
return package
def get_tree(self):
"""Finds tree hierarchy of file
:return: Tree
"""
return ModuleTree(self.tree, root_package=self.package)
class ModuleTree:
"""Hierarchy"""
def __init__(self, tree, root_package):
"""
:param tree: ast tree
"""
self.tree = tree
self.package = root_package
def _get_instances(self, instance):
"""Finds all instances of instance in tree
:param instance: type of object
:return: list of objects in tree of same instance
"""
return [
x
for x in self.tree.body
if isinstance(x, instance)
]
def get_classes(self):
"""Finds classes in file
:return: list of top-level classes
"""
instances = self._get_instances(ast.ClassDef)
instances = [
PyClass(instance, self.package)
for instance in instances
]
return instances
def get_functions(self):
"""Finds top-level functions in file
:return: list of top-level functions
"""
instances = self._get_instances(ast.FunctionDef)
instances = [
PyFunction(instance, self.package)
for instance in instances
]
return instances
def get_name(self):
"""Finds name of tree
:return: name
"""
return self.tree.name
class ModuleTreeObject(ModuleTree):
"""Object of Python tree"""
def __init__(self, tree, root_package):
super().__init__(tree, root_package)
self.full_package = self.package + MODULE_SEP + self.get_name()
class PyClass(ModuleTreeObject):
"""Python parsed class"""
def get_functions(self, include_meta=False):
"""Finds top-level functions in file
        :param include_meta: whether to include meta functions (e.g. __init__)
:return: list of top-level functions
"""
instances = self._get_instances(ast.FunctionDef)
instances = [
PyFunction(instance, self.full_package) # fix package name
for instance in instances
]
if not include_meta:
instances = [
                instance
for instance in instances
if not instance.get_name().startswith("__")
]
return instances
class PyFunction(ModuleTreeObject):
"""Python parsed method"""
def _get_modules(path):
"""Finds modules in folder recursively
:param path: directory
:return: list of modules
"""
lst = []
folder_contents = os.listdir(path)
is_python_module = "__init__.py" in folder_contents
if is_python_module:
for file in folder_contents:
full_path = os.path.join(path, file)
if is_file(full_path):
lst.append(full_path)
if is_folder(full_path):
lst += _get_modules(full_path) # recurse in folder
return list(set(lst))
def get_modules(folder, include_meta=False):
"""Finds modules (recursively) in folder
:param folder: root folder
    :param include_meta: whether to include meta files (e.g. __init__ or __version__)
:return: list of modules
"""
files = [
file
for file in _get_modules(folder)
if is_file(file) # just files
]
if not include_meta:
files = [
file
for file in files
if not Document(file).name.startswith("__")
]
return files
def get_class_name(obj):
"""Finds name of class of object
:param obj: object
:return: Name of class
"""
return str(obj.__class__.__name__)
def get_method_name(func):
"""Finds name of method
:param func: method
:return: Name of method
"""
return str(func.__name__)
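# Minimal usage sketch for the helpers above (paths are hypothetical):
#   for module_path in get_modules("/path/to/package"):
#       tree = ModuleFile(module_path, "/path/to/package").get_tree()
#       for py_class in tree.get_classes():
#           print(py_class.full_package)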
| |
#!/usr/bin/python
"""program to populate the omero_id into the imageObservation table so we can then index them with pure java from the database and solr experiment index"""
import os
import sys
import os.path
import argparse
import mysql.connector
from mysql.connector import errorcode
import psycopg2
from common import splitString
from database import getDbConnection,getFullResolutionFilePaths
from OmeroPropertiesParser import OmeroPropertiesParser
def main(argv):
print "running main method of get_omero_ids - using postgresQL directly!!"
parser = argparse.ArgumentParser(
        description='Populate omero_ids into the komp2 image_record_observation table so we can then index them with pure Java from the database and Solr experiment index. This version uses PostgreSQL directly'
)
parser.add_argument('-H', '--host', dest='komp2Host',
help='Hostname for server hosting komp2 db'
)
parser.add_argument('-p', '--port', dest='komp2Port',
help='Port by which to connect to komp2 db'
)
parser.add_argument('-u', '--user', dest='komp2User',
help='Username for connecting to komp2 db'
)
parser.add_argument('-db', '--database', dest='komp2Db',
help='Database to connect to for komp2db'
)
parser.add_argument('--pass', dest='komp2Pass',
help='Password for komp2db'
)
parser.add_argument('--omeroDbUser', dest='omeroDbUser',
                        help='Username for the omero postgres database')
parser.add_argument('--omeroDbPass', dest='omeroDbPass',
                        help='Password for the omero postgres database')
parser.add_argument('--omeroDbName', dest='omeroDbName',
help='Name of the postgres database omero uses')
parser.add_argument('--omeroDbHost', dest='omeroDbHost',
help='Hostname for the server hosting the omero postgres database')
parser.add_argument('--omeroDbPort', dest='omeroDbPort',
help='Port to connect on the postgres server hosting the omero database')
parser.add_argument('--profile', dest='profile', default='dev',
help='Name of profile from which to read config: ' + \
'dev, prod, live, ... Assumed to be present ' + \
'in configfiles/profilename/application.properties'
)
parser.add_argument('--profile-path', dest='profilePath',
help='Explicit path to file from which to read ' + \
'profile e.g. ' + \
'/home/kola/configfiles/dev/application.properties'
)
args = parser.parse_args()
# Get values from property file and use as defaults that can be overridden
# by command line parameters
if args.profilePath is not None:
try:
pp = OmeroPropertiesParser()
omeroProps = pp.getOmeroProps(args.profilePath)
except Exception as e:
print "Could not read application properties file from " + args.profilePath
print "Error was: " + str(e)
return
else:
try:
pp = OmeroPropertiesParser(args.profile)
omeroProps = pp.getOmeroProps()
except Exception as e:
print "Could not read application properties file for profile " + args.profile
print "Error was: " + str(e)
return
    komp2Host = args.komp2Host if args.komp2Host is not None else omeroProps['komp2host']
    print "setting komp2Host="+komp2Host
    komp2Port = args.komp2Port if args.komp2Port is not None else omeroProps['komp2port']
    print 'setting komp2Port='+komp2Port
    komp2db = args.komp2Db if args.komp2Db is not None else omeroProps['komp2db']
    print 'setting komp2db='+komp2db
    komp2User = args.komp2User if args.komp2User is not None else omeroProps['komp2user']
    komp2Pass = args.komp2Pass if args.komp2Pass is not None else omeroProps['komp2pass']
global loadedCount
loadedCount=0
print "about to run getdb with arguments komp2db="+komp2db
dbConn=getDbConnection(komp2Host, komp2Port, komp2db, komp2User, komp2Pass)
#cnx=getDbConnection(komp2Host, komp2Port, komp2db, komp2User, komp2Pass)
# Get Postgres connection for directly querying omero database
try:
print "Attempting to connect directly to Postgres DB"
omeroDbUser = args.omeroDbUser if args.omeroDbUser is not None else omeroProps['omerodbuser']
omeroDbPass = args.omeroDbPass if args.omeroDbPass is not None else omeroProps['omerodbpass']
omeroDbName = args.omeroDbName if args.omeroDbName is not None else omeroProps['omerodbname']
omeroDbHost = args.omeroDbHost if args.omeroDbHost is not None else omeroProps['omerodbhost']
if args.omeroDbPort is not None:
omeroDbPort = args.omeroDbPort
elif 'omerodbport' in omeroProps:
omeroDbPort = omeroProps['omerodbport']
else:
omeroDbPort = '5432'
psqlConn = psycopg2.connect(database=omeroDbName, user=omeroDbUser,
password=omeroDbPass, host=omeroDbHost,
port=omeroDbPort)
print "Connected to Postgres DB"
except KeyError as e:
print "Could not connect to omero postgres database. Key " + str(e) + \
" not present in omero properties file. Aborting!"
sys.exit()
except Exception as e:
print "Could not connect to omero postgres database. Error: " + str(e)
sys.exit()
getOmeroIdsAndPaths(dbConn, psqlConn)
dbConn.close()
psqlConn.close()
def getOmeroIdsAndPaths(dbConn, psqlConn):
    # We query the postgres DB for details of the root user on the assumption that all necessary image and
    # annotation steps were done via this user.
# Get a cursor for the postgres db
pg_cur = psqlConn.cursor()
query = "SELECT id FROM experimenter WHERE lastname='root'"
pg_cur.execute(query)
if pg_cur.rowcount != 1:
print "Error - expected one row from query to get user ID for root. got " + str(pg_cur.rowcount) + " - exiting"
sys.exit(-1)
my_expId = str(pg_cur.fetchone()[0])
query = "SELECT id, name FROM project WHERE owner_id=" + my_expId
pg_cur.execute(query)
for project_id, project_name in pg_cur.fetchall():
print "Processing project: " + project_name
query = "Select ds.id, ds.name from dataset ds inner join projectdatasetlink pdsl on ds.id=pdsl.child where pdsl.parent="+str(project_id)
#print query
pg_cur.execute(query)
for dataset in pg_cur.fetchall():
dataset_id, dataset_name = dataset
print "Processing dataset: " + dataset_name
if dataset_name.find('MGP_EEI_114_001') >= 0:
query = "SELECT i.id, fse.clientpath, i.name FROM image i " + \
"INNER JOIN datasetimagelink dsil ON i.id=dsil.child " + \
"INNER JOIN filesetentry fse ON i.fileset=fse.fileset " + \
"WHERE dsil.parent=" + str(dataset_id) + " "\
"AND (fse.clientpath LIKE '%lif' OR fse.clientpath LIKE '%lei')"
#print query
pg_cur.execute(query)
for omero_id, image_path, image_name in pg_cur.fetchall():
newpath = os.path.split(image_path.split('impc/')[-1])[0]
image_path = os.path.join(newpath,image_name)
#print "Processing image: " + image_path
storeOmeroId(dbConn, omero_id, image_path)
else:
query = "SELECT i.id, fse.clientpath, i.name FROM image i " + \
"INNER JOIN datasetimagelink dsil ON i.id=dsil.child " + \
"INNER JOIN filesetentry fse ON i.fileset=fse.fileset " + \
"WHERE dsil.parent=" + str(dataset_id)
#print query
pg_cur.execute(query)
for omero_id, image_path, image_name in pg_cur.fetchall():
#print "Processing image: " + image_path
storeOmeroId(dbConn, omero_id, image_path)
# Deal with annotations if present
query = "SELECT a.id, of.path, of.name FROM annotation a " + \
"INNER JOIN datasetannotationlink dsal ON a.id=dsal.child " + \
"INNER JOIN originalfile of ON a.file=of.id " + \
"WHERE dsal.parent=" + str(dataset_id)
pg_cur.execute(query)
if pg_cur.rowcount > 0:
print "Processing annotations for dataset: " + dataset_name
#print query
for annotation_id, annotation_dir, annotation_name in pg_cur.fetchall():
annotation_id=str(annotation_id)
if annotation_dir is not None and annotation_name is not None:
annotation_path = os.path.join(annotation_dir,annotation_name)
#print "Annotation details: " + annotation_id + ": " + annotation_path
storeOmeroId(dbConn, annotation_id, annotation_path)
else:
message = "Cannot update annotation_id: " + str(annotation_id) + \
" - annotation_dir = " + str(annotation_dir) + \
", annotation_name = " + str(annotation_name)
print message
continue
def storeOmeroId(cnx, omero_id, originalUploadedFilePathToOmero):
global loadedCount
loadedCount=loadedCount+1
if loadedCount % 1000==0:
print "loadedCount="+str(loadedCount)
if splitString in originalUploadedFilePathToOmero:
fullResolutionFilePath=originalUploadedFilePathToOmero.split(splitString,1)[1]#destinationFilePath.replace(nfsDir,"")
elif "images/impc/" in originalUploadedFilePathToOmero:
fullResolutionFilePath=originalUploadedFilePathToOmero.split("images/impc/",1)[1]
else:
        fullResolutionFilePath=originalUploadedFilePathToOmero  # use the string as-is: if 'impc' is not in the original file path this must be an annotation, for which we only have the relative path
try:
mysql_cur = cnx.cursor(buffered=True)
mysql_cur.execute("""UPDATE image_record_observation SET omero_id=%s WHERE full_resolution_file_path=%s""", (omero_id, fullResolutionFilePath))
except mysql.connector.Error as err:
print(err)
if __name__ == "__main__":
main(sys.argv[1:])
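# Example invocation, assuming this file is saved as get_omero_ids.py
# (host/port values below are placeholders):
#   python get_omero_ids.py --profile dev --omeroDbHost localhost --omeroDbPort 5432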
| |
import re
from lib import BaseTest
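# The test classes below rely on BaseTest conventions (inferred from usage in
# this file): runCmd is the aptly command under test, expectedCode the
# expected exit status, fixtureCmds/fixtureGpg/configOverride prepare the
# environment, and check() compares output against stored "gold" expectations.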
class CreateMirror1Test(BaseTest):
"""
create mirror: all architectures + all components
"""
runCmd = "aptly mirror create --ignore-signatures mirror1 http://mirror.yandex.ru/debian/ wheezy"
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror1", "mirror_show")
class CreateMirror2Test(BaseTest):
"""
create mirror: all architectures and 1 component
"""
runCmd = "aptly mirror create --ignore-signatures mirror2 http://mirror.yandex.ru/debian/ wheezy main"
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror2", "mirror_show")
class CreateMirror3Test(BaseTest):
"""
create mirror: some architectures and 2 components
"""
runCmd = "aptly -architectures=i386,amd64 mirror create --ignore-signatures mirror3 http://mirror.yandex.ru/debian/ wheezy main contrib"
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror3", "mirror_show")
class CreateMirror4Test(BaseTest):
"""
create mirror: missing component
"""
expectedCode = 1
runCmd = "aptly -architectures=i386,amd64 mirror create --ignore-signatures mirror4 http://mirror.yandex.ru/debian/ wheezy life"
class CreateMirror5Test(BaseTest):
"""
create mirror: missing architecture
"""
expectedCode = 1
runCmd = "aptly -architectures=i386,nano68 mirror create --ignore-signatures mirror5 http://mirror.yandex.ru/debian/ wheezy"
class CreateMirror6Test(BaseTest):
"""
create mirror: missing release
"""
expectedCode = 1
runCmd = "aptly mirror create --keyring=aptlytest.gpg mirror6 http://mirror.yandex.ru/debian/ suslik"
class CreateMirror7Test(BaseTest):
"""
create mirror: architectures fixed via config file
"""
runCmd = "aptly mirror create --ignore-signatures mirror7 http://mirror.yandex.ru/debian/ wheezy main contrib"
configOverride = {"architectures": ["i386", "amd64"]}
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror7", "mirror_show")
class CreateMirror8Test(BaseTest):
"""
create mirror: already exists
"""
fixtureCmds = [
"aptly mirror create --ignore-signatures mirror8 http://mirror.yandex.ru/debian/ wheezy main contrib"
]
runCmd = "aptly mirror create --ignore-signatures mirror8 http://mirror.yandex.ru/debian/ wheezy main contrib"
expectedCode = 1
class CreateMirror9Test(BaseTest):
"""
create mirror: repo with InRelease verification
"""
runCmd = "aptly mirror create --keyring=aptlytest.gpg mirror9 http://mirror.yandex.ru/debian/ wheezy-backports"
fixtureGpg = True
def outputMatchPrepare(_, s):
return re.sub(r'Signature made .* using|Warning: using insecure memory!\n', '', s)
def check(self):
def removeDates(s):
return re.sub(r"(Date|Valid-Until): [,0-9:+A-Za-z -]+\n", "", s)
self.check_output()
self.check_cmd_output("aptly mirror show mirror9", "mirror_show", match_prepare=removeDates)
class CreateMirror10Test(BaseTest):
"""
create mirror: repo with InRelease verification, failure
"""
runCmd = "aptly mirror create --keyring=aptlytest.gpg mirror10 http://mirror.yandex.ru/debian-backports/ squeeze-backports"
fixtureGpg = False
gold_processor = BaseTest.expand_environ
expectedCode = 1
def outputMatchPrepare(_, s):
return re.sub(r'Signature made .* using|gpgv: keyblock resource .*$|gpgv: Can\'t check signature: .*$', '', s, flags=re.MULTILINE)
class CreateMirror11Test(BaseTest):
"""
create mirror: repo with Release + Release.gpg verification
"""
runCmd = "aptly mirror create --keyring=aptlytest.gpg mirror11 http://mirror.yandex.ru/debian/ wheezy"
fixtureGpg = True
def outputMatchPrepare(_, s):
return re.sub(r'Signature made .* using', '', s)
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror11", "mirror_show")
class CreateMirror12Test(BaseTest):
"""
create mirror: repo with Release+Release.gpg verification, failure
"""
runCmd = "aptly mirror create --keyring=aptlytest.gpg mirror12 http://mirror.yandex.ru/debian/ wheezy"
fixtureGpg = False
gold_processor = BaseTest.expand_environ
expectedCode = 1
def outputMatchPrepare(_, s):
return re.sub(r'Signature made .* using|gpgv: keyblock resource .*$|gpgv: Can\'t check signature: .*$', '', s, flags=re.MULTILINE)
class CreateMirror13Test(BaseTest):
"""
create mirror: skip verification using config file
"""
runCmd = "aptly mirror create mirror13 http://mirror.yandex.ru/debian/ wheezy"
configOverride = {"gpgDisableVerify": True}
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror13", "mirror_show")
class CreateMirror14Test(BaseTest):
"""
create mirror: flat repository
"""
runCmd = "aptly mirror create -keyring=aptlytest.gpg mirror14 https://cloud.r-project.org/bin/linux/debian jessie-cran3/"
fixtureGpg = True
def outputMatchPrepare(_, s):
return re.sub(r'Signature made .* using', '', s)
def check(self):
def removeDates(s):
return re.sub(r"(Date|Valid-Until): [,0-9:+A-Za-z -]+\n", "", s)
self.check_output()
self.check_cmd_output("aptly mirror show mirror14", "mirror_show", match_prepare=removeDates)
class CreateMirror15Test(BaseTest):
"""
create mirror: flat repository + components
"""
runCmd = "aptly mirror create -keyring=aptlytest.gpg mirror14 https://cloud.r-project.org/bin/linux/debian jessie-cran3/ main"
expectedCode = 1
class CreateMirror16Test(BaseTest):
"""
create mirror: there's no "source" architecture
"""
expectedCode = 1
runCmd = "aptly -architectures=source mirror create -ignore-signatures mirror16 http://mirror.yandex.ru/debian/ wheezy"
class CreateMirror17Test(BaseTest):
"""
create mirror: mirror with sources enabled
"""
runCmd = "aptly -architectures=i386 mirror create -ignore-signatures -with-sources mirror17 http://mirror.yandex.ru/debian/ wheezy"
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror17", "mirror_show")
class CreateMirror18Test(BaseTest):
"""
create mirror: mirror with ppa URL
"""
fixtureGpg = True
configOverride = {
"ppaDistributorID": "ubuntu",
"ppaCodename": "maverick",
}
runCmd = "aptly mirror create -keyring=aptlytest.gpg mirror18 ppa:gladky-anton/gnuplot"
def outputMatchPrepare(_, s):
return re.sub(r'Signature made .* using', '', s)
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror18", "mirror_show")
class CreateMirror19Test(BaseTest):
"""
create mirror: mirror with / in distribution
"""
fixtureGpg = True
runCmd = "aptly -architectures='i386' mirror create -keyring=aptlytest.gpg -with-sources mirror19 http://security.debian.org/ wheezy/updates main"
def outputMatchPrepare(_, s):
return re.sub(r'Signature made .* using', '', s)
def check(self):
def removeDates(s):
return re.sub(r"(Date|Valid-Until): [,0-9:+A-Za-z -]+\n", "", s)
self.check_output()
self.check_cmd_output("aptly mirror show mirror19", "mirror_show", match_prepare=removeDates)
class CreateMirror20Test(BaseTest):
"""
create mirror: using failing HTTP_PROXY
"""
fixtureGpg = True
runCmd = "aptly -architectures='i386' mirror create -keyring=aptlytest.gpg -with-sources mirror20 http://security.debian.org/ wheezy/updates main"
environmentOverride = {"HTTP_PROXY": "127.0.0.1:3137"}
expectedCode = 1
def outputMatchPrepare(_, s):
return s.replace('getsockopt: ', '').replace('proxyconnect tcp', 'http: error connecting to proxy http://127.0.0.1:3137')
class CreateMirror21Test(BaseTest):
"""
create mirror: flat repository in subdir
"""
runCmd = "aptly mirror create -keyring=aptlytest.gpg mirror21 http://pkg.jenkins-ci.org/debian-stable binary/"
fixtureGpg = True
def outputMatchPrepare(_, s):
return re.sub(r'Signature made .* using', '', s)
def check(self):
def removeSHA512(s):
return re.sub(r"SHA512: .+\n", "", s)
def removeDates(s):
return re.sub(r"(Date|Valid-Until): [,0-9:+A-Za-z -]+\n", "", s)
self.check_output()
self.check_cmd_output("aptly mirror show mirror21", "mirror_show", match_prepare=lambda s: removeSHA512(removeDates(s)))
class CreateMirror22Test(BaseTest):
"""
create mirror: mirror with filter
"""
runCmd = "aptly mirror create -ignore-signatures -filter='nginx | Priority (required)' mirror22 http://security.debian.org/ wheezy/updates main"
def check(self):
def removeDates(s):
return re.sub(r"(Date|Valid-Until): [,0-9:+A-Za-z -]+\n", "", s)
self.check_output()
self.check_cmd_output("aptly mirror show mirror22", "mirror_show", match_prepare=removeDates)
class CreateMirror23Test(BaseTest):
"""
create mirror: mirror with wrong filter
"""
runCmd = "aptly mirror create -ignore-signatures -filter='nginx | ' mirror23 http://security.debian.org/ wheezy/updates main"
expectedCode = 1
class CreateMirror24Test(BaseTest):
"""
create mirror: disable config value with option
"""
runCmd = "aptly mirror create -ignore-signatures=false -keyring=aptlytest.gpg mirror24 http://security.debian.org/ wheezy/updates main"
fixtureGpg = True
def outputMatchPrepare(_, s):
return re.sub(r'Signature made .* using', '', s)
configOverride = {
"gpgDisableVerify": True
}
class CreateMirror25Test(BaseTest):
"""
create mirror: mirror with udebs enabled
"""
runCmd = "aptly -architectures=i386 mirror create -ignore-signatures -with-udebs mirror25 http://mirror.yandex.ru/debian/ wheezy"
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror25", "mirror_show")
class CreateMirror26Test(BaseTest):
"""
create mirror: flat mirror with udebs
"""
runCmd = "aptly mirror create -keyring=aptlytest.gpg -with-udebs mirror26 http://pkg.jenkins-ci.org/debian-stable binary/"
fixtureGpg = True
expectedCode = 1
class CreateMirror27Test(BaseTest):
"""
create mirror: component with slashes, no stripping
"""
runCmd = "aptly mirror create --ignore-signatures mirror27 http://linux.dell.com/repo/community/ubuntu wheezy openmanage/740"
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror27", "mirror_show")
class CreateMirror28Test(BaseTest):
"""
create mirror: -force-components
"""
runCmd = "aptly mirror create -ignore-signatures -force-components mirror28 http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen"
def check(self):
def removeDates(s):
return re.sub(r"(Date|Valid-Until): [,0-9:+A-Za-z -]+\n", "", s)
self.check_output()
self.check_cmd_output("aptly mirror show mirror28", "mirror_show", match_prepare=removeDates)
| |
import pypsa
import numpy as np
def test_basic_sector_coupling():
override_component_attrs = pypsa.descriptors.Dict(
{k: v.copy() for k, v in pypsa.components.component_attrs.items()}
)
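    # The overrides below extend the standard Link component with optional
    # bus2/bus3 connections plus matching efficiency2/efficiency3 inputs and
    # p2/p3 outputs, so a single Link can model a multi-input/multi-output
    # conversion process (e.g. fuel synthesis with a CO2 stream).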
override_component_attrs["Link"].loc["bus2"] = [
"string",
np.nan,
np.nan,
"2nd bus",
"Input (optional)",
]
override_component_attrs["Link"].loc["bus3"] = [
"string",
np.nan,
np.nan,
"3rd bus",
"Input (optional)",
]
override_component_attrs["Link"].loc["efficiency2"] = [
"static or series",
"per unit",
1.0,
"2nd bus efficiency",
"Input (optional)",
]
override_component_attrs["Link"].loc["efficiency3"] = [
"static or series",
"per unit",
1.0,
"3rd bus efficiency",
"Input (optional)",
]
override_component_attrs["Link"].loc["p2"] = [
"series",
"MW",
0.0,
"2nd bus output",
"Output",
]
override_component_attrs["Link"].loc["p3"] = [
"series",
"MW",
0.0,
"3rd bus output",
"Output",
]
n = pypsa.Network(override_component_attrs=override_component_attrs)
n.set_snapshots(range(10))
n.add("Bus", "bus")
n.add("Load", "load", bus="bus", p_set=1.0)
n.add("Bus", "transport")
n.add("Load", "transport", bus="transport", p_set=1.0)
n.add("Bus", "diesel")
n.add("Store", "diesel", bus="diesel", e_cyclic=True, e_nom=1000.0)
n.add("Bus", "hydrogen")
n.add("Store", "hydrogen", bus="hydrogen", e_cyclic=True, e_nom=1000.0)
n.add(
"Link", "electrolysis", p_nom=2.0, efficiency=0.8, bus0="bus", bus1="hydrogen"
)
n.add(
"Link",
"FT",
p_nom=4,
bus0="hydrogen",
bus1="diesel",
bus2="co2 stored",
efficiency=1.0,
efficiency2=-1,
)
    # minus sign because the direction is opposite to how fossil fuels are used:
    # burning CH4 draws the fuel store down and pushes the atmosphere store up
n.add("Carrier", "co2", co2_emissions=-1.0)
# this tracks CO2 in the atmosphere
n.add("Bus", "co2 atmosphere", carrier="co2")
# NB: can also be negative
n.add("Store", "co2 atmosphere", e_nom=1000, e_min_pu=-1, bus="co2 atmosphere")
# this tracks CO2 stored, e.g. underground
n.add("Bus", "co2 stored")
# NB: can also be negative
n.add("Store", "co2 stored", e_nom=1000, e_min_pu=-1, bus="co2 stored")
n.add(
"Link",
"DAC",
bus0="bus",
bus1="co2 stored",
bus2="co2 atmosphere",
efficiency=1,
efficiency2=-1,
p_nom=5.0,
)
n.add(
"Link",
"diesel car",
bus0="diesel",
bus1="transport",
bus2="co2 atmosphere",
efficiency=1.0,
efficiency2=1.0,
p_nom=2.0,
)
n.add("Bus", "gas")
n.add("Store", "gas", e_initial=50, e_nom=50, marginal_cost=20, bus="gas")
n.add(
"Link",
"OCGT",
bus0="gas",
bus1="bus",
bus2="co2 atmosphere",
p_nom_extendable=True,
efficiency=0.5,
efficiency2=1,
)
n.add(
"Link",
"OCGT+CCS",
bus0="gas",
bus1="bus",
bus2="co2 stored",
bus3="co2 atmosphere",
p_nom_extendable=True,
efficiency=0.4,
efficiency2=0.9,
efficiency3=0.1,
)
    # Add a cheap and an expensive biomass generator.
biomass_marginal_cost = [20.0, 50.0]
biomass_stored = [40.0, 15.0]
for i in range(2):
n.add("Bus", "biomass" + str(i))
n.add(
"Store",
"biomass" + str(i),
bus="biomass" + str(i),
e_nom_extendable=True,
marginal_cost=biomass_marginal_cost[i],
e_nom=biomass_stored[i],
e_initial=biomass_stored[i],
)
        # plain biomass burning simultaneously empties and refills the co2 atmosphere, so no co2 buses are attached to this link
n.add(
"Link",
"biomass" + str(i),
bus0="biomass" + str(i),
bus1="bus",
p_nom_extendable=True,
efficiency=0.5,
)
n.add(
"Link",
"biomass+CCS" + str(i),
bus0="biomass" + str(i),
bus1="bus",
bus2="co2 stored",
bus3="co2 atmosphere",
p_nom_extendable=True,
efficiency=0.4,
efficiency2=1.0,
efficiency3=-1,
)
# can go to -50, but at some point can't generate enough electricity for DAC and demand
target = -50
n.add(
"GlobalConstraint",
"co2_limit",
sense="<=",
carrier_attribute="co2_emissions",
constant=target,
)
status, condition = n.lopf()
assert status == "ok"
| |
import json
import redis
from redis_graph_py3.redis_graph_functions import Build_Configuration
from redis_graph_py3.redis_graph_functions import Query_Configuration
class Construct_Farm(Build_Configuration):
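    # Builder for a Redis-graph description of a farm system (semantics
    # inferred from the methods below): construct_*/add_*/start_* calls create
    # nodes, optionally pushing a namespace, and the matching end_* calls pop
    # that namespace again.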
def __init__( self ):
redis_handle = redis.StrictRedis( host = "localhost", port=6379, db = 14 )
super().__init__(redis_handle )
def construct_system( self,name,properties={}):
self.construct_node( push_namespace = True, relationship="SYSTEM", label = name,
properties=properties)
def end_system( self):
self.pop_namespace()
def construct_site( self,name, address, properties={}):
properties["address"] = address
self.construct_node( push_namespace=True,relationship="SITE", label=name,
properties =properties)
def end_site( self ):
self.pop_namespace()
def add_redis_data_store( self, name, ip, port=6379, properties = {} ):
properties["ip"] = ip
properties["port"] = port
self.construct_node( push_namespace=True,relationship="DATA_STORE", label=name,
properties= properties )
def start_moisture_store( self ):
self.construct_node( push_namespace=True,relationship="MOISTURE_STORE", label="MOISTURE_STORE",
properties= {} )
def end_moisture_store( self ):
self.pop_namespace()
def add_moisture_sensor_store( self, name, description, description_map, depth_map, update_time ):
properties = {}
properties["description"] = description
properties["description_map"] = json.dumps(description_map)
properties["update_time"] = update_time
properties["depth_map"] = json.dumps(depth_map)
self.construct_node( push_namespace=True,relationship="MOISTURE_DATA", label=name,
properties= properties )
def add_status_store( self, name, queue_name):
properties = {}
properties["queue_name"] = queue_name
self.construct_node( push_namespace=True,relationship="STATUS_STORE", label=name,
properties= properties )
def start_info_store( self ):
self.construct_node( push_namespace=True,relationship="INFO_STORE", label="INFO_STORE",
properties= {} )
def add_eto_store(self ):
self.construct_node( push_namespace=False,relationship="ETO_STORE", label="ETO_STORE",
properties= {} )
def add_air_temperature_humidity_store(self):
self.construct_node( push_namespace=False,relationship="TEMP_HUMIDITY", label="TEMP_HUMIDITY",
properties= {} )
def add_air_temperature_humidity_daily_log(self):
self.construct_node( push_namespace=False,relationship="TEMP_HUMIDITY_DAILY", label="TEMP_HUMIDITY_DAILY",
properties= {} )
self.construct_node( push_namespace=False,relationship="TEMP_HUMIDITY_DAILY_ETO", label="TEMP_HUMIDITY_DAILY_ETO",
properties= {} )
def end_info_store(self):
self.pop_namespace()
def end_redis_data_store( self):
self.pop_namespace()
   def add_udp_io_server(self, name, ip,remote_type, port, properties={} ):
properties["ip"] = ip
properties["remote_type"] = remote_type
properties["port"] = port
return self.construct_node( push_namespace=True,relationship="UDP_IO_SERVER",
label=name, properties = properties )
def end_udp_io_server(self ):
self.pop_namespace()
def add_rtu_interface(self, name ,protocol, baud_rate, properties={} ):
properties["protocol"]= protocol
properties["baud_rate"] = baud_rate
return self.construct_node( push_namespace=True,relationship="RTU_INTERFACE",
label=name,properties = properties)
def end_rtu_interface( self ):
self.pop_namespace()
def add_remote( self, name,modbus_address,type, function, properties = {}):
properties["modbus_address"] = modbus_address
properties["type"] = type
properties["function"] = function
self.construct_node( push_namespace=True,relationship="REMOTE", label=name,
properties = properties )
def construct_controller( self,name, ip,type,properties={} ):
properties["name"] = name
properties["ip"] = ip
self.construct_node( push_namespace=True,relationship="CONTROLLER", label=name,
properties = properties)
def end_controller( self ):
self.pop_namespace()
   def start_service( self, name, properties = {} ):
       self.construct_node( push_namespace=True,relationship="SERVICES", label=name,
                      properties = properties)
def construct_web_server( self, name,url,properties = {} ):
properties["url"] = url
self.construct_node( push_namespace=False,relationship="WEB_SERVER", label=name,
properties = properties)
def add_rabbitmq_command_rpc_queue( self,name, properties = {} ):
self.construct_node( push_namespace=False,relationship="COMMAND_RPC_QUEUE", label=name,
properties = properties)
def add_rabbitmq_web_rpc_queue( self,name, properties = {} ):
self.construct_node( push_namespace=False,relationship="WEB_RPC_QUEUE", label=name,
properties = properties)
def add_rabbitmq_event_queue( self,name, properties = {} ):
self.construct_node( push_namespace=False,relationship="RABBITMQ_EVENT_QUEUE", label=name,
properties = properties)
def add_rabbitmq_status_queue( self,name,vhost,queue,port,server ):
properties = {}
properties["vhost"] = vhost
properties["queue"] = queue
properties["port"] = port
properties["server"] = server
self.construct_node( push_namespace=False,relationship="RABBITMQ_STATUS_QUEUE", label=name,
properties = properties)
def add_ntpd_server( self,name, properties = {} ):
self.construct_node( push_namespace=False,relationship="NTPD_SERVER", label=name,
properties = properties)
def start_eto_server( self,name, properties = {} ):
self.construct_node( push_namespace=False,relationship="ETO_SERVER", label=name, properties = properties)
def add_eto_setup_code( self, access_codes, altitude , properties = {} ):
properties["messo_eto"] = json.dumps( access_codes["messo_eto"] )
properties["messo_precp"] = json.dumps( access_codes["messo_precp"] )
properties["cimis_eto"] = json.dumps( access_codes["cimis_eto"] )
properties["cimis_spatial"] = json.dumps( access_codes["cimis_spatial"])
properties["altitude"] = altitude
self.construct_node( push_namespace=False,relationship="ETO_SETUP_DATA", label="ETO_SETUP_DATA",
properties = properties)
def end_eto_server(self):
self.pop_namespace()
def add_linux_server_monitor( self, name,properties = {} ):
properties["name"] = "Linux Server Monitor"
self.construct_node( push_namespace=False,relationship="LINUX_SERVER_MONITOR", label=name, properties = properties)
def add_schedule_monitoring( self, name,properties = {} ):
self.construct_node( push_namespace=False,relationship="NTPD_SERVER", label=name, properties = properties)
def add_moisture_monitoring( self, name, properties = {} ):
self.construct_node( push_namespace=False,relationship="NTPD_SERVER", label=name, properties = properties)
def irrigation_monitoring( self, name,properties = {} ):
self.construct_node( push_namespace=False,relationship="IRRIGATION_MONITOR", label=name, properties = properties)
def add_device_monitoring( self, name, properties = {} ):
self.construct_node( push_namespace=False,relationship="DEVICE_MONITOR", label=name, properties = properties)
def add_process_monitoring( self,name, properties = {} ):
self.construct_node( push_namespace=False,relationship="PROCESS_MONITOR", label=name, properties = properties)
def add_watch_dog_monitoring( self,name, properties = {} ):
self.construct_node( push_namespace=False,relationship="WATCH_DOG_MONITORING", label=name, properties = properties)
def add_io_collection( self, name, properties = {} ):
self.construct_node( push_namespace=False,relationship="PROCESS_MONITOR", label=name, properties = properties)
def add_local_ai( self, name, properties = {} ):
self.construct_node( push_namespace=False,relationship="PROCESS_MONITOR", label=name, properties = properties)
class Graph_Management(Query_Configuration):
def __init__( self , controller_name, io_server_name, data_store_name ):
self.redis_handle = redis.StrictRedis( host = "localhost", port=6379, db = 14 )
super().__init__( self.redis_handle)
self.controller_name = controller_name
self.io_server_name = io_server_name
self.data_store_name = data_store_name
self.initialize_cb_handlers()
def find_remotes( self ):
data = self.match_terminal_relationship( "REMOTE_UNIT", label= None , starting_set = None )
return data
def find_data_stores( self ):
data = self.match_terminal_relationship( "DATA_STORE", label= None , starting_set = None )
return data
def find_io_servers( self ):
data = self.match_terminal_relationship( "UDP_IO_SERVER", label= None , starting_set = None )
return data
def initialize_cb_handlers( self ):
self.cb_handlers = {}
def add_cb_handler( self, tag, function ):
self.cb_handlers[ tag ] = function
   def verify_handler( self, tag ):
       # dict.has_key() was removed in Python 3; use a membership test instead
       return tag in self.cb_handlers
def execute_cb_handlers( self, tag, value, parameters ): # parameters is a list
function = self.cb_handlers[tag]
return function( tag, value , parameters )
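# Minimal usage sketch (illustrative only; the names below are hypothetical and
# a local redis-server on port 6379/db 14 plus the redis_graph_py3 package are
# assumed to be available; any graph finalisation step required by
# Build_Configuration is omitted here):
if __name__ == "__main__":
    farm = Construct_Farm()
    farm.construct_system("irrigation_system")
    farm.construct_site("demo_site", address="123 Example Rd")
    farm.add_redis_data_store("primary_store", ip="127.0.0.1")
    farm.end_redis_data_store()
    farm.end_site()
    farm.end_system()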
| |
"""
Generic utility functions useful for writing Python code in general
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
import warnings
import logging
import os
import re
import subprocess
import numpy as np
from six.moves import xrange
known_number_types = (int, float, np.float16, np.float32, np.float64,
                      np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64)
CLEVERHANS_ROOT = os.path.dirname(os.path.dirname(__file__))
class _ArgsWrapper(object):
"""
Wrapper that allows attribute access to dictionaries
"""
def __init__(self, args):
if not isinstance(args, dict):
args = vars(args)
self.args = args
def __getattr__(self, name):
return self.args.get(name)
class AccuracyReport(object):
"""
An object summarizing the accuracy results for experiments involving
training on clean examples or adversarial examples, then evaluating
on clean or adversarial examples.
"""
def __init__(self):
self.clean_train_clean_eval = 0.
self.clean_train_adv_eval = 0.
self.adv_train_clean_eval = 0.
self.adv_train_adv_eval = 0.
# Training data accuracy results to be used by tutorials
self.train_clean_train_clean_eval = 0.
self.train_clean_train_adv_eval = 0.
self.train_adv_train_clean_eval = 0.
self.train_adv_train_adv_eval = 0.
def batch_indices(batch_nb, data_length, batch_size):
"""
This helper function computes a batch start and end index
:param batch_nb: the batch number
:param data_length: the total length of the data being parsed by batches
:param batch_size: the number of inputs in each batch
:return: pair of (start, end) indices
"""
# Batch start and end index
start = int(batch_nb * batch_size)
end = int((batch_nb + 1) * batch_size)
# When there are not enough inputs left, we reuse some to complete the
# batch
if end > data_length:
shift = end - data_length
start -= shift
end -= shift
return start, end
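# Illustrative example (not from the original module): with data_length=10 and
# batch_size=4, batch 2 nominally covers [8, 12); since 12 > 10 the window is
# shifted back so the final batch reuses inputs: batch_indices(2, 10, 4) == (6, 10).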
def other_classes(nb_classes, class_ind):
"""
Returns a list of class indices excluding the class indexed by class_ind
:param nb_classes: number of classes in the task
:param class_ind: the class index to be omitted
:return: list of class indices excluding the class indexed by class_ind
"""
if class_ind < 0 or class_ind >= nb_classes:
error_str = "class_ind must be within the range (0, nb_classes - 1)"
raise ValueError(error_str)
other_classes_list = list(range(nb_classes))
other_classes_list.remove(class_ind)
return other_classes_list
def to_categorical(y, nb_classes, num_classes=None):
"""
Converts a class vector (integers) to binary class matrix.
This is adapted from the Keras function with the same name.
:param y: class vector to be converted into a matrix
(integers from 0 to nb_classes).
  :param nb_classes: total number of classes.
  :param num_classes: deprecated alias for nb_classes
:return: A binary matrix representation of the input.
"""
if num_classes is not None:
if nb_classes is not None:
raise ValueError("Should not specify both nb_classes and its deprecated "
"alias, num_classes")
warnings.warn("`num_classes` is deprecated. Switch to `nb_classes`."
" `num_classes` may be removed on or after 2019-04-23.")
nb_classes = num_classes
del num_classes
y = np.array(y, dtype='int').ravel()
n = y.shape[0]
categorical = np.zeros((n, nb_classes))
categorical[np.arange(n), y] = 1
return categorical
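# Illustrative example (not from the original module):
#     to_categorical([0, 2], 3)
# returns array([[1., 0., 0.],
#                [0., 0., 1.]])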
def random_targets(gt, nb_classes):
"""
Take in an array of correct labels and randomly select a different label
for each label in the array. This is typically used to randomly select a
target class in targeted adversarial examples attacks (i.e., when the
search algorithm takes in both a source class and target class to compute
the adversarial example).
:param gt: the ground truth (correct) labels. They can be provided as a
1D vector or 2D array of one-hot encoded labels.
:param nb_classes: The number of classes for this task. The random class
will be chosen between 0 and nb_classes such that it
is different from the correct class.
:return: A numpy array holding the randomly-selected target classes
encoded as one-hot labels.
"""
# If the ground truth labels are encoded as one-hot, convert to labels.
if len(gt.shape) == 2:
gt = np.argmax(gt, axis=1)
# This vector will hold the randomly selected labels.
result = np.zeros(gt.shape, dtype=np.int32)
for class_ind in xrange(nb_classes):
# Compute all indices in that class.
in_cl = gt == class_ind
size = np.sum(in_cl)
# Compute the set of potential targets for this class.
potential_targets = other_classes(nb_classes, class_ind)
# Draw with replacement random targets among the potential targets.
result[in_cl] = np.random.choice(potential_targets, size=size)
# Encode vector of random labels as one-hot labels.
result = to_categorical(result, nb_classes)
result = result.astype(np.int32)
return result
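# Illustrative example (not from the original module): random_targets(np.array([0, 1, 0]), 2)
# returns one-hot int32 targets whose argmax differs from the true label in every
# row, i.e. each row is forced to the only other available class.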
def pair_visual(*args, **kwargs):
"""Deprecation wrapper"""
warnings.warn("`pair_visual` has moved to `cleverhans.plot.pyplot_image`. "
"cleverhans.utils.pair_visual may be removed on or after "
"2019-04-24.")
from cleverhans.plot.pyplot_image import pair_visual as new_pair_visual
return new_pair_visual(*args, **kwargs)
def grid_visual(*args, **kwargs):
"""Deprecation wrapper"""
warnings.warn("`grid_visual` has moved to `cleverhans.plot.pyplot_image`. "
"cleverhans.utils.grid_visual may be removed on or after "
"2019-04-24.")
from cleverhans.plot.pyplot_image import grid_visual as new_grid_visual
return new_grid_visual(*args, **kwargs)
def get_logits_over_interval(*args, **kwargs):
"""Deprecation wrapper"""
warnings.warn("`get_logits_over_interval` has moved to "
"`cleverhans.plot.pyplot_image`. "
"cleverhans.utils.get_logits_over_interval may be removed on "
"or after 2019-04-24.")
# pylint:disable=line-too-long
from cleverhans.plot.pyplot_image import get_logits_over_interval as new_get_logits_over_interval
return new_get_logits_over_interval(*args, **kwargs)
def linear_extrapolation_plot(*args, **kwargs):
"""Deprecation wrapper"""
warnings.warn("`linear_extrapolation_plot` has moved to "
"`cleverhans.plot.pyplot_image`. "
"cleverhans.utils.linear_extrapolation_plot may be removed on "
"or after 2019-04-24.")
# pylint:disable=line-too-long
from cleverhans.plot.pyplot_image import linear_extrapolation_plot as new_linear_extrapolation_plot
return new_linear_extrapolation_plot(*args, **kwargs)
def set_log_level(level, name="cleverhans"):
"""
Sets the threshold for the cleverhans logger to level
:param level: the logger threshold. You can find values here:
https://docs.python.org/2/library/logging.html#levels
:param name: the name used for the cleverhans logger
"""
logging.getLogger(name).setLevel(level)
def get_log_level(name="cleverhans"):
"""
Gets the current threshold for the cleverhans logger
:param name: the name used for the cleverhans logger
"""
return logging.getLogger(name).getEffectiveLevel()
class TemporaryLogLevel(object):
"""
A ContextManager that changes a log level temporarily.
Note that the log level will be set back to its original value when
the context manager exits, even if the log level has been changed
again in the meantime.
"""
def __init__(self, level, name):
self.name = name
self.level = level
def __enter__(self):
self.old_level = get_log_level(self.name)
set_log_level(self.level, self.name)
def __exit__(self, type, value, traceback):
set_log_level(self.old_level, self.name)
return True
def create_logger(name):
"""
Create a logger object with the given name.
If this is the first time that we call this method, then initialize the
formatter.
"""
base = logging.getLogger("cleverhans")
if len(base.handlers) == 0:
ch = logging.StreamHandler()
formatter = logging.Formatter('[%(levelname)s %(asctime)s %(name)s] ' +
'%(message)s')
ch.setFormatter(formatter)
base.addHandler(ch)
return base
def deterministic_dict(normal_dict):
"""
Returns a version of `normal_dict` whose iteration order is always the same
"""
out = OrderedDict()
for key in sorted(normal_dict.keys()):
out[key] = normal_dict[key]
return out
def ordered_union(l1, l2):
"""
Return the union of l1 and l2, with a deterministic ordering.
  (Union of python sets does not necessarily have a consistent iteration
order)
:param l1: list of items
:param l2: list of items
:returns: list containing one copy of each item that is in l1 or in l2
"""
out = []
for e in l1 + l2:
if e not in out:
out.append(e)
return out
def safe_zip(*args):
"""like zip but with these properties:
- returns a list, rather than an iterator. This is the old Python2 zip behavior.
- a guarantee that all arguments are the same length.
(normal zip silently drops entries to make them the same length)
"""
length = len(args[0])
if not all(len(arg) == length for arg in args):
raise ValueError("Lengths of arguments do not match: "
+ str([len(arg) for arg in args]))
return list(zip(*args))
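# Illustrative example (not from the original module):
#     safe_zip([1, 2], ['a', 'b'])  -> [(1, 'a'), (2, 'b')]
#     safe_zip([1, 2], ['a'])       -> raises ValueError instead of silently truncating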
def shell_call(command, **kwargs):
"""Calls shell command with argument substitution.
Args:
command: command represented as a list. Each element of the list is one
token of the command. For example "cp a b" becomes ['cp', 'a', 'b']
If any element of the list looks like '${NAME}' then it will be replaced
by value from **kwargs with key 'NAME'.
**kwargs: dictionary with argument substitution
Returns:
output of the command
Raises:
subprocess.CalledProcessError if command return value is not zero
This function is useful when you need to do variable substitution prior
running the command. Below are few examples of how it works:
shell_call(['cp', 'a', 'b'], a='asd') calls command 'cp a b'
shell_call(['cp', '${a}', 'b'], a='asd') calls command 'cp asd b',
  '${a}' was replaced with 'asd' before calling the command
"""
# Regular expression to find instances of '${NAME}' in a string
CMD_VARIABLE_RE = re.compile('^\\$\\{(\\w+)\\}$')
command = list(command)
for i in range(len(command)):
m = CMD_VARIABLE_RE.match(command[i])
if m:
var_id = m.group(1)
if var_id in kwargs:
command[i] = kwargs[var_id]
str_command = ' '.join(command)
logging.debug('Executing shell command: %s' % str_command)
return subprocess.check_output(command)
def deep_copy(numpy_dict):
"""
Returns a copy of a dictionary whose values are numpy arrays.
Copies their values rather than copying references to them.
"""
out = {}
for key in numpy_dict:
out[key] = numpy_dict[key].copy()
return out
| |
import ctypes
import numpy
import sys
import os
import os.path
from numpy.compat import asbytes
def _load_library(libname, loader_path):
""" A small fork of numpy.ctypeslib.load_library
to support windll.
"""
if ctypes.__version__ < '1.0.1':
import warnings
warnings.warn("All features of ctypes interface may not work " \
"with ctypes < 1.0.1")
ext = os.path.splitext(libname)[1]
if not ext:
# Try to load library with platform-specific name, otherwise
# default to libname.[so|pyd]. Sometimes, these files are built
# erroneously on non-linux platforms.
libname_ext = ['%s.so' % libname, '%s.pyd' % libname]
if sys.platform == 'win32':
libname_ext.insert(0, '%s.dll' % libname)
elif sys.platform == 'darwin':
libname_ext.insert(0, '%s.dylib' % libname)
else:
libname_ext = [libname]
loader_path = os.path.abspath(loader_path)
if not os.path.isdir(loader_path):
libdir = os.path.dirname(loader_path)
else:
libdir = loader_path
if sys.platform == 'win32':
loader = ctypes.windll
else:
loader = ctypes.cdll
for ln in libname_ext:
try:
return loader[os.path.join(libdir, ln)]
except OSError:
pass
# TODO: Setup errno and other bits to something correctly
    raise OSError('Unable to find library in any of the following paths: %s' %
[os.path.join(libdir, ln) for ln in libname_ext])
lib_dirs = [os.path.dirname(__file__),
'/lib',
'/usr/lib',
'/usr/local/lib',
'/opt/local/lib',
]
if 'HOME' in os.environ:
lib_dirs.append(os.path.join(os.environ['HOME'], 'lib'))
API = {
'FreeImage_Load': (ctypes.c_void_p,
[ctypes.c_int, ctypes.c_char_p, ctypes.c_int]),
'FreeImage_GetWidth': (ctypes.c_uint,
[ctypes.c_void_p]),
'FreeImage_GetHeight': (ctypes.c_uint,
[ctypes.c_void_p]),
'FreeImage_GetImageType': (ctypes.c_uint,
[ctypes.c_void_p]),
'FreeImage_GetBPP': (ctypes.c_uint,
[ctypes.c_void_p]),
'FreeImage_GetPitch': (ctypes.c_uint,
[ctypes.c_void_p]),
'FreeImage_GetBits': (ctypes.c_void_p,
[ctypes.c_void_p]),
}
# Albert's ctypes pattern
def register_api(lib,api):
for f, (restype, argtypes) in api.items():
func = getattr(lib, f)
func.restype = restype
func.argtypes = argtypes
_FI = None
for d in lib_dirs:
for libname in ('freeimage', 'FreeImage',
'libfreeimage', 'libFreeImage'):
try:
_FI = _load_library(libname, d)
except OSError:
pass
else:
break
if _FI is not None:
break
if not _FI:
raise OSError('Could not find libFreeImage in any of the following '
'directories: \'%s\'' % '\', \''.join(lib_dirs))
register_api(_FI, API)
if sys.platform == 'win32':
_functype = ctypes.WINFUNCTYPE
else:
_functype = ctypes.CFUNCTYPE
@_functype(None, ctypes.c_int, ctypes.c_char_p)
def _error_handler(fif, message):
raise RuntimeError('FreeImage error: %s' % message)
_FI.FreeImage_SetOutputMessage(_error_handler)
class FI_TYPES(object):
FIT_UNKNOWN = 0
FIT_BITMAP = 1
FIT_UINT16 = 2
FIT_INT16 = 3
FIT_UINT32 = 4
FIT_INT32 = 5
FIT_FLOAT = 6
FIT_DOUBLE = 7
FIT_COMPLEX = 8
FIT_RGB16 = 9
FIT_RGBA16 = 10
FIT_RGBF = 11
FIT_RGBAF = 12
dtypes = {
FIT_BITMAP: numpy.uint8,
FIT_UINT16: numpy.uint16,
FIT_INT16: numpy.int16,
FIT_UINT32: numpy.uint32,
FIT_INT32: numpy.int32,
FIT_FLOAT: numpy.float32,
FIT_DOUBLE: numpy.float64,
FIT_COMPLEX: numpy.complex128,
FIT_RGB16: numpy.uint16,
FIT_RGBA16: numpy.uint16,
FIT_RGBF: numpy.float32,
FIT_RGBAF: numpy.float32
}
fi_types = {
(numpy.uint8, 1): FIT_BITMAP,
(numpy.uint8, 3): FIT_BITMAP,
(numpy.uint8, 4): FIT_BITMAP,
(numpy.uint16, 1): FIT_UINT16,
(numpy.int16, 1): FIT_INT16,
(numpy.uint32, 1): FIT_UINT32,
(numpy.int32, 1): FIT_INT32,
(numpy.float32, 1): FIT_FLOAT,
(numpy.float64, 1): FIT_DOUBLE,
(numpy.complex128, 1): FIT_COMPLEX,
(numpy.uint16, 3): FIT_RGB16,
(numpy.uint16, 4): FIT_RGBA16,
(numpy.float32, 3): FIT_RGBF,
(numpy.float32, 4): FIT_RGBAF
}
extra_dims = {
FIT_UINT16: [],
FIT_INT16: [],
FIT_UINT32: [],
FIT_INT32: [],
FIT_FLOAT: [],
FIT_DOUBLE: [],
FIT_COMPLEX: [],
FIT_RGB16: [3],
FIT_RGBA16: [4],
FIT_RGBF: [3],
FIT_RGBAF: [4]
}
@classmethod
def get_type_and_shape(cls, bitmap):
w = _FI.FreeImage_GetWidth(bitmap)
h = _FI.FreeImage_GetHeight(bitmap)
fi_type = _FI.FreeImage_GetImageType(bitmap)
if not fi_type:
raise ValueError('Unknown image pixel type')
dtype = cls.dtypes[fi_type]
if fi_type == cls.FIT_BITMAP:
bpp = _FI.FreeImage_GetBPP(bitmap)
if bpp == 8:
extra_dims = []
elif bpp == 24:
extra_dims = [3]
elif bpp == 32:
extra_dims = [4]
else:
raise ValueError('Cannot convert %d BPP bitmap' % bpp)
else:
extra_dims = cls.extra_dims[fi_type]
return numpy.dtype(dtype), extra_dims + [w, h]
class IO_FLAGS(object):
#Bmp
BMP_DEFAULT = 0
BMP_SAVE_RLE = 1
#Png
PNG_DEFAULT = 0
PNG_IGNOREGAMMA = 1
#Gif
GIF_DEFAULT = 0
GIF_LOAD256 = 1
GIF_PLAYBACK = 2
#Ico
ICO_DEFAULT = 0
ICO_MAKEALPHA = 1
#Tiff
TIFF_DEFAULT = 0
TIFF_CMYK = 0x0001
TIFF_NONE = 0x0800
TIFF_PACKBITS = 0x0100
TIFF_DEFLATE = 0x0200
TIFF_ADOBE_DEFLATE = 0x0400
TIFF_CCITTFAX3 = 0x1000
TIFF_CCITTFAX4 = 0x2000
TIFF_LZW = 0x4000
TIFF_JPEG = 0x8000
#Jpeg
JPEG_DEFAULT = 0
JPEG_FAST = 1
JPEG_ACCURATE = 2
JPEG_QUALITYSUPERB = 0x80
JPEG_QUALITYGOOD = 0x100
JPEG_QUALITYNORMAL = 0x200
JPEG_QUALITYAVERAGE = 0x400
JPEG_QUALITYBAD = 0x800
JPEG_CMYK = 0x1000
JPEG_PROGRESSIVE = 0x2000
#Others...
CUT_DEFAULT = 0
DDS_DEFAULT = 0
HDR_DEFAULT = 0
IFF_DEFAULT = 0
KOALA_DEFAULT = 0
LBM_DEFAULT = 0
MNG_DEFAULT = 0
PCD_DEFAULT = 0
PCD_BASE = 1
PCD_BASEDIV4 = 2
PCD_BASEDIV16 = 3
PCX_DEFAULT = 0
PNM_DEFAULT = 0
PNM_SAVE_RAW = 0
PNM_SAVE_ASCII = 1
PSD_DEFAULT = 0
RAS_DEFAULT = 0
TARGA_DEFAULT = 0
TARGA_LOAD_RGB888 = 1
WBMP_DEFAULT = 0
XBM_DEFAULT = 0
class METADATA_MODELS(object):
FIMD_NODATA = -1
FIMD_COMMENTS = 0
FIMD_EXIF_MAIN = 1
FIMD_EXIF_EXIF = 2
FIMD_EXIF_GPS = 3
FIMD_EXIF_MAKERNOTE = 4
FIMD_EXIF_INTEROP = 5
FIMD_IPTC = 6
FIMD_XMP = 7
FIMD_GEOTIFF = 8
FIMD_ANIMATION = 9
FIMD_CUSTOM = 10
def read(filename, flags=0):
"""Read an image to a numpy array of shape (width, height) for
greyscale images, or shape (width, height, nchannels) for RGB or
RGBA images.
"""
bitmap = _read_bitmap(filename, flags)
try:
return _array_from_bitmap(bitmap)
finally:
_FI.FreeImage_Unload(bitmap)
def read_multipage(filename, flags=0):
"""Read a multipage image to a list of numpy arrays, where each
array is of shape (width, height) for greyscale images, or shape
(nchannels, width, height) for RGB or RGBA images.
"""
filename = asbytes(filename)
ftype = _FI.FreeImage_GetFileType(filename, 0)
if ftype == -1:
raise ValueError('Cannot determine type of file %s' % filename)
create_new = False
read_only = True
keep_cache_in_memory = True
multibitmap = _FI.FreeImage_OpenMultiBitmap(ftype, filename, create_new,
read_only, keep_cache_in_memory,
flags)
if not multibitmap:
raise ValueError('Could not open %s as multi-page image.' % filename)
try:
multibitmap = ctypes.c_void_p(multibitmap)
pages = _FI.FreeImage_GetPageCount(multibitmap)
arrays = []
for i in range(pages):
bitmap = _FI.FreeImage_LockPage(multibitmap, i)
bitmap = ctypes.c_void_p(bitmap)
try:
arrays.append(_array_from_bitmap(bitmap))
finally:
_FI.FreeImage_UnlockPage(multibitmap, bitmap, False)
return arrays
finally:
_FI.FreeImage_CloseMultiBitmap(multibitmap, 0)
def _read_bitmap(filename, flags):
"""Load a file to a FreeImage bitmap pointer"""
filename = asbytes(filename)
ftype = _FI.FreeImage_GetFileType(filename, 0)
if ftype == -1:
raise ValueError('Cannot determine type of file %s' % filename)
bitmap = _FI.FreeImage_Load(ftype, filename, flags)
if not bitmap:
raise ValueError('Could not load file %s' % filename)
return ctypes.c_void_p(bitmap)
def _wrap_bitmap_bits_in_array(bitmap, shape, dtype):
"""Return an ndarray view on the data in a FreeImage bitmap. Only
valid for as long as the bitmap is loaded (if single page) / locked
in memory (if multipage).
"""
pitch = _FI.FreeImage_GetPitch(bitmap)
height = shape[-1]
byte_size = height * pitch
itemsize = dtype.itemsize
if len(shape) == 3:
strides = (itemsize, shape[0]*itemsize, pitch)
else:
strides = (itemsize, pitch)
bits = _FI.FreeImage_GetBits(bitmap)
array = numpy.ndarray(shape, dtype=dtype,
buffer=(ctypes.c_char*byte_size).from_address(bits),
strides=strides)
return array
def _array_from_bitmap(bitmap):
"""Convert a FreeImage bitmap pointer to a numpy array
"""
dtype, shape = FI_TYPES.get_type_and_shape(bitmap)
array = _wrap_bitmap_bits_in_array(bitmap, shape, dtype)
# swizzle the color components and flip the scanlines to go from
# FreeImage's BGR[A] and upside-down internal memory format to something
# more normal
def n(arr):
return arr[..., ::-1].T
if len(shape) == 3 and _FI.FreeImage_IsLittleEndian() and \
dtype.type == numpy.uint8:
b = n(array[0])
g = n(array[1])
r = n(array[2])
if shape[0] == 3:
return numpy.dstack( (r, g, b) )
elif shape[0] == 4:
a = n(array[3])
return numpy.dstack( (r, g, b, a) )
else:
raise ValueError('Cannot handle images of shape %s' % shape)
# We need to copy because array does *not* own its memory
# after bitmap is freed.
return n(array).copy()
def string_tag(bitmap, key, model=METADATA_MODELS.FIMD_EXIF_MAIN):
"""Retrieve the value of a metadata tag with the given string key as a
string."""
tag = ctypes.c_int()
if not _FI.FreeImage_GetMetadata(model, bitmap, str(key),
ctypes.byref(tag)):
return
char_ptr = ctypes.c_char * _FI.FreeImage_GetTagLength(tag)
    return char_ptr.from_address(_FI.FreeImage_GetTagValue(tag)).raw
def write(array, filename, flags=0):
"""Write a (width, height) or (width, height, nchannels) array to
a greyscale, RGB, or RGBA image, with file type deduced from the
filename.
"""
filename = asbytes(filename)
ftype = _FI.FreeImage_GetFIFFromFilename(filename)
if ftype == -1:
raise ValueError('Cannot determine type for %s' % filename)
bitmap, fi_type = _array_to_bitmap(array)
try:
if fi_type == FI_TYPES.FIT_BITMAP:
can_write = _FI.FreeImage_FIFSupportsExportBPP(ftype,
_FI.FreeImage_GetBPP(bitmap))
else:
can_write = _FI.FreeImage_FIFSupportsExportType(ftype, fi_type)
if not can_write:
raise TypeError('Cannot save image of this format '
'to this file type')
res = _FI.FreeImage_Save(ftype, bitmap, filename, flags)
if not res:
raise RuntimeError('Could not save image properly.')
finally:
_FI.FreeImage_Unload(bitmap)
def write_multipage(arrays, filename, flags=0):
"""Write a list of (width, height) or (nchannels, width, height)
arrays to a multipage greyscale, RGB, or RGBA image, with file type
deduced from the filename.
"""
filename = asbytes(filename)
ftype = _FI.FreeImage_GetFIFFromFilename(filename)
if ftype == -1:
raise ValueError('Cannot determine type of file %s' % filename)
create_new = True
read_only = False
keep_cache_in_memory = True
multibitmap = _FI.FreeImage_OpenMultiBitmap(ftype, filename,
create_new, read_only,
keep_cache_in_memory, 0)
if not multibitmap:
raise ValueError('Could not open %s for writing multi-page image.' %
filename)
try:
multibitmap = ctypes.c_void_p(multibitmap)
for array in arrays:
bitmap, fi_type = _array_to_bitmap(array)
_FI.FreeImage_AppendPage(multibitmap, bitmap)
finally:
_FI.FreeImage_CloseMultiBitmap(multibitmap, flags)
# 4-byte quads of 0,v,v,v from 0,0,0,0 to 0,255,255,255
_GREY_PALETTE = numpy.arange(0, 0x01000000, 0x00010101, dtype=numpy.uint32)
def _array_to_bitmap(array):
"""Allocate a FreeImage bitmap and copy a numpy array into it.
"""
shape = array.shape
dtype = array.dtype
r,c = shape[:2]
if len(shape) == 2:
n_channels = 1
w_shape = (c,r)
elif len(shape) == 3:
n_channels = shape[2]
w_shape = (n_channels,c,r)
else:
n_channels = shape[0]
try:
fi_type = FI_TYPES.fi_types[(dtype.type, n_channels)]
except KeyError:
raise ValueError('Cannot write arrays of given type and shape.')
itemsize = array.dtype.itemsize
bpp = 8 * itemsize * n_channels
bitmap = _FI.FreeImage_AllocateT(fi_type, c, r, bpp, 0, 0, 0)
if not bitmap:
raise RuntimeError('Could not allocate image for storage')
try:
def n(arr): # normalise to freeimage's in-memory format
return arr.T[:,::-1]
bitmap = ctypes.c_void_p(bitmap)
wrapped_array = _wrap_bitmap_bits_in_array(bitmap, w_shape, dtype)
# swizzle the color components and flip the scanlines to go to
# FreeImage's BGR[A] and upside-down internal memory format
if len(shape) == 3 and _FI.FreeImage_IsLittleEndian() and \
dtype.type == numpy.uint8:
wrapped_array[0] = n(array[:,:,2])
wrapped_array[1] = n(array[:,:,1])
wrapped_array[2] = n(array[:,:,0])
if shape[2] == 4:
wrapped_array[3] = n(array[:,:,3])
else:
wrapped_array[:] = n(array)
if len(shape) == 2 and dtype.type == numpy.uint8:
palette = _FI.FreeImage_GetPalette(bitmap)
if not palette:
raise RuntimeError('Could not get image palette')
ctypes.memmove(palette, _GREY_PALETTE.ctypes.data, 1024)
return bitmap, fi_type
except:
_FI.FreeImage_Unload(bitmap)
raise
def imread(filename):
"""
img = imread(filename)
Reads an image from file `filename`
Parameters
----------
filename : file name
Returns
-------
img : ndarray
"""
img = read(filename)
return img
def imsave(filename, img):
'''
imsave(filename, img)
Save image to disk
Image type is inferred from filename
Parameters
----------
filename : file name
img : image to be saved as nd array
'''
write(img, filename)
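# Usage sketch (assumes a FreeImage shared library is installed and that the
# hypothetical file 'example.png' exists):
#     img = imread('example.png')        # ndarray, shape (width, height[, nchannels])
#     imsave('example_copy.png', img)    # file format deduced from the filename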
| |
# ******************************************************************************
# Copyright 2014-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""
Test pool layer operations of the NervanaGPU, NervanaCPU and NervanaMKL backends against a numpy reference.
"""
import itertools as itt
import numpy as np
import pytest
from neon import logger as neon_logger
from utils import allclose_with_out
# how many times to repeat the fprop and bprop
repeat = 5
def sliceable(dim, pad=0):
"""
collapse outer dimensions into one and preserve inner dimension
this allows for easy cpu operations in numpy
"""
dim0 = np.prod(dim[:-1]) + pad
return (dim0, dim[-1])
def pixel_indices(pool, kj, mt, pr, qs):
C = pool.C
J, T, R, S = pool.JTRS
D, H, W = pool.DHW
HW = H * W
DHW = D * H * W
idx = []
for j in range(J):
c = kj + j
ci = c * DHW
cb = c >= 0 and c < C
for t in range(T):
z = mt + t
zi = ci + z * HW
zb = cb and z >= 0 and z < D
for r in range(R):
y = pr + r
yi = zi + y * W
yb = zb and y >= 0 and y < H
for s in range(S):
x = qs + s
if yb and x >= 0 and x < W:
xi = yi + x
idx.append(xi)
return idx
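# Illustrative example (not part of the original tests): for a single-channel
# layer with J=T=1, R=S=2 and D=1, H=W=4, a window anchored at
# (kj, mt, pr, qs) = (0, 0, 0, 0) yields the flattened indices [0, 1, 4, 5] of
# the top-left 2x2 patch; coordinates that fall into the padding region are skipped.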
def run_backend_pool(lib, layer, I, E, dtype):
beI = lib.array(I, dtype=dtype)
beE = lib.array(E, dtype=dtype)
beO = lib.zeros(layer.dimO, dtype=dtype)
beA = lib.zeros(layer.dimO, dtype=np.int8)
beB = lib.zeros(layer.dimI, dtype=dtype)
for i in range(repeat):
lib.fprop_pool(layer, beI, beO, beA)
lib.bprop_pool(layer, beE, beB, beA)
return beO, beB
def run_numpy_pool(op, cpuI, cpuE, dtype, be_layer):
# pass in the backend layer for the parameters
dimI = be_layer.dimI
dimO = be_layer.dimO
op = be_layer.op
K = be_layer.K
N = be_layer.N
M, P, Q = be_layer.MPQ
pad_j, pad_d, pad_h, pad_w = be_layer.padding
str_j, str_d, str_h, str_w = be_layer.strides
# cpu output arrays
    cpuO = np.empty(dimO, dtype=dtype)
    cpuB = np.zeros(sliceable(dimI, 1), dtype=dtype)
for i in range(repeat):
cpuB.fill(0)
for k in range(K):
kj = k * str_j - pad_j
for m in range(M):
mt = m * str_d - pad_d
for p in range(P):
pr = p * str_h - pad_h
for q in range(Q):
qs = q * str_w - pad_w
idx = pixel_indices(be_layer, kj, mt, pr, qs)
if op == "max":
cpuO[k, m, p, q, :] = np.max(cpuI[idx, :], axis=0)
b_idx = np.argmax(cpuI[idx, :], axis=0)
for n in range(N):
cpuB[idx[b_idx[n]], n] += cpuE[k, m, p, q, n]
elif op == "avg":
cpuO[k, m, p, q, :] = np.mean(cpuI[idx, :], axis=0)
cpuB[idx, :] += cpuE[k, m, p, q, :] * (1.0 / len(idx))
elif op == "l2":
cpuO[k, m, p, q, :] = np.sqrt(
np.sum(cpuI[idx, :] ** 2, axis=0))
return cpuO, cpuB
def pytest_generate_tests(metafunc):
if 'poolargs' in metafunc.fixturenames:
fargs = []
op_list = ["avg", "max"]
fargs = itt.product(op_list)
metafunc.parametrize('poolargs', fargs)
def test_pool_layer_mkl(poolargs, backend_pair_bench_mkl):
op = poolargs[0]
dtype = np.float32
nm, nc = backend_pair_bench_mkl
N, C = 32, 32
D, H, W = 1, 32, 32
J, T, R, S = 2, 1, 3, 3
padding_j, padding_d, padding_h, padding_w = 0, 0, 0, 0
strides_j, strides_d, strides_h, strides_w = 2, 1, 2, 2
pool_nm = nm.pool_layer(
dtype,
op,
N,
C, D, H, W,
J, T, R, S,
padding_j, padding_d, padding_h, padding_w,
strides_j, strides_d, strides_h, strides_w)
pool_nc = nc.pool_layer(
dtype,
op,
N,
C, D, H, W,
J, T, R, S,
padding_j, padding_d, padding_h, padding_w,
strides_j, strides_d, strides_h, strides_w)
assert pool_nm.dimI == pool_nc.dimI
assert pool_nm.dimO == pool_nc.dimO
dimI = pool_nm.dimI
dimO = pool_nm.dimO
# generating input arrays for inputs and errors
cpuI = np.random.uniform(0.0, 1.0, sliceable(dimI, 1)).astype(
np.float16).astype(dtype)
cpuE = np.random.uniform(-0.2, 0.2, dimO).astype(dtype)
# zero pad the last row of cpu input for the sake of numpy
if op == "max":
cpuI[-1, :] = np.finfo(dtype).min
else:
cpuI[-1, :] = 0
# ========= MKL, CPU and numpy ==========
beI = cpuI[:-1, :].reshape(dimI)
beE = cpuE
nmO, nmB = run_backend_pool(nm, pool_nm, beI, beE, dtype)
ncO, ncB = run_backend_pool(nc, pool_nc, beI, beE, dtype)
cpuO, cpuB = run_numpy_pool(op, cpuI, cpuE, dtype, pool_nm)
for opA, nmA, ncA, cpuA in (
("fprop", nmO, ncO, cpuO),
("bprop", nmB, ncB.reshape(dimI), cpuB[:-1, :].reshape(dimI))):
neon_logger.display(opA)
assert allclose_with_out(nmA.get(), ncA.get(), rtol=0, atol=1e-4)
assert allclose_with_out(ncA.get(), cpuA, rtol=0, atol=1e-5)
@pytest.mark.hasgpu
def test_pool_layer(poolargs, backend_pair_bench):
op = poolargs[0]
dtype = np.float32
ng, nc = backend_pair_bench
N, C = 32, 32
D, H, W = 1, 32, 32
J, T, R, S = 2, 1, 3, 3
padding_j, padding_d, padding_h, padding_w = 0, 0, 0, 0
strides_j, strides_d, strides_h, strides_w = 2, 1, 2, 2
pool_ng = ng.pool_layer(
dtype,
op,
N,
C, D, H, W,
J, T, R, S,
padding_j, padding_d, padding_h, padding_w,
strides_j, strides_d, strides_h, strides_w)
pool_nc = nc.pool_layer(
dtype,
op,
N,
C, D, H, W,
J, T, R, S,
padding_j, padding_d, padding_h, padding_w,
strides_j, strides_d, strides_h, strides_w)
assert pool_ng.dimI == pool_nc.dimI
assert pool_ng.dimO == pool_nc.dimO
dimI = pool_ng.dimI
dimO = pool_ng.dimO
# generating input arrays for inputs and errors
cpuI = np.random.uniform(0.0, 1.0, sliceable(dimI, 1)).astype(
np.float16).astype(dtype)
cpuE = np.random.uniform(-0.2, 0.2, dimO).astype(dtype)
# zero pad the last row of cpu input for the sake of numpy
if op == "max":
cpuI[-1, :] = np.finfo(dtype).min
else:
cpuI[-1, :] = 0
# ========= GPU, CPU and numpy ==========
beI = cpuI[:-1, :].reshape(dimI)
beE = cpuE
ngO, ngB = run_backend_pool(ng, pool_ng, beI, beE, dtype)
ncO, ncB = run_backend_pool(nc, pool_nc, beI, beE, dtype)
cpuO, cpuB = run_numpy_pool(op, cpuI, cpuE, dtype, pool_ng)
for opA, ngA, ncA, cpuA in (
("fprop", ngO, ncO, cpuO),
("bprop", ngB, ncB.reshape(dimI), cpuB[:-1, :].reshape(dimI))):
neon_logger.display(opA)
assert allclose_with_out(ngA.get(), ncA.get(), rtol=0, atol=1e-4)
assert allclose_with_out(ncA.get(), cpuA, rtol=0, atol=1e-5)
if __name__ == '__main__':
fargs = ["max"]
test_pool_layer(fargs)
test_pool_layer_mkl(fargs)
| |
from http.server import HTTPServer, SimpleHTTPRequestHandler
import logging
import queue
from socketserver import ThreadingMixIn
import threading
import time
import traceback
import ray.cloudpickle as pickle
from ray.rllib.env.policy_client import PolicyClient, \
_create_embedded_rollout_worker
from ray.rllib.offline.input_reader import InputReader
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import override, PublicAPI
logger = logging.getLogger(__name__)
class PolicyServerInput(ThreadingMixIn, HTTPServer, InputReader):
"""REST policy server that acts as an offline data source.
This launches a multi-threaded server that listens on the specified host
and port to serve policy requests and forward experiences to RLlib. For
high performance experience collection, it implements InputReader.
For an example, run `examples/cartpole_server.py` along
with `examples/cartpole_client.py --inference-mode=local|remote`.
Examples:
>>> pg = PGTrainer(
... env="CartPole-v0", config={
... "input": lambda ioctx:
... PolicyServerInput(ioctx, addr, port),
... "num_workers": 0, # Run just 1 server, in the trainer.
        ...     })
>>> while True:
>>> pg.train()
>>> client = PolicyClient("localhost:9900", inference_mode="local")
>>> eps_id = client.start_episode()
>>> action = client.get_action(eps_id, obs)
>>> ...
>>> client.log_returns(eps_id, reward)
>>> ...
>>> client.log_returns(eps_id, reward)
"""
@PublicAPI
def __init__(self, ioctx, address, port, idle_timeout=3.0):
"""Create a PolicyServerInput.
This class implements rllib.offline.InputReader, and can be used with
any Trainer by configuring
{"num_workers": 0,
"input": lambda ioctx: PolicyServerInput(ioctx, addr, port)}
Note that by setting num_workers: 0, the trainer will only create one
rollout worker / PolicyServerInput. Clients can connect to the launched
server using rllib.env.PolicyClient.
Args:
ioctx (IOContext): IOContext provided by RLlib.
address (str): Server addr (e.g., "localhost").
port (int): Server port (e.g., 9900).
"""
self.rollout_worker = ioctx.worker
self.samples_queue = queue.Queue()
self.metrics_queue = queue.Queue()
self.idle_timeout = idle_timeout
def get_metrics():
completed = []
while True:
try:
completed.append(self.metrics_queue.get_nowait())
except queue.Empty:
break
return completed
# Forwards client-reported rewards directly into the local rollout
# worker. This is a bit of a hack since it is patching the get_metrics
# function of the sampler.
if self.rollout_worker.sampler is not None:
self.rollout_worker.sampler.get_metrics = get_metrics
# Create a request handler that receives commands from the clients
# and sends data and metrics into the queues.
handler = _make_handler(self.rollout_worker, self.samples_queue,
self.metrics_queue)
HTTPServer.__init__(self, (address, port), handler)
logger.info("Starting connector server at {}:{}".format(address, port))
# Start the serving thread, listening on socket and handling commands.
serving_thread = threading.Thread(
name="server", target=self.serve_forever)
serving_thread.daemon = True
serving_thread.start()
# Start a dummy thread that puts empty SampleBatches on the queue, just
# in case we don't receive anything from clients (or there aren't
# any). The latter would block sample collection entirely otherwise,
# even if other workers' PolicyServerInput receive incoming data from
# actual clients.
heart_beat_thread = threading.Thread(
name="heart-beat", target=self._put_empty_sample_batch_every_n_sec)
heart_beat_thread.daemon = True
heart_beat_thread.start()
@override(InputReader)
def next(self):
return self.samples_queue.get()
def _put_empty_sample_batch_every_n_sec(self):
# Places an empty SampleBatch every `idle_timeout` seconds onto the
# `samples_queue`. This avoids hanging of all RolloutWorkers parallel
# to this one in case this PolicyServerInput does not have incoming
# data (e.g. no client connected).
while True:
time.sleep(self.idle_timeout)
self.samples_queue.put(SampleBatch())
def _make_handler(rollout_worker, samples_queue, metrics_queue):
# Only used in remote inference mode. We must create a new rollout worker
# then since the original worker doesn't have the env properly wrapped in
# an ExternalEnv interface.
child_rollout_worker = None
inference_thread = None
lock = threading.Lock()
def setup_child_rollout_worker():
nonlocal lock
nonlocal child_rollout_worker
nonlocal inference_thread
with lock:
if child_rollout_worker is None:
(child_rollout_worker,
inference_thread) = _create_embedded_rollout_worker(
rollout_worker.creation_args(), report_data)
child_rollout_worker.set_weights(rollout_worker.get_weights())
def report_data(data):
nonlocal child_rollout_worker
batch = data["samples"]
batch.decompress_if_needed()
samples_queue.put(batch)
for rollout_metric in data["metrics"]:
metrics_queue.put(rollout_metric)
if child_rollout_worker is not None:
child_rollout_worker.set_weights(rollout_worker.get_weights(),
rollout_worker.get_global_vars())
class Handler(SimpleHTTPRequestHandler):
def __init__(self, *a, **kw):
super().__init__(*a, **kw)
def do_POST(self):
content_len = int(self.headers.get("Content-Length"), 0)
raw_body = self.rfile.read(content_len)
parsed_input = pickle.loads(raw_body)
try:
response = self.execute_command(parsed_input)
self.send_response(200)
self.end_headers()
self.wfile.write(pickle.dumps(response))
except Exception:
self.send_error(500, traceback.format_exc())
def execute_command(self, args):
command = args["command"]
response = {}
# Local inference commands:
if command == PolicyClient.GET_WORKER_ARGS:
logger.info("Sending worker creation args to client.")
response["worker_args"] = rollout_worker.creation_args()
elif command == PolicyClient.GET_WEIGHTS:
logger.info("Sending worker weights to client.")
response["weights"] = rollout_worker.get_weights()
response["global_vars"] = rollout_worker.get_global_vars()
elif command == PolicyClient.REPORT_SAMPLES:
logger.info("Got sample batch of size {} from client.".format(
args["samples"].count))
report_data(args)
# Remote inference commands:
elif command == PolicyClient.START_EPISODE:
setup_child_rollout_worker()
assert inference_thread.is_alive()
response["episode_id"] = (
child_rollout_worker.env.start_episode(
args["episode_id"], args["training_enabled"]))
elif command == PolicyClient.GET_ACTION:
assert inference_thread.is_alive()
response["action"] = child_rollout_worker.env.get_action(
args["episode_id"], args["observation"])
elif command == PolicyClient.LOG_ACTION:
assert inference_thread.is_alive()
child_rollout_worker.env.log_action(
args["episode_id"], args["observation"], args["action"])
elif command == PolicyClient.LOG_RETURNS:
assert inference_thread.is_alive()
if args["done"]:
child_rollout_worker.env.log_returns(
args["episode_id"], args["reward"], args["info"],
args["done"])
else:
child_rollout_worker.env.log_returns(
args["episode_id"], args["reward"], args["info"])
elif command == PolicyClient.END_EPISODE:
assert inference_thread.is_alive()
child_rollout_worker.env.end_episode(args["episode_id"],
args["observation"])
else:
raise ValueError("Unknown command: {}".format(command))
return response
return Handler
| |
###############################################################################
# Caleydo - Visualization for Molecular Biology - http://caleydo.org
# Copyright (c) The Caleydo Team. All rights reserved.
# Licensed under the new BSD license, available at http://caleydo.org/license
###############################################################################
from builtins import str, object
import json
import io
import os
import csv
import numpy as np
from .dataset_def import ADataSetProvider, AColumn, AMatrix, AStratification, ATable, AVector
from . import config
from functools import cmp_to_key
def assign_ids(ids, idtype):
from .plugin import lookup
manager = lookup('idmanager')
return np.array(manager(ids, idtype))
def fix_id(fqname):
from .util import fix_id
return fix_id(fqname)
def basic_description(data, type, path):
import datetime
from .security import current_username
desc = dict(type=type,
name=data.get('name', 'Uploaded File'),
description=data.get('description', ''),
creator=current_username,
ts=datetime.datetime.utcnow(),
path=os.path.basename(path))
if 'group' in data:
desc['group'] = data['group']
if 'permissions' in data:
desc['permissions'] = data['permissions']
if 'buddies' in data:
desc['buddies'] = data['buddies']
return desc
class CSVEntryMixin(object):
def __init__(self, desc, project):
self._desc = desc
folder = project.folder + '/data/' if not hasattr(project, 'inplace') else project.folder
self._path = os.path.join(folder, self._desc['path'])
del self._desc['path']
self._project = project
self._loaded = None
def load(self):
if self._loaded is not None:
return self._loaded
data = []
with io.open(self._path, 'r', newline='', encoding=self._desc.get('encoding', 'utf-8')) as csvfile:
reader = csv.reader(csvfile, delimiter=self._desc.get('separator', ','), quotechar=str(self._desc.get('quotechar', '|')))
data.extend(reader)
# print data
def to_num(s):
try:
return float(s) # for int, long and float
except ValueError:
return s
header = data[0]
data = [[to_num(v) if i > 0 else v for i, v in enumerate(row)] for row in data[1:]]
data.insert(0, header)
# convert to col, row and data
self._loaded = self._process(data)
return self._loaded
def _process(self, data):
return data
def to_description(self):
return self._desc
def idtypes(self):
return [v for k, v in self._desc.items() if k in ['rowtype', 'coltype', 'idtype']]
def guess_color(name, i):
name = name.lower()
colors = dict(name='blue', female='red', deceased='#e41a1b', living='#377eb8')
if name in colors:
return colors[name]
l = ['#8dd3c7', '#ffffb3', '#bebada', '#fb8072', '#80b1d3', '#fdb462', '#b3de69', '#fccde5', '#d9d9d9', '#bc80bd',
'#ccebc5', '#ffed6f']
return l[i % len(l)]
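# Illustrative example (not part of the original module): guess_color('Female', 0)
# returns 'red' via the fixed mapping, while an unknown name such as
# guess_color('cluster_b', 1) falls back to the palette entry '#ffffb3'.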
def cmp_string(a, b):
if a == b:
return 0
return -1 if a < b else +1
class CSVStratification(CSVEntryMixin, AStratification):
def __init__(self, desc, project):
AStratification.__init__(self, desc['name'], project.id, desc['type'], desc.get('id', None))
CSVEntryMixin.__init__(self, desc, project)
desc['fqname'] = self.fqname
desc['id'] = self.id
self.idtype = desc['idtype']
for i, g in enumerate(desc['groups']):
if 'color' not in g:
g['color'] = guess_color(g['name'], i)
def _process(self, data):
def to_string(v):
if type(v) is float:
return str(int(v))
return str(v)
d = [dict(row=row[0], i=i, cluster=to_string(row[1])) for i, row in enumerate(data[1:])]
groups = [str(g['name']) for g in self._desc['groups']]
def cmp(a, b):
ga = groups.index(a['cluster'])
gb = groups.index(b['cluster'])
if ga != gb:
return ga - gb
r = cmp_string(a['cluster'], b['cluster'])
if r != 0:
return r
return cmp_string(a['row'], b['row']) if r == 0 else r
d.sort(key=cmp_to_key(cmp)) # sort by cluster;
clusters = dict()
for di in d:
c = di['cluster']
if c in clusters:
clusters[c].append(di['i'])
else:
clusters[c] = [di['i']]
colors = {g['name']: g['color'] for g in self._desc['groups']}
clusters = [dict(name=k, range=clusters.get(k, []), color=colors.get(k, 'gray')) for k in groups]
rows = np.array([di[0] for di in data[1:]])
return {'rows': rows,
'rowIds': assign_ids(rows, self.idtype),
'groups': clusters
}
def rows(self, range=None):
n = self.load()['rows']
if range is None:
return n
return n[range.asslice()]
def rowids(self, range=None):
n = self.load()['rowIds']
if range is None:
return n
return n[range.asslice()]
def groups(self):
return self.load()['groups']
def asjson(self, range=None):
return self.load()
@staticmethod
def parse(data, path, project, id=None):
desc = basic_description(data, 'stratification', path)
desc['idtype'] = data.get('idtype', data.get('rowtype', 'unknown'))
for k, v in data.items():
if k not in desc:
desc[k] = v
if id is not None:
desc['id'] = id
if 'size0' in data and 'ngroups' in data:
desc['size'] = [int(data['size0'])]
del desc['size0']
desc['ngroups'] = int(data['ngroups'])
else: # derive from the data
clusters = set()
count = 0
with io.open(path, 'r', newline='', encoding=desc.get('encoding', 'utf-8')) as csvfile:
reader = csv.reader(csvfile, delimiter=desc.get('separator', ','), quotechar=str(desc.get('quotechar', '|')))
for row in reader:
count += 1
clusters.add(row[1])
desc['size'] = [count]
desc['ngroups'] = len(clusters)
return CSVStratification(desc, project)
class CSVMatrix(CSVEntryMixin, AMatrix):
def __init__(self, desc, project):
AMatrix.__init__(self, desc['name'], project.id, desc['type'], desc.get('id', None))
CSVEntryMixin.__init__(self, desc, project)
desc['fqname'] = self.fqname
desc['id'] = self.id
self.rowtype = desc['rowtype']
self.coltype = desc['coltype']
self.value = desc['value']['type']
self.range = desc['value']['range']
self.shape = desc['size']
def _process(self, data):
cols = np.array(data[0][1:])
rows = np.array([x[0] for x in data[1:]])
is_number = self.value == 'real' or self.value == 'int'
if is_number:
vs = [[np.NaN if v == 'NA' or v == '' else v for v in x[1:]] for x in data[1:]]
# import numpy.ma as ma
# dd = ma.masked_equal(np.array(vs), np.NaN)
dd = np.array(vs)
else:
dd = np.array([x[1:] for x in data[1:]])
return {'cols': cols,
'colIds': assign_ids(cols, self.coltype),
'rows': rows,
'rowIds': assign_ids(rows, self.rowtype),
'data': dd
}
def rows(self, range=None):
n = self.load()['rows']
if range is None:
return n
return n[range.asslice()]
def rowids(self, range=None):
n = self.load()['rowIds']
if range is None:
return n
return n[range.asslice()]
def cols(self, range=None):
n = self.load()['cols']
if range is None:
return n
return n[range.asslice()]
def colids(self, range=None):
n = self.load()['colIds']
if range is None:
return n
return n[range.asslice()]
def asnumpy(self, range=None):
n = self.load()['data']
if range is None:
return n
rows = range[0].asslice()
cols = range[1].asslice()
d = None
if isinstance(rows, list) and isinstance(cols, list):
# fancy indexing in two dimension doesn't work
d_help = n[rows, :]
d = d_help[:, cols]
else:
d = n[rows, cols]
if d.ndim == 1:
# two options one row and n columns or the other way around
if rows is Ellipsis or (isinstance(rows, list) and len(rows) > 1):
d = d.reshape((d.shape[0], 1))
else:
d = d.reshape((1, d.shape[0]))
elif d.ndim == 0:
d = d.reshape((1, 1))
return d
@staticmethod
def parse(data, path, project, id=None):
desc = basic_description(data, 'matrix', path)
desc['rowtype'] = data.get('rowtype', 'unknown')
desc['coltype'] = data.get('coltype', 'unknown')
desc['value'] = dict(type=data.get('value_type', 'real'))
for k, v in data.items():
if k not in desc:
desc[k] = v
if id is not None:
desc['id'] = id
if all((k in data) for k in ['size0', 'size1', 'value_min', 'value_max']):
desc['size'] = [int(data['size0']), int(data['size1'])]
del desc['size0']
del desc['size1']
desc['value']['range'] = [float(data['value_min']), float(data['value_max'])]
del desc['value_min']
del desc['value_max']
else: # derive from the data
rows = 0
cols = None
min_v = None
max_v = None
with io.open(path, 'r', newline='', encoding=desc.get('encoding', 'utf-8')) as csvfile:
reader = csv.reader(csvfile, delimiter=desc.get('separator', ','), quotechar=str(desc.get('quotechar', '|')))
for row in reader:
if cols is None:
cols = len(row) - 1
else:
rows += 1
min_act = min((float(f) for f in row[1:]))
min_v = min_act if min_v is None else min(min_act, min_v)
max_act = max((float(f) for f in row[1:]))
max_v = max_act if max_v is None else max(max_act, max_v)
desc['size'] = [rows, cols]
desc['value']['range'] = [float(data['value_min']) if 'value_min' in data else min_v,
float(data['value_max']) if 'value_max' in data else max_v]
return CSVMatrix(desc, project)
class CSVColumn(AColumn):
def __init__(self, desc, table):
super(CSVColumn, self).__init__(desc['name'], desc['value']['type'])
self._desc = desc
self._table = table
def asnumpy(self, range=None):
import pandas as pd
p = self._table.aspandas(range)[self.name]
if isinstance(p, pd.Series):
return p.values
return np.array([p])
def process(self, index, data):
is_number = self.type == 'real' or self.type == 'int'
if is_number:
return [np.NaN if d[index] == 'NA' or d[index] == '' else d[index] for d in data]
else:
return [d[index] for d in data]
def dump(self):
return self._desc
class CSVTable(CSVEntryMixin, ATable):
def __init__(self, desc, project):
ATable.__init__(self, desc['name'], project.id, desc['type'], desc.get('id', None))
CSVEntryMixin.__init__(self, desc, project)
desc['fqname'] = self.fqname
desc['id'] = self.id
self.idtype = desc['idtype']
self.columns = [CSVColumn(d, self) for d in desc['columns']]
self.shape = desc['size']
def _process(self, data):
rows = np.array([x[0] for x in data[1:]])
import pandas as pd
objs = {c.name: c.process(i + 1, data[1:]) for i, c in enumerate(self.columns)}
df = pd.DataFrame(objs, columns=[c.name for c in self.columns])
df.index = rows
return {'rows': rows,
'rowIds': assign_ids(rows, self.idtype),
'df': df
}
def rows(self, range=None):
n = self.load()['rows']
if range is None:
return n
return n[range.asslice()]
def rowids(self, range=None):
n = self.load()['rowIds']
if range is None:
return n
return n[range.asslice()]
def aspandas(self, range=None):
n = self.load()['df']
if range is None:
return n
return n.iloc[range.asslice(no_ellipsis=True)]
@staticmethod
def parse(data, path, id=None):
pass
class CSVVector(CSVEntryMixin, AVector):
def __init__(self, desc, project):
AVector.__init__(self, desc['name'], project.id, desc['type'], desc.get('id', None))
CSVEntryMixin.__init__(self, desc, project)
desc['fqname'] = self.fqname
self.idtype = desc['idtype']
self.value = desc['value']['type']
self.range = desc['value']['range']
self.shape = desc['size']
def _process(self, data):
is_number = self.value == 'real' or self.value == 'int'
    # extract the row labels before `data` is replaced by the value column,
    # otherwise the row names would be derived from the already-processed values
    rows = np.array([x[0] for x in data[1:]])
    if is_number:
      data = [np.NaN if x[1] == 'NA' or x[1] == '' else x[1] for x in data[1:]]
    else:
      data = [x[1] for x in data[1:]]
return {'rows': rows,
'rowIds': assign_ids(rows, self.idtype),
'data': np.array(data)
}
def rows(self, range=None):
n = self.load()['rows']
if range is None:
return n
return n[range.asslice()]
def rowids(self, range=None):
n = self.load()['rowIds']
if range is None:
return n
return n[range.asslice()]
def asnumpy(self, range=None):
n = self.load()['data']
if range is None:
return n
d = n[range[0].asslice()]
if d.ndim == 0:
d = d.reshape((1,))
return d
@staticmethod
def parse(data, path, project, id=None):
pass
def to_files(plugins):
for plugin in plugins:
index = os.path.join(plugin.folder + '/data/' if not hasattr(plugin, 'inplace') else plugin.folder, 'index.json')
if not os.path.isfile(index):
continue
with open(index, 'r') as f:
desc = json.load(f)
for di in desc:
if di['type'] == 'matrix':
yield CSVMatrix(di, plugin)
elif di['type'] == 'table':
yield CSVTable(di, plugin)
elif di['type'] == 'vector':
yield CSVVector(di, plugin)
elif di['type'] == 'stratification':
yield CSVStratification(di, plugin)
class DataPlugin(object):
def __init__(self, folder):
# add a magic plugin for the static data dir
self.inplace = True # avoid adding the data suffix
self.folder = folder
self.id = os.path.basename(folder)
def save(self, f):
import werkzeug.utils
from .util import random_id
if not os.path.exists(self.folder):
os.makedirs(self.folder)
filename = os.path.basename(f.filename)
filename = werkzeug.utils.secure_filename(filename + random_id(3) + '.csv')
path = os.path.join(self.folder, filename)
f.save(path)
return path
def append(self, desc, path):
desc['path'] = os.path.basename(path)
index = os.path.join(self.folder, 'index.json')
old = []
if os.path.isfile(index):
with io.open(index, 'r', newline='', encoding=desc.get('encoding', 'utf-8')) as f:
old = json.load(f)
old.append(desc)
with io.open(index, 'w', newline='', encoding=desc.get('encoding', 'utf-8')) as f:
json.dump(old, f, indent=1)
class StaticFileProvider(ADataSetProvider):
def __init__(self, plugins):
self.files = list(to_files(plugins))
cc = config.view('phovea_server')
self.data_plugin = DataPlugin(os.path.join(cc.dataDir, 'data'))
self.files.extend(to_files([self.data_plugin]))
import glob
extras = [DataPlugin(f) for f in (os.path.dirname(f) for f in glob.glob(cc.dataDir + '/*/index.json')) if
os.path.basename(f) != 'data']
self.files.extend(to_files(extras))
def __iter__(self):
return iter((f for f in self.files if f.can_read()))
def upload(self, data, files, id=None):
if 'csv' != data.get('_provider', 'csv'):
return None # not the right provider
type = data.get('type', 'unknown')
parsers = dict(matrix=CSVMatrix.parse, table=CSVTable.parse, vector=CSVVector.parse,
stratification=CSVStratification.parse)
if type not in parsers:
return None # unknown type
f = files[list(files.keys())[0]]
path = self.data_plugin.save(f)
r = parsers[type](data, path, self.data_plugin, id)
if r:
self.data_plugin.append(r._desc, path)
self.files.append(r)
else:
os.remove(path) # delete file again
return r
def create():
"""
entry point of this plugin
"""
from .plugin import plugins
return StaticFileProvider(plugins())
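# Minimal usage sketch (hypothetical, assuming the phovea plugin registry is
# importable): the provider only yields datasets whose ACL allows reading, so
# listing them is a plain loop.
#
#   provider = create()
#   for dataset in provider:
#       print(dataset.fqname)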
| |
import sys
import math
import attr
from beastling.util import xml
__all__ = ['Distribution', 'Calibration']
registered_distributions = (
("Beta", "beast.math.distributions.Beta"),
("Exponential", "beast.math.distributions.Exponential"),
("InverseGamma", "beast.math.distributions.InverseGamma"),
("LogNormal", "beast.math.distributions.LogNormalDistributionModel"),
("Gamma", "beast.math.distributions.Gamma"),
("Uniform", "beast.math.distributions.Uniform"),
("LaplaceDistribution", "beast.math.distributions.LaplaceDistribution"),
("OneOnX", "beast.math.distributions.OneOnX"),
("Normal", "beast.math.distributions.Normal"),
)
DISTRIBUTIONS = {
"normal": ("Normal", ("mean", "sigma")),
"lognormal": ("LogNormal", ("M", "S")),
"uniform": ("Uniform", ("lower", "upper")),
}
ALL_DISTRIBUTIONS = set(DISTRIBUTIONS.keys()).union(['rlognormal', 'point'])
def add_prior_density_description(compound_distribution, distribution):
"""Create a distribution of the specified type inside an ET element.
Create an ET sub-element describing a Beast real-parameter
distribution inside the ET element `compound_distribution`
reflecting the properties of `distribution`.
Parameters
----------
compound_distribution: ET.Element
The xml tag to which the distribution should be added.
distribution: configuration.Calibration-like
A description of the distribution.
        Must have the offset, dist and param attributes.
Returns
-------
None
Side Effects
------------
Creates a sub-element in `compound_distribution`.
May register distributions with global map list.
"""
dist_type, ps = DISTRIBUTIONS[distribution.dist]
attribs = {
"id": "DistributionFor{:}".format(compound_distribution.attrib["id"]),
"name": "distr",
"offset": "0.0"}
if distribution.offset:
attribs["offset"] = str(distribution.offset)
for parameter, value in zip(ps, distribution.param):
attribs[parameter] = str(value)
getattr(xml, dist_type)(compound_distribution, attrib=attribs)
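# Illustrative sketch (not part of the module): given an ElementTree-style
# parent element that already carries an "id" attribute, the helper above adds
# a single child tag named after the Beast distribution class, e.g.
#
#   import xml.etree.ElementTree as ET
#   parent = ET.Element("distribution", attrib={"id": "root_age_prior"})
#   add_prior_density_description(parent, Distribution(param=(1.0, 0.5), dist="lognormal"))
#   # -> parent now contains
#   #    <LogNormal id="DistributionForroot_age_prior" name="distr" offset="0.0" M="1.0" S="0.5"/>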
def parse_prior_string(cs, prior_name="?", is_point=False):
"""Parse a prior-describing string.
The basic format of such a string is [offset + ]distribution
Offset is a number, distribution can describe a probability
density function of normal, lognormal (including rlognormal, a
reparametrization where the mean is given in real space, not
in log space) or uniform type in one of the following
ways. Pseudo-densities with infinite integral are permitted.
Parameters separated using `,` are directly the parameters of the
distribution. A range separated by a `-` gives the 95% interval of
that distribution. (This behaviour may change in the future.)
>>> parse = parse_prior_string
>>> # Parameters of a normal distribution
>>> parse("0, 1")
(0.0, 'normal', (0.0, 1.0))
>>> # Parameters of some other distribution
>>> parse(" rlognormal(1, 1)")
(0.0, 'lognormal', (0.0, 1.0))
>>> # A distribution shape and its 95%-interval
>>> parse("normal (1-5)")
(0.0, 'normal', (3.0, 1.0204081632653061))
>>> parse("1 - 5")
(0.0, 'normal', (3.0, 1.0204081632653061))
>>> parse(">1200")
(0.0, 'uniform', (1200.0, 9223372036854775807))
>>> parse("< 1200")
(0.0, 'uniform', (0.0, 1200.0))
All of these strings can also be used for point distributions
>>> parse("normal (1-5)", is_point=True)
(0.0, 'normal', (3.0, 1.0204081632653061))
but in addition, point distributions support fixed values.
>>> parse("300", is_point=True)
(0.0, 'point', (300.0, 300.0))
In some cases, in particular for lognormal distributions, it may
    be useful to specify an offset. This is possible with the syntax
>>> parse("4 + lognormal(1, 1)")
(4.0, 'lognormal', (1.0, 1.0))
The offset must appear *before* the distribution, the other order
is not permitted.
>>> parse("lognormal(1, 1) + 4") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
parse("lognormal(1, 1) + 4")
File "beastling/distributions.py", line 148, in parse_prior_string
offset = float(os.strip())
ValueError: could not convert string to float: 'lognormal(1, 1)'
Note
====
    For uniform distributions, "uniform(0-1)" does not give the 95%
interval, but the lower and upper bounds.
While the ">1" and "<1" notations generate uniform
distributions, the bare "0-1" notation generates a normal
distribution.
Parameters
==========
cs: str
A string describing a single-value probability distribution.
    prior_name: str
The name of the prior distribution, used only for error
reporting purposes.
is_point: bool
defines whether the distribution is permitted to be a constant
(point) distribution.
Returns
=======
    offset: float
type: str
A known distribution type
parameters: tuple of floats
The parameters for that distribution
"""
orig_cs = cs[:]
# Find offset
if cs.count("+") == 1:
os, dist = cs.split("+")
offset = float(os.strip())
cs = dist.strip()
else:
offset = 0.0
def fmt_err(msg, *args):
return ("Prior specification '{:}' for {:} " + msg).format(orig_cs, prior_name, *args)
# Parse distribution
if cs.count("(") == 1 and cs.count(")") == 1:
dist_type, cs = cs.split("(", 1)
dist_type = dist_type.strip().lower()
if dist_type not in ALL_DISTRIBUTIONS:
raise ValueError(fmt_err("uses an unknown distribution {:}!", dist_type))
cs = cs[0:-1]
else:
# Default to normal
dist_type = "normal"
# Parse / infer params
if cs.count(",") == 1 and not any([x in cs for x in ("<", ">")]):
# We've got explicit params
p1, p2 = map(float, cs.split(","))
elif cs.count("-") == 1 and not any([x in cs for x in (",", "<", ">")]):
# We've got a 95% HPD range
lower, upper = map(float, cs.split("-"))
if upper <= lower:
raise ValueError(fmt_err(
"has an upper bound {:} which is not higher than its lower bound {:}!",
upper, lower))
mid = (lower + upper) / 2.0
if dist_type == "normal":
p1 = (upper + lower) / 2.0
p2 = (upper - mid) / 1.96
elif dist_type == "lognormal":
p1 = math.log(mid)
p2a = (p1 - math.log(lower)) / 1.96
p2b = (math.log(upper) - p1) / 1.96
p2 = (p2a + p2b) / 2.0
elif dist_type == "uniform":
p1 = lower
p2 = upper
elif (cs.count("<") == 1 or cs.count(">") == 1) and not any(
[x in cs for x in (",", "-")]):
# We've got a single bound
dist_type = "uniform"
sign, bound = cs[0], cs[1:]
if sign == "<":
p1 = 0.0
p2 = float(bound.strip())
elif sign == ">":
p1 = float(bound.strip())
p2 = sys.maxsize
else:
raise ValueError(fmt_err("cannot be parsed!"))
elif is_point:
        # Last chance: it's a single language pinned to a single date,
        # so pin it to that date; nothing else is left to do with this
        # prior specification.
try:
dist_type = "point"
p1 = float(cs)
p2 = p1
except ValueError:
raise ValueError(fmt_err("cannot be parsed!"))
else:
raise ValueError(fmt_err("cannot be parsed!"))
# If this is a lognormal prior specification with the mean in
# realspace, adjust
if dist_type == "rlognormal":
p1 = math.log(p1)
dist_type = "lognormal"
# All done!
return offset, dist_type, (p1, p2)
def valid_params(instance, attribute, value):
    if not (isinstance(value, tuple)
            and len(value) == 2
            and all(isinstance(v, float) for v in value)):
        raise ValueError('invalid params {0}'.format(value))
@attr.s
class Distribution(object):
param = attr.ib(validator=valid_params)
dist = attr.ib(
validator=attr.validators.in_(ALL_DISTRIBUTIONS),
default='normal')
offset = attr.ib(
validator=attr.validators.instance_of(float),
default=0.0)
@staticmethod
def parse_prior_string(cs, **kw):
offset, dist, param = parse_prior_string(cs, **kw)
return dict(offset=offset, dist=dist, param=param)
@classmethod
def from_string(cls, string, context=None, is_point=False):
"""Create a Distribution object from a prior description string.
"""
return cls(**cls.parse_prior_string(string, prior_name=context, is_point=is_point))
def generate_xml_element(self, parent):
add_prior_density_description(compound_distribution=parent, distribution=self)
def mean(self):
if self.dist in ("normal", "point"):
return self.offset + self.param[0]
if self.dist == "lognormal":
return self.offset + math.exp(self.param[0])
if self.dist == "uniform":
return self.offset + sum(self.param) / 2.0
raise NotImplementedError
@attr.s
class Calibration(Distribution):
langs = attr.ib(default=attr.Factory(list), validator=attr.validators.instance_of((list, set)))
originate = attr.ib(default=False, validator=attr.validators.instance_of(bool))
@classmethod
def from_string(cls, string, context=None, is_point=False, **kw):
kw.update(cls.parse_prior_string(string, prior_name=context, is_point=is_point))
return cls(**kw)
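# A small worked example (comments only): parsing "4 + lognormal(1, 1)" yields
# offset=4.0, dist='lognormal', param=(1.0, 1.0), so
#
#   d = Distribution.from_string("4 + lognormal(1, 1)")
#   d.mean()   # == 4.0 + math.exp(1.0), i.e. roughly 6.72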
| |
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for `nova.wsgi`."""
import os.path
import tempfile
import testtools
import eventlet
import requests
import nova.exception
from nova import test
from nova.tests import utils
import nova.wsgi
import urllib2
import webob
SSL_CERT_DIR = os.path.normpath(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'ssl_cert'))
class TestLoaderNothingExists(test.NoDBTestCase):
"""Loader tests where os.path.exists always returns False."""
def setUp(self):
super(TestLoaderNothingExists, self).setUp()
self.stubs.Set(os.path, 'exists', lambda _: False)
def test_relpath_config_not_found(self):
self.flags(api_paste_config='api-paste.ini')
self.assertRaises(
nova.exception.ConfigNotFound,
nova.wsgi.Loader,
)
    def test_abspath_config_not_found(self):
self.flags(api_paste_config='/etc/nova/api-paste.ini')
self.assertRaises(
nova.exception.ConfigNotFound,
nova.wsgi.Loader,
)
class TestLoaderNormalFilesystem(test.NoDBTestCase):
"""Loader tests with normal filesystem (unmodified os.path module)."""
_paste_config = """
[app:test_app]
use = egg:Paste#static
document_root = /tmp
"""
def setUp(self):
super(TestLoaderNormalFilesystem, self).setUp()
self.config = tempfile.NamedTemporaryFile(mode="w+t")
self.config.write(self._paste_config.lstrip())
self.config.seek(0)
self.config.flush()
self.loader = nova.wsgi.Loader(self.config.name)
def test_config_found(self):
self.assertEqual(self.config.name, self.loader.config_path)
def test_app_not_found(self):
self.assertRaises(
nova.exception.PasteAppNotFound,
self.loader.load_app,
"nonexistent app",
)
def test_app_found(self):
url_parser = self.loader.load_app("test_app")
self.assertEqual("/tmp", url_parser.directory)
def tearDown(self):
self.config.close()
super(TestLoaderNormalFilesystem, self).tearDown()
class TestWSGIServer(test.NoDBTestCase):
"""WSGI server tests."""
def test_no_app(self):
server = nova.wsgi.Server("test_app", None)
self.assertEqual("test_app", server.name)
def test_start_random_port(self):
server = nova.wsgi.Server("test_random_port", None,
host="127.0.0.1", port=0)
server.start()
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
@testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support")
def test_start_random_port_with_ipv6(self):
server = nova.wsgi.Server("test_random_port", None,
host="::1", port=0)
server.start()
self.assertEqual("::1", server.host)
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
def test_uri_length_limit(self):
server = nova.wsgi.Server("test_uri_length_limit", None,
host="127.0.0.1", max_url_len=16384)
server.start()
uri = "http://127.0.0.1:%d/%s" % (server.port, 10000 * 'x')
resp = requests.get(uri)
eventlet.sleep(0)
self.assertNotEqual(resp.status_code,
requests.codes.REQUEST_URI_TOO_LARGE)
uri = "http://127.0.0.1:%d/%s" % (server.port, 20000 * 'x')
resp = requests.get(uri)
eventlet.sleep(0)
self.assertEqual(resp.status_code,
requests.codes.REQUEST_URI_TOO_LARGE)
server.stop()
server.wait()
class TestWSGIServerWithSSL(test.NoDBTestCase):
"""WSGI server with SSL tests."""
def setUp(self):
super(TestWSGIServerWithSSL, self).setUp()
self.flags(enabled_ssl_apis=['fake_ssl'],
ssl_cert_file=os.path.join(SSL_CERT_DIR, 'certificate.crt'),
ssl_key_file=os.path.join(SSL_CERT_DIR, 'privatekey.key'))
def test_ssl_server(self):
def test_app(env, start_response):
start_response('200 OK', {})
return ['PONG']
fake_ssl_server = nova.wsgi.Server("fake_ssl", test_app,
host="127.0.0.1", port=0,
use_ssl=True)
fake_ssl_server.start()
self.assertNotEqual(0, fake_ssl_server.port)
cli = eventlet.connect(("localhost", fake_ssl_server.port))
cli = eventlet.wrap_ssl(cli,
ca_certs=os.path.join(SSL_CERT_DIR, 'ca.crt'))
cli.write('POST / HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nContent-length:4\r\n\r\nPING')
response = cli.read(8192)
self.assertEqual(response[-4:], "PONG")
fake_ssl_server.stop()
fake_ssl_server.wait()
def test_two_servers(self):
def test_app(env, start_response):
start_response('200 OK', {})
return ['PONG']
fake_ssl_server = nova.wsgi.Server("fake_ssl", test_app,
host="127.0.0.1", port=0, use_ssl=True)
fake_ssl_server.start()
self.assertNotEqual(0, fake_ssl_server.port)
fake_server = nova.wsgi.Server("fake", test_app,
host="127.0.0.1", port=0)
fake_server.start()
self.assertNotEqual(0, fake_server.port)
cli = eventlet.connect(("localhost", fake_ssl_server.port))
cli = eventlet.wrap_ssl(cli,
ca_certs=os.path.join(SSL_CERT_DIR, 'ca.crt'))
cli.write('POST / HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nContent-length:4\r\n\r\nPING')
response = cli.read(8192)
self.assertEqual(response[-4:], "PONG")
cli = eventlet.connect(("localhost", fake_server.port))
cli.sendall('POST / HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nContent-length:4\r\n\r\nPING')
response = cli.recv(8192)
self.assertEqual(response[-4:], "PONG")
fake_ssl_server.stop()
fake_ssl_server.wait()
@testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support")
def test_app_using_ipv6_and_ssl(self):
greetings = 'Hello, World!!!'
@webob.dec.wsgify
def hello_world(req):
return greetings
server = nova.wsgi.Server("fake_ssl",
hello_world,
host="::1",
port=0,
use_ssl=True)
server.start()
response = urllib2.urlopen('https://[::1]:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
server.wait()
| |
import logging
import json
from datetime import (datetime, timedelta)
import pytz
import requests
from oauthlib.oauth1.rfc5849 import SIGNATURE_HMAC, SIGNATURE_TYPE_AUTH_HEADER
from oauthlib.common import generate_token
from requests_oauthlib import OAuth1 as OAuth1Manager
from six.moves.urllib.parse import parse_qsl
from oauthlib.oauth2.rfc6749.clients import base as oauthlib_base
log = logging.getLogger(__name__)
"""Bearer Types
Currently the built in API's use requests-oauthlib, (uses oauthlib under the
hood).
oauthlib's Client supports a property called: `default_token_placement`
requests-oauthlib does not currently support passing this value down to the
`oauthlib.Client`. This is bad news for Services like Instagram who only
accept the token in the URI not in the header. requests-oauthlib will always
send the access token in the header.
`chatterbox.api.Oauth2Api`, will, during it's initialization look back to the
`chatterbox.drivers.*` and pull `bearer_type` now. It will then reach into
the `OAuth2Session._client` and set the `default_token_placement` property.
Acceptable values are derived directly from oauthlib:
https://github.com/idan/oauthlib/blob/master/oauthlib/oauth2/rfc6749/clients/base.py#L23-L25
see this issue on requests-oauthlib:
https://github.com/requests/requests-oauthlib/issues/170
"""
BEARER_HEADER = oauthlib_base.AUTH_HEADER
BEARER_BODY = oauthlib_base.BODY
BEARER_URI = oauthlib_base.URI_QUERY
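# Hedged sketch of the workaround described above (the session/token values are
# illustrative; ``OAuth2Session._client`` and ``default_token_placement`` come
# from requests-oauthlib and oauthlib respectively):
#
#   from requests_oauthlib import OAuth2Session
#   session = OAuth2Session(client_id='...', token={'access_token': '...', 'token_type': 'Bearer'})
#   session._client.default_token_placement = BEARER_URI  # e.g. required by Instagram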
class OAuthError(Exception):
pass
class OAuthDenied(Exception):
pass
class OAuth(object):
verify = True
signature_method = SIGNATURE_HMAC
signature_type = SIGNATURE_TYPE_AUTH_HEADER
def __init__(self, client_id, client_secret):
log.debug("Initializing OAuth Driver")
self.client_id = client_id
self.client_secret = client_secret
self.request = None
self.alias = self.__class__.__name__.lower()
@property
def session(self):
try:
return self.request.session
        except AttributeError:
return {}
def get_request_token_url(self):
return self.request_token_url
def get_access_token_url(self):
return self.access_token_url
def get_authorize_url(self, redirect_url, scopes):
params = self.get_authorize_params(
redirect_url=redirect_url,
scopes=scopes,
)
req = requests.Request(url=self.authorize_url, params=params)
return req.prepare().url
def callback(self, data, redirect_uri):
"""
        Receives the full callback from the service and returns the parsed
        token data (for OAuth1 this includes the user token and token secret).
"""
raise NotImplementedError("callback() must be defined in a subclass")
class OAuth1(OAuth):
returns_token = True
def parse_token(self, content):
content = dict(parse_qsl(content))
return {
'access_token': content['oauth_token'],
'secret': content['oauth_token_secret'],
}
def get_request_token_params(self, redirect_url, scopes):
return {}
def get_request_token_response(self, redirect_url, scopes):
auth = OAuth1Manager(client_key=self.client_id,
client_secret=self.client_secret,
callback_uri=redirect_url,
signature_method=self.signature_method,
signature_type=self.signature_type)
return requests.post(self.get_request_token_url(), auth=auth,
params=self.get_request_token_params(redirect_url, scopes),
verify=self.verify)
def get_authorize_params(self, redirect_url, scopes):
resp = self.get_request_token_response(redirect_url, scopes)
try:
data = self.parse_token(resp.content)
except Exception:
raise OAuthError('Unable to parse access token')
self.session["%s_temp_secret" % self.alias] = data['secret']
if not self.returns_token:
redirect_url += ('?oauth_token=%s' % data['access_token'])
return {
'oauth_token': data['access_token'],
'oauth_callback': redirect_url,
}
def get_access_token_response(self, token, secret, verifier=None):
auth = OAuth1Manager(client_key=self.client_id,
client_secret=self.client_secret,
resource_owner_key=token,
resource_owner_secret=secret,
verifier=verifier,
signature_method=self.signature_method,
signature_type=self.signature_type)
return requests.post(self.get_access_token_url(), auth=auth,
verify=self.verify)
def callback(self, data, redirect_uri):
token = data['oauth_token']
verifier = data.get('oauth_verifier', None)
secret = self.session.pop('%s_temp_secret' % self.alias, None)
resp = self.get_access_token_response(token, secret, verifier)
try:
return self.parse_token(resp.content)
except Exception:
raise OAuthError('Unable to parse access token')
def api(self, key, domain, path, method='GET', params=None, data=None,
headers=None):
protocol = self.https and 'https' or 'http'
url = '%s://%s%s' % (protocol, domain, path)
auth = OAuth1Manager(client_key=self.client_id,
client_secret=self.client_secret,
resource_owner_key=key.access_token,
resource_owner_secret=key.secret,
signature_method=self.signature_method,
signature_type=self.signature_type)
return requests.request(method, url, auth=auth, params=params or {},
data=data or {}, headers=headers or {},
verify=self.verify, stream=True)
class OAuth2(OAuth):
auth = None
supports_state = True
token_type = "Bearer"
bearer_type = BEARER_HEADER
def parse_token(self, content):
data = json.loads(content)
expires_in = data.get('expires_in', None)
if expires_in:
now = datetime.now(pytz.utc)
data["expires_at"] = now + timedelta(seconds=expires_in)
return data
def get_scope_string(self, scopes):
out = " ".join(scopes)
return out
def get_authorize_params(self, redirect_url, scopes):
state = generate_token()
self.session['chatterbox_%s_state' % self.alias] = state
if not self.supports_state:
redirect_url += ('?state=%s' % state)
params = {
'client_id': self.client_id,
'response_type': 'code',
'redirect_uri': redirect_url,
'state': state,
}
if any(scopes):
params['scope'] = self.get_scope_string(scopes)
return params
def get_access_token_response(self, redirect_url, data):
return requests.post(self.get_access_token_url(), {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'authorization_code',
'code': data.get('code', None),
'redirect_uri': redirect_url
}, verify=self.verify, auth=self.auth)
def callback(self, data, redirect_url):
state = self.session.pop('chatterbox_%s_state' % self.alias, None)
# state = flask.session['%s_state' % self.alias]
if 'state' in data and state != data.get('state', None):
# potential CSRF
raise OAuthDenied("invalid state")
if not self.supports_state:
redirect_url += ('?state=%s' % state)
resp = self.get_access_token_response(redirect_url, data)
return self.parse_token(resp.content)
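# Illustrative driver subclass (endpoints and scopes are examples only; a real
# driver in chatterbox.drivers would also carry provider-specific settings and
# a bound request whose session is used for state validation):
#
#   class ExampleOAuth2(OAuth2):
#       authorize_url = 'https://provider.example/oauth/authorize'
#       access_token_url = 'https://provider.example/oauth/token'
#
#   driver = ExampleOAuth2(client_id='...', client_secret='...')
#   url = driver.get_authorize_url('https://app.example/callback', ['read'])
#   # redirect the user to ``url``; the provider later calls back with
#   # ``code`` and ``state``, which ``driver.callback(...)`` exchanges for a token.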
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for numpy_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
class NumpyIoTest(test.TestCase):
def testNumpyInputFn(self):
a = np.arange(4) * 1.0
b = np.arange(32, 36)
x = {'a': a, 'b': b}
y = np.arange(-32, -28)
with self.test_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [0, 1])
self.assertAllEqual(res[0]['b'], [32, 33])
self.assertAllEqual(res[1], [-32, -31])
session.run([features, target])
with self.assertRaises(errors.OutOfRangeError):
session.run([features, target])
coord.request_stop()
coord.join(threads)
def testNumpyInputFnWithVeryLargeBatchSizeAndMultipleEpochs(self):
a = np.arange(2) * 1.0
b = np.arange(32, 34)
x = {'a': a, 'b': b}
y = np.arange(-32, -30)
with self.test_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
features, target = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [0, 1, 0, 1])
self.assertAllEqual(res[0]['b'], [32, 33, 32, 33])
self.assertAllEqual(res[1], [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run([features, target])
coord.request_stop()
coord.join(threads)
def testNumpyInputFnWithZeroEpochs(self):
a = np.arange(4) * 1.0
b = np.arange(32, 36)
x = {'a': a, 'b': b}
y = np.arange(-32, -28)
with self.test_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=0)
features, target = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
with self.assertRaises(errors.OutOfRangeError):
session.run([features, target])
coord.request_stop()
coord.join(threads)
def testNumpyInputFnWithBatchSizeNotDividedByDataSize(self):
batch_size = 2
a = np.arange(5) * 1.0
b = np.arange(32, 37)
x = {'a': a, 'b': b}
y = np.arange(-32, -27)
with self.test_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=batch_size, shuffle=False, num_epochs=1)
features, target = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [0, 1])
self.assertAllEqual(res[0]['b'], [32, 33])
self.assertAllEqual(res[1], [-32, -31])
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [2, 3])
self.assertAllEqual(res[0]['b'], [34, 35])
self.assertAllEqual(res[1], [-30, -29])
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [4])
self.assertAllEqual(res[0]['b'], [36])
self.assertAllEqual(res[1], [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run([features, target])
coord.request_stop()
coord.join(threads)
def testNumpyInputFnWithBatchSizeNotDividedByDataSizeAndMultipleEpochs(self):
batch_size = 2
a = np.arange(3) * 1.0
b = np.arange(32, 35)
x = {'a': a, 'b': b}
y = np.arange(-32, -29)
with self.test_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=batch_size, shuffle=False, num_epochs=3)
features, target = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [0, 1])
self.assertAllEqual(res[0]['b'], [32, 33])
self.assertAllEqual(res[1], [-32, -31])
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [2, 0])
self.assertAllEqual(res[0]['b'], [34, 32])
self.assertAllEqual(res[1], [-30, -32])
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [1, 2])
self.assertAllEqual(res[0]['b'], [33, 34])
self.assertAllEqual(res[1], [-31, -30])
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [0, 1])
self.assertAllEqual(res[0]['b'], [32, 33])
self.assertAllEqual(res[1], [-32, -31])
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [2])
self.assertAllEqual(res[0]['b'], [34])
self.assertAllEqual(res[1], [-30])
with self.assertRaises(errors.OutOfRangeError):
session.run([features, target])
coord.request_stop()
coord.join(threads)
def testNumpyInputFnWithBatchSizeLargerThanDataSize(self):
batch_size = 10
a = np.arange(4) * 1.0
b = np.arange(32, 36)
x = {'a': a, 'b': b}
y = np.arange(-32, -28)
with self.test_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=batch_size, shuffle=False, num_epochs=1)
features, target = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [0, 1, 2, 3])
self.assertAllEqual(res[0]['b'], [32, 33, 34, 35])
self.assertAllEqual(res[1], [-32, -31, -30, -29])
with self.assertRaises(errors.OutOfRangeError):
session.run([features, target])
coord.request_stop()
coord.join(threads)
def testNumpyInputFnWithDifferentDimensionsOfFeatures(self):
a = np.array([[1, 2], [3, 4]])
b = np.array([5, 6])
x = {'a': a, 'b': b}
y = np.arange(-32, -30)
with self.test_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [[1, 2], [3, 4]])
self.assertAllEqual(res[0]['b'], [5, 6])
self.assertAllEqual(res[1], [-32, -31])
coord.request_stop()
coord.join(threads)
def testNumpyInputFnWithXAsNonDict(self):
x = np.arange(32, 36)
y = np.arange(4)
with self.test_session():
with self.assertRaisesRegexp(TypeError, 'x must be dict'):
failing_input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
failing_input_fn()
def testNumpyInputFnWithTargetKeyAlreadyInX(self):
array = np.arange(32, 36)
x = {'__target_key__': array}
y = np.arange(4)
with self.test_session():
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
input_fn()
self.assertAllEqual(x['__target_key__'], array)
self.assertAllEqual(x['__target_key___n'], y)
def testNumpyInputFnWithMismatchLengthOfInputs(self):
a = np.arange(4) * 1.0
b = np.arange(32, 36)
x = {'a': a, 'b': b}
x_mismatch_length = {'a': np.arange(1), 'b': b}
y_longer_length = np.arange(10)
with self.test_session():
with self.assertRaisesRegexp(
ValueError, 'Length of tensors in x and y is mismatched.'):
failing_input_fn = numpy_io.numpy_input_fn(
x, y_longer_length, batch_size=2, shuffle=False, num_epochs=1)
failing_input_fn()
with self.assertRaisesRegexp(
ValueError, 'Length of tensors in x and y is mismatched.'):
failing_input_fn = numpy_io.numpy_input_fn(
x=x_mismatch_length,
y=None,
batch_size=2,
shuffle=False,
num_epochs=1)
failing_input_fn()
if __name__ == '__main__':
test.main()
| |
# Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A win32event based implementation of the Twisted main loop.
This requires win32all or ActivePython to be installed.
Maintainer: Itamar Shtull-Trauring
LIMITATIONS:
1. WaitForMultipleObjects and thus the event loop can only handle 64 objects.
2. Process running has some problems (see Process docstring).
TODO:
1. Event loop handling of writes is *very* problematic (this is causing failed tests).
Switch to doing it the correct way, whatever that means (see below).
2. Replace icky socket loopback waker with event based waker (use dummyEvent object)
3. Switch everyone to using Free Software so we don't have to deal with proprietary APIs.
ALTERNATIVE SOLUTIONS:
- IIRC, sockets can only be registered once. So we switch to a structure
like the poll() reactor, thus allowing us to deal with write events in
a decent fashion. This should allow us to pass tests, but we're still
limited to 64 events.
Or:
- Instead of doing a reactor, we make this an addon to the select reactor.
The WFMO event loop runs in a separate thread. This means no need to maintain
separate code for networking, 64 event limit doesn't apply to sockets,
we can run processes and other win32 stuff in default event loop. The
only problem is that we're stuck with the icky socket based waker.
Another benefit is that this could be extended to support >64 events
in a simpler manner than the previous solution.
The 2nd solution is probably what will get implemented.
"""
# System imports
import time
import sys
from zope.interface import implements
# Win32 imports
from win32file import WSAEventSelect, FD_READ, FD_CLOSE, FD_ACCEPT, FD_CONNECT
from win32event import CreateEvent, MsgWaitForMultipleObjects
from win32event import WAIT_OBJECT_0, WAIT_TIMEOUT, QS_ALLINPUT, QS_ALLEVENTS
import win32gui
# Twisted imports
from twisted.internet import posixbase
from twisted.python import log, threadable, failure
from twisted.internet.interfaces import IReactorFDSet, IReactorProcess
from twisted.internet._dumbwin32proc import Process
class Win32Reactor(posixbase.PosixReactorBase):
"""
Reactor that uses Win32 event APIs.
@ivar _reads: A dictionary mapping L{FileDescriptor} instances to a
win32 event object used to check for read events for that descriptor.
    @ivar _writes: A dictionary mapping L{FileDescriptor} instances to an
arbitrary value. Keys in this dictionary will be given a chance to
write out their data.
@ivar _events: A dictionary mapping win32 event object to tuples of
L{FileDescriptor} instances and event masks.
"""
implements(IReactorFDSet, IReactorProcess)
dummyEvent = CreateEvent(None, 0, 0, None)
def __init__(self):
self._reads = {}
self._writes = {}
self._events = {}
posixbase.PosixReactorBase.__init__(self)
def _makeSocketEvent(self, fd, action, why):
"""
Make a win32 event object for a socket.
"""
event = CreateEvent(None, 0, 0, None)
WSAEventSelect(fd, event, why)
self._events[event] = (fd, action)
return event
def addEvent(self, event, fd, action):
"""
Add a new win32 event to the event loop.
"""
self._events[event] = (fd, action)
def removeEvent(self, event):
"""
Remove an event.
"""
del self._events[event]
def addReader(self, reader):
"""
Add a socket FileDescriptor for notification of data available to read.
"""
if reader not in self._reads:
self._reads[reader] = self._makeSocketEvent(
reader, 'doRead', FD_READ | FD_ACCEPT | FD_CONNECT | FD_CLOSE)
def addWriter(self, writer):
"""
Add a socket FileDescriptor for notification of data available to write.
"""
if writer not in self._writes:
self._writes[writer] = 1
def removeReader(self, reader):
"""Remove a Selectable for notification of data available to read.
"""
if reader in self._reads:
del self._events[self._reads[reader]]
del self._reads[reader]
def removeWriter(self, writer):
"""Remove a Selectable for notification of data available to write.
"""
if writer in self._writes:
del self._writes[writer]
def removeAll(self):
"""
Remove all selectables, and return a list of them.
"""
return self._removeAll(self._reads, self._writes)
def getReaders(self):
return self._reads.keys()
def getWriters(self):
return self._writes.keys()
def doWaitForMultipleEvents(self, timeout):
log.msg(channel='system', event='iteration', reactor=self)
if timeout is None:
#timeout = INFINITE
timeout = 100
else:
timeout = int(timeout * 1000)
if not (self._events or self._writes):
# sleep so we don't suck up CPU time
time.sleep(timeout / 1000.0)
return
canDoMoreWrites = 0
for fd in self._writes.keys():
if log.callWithLogger(fd, self._runWrite, fd):
canDoMoreWrites = 1
if canDoMoreWrites:
timeout = 0
handles = self._events.keys() or [self.dummyEvent]
val = MsgWaitForMultipleObjects(handles, 0, timeout, QS_ALLINPUT | QS_ALLEVENTS)
if val == WAIT_TIMEOUT:
return
elif val == WAIT_OBJECT_0 + len(handles):
exit = win32gui.PumpWaitingMessages()
if exit:
self.callLater(0, self.stop)
return
elif val >= WAIT_OBJECT_0 and val < WAIT_OBJECT_0 + len(handles):
fd, action = self._events[handles[val - WAIT_OBJECT_0]]
log.callWithLogger(fd, self._runAction, action, fd)
def _runWrite(self, fd):
closed = 0
try:
closed = fd.doWrite()
except:
closed = sys.exc_info()[1]
log.deferr()
if closed:
self.removeReader(fd)
self.removeWriter(fd)
try:
fd.connectionLost(failure.Failure(closed))
except:
log.deferr()
elif closed is None:
return 1
def _runAction(self, action, fd):
try:
closed = getattr(fd, action)()
except:
closed = sys.exc_info()[1]
log.deferr()
if closed:
self._disconnectSelectable(fd, closed, action == 'doRead')
doIteration = doWaitForMultipleEvents
def spawnProcess(self, processProtocol, executable, args=(), env={}, path=None, uid=None, gid=None, usePTY=0, childFDs=None):
"""Spawn a process."""
if uid is not None:
raise ValueError("Setting UID is unsupported on this platform.")
if gid is not None:
raise ValueError("Setting GID is unsupported on this platform.")
if usePTY:
raise ValueError("PTYs are unsupported on this platform.")
if childFDs is not None:
raise ValueError(
"Custom child file descriptor mappings are unsupported on "
"this platform.")
args, env = self._checkProcessArgs(args, env)
return Process(self, processProtocol, executable, args, env, path)
def install():
threadable.init(1)
r = Win32Reactor()
import main
main.installReactor(r)
__all__ = ["Win32Reactor", "install"]
| |
"""
Rogue-like map utilities such as line-of-sight, field-of-view, and path-finding.
"""
import itertools as _itertools
import math as _math
from tcod import ffi as _ffi
from tcod import lib as _lib
import tdl as _tdl
from . import style as _style
_FOVTYPES = {'BASIC' : 0, 'DIAMOND': 1, 'SHADOW': 2, 'RESTRICTIVE': 12, 'PERMISSIVE': 11}
def _get_fov_type(fov):
"Return a FOV from a string"
oldFOV = fov
fov = str(fov).upper()
if fov in _FOVTYPES:
return _FOVTYPES[fov]
if fov[:10] == 'PERMISSIVE' and fov[10].isdigit() and fov[10] != '9':
return 4 + int(fov[10])
raise _tdl.TDLError('No such fov option as %s' % oldFOV)
class Map(object):
"""Fast field-of-view and path-finding on stored data.
    Set map conditions with the walkable and transparent attributes; this
    object can be iterated and checked for containment similar to consoles.
    For example, you can set all tiles as transparent and walkable with the
    following code::
        map = tdl.map.Map(80, 60)
        for x,y in map:
            map.transparent[x,y] = True
            map.walkable[x,y] = True
    @ivar transparent: Map transparency, access this attribute with
                       map.transparent[x,y]
Set to True to allow field-of-view rays, False will
block field-of-view.
Transparent tiles only affect field-of-view.
@ivar walkable: Map accessibility, access this attribute with
map.walkable[x,y]
Set to True to allow path-finding through that tile,
False will block passage to that tile.
Walkable tiles only affect path-finding.
@ivar fov: Map tiles touched by a field-of-view computation,
access this attribute with map.fov[x,y]
               Is True if the tile is in view, otherwise False.
You can set to this attribute if you want, but you'll typically
be using it to read the field-of-view of a L{compute_fov} call.
@since: 1.5.0
"""
class _MapAttribute(object):
def __init__(self, map, bit_index):
self.map = map
self.bit_index = bit_index
self.bit = 1 << bit_index
self.bit_inverse = 0xFF ^ self.bit
def __getitem__(self, key):
return bool(self.map._array_cdata[key[1]][key[0]] & self.bit)
def __setitem__(self, key, value):
self.map._array_cdata[key[1]][key[0]] = (
(self.map._array_cdata[key[1]][key[0]] & self.bit_inverse) |
(self.bit * bool(value))
)
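    # Bit layout per tile (one byte in _array_cdata, wired up in __init__
    # below): bit 0 = transparent, bit 1 = walkable, bit 2 = fov.  For example,
    # a tile that is transparent and walkable but not currently in view stores
    # 0b011 == 3.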
def __init__(self, width, height):
"""Create a new Map with width and height.
@type width: int
@type height: int
@param width: Width of the new Map instance, in tiles.
        @param height: Height of the new Map instance, in tiles.
"""
self.width = width
self.height = height
self._map_cdata = _lib.TCOD_map_new(width, height)
# cast array into cdata format: uint8[y][x]
# for quick Python access
        self._array_cdata = _ffi.new('uint8[%i][%i]' % (height, width))
# flat array to pass to TDL's C helpers
self._array_cdata_flat = _ffi.cast('uint8 *', self._array_cdata)
self.transparent = self._MapAttribute(self, 0)
self.walkable = self._MapAttribute(self, 1)
self.fov = self._MapAttribute(self, 2)
def __del__(self):
if self._map_cdata:
_lib.TCOD_map_delete(self._map_cdata)
self._map_cdata = None
def compute_fov(self, x, y, fov='PERMISSIVE', radius=None, light_walls=True,
sphere=True, cumulative=False):
"""Compute the field-of-view of this Map and return an iterator of the
points touched.
@type x: int
@type y: int
@param x: x center of the field-of-view
@param y: y center of the field-of-view
@type fov: string
@param fov: The type of field-of-view to be used. Available types are:
'BASIC', 'DIAMOND', 'SHADOW', 'RESTRICTIVE', 'PERMISSIVE',
'PERMISSIVE0', 'PERMISSIVE1', ..., 'PERMISSIVE8'
@type radius: int
        @param radius: Radius of the field-of-view.
@type light_walls: boolean
@param light_walls: Include or exclude wall tiles in the field-of-view.
@type sphere: boolean
@param sphere: True for a spherical field-of-view.
False for a square one.
@type cumulative: boolean
@param cumulative:
@rtype: iter((x, y), ...)
@return: An iterator of (x, y) points of tiles touched by the
field-of-view.
Unexpected behaviour can happen if you modify the Map while
using the iterator.
You can use the Map's fov attribute as an alternative to this
iterator.
"""
# refresh cdata
_lib.TDL_map_data_from_buffer(self._map_cdata,
self._array_cdata_flat)
if radius is None: # infinite radius
radius = max(self.width, self.height)
_lib.TCOD_map_compute_fov(self._map_cdata, x, y, radius, light_walls,
_get_fov_type(fov))
_lib.TDL_map_fov_to_buffer(self._map_cdata,
self._array_cdata_flat, cumulative)
def iterate_fov():
_array_cdata = self._array_cdata
            for y in range(self.height):
                for x in range(self.width):
                    if _array_cdata[y][x] & 4:
yield (x, y)
return iterate_fov()
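    # Usage sketch (comments only): compute a field-of-view and read it back
    # either from the returned iterator or from the ``fov`` attribute.
    #
    #   map_ = Map(80, 60)
    #   # ... set map_.transparent[x, y] for open tiles ...
    #   visible = set(map_.compute_fov(40, 30, fov='SHADOW', radius=8))
    #   assert all(map_.fov[x, y] for (x, y) in visible)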
def compute_path(self, start_x, start_y, dest_x, dest_y,
diagonal_cost=_math.sqrt(2)):
"""Get the shortest path between two points.
The start position is not included in the list.
        @type diagonal_cost: float
        @param diagonal_cost: Multiplier for diagonal movement.
                              Can be set to zero to disable diagonal movement
                              entirely.
        @rtype: [(x, y), ...]
        @return: Returns the shortest list of points to get to the destination
                 position from the starting position
"""
# refresh cdata
_lib.TDL_map_data_from_buffer(self._map_cdata,
self._array_cdata_flat)
path_cdata = _lib.TCOD_path_new_using_map(self._map_cdata, diagonal_cost)
try:
_lib.TCOD_path_compute(path_cdata, start_x, start_y, dest_x, dest_y)
x = _ffi.new('int *')
y = _ffi.new('int *')
length = _lib.TCOD_path_size(path_cdata)
path = [None] * length
for i in range(length):
_lib.TCOD_path_get(path_cdata, i, x, y)
path[i] = ((x[0], y[0]))
finally:
_lib.TCOD_path_delete(path_cdata)
return path
def __iter__(self):
return _itertools.product(range(self.width), range(self.height))
def __contains__(self, position):
x, y = position
return (0 <= x < self.width) and (0 <= y < self.height)
class AStar(object):
"""A* pathfinder
Using this class requires a callback detailed in L{AStar.__init__}
@undocumented: getPath
"""
__slots__ = ('_as_parameter_', '_callback', '__weakref__')
def __init__(self, width, height, callback,
diagnalCost=_math.sqrt(2), advanced=False):
"""Create an A* pathfinder using a callback.
        Before creating this instance you should make one of two types of
callbacks:
- A function that returns the cost to move to (x, y)
or
- A function that returns the cost to move between
(destX, destY, sourceX, sourceY)
        If the path is blocked the function should return zero or None.
When using the second type of callback be sure to set advanced=True
@type width: int
@param width: width of the pathfinding area in tiles
@type height: int
@param height: height of the pathfinding area in tiles
@type callback: function
@param callback: A callback taking parameters depending on the setting
of 'advanced' and returning the cost of
movement for an open tile or zero for a
blocked tile.
@type diagnalCost: float
@param diagnalCost: Multiplier for diagonal movement.
Can be set to zero to disable diagonal movement
entirely.
@type advanced: boolean
@param advanced: A simple callback with 2 positional parameters may not
provide enough information. Setting this to True will
call the callback with 2 additional parameters giving
you both the destination and the source of movement.
When True the callback will need to accept
(destX, destY, sourceX, sourceY) as parameters.
Instead of just (destX, destY).
"""
if not diagnalCost: # set None or False to zero
diagnalCost = 0.0
if advanced:
def newCallback(sourceX, sourceY, destX, destY, null):
pathCost = callback(destX, destY, sourceX, sourceY)
if pathCost:
return pathCost
return 0.0
else:
def newCallback(sourceX, sourceY, destX, destY, null):
pathCost = callback(destX, destY) # expecting a float or 0
if pathCost:
return pathCost
return 0.0
# float(int, int, int, int, void*)
self._callback = _ffi.callback('TCOD_path_func_t')(newCallback)
self._as_parameter_ = _lib.TCOD_path_new_using_function(width, height,
self._callback, _ffi.NULL, diagnalCost)
def __del__(self):
if self._as_parameter_:
_lib.TCOD_path_delete(self._as_parameter_)
self._as_parameter_ = None
def get_path(self, origX, origY, destX, destY):
"""
Get the shortest path from origXY to destXY.
@rtype: [(x, y), ...]
@return: Returns a list walking the path from origXY to destXY.
This excludes the starting point and includes the destination.
If no path is found then an empty list is returned.
"""
found = _lib.TCOD_path_compute(self._as_parameter_, origX, origY, destX, destY)
if not found:
return [] # path not found
x, y = _ffi.new('int *'), _ffi.new('int *')
recalculate = True
path = []
while _lib.TCOD_path_walk(self._as_parameter_, x, y, recalculate):
path.append((x[0], y[0]))
return path
def quick_fov(x, y, callback, fov='PERMISSIVE', radius=7.5, lightWalls=True, sphere=True):
"""All field-of-view functionality in one call.
Before using this call be sure to make a function, lambda, or method that takes 2
positional parameters and returns True if light can pass through the tile or False
for light-blocking tiles and for indexes that are out of bounds of the
dungeon.
This function is 'quick' as in no hassle but can quickly become a very slow
function call if a large radius is used or the callback provided itself
isn't optimized.
Always check if the index is in bounds both in the callback and in the
returned values. These values can go into the negatives as well.
@type x: int
@param x: x center of the field-of-view
@type y: int
@param y: y center of the field-of-view
@type callback: function
@param callback: This should be a function that takes two positional arguments x,y
and returns True if the tile at that position is transparent
or False if the tile blocks light or is out of bounds.
@type fov: string
@param fov: The type of field-of-view to be used. Available types are:
'BASIC', 'DIAMOND', 'SHADOW', 'RESTRICTIVE', 'PERMISSIVE',
'PERMISSIVE0', 'PERMISSIVE1', ..., 'PERMISSIVE8'
@type radius: float
    @param radius: Radius of the field-of-view.
When sphere is True a floating point can be used to fine-tune
the range. Otherwise the radius is just rounded up.
                   Be careful as a large radius has an exponential effect on
how long this function takes.
@type lightWalls: boolean
@param lightWalls: Include or exclude wall tiles in the field-of-view.
@type sphere: boolean
@param sphere: True for a spherical field-of-view. False for a square one.
@rtype: set((x, y), ...)
@return: Returns a set of (x, y) points that are within the field-of-view.
"""
trueRadius = radius
radius = int(_math.ceil(radius))
mapSize = radius * 2 + 1
fov = _get_fov_type(fov)
setProp = _lib.TCOD_map_set_properties # make local
inFOV = _lib.TCOD_map_is_in_fov
tcodMap = _lib.TCOD_map_new(mapSize, mapSize)
try:
# pass no.1, write callback data to the tcodMap
for x_, y_ in _itertools.product(range(mapSize), range(mapSize)):
pos = (x_ + x - radius,
y_ + y - radius)
transparent = bool(callback(*pos))
setProp(tcodMap, x_, y_, transparent, False)
# pass no.2, compute fov and build a list of points
_lib.TCOD_map_compute_fov(tcodMap, radius, radius, radius, lightWalls, fov)
touched = set() # points touched by field of view
for x_, y_ in _itertools.product(range(mapSize), range(mapSize)):
if sphere and _math.hypot(x_ - radius, y_ - radius) > trueRadius:
continue
if inFOV(tcodMap, x_, y_):
touched.add((x_ + x - radius, y_ + y - radius))
finally:
_lib.TCOD_map_delete(tcodMap)
return touched
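# Usage sketch for quick_fov (comments only; the callback below treats every
# tile inside a hypothetical 50x50 dungeon as transparent):
#
#   def transparent(x, y):
#       return 0 <= x < 50 and 0 <= y < 50
#   visible = quick_fov(25, 25, transparent, fov='SHADOW', radius=7.5)
#   # ``visible`` is a set of (x, y) tuples within the field of view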
def bresenham(x1, y1, x2, y2):
"""
Return a list of points in a bresenham line.
Implementation hastily copied from RogueBasin.
@return: Returns a list of (x, y) points, including both the start and
endpoints.
"""
points = []
issteep = abs(y2-y1) > abs(x2-x1)
if issteep:
x1, y1 = y1, x1
x2, y2 = y2, x2
rev = False
if x1 > x2:
x1, x2 = x2, x1
y1, y2 = y2, y1
rev = True
deltax = x2 - x1
deltay = abs(y2-y1)
error = int(deltax / 2)
y = y1
ystep = None
if y1 < y2:
ystep = 1
else:
ystep = -1
for x in range(x1, x2 + 1):
if issteep:
points.append((y, x))
else:
points.append((x, y))
error -= deltay
if error < 0:
y += ystep
error += deltax
# Reverse the list if the coordinates were reversed
if rev:
points.reverse()
return points
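# A worked example, stepping through the algorithm above:
#   bresenham(0, 0, 3, 2) == [(0, 0), (1, 1), (2, 1), (3, 2)]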
__all__ = [_var for _var in locals().keys() if _var[0] != '_']
quickFOV = _style.backport(quick_fov)
AStar.getPath = _style.backport(AStar.get_path)
| |
import matplotlib
import paths
matplotlib.rc_file(paths.pcpath('pubfiguresrc'))
import os
import pylab as pl
from astropy import table
from paths import analysispath
import numpy as np
from astropy import coordinates
from astropy import units as u
import heating
pcfittable = table.Table.read(os.path.join(analysispath,
'fitted_line_parameters_Chi2Constraints.ipac'),
format='ascii.ipac')
lolim = pcfittable['tmax1sig_chi2'] > 340
maps = np.char.startswith(pcfittable['Source_Name'], 'Map')
ok = ~np.isnan(pcfittable['tmin1sig_chi2']) & (pcfittable['width'] < 40) & (pcfittable['h2coratio321303']/pcfittable['eh2coratio321303'] > 5) & pcfittable['is_good'].astype('bool')
flags = {'is_map': maps,
'is_lolim': lolim,
'is_ok': ok}
# Don't plot these for now...
pcfittable = pcfittable[(~lolim) & ok]
maps = np.char.startswith(pcfittable['Source_Name'], 'Map')
lolim_conservative = pcfittable['tmax1sig_chi2'] > 150
fig4 = pl.figure(4)
fig4.clf()
ax = fig4.add_subplot(1,3,1)
ax.errorbar(pcfittable['temperature_chi2'], pcfittable['density_chi2'],
yerr=[pcfittable['density_chi2']-pcfittable['dmin1sig_chi2'],
pcfittable['dmax1sig_chi2']-pcfittable['density_chi2']],
xerr=[pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'],
pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2']],
linestyle='none', marker='s', linewidth=1, alpha=0.5)
ax2 = fig4.add_subplot(1,3,2)
# Don't do this any more: it relies on having the RADEX fits, which we don't.
#ax2.errorbar(pcfittable['temperature_chi2'], pcfittable['temperature'],
# yerr=pcfittable['etemperature'],
# xerr=[pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'],
# pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2']],
# linestyle='none', marker='s', linewidth=1, alpha=0.5)
ax2.plot([0,300],[0,300],'k--',linewidth=2,alpha=0.5)
fig5 = pl.figure(5)
fig5.clf()
ax5 = fig5.gca()
ax5.errorbar(coordinates.Angle(pcfittable['GLON']*u.deg).wrap_at(180*u.deg).value[maps],
pcfittable['temperature_chi2'][maps],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[maps],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[maps]],
capsize=0, markeredgecolor='none',
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='r')
ax5.set_ylim(0,150)
ax5.set_ylabel("Temperature (K)")
ax5.set_xlabel("Galactic Longitude ($^{\\circ}$)")
fig5.savefig(paths.fpath('chi2_temperature_vs_glon_byfield.pdf'),
bbox_inches='tight')
ax5.errorbar(coordinates.Angle(pcfittable['GLON']*u.deg).wrap_at(180*u.deg).value[~maps],
pcfittable['temperature_chi2'][~maps],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[~maps],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[~maps]],
capsize=0, markeredgecolor='none',
linestyle='none', marker='s', linewidth=1, alpha=0.5)
fig5.savefig(paths.fpath('chi2_temperature_vs_glon_fieldsandsources.pdf'),
bbox_inches='tight')
fig6 = pl.figure(6)
fig6.clf()
ax6 = fig6.gca()
mask = maps&~lolim_conservative
ax6.errorbar(pcfittable['higaldusttem'][mask],
pcfittable['temperature_chi2'][mask],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[mask],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[mask]],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='r', capsize=0)
ax6.plot([15,30],[15,30],'k--')
mask = maps&lolim_conservative
ax6.plot(pcfittable['higaldusttem'][mask],
pcfittable['tmin1sig_chi2'][mask],
marker='^',
markersize=10,
markeredgecolor='none',
color='r',
alpha=0.5,
linestyle='none')
ax6.set_xlabel("HiGal Dust Temperature (K)")
ax6.set_ylabel("H$_2$CO Temperature (K)")
ax6.set_ylim(0,200)
ax6.set_xlim(15,30)
fig6.savefig(paths.fpath('chi2_temperature_vs_higaltemperature_byfield.pdf'),
bbox_inches='tight')
mask = (~maps)&(~lolim_conservative)
ax6.errorbar(pcfittable['higaldusttem'][mask],
pcfittable['temperature_chi2'][mask],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[mask],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[mask]],
capsize=0,
markeredgecolor='none',
markersize=10,
linestyle='none', marker='s', linewidth=0.5, alpha=0.5, color='b')
mask = (~maps)&lolim_conservative
ax6.plot(pcfittable['higaldusttem'][mask],
pcfittable['tmin1sig_chi2'][mask],
marker='^',
markersize=10,
markeredgecolor='none',
color='b',
alpha=0.5,
linestyle='none')
ax6.set_ylim(10,150)
ax6.set_xlim(15,30)
fig6.savefig(paths.fpath('chi2_temperature_vs_higaltemperature_fieldsandsources_notitle.pdf'),
bbox_inches='tight')
ax6.set_title("Hand-selected regions")
fig6.savefig(paths.fpath('chi2_temperature_vs_higaltemperature_fieldsandsources.pdf'),
bbox_inches='tight')
fig7 = pl.figure(7)
fig7.clf()
ax7 = fig7.gca()
mask = maps&~lolim_conservative
ax7.errorbar(pcfittable['width'][mask]*(8*np.log(2))**0.5,
pcfittable['temperature_chi2'][mask],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[mask],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[mask]],
capsize=0,
markersize=10,
markeredgecolor='none',
linestyle='none', marker='s', linewidth=0.5, alpha=0.6, color='r')
mask = maps&lolim_conservative
ax7.plot(pcfittable['width'][mask]*(8*np.log(2))**0.5,
pcfittable['tmin1sig_chi2'][mask],
marker='^',
markersize=10,
markeredgecolor='none',
color='r',
alpha=0.4,
linestyle='none')
linewidths = np.linspace(0,pcfittable['width'].max())*u.km/u.s
ax7.plot(linewidths*2.35, [heating.tkin_all(10**4*u.cm**-3, sigma, 10*u.pc,
5*u.km/u.s/u.pc, 30*u.K)
for sigma in linewidths],
linestyle='--', color='k', label='$n=10^4$ cm$^{-3}$', zorder=-5)
ax7.plot(linewidths*2.35, [heating.tkin_all(10**4*u.cm**-3, sigma, 10*u.pc,
1*u.km/u.s/u.pc, 30*u.K)
for sigma in linewidths],
linestyle='--', color='r', label='$n=10^4$ cm$^{-3}$, $dv/dr=1$', zorder=-5, linewidth=2, alpha=0.5)
ax7.plot(linewidths*2.35, [heating.tkin_all(10**4*u.cm**-3, sigma, 20*u.pc,
5*u.km/u.s/u.pc, 30*u.K)
for sigma in linewidths],
linestyle='--', color='b', label='$n=10^4$ cm$^{-3}$, $L=20$ pc', zorder=-5, alpha=0.5, linewidth=2)
ax7.plot(linewidths*2.35, [heating.tkin_all(10**5*u.cm**-3, sigma, 10*u.pc,
5*u.km/u.s/u.pc, 30*u.K)
for sigma in linewidths],
linestyle=':', color='k', label='$n=10^5$ cm$^{-3}$', zorder=-5)
ax7.plot(linewidths*2.35, [heating.tkin_all(10**6*u.cm**-3, sigma, 10*u.pc,
5*u.km/u.s/u.pc, 30*u.K)
for sigma in linewidths],
linestyle='-.', color='k', label='$n=10^6$ cm$^{-3}$', zorder=-5)
ax7.plot(linewidths*2.35, [heating.tkin_all(10**5*u.cm**-3, sigma, 10*u.pc,
5*u.km/u.s/u.pc, 30*u.K, crir=1e-15*u.s**-1)
for sigma in linewidths],
linestyle='-', color='g', label='$n=10^5$ cm$^{-3}$, $\zeta_{CR}=10^{-15}$ s$^{-1}$', zorder=-10, alpha=0.25, linewidth=4)
ax7.plot(linewidths*2.35, [heating.tkin_all(10**5*u.cm**-3, sigma, 10*u.pc,
5*u.km/u.s/u.pc, 30*u.K, crir=1e-14*u.s**-1)
for sigma in linewidths],
linestyle=':', color='purple', label='$n=10^5$ cm$^{-3}$, $\zeta_{CR}=10^{-14}$ s$^{-1}$', zorder=-10, alpha=0.25, linewidth=4)
box = ax7.get_position()
ax7.set_position([box.x0, box.y0, box.width * 0.7, box.height])
ax7.legend(loc='center left', fontsize=16, bbox_to_anchor=(1.0, 0.75))
ax7.set_xlabel("Line FWHM (km s$^{-1}$)")
ax7.set_ylabel("Temperature (K)")
ax7.set_ylim(10,150)
fig7.savefig(paths.fpath('chi2_temperature_vs_linewidth_byfield.pdf'),
bbox_inches='tight')
mask = (~maps)&(~lolim_conservative)
ax7.errorbar(pcfittable['width'][mask]*(8*np.log(2))**0.5,
pcfittable['temperature_chi2'][mask],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[mask],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[mask]],
capsize=0,
markeredgecolor='none',
markersize=10,
linestyle='none', marker='s', linewidth=0.5, alpha=0.6, color='b')
mask = (~maps)&lolim_conservative
ax7.plot(pcfittable['width'][mask]*(8*np.log(2))**0.5,
pcfittable['tmin1sig_chi2'][mask],
marker='^',
markersize=10,
markeredgecolor='none',
color='b',
alpha=0.4,
linestyle='none')
ax7.set_ylim(10,150)
fig7.savefig(paths.fpath('chi2_temperature_vs_linewidth_fieldsandsources.pdf'),
bbox_inches='tight')
fig8 = pl.figure(8)
fig8.clf()
ax8 = fig8.gca()
ax8.errorbar(pcfittable['ampH2CO'][maps],
pcfittable['temperature_chi2'][maps],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[maps],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[maps]],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='r')
ax8.set_xlabel("H2CO Peak Amplitude")
ax8.set_ylabel("Temperature (K)")
fig8.savefig(paths.fpath('chi2_temperature_vs_h2coamp_byfield.pdf'),
bbox_inches='tight')
ax8.errorbar(pcfittable['ampH2CO'][~maps],
pcfittable['temperature_chi2'][~maps],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[~maps],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[~maps]],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='b')
fig8.savefig(paths.fpath('chi2_temperature_vs_h2coamp_fieldsandsources.pdf'),
bbox_inches='tight')
fig9 = pl.figure(9)
fig9.clf()
ax9 = fig9.gca()
ax9.set_xscale('log')
ax9.errorbar(pcfittable['higalcolumndens'][maps],
pcfittable['temperature_chi2'][maps],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[maps],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[maps]],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='r')
ax9.set_xlabel("Hi-Gal Fitted Column Density")
ax9.set_ylabel("Temperature (K)")
fig9.savefig(paths.fpath('chi2_temperature_vs_higalcolumn_byfield.pdf'),
bbox_inches='tight')
ax9.errorbar(pcfittable['higalcolumndens'][~maps],
pcfittable['temperature_chi2'][~maps],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[~maps],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[~maps]],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='b')
fig9.savefig(paths.fpath('chi2_temperature_vs_higalcolumn_fieldsandsources.pdf'),
bbox_inches='tight')
fig10 = pl.figure(10)
fig10.clf()
ax10 = fig10.gca()
ax10.errorbar(pcfittable['width'][maps]*(8*np.log(2))**0.5,
pcfittable['h2coratio321303'][maps],
yerr=pcfittable['eh2coratio321303'][maps],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='r')
ax10.set_xlabel("Line FWHM (km s$^{-1}$)")
ax10.set_ylabel("Ratio 321/303")
fig10.savefig(paths.fpath('ratio_vs_linewidth_byfield.pdf'),
bbox_inches='tight')
ax10.errorbar(pcfittable['width'][~maps]*(8*np.log(2))**0.5,
pcfittable['h2coratio321303'][~maps],
yerr=pcfittable['eh2coratio321303'][~maps],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='b')
fig10.savefig(paths.fpath('ratio_vs_linewidth_fieldsandsources.pdf'),
bbox_inches='tight')
fig11 = pl.figure(11)
fig11.clf()
ax11 = fig11.gca()
ax11.errorbar(pcfittable['higaldusttem'][maps],
pcfittable['h2coratio321303'][maps],
yerr=pcfittable['eh2coratio321303'][maps],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='r')
ax11.set_ylim(0,200)
ax11.set_xlim(15,30)
ax11.set_xlabel("HiGal Fitted Temperature")
ax11.set_ylabel("Ratio 321/303")
fig11.savefig(paths.fpath('ratio_vs_higaltemperature_byfield.pdf'),
bbox_inches='tight')
ax11.errorbar(pcfittable['higaldusttem'][~maps],
pcfittable['h2coratio321303'][~maps],
yerr=pcfittable['eh2coratio321303'][~maps],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='b')
fig11.savefig(paths.fpath('ratio_vs_higaltemperature_fieldsandsources.pdf'),
bbox_inches='tight')
# RADEX fitting has been removed
#fig12 = pl.figure(12)
#fig12.clf()
#ax = fig12.add_subplot(1,1,1)
#ax.errorbar(pcfittable['temperature_chi2'], pcfittable['temperature'],
# yerr=pcfittable['etemperature'],
# xerr=[pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'],
# pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2']],
# linestyle='none', marker='s', linewidth=1, alpha=0.5)
#ax.plot([0,300],[0,300],'k--',linewidth=2,alpha=0.5)
#ax.set_title("DEBUG: RADEX+pyspeckit-fitted temperature vs. $\\chi^2$ temperature")
#ax.set_xlabel("$\\chi^2$ Temperature")
#ax.set_ylabel("RADEX+pyspeckit Temperature")
#ax.axis([0,350,0,350])
fig13 = pl.figure(13)
fig13.clf()
ax13 = fig13.gca()
ax13.errorbar(pcfittable['area'][maps],
pcfittable['temperature_chi2'][maps],
yerr=[pcfittable['temperature_chi2'][maps]-pcfittable['tmin1sig_chi2'][maps],
pcfittable['tmax1sig_chi2'][maps]-pcfittable['temperature_chi2'][maps]],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='r')
ax13.set_xlabel("Area (square degrees)")
ax13.set_ylabel("Temperature (K)")
ax13.set_xscale('log')
fig13.savefig(paths.fpath('temperature_vs_area_byfield.pdf'),
bbox_inches='tight')
ax13.errorbar(pcfittable['area'][~maps],
pcfittable['temperature_chi2'][~maps],
yerr=[pcfittable['temperature_chi2'][~maps]-pcfittable['tmin1sig_chi2'][~maps],
pcfittable['tmax1sig_chi2'][~maps]-pcfittable['temperature_chi2'][~maps]],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='b')
fig13.savefig(paths.fpath('temperature_vs_area_fieldsandsources.pdf'),
bbox_inches='tight')
fig14 = pl.figure(14)
fig14.clf()
ax14 = fig14.gca()
ax14.errorbar(pcfittable['higalcolumndens'][maps],
pcfittable['temperature_chi2'][maps],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[maps],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[maps]],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='r')
#ax14.plot([15,30],[15,30],'k--')
ax14.set_xlabel("HiGal Fitted Column Density")
ax14.set_ylabel("Temperature (K)")
fig14.savefig(paths.fpath('chi2_temperature_vs_higaldustcol_byfield.pdf'),
bbox_inches='tight')
ax14.errorbar(pcfittable['higalcolumndens'][~maps],
pcfittable['temperature_chi2'][~maps],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[~maps],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[~maps]],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='b')
fig14.savefig(paths.fpath('chi2_temperature_vs_higaldustcol_fieldsandsources.pdf'),
bbox_inches='tight')
# pcfittable[np.abs(pcfittable['temperature_chi2']-pcfittable['higaldusttem'])/pcfittable['higaldusttem'] < 1.5].pprint()
fig15 = pl.figure(15)
fig15.clf()
ax15 = fig15.gca()
mask = maps&~lolim_conservative
ax15.errorbar(pcfittable['tkin_turb'][mask],
pcfittable['temperature_chi2'][mask],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[mask],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[mask]],
capsize=0,
markersize=10,
markeredgecolor='none',
linestyle='none', marker='s', linewidth=0.5, alpha=0.6, color='r')
mask = maps&lolim_conservative
ax15.plot(pcfittable['tkin_turb'][mask],
pcfittable['tmin1sig_chi2'][mask],
marker='^',
markersize=10,
markeredgecolor='none',
color='r',
alpha=0.4,
linestyle='none')
mask = (maps) & (~lolim_conservative) & ((pcfittable['tmin1sig_chi2'] > pcfittable['tkin_turb']) | (pcfittable['tmax1sig_chi2'] < pcfittable['tkin_turb']))
ax15.plot(pcfittable['tkin_turb'][mask],
pcfittable['temperature_chi2'][mask],
marker='s',
markersize=15,
markeredgecolor='r',
markerfacecolor='none',
markeredgewidth=0.5,
alpha=0.4,
linestyle='none')
mask = (maps) & (lolim_conservative) & ((pcfittable['tmin1sig_chi2'] > pcfittable['tkin_turb']))
ax15.plot(pcfittable['tkin_turb'][mask],
pcfittable['tmin1sig_chi2'][mask],
marker='^',
markersize=15,
markeredgecolor='r',
markerfacecolor='none',
markeredgewidth=0.5,
alpha=0.4,
linestyle='none')
# Sources with T_predicted >> T_measured
#high_badpredictions = (pcfittable['tkin_turb'] > pcfittable['tmax1sig_chi2'])&(~lolim_conservative)
#high_badpredictions = (pcfittable['tkin_turb'] > 120)&(~lolim_conservative)
#for row,is_map in zip(pcfittable[high_badpredictions], maps[high_badpredictions]):
# xy = np.array((row['tkin_turb'], row['temperature_chi2']))
# ax15.annotate("{0}_{1}".format(row['Source_Name'], row['ComponentID']),
# xy,
# xytext=xy-(15, 7),
# color='r' if is_map else 'b'
# )
ax15.plot([0,200], [0,200], 'k--', alpha=0.5, zorder=-5)
ax15.set_xlabel("Turbulence-driven Temperature (K)")
ax15.set_ylabel("H$_2$CO Temperature (K)")
ax15.set_ylim(10,150)
ax15.set_xlim(10,180)
fig15.savefig(paths.fpath('chi2_temperature_vs_turbulenttemperature_byfield.pdf'),
bbox_inches='tight')
mask = (~maps)&(~lolim_conservative)
ax15.errorbar(pcfittable['tkin_turb'][mask],
pcfittable['temperature_chi2'][mask],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[mask],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[mask]],
capsize=0,
markeredgecolor='none',
markersize=10,
linestyle='none', marker='s', linewidth=0.5, alpha=0.6, color='b')
mask = (~maps)&lolim_conservative
ax15.plot(pcfittable['tkin_turb'][mask],
pcfittable['tmin1sig_chi2'][mask],
marker='^',
markersize=10,
markeredgecolor='none',
color='b',
alpha=0.4,
linestyle='none')
mask = (~maps) & (~lolim_conservative) & ((pcfittable['tmin1sig_chi2'] > pcfittable['tkin_turb']) | (pcfittable['tmax1sig_chi2'] < pcfittable['tkin_turb']))
ax15.plot(pcfittable['tkin_turb'][mask],
pcfittable['temperature_chi2'][mask],
marker='s',
markersize=15,
markeredgecolor='b',
markerfacecolor='none',
markeredgewidth=0.5,
alpha=0.4,
linestyle='none')
mask = (~maps) & (lolim_conservative) & ((pcfittable['tmin1sig_chi2'] > pcfittable['tkin_turb']))
ax15.plot(pcfittable['tkin_turb'][mask],
pcfittable['tmin1sig_chi2'][mask],
marker='^',
markersize=15,
markeredgecolor='b',
markerfacecolor='none',
markeredgewidth=0.5,
alpha=0.4,
linestyle='none')
ax15.set_ylim(10,150)
fig15.savefig(paths.fpath('chi2_temperature_vs_turbulenttemperature_fieldsandsources_notitle.pdf'),
bbox_inches='tight')
ax15.set_title("Hand-selected regions")
fig15.savefig(paths.fpath('chi2_temperature_vs_turbulenttemperature_fieldsandsources.pdf'),
bbox_inches='tight')
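# Note on the linewidth conversion used in the plots above (an explanatory
# aside, not from the original source): the fitted 'width' column is a
# Gaussian sigma, and the x-axes show the full width at half maximum,
#     FWHM = sigma * sqrt(8 * ln 2) ~= 2.3548 * sigma,
# which is why the data points use (8*np.log(2))**0.5 while the model
# overlays use the rounded factor 2.35:
#     >>> import numpy as np
#     >>> (8 * np.log(2)) ** 0.5
#     2.3548200450309493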
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data parser and processing.
Parses images and ground truths in a dataset into training targets and packages
them into an (image, labels) tuple for ShapeMask.
Weicheng Kuo, Anelia Angelova, Jitendra Malik, Tsung-Yi Lin
ShapeMask: Learning to Segment Novel Objects by Refining Shape Priors.
arXiv:1904.03239.
"""
import tensorflow as tf
from official.vision.detection.dataloader import anchor
from official.vision.detection.dataloader import mode_keys as ModeKeys
from official.vision.detection.dataloader import tf_example_decoder
from official.vision.detection.utils import box_utils
from official.vision.detection.utils import class_utils
from official.vision.detection.utils import dataloader_utils
from official.vision.detection.utils import input_utils
def pad_to_size(input_tensor, size):
"""Pads data with zeros to a given length at the first dimension if needed.
Args:
input_tensor: `Tensor` with any dimension.
size: `int` number for the first dimension of output Tensor.
Returns:
`Tensor` with the first dimension padded to `size` if the first dimension
is less than `size`; otherwise no padding is applied.
"""
input_shape = tf.shape(input_tensor)
padding_shape = []
# Computes the padding length on the first dimension.
padding_length = tf.maximum(0, size - tf.shape(input_tensor)[0])
assert_length = tf.Assert(
tf.greater_equal(padding_length, 0), [padding_length])
with tf.control_dependencies([assert_length]):
padding_shape.append(padding_length)
# Copies shapes of the rest of input shape dimensions.
for i in range(1, len(input_shape)):
padding_shape.append(tf.shape(input=input_tensor)[i])
# Pads input tensor to the fixed first dimension.
paddings = tf.cast(tf.zeros(padding_shape), input_tensor.dtype)
padded_tensor = tf.concat([input_tensor, paddings], axis=0)
return padded_tensor
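# Illustrative usage sketch for pad_to_size (shapes here are hypothetical,
# not part of the original module): a [2, 4] boxes tensor padded to a fixed
# first dimension of 8 keeps its trailing dimensions and gains zero rows;
# tensors whose first dimension already meets or exceeds `size` come back
# unchanged.
#     >>> boxes = tf.constant([[0., 0., 1., 1.], [0., 0., 2., 2.]])
#     >>> pad_to_size(boxes, 8).shape
#     TensorShape([8, 4])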
class Parser(object):
"""ShapeMask Parser to parse an image and its annotations into a dictionary of tensors."""
def __init__(self,
output_size,
min_level,
max_level,
num_scales,
aspect_ratios,
anchor_size,
use_category=True,
outer_box_scale=1.0,
box_jitter_scale=0.025,
num_sampled_masks=8,
mask_crop_size=32,
mask_min_level=3,
mask_max_level=5,
upsample_factor=4,
match_threshold=0.5,
unmatched_threshold=0.5,
aug_rand_hflip=False,
aug_scale_min=1.0,
aug_scale_max=1.0,
skip_crowd_during_training=True,
max_num_instances=100,
use_bfloat16=True,
mask_train_class='all',
mode=None):
"""Initializes parameters for parsing annotations in the dataset.
Args:
output_size: `Tensor` or `list` for [height, width] of output image. The
output_size should be divisible by the largest feature stride 2^max_level.
min_level: `int` number of minimum level of the output feature pyramid.
max_level: `int` number of maximum level of the output feature pyramid.
num_scales: `int` number representing intermediate scales added
on each level. For instance, num_scales=2 adds one additional
intermediate anchor scale [2^0, 2^0.5] on each level.
aspect_ratios: `list` of float numbers representing the aspect ratio
anchors added on each level. The number indicates the ratio of width to
height. For instance, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors
on each scale level.
anchor_size: `float` number representing the scale of size of the base
anchor to the feature stride 2^level.
use_category: if `False`, treats all objects in all classes as one
foreground category.
outer_box_scale: `float` number in a range of [1.0, inf) representing
the scale from object box to outer box. The mask branch predicts
instance mask enclosed in outer box.
box_jitter_scale: `float` number representing the noise magnitude to
jitter the training groundtruth boxes for mask branch.
num_sampled_masks: `int` number of sampled masks for training.
mask_crop_size: `list` for [height, width] of output training masks.
mask_min_level: `int` number indicating the minimum feature level to
obtain instance features.
mask_max_level: `int` number indicating the maximum feature level to
obtain instance features.
upsample_factor: `int` factor of upsampling the fine mask predictions.
match_threshold: `float` number between 0 and 1 representing the
lower-bound threshold to assign positive labels for anchors. An anchor
with a score over the threshold is labeled positive.
unmatched_threshold: `float` number between 0 and 1 representing the
upper-bound threshold to assign negative labels for anchors. An anchor
with a score below the threshold is labeled negative.
aug_rand_hflip: `bool`, if True, augment training with random
horizontal flip.
aug_scale_min: `float`, the minimum scale applied to `output_size` for
data augmentation during training.
aug_scale_max: `float`, the maximum scale applied to `output_size` for
data augmentation during training.
skip_crowd_during_training: `bool`, if True, skip annotations whose
`is_crowd` flag equals 1 during training.
max_num_instances: `int`, the maximum number of instances in an
image. The groundtruth data will be padded to `max_num_instances`.
use_bfloat16: `bool`, if True, cast output image to tf.bfloat16.
mask_train_class: a string of experiment mode: `all`, `voc` or `nonvoc`.
mode: a ModeKeys. Specifies if this is training, evaluation, prediction
or prediction with groundtruths in the outputs.
"""
self._mode = mode
self._mask_train_class = mask_train_class
self._max_num_instances = max_num_instances
self._skip_crowd_during_training = skip_crowd_during_training
self._is_training = (mode == ModeKeys.TRAIN)
self._example_decoder = tf_example_decoder.TfExampleDecoder(
include_mask=True)
# Anchor.
self._output_size = output_size
self._min_level = min_level
self._max_level = max_level
self._num_scales = num_scales
self._aspect_ratios = aspect_ratios
self._anchor_size = anchor_size
self._match_threshold = match_threshold
self._unmatched_threshold = unmatched_threshold
# Data augmentation.
self._aug_rand_hflip = aug_rand_hflip
self._aug_scale_min = aug_scale_min
self._aug_scale_max = aug_scale_max
# Device.
self._use_bfloat16 = use_bfloat16
# ShapeMask specific.
# Control of which category to use.
self._use_category = use_category
self._num_sampled_masks = num_sampled_masks
self._mask_crop_size = mask_crop_size
self._mask_min_level = mask_min_level
self._mask_max_level = mask_max_level
self._outer_box_scale = outer_box_scale
self._box_jitter_scale = box_jitter_scale
self._up_sample_factor = upsample_factor
# Data is parsed depending on the model Modekey.
if mode == ModeKeys.TRAIN:
self._parse_fn = self._parse_train_data
elif mode == ModeKeys.EVAL:
self._parse_fn = self._parse_eval_data
elif mode == ModeKeys.PREDICT or mode == ModeKeys.PREDICT_WITH_GT:
self._parse_fn = self._parse_predict_data
else:
raise ValueError('mode is not defined.')
def __call__(self, value):
"""Parses data to an image and associated training labels.
Args:
value: a string tensor holding a serialized tf.Example proto.
Returns:
inputs:
image: image tensor that is preprocessed to have normalized values and
dimension [output_size[0], output_size[1], 3]
mask_boxes: sampled boxes that tightly enclose the training masks. The
box is represented in [y1, x1, y2, x2] format. The tensor is sampled
to the fixed dimension [self._num_sampled_masks, 4].
mask_outer_boxes: loose boxes that enclose the sampled tight boxes. The
box is represented in [y1, x1, y2, x2] format. The tensor is sampled
to the fixed dimension [self._num_sampled_masks, 4].
mask_classes: the class ids of sampled training masks. The tensor has
shape [self._num_sampled_masks].
labels:
cls_targets: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, anchors_per_location]. The height_l and
width_l represent the dimension of class logits at l-th level.
box_targets: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, anchors_per_location * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
num_positives: number of positive anchors in the image.
anchor_boxes: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, 4] representing anchor boxes at each level.
image_scale: 2D float `Tensor` representing scale factors that apply
to [height, width] of input image.
mask_targets: training binary mask targets. The tensor has shape
[self._num_sampled_masks, self._mask_crop_size, self._mask_crop_size].
mask_is_valid: the binary tensor to indicate if the sampled masks are
valid. The sampled masks are invalid when no mask annotations are
included in the image. The tensor has shape [1].
groundtruths:
source_id: source image id. Default value -1 if the source id is empty
in the groundtruth annotation.
boxes: groundtruth bounding box annotations. The box is represented in
[y1, x1, y2, x2] format. The tensor is padded with -1 to the fixed
dimension [self._max_num_instances, 4].
classes: groundtruth classes annotations. The tensor is padded with
-1 to the fixed dimension [self._max_num_instances].
areas: groundtruth areas annotations. The tensor is padded with -1
to the fixed dimension [self._max_num_instances].
is_crowds: groundtruth annotations to indicate if an annotation
represents a group of instances by value {0, 1}. The tensor is
padded with 0 to the fixed dimension [self._max_num_instances].
"""
with tf.name_scope('parser'):
data = self._example_decoder.decode(value)
return self._parse_fn(data)
def _parse_train_data(self, data):
"""Parse data for ShapeMask training."""
classes = data['groundtruth_classes']
boxes = data['groundtruth_boxes']
masks = data['groundtruth_instance_masks']
is_crowds = data['groundtruth_is_crowd']
# Skips annotations with `is_crowd` = True.
if self._skip_crowd_during_training and self._is_training:
num_groundtruths = tf.shape(classes)[0]
with tf.control_dependencies([num_groundtruths, is_crowds]):
indices = tf.cond(
tf.greater(tf.size(is_crowds), 0),
lambda: tf.where(tf.logical_not(is_crowds))[:, 0],
lambda: tf.cast(tf.range(num_groundtruths), tf.int64))
classes = tf.gather(classes, indices)
boxes = tf.gather(boxes, indices)
masks = tf.gather(masks, indices)
# Gets original image and its size.
image = data['image']
image_shape = tf.shape(image)[0:2]
# If not using category, makes all categories with id = 0.
if not self._use_category:
classes = tf.cast(tf.greater(classes, 0), dtype=tf.float32)
# Normalizes image with mean and std pixel values.
image = input_utils.normalize_image(image)
# Flips image randomly during training.
if self._aug_rand_hflip:
image, boxes, masks = input_utils.random_horizontal_flip(
image, boxes, masks)
# Converts boxes from normalized coordinates to pixel coordinates.
boxes = box_utils.denormalize_boxes(boxes, image_shape)
# Resizes and crops image.
image, image_info = input_utils.resize_and_crop_image(
image,
self._output_size,
self._output_size,
aug_scale_min=self._aug_scale_min,
aug_scale_max=self._aug_scale_max)
image_scale = image_info[2, :]
offset = image_info[3, :]
# Resizes and crops boxes and masks.
boxes = input_utils.resize_and_crop_boxes(
boxes, image_scale, image_info[1, :], offset)
# Filters out ground truth boxes that are all zeros.
indices = box_utils.get_non_empty_box_indices(boxes)
boxes = tf.gather(boxes, indices)
classes = tf.gather(classes, indices)
masks = tf.gather(masks, indices)
# Assigns anchors.
input_anchor = anchor.Anchor(
self._min_level, self._max_level, self._num_scales,
self._aspect_ratios, self._anchor_size, self._output_size)
anchor_labeler = anchor.AnchorLabeler(
input_anchor, self._match_threshold, self._unmatched_threshold)
(cls_targets,
box_targets,
num_positives) = anchor_labeler.label_anchors(
boxes,
tf.cast(tf.expand_dims(classes, axis=1), tf.float32))
# Sample groundtruth masks/boxes/classes for mask branch.
num_masks = tf.shape(masks)[0]
mask_shape = tf.shape(masks)[1:3]
# Pad sampled boxes/masks/classes to a constant batch size.
padded_boxes = pad_to_size(boxes, self._num_sampled_masks)
padded_classes = pad_to_size(classes, self._num_sampled_masks)
padded_masks = pad_to_size(masks, self._num_sampled_masks)
# Randomly sample groundtruth masks for mask branch training. For the image
# without groundtruth masks, it will sample the dummy padded tensors.
rand_indices = tf.random.shuffle(
tf.range(tf.maximum(num_masks, self._num_sampled_masks)))
rand_indices = tf.math.mod(rand_indices, tf.maximum(num_masks, 1))
rand_indices = rand_indices[0:self._num_sampled_masks]
rand_indices = tf.reshape(rand_indices, [self._num_sampled_masks])
sampled_boxes = tf.gather(padded_boxes, rand_indices)
sampled_classes = tf.gather(padded_classes, rand_indices)
sampled_masks = tf.gather(padded_masks, rand_indices)
# Jitter the sampled boxes to mimic the noisy detections.
sampled_boxes = box_utils.jitter_boxes(
sampled_boxes, noise_scale=self._box_jitter_scale)
sampled_boxes = box_utils.clip_boxes(sampled_boxes, self._output_size)
# Compute mask targets in feature crop. A feature crop fully contains a
# sampled box.
mask_outer_boxes = box_utils.compute_outer_boxes(
sampled_boxes, tf.shape(image)[0:2], scale=self._outer_box_scale)
mask_outer_boxes = box_utils.clip_boxes(mask_outer_boxes, self._output_size)
# Compensate the offset of mask_outer_boxes to map it back to original image
# scale.
mask_outer_boxes_ori = mask_outer_boxes
mask_outer_boxes_ori += tf.tile(tf.expand_dims(offset, axis=0), [1, 2])
mask_outer_boxes_ori /= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2])
norm_mask_outer_boxes_ori = box_utils.normalize_boxes(
mask_outer_boxes_ori, mask_shape)
# Set sampled_masks shape to [batch_size, height, width, 1].
sampled_masks = tf.cast(tf.expand_dims(sampled_masks, axis=-1), tf.float32)
mask_targets = tf.image.crop_and_resize(
sampled_masks,
norm_mask_outer_boxes_ori,
box_indices=tf.range(self._num_sampled_masks),
crop_size=[self._mask_crop_size, self._mask_crop_size],
method='bilinear',
extrapolation_value=0,
name='train_mask_targets')
mask_targets = tf.where(tf.greater_equal(mask_targets, 0.5),
tf.ones_like(mask_targets),
tf.zeros_like(mask_targets))
mask_targets = tf.squeeze(mask_targets, axis=-1)
if self._up_sample_factor > 1:
fine_mask_targets = tf.image.crop_and_resize(
sampled_masks,
norm_mask_outer_boxes_ori,
box_indices=tf.range(self._num_sampled_masks),
crop_size=[
self._mask_crop_size * self._up_sample_factor,
self._mask_crop_size * self._up_sample_factor
],
method='bilinear',
extrapolation_value=0,
name='train_mask_targets')
fine_mask_targets = tf.where(
tf.greater_equal(fine_mask_targets, 0.5),
tf.ones_like(fine_mask_targets), tf.zeros_like(fine_mask_targets))
fine_mask_targets = tf.squeeze(fine_mask_targets, axis=-1)
else:
fine_mask_targets = mask_targets
# If bfloat16 is used, casts input image to tf.bfloat16.
if self._use_bfloat16:
image = tf.cast(image, dtype=tf.bfloat16)
valid_image = tf.cast(tf.not_equal(num_masks, 0), tf.int32)
if self._mask_train_class == 'all':
mask_is_valid = valid_image * tf.ones_like(sampled_classes, tf.int32)
else:
# Get the intersection of sampled classes with training splits.
mask_valid_classes = tf.cast(
tf.expand_dims(
class_utils.coco_split_class_ids(self._mask_train_class), 1),
sampled_classes.dtype)
match = tf.reduce_any(
tf.equal(tf.expand_dims(sampled_classes, 0), mask_valid_classes), 0)
mask_is_valid = valid_image * tf.cast(match, tf.int32)
# Packs labels for model_fn outputs.
labels = {
'cls_targets': cls_targets,
'box_targets': box_targets,
'anchor_boxes': input_anchor.multilevel_boxes,
'num_positives': num_positives,
'image_info': image_info,
# For ShapeMask.
'mask_targets': mask_targets,
'fine_mask_targets': fine_mask_targets,
'mask_is_valid': mask_is_valid,
}
inputs = {
'image': image,
'image_info': image_info,
'mask_boxes': sampled_boxes,
'mask_outer_boxes': mask_outer_boxes,
'mask_classes': sampled_classes,
}
return inputs, labels
def _parse_predict_data(self, data):
"""Parse data for ShapeMask training."""
classes = data['groundtruth_classes']
boxes = data['groundtruth_boxes']
masks = data['groundtruth_instance_masks']
# Gets original image and its size.
image = data['image']
image_shape = tf.shape(image)[0:2]
# If not using category, makes all categories with id = 0.
if not self._use_category:
classes = tf.cast(tf.greater(classes, 0), dtype=tf.float32)
# Normalizes image with mean and std pixel values.
image = input_utils.normalize_image(image)
# Converts boxes from normalized coordinates to pixel coordinates.
boxes = box_utils.denormalize_boxes(boxes, image_shape)
# Resizes and crops image.
image, image_info = input_utils.resize_and_crop_image(
image,
self._output_size,
self._output_size,
aug_scale_min=1.0,
aug_scale_max=1.0)
image_scale = image_info[2, :]
offset = image_info[3, :]
# Resizes and crops boxes and masks.
boxes = input_utils.resize_and_crop_boxes(
boxes, image_scale, image_info[1, :], offset)
masks = input_utils.resize_and_crop_masks(
tf.expand_dims(masks, axis=-1), image_scale, self._output_size, offset)
# Filters out ground truth boxes that are all zeros.
indices = box_utils.get_non_empty_box_indices(boxes)
boxes = tf.gather(boxes, indices)
classes = tf.gather(classes, indices)
# Assigns anchors.
input_anchor = anchor.Anchor(
self._min_level, self._max_level, self._num_scales,
self._aspect_ratios, self._anchor_size, self._output_size)
anchor_labeler = anchor.AnchorLabeler(
input_anchor, self._match_threshold, self._unmatched_threshold)
# If bfloat16 is used, casts input image to tf.bfloat16.
if self._use_bfloat16:
image = tf.cast(image, dtype=tf.bfloat16)
labels = {
'anchor_boxes': input_anchor.multilevel_boxes,
'image_info': image_info,
}
if self._mode == ModeKeys.PREDICT_WITH_GT:
# Converts boxes from normalized coordinates to pixel coordinates.
groundtruths = {
'source_id': data['source_id'],
'height': data['height'],
'width': data['width'],
'num_detections': tf.shape(data['groundtruth_classes']),
'boxes': box_utils.denormalize_boxes(
data['groundtruth_boxes'], image_shape),
'classes': data['groundtruth_classes'],
# 'masks': tf.squeeze(masks, axis=-1),
'areas': data['groundtruth_area'],
'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32),
}
groundtruths['source_id'] = dataloader_utils.process_source_id(
groundtruths['source_id'])
groundtruths = dataloader_utils.pad_groundtruths_to_fixed_size(
groundtruths, self._max_num_instances)
# Computes training labels.
(cls_targets,
box_targets,
num_positives) = anchor_labeler.label_anchors(
boxes,
tf.cast(tf.expand_dims(classes, axis=1), tf.float32))
# Packs labels for model_fn outputs.
labels.update({
'cls_targets': cls_targets,
'box_targets': box_targets,
'num_positives': num_positives,
'groundtruths': groundtruths,
})
inputs = {
'image': image,
'image_info': image_info,
}
return inputs, labels
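# Illustrative wiring sketch (the file name and parameter values below are
# hypothetical; only the Parser/ModeKeys names come from this module): the
# parser is typically mapped over serialized tf.Example records.
#     >>> parser = Parser(output_size=[640, 640], min_level=3, max_level=7,
#     ...                 num_scales=1, aspect_ratios=[1.0, 2.0, 0.5],
#     ...                 anchor_size=4.0, use_bfloat16=False,
#     ...                 mode=ModeKeys.TRAIN)
#     >>> dataset = (tf.data.TFRecordDataset('train.tfrecord')
#     ...            .map(parser, num_parallel_calls=tf.data.experimental.AUTOTUNE))
# Each element of `dataset` is then the (inputs, labels) tuple documented in
# Parser.__call__.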
| |
import re
import numpy as np
from .utils import str2array, stringfy, sort_intervals
def _bed6_to_GeneAnnot(bed6):
# print(bed6)
lines = bed6.splitlines()
g_annot = None
for line in lines:
chrom, start, end, name, score, strand = line.split('\t')
if g_annot is None:
g_annot = InteractiveAnnotation(start, end, strand,
chrom=chrom, transcript_id=name, starts_offset=0)
else:
g_annot.starts = np.hstack([g_annot.starts, int(start)])
g_annot.ends = np.hstack([g_annot.ends, int(end)])
return g_annot
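# Illustrative input sketch (hypothetical coordinates): _bed6_to_GeneAnnot
# expects one tab-separated BED6 line per exon of a single transcript, e.g.
#     chr1    100    200    TX1    0    +
#     chr1    300    400    TX1    0    +
# The first line seeds the InteractiveAnnotation (with starts_offset=0, so the
# 0-based BED starts are kept as-is); each later line appends its start/end to
# the exon arrays.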
class InteractiveAnnotation:
def __init__(self, starts, ends, strand, cds_starts=None, cds_ends=None,
starts_offset=1, orientation='Unknown', **kwargs):
"""
An interactive and flexible genomic annotation.
Parameters
----------
starts
ends
strand
cds_starts
cds_ends
starts_offset: 0 or 1 (default = 1)
GTF/GFF => use 1
BED => use 0
extb => use 0
orientation
kwargs
"""
# self.starts = np.array([np.nan])
# self.ends = np.array([np.nan])
# self.strand = None
# self.cds_starts = np.array([np.nan])
# self.cds_ends = np.array([np.nan])
self.chrom = None
self.transcript_id = None
self.gene_id = None
self.thickStart = None
self.thickEnd = None
# create/update all custom defined kwargs
for k, v in kwargs.items():
setattr(self, k, v)
if type(starts) == str:
starts = str2array(starts)
ends = str2array(ends)
self.starts = starts.copy()
self.ends = ends.copy()
if not re.match(r'-|\+', strand):
raise Exception('invalid strand value: %s' % strand)
self.strand = strand
# assert len(self.starts) == len(self.ends)
# make starts 0-based
self.starts -= starts_offset
if cds_starts and cds_ends:
cds_starts = str2array(cds_starts)
cds_ends = str2array(cds_ends)
assert len(cds_starts) == len(cds_ends)
# make starts 0-based
cds_starts -= starts_offset
self.cds_starts = cds_starts
self.cds_ends = cds_ends
self.thickStart = np.min(cds_starts)
self.thickEnd = np.max(cds_ends)
else:
self.cds_starts = np.array([np.nan])
self.cds_ends = np.array([np.nan])
self._fix_orientation(orientation)
# self._reverse()
def __len__(self):
return len(self.starts)
def blockCount(self):
return len(self.starts)
def blockSizes(self):
return self.ends - self.starts
@property
def exons(self):
exons = self.ends - self.starts
return exons
def internal_exons(self):
# first, *internal, last = self.exons
# return internal
return self.exons[1:-1]
@property
def introns(self):
# introns = np.array([np.nan])
# # introns = None
if len(self) > 1:
return self.starts[1:] - self.ends[:-1]
return np.array([np.nan])
@property
def cds(self):
if np.isnan(np.sum(self.cds_starts)): # and self.cds_ends:
return np.array([np.nan])
else:
return self.cds_ends - self.cds_starts
@property
def orf_blocks(self):
if np.isnan(np.sum(self.cds_starts)):
return np.array([np.nan])
else:
return self.cds_ends - self.cds_starts
@property
def exon_contrib_to_orf(self):
# if self.cds == 'NA':
if np.isnan(np.sum(self.cds_starts)):
return np.zeros_like(self.exons)
else:
start = self.cds_starts[0]
stop = self.cds_ends[-1]
contrib = np.zeros_like(self.exons, dtype=np.float32)
if len(self) == 1:
coding = stop - start
total = self.ends[-1] - self.starts[0]
return np.array([coding/total])
else:
# find exon with start codon
for i, g_start in enumerate(self.starts):
if start >= g_start:
startIndex = i
break
# find exon with stop codon
for i, g_end in reversed(list(enumerate(self.ends))):
if stop > g_end:
stopIndex = i + 1
break
elif stop == g_end:
stopIndex = i
break
for i in range(self.blockCount()):
if i < startIndex:
contrib[i] = 0
elif i == startIndex:
contrib[i] = (self.ends[i] - start) / self.orf_size
elif i < stopIndex:
contrib[i] = self.exons[i] / self.orf_size
elif i == stopIndex:
# if stop == self.ends[i]:
# contrib[i] = 1
# else:
# contrib[i] = (stop - self.starts[i]) / (self.ends[i] - self.starts[i])
contrib[i] = (stop - self.starts[i]) / self.orf_size
else:
contrib[i] = 0
return contrib
def _find_orf_index(self):
start = self.thickStart
stop = self.thickEnd
for i, g_start in enumerate(self.starts):
if start >= g_start:
startIndex = i
break
# find exon with stop codon
for i, g_end in reversed(list(enumerate(self.ends))):
if stop > g_end:
stopIndex = i + 1
break
elif stop == g_end:
stopIndex = i
break
return startIndex, stopIndex
@property
def orf_size(self):
try:
return sum(self.orf_blocks)
except TypeError:
return np.array([np.nan])
@property
def start(self):
return self.starts[0]
@property
def end(self):
return self.ends[-1]
# @end.setter
# def end(self, value):
# self.ends[-1] = value
# def BedTool(self, format='bed'):
# try:
# from pybedtools import BedTool
# except ImportError:
# raise ImportError("pybedtools is required for this function")
#
# return BedTool(self.format(format), from_string=True)
def merge_small_gaps(self, gap=15):
"""
Merge gaps smaller than or equal to the specified size.
Useful to remove gaps that should not be treated as introns.
Reference: http://codereview.stackexchange.com/a/69249
Parameters
----------
gap: int
maximum gap size (in bases) that will be merged
Returns
-------
the annotation itself, with small gaps merged in place
"""
#TODO: add an argument to allow this to be done to cds_starts / cds_ends?
intervals = [(s, e) for s, e in zip(self.starts, self.ends)]
# sorted_by_lower_bound = sorted(intervals, key=lambda tup: tup[0])
# the intervals are assumed to be sorted already; re-enable the sort above if the input turns out to be unsorted
merged = []
for higher in intervals: # sorted_by_lower_bound: # intervals already sorted.
if not merged:
merged.append(higher)
else:
lower = merged[-1]
# test for intersection between lower and higher:
# we know via sorting that lower[0] <= higher[0]
if higher[0] <= lower[1] + gap:
upper_bound = max(lower[1], higher[1])
merged[-1] = (lower[0], upper_bound) # replace by merged interval
else:
merged.append(higher)
starts, ends = zip(*merged)
# self.starts = np.array(starts, dtype=np.int64)
# self.ends = np.array(ends, dtype=np.int64)
setattr(self, 'starts', np.array(starts, dtype=np.int64))
setattr(self, 'ends', np.array(ends, dtype=np.int64))
return self
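# Worked example (hypothetical coordinates, a sketch rather than a test): with
# exon blocks (0, 100) and (110, 200), the 10 bp gap is merged for gap=15 but
# kept for gap=5.
#     >>> a = InteractiveAnnotation(np.array([0, 110]), np.array([100, 200]),
#     ...                           '+', starts_offset=0)
#     >>> a.merge_small_gaps(gap=15).exons
#     array([200])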
def _fix_orientation(self, orientation='Unknown'):
if len(self) > 1:
if orientation == 'transcript':
self._reverse()
elif orientation == 'Unknown':
diff = self.starts[-1] - self.starts[0]
if diff < 0:
self._reverse()
if orientation == 'Unknown' and len(self) > 1:
#if orientation != 'genomic' and self.blockCount() > 1:
self.starts, self.ends = sort_intervals(self.starts, self.ends)
if not np.isnan(np.sum(self.cds_starts)):
self.cds_starts, self.cds_ends = sort_intervals(self.cds_starts, self.cds_ends)
def _reverse(self):
# print('reversing')
self.starts = self.starts[::-1]
self.ends = self.ends[::-1]
if not np.isnan(np.sum(self.cds_starts)):
self.cds_starts = self.cds_starts[::-1]
self.cds_ends = self.cds_ends[::-1]
def __str__(self):
return '{} {}'.format(self.transcript_id, stringfy(self.exons))
def format(self, format):
if format == 'extb':
chr_str = '%s:%s-%s' % (self.chrom, self.start + 1, self.end)
extb = [
# self.internal_exons,
chr_str,
self.strand,
self.transcript_id,
self.gene_id,
self.blockCount(),
sum(self.exons),
self.exons,
self.introns,
# self.cds,
# self.starts,
]
return '\t'.join(stringfy(x) for x in extb)
elif format == 'bed':
# if np.isnan(np.sum(self.cds_starts)):
# thickStart = self.start
# thickStop = self.start
#
# else:
# thickStart = self.cds_starts[0]
# thickStop = self.cds_ends[-1]
#
if hasattr(self, 'itemRgb'):
item_rgb = self.itemRgb
else:
item_rgb = "200,155,55"
if self.thickStart and self.thickEnd:
thickStart = self.thickStart
thickEnd = self.thickEnd
else:
thickStart = thickEnd = self.start
bed = [
self.chrom,
self.start,
self.end,
self.transcript_id,
"1000",
self.strand,
thickStart,
thickEnd,
item_rgb,
self.blockCount(),
self.exons,
self.starts - self.start
]
return '\t'.join(stringfy(x) for x in bed)
elif format == 'bed6':
block_count = self.blockCount()
bed = ['']*block_count
for idx in range(block_count):
line = [
self.chrom,
self.starts[idx],
self.ends[idx],
self.transcript_id,
"1000",
self.strand]
bed[idx] = '\t'.join(stringfy(x) for x in line)
return '\n'.join(bed)
elif format == 'intron-bed':
block_count = self.blockCount()
if block_count > 1:
intron_count = block_count - 1
intron_names = np.arange(1, intron_count + 1)
if self.strand == '-':
intron_names = intron_names[::-1]
bed = ['']*intron_count
for idx in range(intron_count):
line = [
self.chrom,
self.ends[idx],
self.starts[idx + 1],
'{}_Intron_{:03d}'.format(self.transcript_id, intron_names[idx]),
"999",
self.strand]
bed[idx] = '\t'.join(stringfy(x) for x in line)
return '\n'.join(bed)
return ''
else:
return super(InteractiveAnnotation, self).__format__(format)
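# Illustrative usage sketch (hypothetical annotation; the exact string output
# depends on the stringfy helper imported from .utils):
#     >>> tx = InteractiveAnnotation(np.array([100, 300]), np.array([200, 400]),
#     ...                            '+', chrom='chr1', transcript_id='TX1',
#     ...                            starts_offset=0)
#     >>> tx.format('bed6')   # one tab-separated BED6 line per exon
#     >>> tx.format('bed')    # a single 12-column BED line for the whole transcript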
| |
#
# Module providing the `SyncManager` class for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
#
# Imports
#
import os
import sys
import weakref
import threading
import array
import Queue
from traceback import format_exc
from multiprocessing import Process, current_process, active_children, Pool, util, connection
from multiprocessing.process import AuthenticationString
from multiprocessing.forking import exit, Popen, assert_spawning, ForkingPickler
from multiprocessing.util import Finalize, info
try:
from cPickle import PicklingError
except ImportError:
from pickle import PicklingError
#
# Register some things for pickling
#
def reduce_array(a):
return array.array, (a.typecode, a.tostring())
ForkingPickler.register(array.array, reduce_array)
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
#
# Type for identifying shared objects
#
class Token(object):
'''
Type to uniquely identify a shared object
'''
__slots__ = ('typeid', 'address', 'id')
def __init__(self, typeid, address, id):
(self.typeid, self.address, self.id) = (typeid, address, id)
def __getstate__(self):
return (self.typeid, self.address, self.id)
def __setstate__(self, state):
(self.typeid, self.address, self.id) = state
def __repr__(self):
return 'Token(typeid=%r, address=%r, id=%r)' % \
(self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
'''
Send a message to manager using connection `c` and return response
'''
c.send((id, methodname, args, kwds))
kind, result = c.recv()
if kind == '#RETURN':
return result
raise convert_to_error(kind, result)
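# Wire-format sketch (values are hypothetical; the message kinds come from the
# code above and below): the client sends a 4-tuple and the server replies with
# a (kind, result) pair.
#     c.send(('270f1a2b', 'get', ('key',), {}))   # ident, methodname, args, kwds
#     kind, result = c.recv()                     # e.g. ('#RETURN', value)
# Any kind other than '#RETURN' is turned into an exception by
# convert_to_error() below.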
def convert_to_error(kind, result):
if kind == '#ERROR':
return result
elif kind == '#TRACEBACK':
assert type(result) is str
return RemoteError(result)
elif kind == '#UNSERIALIZABLE':
assert type(result) is str
return RemoteError('Unserializable message: %s\n' % result)
else:
return ValueError('Unrecognized message type')
class RemoteError(Exception):
def __str__(self):
return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
#
# Functions for finding the method names of an object
#
def all_methods(obj):
'''
Return a list of names of methods of `obj`
'''
temp = []
for name in dir(obj):
func = getattr(obj, name)
if hasattr(func, '__call__'):
temp.append(name)
return temp
def public_methods(obj):
'''
Return a list of names of methods of `obj` which do not start with '_'
'''
return [name for name in all_methods(obj) if name[0] != '_']
#
# Server which is run in a process controlled by a manager
#
class Server(object):
'''
Server class which runs in a process controlled by a manager object
'''
public = ['shutdown', 'create', 'accept_connection', 'get_methods',
'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
def __init__(self, registry, address, authkey, serializer):
assert isinstance(authkey, bytes)
self.registry = registry
self.authkey = AuthenticationString(authkey)
Listener, Client = listener_client[serializer]
# do authentication later
self.listener = Listener(address=address, backlog=5)
self.address = self.listener.address
self.id_to_obj = {'0': (None, ())}
self.id_to_refcount = {}
self.mutex = threading.RLock()
self.stop = 0
def serve_forever(self):
'''
Run the server forever
'''
current_process()._manager_server = self
try:
try:
while 1:
try:
c = self.listener.accept()
except (OSError, IOError):
continue
t = threading.Thread(target=self.handle_request, args=(c,))
t.daemon = True
t.start()
except (KeyboardInterrupt, SystemExit):
pass
finally:
self.stop = 999
self.listener.close()
def handle_request(self, c):
'''
Handle a new connection
'''
funcname = result = request = None
try:
connection.deliver_challenge(c, self.authkey)
connection.answer_challenge(c, self.authkey)
request = c.recv()
ignore, funcname, args, kwds = request
assert funcname in self.public, '%r unrecognized' % funcname
func = getattr(self, funcname)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
try:
result = func(c, *args, **kwds)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
msg = ('#RETURN', result)
try:
c.send(msg)
except Exception, e:
try:
c.send(('#TRACEBACK', format_exc()))
except Exception:
pass
util.info('Failure to send message: %r', msg)
util.info(' ... request was %r', request)
util.info(' ... exception was %r', e)
c.close()
def serve_client(self, conn):
'''
Handle requests from the proxies in a particular process/thread
'''
util.debug('starting server thread to service %r',
threading.current_thread().name)
recv = conn.recv
send = conn.send
id_to_obj = self.id_to_obj
while not self.stop:
try:
methodname = obj = None
request = recv()
ident, methodname, args, kwds = request
obj, exposed, gettypeid = id_to_obj[ident]
if methodname not in exposed:
raise AttributeError(
'method %r of %r object is not in exposed=%r' %
(methodname, type(obj), exposed)
)
function = getattr(obj, methodname)
try:
res = function(*args, **kwds)
except Exception, e:
msg = ('#ERROR', e)
else:
typeid = gettypeid and gettypeid.get(methodname, None)
if typeid:
rident, rexposed = self.create(conn, typeid, res)
token = Token(typeid, self.address, rident)
msg = ('#PROXY', (rexposed, token))
else:
msg = ('#RETURN', res)
except AttributeError:
if methodname is None:
msg = ('#TRACEBACK', format_exc())
else:
try:
fallback_func = self.fallback_mapping[methodname]
result = fallback_func(
self, conn, ident, obj, *args, **kwds
)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', format_exc())
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.current_thread().name)
sys.exit(0)
except Exception:
msg = ('#TRACEBACK', format_exc())
try:
try:
send(msg)
except Exception, e:
send(('#UNSERIALIZABLE', repr(msg)))
except Exception, e:
util.info('exception in thread serving %r',
threading.current_thread().name)
util.info(' ... message was %r', msg)
util.info(' ... exception was %r', e)
conn.close()
sys.exit(1)
def fallback_getvalue(self, conn, ident, obj):
return obj
def fallback_str(self, conn, ident, obj):
return str(obj)
def fallback_repr(self, conn, ident, obj):
return repr(obj)
fallback_mapping = {
'__str__':fallback_str,
'__repr__':fallback_repr,
'#GETVALUE':fallback_getvalue
}
def dummy(self, c):
pass
def debug_info(self, c):
'''
Return some info --- useful to spot problems with refcounting
'''
self.mutex.acquire()
try:
result = []
keys = self.id_to_obj.keys()
keys.sort()
for ident in keys:
if ident != '0':
result.append(' %s: refcount=%s\n %s' %
(ident, self.id_to_refcount[ident],
str(self.id_to_obj[ident][0])[:75]))
return '\n'.join(result)
finally:
self.mutex.release()
def number_of_objects(self, c):
'''
Number of shared objects
'''
return len(self.id_to_obj) - 1 # don't count ident='0'
def shutdown(self, c):
'''
Shutdown this process
'''
try:
try:
util.debug('manager received shutdown message')
c.send(('#RETURN', None))
if sys.stdout != sys.__stdout__:
util.debug('resetting stdout, stderr')
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
util._run_finalizers(0)
for p in active_children():
util.debug('terminating a child process of manager')
p.terminate()
for p in active_children():
util.debug('terminating a child process of manager')
p.join()
util._run_finalizers()
util.info('manager exiting with exitcode 0')
except:
import traceback
traceback.print_exc()
finally:
exit(0)
def create(self, c, typeid, *args, **kwds):
'''
Create a new shared object and return its id
'''
self.mutex.acquire()
try:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
if callable is None:
assert len(args) == 1 and not kwds
obj = args[0]
else:
obj = callable(*args, **kwds)
if exposed is None:
exposed = public_methods(obj)
if method_to_typeid is not None:
assert type(method_to_typeid) is dict
exposed = list(exposed) + list(method_to_typeid)
ident = '%x' % id(obj) # convert to string because xmlrpclib
# only has 32 bit signed integers
util.debug('%r callable returned object with id %r', typeid, ident)
self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
if ident not in self.id_to_refcount:
self.id_to_refcount[ident] = 0
# increment the reference count immediately, to avoid
# this object being garbage collected before a Proxy
# object for it can be created. The caller of create()
# is responsible for doing a decref once the Proxy object
# has been created.
self.incref(c, ident)
return ident, tuple(exposed)
finally:
self.mutex.release()
def get_methods(self, c, token):
'''
Return the methods of the shared object indicated by token
'''
return tuple(self.id_to_obj[token.id][1])
def accept_connection(self, c, name):
'''
Spawn a new thread to serve this connection
'''
threading.current_thread().name = name
c.send(('#RETURN', None))
self.serve_client(c)
def incref(self, c, ident):
self.mutex.acquire()
try:
self.id_to_refcount[ident] += 1
finally:
self.mutex.release()
def decref(self, c, ident):
self.mutex.acquire()
try:
assert self.id_to_refcount[ident] >= 1
self.id_to_refcount[ident] -= 1
if self.id_to_refcount[ident] == 0:
del self.id_to_obj[ident], self.id_to_refcount[ident]
util.debug('disposing of obj with id %r', ident)
finally:
self.mutex.release()
#
# Class to represent state of a manager
#
class State(object):
__slots__ = ['value']
INITIAL = 0
STARTED = 1
SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
listener_client = {
'pickle' : (connection.Listener, connection.Client),
'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
}
#
# Definition of BaseManager
#
class BaseManager(object):
'''
Base class for managers
'''
_registry = {}
_Server = Server
def __init__(self, address=None, authkey=None, serializer='pickle'):
if authkey is None:
authkey = current_process().authkey
self._address = address # XXX not final address if eg ('', 0)
self._authkey = AuthenticationString(authkey)
self._state = State()
self._state.value = State.INITIAL
self._serializer = serializer
self._Listener, self._Client = listener_client[serializer]
def __reduce__(self):
return type(self).from_address, \
(self._address, self._authkey, self._serializer)
def get_server(self):
'''
Return server object with serve_forever() method and address attribute
'''
assert self._state.value == State.INITIAL
return Server(self._registry, self._address,
self._authkey, self._serializer)
def connect(self):
'''
Connect manager object to the server process
'''
Listener, Client = listener_client[self._serializer]
conn = Client(self._address, authkey=self._authkey)
dispatch(conn, None, 'dummy')
self._state.value = State.STARTED
def start(self, initializer=None, initargs=()):
'''
Spawn a server process for this manager object
'''
assert self._state.value == State.INITIAL
if initializer is not None and not hasattr(initializer, '__call__'):
raise TypeError('initializer must be a callable')
# pipe over which we will retrieve address of server
reader, writer = connection.Pipe(duplex=False)
# spawn process which runs a server
self._process = Process(
target=type(self)._run_server,
args=(self._registry, self._address, self._authkey,
self._serializer, writer, initializer, initargs),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.name = type(self).__name__ + '-' + ident
self._process.start()
# get address of server
writer.close()
self._address = reader.recv()
reader.close()
# register a finalizer
self._state.value = State.STARTED
self.shutdown = util.Finalize(
self, type(self)._finalize_manager,
args=(self._process, self._address, self._authkey,
self._state, self._Client),
exitpriority=0
)
@classmethod
def _run_server(cls, registry, address, authkey, serializer, writer,
initializer=None, initargs=()):
'''
Create a server, report its address and run it
'''
if initializer is not None:
initializer(*initargs)
# create server
server = cls._Server(registry, address, authkey, serializer)
# inform parent process of the server's address
writer.send(server.address)
writer.close()
# run the manager
util.info('manager serving at %r', server.address)
server.serve_forever()
def _create(self, typeid, *args, **kwds):
'''
Create a new shared object; return the token and exposed tuple
'''
assert self._state.value == State.STARTED, 'server not yet started'
conn = self._Client(self._address, authkey=self._authkey)
try:
id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
finally:
conn.close()
return Token(typeid, self._address, id), exposed
def join(self, timeout=None):
'''
Join the manager process (if it has been spawned)
'''
self._process.join(timeout)
def _debug_info(self):
'''
Return some info about the servers shared objects and connections
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'debug_info')
finally:
conn.close()
def _number_of_objects(self):
'''
Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'number_of_objects')
finally:
conn.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
@staticmethod
def _finalize_manager(process, address, authkey, state, _Client):
'''
Shutdown the manager process; will be registered as a finalizer
'''
if process.is_alive():
util.info('sending shutdown message to manager')
try:
conn = _Client(address, authkey=authkey)
try:
dispatch(conn, None, 'shutdown')
finally:
conn.close()
except Exception:
pass
process.join(timeout=0.2)
if process.is_alive():
util.info('manager still alive')
if hasattr(process, 'terminate'):
util.info('trying to `terminate()` manager process')
process.terminate()
process.join(timeout=0.1)
if process.is_alive():
util.info('manager still alive after terminate')
state.value = State.SHUTDOWN
try:
del BaseProxy._address_to_local[address]
except KeyError:
pass
address = property(lambda self: self._address)
@classmethod
def register(cls, typeid, callable=None, proxytype=None, exposed=None,
method_to_typeid=None, create_method=True):
'''
Register a typeid with the manager type
'''
if '_registry' not in cls.__dict__:
cls._registry = cls._registry.copy()
if proxytype is None:
proxytype = AutoProxy
exposed = exposed or getattr(proxytype, '_exposed_', None)
method_to_typeid = method_to_typeid or \
getattr(proxytype, '_method_to_typeid_', None)
if method_to_typeid:
for key, value in method_to_typeid.items():
assert type(key) is str, '%r is not a string' % key
assert type(value) is str, '%r is not a string' % value
cls._registry[typeid] = (
callable, exposed, method_to_typeid, proxytype
)
if create_method:
def temp(self, *args, **kwds):
util.debug('requesting creation of a shared %r object', typeid)
token, exp = self._create(typeid, *args, **kwds)
proxy = proxytype(
token, self._serializer, manager=self,
authkey=self._authkey, exposed=exp
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
temp.__name__ = typeid
setattr(cls, typeid, temp)
#
# Subclass of set which get cleared after a fork
#
class ProcessLocalSet(set):
def __init__(self):
util.register_after_fork(self, lambda obj: obj.clear())
def __reduce__(self):
return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
'''
A base for proxies of shared objects
'''
_address_to_local = {}
_mutex = util.ForkAwareThreadLock()
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True):
BaseProxy._mutex.acquire()
try:
tls_idset = BaseProxy._address_to_local.get(token.address, None)
if tls_idset is None:
tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
BaseProxy._address_to_local[token.address] = tls_idset
finally:
BaseProxy._mutex.release()
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
self._tls = tls_idset[0]
# self._idset is used to record the identities of all shared
# objects for which the current process owns references and
# which are in the manager at token.address
self._idset = tls_idset[1]
self._token = token
self._id = self._token.id
self._manager = manager
self._serializer = serializer
self._Client = listener_client[serializer][1]
if authkey is not None:
self._authkey = AuthenticationString(authkey)
elif self._manager is not None:
self._authkey = self._manager._authkey
else:
self._authkey = current_process().authkey
if incref:
self._incref()
util.register_after_fork(self, BaseProxy._after_fork)
def _connect(self):
util.debug('making connection to manager')
name = current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
def _callmethod(self, methodname, args=(), kwds={}):
'''
Try to call a method of the referent and return a copy of the result
'''
try:
conn = self._tls.connection
except AttributeError:
util.debug('thread %r does not own a connection',
threading.current_thread().name)
self._connect()
conn = self._tls.connection
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
if kind == '#RETURN':
return result
elif kind == '#PROXY':
exposed, token = result
proxytype = self._manager._registry[token.typeid][-1]
proxy = proxytype(
token, self._serializer, manager=self._manager,
authkey=self._authkey, exposed=exposed
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
raise convert_to_error(kind, result)
def _getvalue(self):
'''
Get a copy of the value of the referent
'''
return self._callmethod('#GETVALUE')
def _incref(self):
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'incref', (self._id,))
util.debug('INCREF %r', self._token.id)
self._idset.add(self._id)
state = self._manager and self._manager._state
self._close = util.Finalize(
self, BaseProxy._decref,
args=(self._token, self._authkey, state,
self._tls, self._idset, self._Client),
exitpriority=10
)
@staticmethod
def _decref(token, authkey, state, tls, idset, _Client):
idset.discard(token.id)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _Client(token.address, authkey=authkey)
dispatch(conn, None, 'decref', (token.id,))
except Exception, e:
util.debug('... decref failed %s', e)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more proxies so closing conn',
threading.current_thread().name)
tls.connection.close()
del tls.connection
def _after_fork(self):
self._manager = None
try:
self._incref()
except Exception, e:
# the proxy may just be for a manager which has shutdown
util.info('incref failed: %s' % e)
def __reduce__(self):
kwds = {}
if Popen.thread_is_spawning():
kwds['authkey'] = self._authkey
if getattr(self, '_isauto', False):
kwds['exposed'] = self._exposed_
return (RebuildProxy,
(AutoProxy, self._token, self._serializer, kwds))
else:
return (RebuildProxy,
(type(self), self._token, self._serializer, kwds))
def __deepcopy__(self, memo):
return self._getvalue()
def __repr__(self):
return '<%s object, typeid %r at %s>' % \
(type(self).__name__, self._token.typeid, '0x%x' % id(self))
def __str__(self):
'''
Return representation of the referent (or a fall-back if that fails)
'''
try:
return self._callmethod('__repr__')
except Exception:
return repr(self)[:-1] + "; '__str__()' failed>"
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
'''
Function used for unpickling proxy objects.
If possible the shared object is returned, or otherwise a proxy for it.
'''
server = getattr(current_process(), '_manager_server', None)
if server and server.address == token.address:
return server.id_to_obj[token.id][0]
else:
incref = (
kwds.pop('incref', True) and
not getattr(current_process(), '_inheriting', False)
)
return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
'''
    Return a proxy type whose methods are given by `exposed`
'''
exposed = tuple(exposed)
try:
return _cache[(name, exposed)]
except KeyError:
pass
dic = {}
for meth in exposed:
exec '''def %s(self, *args, **kwds):
return self._callmethod(%r, args, kwds)''' % (meth, meth) in dic
ProxyType = type(name, (BaseProxy,), dic)
ProxyType._exposed_ = exposed
_cache[(name, exposed)] = ProxyType
return ProxyType
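# A minimal sketch of what MakeProxyType produces (illustrative only; the
# typeid and method names below are assumptions, not part of this module):
#
#     CounterProxy = MakeProxyType('CounterProxy', ('increment', 'value'))
#     # CounterProxy is a BaseProxy subclass whose increment()/value()
#     # methods forward to the referent via _callmethod(); it can be passed
#     # as the `proxytype` argument of BaseManager.register().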
def AutoProxy(token, serializer, manager=None, authkey=None,
exposed=None, incref=True):
'''
Return an auto-proxy for `token`
'''
_Client = listener_client[serializer][1]
if exposed is None:
conn = _Client(token.address, authkey=authkey)
try:
exposed = dispatch(conn, None, 'get_methods', (token,))
finally:
conn.close()
if authkey is None and manager is not None:
authkey = manager._authkey
if authkey is None:
authkey = current_process().authkey
ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
incref=incref)
proxy._isauto = True
return proxy
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = self.__dict__.items()
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def get(self):
return self._value
def set(self, value):
self._value = value
def __repr__(self):
return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
value = property(get, set)
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
# XXX remove methods for Py3.0 and Py2.6
_exposed_ = ('__next__', 'next', 'send', 'throw', 'close')
def __iter__(self):
return self
def __next__(self, *args):
return self._callmethod('__next__', args)
def next(self, *args):
return self._callmethod('next', args)
def send(self, *args):
return self._callmethod('send', args)
def throw(self, *args):
return self._callmethod('throw', args)
def close(self, *args):
return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
_exposed_ = ('acquire', 'release')
def acquire(self, blocking=True):
return self._callmethod('acquire', (blocking,))
def release(self):
return self._callmethod('release')
def __enter__(self):
return self._callmethod('acquire')
def __exit__(self, exc_type, exc_val, exc_tb):
return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
    # XXX will Condition.notifyAll() name be available in Py3.0?
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def notify(self):
return self._callmethod('notify')
def notify_all(self):
return self._callmethod('notify_all')
class EventProxy(BaseProxy):
_exposed_ = ('is_set', 'set', 'clear', 'wait')
def is_set(self):
return self._callmethod('is_set')
def set(self):
return self._callmethod('set')
def clear(self):
return self._callmethod('clear')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
class NamespaceProxy(BaseProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
def __getattr__(self, key):
if key[0] == '_':
return object.__getattribute__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__getattribute__', (key,))
def __setattr__(self, key, value):
if key[0] == '_':
return object.__setattr__(self, key, value)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__setattr__', (key, value))
def __delattr__(self, key):
if key[0] == '_':
return object.__delattr__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__delattr__', (key,))
class ValueProxy(BaseProxy):
_exposed_ = ('get', 'set')
def get(self):
return self._callmethod('get')
def set(self, value):
return self._callmethod('set', (value,))
value = property(get, set)
BaseListProxy = MakeProxyType('BaseListProxy', (
'__add__', '__contains__', '__delitem__', '__delslice__',
'__getitem__', '__getslice__', '__len__', '__mul__',
'__reversed__', '__rmul__', '__setitem__', '__setslice__',
'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
'reverse', 'sort', '__imul__'
)) # XXX __getslice__ and __setslice__ unneeded in Py3.0
class ListProxy(BaseListProxy):
def __iadd__(self, value):
self._callmethod('extend', (value,))
return self
def __imul__(self, value):
self._callmethod('__imul__', (value,))
return self
DictProxy = MakeProxyType('DictProxy', (
'__contains__', '__delitem__', '__getitem__', '__len__',
'__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
))
ArrayProxy = MakeProxyType('ArrayProxy', (
'__len__', '__getitem__', '__setitem__', '__getslice__', '__setslice__'
)) # XXX __getslice__ and __setslice__ unneeded in Py3.0
PoolProxy = MakeProxyType('PoolProxy', (
'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
'map', 'map_async', 'terminate'
))
PoolProxy._method_to_typeid_ = {
'apply_async': 'AsyncResult',
'map_async': 'AsyncResult',
'imap': 'Iterator',
'imap_unordered': 'Iterator'
}
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
'''
Subclass of `BaseManager` which supports a number of shared object types.
The types registered are those intended for the synchronization
of threads, plus `dict`, `list` and `Namespace`.
The `multiprocessing.Manager()` function creates started instances of
this class.
'''
SyncManager.register('Queue', Queue.Queue)
SyncManager.register('JoinableQueue', Queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Pool', Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)
# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
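# A minimal usage sketch of the registered types (illustrative only; names
# below are examples, not part of this module):
#
#     manager = SyncManager()
#     manager.start()
#     shared = manager.dict()          # returns a DictProxy
#     lock = manager.Lock()            # returns an AcquirerProxy
#     with lock:
#         shared['answer'] = 42
#     manager.shutdown()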
| |
"""
An implementation of:
@TECHREPORT{ adams-mackay-2007,
AUTHOR = {Ryan Prescott Adams and David J.C. MacKay},
TITLE = "{B}ayesian Online Changepoint Detection",
INSTITUTION = "University of Cambridge",
ADDRESS = "Cambridge, UK",
YEAR = "2007",
NOTE = "arXiv:0710.3742v1 [stat.ML]"
}
"""
from __future__ import print_function, division
import numpy as np
from numpy.random import gamma, randn, rand
from scipy.special import gammaln
import matplotlib.pyplot as plt
def constant_hazard(r, _lambda):
"""
    A simple constant-rate hazard function that gives geometrically-drawn
intervals between changepoints. We'll specify the rate via a mean.
To quote the paper (section 2.1: "THE CHANGEPOINT PRIOR"):
"In the special case where P_{gap}(g) is a discrete exponential
(geometric) distribution with timescale lambda, the process is
memoryless and the hazard function is constant at H(tau) = 1/lambda"
Args:
* r (np.ndarray or scalar)
* _lambda (float)
Returns:
* p (np.ndarray with shape = r.shape): probability of a changepoint
"""
if isinstance(r, np.ndarray):
shape = r.shape
else:
shape = 1
probability = np.ones(shape) / _lambda
return probability
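# Worked example (illustrative): with _lambda=200, every run length is
# assigned the same changepoint probability of 1/200 = 0.005:
#
#     constant_hazard(np.arange(4), 200)   # -> array([0.005, 0.005, 0.005, 0.005])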
def studentpdf(x, mu, var, nu):
"""
Returns the pdf(x) for Student T distribution.
scipy.stats.distributions.t.pdf(x=x-mu, df=nu) comes close
to replicating studentpdf but Kevin Murphy's studentpdf
function includes a 'var' variable which scipy's version does not.
"""
# Using a mixture of code from studentpdf.m and
# scipy.stats.distributions.t_gen._pdf()
r = np.asarray(nu*1.0)
c = np.exp(gammaln((r+1)/2) - gammaln(r/2))
c /= np.sqrt(r * np.pi * var) * (1+((x-mu)**2)/(r*var))**((r+1)/2)
return c
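# A quick cross-check (illustrative, assuming scipy.stats is available): the
# 'var' argument acts as a squared scale, so studentpdf should agree with
# scipy's location-scale Student t:
#
#     from scipy.stats import t
#     studentpdf(x=1.3, mu=0.5, var=2.0, nu=4)
#     t.pdf(1.3, df=4, loc=0.5, scale=np.sqrt(2.0))   # same value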
def row_to_column_vector(row_vector):
return np.matrix(row_vector).transpose()
def inference(x, hazard_func, mu0=0, kappa0=1, alpha0=1, beta0=1):
"""
Args:
* x (np.ndarray): data
* hazard_func (function):
This is a handle to a function that takes one argument, the number of
time increments since the last changepoint, and returns a value in the
interval [0,1] that is the probability of a changepoint.
e.g. hazard_func=lambda beliefs: constant_hazard(beliefs, 200)
* mu0, kappa0, alpha0, beta0 (float): specify normal-inverse-gamma prior.
This data is Gaussian with unknown mean and variance. We are going to
use the standard conjugate prior of a normal-inverse-gamma. Note that
one cannot use non-informative priors for changepoint detection in
this construction. The normal-inverse-gamma yields a closed-form
predictive distribution, which makes it easy to use in this context.
There are lots of references out there for doing this kind of inference:
- Chris Bishop's "Pattern Recognition and Machine Learning" Chapter 2
- Also, Kevin Murphy's lecture notes.
"""
# First, setup the matrix that will hold our beliefs about the current
# run lengths. We'll initialize it all to zero at first. Obviously
# we're assuming here that we know how long we're going to do the
# inference. You can imagine other data structures that don't make that
# assumption (e.g. linked lists). We're doing this because it's easy.
beliefs = np.zeros([x.size+1, x.size+1])
# At time t=0, we actually have complete knowledge about the run
# length. It is definitely zero. See the paper for other possible
# boundary conditions. 'beliefs' is called 'R' in gaussdemo.m.
beliefs[0,0] = 1.0
# Convert floats to arrays
mu0 = np.array([mu0])
kappa0 = np.array([kappa0])
alpha0 = np.array([alpha0])
beta0 = np.array([beta0])
# Track the current set of parameters. These start out at the prior and
# accumulate data as we proceed.
muT = mu0
kappaT = kappa0
alphaT = alpha0
betaT = beta0
# Keep track of the maximums.
maxes = np.zeros([x.size+1, x.size+1])
# Loop over the data like we're seeing it all for the first time.
for t in range(x.size):
# Evaluate the predictive distribution for the new datum under each of
# the parameters. This is the standard thing from Bayesian inference.
predprobs = studentpdf(x[t], muT,
betaT*(kappaT+1)/(alphaT*kappaT),
2 * alphaT)
# Evaluate the hazard function for this interval.
haz = hazard_func(np.arange(t+1))
# Evaluate the growth probabilities - shift the probabilities down and to
# the right, scaled by the hazard function and the predictive
# probabilities.
beliefs[1:t+2,t+1] = beliefs[0:t+1,t] * predprobs * (1-haz)
# Evaluate the probability that there *was* a changepoint and we're
# accumulating the mass back down at beliefs = 0.
beliefs[0,t+1] = (beliefs[0:t+1,t] * predprobs * haz).sum()
# Renormalize the run length probabilities for improved numerical
# stability.
beliefs[:,t+1] = beliefs[:,t+1] / beliefs[:,t+1].sum()
# Update the parameter sets for each possible run length.
# TODO: continue porting from here...
muT0 = np.concatenate([mu0 , (kappaT*muT + x[t]) / (kappaT+1) ])
kappaT0 = np.concatenate([kappa0, kappaT + 1 ])
alphaT0 = np.concatenate([alpha0, alphaT + 0.5 ])
        betaT0  = np.concatenate([beta0 , betaT +
                                  (kappaT*(x[t]-muT)**2)/(2*(kappaT+1))])
muT = muT0
kappaT = kappaT0
alphaT = alphaT0
betaT = betaT0
# Store the maximum, to plot later.
maxes[t] = np.where(beliefs[:,t]==beliefs[:,t].max())[0]
return beliefs, maxes
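# A minimal usage sketch (illustrative; the data below is arbitrary):
#
#     data = np.random.randn(50)
#     beliefs, maxes = inference(data, lambda r: constant_hazard(r, 200))
#     # beliefs[r, t] holds the posterior probability that the run length at
#     # time t equals r; each column sums to one after renormalization.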
def generate_test_data(n, hazard_func, mu0=0, kappa0=1, alpha0=1, beta0=1):
"""
Args:
* n (int): number of data elements to return
* hazard_func, mu0, kappa0, alpha0, beta0: see doc for inference()
Returns: x, changepoints
* x (np.ndarray of length n): data
* changepoints (list of ints): indices of changepoints
"""
x = np.zeros(n) # this will hold the data
changepoints = [0] # Store the times of changepoints. It's useful to see them.
def generate_params():
# Generate the parameters of the Gaussian from the prior.
curr_ivar = gamma(alpha0) * beta0
curr_mean = (((kappa0 * curr_ivar) ** -0.5) * randn()) + mu0
return curr_ivar, curr_mean
curr_ivar, curr_mean = generate_params()
curr_run = 0 # Initial run length is zero
# Now, loop forward in time and generate data.
for t in range(n):
# Get the probability of a new changepoint.
p = hazard_func(curr_run)
# Randomly generate a changepoint, perhaps.
if rand() < p:
# Generate new Gaussian parameters from the prior.
curr_ivar, curr_mean = generate_params()
# The run length drops back to zero.
curr_run = 0
# Add this changepoint to the end of the list.
changepoints.append(t)
else:
# Increment the run length if there was no changepoint.
curr_run += 1
# Draw data from the current parameters.
x[t] = (curr_ivar ** -0.5) * randn() + curr_mean
return x, changepoints
def test(data_input='random'):
# First, we will specify the prior. We will then generate some fake data
# from the prior specification. We will then perform inference. Then
# we'll plot some things.
hazard_func = lambda r: constant_hazard(r, _lambda=200)
if data_input == 'random':
# generate test data
N = 100 # how many data points to generate?
x, changepoints = generate_test_data(N, hazard_func)
    elif data_input == 'ones':
        N = 100  # number of data points for the constant-signal case
        x = np.ones(N)
        changepoints = []
elif data_input == 'signature':
from pda.channel import Channel
from os import path
DATA_DIR = '/data/mine/domesticPowerData/BellendenRd/wattsUp'
#SIG_DATA_FILENAME = 'breadmaker1.csv'
SIG_DATA_FILENAME = 'washingmachine1.csv'
chan = Channel()
chan.load_wattsup(path.join(DATA_DIR, SIG_DATA_FILENAME))
x = chan.series.values[142:1647]
N = x.size
# plot
fig = plt.figure()
ax = fig.add_subplot(2,1,1)
ax.plot(x)
ylim = ax.get_ylim()
for cp in changepoints:
ax.plot([cp, cp], ylim, color='k')
# do inference
beliefs, maxes = inference(x, hazard_func)
# plot beliefs
beliefs = beliefs.astype(np.float32)
#print(beliefs)
ax2 = fig.add_subplot(2,1,2, sharex=ax)
ax2.imshow(-np.log(beliefs), interpolation='none', aspect='auto',
origin='lower', cmap=plt.cm.Blues)
ax2.plot(maxes, color='r')
ax2.set_xlim([0, N])
ax2.set_ylim([0, ax2.get_ylim()[1]])
plt.draw()
return beliefs, maxes
#test()
| |
"""Feeds for badge"""
import datetime
import hashlib
import json
import urllib
from django.contrib.syndication.views import Feed, FeedDoesNotExist
from django.utils.feedgenerator import (SyndicationFeed, Rss201rev2Feed,
Atom1Feed, get_tag_uri)
from django.shortcuts import get_object_or_404
from django.contrib.auth.models import User
from django.conf import settings
try:
from tower import ugettext_lazy as _
except ImportError:
from django.utils.translation import ugettext_lazy as _
try:
from commons.urlresolvers import reverse
except ImportError:
from django.core.urlresolvers import reverse
from . import validate_jsonp
from .models import (Badge, Award, Nomination, Progress,
BadgeAwardNotAllowedException,
DEFAULT_BADGE_IMAGE)
MAX_FEED_ITEMS = getattr(settings, 'BADGER_MAX_FEED_ITEMS', 15)
class BaseJSONFeedGenerator(SyndicationFeed):
"""JSON feed generator"""
# TODO:liberate - Can this class be a generally-useful lib?
mime_type = 'application/json'
def _encode_complex(self, obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
def build_item(self, item):
"""Simple base item formatter.
Omit some named keys and any keys with false-y values"""
omit_keys = ('obj', 'unique_id', )
return dict((k, v) for k, v in item.items()
if v and k not in omit_keys)
def build_feed(self):
"""Simple base feed formatter.
Omit some named keys and any keys with false-y values"""
omit_keys = ('obj', 'request', 'id', )
feed_data = dict((k, v) for k, v in self.feed.items()
if v and k not in omit_keys)
feed_data['items'] = [self.build_item(item) for item in self.items]
return feed_data
def write(self, outfile, encoding):
request = self.feed['request']
# Check for a callback param, validate it before use
callback = request.GET.get('callback', None)
if callback is not None:
if not validate_jsonp.is_valid_jsonp_callback_value(callback):
callback = None
# Build the JSON string, wrapping it in a callback param if necessary.
json_string = json.dumps(self.build_feed(),
default=self._encode_complex)
if callback:
outfile.write('%s(%s)' % (callback, json_string))
else:
outfile.write(json_string)
class BaseFeed(Feed):
"""Base feed for all of badger, allows switchable generator from URL route
and other niceties"""
# TODO:liberate - Can this class be a generally-useful lib?
json_feed_generator = BaseJSONFeedGenerator
rss_feed_generator = Rss201rev2Feed
atom_feed_generator = Atom1Feed
def __call__(self, request, *args, **kwargs):
self.request = request
return super(BaseFeed, self).__call__(request, *args, **kwargs)
def get_object(self, request, format):
self.link = request.build_absolute_uri('/')
if format == 'json':
self.feed_type = self.json_feed_generator
elif format == 'rss':
self.feed_type = self.rss_feed_generator
else:
self.feed_type = self.atom_feed_generator
return super(BaseFeed, self).get_object(request)
def feed_extra_kwargs(self, obj):
return {'request': self.request, 'obj': obj, }
def item_extra_kwargs(self, obj):
return {'obj': obj, }
def item_pubdate(self, obj):
return obj.created
def item_author_link(self, obj):
if not obj.creator or not hasattr(obj.creator, 'get_absolute_url'):
return None
else:
return self.request.build_absolute_uri(
obj.creator.get_absolute_url())
def item_author_name(self, obj):
if not obj.creator:
return None
else:
return '%s' % obj.creator
def item_description(self, obj):
if obj.image:
image_url = obj.image.url
else:
image_url = '%simg/default-badge.png' % settings.MEDIA_URL
return """
<div>
<a href="%(href)s"><img alt="%(alt)s" src="%(image_url)s" /></a>
</div>
""" % dict(
alt=unicode(obj),
href=self.request.build_absolute_uri(obj.get_absolute_url()),
image_url=self.request.build_absolute_uri(image_url)
)
class AwardActivityStreamJSONFeedGenerator(BaseJSONFeedGenerator):
pass
class AwardActivityStreamAtomFeedGenerator(Atom1Feed):
pass
class AwardsFeed(BaseFeed):
"""Base class for all feeds listing awards"""
title = _(u'Recently awarded badges')
subtitle = None
json_feed_generator = AwardActivityStreamJSONFeedGenerator
atom_feed_generator = AwardActivityStreamAtomFeedGenerator
def item_title(self, obj):
return _(u'{badgetitle} awarded to {username}').format(
badgetitle=obj.badge.title, username=obj.user.username)
def item_author_link(self, obj):
if not obj.creator:
return None
else:
return self.request.build_absolute_uri(
reverse('badger.views.awards_by_user',
args=(obj.creator.username,)))
def item_link(self, obj):
return self.request.build_absolute_uri(
reverse('badger.views.award_detail',
args=(obj.badge.slug, obj.pk, )))
class AwardsRecentFeed(AwardsFeed):
"""Feed of all recent badge awards"""
def items(self):
return (Award.objects
.order_by('-created')
.all()[:MAX_FEED_ITEMS])
class AwardsByUserFeed(AwardsFeed):
"""Feed of recent badge awards for a user"""
def get_object(self, request, format, username):
super(AwardsByUserFeed, self).get_object(request, format)
user = get_object_or_404(User, username=username)
self.title = _(u'Badges recently awarded to {username}').format(
username=user.username)
self.link = request.build_absolute_uri(
reverse('badger.views.awards_by_user', args=(user.username,)))
return user
def items(self, user):
return (Award.objects
.filter(user=user)
.order_by('-created')
.all()[:MAX_FEED_ITEMS])
class AwardsByBadgeFeed(AwardsFeed):
"""Feed of recent badge awards for a badge"""
def get_object(self, request, format, slug):
super(AwardsByBadgeFeed, self).get_object(request, format)
badge = get_object_or_404(Badge, slug=slug)
self.title = _(u'Recent awards of "{badgetitle}"').format(
badgetitle=badge.title)
self.link = request.build_absolute_uri(
reverse('badger.views.awards_by_badge', args=(badge.slug,)))
return badge
def items(self, badge):
return (Award.objects
.filter(badge=badge).order_by('-created')
.all()[:MAX_FEED_ITEMS])
class BadgesJSONFeedGenerator(BaseJSONFeedGenerator):
pass
class BadgesFeed(BaseFeed):
"""Base class for all feeds listing badges"""
title = _(u'Recently created badges')
json_feed_generator = BadgesJSONFeedGenerator
def item_title(self, obj):
return obj.title
def item_link(self, obj):
return self.request.build_absolute_uri(
reverse('badger.views.detail',
args=(obj.slug, )))
class BadgesRecentFeed(BadgesFeed):
def items(self):
return (Badge.objects
.order_by('-created')
.all()[:MAX_FEED_ITEMS])
class BadgesByUserFeed(BadgesFeed):
"""Feed of badges recently created by a user"""
def get_object(self, request, format, username):
super(BadgesByUserFeed, self).get_object(request, format)
user = get_object_or_404(User, username=username)
self.title = _(u'Badges recently created by {username}').format(
username=user.username)
self.link = request.build_absolute_uri(
reverse('badger.views.badges_by_user', args=(user.username,)))
return user
def items(self, user):
return (Badge.objects
.filter(creator=user)
.order_by('-created')
.all()[:MAX_FEED_ITEMS])
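# A minimal routing sketch (illustrative; the URL pattern and name below are
# assumptions, not part of this module). Each feed is wired up once and the
# `format` kwarg captured from the URL lets get_object() pick the JSON, RSS,
# or Atom generator:
#
#     url(r'^feeds/(?P<format>json|rss|atom)/awards/$',
#         AwardsRecentFeed(), name='badger.feeds.awards_recent')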
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for experimental sql input op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sqlite3
from tensorflow.contrib.data.python.kernel_tests import dataset_serialization_test_base
from tensorflow.contrib.data.python.ops import readers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class SqlDatasetTestBase(test.TestCase):
def _createSqlDataset(self, output_types, num_repeats=1):
dataset = readers.SqlDataset(self.driver_name, self.data_source_name,
self.query, output_types).repeat(num_repeats)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
return init_op, get_next
def setUp(self):
self.data_source_name = os.path.join(test.get_temp_dir(), "tftest.sqlite")
self.driver_name = array_ops.placeholder_with_default(
array_ops.constant("sqlite", dtypes.string), shape=[])
self.query = array_ops.placeholder(dtypes.string, shape=[])
conn = sqlite3.connect(self.data_source_name)
c = conn.cursor()
c.execute("DROP TABLE IF EXISTS students")
c.execute("DROP TABLE IF EXISTS people")
c.execute("DROP TABLE IF EXISTS townspeople")
c.execute(
"CREATE TABLE IF NOT EXISTS students (id INTEGER NOT NULL PRIMARY KEY, "
"first_name VARCHAR(100), last_name VARCHAR(100), motto VARCHAR(100), "
"school_id VARCHAR(100), favorite_nonsense_word VARCHAR(100), "
"desk_number INTEGER, income INTEGER, favorite_number INTEGER, "
"favorite_big_number INTEGER, favorite_negative_number INTEGER, "
"favorite_medium_sized_number INTEGER, brownie_points INTEGER, "
"account_balance INTEGER, registration_complete INTEGER)")
c.executemany(
"INSERT INTO students (first_name, last_name, motto, school_id, "
"favorite_nonsense_word, desk_number, income, favorite_number, "
"favorite_big_number, favorite_negative_number, "
"favorite_medium_sized_number, brownie_points, account_balance, "
"registration_complete) "
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
[("John", "Doe", "Hi!", "123", "n\0nsense", 9, 0, 2147483647,
9223372036854775807, -2, 32767, 0, 0, 1),
("Jane", "Moe", "Hi again!", "1000", "nonsense\0", 127, -20000,
-2147483648, -9223372036854775808, -128, -32768, 255, 65535, 0)])
c.execute(
"CREATE TABLE IF NOT EXISTS people (id INTEGER NOT NULL PRIMARY KEY, "
"first_name VARCHAR(100), last_name VARCHAR(100), state VARCHAR(100))")
c.executemany(
"INSERT INTO PEOPLE (first_name, last_name, state) VALUES (?, ?, ?)",
[("Benjamin", "Franklin", "Pennsylvania"), ("John", "Doe",
"California")])
c.execute(
"CREATE TABLE IF NOT EXISTS townspeople (id INTEGER NOT NULL PRIMARY "
"KEY, first_name VARCHAR(100), last_name VARCHAR(100), victories "
"FLOAT, accolades FLOAT, triumphs FLOAT)")
c.executemany(
"INSERT INTO townspeople (first_name, last_name, victories, "
"accolades, triumphs) VALUES (?, ?, ?, ?, ?)",
[("George", "Washington", 20.00,
1331241.321342132321324589798264627463827647382647382643874,
9007199254740991.0),
("John", "Adams", -19.95,
1331241321342132321324589798264627463827647382647382643874.0,
9007199254740992.0)])
conn.commit()
conn.close()
class SqlDatasetTest(SqlDatasetTestBase):
# Test that SqlDataset can read from a database table.
def testReadResultSet(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.string), 2)
with self.test_session() as sess:
for _ in range(2): # Run twice to verify statelessness of db operations.
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC"
})
for _ in range(2): # Dataset is repeated. See setUp.
self.assertEqual((b"John", b"Doe", b"Hi!"), sess.run(get_next))
self.assertEqual((b"Jane", b"Moe", b"Hi again!"), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that SqlDataset works on a join query.
def testReadResultSetJoinQuery(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.string))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"SELECT students.first_name, state, motto FROM students "
"INNER JOIN people "
"ON students.first_name = people.first_name "
"AND students.last_name = people.last_name"
})
self.assertEqual((b"John", b"California", b"Hi!"), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that SqlDataset can read a database entry with a null-terminator
# in the middle of the text and place the entry in a `string` tensor.
def testReadResultSetNullTerminator(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.string))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"SELECT first_name, last_name, favorite_nonsense_word "
"FROM students ORDER BY first_name DESC"
})
self.assertEqual((b"John", b"Doe", b"n\0nsense"), sess.run(get_next))
self.assertEqual((b"Jane", b"Moe", b"nonsense\0"), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that SqlDataset works when used on two different queries.
# Because the output types of the dataset must be determined at graph-creation
# time, the two queries must have the same number and types of columns.
def testReadResultSetReuseSqlDataset(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.string))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", b"Doe", b"Hi!"), sess.run(get_next))
self.assertEqual((b"Jane", b"Moe", b"Hi again!"), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, last_name, state FROM people "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", b"Doe", b"California"), sess.run(get_next))
self.assertEqual((b"Benjamin", b"Franklin", b"Pennsylvania"),
sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that an `OutOfRangeError` is raised on the first call to
  # `get_next` if the result set is empty.
def testReadEmptyResultSet(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.string))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, last_name, motto FROM students "
"WHERE first_name = 'Nonexistent'"
})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that an error is raised when `driver_name` is invalid.
def testReadResultSetWithInvalidDriverName(self):
init_op = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.string))[0]
with self.test_session() as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(
init_op,
feed_dict={
self.driver_name: "sqlfake",
self.query: "SELECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC"
})
# Test that an error is raised when a column name in `query` is nonexistent
def testReadResultSetWithInvalidColumnName(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.string))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"SELECT first_name, last_name, fake_column FROM students "
"ORDER BY first_name DESC"
})
with self.assertRaises(errors.UnknownError):
sess.run(get_next)
# Test that an error is raised when there is a syntax error in `query`.
def testReadResultSetOfQueryWithSyntaxError(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.string))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"SELEmispellECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC"
})
with self.assertRaises(errors.UnknownError):
sess.run(get_next)
# Test that an error is raised when the number of columns in `query`
# does not match the length of `output_types`.
def testReadResultSetWithMismatchBetweenColumnsAndOutputTypes(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.string))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, last_name FROM students "
"ORDER BY first_name DESC"
})
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
# Test that no results are returned when `query` is an insert query rather
# than a select query. In particular, the error refers to the number of
# output types passed to the op not matching the number of columns in the
# result set of the query (namely, 0 for an insert statement.)
def testReadResultSetOfInsertQuery(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.string))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"INSERT INTO students (first_name, last_name, motto) "
"VALUES ('Foo', 'Bar', 'Baz'), ('Fizz', 'Buzz', 'Fizzbuzz')"
})
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in an `int8` tensor.
def testReadResultSetInt8(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int8))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", 9), sess.run(get_next))
self.assertEqual((b"Jane", 127), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int8` tensor.
def testReadResultSetInt8NegativeAndZero(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int8,
dtypes.int8))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, income, favorite_negative_number "
"FROM students "
"WHERE first_name = 'John' ORDER BY first_name DESC"
})
self.assertEqual((b"John", 0, -2), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int8` tensor.
def testReadResultSetInt8MaxValues(self):
init_op, get_next = self._createSqlDataset((dtypes.int8, dtypes.int8))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"SELECT desk_number, favorite_negative_number FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((9, -2), sess.run(get_next))
# Max and min values of int8
self.assertEqual((127, -128), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in an `int16` tensor.
def testReadResultSetInt16(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int16))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", 9), sess.run(get_next))
self.assertEqual((b"Jane", 127), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int16` tensor.
def testReadResultSetInt16NegativeAndZero(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int16,
dtypes.int16))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, income, favorite_negative_number "
"FROM students "
"WHERE first_name = 'John' ORDER BY first_name DESC"
})
self.assertEqual((b"John", 0, -2), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int16` tensor.
def testReadResultSetInt16MaxValues(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int16))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, favorite_medium_sized_number "
"FROM students ORDER BY first_name DESC"
})
# Max value of int16
self.assertEqual((b"John", 32767), sess.run(get_next))
# Min value of int16
self.assertEqual((b"Jane", -32768), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in an `int32` tensor.
def testReadResultSetInt32(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int32))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", 9), sess.run(get_next))
self.assertEqual((b"Jane", 127), sess.run(get_next))
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int32` tensor.
def testReadResultSetInt32NegativeAndZero(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int32))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, income FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", 0), sess.run(get_next))
self.assertEqual((b"Jane", -20000), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int32` tensor.
def testReadResultSetInt32MaxValues(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int32))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, favorite_number FROM students "
"ORDER BY first_name DESC"
})
# Max value of int32
self.assertEqual((b"John", 2147483647), sess.run(get_next))
# Min value of int32
self.assertEqual((b"Jane", -2147483648), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that `SqlDataset` can read a numeric `varchar` from a SQLite database
# table and place it in an `int32` tensor.
def testReadResultSetInt32VarCharColumnAsInt(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int32))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, school_id FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", 123), sess.run(get_next))
self.assertEqual((b"Jane", 1000), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that `SqlDataset` can read an integer from a SQLite database table
# and place it in an `int64` tensor.
def testReadResultSetInt64(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int64))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", 9), sess.run(get_next))
self.assertEqual((b"Jane", 127), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int64` tensor.
def testReadResultSetInt64NegativeAndZero(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int64))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, income FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", 0), sess.run(get_next))
self.assertEqual((b"Jane", -20000), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int64` tensor.
def testReadResultSetInt64MaxValues(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.int64))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"SELECT first_name, favorite_big_number FROM students "
"ORDER BY first_name DESC"
})
# Max value of int64
self.assertEqual((b"John", 9223372036854775807), sess.run(get_next))
# Min value of int64
self.assertEqual((b"Jane", -9223372036854775808), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in a `uint8` tensor.
def testReadResultSetUInt8(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.uint8))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", 9), sess.run(get_next))
self.assertEqual((b"Jane", 127), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that `SqlDataset` can read the minimum and maximum uint8 values from a
# SQLite database table and place them in `uint8` tensors.
def testReadResultSetUInt8MinAndMaxValues(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.uint8))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, brownie_points FROM students "
"ORDER BY first_name DESC"
})
# Min value of uint8
self.assertEqual((b"John", 0), sess.run(get_next))
# Max value of uint8
self.assertEqual((b"Jane", 255), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that `SqlDataset` can read an integer from a SQLite database table
# and place it in a `uint16` tensor.
def testReadResultSetUInt16(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.uint16))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", 9), sess.run(get_next))
self.assertEqual((b"Jane", 127), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that `SqlDataset` can read the minimum and maximum uint16 values from a
# SQLite database table and place them in `uint16` tensors.
def testReadResultSetUInt16MinAndMaxValues(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.uint16))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, account_balance FROM students "
"ORDER BY first_name DESC"
})
# Min value of uint16
self.assertEqual((b"John", 0), sess.run(get_next))
# Max value of uint16
self.assertEqual((b"Jane", 65535), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that `SqlDataset` can read a 0-valued and 1-valued integer from a
# SQLite database table and place them as `True` and `False` respectively
# in `bool` tensors.
def testReadResultSetBool(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.bool))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"SELECT first_name, registration_complete FROM students "
"ORDER BY first_name DESC"
})
self.assertEqual((b"John", True), sess.run(get_next))
self.assertEqual((b"Jane", False), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that `SqlDataset` can read an integer that is not 0-valued or 1-valued
# from a SQLite database table and place it as `True` in a `bool` tensor.
def testReadResultSetBoolNotZeroOrOne(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.bool))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query: "SELECT first_name, favorite_medium_sized_number "
"FROM students ORDER BY first_name DESC"
})
self.assertEqual((b"John", True), sess.run(get_next))
self.assertEqual((b"Jane", True), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that `SqlDataset` can read a float from a SQLite database table
# and place it in a `float64` tensor.
def testReadResultSetFloat64(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.float64))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"SELECT first_name, last_name, victories FROM townspeople "
"ORDER BY first_name"
})
self.assertEqual((b"George", b"Washington", 20.0), sess.run(get_next))
self.assertEqual((b"John", b"Adams", -19.95), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that `SqlDataset` can read a float from a SQLite database table beyond
# the precision of 64-bit IEEE, without throwing an error. Test that
# `SqlDataset` identifies such a value as equal to itself.
def testReadResultSetFloat64OverlyPrecise(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.float64))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"SELECT first_name, last_name, accolades FROM townspeople "
"ORDER BY first_name"
})
self.assertEqual(
(b"George", b"Washington",
1331241.321342132321324589798264627463827647382647382643874),
sess.run(get_next))
self.assertEqual(
(b"John", b"Adams",
1331241321342132321324589798264627463827647382647382643874.0),
sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test that `SqlDataset` can read a float from a SQLite database table,
# representing the largest integer representable as a 64-bit IEEE float
# such that the previous integer is also representable as a 64-bit IEEE float.
# Test that `SqlDataset` can distinguish these two numbers.
def testReadResultSetFloat64LargestConsecutiveWholeNumbersNotEqual(self):
init_op, get_next = self._createSqlDataset((dtypes.string, dtypes.string,
dtypes.float64))
with self.test_session() as sess:
sess.run(
init_op,
feed_dict={
self.query:
"SELECT first_name, last_name, triumphs FROM townspeople "
"ORDER BY first_name"
})
self.assertNotEqual((b"George", b"Washington", 9007199254740992.0),
sess.run(get_next))
self.assertNotEqual((b"John", b"Adams", 9007199254740991.0),
sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
class SqlDatasetSerializationTest(
SqlDatasetTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_dataset(self, num_repeats):
data_source_name = os.path.join(test.get_temp_dir(), "tftest.sqlite")
driver_name = array_ops.placeholder_with_default(
array_ops.constant("sqlite", dtypes.string), shape=[])
query = ("SELECT first_name, last_name, motto FROM students ORDER BY "
"first_name DESC")
output_types = (dtypes.string, dtypes.string, dtypes.string)
return readers.SqlDataset(driver_name, data_source_name, query,
output_types).repeat(num_repeats)
def testSQLSaveable(self):
num_repeats = 4
num_outputs = num_repeats * 2
self.run_core_tests(lambda: self._build_dataset(num_repeats),
lambda: self._build_dataset(num_repeats // 2),
num_outputs)
if __name__ == "__main__":
test.main()
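# A stand-alone usage sketch outside the test harness (illustrative; assumes
# the same tf.contrib.data API exercised above and an existing SQLite file):
#
#     import tensorflow as tf
#     dataset = readers.SqlDataset(
#         "sqlite", "/tmp/example.sqlite",
#         "SELECT first_name, last_name FROM students",
#         (dtypes.string, dtypes.string))
#     iterator = dataset.make_one_shot_iterator()
#     next_row = iterator.get_next()
#     with tf.Session() as sess:
#         print(sess.run(next_row))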
| |
from abc import ABCMeta, abstractmethod
from .exceptions import PageNotFound, ServerError
import datetime
import http.client
import json
import random
import string
import logging
import re
import urllib.request, urllib.parse, urllib.error
import xml.dom.minidom as xml
log = logging.getLogger('ehb_datasources')
class Driver(object, metaclass=ABCMeta):
'''
Abstract electronic honest broker (ehb) datasource driver class
'''
def __init__(self, url, username, password, secure):
self.url = url
self.username = username
self.password = password
self.secure = secure
@abstractmethod
def get(self, record_id=None, *args, **kwargs):
'''
Should enable getting one or more records. Should return None if no
records are found.
If record_id is supplied (other driver dependent options may allow
calls without this parameter) and no record exists, this method should
raise RecordDoesNotExist
'''
pass
@abstractmethod
def delete(self, *args, **kwargs):
'''
Should enable deleting one or more records
'''
pass
@abstractmethod
def create(self, record_id_prefix, record_id_validator, *args, **kwargs):
'''
Should enable creating a new record
Inputs:
* record_id_prefix = a prefix to prepend to the record_id
(particularly intended for identifying a record as belonging to
a group)
          * record_id_validator is a function that accepts two positional
arguments, the first is the new record id and the second is a
boolean indicating if the external system path should be
included in the compared record set.
Checks if the newly produced record id is valid WRT to the eHB.
It will return 0 for valid and an integer > 0 corresponding to an
error code if not valid
Output:
Primary id of the newly created record or RecordCreationError
'''
pass
@abstractmethod
def update(self, *args, **kwargs):
'''
Should enable updating one or more records
'''
pass
@abstractmethod
def configure(self, driver_configuration='', *args, **kwargs):
'''
Perform any necessary driver configuration actions.
`driver_configuration` is a string representation of
driver_configuration values. The expected format is driver dependent
'''
pass
@abstractmethod
def meta(self, *args, **kwargs):
'''
Should enable obtaining meta data for the underlying data store if
appropriate
'''
pass
@abstractmethod
def subRecordSelectionForm(self, form_url='', *args, **kwargs):
'''
Should return a string representation of an html form to be used to
select additional input data forms for a specific record. If there is
only a top level form this method can just return the single form.
`form_url` is the base url for this form and should be used for setting
links to sub-forms.
'''
pass
@abstractmethod
def subRecordForm(self, external_record, form_spec='', *args, **kwargs):
'''
Should return a string representation of an html form to be used as
data entry for a specific record (or portion of the record).
`external_record` is an appropriate representation of the ehb-service
externalRecord class, e.g
ehb-client.external_record_request_handler.ExternalRecord
`form_spec` is a string representing any required additional form
specification information, this is driver dependent.
'''
pass
@abstractmethod
def processForm(self, request, external_record, form_spec='', *args,
**kwargs):
'''
Given the HTTP request, which has the raw data, process the data.
Inputs:
* request: the HTTP request object
* external_record: an appropriate representation of the ehb-service
externalRecord class, e.g
ehb-client.external_record_request_handler.ExternalRecord
`form_spec` is a string representing any required additional form
specification information, this is driver dependent.
OUTPUT:
* List of error messages, None if successful
'''
pass
def create_random_record_id(self, size=9,
chars=string.ascii_uppercase + string.digits,
validator_func=None, max_attempts=10):
'''
Attempts to create a new random record id. If supplied it will use the
validator_func to verify that the random id does not already exist.
If it does, it will try a new random id up to max_attempts number of
times.
INPUTS:
* size: number of characters to include in id
* chars: valid character choices
* validator_func: function that accepts a string id and returns
True if the id is acceptable as a new record id
False otherwise
* max_attempts: the number of new id attempts to make
'''
if validator_func:
attempt_count = 0
while attempt_count < max_attempts:
pid = ''.join(random.choice(chars) for idx in range(size))
if validator_func(pid):
return pid
else:
attempt_count += 1
else:
return ''.join(random.choice(chars) for idx in range(size))
def new_record_form_required(self):
'''
Returns boolean indicating if the user is required to complete a form
in order to create a new record.
'''
return False
def create_new_record_form(self, request, *args, **kwargs):
'''
Should generate a string representation of an html form for creating a
new record or None if no information is needed from the user to
generate a new record. The request is supplied so that if the form had
been submitted previously with errors it can be recreated with the
appropriate data.
'''
        return None
def process_new_record_form(self, request, record_id_prefix,
record_id_validator, *args, **kwargs):
'''
Should process data in new record form and return the new record id
from the external system.
Inputs:
* request: Django HTTP request object
* record_id_prefix: a prefix to prepend to the record_id.
(particularly intended for identifying a record as belonging to
a group)
* record_id_validator: function that accepts two positional
arguments, the first is the new record id and the second is a
boolean indicating if the external system path should be
included in the compared record set.
Checks if the newly produced record id is valid WRT to the eHB.
It will return 0 for valid and an integer > 0 corresponding to
an error code if not valid
'''
        return None
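# A minimal sketch of a concrete driver (illustrative; ExampleDriver is an
# assumption, not a real ehb_datasources driver):
#
#     class ExampleDriver(Driver):
#         def get(self, record_id=None, *args, **kwargs): ...
#         def delete(self, *args, **kwargs): ...
#         def create(self, record_id_prefix, record_id_validator, *args, **kwargs): ...
#         def update(self, *args, **kwargs): ...
#         def configure(self, driver_configuration='', *args, **kwargs): ...
#         def meta(self, *args, **kwargs): ...
#         def subRecordSelectionForm(self, form_url='', *args, **kwargs): ...
#         def subRecordForm(self, external_record, form_spec='', *args, **kwargs): ...
#         def processForm(self, request, external_record, form_spec='', *args, **kwargs): ...
#
# Every abstract method must be overridden before ExampleDriver can be
# instantiated.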
class RequestHandler(object):
'''
The Request Handler object is designed to allow making multiple requests
from a fixed host
'''
def __init__(self, host, secure=False):
self.host = host
self.secure = secure
self.lastrequestbody = ''
self.currentConnection = None
FORMAT_JSON = 'json'
FORMAT_XML = 'xml'
FORMAT_CSV = 'csv'
def sendRequest(self, verb, path='', headers='', body=''):
self.closeConnection()
self.lastrequestbody = body
if(self.secure):
c = http.client.HTTPSConnection(self.host)
else:
c = http.client.HTTPConnection(self.host)
ts = datetime.datetime.now()
c.request(verb, path, body, headers)
log.debug(
"datasource request ({0}) {1}ms".format(
path,
(datetime.datetime.now() - ts).microseconds/1000)
)
r = c.getresponse()
return r
def POST(self, path='', headers='', body=''):
self.lastrequestbody = body
return self.sendRequest('POST', path, headers, body)
def GET(self, path='', headers='', body=''):
self.lastrequestbody = body
return self.sendRequest('GET', path, headers, body)
def PUT(self, path='', headers='', body=''):
self.lastrequestbody = body
return self.sendRequest('PUT', path, headers, body)
def closeConnection(self):
if self.currentConnection:
self.currentConnection.close()
def processResponse(self, response, path=''):
status = response.status
if status == 200:
return self.readAndClose(response)
elif status == 201:
return self.readAndClose(response)
elif status == 400:
msg = 'Bad Request: {}'.format(response.read())
self.closeConnection()
raise Exception(msg)
elif status == 406:
msg = "The data being imported was formatted incorrectly"
self.closeConnection()
raise Exception(msg)
elif status == 404:
self.closeConnection()
raise PageNotFound(path=path)
elif status == 500:
self.closeConnection()
raise ServerError
else:
self.closeConnection()
msg = 'Unknown response code from server: {}'.format(status)
raise Exception(msg)
def readAndClose(self, response):
rd = response.read()
self.closeConnection()
return rd
def raw_to_json(self, raw_string):
def controls_repl(matchobj):
if matchobj.group(1) == '\r':
return '\n'
else:
return matchobj.group(1)
def non_controls_repl(matchobj):
return matchobj.group(1)
        try:
            return json.loads(raw_string.decode('utf-8', 'backslashreplace'))
        except (UnicodeDecodeError, ValueError):
            # Fall back to unicode-escape decoding before giving up.
            return json.loads(raw_string.decode('unicode-escape'))
def transformResponse(self, _format, responseString):
try:
if _format == self.FORMAT_JSON:
return self.raw_to_json(responseString)
if _format == self.FORMAT_XML:
return xml.parseString(responseString)
return responseString
except Exception:
# TODO: Pass up some informative error
raise
pass
def extract_data_from_post_request(self, request):
data = {} # this will hold the data submitted in the form
post_data = request._post
if post_data:
for k, v in list(post_data.items()):
data[k] = v
return data
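# A minimal usage sketch of RequestHandler (illustrative; host, path, and
# headers below are assumptions):
#
#     handler = RequestHandler('example.org', secure=True)
#     response = handler.GET(path='/api/records/1',
#                            headers={'Accept': 'application/json'})
#     body = handler.processResponse(response, path='/api/records/1')
#     record = handler.transformResponse(RequestHandler.FORMAT_JSON, body)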
| |
from __future__ import absolute_import
# Django settings for zulip project.
########################################################################
# Here's how settings for the Zulip project work:
#
# * settings.py contains non-site-specific and settings configuration
# for the Zulip Django app.
# * settings.py imports prod_settings.py, and any site-specific configuration
# belongs there. The template for prod_settings.py is prod_settings_template.py
#
# See http://zulip.readthedocs.io/en/latest/settings.html for more information
#
########################################################################
import os
import platform
import time
import sys
import six.moves.configparser
from zerver.lib.db import TimeTrackingConnection
import zerver.lib.logging_util
import six
########################################################################
# INITIAL SETTINGS
########################################################################
DEPLOY_ROOT = os.path.join(os.path.realpath(os.path.dirname(__file__)), '..')
config_file = six.moves.configparser.RawConfigParser()
config_file.read("/etc/zulip/zulip.conf")
# Whether this instance of Zulip is running in a production environment.
PRODUCTION = config_file.has_option('machine', 'deploy_type')
DEVELOPMENT = not PRODUCTION
secrets_file = six.moves.configparser.RawConfigParser()
if PRODUCTION:
secrets_file.read("/etc/zulip/zulip-secrets.conf")
else:
secrets_file.read(os.path.join(DEPLOY_ROOT, "zproject/dev-secrets.conf"))
def get_secret(key):
if secrets_file.has_option('secrets', key):
return secrets_file.get('secrets', key)
return None
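# Illustrative only: in development the secrets live in
# zproject/dev-secrets.conf, e.g.
#
#     [secrets]
#     secret_key = <random string>
#
# and get_secret("secret_key") returns that value (or None if the key is
# absent), which is how SECRET_KEY below is populated.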
# Make this unique, and don't share it with anybody.
SECRET_KEY = get_secret("secret_key")
# A shared secret, used to authenticate different parts of the app to each other.
SHARED_SECRET = get_secret("shared_secret")
# We use this salt to hash a user's email into a filename for their user-uploaded
# avatar. If this salt is discovered, attackers will only be able to determine
# that the owner of an email account has uploaded an avatar to Zulip, which isn't
# the end of the world. Don't use the salt where there is more security exposure.
AVATAR_SALT = get_secret("avatar_salt")
# SERVER_GENERATION is used to track whether the server has been
# restarted for triggering browser clients to reload.
SERVER_GENERATION = int(time.time())
if 'DEBUG' not in globals():
# Uncomment end of next line to test JS/CSS minification.
DEBUG = DEVELOPMENT # and platform.node() != 'your-machine'
if DEBUG:
INTERNAL_IPS = ('127.0.0.1',)
# Detect whether we're running as a queue worker; this impacts the logging configuration.
if len(sys.argv) > 2 and sys.argv[0].endswith('manage.py') and sys.argv[1] == 'process_queue':
IS_WORKER = True
else:
IS_WORKER = False
# This is overridden in test_settings.py for the test suites
TEST_SUITE = False
# The new user tutorial is enabled by default, but disabled for client tests.
TUTORIAL_ENABLED = True
# This is overridden in test_settings.py for the test suites
CASPER_TESTS = False
# Import variables like secrets from the prod_settings file
# Import prod_settings after determining the deployment/machine type
if PRODUCTION:
from .prod_settings import *
else:
from .dev_settings import *
########################################################################
# DEFAULT VALUES FOR SETTINGS
########################################################################
# For any settings that are not defined in prod_settings.py,
# we want to initialize them to sane defaults
DEFAULT_SETTINGS = {'TWITTER_CONSUMER_KEY': '',
'TWITTER_CONSUMER_SECRET': '',
'TWITTER_ACCESS_TOKEN_KEY': '',
'TWITTER_ACCESS_TOKEN_SECRET': '',
'EMAIL_CHANGE_CONFIRMATION_DAYS': 1,
'EMAIL_GATEWAY_PATTERN': '',
'EMAIL_GATEWAY_EXAMPLE': '',
'EMAIL_GATEWAY_BOT': None,
'EMAIL_GATEWAY_LOGIN': None,
'EMAIL_GATEWAY_PASSWORD': None,
'EMAIL_GATEWAY_IMAP_SERVER': None,
'EMAIL_GATEWAY_IMAP_PORT': None,
'EMAIL_GATEWAY_IMAP_FOLDER': None,
'EMAIL_GATEWAY_EXTRA_PATTERN_HACK': None,
'EMAIL_HOST': None,
'EMAIL_BACKEND': None,
'S3_KEY': '',
'S3_SECRET_KEY': '',
'S3_AVATAR_BUCKET': '',
'LOCAL_UPLOADS_DIR': None,
'MAX_FILE_UPLOAD_SIZE': 25,
'MAX_AVATAR_FILE_SIZE': 5,
'MAX_ICON_FILE_SIZE': 5,
'ERROR_REPORTING': True,
'BROWSER_ERROR_REPORTING': False,
'STAGING_ERROR_NOTIFICATIONS': False,
'EVENT_LOGS_ENABLED': False,
'SAVE_FRONTEND_STACKTRACES': False,
'JWT_AUTH_KEYS': {},
'NAME_CHANGES_DISABLED': False,
'DEPLOYMENT_ROLE_NAME': "",
'RABBITMQ_HOST': 'localhost',
'RABBITMQ_USERNAME': 'zulip',
'MEMCACHED_LOCATION': '127.0.0.1:11211',
'RATE_LIMITING': True,
'REDIS_HOST': '127.0.0.1',
'REDIS_PORT': 6379,
# The following bots only exist in non-VOYAGER installs
'ERROR_BOT': None,
'NEW_USER_BOT': None,
'NAGIOS_STAGING_SEND_BOT': None,
'NAGIOS_STAGING_RECEIVE_BOT': None,
'APNS_CERT_FILE': None,
'APNS_KEY_FILE': None,
'APNS_SANDBOX': True,
'ANDROID_GCM_API_KEY': None,
'INITIAL_PASSWORD_SALT': None,
'FEEDBACK_BOT': 'feedback@zulip.com',
'FEEDBACK_BOT_NAME': 'Zulip Feedback Bot',
'ADMINS': '',
'SHARE_THE_LOVE': False,
'INLINE_IMAGE_PREVIEW': True,
'INLINE_URL_EMBED_PREVIEW': False,
'CAMO_URI': '',
'ENABLE_FEEDBACK': PRODUCTION,
'SEND_MISSED_MESSAGE_EMAILS_AS_USER': False,
'SERVER_EMAIL': None,
'FEEDBACK_EMAIL': None,
'FEEDBACK_STREAM': None,
'WELCOME_EMAIL_SENDER': None,
'EMAIL_DELIVERER_DISABLED': False,
'ENABLE_GRAVATAR': True,
'DEFAULT_AVATAR_URI': '/static/images/default-avatar.png',
'AUTH_LDAP_SERVER_URI': "",
'EXTERNAL_URI_SCHEME': "https://",
'ZULIP_COM': False,
'SHOW_OSS_ANNOUNCEMENT': False,
'REGISTER_LINK_DISABLED': False,
'LOGIN_LINK_DISABLED': False,
'ABOUT_LINK_DISABLED': False,
'FIND_TEAM_LINK_DISABLED': True,
'CUSTOM_LOGO_URL': None,
'VERBOSE_SUPPORT_OFFERS': False,
'STATSD_HOST': '',
'OPEN_REALM_CREATION': False,
'REALMS_HAVE_SUBDOMAINS': False,
'SUBDOMAINS_HOMEPAGE': False,
'ROOT_SUBDOMAIN_ALIASES': ["www"],
'REMOTE_POSTGRES_HOST': '',
'REMOTE_POSTGRES_SSLMODE': '',
# Default GOOGLE_CLIENT_ID to the value needed for Android auth to work
'GOOGLE_CLIENT_ID': '835904834568-77mtr5mtmpgspj9b051del9i9r5t4g4n.apps.googleusercontent.com',
'SOCIAL_AUTH_GITHUB_KEY': None,
'SOCIAL_AUTH_GITHUB_ORG_NAME': None,
'SOCIAL_AUTH_GITHUB_TEAM_ID': None,
'SOCIAL_AUTH_FIELDS_STORED_IN_SESSION': ['subdomain'],
'DBX_APNS_CERT_FILE': None,
'DBX_APNS_KEY_FILE': None,
'PERSONAL_ZMIRROR_SERVER': None,
# Structurally, we will probably eventually merge
# analytics into part of the main server, rather
# than a separate app.
'EXTRA_INSTALLED_APPS': ['analytics'],
'DEFAULT_NEW_REALM_STREAMS': {
"social": {"description": "For socializing", "invite_only": False},
"general": {"description": "For general stuff", "invite_only": False},
"zulip": {"description": "For zulip stuff", "invite_only": False}
},
'REALM_CREATION_LINK_VALIDITY_DAYS': 7,
'TERMS_OF_SERVICE': None,
'TOS_VERSION': None,
'SYSTEM_ONLY_REALMS': {"zulip"},
'FIRST_TIME_TOS_TEMPLATE': None,
'USING_PGROONGA': False,
'POST_MIGRATION_CACHE_FLUSHING': False,
'ENABLE_FILE_LINKS': False,
'USE_WEBSOCKETS': True,
'ANALYTICS_LOCK_DIR': "/home/zulip/deployments/analytics-lock-dir",
'PASSWORD_MIN_LENGTH': 6,
'PASSWORD_MIN_ZXCVBN_QUALITY': 0.5,
'OFFLINE_THRESHOLD_SECS': 5 * 60,
}
for setting_name, setting_val in six.iteritems(DEFAULT_SETTINGS):
if setting_name not in vars():
vars()[setting_name] = setting_val
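# For example, if prod_settings.py does not define RATE_LIMITING, the loop above
# injects the default RATE_LIMITING = True into this module's namespace; any
# setting that prod_settings.py did define is already in vars() and is left
# untouched.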
# Extend ALLOWED_HOSTS with localhost (needed to RPC to Tornado).
ALLOWED_HOSTS += ['127.0.0.1', 'localhost']
# These are the settings that we will check that the user has filled in for
# production deployments before starting the app. It consists of a series
# of pairs of (setting name, default value that it must be changed from)
REQUIRED_SETTINGS = [("EXTERNAL_HOST", "zulip.example.com"),
("ZULIP_ADMINISTRATOR", "zulip-admin@example.com"),
# SECRET_KEY doesn't really need to be here, in
# that we set it automatically, but just in
# case, it seems worth having in this list
("SECRET_KEY", ""),
("AUTHENTICATION_BACKENDS", ()),
("NOREPLY_EMAIL_ADDRESS", "noreply@example.com"),
("DEFAULT_FROM_EMAIL", "Zulip <zulip@example.com>"),
]
if ADMINS == "":
ADMINS = (("Zulip Administrator", ZULIP_ADMINISTRATOR),)
MANAGERS = ADMINS
# Voyager is a production Zulip server that is not zulip.com or
# staging.zulip.com. VOYAGER is the standalone all-on-one-server
# production deployment model based on the original Zulip
# ENTERPRISE implementation. We expect most users of the open source
# project will be using VOYAGER=True in production.
VOYAGER = PRODUCTION and not ZULIP_COM
########################################################################
# STANDARD DJANGO SETTINGS
########################################################################
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# The ID, as an integer, of the current site in the django_site database table.
# This is used so that application data can hook into specific site(s) and a
# single database can manage content for multiple sites.
#
# We set this site's domain to 'zulip.com' in populate_db.
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
DEPLOY_ROOT = os.path.join(os.path.realpath(os.path.dirname(__file__)), '..')
# This directory is used to store logs for the development environment.
DEVELOPMENT_LOG_DIRECTORY = os.path.join(DEPLOY_ROOT, 'var', 'log')
# Make redirects work properly behind a reverse proxy
USE_X_FORWARDED_HOST = True
# List of callables that know how to import templates from various sources.
LOADERS = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
if PRODUCTION:
# Template caching is a significant performance win in production.
LOADERS = [('django.template.loaders.cached.Loader', LOADERS)]
TEMPLATES = [
{
'BACKEND': 'zproject.jinja2.backends.Jinja2',
'DIRS': [
os.path.join(DEPLOY_ROOT, 'templates'),
os.path.join(DEPLOY_ROOT, 'zerver', 'webhooks'),
],
'APP_DIRS': True,
'OPTIONS': {
'debug': DEBUG,
'environment': 'zproject.jinja2.environment',
'extensions': [
'jinja2.ext.i18n',
'jinja2.ext.autoescape',
'pipeline.jinja2.PipelineExtension',
],
'context_processors': [
'zerver.context_processors.add_settings',
'zerver.context_processors.add_metrics',
'django.template.context_processors.i18n',
],
},
},
]
MIDDLEWARE_CLASSES = (
# Our logging middleware should be the first middleware item.
'zerver.middleware.TagRequests',
'zerver.middleware.LogRequests',
'zerver.middleware.JsonErrorHandler',
'zerver.middleware.RateLimitMiddleware',
'zerver.middleware.FlushDisplayRecipientCache',
'django.middleware.common.CommonMiddleware',
'zerver.middleware.SessionHostDomainMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ANONYMOUS_USER_ID = None
AUTH_USER_MODEL = "zerver.UserProfile"
TEST_RUNNER = 'zerver.lib.test_runner.Runner'
ROOT_URLCONF = 'zproject.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'zproject.wsgi.application'
# A site can include additional installed apps via the
# EXTRA_INSTALLED_APPS setting
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'confirmation',
'guardian',
'pipeline',
'zerver',
'social_django',
]
if USING_PGROONGA:
INSTALLED_APPS += ['pgroonga']
INSTALLED_APPS += EXTRA_INSTALLED_APPS
ZILENCER_ENABLED = 'zilencer' in INSTALLED_APPS
# Base URL of the Tornado server
# We set it to None when running backend tests or populate_db.
# We override the port number when running frontend tests.
TORNADO_SERVER = 'http://127.0.0.1:9993'
RUNNING_INSIDE_TORNADO = False
########################################################################
# DATABASE CONFIGURATION
########################################################################
DATABASES = {"default": {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'zulip',
'USER': 'zulip',
'PASSWORD': '', # Authentication done via certificates
'HOST': '', # Host = '' => connect through a local socket
'SCHEMA': 'zulip',
'CONN_MAX_AGE': 600,
'OPTIONS': {
'connection_factory': TimeTrackingConnection
},
}}
if DEVELOPMENT:
LOCAL_DATABASE_PASSWORD = get_secret("local_database_password")
DATABASES["default"].update({
'PASSWORD': LOCAL_DATABASE_PASSWORD,
'HOST': 'localhost'
})
elif REMOTE_POSTGRES_HOST != '':
DATABASES['default'].update({
'HOST': REMOTE_POSTGRES_HOST,
})
if get_secret("postgres_password") is not None:
DATABASES['default'].update({
'PASSWORD': get_secret("postgres_password"),
})
if REMOTE_POSTGRES_SSLMODE != '':
DATABASES['default']['OPTIONS']['sslmode'] = REMOTE_POSTGRES_SSLMODE
else:
DATABASES['default']['OPTIONS']['sslmode'] = 'verify-full'
if USING_PGROONGA:
# We need to have "pgroonga" schema before "pg_catalog" schema in
# the PostgreSQL search path, because "pgroonga" schema overrides
# the "@@" operator from "pg_catalog" schema, and "pg_catalog"
# schema is searched first if not specified in the search path.
# See also: http://www.postgresql.org/docs/current/static/runtime-config-client.html
pg_options = '-c search_path=%(SCHEMA)s,zulip,public,pgroonga,pg_catalog' % \
DATABASES['default']
DATABASES['default']['OPTIONS']['options'] = pg_options
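# With the default SCHEMA of 'zulip' configured above, pg_options expands to
# '-c search_path=zulip,zulip,public,pgroonga,pg_catalog'.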
########################################################################
# RABBITMQ CONFIGURATION
########################################################################
USING_RABBITMQ = True
RABBITMQ_PASSWORD = get_secret("rabbitmq_password")
########################################################################
# CACHING CONFIGURATION
########################################################################
SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
'LOCATION': MEMCACHED_LOCATION,
'TIMEOUT': 3600,
'OPTIONS': {
'verify_keys': True,
'tcp_nodelay': True,
'retry_timeout': 1,
}
},
'database': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'third_party_api_results',
# Basically never timeout. Setting to 0 isn't guaranteed
# to work, see https://code.djangoproject.com/ticket/9595
'TIMEOUT': 2000000000,
'OPTIONS': {
'MAX_ENTRIES': 100000000,
'CULL_FREQUENCY': 10,
}
},
}
########################################################################
# REDIS-BASED RATE LIMITING CONFIGURATION
########################################################################
RATE_LIMITING_RULES = [
(60, 100), # 100 requests max every minute
]
DEBUG_RATE_LIMITING = DEBUG
REDIS_PASSWORD = get_secret('redis_password')
########################################################################
# SECURITY SETTINGS
########################################################################
# Tell the browser to never send our cookies without encryption, e.g.
# when executing the initial http -> https redirect.
#
# Turn it off for local testing because we don't have SSL.
if PRODUCTION:
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
try:
# For get_updates hostname sharding.
domain = config_file.get('django', 'cookie_domain')
SESSION_COOKIE_DOMAIN = '.' + domain
CSRF_COOKIE_DOMAIN = '.' + domain
except six.moves.configparser.Error:
# Failing here is OK
pass
# Prevent Javascript from reading the CSRF token from cookies. Our code gets
# the token from the DOM, which means malicious code could too. But hiding the
# cookie will slow down some attackers.
CSRF_COOKIE_PATH = '/;HttpOnly'
CSRF_FAILURE_VIEW = 'zerver.middleware.csrf_failure'
if DEVELOPMENT:
# Use fast password hashing for creating testing users when not
# PRODUCTION. Saves a bunch of time.
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher'
)
# Also we auto-generate passwords for the default users which you
# can query using ./manage.py print_initial_password
INITIAL_PASSWORD_SALT = get_secret("initial_password_salt")
else:
# For production, use the best password hashing algorithm: Argon2
# Zulip was originally on PBKDF2 so we need it for compatibility
PASSWORD_HASHERS = ('django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher')
########################################################################
# API/BOT SETTINGS
########################################################################
if "EXTERNAL_API_PATH" not in vars():
EXTERNAL_API_PATH = EXTERNAL_HOST + "/api"
EXTERNAL_API_URI = EXTERNAL_URI_SCHEME + EXTERNAL_API_PATH
SERVER_URI = EXTERNAL_URI_SCHEME + EXTERNAL_HOST
if "NAGIOS_BOT_HOST" not in vars():
NAGIOS_BOT_HOST = EXTERNAL_HOST
S3_KEY = get_secret("s3_key")
S3_SECRET_KEY = get_secret("s3_secret_key")
# GCM tokens are IP-whitelisted; if we deploy to additional
# servers you will need to explicitly add their IPs here:
# https://cloud.google.com/console/project/apps~zulip-android/apiui/credential
ANDROID_GCM_API_KEY = get_secret("android_gcm_api_key")
GOOGLE_OAUTH2_CLIENT_SECRET = get_secret('google_oauth2_client_secret')
DROPBOX_APP_KEY = get_secret("dropbox_app_key")
MAILCHIMP_API_KEY = get_secret("mailchimp_api_key")
# This comes from our mandrill accounts page
MANDRILL_API_KEY = get_secret("mandrill_api_key")
# Twitter API credentials
# Secrecy not required because it's only used for read-only requests.
# Please don't make us go over our rate limit.
TWITTER_CONSUMER_KEY = get_secret("twitter_consumer_key")
TWITTER_CONSUMER_SECRET = get_secret("twitter_consumer_secret")
TWITTER_ACCESS_TOKEN_KEY = get_secret("twitter_access_token_key")
TWITTER_ACCESS_TOKEN_SECRET = get_secret("twitter_access_token_secret")
# These are the bots that Zulip sends automated messages as.
INTERNAL_BOTS = [{'var_name': 'NOTIFICATION_BOT',
'email_template': 'notification-bot@%s',
'name': 'Notification Bot'},
{'var_name': 'EMAIL_GATEWAY_BOT',
'email_template': 'emailgateway@%s',
'name': 'Email Gateway'},
{'var_name': 'NAGIOS_SEND_BOT',
'email_template': 'nagios-send-bot@%s',
'name': 'Nagios Send Bot'},
{'var_name': 'NAGIOS_RECEIVE_BOT',
'email_template': 'nagios-receive-bot@%s',
'name': 'Nagios Receive Bot'},
{'var_name': 'WELCOME_BOT',
'email_template': 'welcome-bot@%s',
'name': 'Welcome Bot'}]
if PRODUCTION:
INTERNAL_BOTS += [
{'var_name': 'NAGIOS_STAGING_SEND_BOT',
'email_template': 'nagios-staging-send-bot@%s',
'name': 'Nagios Staging Send Bot'},
{'var_name': 'NAGIOS_STAGING_RECEIVE_BOT',
'email_template': 'nagios-staging-receive-bot@%s',
'name': 'Nagios Staging Receive Bot'},
]
INTERNAL_BOT_DOMAIN = "zulip.com"
# Set the realm-specific bot names
for bot in INTERNAL_BOTS:
if vars().get(bot['var_name']) is None:
bot_email = bot['email_template'] % (INTERNAL_BOT_DOMAIN,)
vars()[bot['var_name']] = bot_email
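# For example, with INTERNAL_BOT_DOMAIN = "zulip.com" and no override in the
# site settings, NOTIFICATION_BOT ends up as 'notification-bot@zulip.com'.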
if EMAIL_GATEWAY_PATTERN != "":
EMAIL_GATEWAY_EXAMPLE = EMAIL_GATEWAY_PATTERN % ("support+abcdefg",)
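# e.g. a (hypothetical) EMAIL_GATEWAY_PATTERN of '%s@streams.example.com' would
# make EMAIL_GATEWAY_EXAMPLE 'support+abcdefg@streams.example.com'.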
DEPLOYMENT_ROLE_KEY = get_secret("deployment_role_key")
########################################################################
# STATSD CONFIGURATION
########################################################################
# Statsd is not super well supported; if you want to use it you'll need
# to set STATSD_HOST and STATSD_PREFIX.
if STATSD_HOST != '':
INSTALLED_APPS += ['django_statsd']
STATSD_PORT = 8125
STATSD_CLIENT = 'django_statsd.clients.normal'
########################################################################
# CAMO HTTPS CACHE CONFIGURATION
########################################################################
if CAMO_URI != '':
# This needs to be synced with the Camo installation
CAMO_KEY = get_secret("camo_key")
########################################################################
# STATIC CONTENT AND MINIFICATION SETTINGS
########################################################################
STATIC_URL = '/static/'
# ZulipStorage is a modified version of PipelineCachedStorage,
# and, like that class, it inserts a file hash into filenames
# to prevent the browser from using stale files from cache.
#
# Unlike PipelineStorage, it requires the files to exist in
# STATIC_ROOT even for dev servers. So we only use
# ZulipStorage when not DEBUG.
# This is the default behavior from Pipeline, but we set it
# here so that urls.py can read it.
PIPELINE_ENABLED = not DEBUG
if DEBUG:
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
if PIPELINE_ENABLED:
STATIC_ROOT = os.path.abspath('prod-static/serve')
else:
STATIC_ROOT = os.path.abspath('static/')
else:
STATICFILES_STORAGE = 'zerver.storage.ZulipStorage'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'pipeline.finders.PipelineFinder',
)
if PRODUCTION:
STATIC_ROOT = '/home/zulip/prod-static'
else:
STATIC_ROOT = os.path.abspath('prod-static/serve')
# If changing this, you also need to update the matching hack in our
# compilemessages management command.
LOCALE_PATHS = (os.path.join(STATIC_ROOT, 'locale'),)
# We want all temporary uploaded files to be stored on disk.
FILE_UPLOAD_MAX_MEMORY_SIZE = 0
STATICFILES_DIRS = ['static/']
STATIC_HEADER_FILE = 'zerver/static_header.txt'
# To use minified files in dev, set PIPELINE_ENABLED = True. For the full
# cache-busting behavior, you must also set DEBUG = False.
#
# You will need to run update-prod-static after changing
# static files.
#
# Useful reading on how this works is in
# https://zulip.readthedocs.io/en/latest/front-end-build-process.html
PIPELINE = {
'PIPELINE_ENABLED': PIPELINE_ENABLED,
'CSS_COMPRESSOR': 'pipeline.compressors.yui.YUICompressor',
'YUI_BINARY': '/usr/bin/env yui-compressor',
'STYLESHEETS': {
# If you add a style here, please update stylesheets()
# in frontend_tests/zjsunit/output.js as needed.
'activity': {
'source_filenames': ('styles/activity.css',),
'output_filename': 'min/activity.css'
},
'stats': {
'source_filenames': ('styles/stats.css',),
'output_filename': 'min/stats.css'
},
'portico': {
'source_filenames': (
'third/zocial/zocial.css',
'styles/portico.css',
'styles/pygments.css',
'styles/thirdparty-fonts.css',
'styles/fonts.css',
),
'output_filename': 'min/portico.css'
},
'landing-page': {
'source_filenames': (
'styles/landing-page.css',
),
'output_filename': 'min/landing.css'
},
# Two versions of the app CSS exist because of QTBUG-3467
'app-fontcompat': {
'source_filenames': (
'third/bootstrap-notify/css/bootstrap-notify.css',
'third/spectrum/spectrum.css',
'styles/components.css',
'styles/zulip.css',
'styles/settings.css',
'styles/subscriptions.css',
'styles/drafts.css',
'styles/informational-overlays.css',
'styles/compose.css',
'styles/reactions.css',
'styles/left-sidebar.css',
'styles/right-sidebar.css',
'styles/overlay.css',
'styles/pygments.css',
'styles/thirdparty-fonts.css',
'styles/media.css',
                # We don't want fonts.css on QtWebKit, so it's omitted here
),
'output_filename': 'min/app-fontcompat.css'
},
'app': {
'source_filenames': (
'third/bootstrap-notify/css/bootstrap-notify.css',
'third/spectrum/spectrum.css',
'third/jquery-perfect-scrollbar/css/perfect-scrollbar.css',
'styles/components.css',
'styles/zulip.css',
'styles/settings.css',
'styles/subscriptions.css',
'styles/drafts.css',
'styles/informational-overlays.css',
'styles/compose.css',
'styles/reactions.css',
'styles/left-sidebar.css',
'styles/right-sidebar.css',
'styles/overlay.css',
'styles/pygments.css',
'styles/thirdparty-fonts.css',
'styles/fonts.css',
'styles/media.css',
),
'output_filename': 'min/app.css'
},
'common': {
'source_filenames': (
'third/bootstrap/css/bootstrap.css',
'third/bootstrap/css/bootstrap-btn.css',
'third/bootstrap/css/bootstrap-responsive.css',
),
'output_filename': 'min/common.css'
},
},
'JAVASCRIPT': {},
}
# Useful reading on how this works is in
# https://zulip.readthedocs.io/en/latest/front-end-build-process.html
JS_SPECS = {
'common': {
'source_filenames': [
'node_modules/jquery/dist/jquery.js',
'node_modules/underscore/underscore.js',
'js/blueslip.js',
'third/bootstrap/js/bootstrap.js',
'js/common.js',
],
'output_filename': 'min/common.js'
},
'landing-page': {
'source_filenames': [
'js/portico/landing-page.js',
],
'output_filename': 'min/landing.js'
},
'signup': {
'source_filenames': [
'js/portico/signup.js',
'node_modules/jquery-validation/dist/jquery.validate.js',
],
'output_filename': 'min/signup.js'
},
'zxcvbn': {
'source_filenames': [],
'minifed_source_filenames': [
'node_modules/zxcvbn/dist/zxcvbn.js',
],
'output_filename': 'min/zxcvbn.js'
},
'api': {
'source_filenames': ['js/portico/api.js'],
'output_filename': 'min/api.js'
},
'app_debug': {
'source_filenames': ['js/debug.js'],
'output_filename': 'min/app_debug.js'
},
'app': {
'source_filenames': [
'third/bootstrap-notify/js/bootstrap-notify.js',
'third/html5-formdata/formdata.js',
'node_modules/jquery-validation/dist/jquery.validate.js',
'third/jquery-form/jquery.form.js',
'third/jquery-filedrop/jquery.filedrop.js',
'third/jquery-caret/jquery.caret.1.5.2.js',
'node_modules/xdate/src/xdate.js',
'third/jquery-mousewheel/jquery.mousewheel.js',
'third/jquery-throttle-debounce/jquery.ba-throttle-debounce.js',
'third/jquery-idle/jquery.idle.js',
'third/jquery-autosize/jquery.autosize.js',
'third/jquery-perfect-scrollbar/js/perfect-scrollbar.js',
'third/lazyload/lazyload.js',
'third/spectrum/spectrum.js',
'third/sockjs/sockjs-0.3.4.js',
'node_modules/string.prototype.codepointat/codepointat.js',
'node_modules/winchan/winchan.js',
'node_modules/handlebars/dist/handlebars.runtime.js',
'third/marked/lib/marked.js',
'generated/emoji/emoji_codes.js',
'templates/compiled.js',
'js/feature_flags.js',
'js/loading.js',
'js/util.js',
'js/dict.js',
'js/components.js',
'js/localstorage.js',
'js/drafts.js',
'js/channel.js',
'js/setup.js',
'js/unread_ui.js',
'js/muting.js',
'js/muting_ui.js',
'js/message_viewport.js',
'js/rows.js',
'js/people.js',
'js/unread.js',
'js/topic_list.js',
'js/pm_list.js',
'js/stream_list.js',
'js/filter.js',
'js/message_list_view.js',
'js/message_list.js',
'js/message_live_update.js',
'js/narrow.js',
'js/reload.js',
'js/compose_fade.js',
'js/fenced_code.js',
'js/echo.js',
'js/socket.js',
'js/compose.js',
'js/stream_color.js',
'js/stream_data.js',
'js/subs.js',
'js/message_edit.js',
'js/condense.js',
'js/resize.js',
'js/floating_recipient_bar.js',
'js/ui.js',
'js/pointer.js',
'js/click_handlers.js',
'js/scroll_bar.js',
'js/gear_menu.js',
'js/copy_and_paste.js',
'js/stream_popover.js',
'js/popovers.js',
'js/typeahead_helper.js',
'js/search_suggestion.js',
'js/search.js',
'js/composebox_typeahead.js',
'js/navigate.js',
'js/hotkey.js',
'js/favicon.js',
'js/notifications.js',
'js/hashchange.js',
'js/invite.js',
'js/message_flags.js',
'js/alert_words.js',
'js/alert_words_ui.js',
'js/attachments_ui.js',
'js/message_store.js',
'js/server_events.js',
'js/zulip.js',
'js/activity.js',
'js/user_events.js',
'js/colorspace.js',
'js/timerender.js',
'js/tutorial.js',
'js/templates.js',
'js/upload_widget.js',
'js/avatar.js',
'js/realm_icon.js',
'js/settings.js',
'js/admin.js',
'js/tab_bar.js',
'js/emoji.js',
'js/referral.js',
'js/custom_markdown.js',
'js/bot_data.js',
'js/reactions.js',
# JS bundled by webpack is also included here if PIPELINE_ENABLED setting is true
],
'output_filename': 'min/app.js'
},
'activity': {
'source_filenames': [
'third/sorttable/sorttable.js',
],
'output_filename': 'min/activity.js'
},
'stats': {
'source_filenames': [
'js/stats/stats.js',
],
'minifed_source_filenames': [
'node_modules/plotly.js/dist/plotly-basic.min.js',
],
'output_filename': 'min/stats.js'
},
# We also want to minify sockjs separately for the sockjs iframe transport
'sockjs': {
'source_filenames': ['third/sockjs/sockjs-0.3.4.js'],
'output_filename': 'min/sockjs-0.3.4.min.js'
},
}
if PIPELINE_ENABLED:
    # This is also done in test_settings.py; see the comment there.
JS_SPECS['app']['source_filenames'].append('js/bundle.js')
app_srcs = JS_SPECS['app']['source_filenames']
########################################################################
# LOGGING SETTINGS
########################################################################
ZULIP_PATHS = [
("SERVER_LOG_PATH", "/var/log/zulip/server.log"),
("ERROR_FILE_LOG_PATH", "/var/log/zulip/errors.log"),
("MANAGEMENT_LOG_PATH", "/var/log/zulip/manage.log"),
("WORKER_LOG_PATH", "/var/log/zulip/workers.log"),
("PERSISTENT_QUEUE_FILENAME", "/home/zulip/tornado/event_queues.pickle"),
("JSON_PERSISTENT_QUEUE_FILENAME", "/home/zulip/tornado/event_queues.json"),
("EMAIL_MIRROR_LOG_PATH", "/var/log/zulip/email_mirror.log"),
("EMAIL_DELIVERER_LOG_PATH", "/var/log/zulip/email-deliverer.log"),
("LDAP_SYNC_LOG_PATH", "/var/log/zulip/sync_ldap_user_data.log"),
("QUEUE_ERROR_DIR", "/var/log/zulip/queue_error"),
("STATS_DIR", "/home/zulip/stats"),
("DIGEST_LOG_PATH", "/var/log/zulip/digest.log"),
("ANALYTICS_LOG_PATH", "/var/log/zulip/analytics.log"),
]
# The Event log basically logs most significant database changes,
# which can be useful for debugging.
if EVENT_LOGS_ENABLED:
ZULIP_PATHS.append(("EVENT_LOG_DIR", "/home/zulip/logs/event_log"))
else:
EVENT_LOG_DIR = None
for (var, path) in ZULIP_PATHS:
if DEVELOPMENT:
# if DEVELOPMENT, store these files in the Zulip checkout
path = os.path.join(DEVELOPMENT_LOG_DIRECTORY, os.path.basename(path))
# only `JSON_PERSISTENT_QUEUE_FILENAME` will be stored in `var`
if var == 'JSON_PERSISTENT_QUEUE_FILENAME':
path = os.path.join(os.path.join(DEPLOY_ROOT, 'var'), os.path.basename(path))
vars()[var] = path
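# For example, in DEVELOPMENT the SERVER_LOG_PATH entry above resolves to
# <DEPLOY_ROOT>/var/log/server.log, while JSON_PERSISTENT_QUEUE_FILENAME is the
# one file redirected into <DEPLOY_ROOT>/var/ instead.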
ZULIP_WORKER_TEST_FILE = '/tmp/zulip-worker-test-file'
if IS_WORKER:
FILE_LOG_PATH = WORKER_LOG_PATH
else:
FILE_LOG_PATH = SERVER_LOG_PATH
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'default': {
'format': '%(asctime)s %(levelname)-8s %(message)s'
}
},
'filters': {
'ZulipLimiter': {
'()': 'zerver.lib.logging_util.ZulipLimiter',
},
'EmailLimiter': {
'()': 'zerver.lib.logging_util.EmailLimiter',
},
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
'nop': {
'()': 'zerver.lib.logging_util.ReturnTrue',
},
'require_really_deployed': {
'()': 'zerver.lib.logging_util.RequireReallyDeployed',
},
'skip_200_and_304': {
'()': 'django.utils.log.CallbackFilter',
'callback': zerver.lib.logging_util.skip_200_and_304,
},
},
'handlers': {
'zulip_admins': {
'level': 'ERROR',
'class': 'zerver.logging_handlers.AdminZulipHandler',
# For testing the handler delete the next line
'filters': ['ZulipLimiter', 'require_debug_false', 'require_really_deployed'],
'formatter': 'default'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'default'
},
'file': {
'level': 'DEBUG',
'class': 'logging.handlers.WatchedFileHandler',
'formatter': 'default',
'filename': FILE_LOG_PATH,
},
'errors_file': {
'level': 'WARNING',
'class': 'logging.handlers.WatchedFileHandler',
'formatter': 'default',
'filename': ERROR_FILE_LOG_PATH,
},
},
'loggers': {
'': {
'handlers': ['console', 'file', 'errors_file'],
'level': 'INFO',
'propagate': False,
},
'django': {
            'handlers': ((['zulip_admins'] if ERROR_REPORTING else []) +
                         ['console', 'file', 'errors_file']),
'level': 'INFO',
'propagate': False,
},
'zulip.requests': {
'handlers': ['console', 'file', 'errors_file'],
'level': 'INFO',
'propagate': False,
},
'zulip.queue': {
'handlers': ['console', 'file', 'errors_file'],
'level': 'WARNING',
'propagate': False,
},
'zulip.management': {
'handlers': ['file', 'errors_file'],
'level': 'INFO',
'propagate': False,
},
'requests': {
'handlers': ['console', 'file', 'errors_file'],
'level': 'WARNING',
'propagate': False,
},
'django.security.DisallowedHost': {
'handlers': ['file'],
'propagate': False,
},
'django.server': {
'handlers': ['console', 'file'],
'propagate': False,
'filters': ['skip_200_and_304'],
},
'django.template': {
'handlers': ['console'],
'filters': ['require_debug_true'],
'level': 'DEBUG',
'propagate': False,
},
## Uncomment the following to get all database queries logged to the console
# 'django.db': {
# 'handlers': ['console'],
# 'level': 'DEBUG',
# 'propagate': False,
# },
}
}
ACCOUNT_ACTIVATION_DAYS = 7
LOGIN_REDIRECT_URL = '/'
# Client-side polling timeout for get_events, in milliseconds.
# We configure this here so that the client test suite can override it.
# We already kill the connection server-side with heartbeat events,
# but it's good to have a safety. This value should be greater than
# (HEARTBEAT_MIN_FREQ_SECS + 10)
POLL_TIMEOUT = 90 * 1000
# iOS App IDs
ZULIP_IOS_APP_ID = 'com.zulip.Zulip'
DBX_IOS_APP_ID = 'com.dropbox.Zulip'
########################################################################
# SSO AND LDAP SETTINGS
########################################################################
USING_APACHE_SSO = ('zproject.backends.ZulipRemoteUserBackend' in AUTHENTICATION_BACKENDS)
if len(AUTHENTICATION_BACKENDS) == 1 and (AUTHENTICATION_BACKENDS[0] ==
"zproject.backends.ZulipRemoteUserBackend"):
HOME_NOT_LOGGED_IN = "/accounts/login/sso"
ONLY_SSO = True
else:
HOME_NOT_LOGGED_IN = '/login'
ONLY_SSO = False
AUTHENTICATION_BACKENDS += ('zproject.backends.ZulipDummyBackend',)
POPULATE_PROFILE_VIA_LDAP = bool(AUTH_LDAP_SERVER_URI)
if POPULATE_PROFILE_VIA_LDAP and \
'zproject.backends.ZulipLDAPAuthBackend' not in AUTHENTICATION_BACKENDS:
AUTHENTICATION_BACKENDS += ('zproject.backends.ZulipLDAPUserPopulator',)
else:
    POPULATE_PROFILE_VIA_LDAP = (
        'zproject.backends.ZulipLDAPAuthBackend' in AUTHENTICATION_BACKENDS or
        POPULATE_PROFILE_VIA_LDAP)
########################################################################
# GITHUB AUTHENTICATION SETTINGS
########################################################################
# SOCIAL_AUTH_GITHUB_KEY is set in /etc/zulip/settings.py
SOCIAL_AUTH_GITHUB_SECRET = get_secret('social_auth_github_secret')
SOCIAL_AUTH_LOGIN_ERROR_URL = '/login/'
SOCIAL_AUTH_GITHUB_SCOPE = ['email']
SOCIAL_AUTH_GITHUB_ORG_KEY = SOCIAL_AUTH_GITHUB_KEY
SOCIAL_AUTH_GITHUB_ORG_SECRET = SOCIAL_AUTH_GITHUB_SECRET
SOCIAL_AUTH_GITHUB_TEAM_KEY = SOCIAL_AUTH_GITHUB_KEY
SOCIAL_AUTH_GITHUB_TEAM_SECRET = SOCIAL_AUTH_GITHUB_SECRET
########################################################################
# EMAIL SETTINGS
########################################################################
if EMAIL_BACKEND is not None:
# If the server admin specified a custom email backend, use that.
pass
elif not EMAIL_HOST and PRODUCTION:
# If an email host is not specified, fail silently and gracefully
EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
elif DEVELOPMENT:
# In the dev environment, emails are printed to the run-dev.py console.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
else:
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST_PASSWORD = get_secret('email_password')
if EMAIL_GATEWAY_PASSWORD is None:
EMAIL_GATEWAY_PASSWORD = get_secret('email_gateway_password')
if vars().get("AUTH_LDAP_BIND_PASSWORD") is None:
AUTH_LDAP_BIND_PASSWORD = get_secret('auth_ldap_bind_password')
# Set the sender email address for Django traceback error reporting
if SERVER_EMAIL is None:
SERVER_EMAIL = DEFAULT_FROM_EMAIL
########################################################################
# MISC SETTINGS
########################################################################
if PRODUCTION:
# Filter out user data
DEFAULT_EXCEPTION_REPORTER_FILTER = 'zerver.filters.ZulipExceptionReporterFilter'
# This is a debugging option only
PROFILE_ALL_REQUESTS = False
CROSS_REALM_BOT_EMAILS = set(('feedback@zulip.com', 'notification-bot@zulip.com'))
CONTRIBUTORS_DATA = os.path.join(STATIC_ROOT, 'generated/github-contributors.json')
| |
"""Test the Coinbase config flow."""
from unittest.mock import patch
from coinbase.wallet.error import AuthenticationError
from requests.models import Response
from homeassistant import config_entries
from homeassistant.components.coinbase.const import (
CONF_CURRENCIES,
CONF_EXCHANGE_RATES,
CONF_YAML_API_TOKEN,
DOMAIN,
)
from homeassistant.const import CONF_API_KEY, CONF_API_TOKEN
from .common import (
init_mock_coinbase,
mock_get_current_user,
mock_get_exchange_rates,
mocked_get_accounts,
)
from .const import BAD_CURRENCY, BAD_EXCHANGE_RATE, GOOD_CURRENCY, GOOD_EXCHANGE_RATE
from tests.common import MockConfigEntry
async def test_form(hass):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"coinbase.wallet.client.Client.get_current_user",
return_value=mock_get_current_user(),
), patch(
"coinbase.wallet.client.Client.get_accounts", new=mocked_get_accounts
), patch(
"coinbase.wallet.client.Client.get_exchange_rates",
return_value=mock_get_exchange_rates(),
), patch(
"homeassistant.components.coinbase.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.coinbase.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_API_KEY: "123456",
CONF_API_TOKEN: "AbCDeF",
},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "Test User"
assert result2["data"] == {CONF_API_KEY: "123456", CONF_API_TOKEN: "AbCDeF"}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
response = Response()
response.status_code = 401
api_auth_error = AuthenticationError(
response,
"authentication_error",
"invalid signature",
[{"id": "authentication_error", "message": "invalid signature"}],
)
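    # This mirrors the 401 "invalid signature" payload the Coinbase client
    # raises for bad credentials, so the flow below is expected to surface it
    # as the "invalid_auth" form error.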
with patch(
"coinbase.wallet.client.Client.get_current_user",
side_effect=api_auth_error,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_API_KEY: "123456",
CONF_API_TOKEN: "AbCDeF",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"coinbase.wallet.client.Client.get_current_user",
side_effect=ConnectionError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_API_KEY: "123456",
CONF_API_TOKEN: "AbCDeF",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_catch_all_exception(hass):
"""Test we handle unknown exceptions."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"coinbase.wallet.client.Client.get_current_user",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_API_KEY: "123456",
CONF_API_TOKEN: "AbCDeF",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
async def test_option_form(hass):
"""Test we handle a good wallet currency option."""
with patch(
"coinbase.wallet.client.Client.get_current_user",
return_value=mock_get_current_user(),
), patch(
"coinbase.wallet.client.Client.get_accounts", new=mocked_get_accounts
), patch(
"coinbase.wallet.client.Client.get_exchange_rates",
return_value=mock_get_exchange_rates(),
), patch(
"homeassistant.components.coinbase.update_listener"
) as mock_update_listener:
config_entry = await init_mock_coinbase(hass)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(config_entry.entry_id)
await hass.async_block_till_done()
result2 = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_CURRENCIES: [GOOD_CURRENCY],
CONF_EXCHANGE_RATES: [GOOD_EXCHANGE_RATE],
},
)
assert result2["type"] == "create_entry"
await hass.async_block_till_done()
assert len(mock_update_listener.mock_calls) == 1
async def test_form_bad_account_currency(hass):
"""Test we handle a bad currency option."""
with patch(
"coinbase.wallet.client.Client.get_current_user",
return_value=mock_get_current_user(),
), patch(
"coinbase.wallet.client.Client.get_accounts", new=mocked_get_accounts
), patch(
"coinbase.wallet.client.Client.get_exchange_rates",
return_value=mock_get_exchange_rates(),
):
config_entry = await init_mock_coinbase(hass)
result = await hass.config_entries.options.async_init(config_entry.entry_id)
await hass.async_block_till_done()
result2 = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_CURRENCIES: [BAD_CURRENCY],
CONF_EXCHANGE_RATES: [],
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "currency_unavaliable"}
async def test_form_bad_exchange_rate(hass):
"""Test we handle a bad exchange rate."""
with patch(
"coinbase.wallet.client.Client.get_current_user",
return_value=mock_get_current_user(),
), patch(
"coinbase.wallet.client.Client.get_accounts", new=mocked_get_accounts
), patch(
"coinbase.wallet.client.Client.get_exchange_rates",
return_value=mock_get_exchange_rates(),
):
config_entry = await init_mock_coinbase(hass)
result = await hass.config_entries.options.async_init(config_entry.entry_id)
await hass.async_block_till_done()
result2 = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_CURRENCIES: [],
CONF_EXCHANGE_RATES: [BAD_EXCHANGE_RATE],
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "exchange_rate_unavaliable"}
async def test_option_catch_all_exception(hass):
"""Test we handle an unknown exception in the option flow."""
with patch(
"coinbase.wallet.client.Client.get_current_user",
return_value=mock_get_current_user(),
), patch(
"coinbase.wallet.client.Client.get_accounts", new=mocked_get_accounts
), patch(
"coinbase.wallet.client.Client.get_exchange_rates",
return_value=mock_get_exchange_rates(),
):
config_entry = await init_mock_coinbase(hass)
result = await hass.config_entries.options.async_init(config_entry.entry_id)
await hass.async_block_till_done()
with patch(
"coinbase.wallet.client.Client.get_accounts",
side_effect=Exception,
):
result2 = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_CURRENCIES: [],
CONF_EXCHANGE_RATES: ["ETH"],
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
async def test_yaml_import(hass):
"""Test YAML import works."""
conf = {
CONF_API_KEY: "123456",
CONF_YAML_API_TOKEN: "AbCDeF",
CONF_CURRENCIES: ["BTC", "USD"],
CONF_EXCHANGE_RATES: ["ATOM", "BTC"],
}
with patch(
"coinbase.wallet.client.Client.get_current_user",
return_value=mock_get_current_user(),
), patch(
"coinbase.wallet.client.Client.get_accounts", new=mocked_get_accounts
), patch(
"coinbase.wallet.client.Client.get_exchange_rates",
return_value=mock_get_exchange_rates(),
), patch(
"homeassistant.components.coinbase.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.coinbase.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=conf
)
assert result["type"] == "create_entry"
assert result["title"] == "Test User"
assert result["data"] == {CONF_API_KEY: "123456", CONF_API_TOKEN: "AbCDeF"}
assert result["options"] == {
CONF_CURRENCIES: ["BTC", "USD"],
CONF_EXCHANGE_RATES: ["ATOM", "BTC"],
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_yaml_existing(hass):
"""Test YAML ignored when already processed."""
MockConfigEntry(
domain=DOMAIN,
data={
CONF_API_KEY: "123456",
CONF_API_TOKEN: "AbCDeF",
},
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={
CONF_API_KEY: "123456",
CONF_YAML_API_TOKEN: "AbCDeF",
},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import inspect
from typing import Union, Iterator, Tuple
from pyspark.sql.functions import mean, lit
from pyspark.testing.sqlutils import ReusedSQLTestCase, \
have_pandas, have_pyarrow, pandas_requirement_message, \
pyarrow_requirement_message
from pyspark.sql.pandas.typehints import infer_eval_type
from pyspark.sql.pandas.functions import pandas_udf, PandasUDFType
from pyspark.sql import Row
if have_pandas:
import pandas as pd
import numpy as np
from pandas.util.testing import assert_frame_equal
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class PandasUDFTypeHintsTests(ReusedSQLTestCase):
def test_type_annotation_scalar(self):
def func(col: pd.Series) -> pd.Series:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: pd.DataFrame, col1: pd.Series) -> pd.DataFrame:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: pd.DataFrame, *args: pd.Series) -> pd.Series:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: pd.Series, *args: pd.Series, **kwargs: pd.DataFrame) -> pd.Series:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: pd.Series, *, col2: pd.DataFrame) -> pd.DataFrame:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: Union[pd.Series, pd.DataFrame], *, col2: pd.DataFrame) -> pd.Series:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def test_type_annotation_scalar_iter(self):
def func(iter: Iterator[pd.Series]) -> Iterator[pd.Series]:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)
def func(iter: Iterator[Tuple[pd.DataFrame, pd.Series]]) -> Iterator[pd.DataFrame]:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)
def func(iter: Iterator[Tuple[pd.DataFrame, ...]]) -> Iterator[pd.Series]:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)
def func(
iter: Iterator[Tuple[Union[pd.DataFrame, pd.Series], ...]]
) -> Iterator[pd.Series]:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)
def test_type_annotation_group_agg(self):
def func(col: pd.Series) -> str:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: pd.DataFrame, col1: pd.Series) -> int:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: pd.DataFrame, *args: pd.Series) -> Row:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: pd.Series, *args: pd.Series, **kwargs: pd.DataFrame) -> str:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: pd.Series, *, col2: pd.DataFrame) -> float:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: Union[pd.Series, pd.DataFrame], *, col2: pd.DataFrame) -> float:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def test_type_annotation_negative(self):
def func(col: str) -> pd.Series:
pass
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*str",
infer_eval_type, inspect.signature(func))
def func(col: pd.DataFrame, col1: int) -> pd.DataFrame:
pass
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*int",
infer_eval_type, inspect.signature(func))
def func(col: Union[pd.DataFrame, str], col1: int) -> pd.DataFrame:
pass
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*str",
infer_eval_type, inspect.signature(func))
def func(col: pd.Series) -> Tuple[pd.DataFrame]:
pass
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*Tuple",
infer_eval_type, inspect.signature(func))
def func(col, *args: pd.Series) -> pd.Series:
pass
self.assertRaisesRegex(
ValueError,
"should be specified.*Series",
infer_eval_type, inspect.signature(func))
def func(col: pd.Series, *args: pd.Series, **kwargs: pd.DataFrame):
pass
self.assertRaisesRegex(
ValueError,
"should be specified.*Series",
infer_eval_type, inspect.signature(func))
def func(col: pd.Series, *, col2) -> pd.DataFrame:
pass
self.assertRaisesRegex(
ValueError,
"should be specified.*Series",
infer_eval_type, inspect.signature(func))
def test_scalar_udf_type_hint(self):
df = self.spark.range(10).selectExpr("id", "id as v")
def plus_one(v: Union[pd.Series, pd.DataFrame]) -> pd.Series:
return v + 1
plus_one = pandas_udf("long")(plus_one)
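        # pandas_udf infers the SCALAR eval type here from the
        # pd.Series -> pd.Series type hints, so no explicit PandasUDFType is
        # needed (see test_type_annotation_scalar above).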
actual = df.select(plus_one(df.v).alias("plus_one"))
expected = df.selectExpr("(v + 1) as plus_one")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_scalar_iter_udf_type_hint(self):
df = self.spark.range(10).selectExpr("id", "id as v")
def plus_one(itr: Iterator[pd.Series]) -> Iterator[pd.Series]:
for s in itr:
yield s + 1
plus_one = pandas_udf("long")(plus_one)
actual = df.select(plus_one(df.v).alias("plus_one"))
expected = df.selectExpr("(v + 1) as plus_one")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_group_agg_udf_type_hint(self):
df = self.spark.range(10).selectExpr("id", "id as v")
def weighted_mean(v: pd.Series, w: pd.Series) -> float:
return np.average(v, weights=w)
weighted_mean = pandas_udf("double")(weighted_mean)
actual = df.groupby('id').agg(weighted_mean(df.v, lit(1.0))).sort('id')
expected = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort('id')
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_ignore_type_hint_in_group_apply_in_pandas(self):
df = self.spark.range(10)
def pandas_plus_one(v: pd.DataFrame) -> pd.DataFrame:
return v + 1
actual = df.groupby('id').applyInPandas(pandas_plus_one, schema=df.schema).sort('id')
expected = df.selectExpr("id + 1 as id")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_ignore_type_hint_in_cogroup_apply_in_pandas(self):
df = self.spark.range(10)
def pandas_plus_one(left: pd.DataFrame, right: pd.DataFrame) -> pd.DataFrame:
return left + 1
actual = df.groupby('id').cogroup(
self.spark.range(10).groupby("id")
).applyInPandas(pandas_plus_one, schema=df.schema).sort('id')
expected = df.selectExpr("id + 1 as id")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_ignore_type_hint_in_map_in_pandas(self):
df = self.spark.range(10)
def pandas_plus_one(iter: Iterator[pd.DataFrame]) -> Iterator[pd.DataFrame]:
return map(lambda v: v + 1, iter)
actual = df.mapInPandas(pandas_plus_one, schema=df.schema)
expected = df.selectExpr("id + 1 as id")
assert_frame_equal(expected.toPandas(), actual.toPandas())
if __name__ == "__main__":
    from pyspark.sql.tests.test_pandas_udf_typehints import *  # noqa: F401
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| |
"""
parser.http package (imdb package).
This package provides the IMDbHTTPAccessSystem class used to access
IMDb's data through the web interface.
the imdb.IMDb function will return an instance of this class when
called with the 'accessSystem' argument set to "http" or "web"
or "html" (this is the default).
Copyright 2004-2018 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import logging
import socket
import ssl
from codecs import lookup
from urllib.parse import quote_plus
from urllib.request import FancyURLopener
from imdb import IMDbBase
from imdb.utils import analyze_title
from imdb._exceptions import IMDbDataAccessError, IMDbParserError
from . import (
companyParser,
movieParser,
personParser,
searchMovieParser,
searchPersonParser,
searchCompanyParser,
searchKeywordParser
)
from . import topBottomParser
# Logger for miscellaneous functions.
_aux_logger = logging.getLogger('imdbpy.parser.http.aux')
class _ModuleProxy:
"""A proxy to instantiate and access parsers."""
def __init__(self, module, defaultKeys=None):
"""Initialize a proxy for the given module; defaultKeys, if set,
        must be a dictionary of values to set on instantiated objects."""
if defaultKeys is None:
defaultKeys = {}
self._defaultKeys = defaultKeys
self._module = module
def __getattr__(self, name):
"""Called only when no look-up is found."""
_sm = self._module
# Read the _OBJECTS dictionary to build the asked parser.
if name in _sm._OBJECTS:
_entry = _sm._OBJECTS[name]
# Initialize the parser.
kwds = {}
parserClass = _entry[0][0]
obj = parserClass(**kwds)
attrsToSet = self._defaultKeys.copy()
attrsToSet.update(_entry[1] or {})
# Set attribute to the object.
for key in attrsToSet:
setattr(obj, key, attrsToSet[key])
setattr(self, name, obj)
return obj
return getattr(_sm, name)
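    # Example: the first access to an attribute named in the module's _OBJECTS
    # dict (the concrete parser names live in each parser module) instantiates
    # that parser, applies _defaultKeys, caches the instance on the proxy via
    # setattr(), and returns it; later accesses reuse the cached object.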
# The cookies for the "adult" search.
# Please don't mess with these accounts.
_cookie_id = 'BCYmoyqSm2WglmOzG-SrFWSvVpxsTZOB0qEOOqmAwCBxCbaNgKOxd0DTKzUvt7t04Pya5gV2tUrpDmYxrc1Dr54DQj2UX' \
'I7QI35__M5-HI2KrbOI3PjDz6M-_U3HG8topMfN64R24tmBixoZhMYXVaEc556lf0Z4gQNJVYRANXvwytP5v1lpfeToRlu9aVJwN4kT'
_cookie_uu = 'BCYquDS8Y2i8R1pJxS4nB77YrhjHHXeOea2Xl9KtZvE6RZKVfMvzTGU4Vl5-yxfPbgRSiFJasyf-hhPuVvXyaHlfeBjNl' \
'bFT8hz2HzFFkQ_SxKxq05J51gi7Fv4SaAws1M-i7zmQ1TRunfJqCVIYqPwIs2NO7s4_YDH2ZoISVGLgca8OY2K58HychOZB1oRWHVe' \
'AJNhLJMrCWJBuGRLCNnQK5X9tA0dPPntr2Ussy0ouul-N1GQz-8y5vda3JJ_C6xkwmHcA6JrOdOFO_HqMWjVSXuxGEdrXC919JM9H0' \
'vooVvKeVgAEJnTh2GiVlUJUoH3c'
class _FakeURLOpener(object):
"""Fake URLOpener object, used to return empty strings instead of
errors.
"""
def __init__(self, url, headers):
self.url = url
self.headers = headers
def read(self, *args, **kwds):
return ''
def close(self, *args, **kwds):
pass
def info(self, *args, **kwds):
return self.headers
class IMDbURLopener(FancyURLopener):
"""Fetch web pages and handle errors."""
_logger = logging.getLogger('imdbpy.parser.http.urlopener')
def __init__(self, *args, **kwargs):
self._last_url = ''
kwargs['context'] = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
FancyURLopener.__init__(self, *args, **kwargs)
# Headers to add to every request.
# XXX: IMDb's web server doesn't like urllib-based programs,
        # so let's pretend to be Mozilla.
        # Wow! I'm shocked by my total lack of ethics! <g>
for header in ('User-Agent', 'User-agent', 'user-agent'):
self.del_header(header)
self.set_header('User-Agent', 'Mozilla/5.0')
self.set_header('Accept-Language', 'en-us,en;q=0.5')
# XXX: This class is used also to perform "Exact Primary
# [Title|Name]" searches, and so by default the cookie is set.
c_header = 'uu=%s; id=%s' % (_cookie_uu, _cookie_id)
self.set_header('Cookie', c_header)
def get_proxy(self):
"""Return the used proxy, or an empty string."""
return self.proxies.get('http', '')
def set_proxy(self, proxy):
"""Set the proxy."""
if not proxy:
if 'http' in self.proxies:
del self.proxies['http']
else:
if not proxy.lower().startswith('http://'):
proxy = 'http://%s' % proxy
self.proxies['http'] = proxy
def set_header(self, header, value, _overwrite=True):
"""Set a default header."""
if _overwrite:
self.del_header(header)
self.addheaders.append((header, value))
def get_header(self, header):
"""Return the first value of a header, or None
if not present."""
for index in range(len(self.addheaders)):
if self.addheaders[index][0] == header:
return self.addheaders[index][1]
return None
def del_header(self, header):
"""Remove a default header."""
for index in range(len(self.addheaders)):
if self.addheaders[index][0] == header:
del self.addheaders[index]
break
def retrieve_unicode(self, url, size=-1):
"""Retrieves the given URL, and returns a unicode string,
trying to guess the encoding of the data (assuming utf8
by default)"""
encode = None
try:
if size != -1:
self.set_header('Range', 'bytes=0-%d' % size)
uopener = self.open(url)
kwds = {}
content = uopener.read(**kwds)
self._last_url = uopener.url
# Maybe the server is so nice to tell us the charset...
server_encode = (uopener.info().get_charsets() or [None])[0]
# Otherwise, look at the content-type HTML meta tag.
if server_encode is None and content:
begin_h = content.find(b'text/html; charset=')
if begin_h != -1:
                    end_h = content[19 + begin_h:].find(b'"')
                    if end_h != -1:
                        server_encode = content[19 + begin_h:19 + begin_h + end_h]
                        server_encode = server_encode.decode('ascii', 'replace')
if server_encode:
try:
if lookup(server_encode):
encode = server_encode
except (LookupError, ValueError, TypeError):
pass
uopener.close()
if size != -1:
self.del_header('Range')
self.close()
except IOError as e:
if size != -1:
# Ensure that the Range header is removed.
self.del_header('Range')
raise IMDbDataAccessError(
{'errcode': e.errno,
'errmsg': str(e.strerror),
'url': url,
'proxy': self.get_proxy(),
'exception type': 'IOError',
'original exception': e}
)
if encode is None:
encode = 'utf8'
# The detection of the encoding is error prone...
self._logger.warning('Unable to detect the encoding of the retrieved page'
                     ' [%s]; falling back to default utf8.', url)
if isinstance(content, str):
return content
return str(content, encode, 'replace')
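# The fallback above looks for a 'text/html; charset=...' declaration in
# the raw bytes; a small self-contained illustration of that scan on a
# hand-made page (variable names are illustrative only):
#
#   >>> page = b'<meta content="text/html; charset=iso-8859-1">'
#   >>> begin_h = page.find(b'text/html; charset=')
#   >>> end_h = page[19 + begin_h:].find(b'"')
#   >>> page[19 + begin_h:19 + begin_h + end_h]
#   b'iso-8859-1'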
def http_error_default(self, url, fp, errcode, errmsg, headers):
if errcode == 404:
self._logger.warning('404 code returned for %s: %s (headers: %s)',
url, errmsg, headers)
return _FakeURLOpener(url, headers)
raise IMDbDataAccessError(
{'url': 'http:%s' % url,
'errcode': errcode,
'errmsg': errmsg,
'headers': headers,
'error type': 'http_error_default',
'proxy': self.get_proxy()}
)
def open_unknown(self, fullurl, data=None):
raise IMDbDataAccessError(
{'fullurl': fullurl,
'data': str(data),
'error type': 'open_unknown',
'proxy': self.get_proxy()}
)
def open_unknown_proxy(self, proxy, fullurl, data=None):
raise IMDbDataAccessError(
{'proxy': str(proxy),
'fullurl': fullurl,
'error type': 'open_unknown_proxy',
'data': str(data)}
)
class IMDbHTTPAccessSystem(IMDbBase):
"""The class used to access IMDb's data through the web."""
accessSystem = 'http'
_http_logger = logging.getLogger('imdbpy.parser.http')
def __init__(self, adultSearch=True, proxy=-1, cookie_id=-1,
timeout=30, cookie_uu=None, *arguments, **keywords):
"""Initialize the access system."""
IMDbBase.__init__(self, *arguments, **keywords)
self.urlOpener = IMDbURLopener()
self._getRefs = True
self._mdparse = False
self.set_timeout(timeout)
self.do_adult_search(adultSearch)
if cookie_id != -1:
if cookie_id is None:
self.del_cookies()
elif cookie_uu is not None:
self.set_cookies(cookie_id, cookie_uu)
if proxy != -1:
self.set_proxy(proxy)
_def = {'_modFunct': self._defModFunct, '_as': self.accessSystem}
# Proxy objects.
self.smProxy = _ModuleProxy(searchMovieParser, defaultKeys=_def)
self.spProxy = _ModuleProxy(searchPersonParser, defaultKeys=_def)
self.scompProxy = _ModuleProxy(searchCompanyParser, defaultKeys=_def)
self.skProxy = _ModuleProxy(searchKeywordParser, defaultKeys=_def)
self.mProxy = _ModuleProxy(movieParser, defaultKeys=_def)
self.pProxy = _ModuleProxy(personParser, defaultKeys=_def)
self.compProxy = _ModuleProxy(companyParser, defaultKeys=_def)
self.topBottomProxy = _ModuleProxy(topBottomParser, defaultKeys=_def)
def _normalize_movieID(self, movieID):
"""Normalize the given movieID."""
try:
return '%07d' % int(movieID)
except ValueError as e:
raise IMDbParserError('invalid movieID "%s": %s' % (movieID, e))
def _normalize_personID(self, personID):
"""Normalize the given personID."""
try:
return '%07d' % int(personID)
except ValueError as e:
raise IMDbParserError('invalid personID "%s": %s' % (personID, e))
def _normalize_companyID(self, companyID):
"""Normalize the given companyID."""
try:
return '%07d' % int(companyID)
except ValueError as e:
raise IMDbParserError('invalid companyID "%s": %s' % (companyID, e))
def get_imdbMovieID(self, movieID):
"""Translate a movieID in an imdbID; in this implementation
the movieID _is_ the imdbID.
"""
return movieID
def get_imdbPersonID(self, personID):
"""Translate a personID in an imdbID; in this implementation
the personID _is_ the imdbID.
"""
return personID
def get_imdbCompanyID(self, companyID):
"""Translate a companyID in an imdbID; in this implementation
the companyID _is_ the imdbID.
"""
return companyID
def get_proxy(self):
"""Return the used proxy or an empty string."""
return self.urlOpener.get_proxy()
def set_proxy(self, proxy):
"""Set the web proxy to use.
It should be a string like 'http://localhost:8080/'; if the
string is empty, no proxy will be used.
If the HTTP_PROXY environment variable is set, its value is
used automatically.
"""
self.urlOpener.set_proxy(proxy)
def set_timeout(self, timeout):
"""Set the default timeout, in seconds, of the connection."""
try:
timeout = int(timeout)
except Exception:
timeout = 0
if timeout <= 0:
timeout = None
socket.setdefaulttimeout(timeout)
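# A hedged usage sketch for the two connection settings above; the
# instance name and proxy address are illustrative, and no network
# access is needed for these calls:
#
#   >>> ia = IMDbHTTPAccessSystem()
#   >>> ia.set_proxy('localhost:8080')   # 'http://' is prepended automatically
#   >>> ia.get_proxy()
#   'http://localhost:8080'
#   >>> ia.set_proxy('')                 # an empty string disables the proxy
#   >>> ia.set_timeout(45)               # seconds; values <= 0 mean no timeout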
def set_cookies(self, cookie_id, cookie_uu):
"""Set a cookie to access an IMDb's account."""
c_header = 'id=%s; uu=%s' % (cookie_id, cookie_uu)
self.urlOpener.set_header('Cookie', c_header)
def del_cookies(self):
"""Remove the used cookie."""
self.urlOpener.del_header('Cookie')
def do_adult_search(self, doAdult,
cookie_id=_cookie_id, cookie_uu=_cookie_uu):
"""If doAdult is true, 'adult' movies are included in the
search results; cookie_id and cookie_uu are optional
parameters to select a specific account (see your cookie
or cookies.txt file)."""
if doAdult:
self.set_cookies(cookie_id, cookie_uu)
else:
self.urlOpener.del_header('Cookie')
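# Toggling the adult-content filter only swaps the Cookie header used by
# later requests; a minimal sketch (no request is performed here, and the
# built-in account cookie is assumed to still be accepted by IMDb):
#
#   >>> ia = IMDbHTTPAccessSystem(adultSearch=False)
#   >>> ia.urlOpener.get_header('Cookie') is None
#   True
#   >>> ia.do_adult_search(True)
#   >>> ia.urlOpener.get_header('Cookie') is not None
#   True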
def _retrieve(self, url, size=-1, _noCookies=False):
"""Retrieve the given URL."""
# print url
_cookies = None
# XXX: quite obscene, but in some very limited
# cases (/ttXXXXXXX/epdate) if the cookies
# are set, a 500 error is returned.
if _noCookies:
_cookies = self.urlOpener.get_header('Cookie')
self.del_cookies()
self._http_logger.debug('fetching url %s (size: %d)', url, size)
try:
ret = self.urlOpener.retrieve_unicode(url, size=size)
finally:
if _noCookies and _cookies:
self.urlOpener.set_header('Cookie', _cookies)
return ret
def _get_search_content(self, kind, ton, results):
"""Retrieve the web page for a given search.
kind can be 'tt' (for titles), 'nm' (for names),
'co' (for companies), 'kw' (for keywords) or 'ep' (for episodes).
ton is the title or the name to search.
results is the maximum number of results to be retrieved."""
params = 'q=%s&s=%s&mx=%s' % (quote_plus(ton, safe=''), kind, str(results))
if kind == 'ep':
params = params.replace('s=ep&', 's=tt&ttype=ep&', 1)
cont = self._retrieve(self.urls['find'] % params)
# print 'URL:', imdbURL_find % params
if cont.find('Your search returned more than') == -1 or \
cont.find("displayed the exact matches") == -1:
return cont
# The retrieved page contains no results, because too many
# titles or names contain the string we're looking for.
params = 'q=%s&ls=%s&lm=0' % (quote_plus(ton, safe=''), kind)
size = 131072 + results * 512
return self._retrieve(self.urls['find'] % params, size=size)
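# For reference, the query string built above for a title search of
# "the passion" limited to 20 results would be (illustrative values,
# assuming the default 'find' URL template):
#
#   >>> 'q=%s&s=%s&mx=%s' % (quote_plus('the passion', safe=''), 'tt', str(20))
#   'q=the+passion&s=tt&mx=20'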
def _search_movie(self, title, results):
cont = self._get_search_content('tt', title, results)
return self.smProxy.search_movie_parser.parse(cont, results=results)['data']
def _search_episode(self, title, results):
t_dict = analyze_title(title)
if t_dict['kind'] == 'episode':
title = t_dict['title']
cont = self._get_search_content('ep', title, results)
return self.smProxy.search_movie_parser.parse(cont, results=results)['data']
def get_movie_main(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'reference')
return self.mProxy.movie_parser.parse(cont, mdparse=self._mdparse)
def get_movie_full_credits(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'fullcredits')
return self.mProxy.full_credits_parser.parse(cont)
def get_movie_plot(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'plotsummary')
ret = self.mProxy.plot_parser.parse(cont, getRefs=self._getRefs)
ret['info sets'] = ('plot', 'synopsis')
return ret
def get_movie_awards(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'awards')
return self.mProxy.movie_awards_parser.parse(cont)
def get_movie_taglines(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'taglines')
return self.mProxy.taglines_parser.parse(cont)
def get_movie_keywords(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'keywords')
return self.mProxy.keywords_parser.parse(cont)
def get_movie_alternate_versions(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'alternateversions')
return self.mProxy.alternateversions_parser.parse(cont, getRefs=self._getRefs)
def get_movie_crazy_credits(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'crazycredits')
return self.mProxy.crazycredits_parser.parse(cont, getRefs=self._getRefs)
def get_movie_goofs(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'goofs')
return self.mProxy.goofs_parser.parse(cont, getRefs=self._getRefs)
def get_movie_quotes(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'quotes')
return self.mProxy.quotes_parser.parse(cont, getRefs=self._getRefs)
def get_movie_release_dates(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'releaseinfo')
ret = self.mProxy.releasedates_parser.parse(cont)
ret['info sets'] = ('release dates', 'akas')
return ret
get_movie_akas = get_movie_release_dates
get_movie_release_info = get_movie_release_dates
def get_movie_vote_details(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'ratings')
return self.mProxy.ratings_parser.parse(cont)
def get_movie_trivia(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'trivia')
return self.mProxy.trivia_parser.parse(cont, getRefs=self._getRefs)
def get_movie_connections(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'movieconnections')
return self.mProxy.connections_parser.parse(cont)
def get_movie_technical(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'technical')
return self.mProxy.tech_parser.parse(cont)
def get_movie_locations(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'locations')
return self.mProxy.locations_parser.parse(cont)
def get_movie_soundtrack(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'soundtrack')
return self.mProxy.soundtrack_parser.parse(cont)
def get_movie_reviews(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'reviews?count=9999999&start=0')
return self.mProxy.reviews_parser.parse(cont)
def get_movie_critic_reviews(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'criticreviews')
return self.mProxy.criticrev_parser.parse(cont)
def get_movie_external_reviews(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'externalreviews')
return self.mProxy.externalrev_parser.parse(cont)
def get_movie_external_sites(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'externalsites')
ret = self.mProxy.externalsites_parser.parse(cont)
ret['info sets'] = ('external sites', 'misc sites', 'sound clips',
'video sites', 'photo sites', 'official sites')
return ret
def get_movie_official_sites(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'officialsites')
ret = self.mProxy.officialsites_parser.parse(cont)
ret['info sets'] = ('external sites', 'misc sites', 'sound clips',
'video sites', 'photo sites', 'official sites')
return ret
def get_movie_misc_sites(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'miscsites')
ret = self.mProxy.misclinks_parser.parse(cont)
ret['info sets'] = ('external sites', 'misc sites', 'sound clips',
'video sites', 'photo sites', 'official sites')
return ret
def get_movie_sound_clips(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'soundsites')
ret = self.mProxy.soundclips_parser.parse(cont)
ret['info sets'] = ('external sites', 'misc sites', 'sound clips',
'video sites', 'photo sites', 'official sites')
return ret
def get_movie_video_clips(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'videosites')
ret = self.mProxy.videoclips_parser.parse(cont)
ret['info sets'] = ('external sites', 'misc sites', 'sound clips',
'video sites', 'photo sites', 'official sites')
return ret
def get_movie_photo_sites(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'photosites')
ret = self.mProxy.photosites_parser.parse(cont)
ret['info sets'] = ('external sites', 'misc sites', 'sound clips',
'video sites', 'photo sites', 'official sites')
return ret
def get_movie_news(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'news')
return self.mProxy.news_parser.parse(cont, getRefs=self._getRefs)
def _purge_seasons_data(self, data_d):
if '_current_season' in data_d['data']:
del data_d['data']['_current_season']
if '_seasons' in data_d['data']:
del data_d['data']['_seasons']
return data_d
def get_movie_episodes(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'episodes')
data_d = self.mProxy.season_episodes_parser.parse(cont)
if not data_d or 'data' not in data_d:
return {}
_current_season = data_d['data'].get('_current_season', '')
_seasons = data_d['data'].get('_seasons') or []
data_d = self._purge_seasons_data(data_d)
data_d['data'].setdefault('episodes', {})
nr_eps = len(data_d['data']['episodes'].get(_current_season) or [])
for season in _seasons:
if season == _current_season:
continue
other_cont = self._retrieve(
self.urls['movie_main'] % movieID + 'episodes?season=' + str(season)
)
other_d = self.mProxy.season_episodes_parser.parse(other_cont)
other_d = self._purge_seasons_data(other_d)
other_d['data'].setdefault('episodes', {})
if not (other_d and other_d['data'] and other_d['data']['episodes'].get(season)):
continue
nr_eps += len(other_d['data']['episodes'].get(season) or [])
data_d['data']['episodes'][season] = other_d['data']['episodes'][season]
data_d['data']['number of episodes'] = nr_eps
return data_d
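# Season data is fetched with one request per extra season and merged
# into a single 'episodes' dictionary keyed by season number.  With the
# package-level entry point this is normally reached through the
# 'episodes' info set; a hedged sketch (network access required, and the
# movieID below is purely illustrative):
#
#   >>> from imdb import IMDb
#   >>> ia = IMDb('http')
#   >>> series = ia.get_movie('0389564')
#   >>> ia.update(series, 'episodes')
#   >>> sorted(series['episodes'].keys())   # one entry per season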
def get_movie_faqs(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'faq')
return self.mProxy.movie_faqs_parser.parse(cont, getRefs=self._getRefs)
def get_movie_airing(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'tvschedule')
return self.mProxy.airing_parser.parse(cont)
get_movie_tv_schedule = get_movie_airing
def get_movie_synopsis(self, movieID):
return self.get_movie_plot(movieID)
def get_movie_parents_guide(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'parentalguide')
return self.mProxy.parentsguide_parser.parse(cont)
def _search_person(self, name, results):
cont = self._get_search_content('nm', name, results)
return self.spProxy.search_person_parser.parse(cont, results=results)['data']
def get_person_main(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID)
ret = self.pProxy.maindetails_parser.parse(cont)
ret['info sets'] = ('main', 'filmography')
return ret
def get_person_filmography(self, personID):
return self.get_person_main(personID)
def get_person_biography(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'bio')
return self.pProxy.bio_parser.parse(cont, getRefs=self._getRefs)
def get_person_awards(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'awards')
return self.pProxy.person_awards_parser.parse(cont)
def get_person_other_works(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'otherworks')
return self.pProxy.otherworks_parser.parse(cont, getRefs=self._getRefs)
def get_person_publicity(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'publicity')
return self.pProxy.publicity_parser.parse(cont)
def get_person_official_sites(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'officialsites')
return self.pProxy.person_officialsites_parser.parse(cont)
def get_person_news(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'news')
return self.pProxy.news_parser.parse(cont)
def get_person_genres_links(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'filmogenre')
return self.pProxy.person_genres_parser.parse(cont)
def get_person_keywords_links(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'filmokey')
return self.pProxy.person_keywords_parser.parse(cont)
def _search_company(self, name, results):
cont = self._get_search_content('co', name, results)
url = self.urlOpener._last_url
return self.scompProxy.search_company_parser.parse(cont, url=url,
results=results)['data']
def get_company_main(self, companyID):
cont = self._retrieve(self.urls['company_main'] % companyID)
ret = self.compProxy.company_main_parser.parse(cont)
return ret
def _search_keyword(self, keyword, results):
# XXX: the IMDb web server seems to have serious problems with
# non-ASCII keywords.
# E.g.: http://www.imdb.com/keyword/fianc%E9/
# will return a 500 Internal Server Error: Redirect Recursion.
try:
cont = self._get_search_content('kw', keyword, results)
except IMDbDataAccessError:
self._http_logger.warning('unable to search for keyword %s', keyword,
exc_info=True)
return []
return self.skProxy.search_keyword_parser.parse(cont, results=results)['data']
def _get_keyword(self, keyword, results):
try:
cont = self._retrieve(self.urls['keyword_main'] % keyword)
except IMDbDataAccessError:
self._http_logger.warning('unable to get keyword %s', keyword,
exc_info=True)
return []
return self.skProxy.search_moviekeyword_parser.parse(cont, results=results)['data']
def _get_top_bottom_movies(self, kind):
if kind == 'top':
parser = self.topBottomProxy.top250_parser
url = self.urls['top250']
elif kind == 'bottom':
parser = self.topBottomProxy.bottom100_parser
url = self.urls['bottom100']
else:
return []
cont = self._retrieve(url)
return parser.parse(cont)['data']
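# End-to-end use of this access system normally goes through the
# package-level factory rather than direct instantiation; a hedged
# sketch (network access required, results change over time):
#
#   >>> from imdb import IMDb
#   >>> ia = IMDb('http')                  # selects IMDbHTTPAccessSystem
#   >>> [m['title'] for m in ia.search_movie('the matrix')][:3]
#   >>> top250 = ia.get_top250_movies()    # uses _get_top_bottom_movies('top')
#   >>> bottom100 = ia.get_bottom100_movies()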