# encoding: UTF-8
"""Library for using local store for channel data."""
__copyright__ = "Copyright (c) 2015, Facility for Rare Isotope Beams"
__author__ = "Dylan Maxwell"
from collections import OrderedDict
class ChannelStore(object):
"""
Local store for channel data.
:param owner: default owner for properties and tags
"""
def __init__(self, owner=None):
self.owner = owner
self.channels = OrderedDict()
def set(self, channel, properties={}, tags=[]):
"""
Set the properties and tags of the specified channel or list of channels.
Note that this method is destructive and will remove data associated
with the specified channel. To update the channel properties use the
ChannelStore.update() method.
:param channel: channel name or list of channel names.
:param properties: dictionary of property values
:param tags: list of tags
"""
if isinstance(channel, (tuple,list)):
channels = channel
elif isinstance(channel, basestring):
channels = [ channel ]
else:
raise TypeError("Channel name must a string or list of strings")
for ch in channels:
data = CSData()
for name, value in properties.iteritems():
p = self._toProperty(name)
data.properties[p] = value
for name in tags:
t = self._toTag(name)
if t not in data.tags:
data.tags.append(t)
self.channels[ch] = data
def update(self, channel, properties={}, tags=[]):
"""
Update the properties and tags of the specified channel or list of channels.
:param channel: channel name or list of channel names.
:param properties: dictionary of property values
:param tags: list of tags
"""
if isinstance(channel, (tuple,list)):
channels = channel
elif isinstance(channel, basestring):
channels = [ channel ]
else:
raise TypeError("Channel name must a string or list of strings")
for ch in channels:
if ch in self.channels:
data = self.channels[ch]
else:
data = CSData()
self.channels[ch] = data
for name, value in properties.iteritems():
p = self._toProperty(name)
data.properties[p] = value
for name in tags:
t = self._toTag(name)
if t not in data.tags:
data.tags.append(t)
def query(self, channel="*", properties={}, tags=[]):
"""
Query this channel store with the specified expressions.
For example: store.query("*", { "system":"REA", "device":"BPM|PM" }, [ "T1" ])
:params channel: expression to match the channel
:params properties: dictionary of property expressions to match to property values
:params tags: list of expressions to match to tags
:return: ChannelStore
"""
raise NotImplementedError()
def properties(self, channel):
"""
Get properties for the specified channel.
:return: dictionary of property names and values
"""
props = {}
for prop, value in self.channels[channel].properties.iteritems():
props[prop.name] = value
return props
def tags(self, channel):
"""
Get tags for the specified channel.
:return: list of tag names
"""
tags = []
for tag in self.channels[channel].tags:
tags.append(tag.name)
return tags
def channelSet(self):
"""
Get a list of channels in this store.
"""
return self.channels.keys()
def propertySet(self):
"""
Search channel store and return a set of property names.
:return: set of property names
"""
props = set()
for data in self.channels.values():
for prop in data.properties.iterkeys():
props.add(prop.name)
return props
def tagSet(self):
"""
Search channel store and return a set of tag names.
:return: set of tag names
"""
tags = set()
for data in self.channels.values():
for tag in data.tags:
tags.add(tag.name)
return tags
def cspropertySet(self):
"""
Search channel store and return a set of properties.
:return: set of properties
"""
props = set()
for data in self.channels.values():
for prop in data.properties.iterkeys():
props.add(prop)
return props
def cstagSet(self):
"""
Search channel store and return a set of tags.
:returns: set of tags
"""
tags = set()
for data in self.channels.values():
for tag in data.tags:
tags.add(tag)
return tags
def _toProperty(self, prop):
"""
Convert a string, tuple or CSProperty object to a CSProperty.
"""
if isinstance(prop, CSProperty):
return CSProperty(prop.name, prop.owner)
if isinstance(prop, basestring):
return CSProperty(prop)
if isinstance(prop, (tuple,list)) and (len(prop) > 0):
if len(prop) > 1:
return CSProperty(prop[0], prop[1])
else:
return CSProperty(prop[0])
raise TypeError("Cannot build CSProperty from type: {}".format(type(prop)))
def _toTag(self, tag):
"""
Convert a string, tuple or CSTag object to a CSTag.
"""
if isinstance(tag, CSTag):
return CSTag(tag.name, tag.owner)
if isinstance(tag, basestring):
return CSTag(tag)
if isinstance(tag, (tuple,list)) and (len(tag) > 0):
if len(tag) > 1:
return CSTag(tag[0], tag[1])
else:
return CSTag(tag[0])
raise TypeError("Cannot build CSTag from type: {}".format(type(tag)))
class CSData(object):
def __init__(self):
self.tags = []
self.properties = {}
def __str__(self):
return "CSData{ properties=" + str(self.properties) + ", tags=" + str(self.tags) + "}"
class CSProperty(object):
def __init__(self, name, owner=None):
self.name = name
self.owner = owner
    def __hash__(self):
        return hash(self.name)
    def __eq__(self, other):
        # Compare by name, consistent with __hash__, so duplicate properties
        # collapse in dicts and sets instead of piling up on update().
        return isinstance(other, CSProperty) and self.name == other.name
    def __ne__(self, other):
        return not self.__eq__(other)
    def __str__(self):
        return "('" + str(self.name) + "', '" + str(self.owner) + "')"
class CSTag(object):
def __init__(self, name, owner=None):
self.name = name
self.owner = owner
    def __hash__(self):
        return hash(self.name)
    def __eq__(self, other):
        # Compare by name, consistent with __hash__, so "in" checks on tag
        # lists and set membership behave as the docstrings describe.
        return isinstance(other, CSTag) and self.name == other.name
    def __ne__(self, other):
        return not self.__eq__(other)
    def __str__(self):
        return "('" + str(self.name) + "', '" + str(self.owner) + "')"
from __future__ import division
from __future__ import unicode_literals
import logging
import numbers
import sys
import pyvips
from pyvips import ffi, vips_lib, gobject_lib, \
glib_lib, Error, _to_bytes, _to_string, type_name, type_from_name
logger = logging.getLogger(__name__)
_is_PY2 = sys.version_info.major == 2
ffi.cdef('''
typedef struct _GValue {
GType gtype;
uint64_t data[2];
} GValue;
void g_value_init (GValue* value, GType gtype);
void g_value_unset (GValue* value);
GType g_type_fundamental (GType gtype);
int vips_enum_from_nick (const char* domain,
GType gtype, const char* str);
const char *vips_enum_nick (GType gtype, int value);
void g_value_set_boolean (GValue* value, int v_boolean);
void g_value_set_int (GValue* value, int i);
void g_value_set_double (GValue* value, double d);
void g_value_set_enum (GValue* value, int e);
void g_value_set_flags (GValue* value, unsigned int f);
void g_value_set_string (GValue* value, const char *str);
void g_value_set_object (GValue* value, void* object);
void vips_value_set_array_double (GValue* value,
const double* array, int n );
void vips_value_set_array_int (GValue* value,
const int* array, int n );
void vips_value_set_array_image (GValue *value, int n);
void vips_value_set_blob (GValue* value,
void (*free_fn)(void* data), void* data, size_t length);
int g_value_get_boolean (const GValue* value);
int g_value_get_int (GValue* value);
double g_value_get_double (GValue* value);
int g_value_get_enum (GValue* value);
unsigned int g_value_get_flags (GValue* value);
const char* g_value_get_string (GValue* value);
const char* vips_value_get_ref_string (const GValue* value,
size_t* length);
void* g_value_get_object (GValue* value);
double* vips_value_get_array_double (const GValue* value, int* n);
int* vips_value_get_array_int (const GValue* value, int* n);
VipsImage** vips_value_get_array_image (const GValue* value, int* n);
void* vips_value_get_blob (const GValue* value, size_t* length);
// need to make some of these by hand
GType vips_interpretation_get_type (void);
GType vips_operation_flags_get_type (void);
GType vips_band_format_get_type (void);
''')
class GValue(object):
"""Wrap GValue in a Python class.
This class wraps :class:`.GValue` in a convenient interface. You can use
instances of this class to get and set :class:`.GObject` properties.
On construction, :class:`.GValue` is all zero (empty). You can pass it to
    a get function to have it filled by :class:`.GObject`, or use
    :meth:`set_type` to set a type and :meth:`set` to set a value, then use it
    to set an object property.
GValue lifetime is managed automatically.
"""
# look up some common gtypes at init for speed
gbool_type = type_from_name('gboolean')
gint_type = type_from_name('gint')
gdouble_type = type_from_name('gdouble')
gstr_type = type_from_name('gchararray')
genum_type = type_from_name('GEnum')
gflags_type = type_from_name('GFlags')
gobject_type = type_from_name('GObject')
image_type = type_from_name('VipsImage')
array_int_type = type_from_name('VipsArrayInt')
array_double_type = type_from_name('VipsArrayDouble')
array_image_type = type_from_name('VipsArrayImage')
refstr_type = type_from_name('VipsRefString')
blob_type = type_from_name('VipsBlob')
pyvips.vips_lib.vips_band_format_get_type()
format_type = type_from_name('VipsBandFormat')
# map a gtype to the name of the corresponding Python type
_gtype_to_python = {
gbool_type: 'bool',
gint_type: 'int',
gdouble_type: 'float',
gstr_type: 'str',
refstr_type: 'str',
genum_type: 'str',
gflags_type: 'int',
gobject_type: 'GObject',
image_type: 'Image',
array_int_type: 'list[int]',
array_double_type: 'list[float]',
array_image_type: 'list[Image]',
blob_type: 'str'
}
@staticmethod
def gtype_to_python(gtype):
"""Map a gtype to the name of the Python type we use to represent it.
"""
fundamental = gobject_lib.g_type_fundamental(gtype)
if gtype in GValue._gtype_to_python:
return GValue._gtype_to_python[gtype]
if fundamental in GValue._gtype_to_python:
return GValue._gtype_to_python[fundamental]
return '<unknown type>'
@staticmethod
def to_enum(gtype, value):
"""Turn a string into an enum value ready to be passed into libvips.
"""
if isinstance(value, basestring if _is_PY2 else str):
enum_value = vips_lib.vips_enum_from_nick(b'pyvips', gtype,
_to_bytes(value))
if enum_value < 0:
raise Error('no value {0} in gtype {1} ({2})'.
format(value, type_name(gtype), gtype))
else:
enum_value = value
return enum_value
@staticmethod
def from_enum(gtype, enum_value):
"""Turn an int back into an enum string.
"""
cstr = vips_lib.vips_enum_nick(gtype, enum_value)
if cstr == 0:
raise Error('value not in enum')
return _to_string(ffi.string(cstr))
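    # Illustration (a sketch, not part of the original file): converting
    # between libvips enum nicknames and their integer values, e.g.
    #   GValue.to_enum(GValue.format_type, 'uchar')  -> the int for that nick
    #   GValue.from_enum(GValue.format_type, 0)      -> the nick for value 0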
def __init__(self):
# allocate memory for the gvalue which will be freed on GC
self.pointer = ffi.new('GValue *')
# logger.debug('GValue.__init__: pointer = %s', self.pointer)
# and tag it to be unset on GC as well
self.gvalue = ffi.gc(self.pointer, gobject_lib.g_value_unset)
# logger.debug('GValue.__init__: gvalue = %s', self.gvalue)
def set_type(self, gtype):
"""Set the type of a GValue.
GValues have a set type, fixed at creation time. Use set_type to set
the type of a GValue before assigning to it.
GTypes are 32 or 64-bit integers (depending on the platform). See
type_find.
"""
gobject_lib.g_value_init(self.gvalue, gtype)
def set(self, value):
"""Set a GValue.
The value is converted to the type of the GValue, if possible, and
assigned.
"""
# logger.debug('GValue.set: value = %s', value)
gtype = self.gvalue.gtype
fundamental = gobject_lib.g_type_fundamental(gtype)
if gtype == GValue.gbool_type:
gobject_lib.g_value_set_boolean(self.gvalue, value)
elif gtype == GValue.gint_type:
gobject_lib.g_value_set_int(self.gvalue, int(value))
elif gtype == GValue.gdouble_type:
gobject_lib.g_value_set_double(self.gvalue, value)
elif fundamental == GValue.genum_type:
gobject_lib.g_value_set_enum(self.gvalue,
GValue.to_enum(gtype, value))
elif fundamental == GValue.gflags_type:
gobject_lib.g_value_set_flags(self.gvalue, value)
elif gtype == GValue.gstr_type or gtype == GValue.refstr_type:
gobject_lib.g_value_set_string(self.gvalue, _to_bytes(value))
elif fundamental == GValue.gobject_type:
gobject_lib.g_value_set_object(self.gvalue, value.pointer)
elif gtype == GValue.array_int_type:
if isinstance(value, numbers.Number):
value = [value]
array = ffi.new('int[]', value)
vips_lib.vips_value_set_array_int(self.gvalue, array, len(value))
elif gtype == GValue.array_double_type:
if isinstance(value, numbers.Number):
value = [value]
array = ffi.new('double[]', value)
vips_lib.vips_value_set_array_double(self.gvalue, array,
len(value))
elif gtype == GValue.array_image_type:
if isinstance(value, pyvips.Image):
value = [value]
vips_lib.vips_value_set_array_image(self.gvalue, len(value))
array = vips_lib.vips_value_get_array_image(self.gvalue, ffi.NULL)
for i, image in enumerate(value):
gobject_lib.g_object_ref(image.pointer)
array[i] = image.pointer
elif gtype == GValue.blob_type:
# we need to set the blob to a copy of the string that vips_lib
# can own
memory = glib_lib.g_malloc(len(value))
ffi.memmove(memory, value, len(value))
vips_lib.vips_value_set_blob(self.gvalue,
glib_lib.g_free, memory, len(value))
else:
raise Error('unsupported gtype for set {0}, fundamental {1}'.
format(type_name(gtype), type_name(fundamental)))
def get(self):
"""Get the contents of a GValue.
The contents of the GValue are read out as a Python type.
"""
# logger.debug('GValue.get: self = %s', self)
gtype = self.gvalue.gtype
fundamental = gobject_lib.g_type_fundamental(gtype)
result = None
if gtype == GValue.gbool_type:
result = bool(gobject_lib.g_value_get_boolean(self.gvalue))
elif gtype == GValue.gint_type:
result = gobject_lib.g_value_get_int(self.gvalue)
elif gtype == GValue.gdouble_type:
result = gobject_lib.g_value_get_double(self.gvalue)
elif fundamental == GValue.genum_type:
return GValue.from_enum(gtype,
gobject_lib.g_value_get_enum(self.gvalue))
elif fundamental == GValue.gflags_type:
result = gobject_lib.g_value_get_flags(self.gvalue)
elif gtype == GValue.gstr_type:
cstr = gobject_lib.g_value_get_string(self.gvalue)
if cstr != ffi.NULL:
result = _to_string(ffi.string(cstr))
elif gtype == GValue.refstr_type:
psize = ffi.new('size_t *')
cstr = vips_lib.vips_value_get_ref_string(self.gvalue, psize)
result = _to_string(ffi.string(cstr, psize[0]))
elif gtype == GValue.image_type:
# g_value_get_object() will not add a ref ... that is
# held by the gvalue
go = gobject_lib.g_value_get_object(self.gvalue)
vi = ffi.cast('VipsImage *', go)
# we want a ref that will last with the life of the vimage:
# this ref is matched by the unref that's attached to finalize
# by Image()
gobject_lib.g_object_ref(go)
result = pyvips.Image(vi)
elif gtype == GValue.array_int_type:
pint = ffi.new('int *')
array = vips_lib.vips_value_get_array_int(self.gvalue, pint)
result = []
for i in range(0, pint[0]):
result.append(array[i])
elif gtype == GValue.array_double_type:
pint = ffi.new('int *')
array = vips_lib.vips_value_get_array_double(self.gvalue, pint)
result = []
for i in range(0, pint[0]):
result.append(array[i])
elif gtype == GValue.array_image_type:
pint = ffi.new('int *')
array = vips_lib.vips_value_get_array_image(self.gvalue, pint)
result = []
for i in range(0, pint[0]):
vi = array[i]
gobject_lib.g_object_ref(vi)
image = pyvips.Image(vi)
result.append(image)
elif gtype == GValue.blob_type:
psize = ffi.new('size_t *')
array = vips_lib.vips_value_get_blob(self.gvalue, psize)
buf = ffi.cast("char*", array)
result = ffi.unpack(buf, psize[0])
else:
raise Error('unsupported gtype for get {0}'.
format(type_name(gtype)))
return result
__all__ = ['GValue']
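# A minimal usage sketch (assumes libvips is installed so the pyvips imports
# above succeed); round-trips Python values through GValue.set() and get().
if __name__ == '__main__':
    v = GValue()
    v.set_type(GValue.gint_type)
    v.set(42)
    print(v.get())  # -> 42
    a = GValue()
    a.set_type(GValue.array_double_type)
    a.set([1.5, 2.5])
    print(a.get())  # -> [1.5, 2.5]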
"""
Module for managing Solaris logadm based log rotations.
"""
import logging
import shlex
import salt.utils.args
import salt.utils.decorators as decorators
import salt.utils.files
import salt.utils.stringutils
try:
from shlex import quote as _quote_args # pylint: disable=E0611
except ImportError:
from pipes import quote as _quote_args
log = logging.getLogger(__name__)
default_conf = "/etc/logadm.conf"
option_toggles = {
"-c": "copy",
"-l": "localtime",
"-N": "skip_missing",
}
option_flags = {
"-A": "age",
"-C": "count",
"-a": "post_command",
"-b": "pre_command",
"-e": "mail_addr",
"-E": "expire_command",
"-g": "group",
"-m": "mode",
"-M": "rename_command",
"-o": "owner",
"-p": "period",
"-P": "timestmp",
"-R": "old_created_command",
"-s": "size",
"-S": "max_size",
"-t": "template",
"-T": "old_pattern",
"-w": "entryname",
"-z": "compress_count",
}
def __virtual__():
"""
Only work on Solaris based systems
"""
if "Solaris" in __grains__["os_family"]:
return True
return (
False,
"The logadm execution module cannot be loaded: only available on Solaris.",
)
def _arg2opt(arg):
"""
    Turn a passed argument into the corresponding option
"""
res = [o for o, a in option_toggles.items() if a == arg]
res += [o for o, a in option_flags.items() if a == arg]
return res[0] if res else None
def _parse_conf(conf_file=default_conf):
"""
Parse a logadm configuration file.
"""
ret = {}
with salt.utils.files.fopen(conf_file, "r") as ifile:
for line in ifile:
line = salt.utils.stringutils.to_unicode(line).strip()
if not line:
continue
if line.startswith("#"):
continue
splitline = line.split(" ", 1)
ret[splitline[0]] = splitline[1]
return ret
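# Illustration (hypothetical logadm.conf line, not taken from the original
# source): a line such as
#   /var/log/syslog -C 8 -a 'kill -HUP `cat /var/run/syslog.pid`'
# is returned by _parse_conf() as
#   {'/var/log/syslog': "-C 8 -a 'kill -HUP `cat /var/run/syslog.pid`'"}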
def _parse_options(entry, options, include_unset=True):
"""
Parse a logadm options string
"""
log_cfg = {}
options = shlex.split(options)
if not options:
return None
## identifier is entry or log?
if entry.startswith("/"):
log_cfg["log_file"] = entry
else:
log_cfg["entryname"] = entry
## parse options
# NOTE: we loop over the options because values may exist multiple times
index = 0
while index < len(options):
# log file
if index in [0, (len(options) - 1)] and options[index].startswith("/"):
log_cfg["log_file"] = options[index]
# check if toggle option
elif options[index] in option_toggles:
log_cfg[option_toggles[options[index]]] = True
# check if flag option
        elif options[index] in option_flags and (index + 1) < len(options):
log_cfg[option_flags[options[index]]] = (
int(options[index + 1])
if options[index + 1].isdigit()
else options[index + 1]
)
index += 1
# unknown options
else:
if "additional_options" not in log_cfg:
log_cfg["additional_options"] = []
if " " in options[index]:
log_cfg["dditional_options"] = "'{}'".format(options[index])
else:
log_cfg["additional_options"].append(options[index])
index += 1
## turn additional_options into string
if "additional_options" in log_cfg:
log_cfg["additional_options"] = " ".join(log_cfg["additional_options"])
## ensure we have a log_file
# NOTE: logadm assumes logname is a file if no log_file is given
if "log_file" not in log_cfg and "entryname" in log_cfg:
log_cfg["log_file"] = log_cfg["entryname"]
del log_cfg["entryname"]
## include unset
if include_unset:
        # toggle options
for name in option_toggles.values():
if name not in log_cfg:
log_cfg[name] = False
# flag options
for name in option_flags.values():
if name not in log_cfg:
log_cfg[name] = None
return log_cfg
def show_conf(conf_file=default_conf, name=None):
"""
Show configuration
conf_file : string
path to logadm.conf, defaults to /etc/logadm.conf
name : string
optional show only a single entry
CLI Example:
.. code-block:: bash
salt '*' logadm.show_conf
salt '*' logadm.show_conf name=/var/log/syslog
"""
cfg = _parse_conf(conf_file)
# filter
if name and name in cfg:
return {name: cfg[name]}
elif name:
return {name: "not found in {}".format(conf_file)}
else:
return cfg
def list_conf(conf_file=default_conf, log_file=None, include_unset=False):
"""
Show parsed configuration
.. versionadded:: 2018.3.0
conf_file : string
path to logadm.conf, defaults to /etc/logadm.conf
log_file : string
optional show only one log file
include_unset : boolean
include unset flags in output
CLI Example:
.. code-block:: bash
salt '*' logadm.list_conf
        salt '*' logadm.list_conf log_file=/var/log/syslog
salt '*' logadm.list_conf include_unset=False
"""
cfg = _parse_conf(conf_file)
cfg_parsed = {}
## parse all options
for entry in cfg:
log_cfg = _parse_options(entry, cfg[entry], include_unset)
cfg_parsed[
log_cfg["log_file"] if "log_file" in log_cfg else log_cfg["entryname"]
] = log_cfg
## filter
if log_file and log_file in cfg_parsed:
return {log_file: cfg_parsed[log_file]}
elif log_file:
return {log_file: "not found in {}".format(conf_file)}
else:
return cfg_parsed
@decorators.memoize
def show_args():
"""
Show which arguments map to which flags and options.
.. versionadded:: 2018.3.0
CLI Example:
.. code-block:: bash
salt '*' logadm.show_args
"""
mapping = {"flags": {}, "options": {}}
for flag, arg in option_toggles.items():
mapping["flags"][flag] = arg
for option, arg in option_flags.items():
mapping["options"][option] = arg
return mapping
def rotate(name, pattern=None, conf_file=default_conf, **kwargs):
"""
Set up pattern for logging.
name : string
alias for entryname
pattern : string
alias for log_file
conf_file : string
optional path to alternative configuration file
kwargs : boolean|string|int
optional additional flags and parameters
.. note::
``name`` and ``pattern`` were kept for backwards compatibility reasons.
``name`` is an alias for the ``entryname`` argument, ``pattern`` is an alias
for ``log_file``. These aliases will only be used if the ``entryname`` and
``log_file`` arguments are not passed.
    For a full list of arguments see ``logadm.show_args``.
CLI Example:
.. code-block:: bash
salt '*' logadm.rotate myapplog pattern='/var/log/myapp/*.log' count=7
salt '*' logadm.rotate myapplog log_file='/var/log/myapp/*.log' count=4 owner=myappd mode='0700'
"""
## cleanup kwargs
kwargs = salt.utils.args.clean_kwargs(**kwargs)
## inject name into kwargs
if "entryname" not in kwargs and name and not name.startswith("/"):
kwargs["entryname"] = name
## inject pattern into kwargs
if "log_file" not in kwargs:
if pattern and pattern.startswith("/"):
kwargs["log_file"] = pattern
# NOTE: for backwards compatibility check if name is a path
elif name and name.startswith("/"):
kwargs["log_file"] = name
## build command
log.debug("logadm.rotate - kwargs: %s", kwargs)
command = "logadm -f {}".format(conf_file)
for arg, val in kwargs.items():
if arg in option_toggles.values() and val:
command = "{} {}".format(
command,
_arg2opt(arg),
)
elif arg in option_flags.values():
command = "{} {} {}".format(command, _arg2opt(arg), _quote_args(str(val)))
elif arg != "log_file":
log.warning("Unknown argument %s, don't know how to map this!", arg)
if "log_file" in kwargs:
        # NOTE: excerpt from `man logadm`
# If no log file name is provided on a logadm command line, the entry
# name is assumed to be the same as the log file name. For example,
# the following two lines achieve the same thing, keeping two copies
# of rotated log files:
#
# % logadm -C2 -w mylog /my/really/long/log/file/name
# % logadm -C2 -w /my/really/long/log/file/name
if "entryname" not in kwargs:
command = "{} -w {}".format(command, _quote_args(kwargs["log_file"]))
else:
command = "{} {}".format(command, _quote_args(kwargs["log_file"]))
log.debug("logadm.rotate - command: %s", command)
result = __salt__["cmd.run_all"](command, python_shell=False)
if result["retcode"] != 0:
return dict(Error="Failed in adding log", Output=result["stderr"])
return dict(Result="Success")
def remove(name, conf_file=default_conf):
"""
Remove log pattern from logadm
CLI Example:
.. code-block:: bash
salt '*' logadm.remove myapplog
"""
command = "logadm -f {} -r {}".format(conf_file, name)
result = __salt__["cmd.run_all"](command, python_shell=False)
if result["retcode"] != 0:
return dict(
Error="Failure in removing log. Possibly already removed?",
Output=result["stderr"],
)
return dict(Result="Success")
import copy
from soundrts.lib.sound import distance
from soundrts.lib.nofloat import square_of_distance
try:
from hashlib import md5
except ImportError:
from md5 import md5
import os.path
import Queue
import re
import string
import time
from lib import collision
from constants import COLLISION_RADIUS, VIRTUAL_TIME_INTERVAL, PROFILE
from definitions import rules, get_ai_names, load_ai
from lib.log import warning, exception, info
from lib.nofloat import to_int, int_distance, PRECISION
from paths import MAPERROR_PATH
import res
from worldability import Ability
from worldclient import DummyClient
from worldexit import passage
from worldorders import ORDERS_DICT
from worldplayerbase import Player, normalize_cost_or_resources
from worldplayercomputer import Computer
from worldplayerhuman import Human
import worldrandom
from worldresource import Deposit, Meadow
from worldroom import Square
from worldunit import Unit, Worker, Soldier, Building, Effect
from worldupgrade import Upgrade
GLOBAL_FOOD_LIMIT = 80
class Type(object):
def init_dict(self, target):
target.type_name = self.type_name
for k, v in self.dct.items():
if k == "class":
continue
if (hasattr(self.cls, k) or
k.endswith("_bonus") and hasattr(self.cls, k[:-6])
) and not callable(getattr(self.cls, k, None)):
if k == "cost":
normalize_cost_or_resources(v)
setattr(target, k, v)
elif target is self:
warning("in %s: %s doesn't have any attribute called '%s'", self.type_name, self.cls.__name__, k)
def __init__(self, name, bases, dct):
self.__name__ = name
self.type_name = name
self.cls = bases[0]
if "sight_range" in dct:
del dct["sight_range"]
dct["bonus_height"] = 1
info("in %s: replacing sight_range 1 with bonus_height 1", name)
if "special_range" in dct:
del dct["special_range"]
dct["range"] = 12 * PRECISION
dct["minimal_range"] = 4 * PRECISION
dct["is_ballistic"] = 1
info("in %s: replacing special_range 1 with range 12, minimal_range 4 and is_ballistic 1", name)
self.dct = dct
self.init_dict(self)
def __call__(self, *args, **kargs):
result = self.cls(self, *args, **kargs)
return result
def __getattr__(self, name):
if name[:2] != "__":
return getattr(self.cls, name)
else:
raise AttributeError
# POSSIBLE TODO: a world doesn't know what is a player or a game...
# ... except for the resources and tech?!!! and alliances, enemies
# rename "player" to "economy", "country", "tribe", "side",
# "force", "faction", "team"?)
class World(object):
def __init__(self, default_triggers, seed=0):
self.default_triggers = default_triggers
self.id = self.get_next_id()
worldrandom.seed(int(seed))
self.time = 0
self.squares = []
self.active_objects = []
self.players = []
self.ex_players = []
self.unit_classes = {}
self.objects = {}
self.harm_target_types = {}
self._command_queue = Queue.Queue()
def __getstate__(self):
odict = self.__dict__.copy()
del odict["_command_queue"]
return odict
def __setstate__(self, dict):
self.__dict__.update(dict)
self._command_queue = Queue.Queue()
def remove_links_for_savegame(self): # avoid pickle recursion problem
for z in self.squares:
for e in z.exits:
e.place = None
def restore_links_for_savegame(self):
for z in self.squares:
for e in z.exits:
e.place = z
self.set_neighbours()
_next_id = 0 # reset ID for each world to avoid big numbers
def get_next_id(self, increment=True):
if increment:
self._next_id += 1
return str(self._next_id)
else:
return str(self._next_id + 1)
# Why use a different id for orders: get_next_id() would have worked too,
# but with higher risks of synchronization errors. This way is probably
# more sturdy.
_next_order_id = 0
def get_next_order_id(self):
self._next_order_id += 1
return self._next_order_id
current_player_number = 0
def get_next_player_number(self):
self.current_player_number += 1
return self.current_player_number
def get_objects(self, x, y, radius, filter=lambda x: True):
radius_2 = radius * radius
return [o for z in self.squares for o in z.objects
if filter(o) and square_of_distance(x, y, o.x, o.y) <= radius_2]
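    # Illustration (hypothetical call; the radius scaling is an assumption
    # based on the PRECISION-scaled coordinates used elsewhere in this module):
    #   world.get_objects(x, y, 6 * PRECISION,
    #                     filter=lambda o: getattr(o, "player", None) is p)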
def get_place_from_xy(self, x, y):
return self.grid.get((x / self.square_width,
y / self.square_width))
def clean(self):
for p in self.players + self.ex_players:
p.clean()
for z in self.squares:
z.clean()
self.__dict__ = {}
def _get_objects_values(self):
names_to_check = ["x", "y", "hp", "action_target"]
if self.time == 0:
names_to_check += ["id", "player"]
objects_to_check = []
for z in self.squares:
objects_to_check += z.objects
else:
objects_to_check = self.active_objects
for o in objects_to_check:
for name in names_to_check:
if hasattr(o, name):
value = getattr(o, name)
if name in ["action_target", "player"]:
if hasattr(value, "id"):
value = value.id
else:
continue
yield "%s%s" % (name, value)
def get_objects_string(self):
return "".join(self._get_objects_values())
def get_digest(self):
d = md5(str(self.time))
for p in self.players:
d.update(str(len(p.units)))
for z in self.squares:
d.update(str(len(z.objects)))
for ov in self._get_objects_values():
d.update(ov)
return d.hexdigest()
_previous_slow_update = 0
def update(self):
# normal updates
for p in self.players[:]:
if p in self.players:
try:
p.update()
except:
exception("")
for o in self.active_objects[:]:
# much faster way to check if "o in self.active_objects":
if o.place is not None:
try:
o.update()
except:
exception("")
# slow updates (called every second)
if self.time >= self._previous_slow_update + 1000:
for o in self.active_objects[:]:
# much faster way to check if "o in self.active_objects":
if o.place is not None:
try:
o.slow_update()
except:
exception("")
for p in self.players[:]:
if p in self.players:
try:
p.slow_update()
except:
exception("")
self._previous_slow_update += 1000
# signal the end of the updates for this time
self.time += VIRTUAL_TIME_INTERVAL
for p in self.players[:]:
if p.is_human():
p.ready = False
try:
def _copy(l):
return set(copy.copy(o) for o in l)
collision_debug = None
# collision_debug = copy.deepcopy(self.collision)
if p.is_local_human(): # avoid unnecessary copies
if p.cheatmode:
observed_before_squares = self.squares
else:
observed_before_squares = p.observed_before_squares
p.push("voila", self.time,
_copy(p.memory), _copy(p.perception),
p.observed_squares.keys(),
observed_before_squares,
collision_debug)
except:
exception("")
# if no "true" player is playing any more, end the game
if not self.true_playing_players:
for p in self.players:
p.quit_game()
ground = []
global_food_limit = GLOBAL_FOOD_LIMIT
# move the following methods to Map
unit_base_classes = {"worker": Worker, "soldier": Soldier,
"building": Building,
"effect": Effect,
"deposit": Deposit,
"upgrade": Upgrade, "ability": Ability}
def unit_class(self, s):
"""Get a class-like unit generator from its name.
Example: unit_class("peasant") to get a peasant generator
        At the moment, unit_classes also contains upgrades, abilities, etc.
"""
if not self.unit_classes.has_key(s):
try:
base = self.unit_base_classes[rules.get(s, "class")[0]]
except:
if rules.get(s, "class") != ["faction"]:
warning("no class defined for %s", s)
self.unit_classes[s] = None
return
try:
dct = rules.get_dict(s)
t = Type(s, (base,), dct)
if base is Upgrade:
t = base(s, dct) # factory-prototypes are only for units
self.unit_classes[s] = t
except:
exception("problem with unit_class('%s')", s)
self.unit_classes[s] = None
return
return self.unit_classes[s]
def _get_classnames(self, condition):
result = []
for c in rules.classnames():
uc = self.unit_class(c)
if uc is not None and condition(uc):
result.append(c)
return result
def get_makers(self, t):
def can_make(uc, t):
for a in ("can_build", "can_train", "can_upgrade_to"):
if t in getattr(uc, a, []):
return True
if t.__class__ != str:
t = t.__name__
return self._get_classnames(lambda uc: can_make(uc, t))
def get_units(self):
return self._get_classnames(lambda uc: issubclass(uc.cls, Unit))
def get_soldiers(self):
return self._get_classnames(lambda uc: issubclass(uc.cls, Soldier))
def get_deposits(self, resource_index):
return self._get_classnames(lambda uc: issubclass(uc.cls, Deposit) and uc.resource_type == resource_index)
# map creation
def set_neighbours(self):
for square in set(self.grid.values()):
square.set_neighbours()
def _create_squares_and_grid(self):
self.grid = {}
for col in range(self.nb_columns):
for row in range(self.nb_lines):
square = Square(self, col, row, self.square_width)
self.grid[square.name] = square
self.grid[(col, row)] = square
square.high_ground = square.name in self.high_grounds
self.set_neighbours()
xmax = self.nb_columns * self.square_width
res = COLLISION_RADIUS * 2 / 3
self.collision = {"ground": collision.CollisionMatrix(xmax, res),
"air": collision.CollisionMatrix(xmax, res)}
def _meadows(self):
m = []
for square in sorted([x for x in self.grid.keys() if isinstance(x, str)]):
m.extend([square] * self.nb_meadows_by_square)
m.extend(self.additional_meadows)
for square in self.remove_meadows:
if square in m:
m.remove(square)
return m
def _create_resources(self):
for z, cls, n in self.map_objects:
C = self.unit_class(cls)
if self.grid[z].can_receive("ground"): # avoids using the spiral
resource = C(self.grid[z], n)
resource.building_land = Meadow(self.grid[z])
resource.building_land.delete()
for z in self._meadows():
Meadow(self.grid[z])
def _arrange_resources_symmetrically(self):
xc = self.nb_columns * 10 / 2
yc = self.nb_lines * 10 / 2
for z in self.squares:
z.arrange_resources_symmetrically(xc, yc)
def _we_places(self, i):
t = string.ascii_lowercase
col = t.find(i[0]) + 1
if col == self.nb_columns:
col = 0
j = t[col] + i[1:]
if not self.grid.has_key(j):
map_error("", "The west-east passage starting from %s doesn't exist." % i)
return self.grid[i].east_side(), self.grid[j].west_side()
def _sn_places(self, i):
line = int(i[1:]) + 1
if line == self.nb_lines + 1:
line = 1
j = i[0] + str(line)
if not self.grid.has_key(j):
map_error("", "The south-north passage starting from %s doesn't exist." % i)
return self.grid[i].north_side(), self.grid[j].south_side()
def _create_passages(self):
for t, squares in self.west_east:
for i in squares:
passage(self._we_places(i), t)
for t, squares in self.south_north:
for i in squares:
passage(self._sn_places(i), t)
self.g = {}
for z in self.squares:
for e in z.exits:
self.g[e] = {}
for f in z.exits:
if f is not e:
self.g[e][f] = int_distance(e.x, e.y, f.x, f.y)
self.g[e][e.other_side] = 0
def _build_map(self):
self._create_squares_and_grid()
self._create_resources()
self._arrange_resources_symmetrically()
self._create_passages()
def _add_start_to(self, starts, resources, items, sq=None):
def is_a_square(x):
return x[0] in string.ascii_letters and x[1] in string.digits\
and (len(x) == 2 or len(x) == 3 and x[2] in string.digits)
start = []
multiplicator = 1
for x in items:
if is_a_square(x):
sq = x
multiplicator = 1
elif x[0] == "-":
start.append([None, x])
elif re.match("[0-9]+$", x):
multiplicator = int(x)
else:
start.extend([[sq, self.unit_class(x)]] * multiplicator)
multiplicator = 1
starts.append([resources, start, []])
@property
def nb_res(self):
return int(rules.get("parameters", "nb_of_resource_types")[0])
def _add_start(self, w, words, line):
# get start type
if w == "player":
n = "players_starts"
else:
n = "computers_starts"
# get resources
starting_resources = []
for c in words[1:1+self.nb_res]:
try:
starting_resources.append(to_int(c))
except:
map_error(line, "expected an integer but found %s" % c)
# build start
self._add_start_to(getattr(self, n), starting_resources, words[1+self.nb_res:])
def _list_to_tree(self, words):
cache = [[]]
for w in words:
if w == "(":
cache.append([])
elif w == ")":
cache[-2].append(cache.pop())
else:
cache[-1].append(w)
return cache[0]
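    # Illustration (hypothetical trigger tokens, not from a real map): the
    # flat token list of a "trigger" line becomes a nested tree:
    #   self._list_to_tree(["players", "(", "timer", "60", ")", "(", "victory", ")"])
    #   -> ["players", ["timer", "60"], ["victory"]]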
def _add_trigger(self, words):
owners, condition, action = self._list_to_tree(words)
if isinstance(owners, str):
owners = [owners]
for o in owners:
if o == "computers":
for s in self.computers_starts:
s[2].append([condition, action])
elif o == "players":
for s in self.players_starts:
s[2].append([condition, action])
elif o == "all":
for s in self.computers_starts + self.players_starts:
s[2].append([condition, action])
elif o[:-1] == "computer":
try:
self.computers_starts[int(o[-1:]) - 1][2].append([condition, action])
except:
map_error("", "error in trigger for %s: unknown owner" % o)
elif o[:-1] == "player":
try:
self.players_starts[int(o[-1:]) - 1][2].append([condition, action])
except:
map_error("", "error in trigger for %s: unknown owner" % o)
else:
map_error("", "error in trigger for %s: unknown owner" % o)
def _load_map(self, map):
def random_choice_repl(matchobj):
return worldrandom.choice(matchobj.group(1).split("\n#end_choice\n"))
def check_squares(squares):
for sq in squares:
if re.match("^[a-z]+[0-9]+$", sq) is None:
map_error(line, "%s is not a square" % sq)
self.objective = []
self.intro = []
self.timer_coefficient = 1
triggers = []
self.map_objects = []
self.computers_starts = []
self.players_starts = []
self.starting_units = []
squares_words = ["starting_squares",
"additional_meadows", "remove_meadows",
"high_grounds"]
self.square_width = 12 # default value
self.nb_lines = 0
self.nb_columns = 0
self.nb_rows = 0 # deprecated (was incorrectly used for columns instead of lines)
self.nb_meadows_by_square = 0
self.west_east = []
self.south_north = []
# "squares words"
self.starting_squares = []
self.additional_meadows = []
self.remove_meadows = []
self.high_grounds = []
self.starting_resources = [0 for _ in range(self.nb_res)]
self.nb_players_min = 1
self.nb_players_max = 1
s = map.read() # "universal newlines"
s = re.sub("(?m);.*$", "", s) # remove comments
s = re.sub("(?m)^[ \t]*$\n", "", s) # remove empty lines
s = re.sub(r"(?m)\\[ \t]*$\n", " ", s) # join lines ending with "\"
s = s.replace("(", " ( ")
s = s.replace(")", " ) ")
s = re.sub(r"\s*\n\s*", r"\n", s) # strip lines
s = re.sub(r"(?ms)^#random_choice\n(.*?)\n#end_random_choice$", random_choice_repl, s)
s = re.sub(r"(?m)^(goldmine|wood)s\s+([0-9]+)\s+(.*)$", r"\1 \2 \3", s)
s = re.sub(r"(south_north|west_east)_paths", r"\1 path", s)
s = re.sub(r"(south_north|west_east)_bridges", r"\1 bridge", s)
for line in s.split("\n"): # TODO: error msg
words = line.strip().split()
if not words:
continue # empty line
w = words[0]
if w[0:1] == ";":
continue # comment
for _w in words[1:]:
if w in ["south_north", "west_east"]:
continue # TODO: check that the exit type_name is defined in style
for _w in _w.split(","):
if _w and _w[0] == "-": _w = _w[1:]
if re.match("^([a-z]+[0-9]+|[0-9]+(.[0-9]*)?|.[0-9]+)$", _w) is None and \
not hasattr(Player, "lang_" + _w) and \
_w not in rules.classnames() and \
_w not in get_ai_names() and \
_w not in ["(", ")", "all", "players", "computers"] and \
_w not in ORDERS_DICT:
map_error(line, "unknown: %s" % _w)
if w in ["title", "objective", "intro"]:
setattr(self, w, [int(x) for x in words[1:]]) # TODO: error msg (sounds)
elif w in ["square_width", "nb_rows", "nb_columns", "nb_lines",
"nb_players_min", "nb_players_max", "scenario",
"nb_meadows_by_square",
"global_food_limit",
"timer_coefficient"]:
try:
setattr(self, w, int(words[1]))
if w == "nb_rows":
self.nb_columns = self.nb_rows
warning("nb_rows is deprecated, use nb_columns instead")
except:
map_error(line, "%s must be an integer" % w)
elif w in ["south_north", "west_east"]:
squares = words[2:]
check_squares(squares)
getattr(self, w).append((words[1], squares))
elif w in squares_words:
squares = words[1:]
check_squares(squares)
getattr(self, w).extend(squares)
elif w in ["starting_resources"]:
self.starting_resources = []
for c in words[1:]:
try:
self.starting_resources.append(to_int(c))
except:
map_error(line, "expected an integer but found %s" % c)
elif rules.get(w, "class") == ["deposit"]:
for sq in words[2:]: # TODO: error msg (squares)
self.map_objects.append([sq, w, words[1]])
elif w in ["starting_units"]:
getattr(self, w).extend(words[1:]) # TODO: error msg (types)
elif w in ["player", "computer_only", "computer"]:
self._add_start(w, words, line)
elif w == "trigger":
triggers.append(words[1:])
else:
map_error(line, "unknown command: %s" % w)
# build self.players_starts
for sq in self.starting_squares:
self._add_start_to(self.players_starts,
self.starting_resources, self.starting_units, sq)
if self.nb_players_min > self.nb_players_max:
map_error("", "nb_players_min > nb_players_max")
if len(self.players_starts) < self.nb_players_max:
map_error("", "not enough starting places for nb_players_max")
# 2 multiplayer map types: with or without standard triggers
# TODO: select in a menu: User Map Settings, melee, free for all, etc
if not triggers and self.default_triggers:
triggers = self.default_triggers
for t in triggers:
self._add_trigger(t)
def load_and_build_map(self, map):
if os.path.exists(MAPERROR_PATH):
try:
os.remove(MAPERROR_PATH)
except:
warning("cannot remove map error file")
try:
rules.load(res.get_text_file("rules", append=True), map.campaign_rules, map.additional_rules)
load_ai(res.get_text_file("ai", append=True), map.campaign_ai, map.additional_ai)
self._load_map(map)
self.map = map
self.square_width = int(self.square_width * PRECISION)
self._build_map()
if self.objective:
self.introduction = [4020] + self.objective
else:
self.introduction = []
except MapError, msg:
warning("map error: %s", msg)
self.map_error = "map error: %s" % msg
return False
return True
def get_factions(self):
return [c for c in rules.classnames()
if rules.get(c, "class") == ["faction"]]
# move this to Game?
def current_nb_human_players(self):
n = 0
for p in self.players:
if p.is_human():
n += 1
return n
def true_players(self):
return [p for p in self.players if not p.neutral]
@property
def true_playing_players(self):
return [p for p in self.true_players() if p.is_playing]
@property
def food_limit(self):
return self.global_food_limit
def _add_player(self, player_class, client, start, *args):
client.player = player_class(self, client, *args)
self.players.append(client.player)
client.player.start = start
def populate_map(self, players, alliances, factions=()):
# add "true" (non neutral) players
worldrandom.shuffle(self.players_starts)
for client in players:
start = self.players_starts.pop()
if client.__class__.__name__ == "DummyClient":
self._add_player(Computer, client, start, False)
else:
self._add_player(Human, client, start)
# create the alliances
if alliances:
for p, pa in zip(self.players, alliances):
for other, oa in zip(self.players, alliances):
if other is not p and oa == pa:
p.allied.append(other)
else: # computer players are allied by default
for p in self.players:
if isinstance(p, Computer):
for other in self.players:
if other is not p and isinstance(other, Computer):
p.allied.append(other)
# set the factions for players
if factions:
for p, pr in zip(self.players, factions):
if pr == "random_faction":
p.faction = worldrandom.choice(self.get_factions())
else:
p.faction = pr
# add "neutral" (independent) computers
for start in self.computers_starts:
self._add_player(Computer, DummyClient(), start, True)
# init all players positions
for player in self.players:
player.init_position()
self.admin = players[0] # define get_admin()?
def loop(self):
def _loop():
while(self.__dict__): # cf clean()
if not self._command_queue.empty():
player, order = self._command_queue.get()
try:
player.execute_command(order)
except:
exception("")
else:
time.sleep(.01)
if PROFILE:
import cProfile
cProfile.runctx("_loop()", globals(), locals(), "world_profile.tmp")
import pstats
for n in ("interface_profile.tmp", "world_profile.tmp"):
p = pstats.Stats(n)
p.strip_dirs()
p.sort_stats('time', 'cumulative').print_stats(30)
p.print_callers(30)
p.print_callees(20)
p.sort_stats('cumulative').print_stats(50)
else:
_loop()
def queue_command(self, player, order):
self._command_queue.put((player, order))
class MapError(Exception):
pass
def map_error(line, msg):
w = 'error in "%s": %s' % (line, msg)
try:
open(MAPERROR_PATH, "w").write(w)
except:
warning("could not write in %s", MAPERROR_PATH)
raise MapError(w)
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: label-transform-param.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='label-transform-param.proto',
package='com.webank.ai.fate.core.mlmodel.buffer',
syntax='proto3',
serialized_options=_b('B\030LabelTransformParamProto'),
serialized_pb=_b('\n\x1blabel-transform-param.proto\x12&com.webank.ai.fate.core.mlmodel.buffer\"\xfa\x03\n\x13LabelTransformParam\x12\x64\n\rlabel_encoder\x18\x01 \x03(\x0b\x32M.com.webank.ai.fate.core.mlmodel.buffer.LabelTransformParam.LabelEncoderEntry\x12i\n\x10\x65ncoder_key_type\x18\x02 \x03(\x0b\x32O.com.webank.ai.fate.core.mlmodel.buffer.LabelTransformParam.EncoderKeyTypeEntry\x12m\n\x12\x65ncoder_value_type\x18\x03 \x03(\x0b\x32Q.com.webank.ai.fate.core.mlmodel.buffer.LabelTransformParam.EncoderValueTypeEntry\x1a\x33\n\x11LabelEncoderEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x35\n\x13\x45ncoderKeyTypeEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x37\n\x15\x45ncoderValueTypeEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x1a\x42\x18LabelTransformParamProtob\x06proto3')
)
_LABELTRANSFORMPARAM_LABELENCODERENTRY = _descriptor.Descriptor(
name='LabelEncoderEntry',
full_name='com.webank.ai.fate.core.mlmodel.buffer.LabelTransformParam.LabelEncoderEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='com.webank.ai.fate.core.mlmodel.buffer.LabelTransformParam.LabelEncoderEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='com.webank.ai.fate.core.mlmodel.buffer.LabelTransformParam.LabelEncoderEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=415,
serialized_end=466,
)
_LABELTRANSFORMPARAM_ENCODERKEYTYPEENTRY = _descriptor.Descriptor(
name='EncoderKeyTypeEntry',
full_name='com.webank.ai.fate.core.mlmodel.buffer.LabelTransformParam.EncoderKeyTypeEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='com.webank.ai.fate.core.mlmodel.buffer.LabelTransformParam.EncoderKeyTypeEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='com.webank.ai.fate.core.mlmodel.buffer.LabelTransformParam.EncoderKeyTypeEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=468,
serialized_end=521,
)
_LABELTRANSFORMPARAM_ENCODERVALUETYPEENTRY = _descriptor.Descriptor(
name='EncoderValueTypeEntry',
full_name='com.webank.ai.fate.core.mlmodel.buffer.LabelTransformParam.EncoderValueTypeEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='com.webank.ai.fate.core.mlmodel.buffer.LabelTransformParam.EncoderValueTypeEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='com.webank.ai.fate.core.mlmodel.buffer.LabelTransformParam.EncoderValueTypeEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=523,
serialized_end=578,
)
_LABELTRANSFORMPARAM = _descriptor.Descriptor(
name='LabelTransformParam',
full_name='com.webank.ai.fate.core.mlmodel.buffer.LabelTransformParam',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='label_encoder', full_name='com.webank.ai.fate.core.mlmodel.buffer.LabelTransformParam.label_encoder', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='encoder_key_type', full_name='com.webank.ai.fate.core.mlmodel.buffer.LabelTransformParam.encoder_key_type', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='encoder_value_type', full_name='com.webank.ai.fate.core.mlmodel.buffer.LabelTransformParam.encoder_value_type', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LABELTRANSFORMPARAM_LABELENCODERENTRY, _LABELTRANSFORMPARAM_ENCODERKEYTYPEENTRY, _LABELTRANSFORMPARAM_ENCODERVALUETYPEENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=72,
serialized_end=578,
)
_LABELTRANSFORMPARAM_LABELENCODERENTRY.containing_type = _LABELTRANSFORMPARAM
_LABELTRANSFORMPARAM_ENCODERKEYTYPEENTRY.containing_type = _LABELTRANSFORMPARAM
_LABELTRANSFORMPARAM_ENCODERVALUETYPEENTRY.containing_type = _LABELTRANSFORMPARAM
_LABELTRANSFORMPARAM.fields_by_name['label_encoder'].message_type = _LABELTRANSFORMPARAM_LABELENCODERENTRY
_LABELTRANSFORMPARAM.fields_by_name['encoder_key_type'].message_type = _LABELTRANSFORMPARAM_ENCODERKEYTYPEENTRY
_LABELTRANSFORMPARAM.fields_by_name['encoder_value_type'].message_type = _LABELTRANSFORMPARAM_ENCODERVALUETYPEENTRY
DESCRIPTOR.message_types_by_name['LabelTransformParam'] = _LABELTRANSFORMPARAM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
LabelTransformParam = _reflection.GeneratedProtocolMessageType('LabelTransformParam', (_message.Message,), {
'LabelEncoderEntry' : _reflection.GeneratedProtocolMessageType('LabelEncoderEntry', (_message.Message,), {
'DESCRIPTOR' : _LABELTRANSFORMPARAM_LABELENCODERENTRY,
'__module__' : 'label_transform_param_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.LabelTransformParam.LabelEncoderEntry)
})
,
'EncoderKeyTypeEntry' : _reflection.GeneratedProtocolMessageType('EncoderKeyTypeEntry', (_message.Message,), {
'DESCRIPTOR' : _LABELTRANSFORMPARAM_ENCODERKEYTYPEENTRY,
'__module__' : 'label_transform_param_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.LabelTransformParam.EncoderKeyTypeEntry)
})
,
'EncoderValueTypeEntry' : _reflection.GeneratedProtocolMessageType('EncoderValueTypeEntry', (_message.Message,), {
'DESCRIPTOR' : _LABELTRANSFORMPARAM_ENCODERVALUETYPEENTRY,
'__module__' : 'label_transform_param_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.LabelTransformParam.EncoderValueTypeEntry)
})
,
'DESCRIPTOR' : _LABELTRANSFORMPARAM,
'__module__' : 'label_transform_param_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.LabelTransformParam)
})
_sym_db.RegisterMessage(LabelTransformParam)
_sym_db.RegisterMessage(LabelTransformParam.LabelEncoderEntry)
_sym_db.RegisterMessage(LabelTransformParam.EncoderKeyTypeEntry)
_sym_db.RegisterMessage(LabelTransformParam.EncoderValueTypeEntry)
DESCRIPTOR._options = None
_LABELTRANSFORMPARAM_LABELENCODERENTRY._options = None
_LABELTRANSFORMPARAM_ENCODERKEYTYPEENTRY._options = None
_LABELTRANSFORMPARAM_ENCODERVALUETYPEENTRY._options = None
# @@protoc_insertion_point(module_scope)
from typing import Any, Dict, List, Optional, Tuple, Union
from cleo.styles import OutputStyle
from sdoc.sdoc2.helper.Enumerable import Enumerable
from sdoc.sdoc2.Position import Position
inline_creators = {}
"""
Map from inline commands to node creators.
:type: dict[str,callable]
"""
block_creators = {}
"""
Map from block commands to object creators.
:type: dict[str,callable]
"""
formatters = {}
"""
Map from format name to map from inline and block commands to format creators.
:type: dict[str,dict[str,callable]]
"""
class NodeStore:
"""
Class for creating, storing, and retrieving nodes.
@todo Make abstract and implement other document store classes.
"""
_errors: int = 0
"""
The error count.
"""
_io: Optional[OutputStyle] = None
"""
Styled output formatter.
"""
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, io: OutputStyle):
"""
Object constructor.
"""
NodeStore._io = io
self.format: str = 'html'
"""
The output format.
"""
self.nested_nodes: List[Any] = []
"""
The stack of nested nodes (only filled when creating all nodes).
:type: list[sdoc.sdoc2.node.Node.Node]
"""
        self.nodes: Dict[int, Any] = {}
"""
The actual node store. Map from node ID to node.
:type: dict[int,sdoc.sdoc2.node.Node.Node]
"""
        self._enumerable_numbers: Dict[str, Enumerable] = {}
"""
The current numbers of enumerable nodes (e.g. headings, figures).
"""
self.labels: Dict[str, Union[str, Dict[str, str]]] = {}
"""
        The identifiers of labels that refer to heading nodes.
"""
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def error(message: str, node=None) -> None:
"""
Logs an error.
        :param str message: The error message. This message will be appended with 'at filename:line.column' of the token.
:param sdoc.sdoc2.node.Node.Node|None node: The node where the error occurred.
"""
NodeStore._errors += 1
messages = [message]
if node:
filename = node.position.file_name
line_number = node.position.start_line
column_number = node.position.start_column + 1
messages.append('Position: {0!s}:{1:d}.{2:d}'.format(filename, line_number, column_number))
NodeStore._io.error(messages)
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def get_formatter(output_type: str, name_formatter: str):
"""
        Returns the formatter for the given output type and name.
        :param str output_type: The type of output formatter (e.g. 'html')
        :param str name_formatter: The name of the formatter (e.g. 'smile')
:rtype: sdoc.sdoc2.formatter.Formatter.Formatter
"""
return formatters[output_type][name_formatter]
# ------------------------------------------------------------------------------------------------------------------
def end_block_node(self, command: str) -> None:
"""
Signals the end of a block command.
        :param str command: The name of the block command.
"""
        # Pop non-block command nodes from the stack.
while self.nested_nodes and not self.nested_nodes[-1].is_block_command():
self.nested_nodes.pop()
if not self.nested_nodes:
# @todo position
raise RuntimeError("Unexpected \\end{{{0!s}}}.".format(command))
# Get the last node on the block stack.
node = self.nested_nodes[-1]
if node.name != command:
# @todo position \end
# @todo position \begin
raise RuntimeError("\\begin{{{0!s}}} and \\end{{{1!s}}} do not match.".format(node.name, command))
# Pop the last node of the block stack.
self.nested_nodes.pop()
# ------------------------------------------------------------------------------------------------------------------
def in_scope(self, node_id: int):
"""
Retrieves a node based on its ID.
:param int node_id: The node ID.
:rtype: sdoc.sdoc2.node.Node.Node
"""
return self.nodes[node_id]
# ------------------------------------------------------------------------------------------------------------------
def out_scope(self, node):
"""
        Marks a node as no longer in scope.
:param sdoc.sdoc2.node.Node.Node node: The node.
"""
pass
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def register_inline_command(command: str, constructor) -> None:
"""
Registers a node constructor for an inline command.
:param str command: The name of the inline command.
:param callable constructor: The node constructor.
"""
inline_creators[command] = constructor
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def register_formatter(command: str, output_format: str, formatter) -> None:
"""
        Registers an output formatter constructor for a command.
:param str command: The name of the command.
:param str output_format: The output format the formatter generates.
:param callable formatter: The formatter for generating the content of the node in the output format.
"""
if output_format not in formatters:
formatters[output_format] = {}
formatters[output_format][command] = formatter
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def register_block_command(command: str, constructor) -> None:
"""
Registers a node constructor for a block command.
        :param str command: The name of the block command.
:param callable constructor: The node constructor.
"""
block_creators[command] = constructor
# ------------------------------------------------------------------------------------------------------------------
def create_inline_node(self,
command: str,
options: Optional[Dict[str, str]] = None,
argument: str = '',
position: Position = None):
"""
        Creates a node based on an inline command.
Note: The node is not stored nor appended to the content tree.
:param str command: The inline command.
:param dict[str,str] options: The options.
:param str argument: The argument of the inline command.
:param Position|None position: The position of the node definition.
:rtype: sdoc.sdoc2.node.Node.Node
"""
if command not in inline_creators:
# @todo set error status
constructor = inline_creators['unknown']
node = constructor(self._io, options, argument)
node.name = command
else:
# Create the new node.
constructor = inline_creators[command]
node = constructor(self._io, options, argument)
node.position = position
# Store the node and assign ID.
self.store_node(node)
return node
# ------------------------------------------------------------------------------------------------------------------
def create_block_node(self, command: str, options: Dict[str, str], position: Position):
"""
Creates a node based on a block command.
Note: The node is not appended to the content tree.
        :param str command: The block command.
:param dict[str,str] options: The options.
:param Position position: The position of the node definition.
:rtype: sdoc.sdoc2.node.Node.Node
"""
if command not in block_creators:
constructor = block_creators['unknown']
# @todo set error status
else:
# Create the new node.
constructor = block_creators[command]
node = constructor(self._io, options)
node.position = position
# Store the node and assign ID.
self.store_node(node)
return node
# ------------------------------------------------------------------------------------------------------------------
def append_inline_node(self, command: str, options: Dict[str, str], argument: str, position: Position):
"""
        Creates a node based on an inline command and appends it to the end of the content tree.
:param str command: The inline command.
:param dict[str,str] options: The options.
:param str argument: The argument of the inline command.
:param Position position: The position of the node definition.
:rtype: sdoc.sdoc2.node.Node.Node
"""
# Create the inline node.
node = self.create_inline_node(command, options, argument, position)
# Add the node to the node store.
self._append_to_content_tree(node)
return node
# ------------------------------------------------------------------------------------------------------------------
def append_block_node(self, command: str, options: Dict[str, str], position: Position):
"""
Creates a node based on a block command and appends it to the end of the content tree.
        :param str command: The block command.
:param dict[str,str] options: The options.
:param Position position: The position of the node definition.
:rtype: sdoc.sdoc2.node.Node.Node
"""
# Create the block node.
node = self.create_block_node(command, options, position)
# Add the node to the node store.
self._append_to_content_tree(node)
return node
# ------------------------------------------------------------------------------------------------------------------
def create_formatter(self, io: OutputStyle, command: str, parent=None):
"""
Creates a formatter for generating the output of nodes in the requested output format.
:param OutputStyle io: The IO object.
        :param str command: The inline or block command.
:param sdoc.sdoc2.formatter.Formatter.Formatter|None parent: The parent formatter.
:rtype: sdoc.sdoc2.formatter.Formatter.Formatter
"""
if self.format not in formatters:
raise RuntimeError("Unknown output format '{0!s}'.".format(self.format))
if command not in formatters[self.format]:
# @todo use default none decorator with warning
raise RuntimeError("Unknown formatter '{0!s}' for format '{1!s}'.".format(command, self.format))
constructor = formatters[self.format][command]
formatter = constructor(io, parent)
return formatter
# ------------------------------------------------------------------------------------------------------------------
def _adjust_hierarchy(self, node) -> None:
"""
Adjust the hierarchy based on the hierarchy of a new node.
:param sdoc.sdoc2.node.Node.Node node: The new node.
"""
node_hierarchy_name = node.get_hierarchy_name()
parent_found = False
while self.nested_nodes and not parent_found:
parent_node = self.nested_nodes[-1]
parent_hierarchy_name = parent_node.get_hierarchy_name()
if parent_hierarchy_name != node_hierarchy_name:
if node.is_hierarchy_root():
parent_found = True
else:
self.error("Improper nesting of node '{0!s}' at {1!s} and node '{2!s}' at {3!s}.".format(
parent_node.name, parent_node.position, node.name, node.position))
if not parent_found:
parent_hierarchy_level = parent_node.get_hierarchy_level()
node_hierarchy_level = node.get_hierarchy_level(parent_hierarchy_level)
if parent_hierarchy_level >= node_hierarchy_level and len(self.nested_nodes) > 1:
self.nested_nodes.pop()
else:
parent_found = True
parent_node = self.nested_nodes[-1]
parent_hierarchy_level = parent_node.get_hierarchy_level()
node_hierarchy_level = node.get_hierarchy_level(parent_hierarchy_level)
if node_hierarchy_level - parent_hierarchy_level > 1:
self.error("Improper nesting of levels:{0:d} at {1!s} and {2:d} at {3!s}.".format(
parent_hierarchy_level, parent_node.position, node_hierarchy_level, node.position),
node)
# ------------------------------------------------------------------------------------------------------------------
def store_node(self, node) -> int:
"""
        Stores a node. If the node has not been stored before, assigns an ID to it; otherwise the node replaces the
        node stored under the same ID. Returns the ID of the node.
:param sdoc.sdoc2.node.Node.Node node: The node.
"""
if not node.id:
# Add the node to the node store.
node_id = len(self.nodes) + 1
node.id = node_id
self.nodes[node.id] = node
return node.id
# ------------------------------------------------------------------------------------------------------------------
def _append_to_content_tree(self, node) -> None:
"""
Appends the node at the proper nesting level at the end of the content tree.
:param sdoc.sdoc2.node.Node.Node node: The node.
"""
if node.id == 1:
# The first node must be a document root.
if not node.is_document_root():
# @todo position of block node.
raise RuntimeError("Node {0!s} is not a document root".format(node.name))
self.nested_nodes.append(node)
else:
# All other nodes must not be a document root.
if node.is_document_root():
# @todo position of block node.
raise RuntimeError("Unexpected {0!s}. Node is document root".format(node.name))
# If the node is a part of a hierarchy adjust the nested nodes stack.
if node.get_hierarchy_name():
self._adjust_hierarchy(node)
# Add the node to the list of child nodes of its parent node.
if self.nested_nodes:
parent_node = self.nested_nodes[-1]
# Pop from stack if we have two list element nodes (e.g. item nodes) in a row.
if node.is_list_element() and type(parent_node) == type(node):
self.nested_nodes.pop()
parent_node = self.nested_nodes[-1]
parent_node.child_nodes.append(node.id)
# Block commands and hierarchical nodes must be appended to the nested nodes.
if node.is_block_command() or node.get_hierarchy_name():
self.nested_nodes.append(node)
# ------------------------------------------------------------------------------------------------------------------
def prepare_content_tree(self) -> None:
"""
        Prepares the content tree for further processing after parsing at the SDoc2 level.
"""
# Currently, node with ID 1 is the document node. @todo Improve getting the document node.
self.nodes[1].prepare_content_tree()
# ------------------------------------------------------------------------------------------------------------------
def number_numerable(self) -> None:
"""
        Numbers all enumerable nodes such as chapters, sections, figures, and items.
"""
self.nodes[1].number(self._enumerable_numbers)
# ------------------------------------------------------------------------------------------------------------------
def generate_toc(self) -> None:
"""
        Checks whether the document contains a table of contents node. If so, generates the table of contents.
"""
for node in self.nodes.values():
if node.get_command() == 'toc':
node.generate_toc()
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def generate(target_format) -> int:
"""
Generates the document.
        :param sdoc.format.Format.Format target_format: The format that will generate the output file.
"""
# Start generating file using specific formatter and check the errors.
format_errors = target_format.generate()
NodeStore._errors += format_errors
return NodeStore._errors
# ------------------------------------------------------------------------------------------------------------------
def get_enumerated_items(self) -> List[Tuple[str, str]]:
"""
        Returns a list of tuples with the command and number of each enumerated node.
        This method is intended for unit tests only.
:rtype: list[(str,str)]
"""
return self.nodes[1].get_enumerated_items()
# ------------------------------------------------------------------------------------------------------------------
def parse_labels(self) -> None:
"""
        Parses labels, sets additional arguments on nodes, and removes label nodes from the tree.
"""
self.nodes[1].parse_labels()
self.nodes[1].change_ref_argument()
# ----------------------------------------------------------------------------------------------------------------------
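# Hedged usage sketch (not part of the original module): the static registration API of
# NodeStore above is normally driven by plugin modules. The node constructor and HTML
# formatter passed in below are hypothetical placeholders, not classes from this code base.
def _example_register_smile_plugin(node_constructor, html_formatter):
    """
    Illustrates how a hypothetical 'smile' inline command could be wired up: the node
    constructor handles parsing, the formatter renders the node as HTML.
    """
    NodeStore.register_inline_command('smile', node_constructor)
    NodeStore.register_formatter('smile', 'html', html_formatter)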
|
|
from __future__ import division
import re
from collections import Counter
import math
import heapq
import sys
class PhraseMining(object):
"""
PhraseMining performs frequent pattern mining followed by agglomerative clustering
on the input corpus and then stores the results in intermediate files.
:param file_name:
path to the input corpus.
:param min_support:
minimum support threshold which must be satisfied by each phrase during frequent
pattern mining.
:param max_phrase_size:
maximum allowed phrase size.
:param alpha:
threshold for the significance score.
"""
def __init__(self, file_name, min_support=10, max_phrase_size=40, alpha=4):
self.min_support = min_support
self.max_phrase_size = max_phrase_size
self.alpha = alpha
self.file_name = file_name
def mine(self):
return self._run_phrase_mining(self.min_support, self.max_phrase_size, self.alpha, self.file_name)
def _frequentPatternMining(self, documents, min_support, max_phrase_size, word_freq, active_indices):
"""
Performs frequent pattern mining to collect aggregate counts for all contiguous phrases in the
input document that satisfy a certain minimum support threshold.
Parameters:
@documents: the input corpus
@min_support: minimum support threshold which must be satisfied by each phrase.
@max_phrase_size: maximum allowed phrase size
@word_freq: raw frequency of each word in the input corpus
@active_indices: set of active indices
"""
hash_counter = word_freq
n = 2
#iterate until documents is empty
while(len(documents) > 0):
temp_documents = []
new_active_indices = []
#go over each document
for d_i,doc in enumerate(documents):
#get set of indices of phrases of length n-1 with min support
new_word_indices = []
word_indices = active_indices[d_i]
for index in word_indices:
words = doc.split()
if index+n-2 < len(words):
key = ""
for i in range(index, index+n-2+1):
if i == index+n-2:
key = key + words[i]
else:
key = key + words[i] + " "
#check if the phrase 'key' meets min support
if hash_counter[key] >= min_support:
new_word_indices.append(index)
                #remove the current document if there are no more phrases of length
                #n that satisfy the minimum support threshold
if len(new_word_indices) != 0:
new_active_indices.append(new_word_indices)
temp_documents.append(doc)
words = doc.split()
for idx, i in enumerate(new_word_indices[:-1]):
phrase = ""
if (new_word_indices[idx+1] == i + 1):
for idx in range(i, i+n):
if idx == i+n-1:
phrase += words[idx]
else:
phrase += words[idx] + " "
hash_counter[phrase] += 1
documents = temp_documents
active_indices = new_active_indices
n += 1
if n == max_phrase_size:
break
hash_counter = Counter(x for x in hash_counter.elements() if hash_counter[x] >= min_support)
return hash_counter
def _agglomerative_clustering(self, doc, hash_counter, alpha, total_words):
"""
Performs agglomerative clustering to get meaningful phrases from the input document.
Parameters:
@doc: input corpus
@hash_counter: map from phrases to their respective raw frequency
@alpha: threshold for the significance score
@total_words: total count of the words in input corpus.
"""
sig_map = {}
phrases = doc.split()
while(True):
max_sig = float("-inf")
max_pair = -1
for index, word in enumerate(phrases[:-1]):
phrase = phrases[index]+" "+phrases[index+1]
if phrase not in sig_map:
sig_score = self._significance_score(phrases[index], phrases[index+1], hash_counter, total_words)
sig_map[phrase] = sig_score
if(max_sig < sig_map[phrase]):
max_sig = sig_map[phrase]
max_pair = index
if(max_sig < alpha):
break
#merge max pair
merged_phrase = phrases[max_pair] + " "+ phrases[max_pair+1]
#fix phrases
phrases[max_pair] = merged_phrase
phrases.pop(max_pair+1)
return phrases
def _significance_score(self, phrase1, phrase2, hash_counter, total_words):
"""
        Calculates the significance score of the phrase obtained by joining phrase1
        and phrase2. The significance score measures how unlikely the new phrase is
        under an independence assumption: the more unlikely it is, the more informative it is.
Parameters:
@phrase1: first phrase
@phrase2: second phrase
@hash_counter: map from phrases to their respective raw frequency
@total_words: total count of the words in input corpus.
"""
combined_phrase = phrase1+" "+phrase2
combined_size = len(combined_phrase.split())
actual_occurence = hash_counter[combined_phrase]
numerator = hash_counter[phrase1]*hash_counter[phrase2]
if actual_occurence == 0:
return float("-inf")
denominator = total_words * total_words
independent_prob = numerator/denominator
independent_prob *= 2
expected_occurence = independent_prob*total_words
return (actual_occurence-expected_occurence)/math.sqrt(max(actual_occurence, expected_occurence))
def _get_true_frequency(self, hash_counter):
"""
Updates the raw frequency of the phrases to get their true frequencies.
"""
true_counter = Counter(hash_counter)
for key in hash_counter:
val = key.split()
if len(val) <= 1:
continue
substr1 = " ".join(val[0:-1])
substr2 = " ".join(val[1:])
true_counter[substr1] -= hash_counter[key]
true_counter[substr2] -= hash_counter[key]
return true_counter
def _get_stopwords(self):
"""
        Returns the set of stopwords read from topmine_src/stopwords.txt.
"""
f = open("topmine_src/stopwords.txt")
stopwords = set()
for line in f:
stopwords.add(line.rstrip())
return stopwords
def _get_word_freq(self, documents):
"""
Calculates the frequency of each word in the input document.
"""
total_words = 0
word_freq = Counter()
active_indices = []
for doc_index, doc in enumerate(documents):
words = doc.split()
word_indices = []
for word_index, word in enumerate(words):
word_freq[word] += 1
word_indices.append(word_index)
total_words += 1
active_indices.append(word_indices)
return total_words, word_freq, active_indices
def _get_partitioned_docs(self, document_range, doc_phrases):
"""
        Groups the phrase-segmented sentences back into their original documents using the document ranges.
"""
partitioned_docs = []
start = 0
end = 0
for idx in document_range:
end = idx
final_doc = []
for i in range(start, end):
final_doc.extend(doc_phrases[i])
partitioned_docs.append(final_doc)
start = end
return partitioned_docs
def _process_partitioned_docs(self, partitioned_docs):
self.vocab = {}
self.index_vocab = []
self.partitioned_docs = []
word_counter = 0
for document_index, document in enumerate(partitioned_docs):
document_of_phrases = []
for phrase in document:
phrases_of_words = []
for word in phrase.split():
if word not in self.vocab:
self.vocab[word] = word_counter
self.index_vocab.append(word)
word_counter += 1
phrases_of_words.append(self.vocab[word])
document_of_phrases.append(phrases_of_words)
self.partitioned_docs.append(document_of_phrases)
def _preprocess_input(self, filename, stopwords):
"""
        Performs preprocessing on the input documents: lowercasing, punctuation-based sentence splitting, and stopword removal.
"""
f = open(filename, 'r')
documents = []
document_range = []
i = 0
num_docs = 0
for line in f:
line_lowercase = line.lower()
sentences_no_punc = re.split(r"[.,;!?]",line_lowercase)
stripped_sentences = []
for sentence in sentences_no_punc:
stripped_sentences.append(re.sub('[^A-Za-z0-9]+', ' ', sentence))
sentences_no_punc = stripped_sentences
i += len(sentences_no_punc)
document_range.append(i)
documents.extend(sentences_no_punc)
num_docs += 1
documents = [doc.strip() for doc in documents]
# remove stop-words
documents2 = []
for doc in documents:
documents2.append(' '.join([word for word in doc.split() if word not in stopwords]))
documents = documents2[:]
return documents, document_range, num_docs
def _run_phrase_mining(self, min_support, max_phrase_size, alpha, file_name):
"""
Runs the phrase mining algorithm.
Parameters:
@min_support: minimum support threshold which must be satisfied by each phrase.
@max_phrase_size: maximum allowed phrase size
@alpha: threshold for the significance score
@file_name: path to the input corpus
"""
stopwords = self._get_stopwords()
documents, document_range, num_docs = self._preprocess_input(file_name, stopwords)
#calculate frequency of all words
total_words, word_freq, active_indices = self._get_word_freq(documents)
vocab_size = len(word_freq)
#run frequent pattern mining
hash_counter = self._frequentPatternMining(documents, min_support, max_phrase_size, word_freq, active_indices)
#run agglomerative clustering
doc_phrases = []
for doc in documents:
doc_phrases.append(self._agglomerative_clustering(doc, hash_counter, alpha, total_words))
#update true count of each phrase
self.true_counter = self._get_true_frequency(hash_counter)
partitioned_docs = self._get_partitioned_docs(document_range, doc_phrases)
self._process_partitioned_docs(partitioned_docs)
return self.partitioned_docs, self.index_vocab
def get_frequent_phrases(self, min_support):
"""
        Returns the multi-word phrases in the corpus that occur at least min_support
        times, in descending order of frequency.
"""
frequent_phrases = []
for key,value in self.true_counter.most_common():
if value >= min_support and len(key.split(" "))>1:
frequent_phrases.append((key, value))
elif value < min_support:
break
return frequent_phrases
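# Hedged usage sketch (illustrative only): mines a corpus and collects the most frequent
# multi-word phrases. The corpus path is a placeholder, and note that _get_stopwords()
# above expects 'topmine_src/stopwords.txt' to exist on disk.
def _example_phrase_mining(corpus_path='corpus.txt'):
    miner = PhraseMining(corpus_path, min_support=10, max_phrase_size=40, alpha=4)
    partitioned_docs, index_vocab = miner.mine()
    # Phrases occurring at least min_support times, in descending order of frequency.
    frequent = miner.get_frequent_phrases(min_support=10)
    return partitioned_docs, index_vocab, frequent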
|
|
from __future__ import division
import abc
import numpy as np
from copy import deepcopy
from menpo.fit.base import Fitter
from menpo.fit.fittingresult import FittingResultList
from menpo.transform import AlignmentAffine, Scale
from menpo.landmark import LandmarkGroup
from .fittingresult import MultilevelFittingResult
from .functions import noisy_align
class MultilevelFitter(Fitter):
r"""
    Mixin defining the interface that all multilevel fitter objects must implement.
"""
@abc.abstractproperty
def reference_shape(self):
r"""
        Returns the reference shape. Typically, the mean of the shape model.
"""
pass
@abc.abstractproperty
def feature_type(self):
r"""
Defines the feature computation function.
"""
pass
@abc.abstractproperty
def n_levels(self):
r"""
        Returns the number of levels used by the fitter.
"""
pass
@abc.abstractproperty
def downscale(self):
r"""
Returns the downscale factor used by the fitter.
"""
pass
@abc.abstractproperty
def scaled_levels(self):
r"""
Returns True if the shape results returned by the basic fittings
must be scaled.
"""
pass
@abc.abstractproperty
def interpolator(self):
r"""
Returns the type of interpolator used by the fitter.
"""
pass
def fit_images(self, images, group=None, label='all',
initialization='from_gt_shape', noise_std=0.0,
rotation=False, max_iters=50, verbose=True, view=False,
error_type='me_norm', **kwargs):
r"""
Fits a list of images.
Parameters
-----------
images: list of :class:`pybug.image.masked.MaskedImage`
The list of images to be fitted.
group : string, Optional
The key of the landmark set that should be used. If None,
and if there is only one set of landmarks, this set will be used.
Default: None
label: string, Optional
            The label of the landmark manager that you wish to use. If no
label is passed, the convex hull of all landmarks is used.
Default: 'all'
initialization: 'from_gt_shape' or 'detection', optional
The type of initialization to be used for fitting the image.
Default: 'from_gt_shape'
noise_std: float
The std of the gaussian noise used to produce the initial shape.
Default: 0.0
rotation: boolean
Specifies whether in-plane rotation is to be used to produce the
initial shape.
Default: False
max_iters: int or list, optional
The maximum number of iterations.
If int, then this will be the overall maximum number of iterations
for all the pyramidal levels.
If list, then a maximum number of iterations is specified for each
pyramidal level.
Default: 50
verbose: boolean
Whether or not to print information related to the fitting
results (such as: final error, convergence, ...).
Default: True
view: boolean
Whether or not the fitting results are to be displayed.
Default: False
error_type: 'me_norm', 'me' or 'rmse', optional.
Specifies the way in which the error between the fitted and
ground truth shapes is to be computed.
Default: 'me_norm'
Returns
-------
FittingList: :class:`pybug.aam.fitting.FittingList`
A fitting list object containing a fitting list object
associated to each image.
"""
n_images = len(images)
fittings = []
for j, image in enumerate(images):
if verbose:
print '- fitting image {} of {}'.format(j, n_images)
fittings.append(
self.fit(image, group=group, label=label,
initialization=initialization, noise_std=noise_std,
rotation=rotation, max_iters=max_iters,
verbose=verbose, view=view, error_type=error_type,
**kwargs))
return FittingResultList(fittings)
def fit(self, image, group=None, label='all',
initialization='from_gt_shape', noise_std=0.0, rotation=False,
max_iters=50, verbose=True, view=False, error_type='me_norm',
**kwargs):
r"""
Fits a single image.
Parameters
-----------
image: :class:`pybug.image.masked.MaskedImage`
The image to be fitted.
group: string, Optional
The key of the landmark set that should be used. If None,
and if there is only one set of landmarks, this set will be used.
Default: None
label: string, Optional
            The label of the landmark manager that you wish to use. If no
label is passed, the convex hull of all landmarks is used.
Default: 'all'
initialization: 'from_gt_shape' or 'detection', optional
The type of initialization to be used for fitting the image.
Default: 'from_gt_shape'
noise_std: float
The std of the gaussian noise used to produce the initial shape.
Default: 0.0
rotation: boolean
Specifies whether in-plane rotation is to be used to produce the
initial shape.
Default: False
max_iters: int or list, optional
The maximum number of iterations.
If int, then this will be the overall maximum number of iterations
for all the pyramidal levels.
If list, then a maximum number of iterations is specified for each
pyramidal level.
Default: 50
verbose: boolean
Whether or not to print information related to the fitting
results (such as: final error, convergence, ...).
Default: True
view: boolean
Whether or not the fitting results are to be displayed.
Default: False
error_type: 'me_norm', 'me' or 'rmse', optional.
Specifies the way in which the error between the fitted and
ground truth shapes is to be computed.
Default: 'me_norm'
Returns
-------
FittingList: :class:`pybug.aam.fitting.FittingList`
A fitting list object containing the fitting objects associated
to each run.
"""
image = deepcopy(image)
if isinstance(image.landmarks[group][label], LandmarkGroup):
gt_shape = image.landmarks[group][label].lms
else:
            if group is not None or label != 'all':
raise ValueError('The specified group {} and/or '
'label {} do not exist'.format(group,
label))
            elif initialization != 'detection':
raise ValueError('Initialization method {} cannot '
'be used because the image is not '
'landmarked'.format(initialization))
gt_shape = None
        if initialization == 'from_gt_shape':
initial_shape = self._noisy_align_from_gt_shape(
gt_shape, noise_std=noise_std, rotation=rotation)
        elif initialization == 'detection':
initial_shape = self._detect_shape(
noise_std=noise_std, rotation=rotation)
else:
raise ValueError('Unknown initialization string selected. '
'Valid options are: "from_gt_shape", '
'"detection"')
images = self._prepare_image(image, initial_shape,
gt_shape=gt_shape)
if gt_shape:
gt_shapes = [i.landmarks['gt_shape'].lms for i in images]
else:
gt_shapes = None
initial_shapes = [i.landmarks['initial_shape'].lms
for i in images]
affine_correction = AlignmentAffine(initial_shapes[-1], initial_shape)
fittings = self._fit(images, initial_shapes[0], max_iters=max_iters,
gt_shapes=gt_shapes, **kwargs)
multiple_fitting = self._create_fitting(image, fittings,
affine_correction,
gt_shape=gt_shape,
error_type=error_type)
if verbose:
multiple_fitting.print_fitting_info()
if view:
multiple_fitting.view_final_fitting(new_figure=True)
return multiple_fitting
def _detect_shape(self, noise_std=0.0, rotation=False):
r"""
Generates an initial shape by automatically detecting the object
being modelled (typically faces) in the image. This method should be
wired to future face and object detection algorithms.
Parameters
-----------
noise_std: float, optional
The std of the gaussian noise used to produce the initial shape.
Default: 0.0
rotation: boolean, optional
Specifies whether rotation is to be used to produce the initial
shape.
Default: False
Returns
-------
initial_shape: :class:`pybug.shape.PointCloud`
The initial shape.
"""
        raise NotImplementedError('_detect_shape not implemented yet')
def _noisy_align_from_gt_shape(self, gt_shape, noise_std=0.0,
rotation=False):
r"""
Generates an initial shape by adding gaussian noise to
the perfect similarity alignment between the ground truth
and default shape.
Parameters
-----------
gt_shape: :class:`pybug.shape.PointCloud` list
The ground truth shape.
noise_std: float, optional
The std of the gaussian noise used to produce the initial shape.
Default: 0.0
rotation: boolean, optional
Specifies whether ground truth in-plane rotation is to be used
to produce the initial shape.
Default: False
Returns
-------
initial_shape: :class:`pybug.shape.PointCloud`
The initial shape.
"""
reference_shape = self.reference_shape
return noisy_align(reference_shape, gt_shape, noise_std=noise_std,
rotation=rotation).apply(reference_shape)
@abc.abstractmethod
def _prepare_image(self, image, initial_shape, gt_shape=None):
r"""
Prepares an image to be fitted.
Parameters
-----------
image: :class:`pybug.image.masked.MaskedImage`
The original image to be fitted.
initial_shape: class:`pybug.shape.PointCloud`
The initial shape from which the fitting will start.
gt_shape: class:`pybug.shape.PointCloud`, optional
The original ground truth shape associated to the image.
Default: None
Returns
-------
images: :class:`pybug.image.masked.MaskedImage` list
A list containing the images that will be used by the fitting
algorithms.
"""
pass
def _create_fitting(self, image, fittings, affine_correction,
gt_shape=None, error_type='me_norm'):
r"""
Creates the :class: `pybug.aam.fitting.MultipleFitting` object
associated with a particular Fitter objects.
Parameters
-----------
image: :class:`pybug.image.masked.MaskedImage`
The original image to be fitted.
fittings: :class:`pybug.aam.fitting.BasicFitting` list
A list of basic fitting objects containing the state of the
different fitting levels.
affine_correction: :class: `pybug.transforms.affine.Affine`
An affine transform that maps the result of the top resolution
fitting level to the space scale of the original image.
gt_shape: class:`pybug.shape.PointCloud`, optional
The ground truth shape associated to the image.
Default: None
error_type: 'me_norm', 'me' or 'rmse', optional.
Specifies the way in which the error between the fitted and
ground truth shapes is to be computed.
Default: 'me_norm'
Returns
-------
fitting: :class:`pybug.aam.Fitting`
The fitting object that will hold the state of the fitter.
"""
return MultilevelFittingResult(image, self, fittings,
affine_correction, gt_shape=gt_shape,
error_type=error_type)
def _fit(self, images, initial_shape, gt_shapes=None, max_iters=50,
**kwargs):
r"""
Fits the AAM to an image using Lucas-Kanade.
Parameters
-----------
images: :class:`pybug.image.masked.MaskedImage` list
The images to be fitted.
initial_shape: :class:`pybug.shape.PointCloud`
The initial shape from which the fitting will start.
gt_shapes: :class:`pybug.shape.PointCloud` list, optional
The original ground truth shapes associated to the images.
Default: None
max_iters: int or list, optional
The maximum number of iterations.
If int, then this will be the overall maximum number of iterations
for all the pyramidal levels.
If list, then a maximum number of iterations is specified for each
pyramidal level.
Default: 50
Returns
-------
fittings: :class:`pybug.aam.fitting` list
The fitting object containing the state of the whole fitting
procedure.
"""
shape = initial_shape
n_levels = self.n_levels
        if isinstance(max_iters, int):
            max_iters = [np.round(max_iters/n_levels)
                         for _ in range(n_levels)]
        elif len(max_iters) == 1 and n_levels > 1:
            max_iters = [np.round(max_iters[0]/n_levels)
                         for _ in range(n_levels)]
        elif len(max_iters) != n_levels:
            raise ValueError('max_iters can be an integer, an integer list '
                             'containing 1 or {} elements, or '
                             'None'.format(self.n_levels))
gt = None
fittings = []
for j, (i, f, it) in enumerate(zip(images, self._fitters, max_iters)):
if gt_shapes is not None:
gt = gt_shapes[j]
parameters = f.get_parameters(shape)
fitting = f.fit(i, parameters, gt_shape=gt,
max_iters=it, **kwargs)
fittings.append(fitting)
shape = fitting.final_shape
if self.scaled_levels:
Scale(self.downscale,
n_dims=shape.n_dims).apply_inplace(shape)
return fittings
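# Hedged usage sketch (illustrative only): MultilevelFitter is an abstract mixin, so a
# concrete fitter instance from the surrounding framework is assumed here; `fitter` and
# `image` are placeholders supplied by the caller.
def _example_fit_single_image(fitter, image):
    # Initialise from the ground-truth landmarks perturbed by gaussian noise, run at
    # most 50 iterations spread over the pyramid levels, and return the multilevel
    # fitting result that aggregates the per-level fittings.
    return fitter.fit(image, group=None, label='all',
                      initialization='from_gt_shape', noise_std=0.04,
                      max_iters=50, verbose=False, view=False)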
|
|
# ******************************************************************************
# Copyright 2014-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""
Utility functions for testing
"""
from builtins import zip
import numpy as np
import numpy.random as nprnd
from neon import logger as neon_logger
from neon.backends.backend import Tensor
def sparse_rand(shape, frac=0.05, round_up=False):
    """
    Generate an input with sparse activation in the input dimension, for LSTM testing.
    frac is the fraction of the matrix elements which will be nonzero. Set round_up to
    True to get a binary matrix, i.e. elements are either set to 0 or 1.
    """
num_el = np.prod(shape)
inds = nprnd.permutation(num_el)[0:int(frac * num_el)]
# draw frac*num_el random numbers
vals = nprnd.random(inds.size)
if round_up:
vals = np.ceil(vals)
out = np.zeros(shape)
out.flat[inds] = vals
return (out, inds)
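# Hedged usage sketch (illustrative only): builds a 5% dense 20x30 matrix and a binary
# version of it, and checks that the nonzero count does not exceed the sampled indices.
def _example_sparse_rand():
    out, inds = sparse_rand((20, 30), frac=0.05)                  # real-valued entries
    binary, _ = sparse_rand((20, 30), frac=0.05, round_up=True)   # entries in {0, 1}
    assert np.count_nonzero(out) <= inds.size
    return out, binary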
def allclose_with_out(x, y, atol=0.0, rtol=1.0e-5):
    """
    Run np.allclose on x and y; if it fails, log some stats before returning.
    """
ac = np.allclose(x, y, rtol=rtol, atol=atol)
if not ac:
dd = np.abs(x - y)
neon_logger.display('abs errors: %e [%e, %e] Abs Thresh = %e'
% (np.median(dd), np.min(dd), np.max(dd), atol))
amax = np.argmax(dd)
if np.isscalar(x):
neon_logger.display('worst case: %e %e' % (x, y.flat[amax]))
elif np.isscalar(y):
neon_logger.display('worst case: %e %e' % (x.flat[amax], y))
else:
neon_logger.display('worst case: %e %e' % (x.flat[amax], y.flat[amax]))
dd = np.abs(dd - atol) / np.abs(y)
neon_logger.display('rel errors: %e [%e, %e] Rel Thresh = %e'
% (np.median(dd), np.min(dd), np.max(dd), rtol))
amax = np.argmax(dd)
if np.isscalar(x):
neon_logger.display('worst case: %e %e' % (x, y.flat[amax]))
elif np.isscalar(y):
neon_logger.display('worst case: %e %e' % (x.flat[amax], y))
else:
neon_logger.display('worst case: %e %e' % (x.flat[amax], y.flat[amax]))
return ac
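# Hedged usage sketch (illustrative only): compares two nearly identical arrays. When the
# comparison fails, allclose_with_out logs median/min/max absolute and relative errors
# before returning False, which is the reason to prefer it over a bare np.allclose in tests.
def _example_allclose_with_out():
    x = np.linspace(0.0, 1.0, 10)
    y = x + 1e-8
    return allclose_with_out(x, y, atol=1e-6, rtol=1e-5)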
def symallclose(x, y, rtol=1.0e-5):
    """
    Symmetric relative allclose function: checks abs(x - y) / (abs(x) + abs(y)) <= rtol.
    """
dd = np.divide(np.abs(x - y), np.abs(x) + np.abs(y))
return all(np.less_equal(dd, rtol))
def call_func(f, backend, tensors):
"""
Call and evaluate a function with corresponding tensors, returns a numpy array.
Arguments:
f (lambda): Usage f(backend, *tensors)
backend (Backend or numpy): one of (np, NervanaGPU, NervanaCPU, NervanaMKL)
tensors (list): list of tensors
Returns:
numpy.ndarray: the evaluated result of f
"""
if backend == np:
return f(backend, *tensors)
else:
op_tree = f(backend, *tensors)
op_tree_val = backend.empty(op_tree.shape)
op_tree_val[:] = op_tree
return op_tree_val.get()
def tensors_allclose(a_tensors, b_tensors, rtol=0, atol=1e-7):
"""
    Checks that corresponding tensors from the two lists (or a single pair of tensors)
    are element-wise close; backend Tensors are converted to numpy arrays first.
Arguments:
a_tensors: list of tensors, or a tensor
b_tensors: (another) list of tensors, or a tensor
rtol (float, optional): Relative tolerance.
atol (float, optional): Absolute tolerance.
Returns:
        bool: True if all pairs of tensors are close
"""
# deal with individual tensor
if type(a_tensors) is not list and type(b_tensors) is not list:
a_tensors = [a_tensors]
b_tensors = [b_tensors]
results = []
for a_tensor, b_tensor in zip(a_tensors, b_tensors):
if isinstance(a_tensor, Tensor):
a_tensor = a_tensor.get()
if isinstance(b_tensor, Tensor):
b_tensor = b_tensor.get()
results.append(allclose_with_out(a_tensor.astype(b_tensor.dtype),
b_tensor,
rtol=rtol, atol=atol))
return all(results)
def funcs_allclose(f, backends, backend_tensors, rtol=0, atol=1e-7):
"""
    For each backend, calls f with its tensors and checks that the results are all
    close to each other.
Arguments:
f (lambda): Usage f(backend, *tensors)
        backends (list): list of backends, each one of (np, NervanaGPU, NervanaCPU, NervanaMKL)
        backend_tensors (list): list of tensor lists, one per backend
rtol (float, optional): Relative tolerance.
atol (float, optional): Absolute tolerance.
Returns:
        bool: True if the results of f are all close
"""
# call funcs to get results
results = []
for backend, tensors in zip(backends, backend_tensors):
results.append(call_func(f, backend, tensors))
    # assert results to be equal: compare each result against the previous one
    return tensors_allclose(results[:-1], results[1:], rtol=rtol, atol=atol)
def gen_backend_tensors(backends, tensor_dims, flags=None, dtype=np.float32):
"""
    Generates random tensors for all backends.
Arguments:
backends (list): List of backends, one of (np, NervanaGPU, NervanaCPU, NervanaMKL)
tensor_dims (list): List of dimensions of the tensors, for example
[(1, 2), (3, 4), (5, 6)]
dtype (data-type): One of (np.float16, np.float32), must be the same
as backend.dtype if backend is one of the nervana
backends
flags (list or str): If list is provided, specifies the flag for each
tensor. If str is provided, will be applied to all
tensors. Flags is one of the following:
('zeros', 'pos_ones', 'neg_ones', 'pos_rand',
'neg_rand', 'rand', None)
Returns:
List of lists of tensors, corresponding to the backends.
For example:
[[np.ndarray, np.ndarray, np.ndarray],
[GPUTensor, GPUTensor, GPUTensor],
[CPUTensor, CPUTensor, CPUTensor],
[MKLTensor, MKLTensor, MKLTensor]]
"""
tensor_num = len(tensor_dims)
    if flags is None or isinstance(flags, str):
        # a single flag (or None) applies to all tensors, as documented above
        flags = [flags] * tensor_num
    else:
        assert len(flags) == tensor_num
# init
    backend_tensors = [[] for _ in range(len(backends))]
# generate
idx = 0
for tensor_dim, flag in zip(tensor_dims, flags):
assert flag in ('zeros', 'pos_ones', 'neg_ones', 'pos_rand', 'neg_rand',
'rand', None)
# numpy standard value
if flag == 'zeros':
tensor = np.zeros(tensor_dim)
elif flag == 'pos_ones':
tensor = np.ones(tensor_dim)
elif flag == 'neg_ones':
tensor = -np.ones(tensor_dim)
elif flag == 'pos_rand':
tensor = np.random.rand(*tensor_dim)
elif flag == 'neg_rand':
tensor = -np.random.rand(*tensor_dim)
elif flag == 'rand' or flag is None:
tensor = -np.random.randn(*tensor_dim)
else:
raise NotImplementedError
tensor = tensor.astype(dtype)
# copy to different backends
for backend, tensors in zip(backends, backend_tensors):
if backend == np:
tensors.append(tensor)
else:
assert(backend.default_dtype == dtype)
tensors.append(backend.array(tensor, name='x%s' % idx))
idx += 1
return backend_tensors
class BackendPool(object):
"""
Cache and reuse backend for testing. Useful for testing multiple expressions
per backend. A backend is identified by the backend module and dtype.
"""
pools = {}
@staticmethod
def get_backend(backend_module, dtype):
"""
Arguments:
backend_module: NervanaGPU, NervanaCPU, NervanaMKL
dtype: np.float32, np.float16, etc
Returns:
Backend: the corresponding backend with certain default_dtype
"""
if backend_module not in BackendPool.pools:
BackendPool.pools[backend_module] = dict()
pool = BackendPool.pools[backend_module]
if dtype not in pool:
pool[dtype] = backend_module(default_dtype=dtype)
be = pool[dtype]
return be
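# Hedged usage sketch (illustrative only): exercises gen_backend_tensors and
# tensors_allclose with the plain numpy "backend" only, so no Nervana backend has to be
# constructed; one flag is given per requested tensor shape.
def _example_numpy_backend_tensors():
    backend_tensors = gen_backend_tensors([np], [(2, 3), (3, 4)],
                                          flags=['rand', 'pos_ones'], dtype=np.float32)
    np_tensors = backend_tensors[0]
    # Comparing the tensors against themselves trivially passes the allclose check.
    return tensors_allclose(np_tensors, np_tensors, rtol=0, atol=1e-7)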
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""DenseNet models for Keras.
Reference:
- [Densely Connected Convolutional Networks](
https://arxiv.org/abs/1608.06993) (CVPR 2017)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import backend
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import VersionAwareLayers
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.lib.io import file_io
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHTS_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/densenet/')
DENSENET121_WEIGHT_PATH = (
BASE_WEIGHTS_PATH + 'densenet121_weights_tf_dim_ordering_tf_kernels.h5')
DENSENET121_WEIGHT_PATH_NO_TOP = (
BASE_WEIGHTS_PATH +
'densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5')
DENSENET169_WEIGHT_PATH = (
BASE_WEIGHTS_PATH + 'densenet169_weights_tf_dim_ordering_tf_kernels.h5')
DENSENET169_WEIGHT_PATH_NO_TOP = (
BASE_WEIGHTS_PATH +
'densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5')
DENSENET201_WEIGHT_PATH = (
BASE_WEIGHTS_PATH + 'densenet201_weights_tf_dim_ordering_tf_kernels.h5')
DENSENET201_WEIGHT_PATH_NO_TOP = (
BASE_WEIGHTS_PATH +
'densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5')
layers = VersionAwareLayers()
def dense_block(x, blocks, name):
"""A dense block.
Args:
x: input tensor.
blocks: integer, the number of building blocks.
name: string, block label.
Returns:
Output tensor for the block.
"""
for i in range(blocks):
x = conv_block(x, 32, name=name + '_block' + str(i + 1))
return x
def transition_block(x, reduction, name):
"""A transition block.
Args:
x: input tensor.
reduction: float, compression rate at transition layers.
name: string, block label.
Returns:
output tensor for the block.
"""
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_bn')(
x)
x = layers.Activation('relu', name=name + '_relu')(x)
x = layers.Conv2D(
int(backend.int_shape(x)[bn_axis] * reduction),
1,
use_bias=False,
name=name + '_conv')(
x)
x = layers.AveragePooling2D(2, strides=2, name=name + '_pool')(x)
return x
def conv_block(x, growth_rate, name):
"""A building block for a dense block.
Args:
x: input tensor.
growth_rate: float, growth rate at dense layers.
name: string, block label.
Returns:
Output tensor for the block.
"""
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
x1 = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_0_bn')(
x)
x1 = layers.Activation('relu', name=name + '_0_relu')(x1)
x1 = layers.Conv2D(
4 * growth_rate, 1, use_bias=False, name=name + '_1_conv')(
x1)
x1 = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')(
x1)
x1 = layers.Activation('relu', name=name + '_1_relu')(x1)
x1 = layers.Conv2D(
growth_rate, 3, padding='same', use_bias=False, name=name + '_2_conv')(
x1)
x = layers.Concatenate(axis=bn_axis, name=name + '_concat')([x, x1])
return x
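# Hedged illustration (not part of the original module): each conv_block concatenates
# growth_rate new channels onto its input, so a dense_block with `blocks` iterations grows
# the channel dimension by blocks * 32 here (e.g. 6 blocks add 192 channels). The sketch
# below assumes TensorFlow is installed and the default channels_last image data format.
def _example_dense_block_growth():
    inputs = layers.Input(shape=(56, 56, 64))
    outputs = dense_block(inputs, blocks=6, name='demo')
    # Expected static shape: (None, 56, 56, 64 + 6 * 32) == (None, 56, 56, 256).
    return backend.int_shape(outputs)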
def DenseNet(
blocks,
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax'):
"""Instantiates the DenseNet architecture.
Reference:
- [Densely Connected Convolutional Networks](
https://arxiv.org/abs/1608.06993) (CVPR 2017)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Note: each Keras Application expects a specific kind of input preprocessing.
For DenseNet, call `tf.keras.applications.densenet.preprocess_input` on your
inputs before passing them to the model.
Args:
blocks: numbers of building blocks for the four dense layers.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `'channels_last'` data format)
or `(3, 224, 224)` (with `'channels_first'` data format).
      It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
ValueError: if `classifier_activation` is not `softmax` or `None` when
using a pretrained top layer.
"""
if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=224,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
x = layers.Conv2D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name='conv1/bn')(
x)
x = layers.Activation('relu', name='conv1/relu')(x)
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
x = layers.MaxPooling2D(3, strides=2, name='pool1')(x)
x = dense_block(x, blocks[0], name='conv2')
x = transition_block(x, 0.5, name='pool2')
x = dense_block(x, blocks[1], name='conv3')
x = transition_block(x, 0.5, name='pool3')
x = dense_block(x, blocks[2], name='conv4')
x = transition_block(x, 0.5, name='pool4')
x = dense_block(x, blocks[3], name='conv5')
x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='bn')(x)
x = layers.Activation('relu', name='relu')(x)
if include_top:
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(classes, activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D(name='max_pool')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
if blocks == [6, 12, 24, 16]:
model = training.Model(inputs, x, name='densenet121')
elif blocks == [6, 12, 32, 32]:
model = training.Model(inputs, x, name='densenet169')
elif blocks == [6, 12, 48, 32]:
model = training.Model(inputs, x, name='densenet201')
else:
model = training.Model(inputs, x, name='densenet')
# Load weights.
if weights == 'imagenet':
if include_top:
if blocks == [6, 12, 24, 16]:
weights_path = data_utils.get_file(
'densenet121_weights_tf_dim_ordering_tf_kernels.h5',
DENSENET121_WEIGHT_PATH,
cache_subdir='models',
file_hash='9d60b8095a5708f2dcce2bca79d332c7')
elif blocks == [6, 12, 32, 32]:
weights_path = data_utils.get_file(
'densenet169_weights_tf_dim_ordering_tf_kernels.h5',
DENSENET169_WEIGHT_PATH,
cache_subdir='models',
file_hash='d699b8f76981ab1b30698df4c175e90b')
elif blocks == [6, 12, 48, 32]:
weights_path = data_utils.get_file(
'densenet201_weights_tf_dim_ordering_tf_kernels.h5',
DENSENET201_WEIGHT_PATH,
cache_subdir='models',
file_hash='1ceb130c1ea1b78c3bf6114dbdfd8807')
else:
if blocks == [6, 12, 24, 16]:
weights_path = data_utils.get_file(
'densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5',
DENSENET121_WEIGHT_PATH_NO_TOP,
cache_subdir='models',
file_hash='30ee3e1110167f948a6b9946edeeb738')
elif blocks == [6, 12, 32, 32]:
weights_path = data_utils.get_file(
'densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5',
DENSENET169_WEIGHT_PATH_NO_TOP,
cache_subdir='models',
file_hash='b8c4d4c20dd625c148057b9ff1c1176b')
elif blocks == [6, 12, 48, 32]:
weights_path = data_utils.get_file(
'densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5',
DENSENET201_WEIGHT_PATH_NO_TOP,
cache_subdir='models',
file_hash='c13680b51ded0fb44dff2d8f86ac8bb1')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
@keras_export('keras.applications.densenet.DenseNet121',
'keras.applications.DenseNet121')
def DenseNet121(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Densenet121 architecture."""
return DenseNet([6, 12, 24, 16], include_top, weights, input_tensor,
input_shape, pooling, classes)
@keras_export('keras.applications.densenet.DenseNet169',
'keras.applications.DenseNet169')
def DenseNet169(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Densenet169 architecture."""
return DenseNet([6, 12, 32, 32], include_top, weights, input_tensor,
input_shape, pooling, classes)
@keras_export('keras.applications.densenet.DenseNet201',
'keras.applications.DenseNet201')
def DenseNet201(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Densenet201 architecture."""
return DenseNet([6, 12, 48, 32], include_top, weights, input_tensor,
input_shape, pooling, classes)
@keras_export('keras.applications.densenet.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode='torch')
@keras_export('keras.applications.densenet.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='',
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TORCH,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
DOC = """
Reference:
- [Densely Connected Convolutional Networks](
https://arxiv.org/abs/1608.06993) (CVPR 2017)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Note: each Keras Application expects a specific kind of input preprocessing.
For DenseNet, call `tf.keras.applications.densenet.preprocess_input` on your
inputs before passing them to the model.
Args:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `'channels_last'` data format)
or `(3, 224, 224)` (with `'channels_first'` data format).
    It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
"""
setattr(DenseNet121, '__doc__', DenseNet121.__doc__ + DOC)
setattr(DenseNet169, '__doc__', DenseNet169.__doc__ + DOC)
setattr(DenseNet201, '__doc__', DenseNet201.__doc__ + DOC)
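# Hedged usage sketch (illustrative only): builds a randomly initialised DenseNet-121
# classifier (weights=None avoids downloading the pretrained ImageNet weights) and runs
# the matching preprocessing on a dummy batch; assumes the channels_last data format.
def _example_densenet121_forward():
    import numpy as np  # local import: this module does not import numpy itself
    model = DenseNet121(weights=None)
    batch = preprocess_input(np.random.uniform(0, 255, size=(1, 224, 224, 3)))
    return model.predict(batch)  # array of shape (1, 1000) with softmax scores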
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines the events signaled by abinit during the execution. It also
provides a parser to extract these events from the main output file and the log file.
"""
import sys
import os.path
import datetime
import collections
import ruamel.yaml as yaml
import abc
import logging
import numpy as np
from monty.string import indent, is_string, list_strings
from monty.fnmatch import WildCard
from monty.termcolor import colored
from monty.inspect import all_subclasses
from monty.json import MontyDecoder
from pymatgen.core.structure import Structure
from monty.json import MSONable
from pymatgen.util.serialization import pmg_serialize
from .abiinspect import YamlTokenizer
logger = logging.getLogger(__name__)
__all__ = [
"EventsParser",
"get_event_handler_classes",
"ScfConvergenceWarning",
"NscfConvergenceWarning",
"RelaxConvergenceWarning",
"Correction",
"DilatmxError",
"DilatmxErrorHandler",
]
def straceback():
"""Returns a string with the traceback."""
import traceback
return traceback.format_exc()
class AbinitEvent(yaml.YAMLObject):
"""
Example (YAML syntax)::
Normal warning without any handler:
--- !Warning
message: |
This is a normal warning that won't
trigger any handler in the python code!
src_file: routine_name
src_line: 112
...
Critical warning that will trigger some action in the python code.
--- !ScfConvergeWarning
message: |
The human-readable message goes here!
src_file: foo.F90
src_line: 112
tolname: tolwfr
actual_tol: 1.0e-8
required_tol: 1.0e-10
nstep: 50
...
The algorithm to extract the YAML sections is very simple.
1) We use YamlTokenizer to extract the documents from the output file
    2) If we have a tag that ends with "Warning", "Error", "Bug", or "Comment",
       we know we have encountered a new ABINIT event
3) We parse the document with yaml.safe_load(doc.text) and we get the object
Note that:
    # --- and ... become reserved words (when they are placed at
      the beginning of a line) since they are used to mark the beginning and
the end of YAML documents.
# All the possible events should subclass `AbinitEvent` and define
the class attribute yaml_tag so that yaml.safe_load will know how to
build the instance.
"""
color = None
def __init__(self, src_file, src_line, message):
"""
Basic constructor for :class:`AbinitEvent`.
Args:
message: String with human-readable message providing info on the event.
src_file: String with the name of the Fortran file where the event is raised.
            src_line: Integer giving the line number in src_file.
"""
#print("src_file", src_file, "src_line", src_line)
self.message = message
self.src_file = src_file
self.src_line = src_line
@pmg_serialize
def as_dict(self):
        # This is needed because the events printed in the main output file do not define src_file and src_line
src_file = getattr(self, "src_file", "Unknown")
src_line = getattr(self, "src_line", 0)
return dict(message=self.message, src_file=src_file, src_line=src_line, yaml_tag=self.yaml_tag)
@classmethod
def from_dict(cls, d):
cls = as_event_class(d.get("yaml_tag"))
return cls(**{k: v for k, v in d.items() if k != "yaml_tag" and not k.startswith("@")})
@property
def header(self):
try:
return "<%s at %s:%s>" % (self.name, self.src_file, self.src_line)
except AttributeError:
            # This is needed because the events printed in the main output file do not define src_file and src_line
return "<%s at %s:%s>" % (self.name, "Unknown", 0)
def __repr__(self):
return self.header
def __str__(self):
return "\n".join((self.header, self.message))
def __eq__(self, other):
if other is None: return False
return self.message == other.message
def __ne__(self, other):
return not self.__eq__(other)
@property
def name(self):
"""Name of the event (class name)"""
return self.__class__.__name__
@property
def baseclass(self):
"""The baseclass of self."""
for cls in _BASE_CLASSES:
if isinstance(self, cls):
return cls
raise ValueError("Cannot determine the base class of %s" % self.__class__.__name__)
def correct(self, task):
"""
This method is called when an error is detected in a :class:`Task`
It should perform any corrective measures relating to the detected error.
The idea is similar to the one used in custodian but the handler receives
a :class:`Task` object so that we have access to its methods.
Returns:
(dict) JSON serializable dict that describes the errors and actions taken. E.g.
{"errors": list_of_errors, "actions": list_of_actions_taken}.
If this is an unfixable error, actions should be set to None.
"""
return 0
class AbinitComment(AbinitEvent):
"""Base class for Comment events"""
yaml_tag = '!COMMENT'
color = "blue"
class AbinitError(AbinitEvent):
"""Base class for Error events"""
yaml_tag = '!ERROR'
color = "red"
class AbinitYamlError(AbinitError):
"""
Raised if the YAML parser cannot parse the document and the doc tag is an Error.
It's an AbinitError because the msg produced by the code is not valid YAML!
"""
class AbinitBug(AbinitEvent):
"""Base class for Bug events"""
yaml_tag = '!BUG'
color = "red"
class AbinitWarning(AbinitEvent):
"""
Base class for Warning events (the most important class).
Developers should subclass this class to define the different exceptions
raised by the code and the possible actions that can be performed.
"""
yaml_tag = '!WARNING'
color = "magenta"
class AbinitCriticalWarning(AbinitWarning):
color = "red"
class AbinitYamlWarning(AbinitCriticalWarning):
"""
Raised if the YAML parser cannot parse the document and the doc tag is a Warning.
"""
###############################
# Warnings triggering restart #
###############################
class ScfConvergenceWarning(AbinitCriticalWarning):
"""Warning raised when the GS SCF cycle did not converge."""
yaml_tag = '!ScfConvergenceWarning'
class NscfConvergenceWarning(AbinitCriticalWarning):
"""Warning raised when the GS NSCF cycle did not converge."""
yaml_tag = '!NscfConvergenceWarning'
class RelaxConvergenceWarning(AbinitCriticalWarning):
"""Warning raised when the structural relaxation did not converge."""
yaml_tag = '!RelaxConvergenceWarning'
# TODO: for the time being we don't discern between GS and PhononCalculations.
#class PhononConvergenceWarning(AbinitCriticalWarning):
# """Warning raised when the phonon calculation did not converge."""
# yaml_tag = u'!PhononConvergenceWarning'
class QPSConvergenceWarning(AbinitCriticalWarning):
"""Warning raised when the QPS iteration (GW) did not converge."""
yaml_tag = '!QPSConvergenceWarning'
class HaydockConvergenceWarning(AbinitCriticalWarning):
"""Warning raised when the Haydock method (BSE) did not converge."""
yaml_tag = '!HaydockConvergenceWarning'
# Error classes providing a correct method.
# Register the concrete base classes.
_BASE_CLASSES = [
AbinitComment,
AbinitError,
AbinitBug,
AbinitWarning,
]
class EventReport(collections.abc.Iterable, MSONable):
"""
Iterable storing the events raised by an ABINIT calculation.
Attributes::
stat: information about a file as returned by os.stat
"""
def __init__(self, filename, events=None):
"""
List of ABINIT events.
Args:
filename: Name of the file
events: List of Event objects
"""
self.filename = os.path.abspath(filename)
self.stat = os.stat(self.filename)
self.start_datetime, self.end_datetime = None, None
self._events = []
self._events_by_baseclass = collections.defaultdict(list)
if events is not None:
for ev in events:
self.append(ev)
def __len__(self):
return len(self._events)
def __iter__(self):
return self._events.__iter__()
def __getitem__(self, slice):
return self._events[slice]
def __str__(self):
#has_colours = stream_has_colours(stream)
has_colours = True
lines = []
app = lines.append
app("Events found in %s\n" % self.filename)
for i, event in enumerate(self):
if has_colours:
app("[%d] %s" % (i+1, colored(event.header, color=event.color)))
app(indent(event.message, 4))
else:
app("[%d] %s" % (i+1, str(event)))
app("num_errors: %s, num_warnings: %s, num_comments: %s, completed: %s\n" % (
self.num_errors, self.num_warnings, self.num_comments, self.run_completed))
return "\n".join(lines)
def append(self, event):
"""Add an event to the list."""
self._events.append(event)
self._events_by_baseclass[event.baseclass].append(event)
def set_run_completed(self, boolean, start_datetime, end_datetime):
"""Set the value of _run_completed."""
self._run_completed = boolean
if (start_datetime, end_datetime) != (None, None):
# start_datetime: Sat Feb 28 23:54:27 2015
# end_datetime: Sat Feb 28 23:54:30 2015
try:
fmt = "%a %b %d %H:%M:%S %Y"
self.start_datetime = datetime.datetime.strptime(start_datetime, fmt)
self.end_datetime = datetime.datetime.strptime(end_datetime, fmt)
except Exception as exc:
# Maybe LOCALE != en_US
logger.warning(str(exc))
@property
def run_etime(self):
"""Wall-time of the run as `timedelta` object."""
if self.start_datetime is None or self.end_datetime is None:
return None
return self.end_datetime - self.start_datetime
@property
def run_completed(self):
"""True if the calculation terminated."""
try:
return self._run_completed
except AttributeError:
return False
@property
def comments(self):
"""List of comments found."""
return self.select(AbinitComment)
@property
def errors(self):
"""List of errors + bugs found."""
return self.select(AbinitError) + self.select(AbinitBug)
@property
def warnings(self):
"""List of warnings found."""
return self.select(AbinitWarning)
@property
def num_warnings(self):
"""Number of warnings reported."""
return len(self.warnings)
@property
def num_errors(self):
"""Number of errors reported."""
return len(self.errors)
@property
def num_comments(self):
"""Number of comments reported."""
return len(self.comments)
def select(self, base_class):
"""
Return the list of events that inherit from class base_class
"""
return self._events_by_baseclass[base_class]
def filter_types(self, event_types):
events = []
for ev in self:
if type(ev) in event_types: events.append(ev)
return self.__class__(filename=self.filename, events=events)
def get_events_of_type(self, event_class):
"""Return a list of events of the given class."""
return [ev for ev in self if type(ev) == event_class]
@pmg_serialize
def as_dict(self):
return dict(filename=self.filename, events=[e.as_dict() for e in self._events])
@classmethod
def from_dict(cls, d):
return cls(filename=d["filename"], events=[AbinitEvent.from_dict(e) for e in d["events"]])
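# Hedged usage sketch (never called): build an EventReport in memory from a single event
# and query its counters. EventReport calls os.stat on the filename, so this module's own
# path is passed just to point at an existing file.
def _example_build_report(filename=__file__):
    events = [AbinitComment(src_file="foo.F90", src_line=1, message="hello")]
    report = EventReport(filename, events=events)
    return report.num_comments, report.num_errors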
class EventsParserError(Exception):
"""Base class for the exceptions raised by :class:`EventsParser`."""
class EventsParser:
"""
Parses the output or the log file produced by ABINIT and extracts the list of events.
"""
Error = EventsParserError
def parse(self, filename, verbose=0):
"""
Parse the given file. Return :class:`EventReport`.
"""
run_completed, start_datetime, end_datetime = False, None, None
filename = os.path.abspath(filename)
report = EventReport(filename)
w = WildCard("*Error|*Warning|*Comment|*Bug|*ERROR|*WARNING|*COMMENT|*BUG")
import warnings
warnings.simplefilter('ignore', yaml.error.UnsafeLoaderWarning)
with YamlTokenizer(filename) as tokens:
for doc in tokens:
if w.match(doc.tag):
#print("got doc.tag", doc.tag,"--")
try:
#print(doc.text)
event = yaml.load(doc.text) # Can't use ruamel safe_load!
#yaml.load(doc.text, Loader=ruamel.yaml.Loader)
#print(event.yaml_tag, type(event))
except:
#raise
# Wrong YAML doc. Check the doc tag and instantiate the proper event.
message = "Malformed YAML document at line: %d\n" % doc.lineno
message += doc.text
# This call is very expensive when we have many exceptions due to malformed YAML docs.
if verbose:
message += "Traceback:\n %s" % straceback()
if "error" in doc.tag.lower():
print("It seems an error. doc.tag:", doc.tag)
event = AbinitYamlError(message=message, src_file=__file__, src_line=0)
else:
event = AbinitYamlWarning(message=message, src_file=__file__, src_line=0)
event.lineno = doc.lineno
report.append(event)
# Check whether the calculation completed.
if doc.tag == "!FinalSummary":
#print(doc)
run_completed = True
d = doc.as_dict()
#print(d)
start_datetime, end_datetime = d["start_datetime"], d["end_datetime"]
report.set_run_completed(run_completed, start_datetime, end_datetime)
return report
def report_exception(self, filename, exc):
"""
This method is used when self.parser raises an Exception so that
we can report a customized :class:`EventReport` object with info on the exception.
"""
# Build fake event.
event = AbinitError(src_file="Unknown", src_line=0, message=str(exc))
return EventReport(filename, events=[event])
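# Hedged usage sketch (never called): parse an ABINIT log file and print the headers of
# the errors and warnings found in it. The filename "run.abo" is hypothetical.
def _example_parse_log(filename="run.abo"):
    parser = EventsParser()
    report = parser.parse(filename)
    for event in report.errors + report.warnings:
        print(event.header)
    return report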
class EventHandler(MSONable, metaclass=abc.ABCMeta):
"""
Abstract base class defining the interface for an EventHandler.
The __init__ should always provide default values for its arguments so that we can
easily instantiate the handlers with:
handlers = [cls() for cls in get_event_handler_classes()]
The default values should be chosen so as to cover the most typical cases.
Each EventHandler should define the class attribute `can_change_physics`
that is true if the handler changes `important` parameters of the
run that are tightly connected to the physics of the system.
For example, an `EventHandler` that changes the value of `dilatmx` and
prepare the restart is not changing the physics. Similarly a handler
that changes the mixing algorithm. On the contrary, a handler that
changes the value of the smearing is modifying an important physical
parameter, and the user should be made aware of this so that
there's an explicit agreement between the user and the code.
The default handlers are those that do not change the physics,
other handlers can be installed by the user when constructing the flow with
TODO
.. warning::
The EventHandler should perform any action at the level of the input files
needed to solve the problem and then prepare the task for a new submission
The handler should never try to resubmit the task. The submission must be
delegated to the scheduler or Fireworks.
"""
event_class = AbinitEvent
"""AbinitEvent subclass associated to this handler."""
#can_change_physics
FIXED = 1
NOT_FIXED = 0
def __init__(self):
"""Simple init for compatibility with introspection in as_dict/from_dict"""
return super(EventHandler,self).__init__()
@classmethod
def cls2str(cls):
lines = []
app = lines.append
ecls = cls.event_class
app("event name = %s" % ecls.yaml_tag)
app("event documentation: ")
lines.extend(ecls.__doc__.split("\n"))
app("handler documentation: ")
lines.extend(cls.__doc__.split("\n"))
return "\n".join(lines)
def __str__(self):
return "<%s>" % self.__class__.__name__
def can_handle(self, event):
"""True if this handler is associated to the given :class:`AbinitEvent`"""
return self.event_class == event.__class__
# TODO: define a CorrectionRecord object and provide helper functions to build it
def count(self, task):
"""
Return the number of times the event associated to this handler
has been already fixed in the :class:`Task`.
"""
return len([c for c in task.corrections if c["event"]["@class"] == self.event_class])
@abc.abstractmethod
def handle_task_event(self, task, event):
"""
Method to handle Abinit events.
Args:
task: :class:`Task` object.
event: :class:`AbinitEvent` found in the log file.
Return:
0 if no action has been applied, 1 if the problem has been fixed.
"""
@pmg_serialize
def as_dict(self):
"""
Basic implementation of as_dict if __init__ has no arguments. Subclasses may need to overwrite.
"""
d = {}
return d
@classmethod
def from_dict(cls, d):
"""
Basic implementation of from_dict if __init__ has no arguments. Subclasses may need to overwrite.
"""
return cls()
@classmethod
def compare_inputs(cls, new_input, old_input):
def vars_dict(d):
"""
make a simple dictionary and convert numpy arrays to lists
"""
new_d = {}
for key, value in d.items():
if isinstance(value, np.ndarray): value = value.tolist()
new_d[key] = value
return new_d
new_vars = vars_dict(new_input)
old_vars = vars_dict(old_input)
new_keys = set(new_vars.keys())
old_keys = set(old_vars.keys())
intersect = new_keys.intersection(old_keys)
added_keys = new_keys - intersect
removed_keys = old_keys - intersect
changed_keys = set(v for v in intersect if new_vars[v] != old_vars[v])
log_diff = {}
if added_keys:
log_diff['_set'] = {k: new_vars[k] for k in added_keys}
if changed_keys:
log_diff['_update'] = ({k: {'new': new_vars[k], 'old': old_vars[k]} for k in changed_keys})
if new_input.structure != old_input.structure:
log_diff['_change_structure'] = new_input.structure.as_dict()
if removed_keys:
log_diff['_pop'] = {k: old_vars[k] for k in removed_keys}
return log_diff
class Correction(MSONable):
def __init__(self, handler, actions, event, reset=False):
self.handler = handler
self.actions = actions
self.event = event
self.reset = reset
@pmg_serialize
def as_dict(self):
return dict(handler=self.handler.as_dict(), actions=self.actions, event=self.event.as_dict(), reset=self.reset)
@classmethod
def from_dict(cls, d):
dec = MontyDecoder()
return cls(handler=dec.process_decoded(d['handler']), actions=d['actions'],
event=dec.process_decoded(d['event']), reset=d['reset'])
#class WarningHandler(EventHandler):
# """Base class for handlers associated to ABINIT warnings."""
# event_class = AbinitWarning
#
#class BugHandler(EventHandler):
# """Base class for handlers associated to ABINIT bugs."""
# event_class = AbinitBug
class ErrorHandler(EventHandler):
"""Base class for handlers associated to ABINIT errors."""
event_class = AbinitError
_ABC_EVHANDLER_CLASSES = set([ErrorHandler,])
# Public API
def autodoc_event_handlers(stream=sys.stdout):
"""
Print to the given stream the documentation for the events
and the associated handlers.
"""
lines = []
for cls in all_subclasses(EventHandler):
if cls in _ABC_EVHANDLER_CLASSES: continue
event_class = cls.event_class
lines.extend(cls.cls2str().split("\n"))
# Here we enforce the abstract protocol of the class
# The unit test in tests_events will detect the problem.
if not hasattr(cls, "can_change_physics"):
raise RuntimeError("%s: can_change_physics must be defined" % cls)
stream.write("\n".join(lines) + "\n")
def get_event_handler_classes(categories=None):
"""Return the list of handler classes."""
classes = [c for c in all_subclasses(EventHandler) if c not in _ABC_EVHANDLER_CLASSES]
return classes
def as_event_class(obj):
"""
Convert obj into a subclass of AbinitEvent.
obj can be either a class or a string with the class name or the YAML tag
"""
if is_string(obj):
for c in all_subclasses(AbinitEvent):
if c.__name__ == obj or c.yaml_tag == obj: return c
raise ValueError("Cannot find event class associated to %s" % obj)
# Assume class.
assert obj in all_subclasses(AbinitEvent)
return obj
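# Hedged usage sketch (never called): instantiate the default handlers as suggested in
# the EventHandler docstring, and look up an event class from its YAML tag.
def _example_default_handlers():
    handlers = [cls() for cls in get_event_handler_classes()]
    scf_cls = as_event_class("!ScfConvergenceWarning")
    return handlers, scf_cls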
############################################
########## Concrete classes ################
############################################
class DilatmxError(AbinitError):
"""
This Error occurs in variable cell calculations when the increase in the
unit cell volume is too large.
"""
yaml_tag = '!DilatmxError'
class DilatmxErrorHandler(ErrorHandler):
"""
Handle DilatmxError. Abinit produces a netcdf file with the last structure before aborting.
The handler replaces the structure in the input with the last configuration and modifies the value of dilatmx.
"""
event_class = DilatmxError
can_change_physics = False
def __init__(self, max_dilatmx=1.3):
self.max_dilatmx = max_dilatmx
@pmg_serialize
def as_dict(self):
return {'max_dilatmx': self.max_dilatmx}
@classmethod
def from_dict(cls, d):
return cls(max_dilatmx=d['max_dilatmx'])
def handle_task_event(self, task, event):
# Read the last structure dumped by ABINIT before aborting.
filepath = task.outdir.has_abiext("DILATMX_STRUCT.nc")
last_structure = Structure.from_file(filepath)
task._change_structure(last_structure)
#read the suggested dilatmx
# new_dilatmx = 1.05
# if new_dilatmx > self.max_dilatmx:
# msg = "Suggested dilatmx ({}) exceeds maximux configured value ({}).".format(new_dilatmx, self.max_dilatmx)
# return self.NOT_FIXED
# task.strategy.abinit_input.set_vars(dilatmx=new_dilatmx)
msg = "Take last structure from DILATMX_STRUCT.nc, will try to restart with dilatmx %s" % task.get_inpvar("dilatmx")
task.log_correction(event, msg)
# Note that we change the structure but we don't try restart from the previous WFK|DEN file
# because Abinit called mpi_abort and therefore no final WFK|DEN file has been produced.
return self.FIXED
def handle_input_event(self, abiinput, outdir, event):
try:
old_abiinput = abiinput.deepcopy()
# Read the last structure dumped by ABINIT before aborting.
filepath = outdir.has_abiext("DILATMX_STRUCT.nc")
last_structure = Structure.from_file(filepath)
abiinput.set_structure(last_structure)
#FIXME restart from DEN files not always working with interpolation
return Correction(self, self.compare_inputs(abiinput, old_abiinput), event, reset=True)
# return Correction(self, self.compare_inputs(abiinput, old_abiinput), event, event=False)
except Exception as exc:
logger.warning('Error while trying to apply the handler {}.'.format(str(self)), exc)
return None
class TolSymError(AbinitError):
"""
Class of errors raised by Abinit when it cannot detect the symmetries of the system.
The handler assumes the structure makes sense and the error is just due to numerical inaccuracies.
We increase the value of tolsym in the input file (default 1e-8) so that Abinit can find the space group
and re-symmetrize the input structure.
"""
yaml_tag = '!TolSymError'
class TolSymErrorHandler(ErrorHandler):
"""
Increase the value of tolsym in the input file.
"""
event_class = TolSymError
can_change_physics = False
def __init__(self, max_nfixes=3):
self.max_nfixes = max_nfixes
@pmg_serialize
def as_dict(self):
return {'max_nfixes': self.max_nfixes}
@classmethod
def from_dict(cls, d):
return cls(max_nfixes=d['max_nfixes'])
def handle_task_event(self, task, event):
# TODO: Add limit on the number of fixes one can do for the same error
# For example in this case, the scheduler will stop after 20 submissions
if self.count(task) > self.max_nfixes:
return self.NOT_FIXED
old_tolsym = task.get_inpvar("tolsym")
new_tolsym = 1e-6 if old_tolsym is None else old_tolsym * 10
task.set_vars(tolsym=new_tolsym)
task.log_correction(event, "Increasing tolsym from %s to %s" % (old_tolsym, new_tolsym))
return self.FIXED
def handle_input_event(self, abiinput, outdir, event):
try:
old_abiinput = abiinput.deepcopy()
old_tolsym = abiinput["tolsym"]
new_tolsym = 1e-6 if old_tolsym is None else old_tolsym * 10
abiinput.set_vars(tolsym=new_tolsym)
return Correction(self, self.compare_inputs(abiinput, old_abiinput), event, reset=False)
except Exception as exc:
logger.warning('Error while trying to apply the handler {}.'.format(str(self)), exc)
return None
class MemanaError(AbinitError):
"""
Class of errors raised by the memory analyzer.
(the section that estimates the memory requirements from the input parameters).
"""
yaml_tag = '!MemanaError'
class MemanaErrorHandler(ErrorHandler):
"""
Set mem_test to 0 to bypass the memory check.
"""
event_class = MemanaError
can_change_physics = False
def handle_task_event(self, task, event):
task.set_vars(mem_test=0)
task.log_correction(event, "Find MemanaError. Setting mem_test to 0 in input file.")
return self.FIXED
def handle_input_event(self, abiinput, outdir, event):
try:
old_abiinput = abiinput.deepcopy()
abiinput.set_vars(mem_test=0)
return Correction(self, self.compare_inputs(abiinput, old_abiinput), event, reset=False)
except Exception as exc:
logger.warning('Error while trying to apply the handler {}.'.format(str(self)), exc)
return None
class MemoryError(AbinitError):
"""
This error occurs when a checked allocation fails in Abinit.
The only way out is to increase the memory available to the job.
"""
yaml_tag = '!MemoryError'
class MemoryErrorHandler(ErrorHandler):
"""
Handle MemoryError. Increase the resource requirements.
"""
event_class = MemoryError
can_change_physics = False
def handle_task_event(self, task, event):
task.manager.increase_resources()
return self.FIXED
def handle_input_event(self, abiinput, outdir, event):
"""
Shouldn't do anything on the input
"""
return None
|
|
"""
Module to set up run time parameters for Clawpack -- AMRClaw code.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
import numpy as np
#------------------------------
def setrun(claw_pkg='amrclaw'):
#------------------------------
"""
Define the parameters used for running Clawpack.
INPUT:
claw_pkg expected to be "amrclaw" for this setrun.
OUTPUT:
rundata - object of class ClawRunData
"""
from clawpack.clawutil import data
assert claw_pkg.lower() == 'amrclaw', "Expected claw_pkg = 'amrclaw'"
num_dim = 2
rundata = data.ClawRunData(claw_pkg, num_dim)
#------------------------------------------------------------------
# Problem-specific parameters to be written to setprob.data:
#------------------------------------------------------------------
# Sample setup to write one line to setprob.data ...
probdata = rundata.new_UserData(name='probdata',fname='setprob.data')
# Gamma EOS parameter for three materials: air, plastic and water
probdata.add_param('gammagas', 1.4, 'gamma for ideal gas')
probdata.add_param('gammaplas', 1.1, 'gamma est. for polystyrene')
probdata.add_param('gammawat', 7.15, 'gamma for water')
# Pinf parameter for Tammann EOS for three materials: air, plastic and water
# pinfplas calculated with c^2 = gamma*(p+pinf)/rho to make c = 2240 m/s (the polystyrene
# speed of sound; c = 1484 m/s in water). Water values taken from Kirsten's paper in the references.
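# For reference (assuming atmospheric p0 ~ 101325 Pa): pinf = rho*c^2/gamma - p0, e.g.
# plastic: 1050*2240**2/1.1 - 101325 ~ 4.789e9 Pa; water: 1000*1484**2/7.15 ~ 3.08e8 Pa
# (the rounded value 3.0e8 is used below).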
probdata.add_param('pinfgas', 0.0, 'pinf for stiffened gas (air)')
probdata.add_param('pinfplas', 4789425947.72, 'pinf for stiffened plastic')
probdata.add_param('pinfwat', 300000000.0, 'pinf for stiffened water')
# Density at rest and at room temperature of: air, plastic and water
probdata.add_param('rhog', 1.0, 'air density in kg/m^3')
probdata.add_param('rhop', 1050.0, 'polystyrene density in kg/m^3')
probdata.add_param('rhow', 1000.0, 'water density in kg/m^3')
# omegas, omeplas and omewat are not used in this code, but they can be used to extend the code
# to a more general EOS with an extra parameter, like the Van der Waals EOS.
probdata.add_param('omegas', 0.0, 'omega (specific excluded volume) for stiffened gas (air)')
probdata.add_param('omeplas', 0.0, 'omega (specific excluded volume) for stiffened plastic')
probdata.add_param('omewat', 0.0, 'omega (specific excluded volume) for stiffened water')
# Parameters for mapped grid (if changed, they also need to be changed in mapc2p.py, also
# double check the mapc2p.py test works correctly with the new parameters)
probdata.add_param('rsqr', 0.039999, 'radius of outer square for mapped grid')
probdata.add_param('rout', 0.015, 'radius of outer circle of ring inclusion for mapped grid')
probdata.add_param('rinn', 0.010, 'radius of inner circle of ring inclusion for mapped grid')
#------------------------------------------------------------------
# Standard Clawpack parameters to be written to claw.data:
#------------------------------------------------------------------
clawdata = rundata.clawdata # initialized when rundata instantiated
# ---------------
# Spatial domain:
# ---------------
# Number of space dimensions:
clawdata.num_dim = num_dim
# Lower and upper edge of computational domain:
clawdata.lower[0] = -0.05 #-0.055 # xlower
clawdata.upper[0] = 0.05 #0.055 # xupper
clawdata.lower[1] = 0.000000e+00 # ylower
clawdata.upper[1] = 0.05000e+00 # yupper
# Number of grid cells:
# For original mapped grid, used multiples of 20x10, so the interface is aligned
clawdata.num_cells[0] = 40 #40 #40 #56-mymapping #40-randys # mx
clawdata.num_cells[1] = 20 #20 #14 #17-mymapping #14-randys # my
# ---------------
# Size of system:
# ---------------
# Number of equations in the system:
clawdata.num_eqn = 4
# Number of auxiliary variables in the aux array (initialized in setaux)
clawdata.num_aux = 7
# Index of aux array corresponding to capacity function, if there is one:
clawdata.capa_index = 0
# -------------
# Initial time:
# -------------
clawdata.t0 = 0.000000
# Restart from checkpoint file of a previous run?
# Note: If restarting, you must also change the Makefile to set:
# RESTART = True
# If restarting, t0 above should be from original run, and the
# restart_file 'fort.chkNNNNN' specified below should be in
# the OUTDIR indicated in Makefile.
clawdata.restart = False # True to restart from prior results
clawdata.restart_file = 'fort.chk00006' # File to use for restart data
# -------------
# Output times:
#--------------
# Specify at what times the results should be written to fort.q files.
# Note that the time integration stops after the final output time.
clawdata.output_style = 1
if clawdata.output_style==1:
# Output ntimes frames at equally spaced times up to tfinal:
# Can specify num_output_times = 0 for no output
clawdata.num_output_times = 150
clawdata.tfinal = 0.0002 #0.00025 #0.00015 #0.00015
clawdata.output_t0 = True # output at initial (or restart) time?
elif clawdata.output_style == 2:
# Specify a list or numpy array of output times:
# Include t0 if you want output at the initial time.
clawdata.output_times = [0., 0.1]
elif clawdata.output_style == 3:
# Output every step_interval timesteps over total_steps timesteps:
clawdata.output_step_interval = 1
clawdata.total_steps = 4
clawdata.output_t0 = True # output at initial (or restart) time?
clawdata.output_format = 'ascii'      # 'ascii' or 'netcdf'
clawdata.output_q_components = 'all' # could be list such as [True,True]
clawdata.output_aux_components = 'all' # could be list
clawdata.output_aux_onlyonce = False # output aux arrays only at t0
# ---------------------------------------------------
# Verbosity of messages to screen during integration:
# ---------------------------------------------------
# The current t, dt, and cfl will be printed every time step
# at AMR levels <= verbosity. Set verbosity = 0 for no printing.
clawdata.verbosity = 0
# --------------
# Time stepping:
# --------------
# if dt_variable==True: variable time steps used based on cfl_desired,
# if dt_variable==False: fixed time steps dt = dt_initial always used.
clawdata.dt_variable = True
# Initial time step for variable dt.
# (If dt_variable==0 then dt=dt_initial for all steps)
clawdata.dt_initial = 1.000000e-07
# Max time step to be allowed if variable dt used:
clawdata.dt_max = 1.000000e+99
# Desired Courant number if variable dt used
clawdata.cfl_desired = 0.5 #0.600000
# max Courant number to allow without retaking step with a smaller dt:
clawdata.cfl_max = 0.6 #0.700000
# Maximum number of time steps to allow between output times:
clawdata.steps_max = 500000
# ------------------
# Method to be used:
# ------------------
# Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
clawdata.order = 2
# Use dimensional splitting?
clawdata.dimensional_split = 'unsplit' # 'godunov' #'unsplit'
# For unsplit method, transverse_waves can be
# 0 or 'none' ==> donor cell (only normal solver used)
# 1 or 'increment' ==> corner transport of waves
# 2 or 'all' ==> corner transport of 2nd order corrections too
clawdata.transverse_waves = 1 #2
# Number of waves in the Riemann solution:
clawdata.num_waves = 3
# List of limiters to use for each wave family:
# Required: len(limiter) == num_waves
# Some options:
# 0 or 'none' ==> no limiter (Lax-Wendroff)
# 1 or 'minmod' ==> minmod
# 2 or 'superbee' ==> superbee
# 3 or 'vanleer' ==> van Leer
# 4 or 'mc' ==> MC limiter (see limiter.f inline to enable or disable
# the modified minmod: TVD, first order only, "adds viscosity")
clawdata.limiter = [1, 1, 1] #[1, 1, 1]
clawdata.use_fwaves = False # True ==> use f-wave version of algorithms
# Source terms splitting:
# src_split == 0 or 'none' ==> no source term (src routine never called)
# src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
# src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.
clawdata.source_split = 1
# --------------------
# Boundary conditions:
# --------------------
# Number of ghost cells (usually 2)
clawdata.num_ghost = 2
# Choice of BCs at xlower and xupper:
# 0 or 'user' => user specified (must modify bcNamr.f to use this option)
# 1 or 'extrap' => extrapolation (non-reflecting outflow)
# 2 or 'periodic' => periodic (must specify this at both boundaries)
# 3 or 'wall' => solid wall for systems where q(2) is normal velocity
clawdata.bc_lower[0] = 'user' # at xlower
clawdata.bc_upper[0] = 'extrap' # at xupper
clawdata.bc_lower[1] = 'extrap' # at ylower
clawdata.bc_upper[1] = 'extrap' # at yupper
# --------------
# Checkpointing:
# --------------
# Specify when checkpoint files should be created that can be
# used to restart a computation.
clawdata.checkpt_style = 0
if clawdata.checkpt_style == 0:
# Do not checkpoint at all
pass
elif clawdata.checkpt_style == 1:
# Checkpoint only at tfinal.
pass
elif clawdata.checkpt_style == 2:
# Specify a list of checkpoint times.
ckouttime0 = 50.00/1000000.0
ckouttime1 = 63.33/1000000.0
clawdata.checkpt_times = [ckouttime0, ckouttime1]
elif clawdata.checkpt_style == 3:
# Checkpoint every checkpt_interval timesteps (on Level 1)
# and at the final time.
clawdata.checkpt_interval = 5
# ---------------
# Gauges:
# ---------------
gauges = rundata.gaugedata.gauges
# for gauges append lines of the form [gaugeno, x, y, t1, t2]
gauges.append([0, -0.01, 0, 0., 1e9])
gauges.append([1, -0.01, 0.005, 0., 1e9])
gauges.append([2, -0.01, 0.01, 0., 1e9])
gauges.append([3, 0.0, 0, 0., 1e9])
gauges.append([4, 0.0, 0.005, 0., 1e9])
gauges.append([5, 0.0, 0.01, 0., 1e9])
gauges.append([6, 0.01, 0, 0., 1e9])
gauges.append([7, 0.01, 0.005, 0., 1e9])
gauges.append([8, 0.01, 0.01, 0., 1e9])
# ---------------
# AMR parameters:
# ---------------
amrdata = rundata.amrdata
# max number of refinement levels:
amrdata.amr_levels_max = 4
# List of refinement ratios at each level (length at least amr_level_max-1)
amrdata.refinement_ratios_x = [2, 2, 2, 2, 2]
amrdata.refinement_ratios_y = [2, 2, 2, 2, 2]
amrdata.refinement_ratios_t = [2, 2, 2, 2, 2]
# Specify type of each aux variable in amrdata.auxtype.
# This must be a list of length num_aux, each element of which is one
# of:
# 'center', 'capacity', 'xleft', or 'yleft' (see documentation).
amrdata.aux_type = ['center','center','center','center','center','center','center']
# Flag for refinement based on Richardson error estimator:
amrdata.flag_richardson = False # use Richardson?
amrdata.flag_richardson_tol = 1.000000e-00 # Richardson tolerance
# Flag for refinement using routine flag2refine:
amrdata.flag2refine = True # use this?
amrdata.flag2refine_tol = 5000.0 #10000.0 #10000.0 #10.000000 #100000.0 #5.000000e-02 # tolerance used in this routine (100000000.000000)
# User can modify flag2refine to change the criterion for flagging.
# Default: check maximum absolute difference of first component of q
# between a cell and each of its neighbors.
# steps to take on each level L between regriddings of level L+1:
amrdata.regrid_interval = 2
# width of buffer zone around flagged points:
# (typically the same as regrid_interval so waves don't escape):
amrdata.regrid_buffer_width = 2 #3
# clustering alg. cutoff for (# flagged pts) / (total # of cells
# refined)
# (closer to 1.0 => more small grids may be needed to cover flagged
# cells)
amrdata.clustering_cutoff = 0.700000
# print info about each regridding up to this level:
amrdata.verbosity_regrid = 0
# -------------------
# Refinement Regions:
# -------------------
regions = rundata.regiondata.regions
# Remove initial spurious wave from interface coupling by not refining until t_0
t_0 = 1.67/1000000.0
# NOT REQUIRED IF MAPPING NOT PRESENT
# All of the water region (for level 2 with mapping)
#regions.append([2,2,0,1e9,-0.0155,0.0155, 0.0, 0.0155])
# All of the water region (for level 3 with mapping)
#regions.append([3,3,0,1e9,-0.0155,0.0155, 0.0, 0.0155])
# Regions along interface (for level 3 with mapping)
#regions.append([3,3,0,1e9,-0.0155,-0.0145, 0.0, 0.0155])
#regions.append([3,3,0,1e9,0.0145,0.0155, 0.0, 0.0155])
#regions.append([3,3,0,1e9,-0.0155, 0.0155, 0.0145, 0.0155])
# Regions along interface (for level 4 with mapping)
#regions.append([4,4,0,1e9,-0.0155,0.0155, 0.0, 0.0155])
# Regions along interface (for level 5 with mapping)
#regions.append([5,5,0,1e9,-0.0155,0.0155, 0.0, 0.0155])
# Regions along interface (for level 5 with mapping)
#regions.append([5,5,0,1e9,-0.02,0.02, 0.0, 0.02])
# Regions along interface (for level 6 with mapping)
#regions.append([6,6,0,1e9,-0.02,0.02, 0.0, 0.0155])
# Regions along interface (for level 6 without mapping)
#regions.append([5,6,t_0,1e9,-0.025,0.025, 0.0, 0.025])
# Along one corner for Schlieren
##regions.append([6,6,t_0,1e9,-0.02,0.005, 0.005, 0.02])
##regions.append([6,6,t_0,1e9,-0.02,0.02, -0.02, 0.02])
##regions.append([6,6,t_0,1e9,-0.02,0.02, -0.02, 0.02])
##regions.append([5,5,t_0,1e9,-0.01,0.02, 0.01, 0.02])
#regions.append([5,5,t_0,1e9,-0.02,-0.01, 0.01, 0.02])
#OTHER COMBINATIONS
# Interface corners
#regions.append([3,3,0,1e9,-0.0155,-0.0145, 0.0145, 0.0155])
#regions.append([3,3,0,1e9,0.0145,0.0155, 0.0145, 0.0155])
#regions.append([2,3,4e-6,1e9,-0.03,-0.029, 0.0, 0.030])
#regions.append([3,3,0,1e9,-0.02,0.02, 0.0, 0.02])
# to specify regions of refinement append lines of the form
# [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]
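# For example, a hypothetical region refining levels 3-4 around the inclusion from t_0 on
# would read (kept commented out so it does not alter this setup):
#regions.append([3, 4, t_0, 1e9, -0.02, 0.02, 0.0, 0.02])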
# ----- For developers -----
# Toggle debugging print statements:
amrdata.dprint = False # print domain flags
amrdata.eprint = False # print err est flags
amrdata.edebug = False # even more err est flags
amrdata.gprint = False # grid bisection/clustering
amrdata.nprint = False # proper nesting output
amrdata.pprint = False # proj. of tagged points
amrdata.rprint = False # print regridding summary
amrdata.sprint = False # space/memory output
amrdata.tprint = False # time step reporting each level
amrdata.uprint = False # update/upbnd reporting
return rundata
# end of function setrun
# ----------------------
if __name__ == '__main__':
# Set up run-time parameters and write all data files.
import sys
rundata = setrun(*sys.argv[1:])
rundata.write()
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
from past.builtins import basestring
from collections import defaultdict
from datetime import datetime
import getpass
import logging
import signal
import socket
import subprocess
import sys
from time import sleep
from sqlalchemy import Column, Integer, String, DateTime, func, Index
from sqlalchemy.orm.session import make_transient
from airflow import executors, models, settings, utils
from airflow.configuration import conf
from airflow.utils import AirflowException, State
Base = models.Base
ID_LEN = models.ID_LEN
# Setting up a statsd client if needed
statsd = None
if conf.getboolean('scheduler', 'statsd_on'):
from statsd import StatsClient
statsd = StatsClient(
host=conf.get('scheduler', 'statsd_host'),
port=conf.getint('scheduler', 'statsd_port'),
prefix=conf.get('scheduler', 'statsd_prefix'))
class BaseJob(Base):
"""
Abstract class to be derived for jobs. Jobs are processing items with state
and duration that aren't task instances. For instance a BackfillJob is
a collection of task instance runs, but should have its own state, start
and end time.
"""
__tablename__ = "job"
id = Column(Integer, primary_key=True)
dag_id = Column(String(ID_LEN),)
state = Column(String(20))
job_type = Column(String(30))
start_date = Column(DateTime())
end_date = Column(DateTime())
latest_heartbeat = Column(DateTime())
executor_class = Column(String(500))
hostname = Column(String(500))
unixname = Column(String(1000))
__mapper_args__ = {
'polymorphic_on': job_type,
'polymorphic_identity': 'BaseJob'
}
__table_args__ = (
Index('job_type_heart', job_type, latest_heartbeat),
)
def __init__(
self,
executor=executors.DEFAULT_EXECUTOR,
heartrate=conf.getint('scheduler', 'JOB_HEARTBEAT_SEC'),
*args, **kwargs):
self.hostname = socket.gethostname()
self.executor = executor
self.executor_class = executor.__class__.__name__
self.start_date = datetime.now()
self.latest_heartbeat = datetime.now()
self.heartrate = heartrate
self.unixname = getpass.getuser()
super(BaseJob, self).__init__(*args, **kwargs)
def is_alive(self):
return (
(datetime.now() - self.latest_heartbeat).seconds <
(conf.getint('scheduler', 'JOB_HEARTBEAT_SEC') * 2.1)
)
def kill(self):
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.end_date = datetime.now()
try:
self.on_kill()
except:
logging.error('on_kill() method failed')
session.merge(job)
session.commit()
session.close()
raise AirflowException("Job shut down externally.")
def on_kill(self):
'''
Will be called when an external kill command is received
'''
pass
def heartbeat_callback(self):
pass
def heartbeat(self):
'''
Heartbeats update the job's entry in the database with a timestamp
for the latest_heartbeat and allow the job to be killed
externally. This makes it possible to monitor, at the system level, what is
actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heartbeat is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
'''
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
if job.state == State.SHUTDOWN:
self.kill()
if job.latest_heartbeat:
sleep_for = self.heartrate - (
datetime.now() - job.latest_heartbeat).total_seconds()
if sleep_for > 0:
sleep(sleep_for)
job.latest_heartbeat = datetime.now()
session.merge(job)
session.commit()
session.close()
self.heartbeat_callback()
logging.debug('[heart] Boom.')
def run(self):
if statsd:
statsd.incr(self.__class__.__name__.lower()+'_start', 1, 1)
# Adding an entry in the DB
session = settings.Session()
self.state = State.RUNNING
session.add(self)
session.commit()
id_ = self.id
make_transient(self)
self.id = id_
# Run
self._execute()
# Marking the success in the DB
self.end_date = datetime.now()
self.state = State.SUCCESS
session.merge(self)
session.commit()
session.close()
if statsd:
statsd.incr(self.__class__.__name__.lower()+'_end', 1, 1)
def _execute(self):
raise NotImplementedError("This method needs to be overridden")
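# Hedged sketch (never called): the minimal shape of a BaseJob subclass. The class is
# created inside a function so that importing this module does not register an extra
# polymorphic identity; the identity string and log message are hypothetical.
def _example_define_job():
    class HelloJob(BaseJob):
        __mapper_args__ = {'polymorphic_identity': 'HelloJob'}

        def _execute(self):
            logging.info("Hello from a hypothetical job")
    return HelloJob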
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs indefinitely and constantly schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so it triggers the task instance. It does this for each task
in each DAG and repeats.
:param dag_id: to run the scheduler for a single specific DAG
:type dag_id: string
:param subdir: to search for DAG under a certain folder only
:type subdir: string
:param test_mode: used for unit testing this class only, runs a single
schedule run
:type test_mode: bool
:param refresh_dags_every: force refresh the DAG definition every N
runs, as specified here
:type refresh_dags_every: int
:param do_pickle: to pickle the DAG object and send over to workers
for non-local executors
:type do_pickle: bool
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
def __init__(
self,
dag_id=None,
subdir=None,
test_mode=False,
refresh_dags_every=10,
num_runs=None,
do_pickle=False,
*args, **kwargs):
self.dag_id = dag_id
self.subdir = subdir
if test_mode:
self.num_runs = 1
else:
self.num_runs = num_runs
self.refresh_dags_every = refresh_dags_every
self.do_pickle = do_pickle
super(SchedulerJob, self).__init__(*args, **kwargs)
self.heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
@utils.provide_session
def manage_slas(self, dag, session=None):
"""
Finding all tasks that have SLAs defined, and sending alert emails
where needed. New SLA misses are also recorded in the database.
We assume that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.filter(TI.dag_id == dag.dag_id)
.filter(TI.state == State.SUCCESS)
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = datetime.now()
SlaMiss = models.SlaMiss
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if task.sla:
dttm += dag.schedule_interval
while dttm < datetime.now():
if dttm + task.sla + dag.schedule_interval < datetime.now():
session.merge(models.SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm += dag.schedule_interval
session.commit()
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.email_sent == False)
.filter(SlaMiss.dag_id == dag.dag_id)
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
blocking_tis = (
session
.query(TI)
.filter(TI.state != State.SUCCESS)
.filter(TI.execution_date.in_(sla_dates))
.filter(TI.dag_id == dag.dag_id)
.all()
)
for ti in blocking_tis:
ti.task = dag.get_task(ti.task_id)
blocking_tis = ([ti for ti in blocking_tis
if ti.are_dependencies_met(main_session=session)])
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
from airflow import ascii
email_content = """\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n</code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}\n{ascii.bug}</code></pre>
""".format(**locals())
emails = []
for t in dag.tasks:
if t.email:
if isinstance(t.email, basestring):
l = [t.email]
elif isinstance(t.email, (list, tuple)):
l = t.email
for email in l:
if email not in emails:
emails.append(email)
if emails and len(slas):
utils.send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
for sla in slas:
sla.email_sent = True
session.merge(sla)
session.commit()
session.close()
def import_errors(self, dagbag):
session = settings.Session()
session.query(models.ImportError).delete()
for filename, stacktrace in list(dagbag.import_errors.items()):
session.add(models.ImportError(
filename=filename, stacktrace=stacktrace))
session.commit()
def process_dag(self, dag, executor):
"""
This method schedules a single DAG by looking at the latest
run for each task and attempting to schedule the following run.
As multiple schedulers may be running for redundancy, this
function takes a lock on the DAG and timestamps the last run
in ``last_scheduler_run``.
"""
DagModel = models.DagModel
session = settings.Session()
# picklin'
pickle_id = None
if self.do_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle_id = dag.pickle(session).id
db_dag = session.query(
DagModel).filter(DagModel.dag_id == dag.dag_id).first()
last_scheduler_run = db_dag.last_scheduler_run or datetime(2000, 1, 1)
secs_since_last = (
datetime.now() - last_scheduler_run).total_seconds()
# if db_dag.scheduler_lock or
if secs_since_last < self.heartrate:
session.commit()
session.close()
return None
else:
# Taking a lock
db_dag.scheduler_lock = True
db_dag.last_scheduler_run = datetime.now()
session.commit()
TI = models.TaskInstance
logging.info(
"Getting latest instance "
"for all tasks in dag " + dag.dag_id)
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.filter(TI.dag_id == dag.dag_id)
.group_by(TI.task_id).subquery('sq')
)
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
)
logging.debug("Querying max dates for each task")
latest_ti = qry.all()
ti_dict = {ti.task_id: ti for ti in latest_ti}
session.expunge_all()
session.commit()
logging.debug("{} rows returned".format(len(latest_ti)))
for task in dag.tasks:
if task.adhoc:
continue
if task.task_id not in ti_dict:
# Brand new task, let's get started
ti = TI(task, task.start_date)
ti.refresh_from_db()
if ti.is_queueable(flag_upstream_failed=True):
logging.info(
'First run for {ti}'.format(**locals()))
executor.queue_task_instance(ti, pickle_id=pickle_id)
else:
ti = ti_dict[task.task_id]
ti.task = task # Hacky but worky
if ti.state == State.RUNNING:
continue # Only one task at a time
elif ti.state == State.UP_FOR_RETRY:
# If the task instance is up for retry, make sure
# the retry delay is met
if ti.is_runnable():
logging.debug('Triggering retry: ' + str(ti))
executor.queue_task_instance(ti, pickle_id=pickle_id)
elif ti.state == State.QUEUED:
# If it was queued we skip it so that it gets prioritized
# in self.prioritize_queued
continue
else:
# Trying to run the next schedule
next_schedule = (
ti.execution_date + task.schedule_interval)
if (
ti.task.end_date and
next_schedule > ti.task.end_date):
continue
ti = TI(
task=task,
execution_date=next_schedule,
)
ti.refresh_from_db()
if ti.is_queueable(flag_upstream_failed=True):
logging.debug('Queuing next run: ' + str(ti))
executor.queue_task_instance(ti, pickle_id=pickle_id)
# Releasing the lock
logging.debug("Unlocking DAG (scheduler_lock)")
db_dag = (
session.query(DagModel)
.filter(DagModel.dag_id == dag.dag_id)
.first()
)
db_dag.scheduler_lock = False
session.merge(db_dag)
session.commit()
session.close()
@utils.provide_session
def prioritize_queued(self, session, executor, dagbag):
# Prioritizing queued task instances
pools = {p.pool: p for p in session.query(models.Pool).all()}
TI = models.TaskInstance
queued_tis = (
session.query(TI)
.filter(TI.state == State.QUEUED)
.all()
)
session.expunge_all()
d = defaultdict(list)
for ti in queued_tis:
if (
ti.dag_id not in dagbag.dags or not
dagbag.dags[ti.dag_id].has_task(ti.task_id)):
# Deleting queued jobs that don't exist anymore
session.delete(ti)
session.commit()
else:
d[ti.pool].append(ti)
for pool, tis in list(d.items()):
open_slots = pools[pool].open_slots(session=session)
if open_slots > 0:
tis = sorted(
tis, key=lambda ti: (-ti.priority_weight, ti.start_date))
for ti in tis[:open_slots]:
task = None
try:
task = dagbag.dags[ti.dag_id].get_task(ti.task_id)
except:
logging.error("Queued task {} seems gone".format(ti))
session.delete(ti)
if task:
ti.task = task
# picklin'
dag = dagbag.dags[ti.dag_id]
pickle_id = None
if self.do_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle_id = dag.pickle(session).id
if ti.are_dependencies_met():
executor.queue_task_instance(ti, force=True, pickle_id=pickle_id)
else:
session.delete(ti)
session.commit()
def _execute(self):
dag_id = self.dag_id
def signal_handler(signum, frame):
logging.error("SIGINT (ctrl-c) received")
sys.exit(1)
signal.signal(signal.SIGINT, signal_handler)
utils.pessimistic_connection_handling()
logging.basicConfig(level=logging.DEBUG)
logging.info("Starting the scheduler")
dagbag = models.DagBag(self.subdir, sync_to_db=True)
executor = dagbag.executor
executor.start()
i = 0
while not self.num_runs or self.num_runs > i:
try:
loop_start_dttm = datetime.now()
try:
self.prioritize_queued(executor=executor, dagbag=dagbag)
except Exception as e:
logging.exception(e)
i += 1
try:
if i % self.refresh_dags_every == 0:
dagbag = models.DagBag(self.subdir, sync_to_db=True)
else:
dagbag.collect_dags(only_if_updated=True)
except:
logging.error("Failed at reloading the dagbag")
if statsd:
statsd.incr('dag_refresh_error', 1, 1)
sleep(5)
if dag_id:
dags = [dagbag.dags[dag_id]]
else:
dags = [
dag for dag in dagbag.dags.values() if not dag.parent_dag]
paused_dag_ids = dagbag.paused_dags()
for dag in dags:
logging.debug("Scheduling {}".format(dag.dag_id))
dag = dagbag.get_dag(dag.dag_id)
if not dag or (dag.dag_id in paused_dag_ids):
continue
try:
self.process_dag(dag, executor)
self.manage_slas(dag)
except Exception as e:
logging.exception(e)
logging.info(
"Done queuing tasks, calling the executor's heartbeat")
duration_sec = (datetime.now() - loop_start_dttm).total_seconds()
logging.info("Loop took: {} seconds".format(duration_sec))
try:
self.import_errors(dagbag)
except Exception as e:
logging.exception(e)
try:
dagbag.kill_zombies()
except Exception as e:
logging.exception(e)
try:
# We really just want the scheduler to never ever stop.
executor.heartbeat()
self.heartbeat()
except Exception as e:
logging.exception(e)
logging.error("Tachycardia!")
except Exception as deep_e:
logging.exception(deep_e)
def heartbeat_callback(self):
if statsd:
statsd.gauge('scheduler_heartbeat', 1, 1)
class BackfillJob(BaseJob):
"""
A backfill job consists of a dag or subdag for a specific time range. It
triggers a set of task instance runs, in the right order and lasts for
as long as it takes for the set of task instances to be completed.
"""
__mapper_args__ = {
'polymorphic_identity': 'BackfillJob'
}
def __init__(
self,
dag, start_date=None, end_date=None, mark_success=False,
include_adhoc=False,
donot_pickle=False,
ignore_dependencies=False,
*args, **kwargs):
self.dag = dag
dag.override_start_date(start_date)
self.dag_id = dag.dag_id
self.bf_start_date = start_date
self.bf_end_date = end_date
self.mark_success = mark_success
self.include_adhoc = include_adhoc
self.donot_pickle = donot_pickle
self.ignore_dependencies = ignore_dependencies
super(BackfillJob, self).__init__(*args, **kwargs)
def _execute(self):
"""
Runs a dag for a specified date range.
"""
session = settings.Session()
start_date = self.bf_start_date
end_date = self.bf_end_date
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle = models.DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.start()
# Build a list of all instances to run
tasks_to_run = {}
failed = []
succeeded = []
started = []
wont_run = []
for task in self.dag.tasks:
if (not self.include_adhoc) and task.adhoc:
continue
start_date = start_date or task.start_date
end_date = end_date or task.end_date or datetime.now()
for dttm in utils.date_range(
start_date, end_date, task.dag.schedule_interval):
ti = models.TaskInstance(task, dttm)
tasks_to_run[ti.key] = ti
# Triggering what is ready to get triggered
while tasks_to_run:
for key, ti in list(tasks_to_run.items()):
ti.refresh_from_db()
if ti.state in (
State.SUCCESS, State.SKIPPED) and key in tasks_to_run:
succeeded.append(key)
del tasks_to_run[key]
elif ti.is_runnable(flag_upstream_failed=True):
executor.queue_task_instance(
ti,
mark_success=self.mark_success,
task_start_date=self.bf_start_date,
pickle_id=pickle_id,
ignore_dependencies=self.ignore_dependencies)
ti.state = State.RUNNING
if key not in started:
started.append(key)
self.heartbeat()
executor.heartbeat()
# Reacting to events
for key, state in list(executor.get_event_buffer().items()):
dag_id, task_id, execution_date = key
if key not in tasks_to_run:
continue
ti = tasks_to_run[key]
ti.refresh_from_db()
if ti.state in (State.FAILED, State.SKIPPED):
if ti.state == State.FAILED:
failed.append(key)
logging.error("Task instance " + str(key) + " failed")
elif ti.state == State.SKIPPED:
wont_run.append(key)
logging.error("Skipping " + str(key) + " failed")
del tasks_to_run[key]
# Removing downstream tasks that also shouldn't run
for t in self.dag.get_task(task_id).get_flat_relatives(
upstream=False):
key = (ti.dag_id, t.task_id, execution_date)
if key in tasks_to_run:
wont_run.append(key)
del tasks_to_run[key]
elif ti.state == State.SUCCESS:
succeeded.append(key)
del tasks_to_run[key]
msg = (
"[backfill progress] "
"waiting: {0} | "
"succeeded: {1} | "
"kicked_off: {2} | "
"failed: {3} | "
"wont_run: {4} ").format(
len(tasks_to_run),
len(succeeded),
len(started),
len(failed),
len(wont_run))
logging.info(msg)
executor.end()
session.close()
if failed:
raise AirflowException(
"Some tasks instances failed, here's the list:\n"+str(failed))
logging.info("All done. Exiting.")
class LocalTaskJob(BaseJob):
__mapper_args__ = {
'polymorphic_identity': 'LocalTaskJob'
}
def __init__(
self,
task_instance,
ignore_dependencies=False,
force=False,
mark_success=False,
pickle_id=None,
task_start_date=None,
*args, **kwargs):
self.task_instance = task_instance
self.ignore_dependencies = ignore_dependencies
self.force = force
self.pickle_id = pickle_id
self.mark_success = mark_success
self.task_start_date = task_start_date
super(LocalTaskJob, self).__init__(*args, **kwargs)
def _execute(self):
command = self.task_instance.command(
raw=True,
ignore_dependencies=self.ignore_dependencies,
force=self.force,
pickle_id=self.pickle_id,
mark_success=self.mark_success,
task_start_date=self.task_start_date,
job_id=self.id,
)
self.process = subprocess.Popen(['bash', '-c', command])
return_code = None
while return_code is None:
self.heartbeat()
return_code = self.process.poll()
def on_kill(self):
self.process.terminate()
|
|
from jinja2 import Template
from autonetkit.design.utils import filters
from autonetkit.design.utils.filters import find_node_by_label
from autonetkit.design.utils.general import group_by
from autonetkit.design.utils.graph_utils import topology_to_nx_graph, wrap_node_ids
from autonetkit.network_model.network_model import NetworkModel
from autonetkit.network_model.types import DeviceType, PortType
from autonetkit.webserver.publish import publish_model_to_webserver
network_model = NetworkModel()
t_phy = network_model.create_topology("physical")
r1 = t_phy.create_node(DeviceType.ROUTER, "r1")
r1.set("x", 0)
r1.set("y", 0)
r1.set("asn", 1)
r2 = t_phy.create_node(DeviceType.ROUTER, "r2")
r3 = t_phy.create_node(DeviceType.ROUTER, "r3")
r4 = t_phy.create_node(DeviceType.ROUTER, "r4")
r5 = t_phy.create_node(DeviceType.ROUTER, "r5")
h1 = t_phy.create_node(DeviceType.HOST, "h1")
h2 = t_phy.create_node(DeviceType.HOST, "h2")
properties = {
"r2": (250, 0, 1),
"r3": (0, 250, 1),
"r4": (250, 250, 1),
"r5": (500, 125, 2),
"h1": (125, 125, 1),
"h2": (500, 250, 2),
}
for node_id, (x, y, asn) in properties.items():
node = find_node_by_label(t_phy, node_id)
node.set("x", x)
node.set("y", y)
node.set("asn", asn)
# create ports
r1p1 = r1.create_port(PortType.PHYSICAL)
h1p1 = h1.create_port(PortType.PHYSICAL)
# and link them
t_phy.create_link(r1p1, h1p1)
# or create directly
t_phy.create_link(r1.create_port(PortType.PHYSICAL), r2.create_port(PortType.PHYSICAL))
# or in a loop
pairs = [(r1, r2), (r1, r3), (r2, r4),
(r3, r4), (r2, r5), (r4, r5), (r5, h2)]
for n1, n2 in pairs:
t_phy.create_link(n1.create_port(PortType.PHYSICAL), n2.create_port(PortType.PHYSICAL))
# create loopbacks
routers = filters.routers(t_phy)
for node in t_phy.nodes():
lo0 = node.create_port(PortType.LOGICAL)
node.set("lo0_id", lo0.id)
# assign port labels
for node in t_phy.nodes():
physical_ports = filters.physical_ports(node)
for index, port in enumerate(physical_ports):
port.set("label", f"eth{index}")
t_ip = network_model.create_topology("ip")
t_ip.add_nodes_from(t_phy.nodes())
t_ip.add_links_from(t_phy.links())
grouped = group_by(t_ip.nodes(), "asn")
for asn, nodes in grouped.items():
for index, node in enumerate(nodes):
lo0 = node.loopback_zero()
loopback_ip = f"172.16.{asn}.{index}"
lo0.set("ip", loopback_ip)
links = [l for l in t_ip.links()
if l.n1.get("asn") == l.n2.get("asn") == asn]
for index, link in enumerate(links):
prefix = f"10.{asn}.{index}"
network = prefix + ".0"
link.p1.set("ip", prefix + ".1")
link.p1.set("network", network)
link.p2.set("ip", prefix + ".2")
link.p2.set("network", network)
# inter-as links
links = [l for l in t_ip.links()
if l.n1.get("asn") != l.n2.get("asn")]
for index, link in enumerate(links):
prefix = f"10.0.{index}"
network = prefix + ".0"
link.p1.set("ip", prefix + ".1")
link.p1.set("network", network)
link.p2.set("ip", prefix + ".2")
t_ospf = network_model.create_topology("ospf")
t_ospf.add_nodes_from(routers)
# OSPF runs over the intra-AS links (same ASN on both endpoints)
intra_as_links = [l for l in t_phy.links()
                  if l.n1.get("asn") == l.n2.get("asn")]
t_ospf.add_links_from(intra_as_links)
t_ibgp = network_model.create_topology("ibgp")
t_ibgp.add_nodes_from(routers)
ibgp_pairs = [(n1, n2) for n1 in t_ibgp.nodes()
for n2 in t_ibgp.nodes()
if n1 != n2 and n1.get("asn") == n2.get("asn")]
for n1, n2 in ibgp_pairs:
p1 = n1.loopback_zero()
p2 = n2.loopback_zero()
t_ibgp.create_link(p1, p2)
t_ebgp = network_model.create_topology("ebgp")
t_ebgp.add_nodes_from(routers)
ebgp_links = [l for l in t_phy.links()
if l.n1.get("asn") != l.n2.get("asn")]
t_ebgp.add_links_from(ebgp_links)
# analysis
import networkx as nx
graph = topology_to_nx_graph(t_phy)
path = nx.shortest_path(graph, h1.id, h2.id)
path = wrap_node_ids(t_phy, path)
p1 = t_phy.create_node_path(path)
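# With the topology above, the (unique) unweighted shortest path is h1 -> r1 -> r2 -> r5 -> h2.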
# Compile device models
compiled = {}
for node in filters.routers(t_phy):
data = {
"hostname": node.label,
"interfaces": [],
"asn": node.get("asn")
}
for port in filters.physical_ports(node):
ip_port = t_ip.get_port_by_id(port.id)
data["interfaces"].append({
"id": port.label,
"ip": ip_port.get("ip")
})
ospf_node = t_ospf.get_node_by_id(node.id)
ospf_enabled = ospf_node.degree() > 0
data["ospf"] = {"networks": [],
"enabled":ospf_enabled}
for port in filters.physical_ports(ospf_node):
if not port.connected:
continue
ip_port = t_ip.get_port_by_id(port.id)
network = ip_port.get("network")
data["ospf"]["networks"].append(network)
ebgp_node = t_ebgp.get_node_by_id(node.id)
data["ebgp"] = {"neighbors": []}
for peer in ebgp_node.peer_nodes():
ip_peer = t_ip.get_node_by_id(peer.id)
peer_ip = ip_peer.loopback_zero().get("ip")
data["ebgp"]["neighbors"].append({
"ip": peer_ip,
"asn": peer.get("asn")
})
ibgp_node = t_ibgp.get_node_by_id(node.id)
bgp_enabled = ebgp_node.degree() > 0 or ibgp_node.degree() > 0
data["bgp_enabled"] = bgp_enabled
data["ibgp"] = {"neighbors": []}
for peer in ibgp_node.peer_nodes():
ip_peer = t_ip.get_node_by_id(peer.id)
peer_ip = ip_peer.loopback_zero().get("ip")
data["ibgp"]["neighbors"].append({
"ip": peer_ip,
"asn": peer.get("asn")
})
compiled[node] = data
for node in filters.hosts(t_phy):
data = {
"hostname": node.label,
"interfaces": []
}
for port in filters.physical_ports(node):
ip_port = t_ip.get_port_by_id(port.id)
data["interfaces"].append({
"id": port.label,
"ip": ip_port.get("ip")
})
compiled[node] = data
# and render using template
rtr_template_str = """
! router
hostname {{ data.hostname }}
{% for interface in data.interfaces %}
{{interface.id}} {{ interface.ip}} up
{% endfor %}
{% if data.ospf.enabled %}
!
router ospf
{% for network in data.ospf.networks %}
network {{network}}
{% endfor %}
!
{% endif %}
{% if data.bgp_enabled %}
router bgp {{ data.asn }}
{% for peer in data.ebgp.neighbors %}
neighbor {{peer.ip}} {{peer.asn}}
{% endfor %}
{% for peer in data.ibgp.neighbors %}
neighbor {{peer.ip}} {{peer.asn}}
{% endfor %}
{% endif %}
!
"""
host_template_str = """
! host
hostname {{ data.hostname }}
{% for interface in data.interfaces %}
{{interface.id}} {{ interface.ip}} up
{% endfor %}
"""
templates = {
DeviceType.ROUTER: Template(rtr_template_str, trim_blocks=True),
DeviceType.HOST: Template(host_template_str, trim_blocks=True)
}
for node, data in compiled.items():
template = templates[node.type]
rendered = template.render(data=data)
print(rendered)
publish_model_to_webserver(network_model)
|
|
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2009 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module provides the :class:`PressureDependenceJob` class, which represents
a job for computing the pressure-dependent rate coefficients of a unimolecular
reaction network.
"""
import os.path
import math
import numpy
import logging
import rmgpy.constants as constants
import rmgpy.quantity as quantity
from rmgpy.kinetics import Chebyshev, PDepArrhenius, getRateCoefficientUnitsFromReactionOrder
from rmgpy.reaction import Reaction
from rmgpy.kinetics.tunneling import Wigner, Eckart
from rmgpy.cantherm.output import prettify
################################################################################
class PressureDependenceJob(object):
"""
A representation of a pressure dependence job. The attributes are:
======================= ====================================================
Attribute Description
======================= ====================================================
`Tmin` The minimum temperature at which to compute :math:`k(T,P)` values
`Tmax` The maximum temperature at which to compute :math:`k(T,P)` values
`Tcount` The number of temperatures at which to compute :math:`k(T,P)` values
`Pmin` The minimum pressure at which to compute :math:`k(T,P)` values
`Pmax` The maximum pressure at which to compute :math:`k(T,P)` values
`Pcount` The number of pressures at which to compute :math:`k(T,P)` values
`Emin` The minimum energy to use to compute :math:`k(T,P)` values
`Emax` The maximum energy to use to compute :math:`k(T,P)` values
`maximumGrainSize` The maximum energy grain size to use to compute :math:`k(T,P)` values
`minimumGrainCount` The minimum number of energy grains to use to compute :math:`k(T,P)` values
`method` The method to use to reduce the master equation to :math:`k(T,P)` values
`interpolationModel` The interpolation model to fit to the computed :math:`k(T,P)` values
`maximumAtoms` The maximum number of atoms to apply pressure dependence to (in RMG jobs)
`activeKRotor` A flag indicating whether to treat the K-rotor as active or adiabatic
`activeJRotor` A flag indicating whether to treat the J-rotor as active or adiabatic
`rmgmode` A flag that toggles "RMG mode", described below
----------------------- ----------------------------------------------------
`network` The unimolecular reaction network
`Tlist` An array of temperatures at which to compute :math:`k(T,P)` values
`Plist` An array of pressures at which to compute :math:`k(T,P)` values
`Elist` An array of energies to use to compute :math:`k(T,P)` values
======================= ====================================================
In RMG mode, several alterations to the k(T,P) algorithm are made both for
speed and due to the nature of the approximations used:
* Densities of states are not computed for product channels
* Arbitrary rigid rotor moments of inertia are included in the active modes;
these cancel in the ILT and equilibrium expressions
* k(E) for each path reaction is computed in the direction A -> products,
where A is always an explored isomer; the high-P kinetics are reversed
if necessary for this purpose
* Thermodynamic parameters are always used to compute the reverse k(E)
from the forward k(E) for each path reaction
RMG mode should be turned off by default except in RMG jobs.
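    A minimal usage sketch (the numeric values are illustrative only, and the
    `method` string is assumed to be one accepted by
    ``network.calculateRateCoefficients()``; a fully specified ``network`` is
    required)::
        job = PressureDependenceJob(network,
            Tmin=(300,'K'), Tmax=(2000,'K'), Tcount=8,
            Pmin=(0.01,'bar'), Pmax=(100,'bar'), Pcount=5,
            maximumGrainSize=(0.5,'kJ/mol'), minimumGrainCount=250,
            method='modified strong collision',
            interpolationModel=('chebyshev', 6, 4))
        job.execute(outputFile='output.py', plot=False)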
"""
def __init__(self, network,
Tmin=None, Tmax=None, Tcount=0, Tlist=None,
Pmin=None, Pmax=None, Pcount=0, Plist=None,
maximumGrainSize=None, minimumGrainCount=0,
method=None, interpolationModel=None, maximumAtoms=None,
activeKRotor=True, activeJRotor=True, rmgmode=False):
self.network = network
self.Tmin = Tmin
self.Tmax = Tmax
self.Tcount = Tcount
if Tlist is not None:
self.Tlist = Tlist
self.Tmin = (numpy.min(self.Tlist.value_si),"K")
self.Tmax = (numpy.max(self.Tlist.value_si),"K")
self.Tcount = len(self.Tlist.value_si)
else:
self.Tlist = None
self.Pmin = Pmin
self.Pmax = Pmax
self.Pcount = Pcount
if Plist is not None:
self.Plist = Plist
self.Pmin = (numpy.min(self.Plist.value_si)*1e-5,"bar")
self.Pmax = (numpy.max(self.Plist.value_si)*1e-5,"bar")
self.Pcount = len(self.Plist.value_si)
else:
self.Plist = None
self.maximumGrainSize = maximumGrainSize
self.minimumGrainCount = minimumGrainCount
self.Emin = None
self.Emax = None
self.Elist = None
self.method = method
self.interpolationModel = interpolationModel
self.maximumAtoms = maximumAtoms
self.activeKRotor = activeKRotor
self.activeJRotor = activeJRotor
self.rmgmode = rmgmode
@property
def Tmin(self):
"""The minimum temperature at which the computed k(T,P) values are valid, or ``None`` if not defined."""
return self._Tmin
@Tmin.setter
def Tmin(self, value):
self._Tmin = quantity.Temperature(value)
@property
def Tmax(self):
"""The maximum temperature at which the computed k(T,P) values are valid, or ``None`` if not defined."""
return self._Tmax
@Tmax.setter
def Tmax(self, value):
self._Tmax = quantity.Temperature(value)
@property
def Tlist(self):
"""The temperatures at which the k(T,P) values are computed."""
return self._Tlist
@Tlist.setter
def Tlist(self, value):
self._Tlist = quantity.Temperature(value)
@property
def Pmin(self):
"""The minimum pressure at which the computed k(T,P) values are valid, or ``None`` if not defined."""
return self._Pmin
@Pmin.setter
def Pmin(self, value):
self._Pmin = quantity.Pressure(value)
@property
def Pmax(self):
"""The maximum pressure at which the computed k(T,P) values are valid, or ``None`` if not defined."""
return self._Pmax
@Pmax.setter
def Pmax(self, value):
self._Pmax = quantity.Pressure(value)
@property
def Plist(self):
"""The pressures at which the k(T,P) values are computed."""
return self._Plist
@Plist.setter
def Plist(self, value):
self._Plist = quantity.Pressure(value)
@property
def maximumGrainSize(self):
"""The maximum allowed energy grain size, or ``None`` if not defined."""
return self._maximumGrainSize
@maximumGrainSize.setter
def maximumGrainSize(self, value):
self._maximumGrainSize = quantity.Energy(value)
def copy(self):
"""
Return a copy of the pressure dependence job.
"""
return PressureDependenceJob(
network = self.network,
Tmin = self.Tmin,
Tmax = self.Tmax,
Tcount = self.Tcount,
Tlist = self.Tlist,
Pmin = self.Pmin,
Pmax = self.Pmax,
Pcount = self.Pcount,
Plist = self.Plist,
maximumGrainSize = self.maximumGrainSize,
minimumGrainCount = self.minimumGrainCount,
method = self.method,
interpolationModel = self.interpolationModel,
maximumAtoms = self.maximumAtoms,
activeKRotor = self.activeKRotor,
activeJRotor = self.activeJRotor,
rmgmode = self.rmgmode,
)
def execute(self, outputFile, plot):
self.network.printSummary()
if outputFile is not None:
self.draw(os.path.dirname(outputFile))
self.initialize()
self.K = self.network.calculateRateCoefficients(self.Tlist.value_si, self.Plist.value_si, self.method)
self.fitInterpolationModels()
if outputFile is not None:
self.save(outputFile)
if plot:
self.plot(os.path.dirname(outputFile))
def generateTemperatureList(self):
"""
Returns an array of temperatures based on the interpolation `model`,
minimum and maximum temperatures `Tmin` and `Tmax` in K, and the number of
temperatures `Tcount`. For Chebyshev polynomials a Gauss-Chebyshev
distribution is used; for all others a linear distribution on an inverse
temperature domain is used. Note that the Gauss-Chebyshev grid does *not*
place `Tmin` and `Tmax` at the endpoints, yet the interpolation is still
valid up to these values.
"""
Tmin = self.Tmin.value_si
Tmax = self.Tmax.value_si
Tcount = self.Tcount
if self.Tlist is not None:
pass
elif self.interpolationModel[0].lower() == 'chebyshev':
# Distribute temperatures on a Gauss-Chebyshev grid
Tlist = numpy.zeros(Tcount, numpy.float64)
for i in range(Tcount):
T = -math.cos((2*i+1) * math.pi / (2*self.Tcount))
T = 2.0 / ((1.0/Tmax - 1.0/Tmin) * T + 1.0/Tmax + 1.0/Tmin)
Tlist[i] = T
self.Tlist = (Tlist,"K")
else:
# Distribute temperatures evenly on a T^-1 domain
Tlist = 1.0/numpy.linspace(1.0/Tmax, 1.0/Tmin, Tcount)
self.Tlist = (Tlist,"K")
return self.Tlist.value_si
def initialize(self):
for reaction in self.network.pathReactions:
tunneling = reaction.transitionState.tunneling
if isinstance(tunneling, Wigner) and tunneling.frequency is None:
tunneling.frequency = (reaction.transitionState.frequency.value_si,"cm^-1")
elif isinstance(tunneling, Eckart) and tunneling.frequency is None:
tunneling.frequency = (reaction.transitionState.frequency.value_si,"cm^-1")
tunneling.E0_reac = (sum([reactant.conformer.E0.value_si for reactant in reaction.reactants])*0.001,"kJ/mol")
tunneling.E0_TS = (reaction.transitionState.conformer.E0.value_si*0.001,"kJ/mol")
tunneling.E0_prod = (sum([product.conformer.E0.value_si for product in reaction.products])*0.001,"kJ/mol")
elif tunneling is not None:
if tunneling.frequency is not None:
# Frequency was given by the user
pass
else:
raise ValueError('Unknown tunneling model {0!r} for path reaction {1}.'.format(tunneling, reaction))
maximumGrainSize = self.maximumGrainSize.value_si if self.maximumGrainSize is not None else 0.0
self.network.initialize(
Tmin = self.Tmin.value_si,
Tmax = self.Tmax.value_si,
Pmin = self.Pmin.value_si,
Pmax = self.Pmax.value_si,
maximumGrainSize = maximumGrainSize,
minimumGrainCount = self.minimumGrainCount,
activeJRotor = self.activeJRotor,
activeKRotor = self.activeKRotor,
rmgmode = self.rmgmode,
)
self.generateTemperatureList()
self.generatePressureList()
def generatePressureList(self):
"""
Returns an array of pressures based on the interpolation `model`,
minimum and maximum pressures `Pmin` and `Pmax` in Pa, and the number of
pressures `Pcount`. For Chebyshev polynomials a Gauss-Chebyshev
distribution is used; for all others a linear distribution on a logarithmic
pressure domain is used. Note that the Gauss-Chebyshev grid does *not*
place `Pmin` and `Pmax` at the endpoints, yet the interpolation is still
valid up to these values.
"""
Pmin = self.Pmin.value_si
Pmax = self.Pmax.value_si
Pcount = self.Pcount
if self.Plist is not None:
pass
elif self.interpolationModel[0].lower() == 'chebyshev':
# Distribute pressures on a Gauss-Chebyshev grid
Plist = numpy.zeros(Pcount, numpy.float64)
for i in range(Pcount):
P = -math.cos((2*i+1) * math.pi / (2*self.Pcount))
P = 10**(0.5 * ((math.log10(Pmax) - math.log10(Pmin)) * P + math.log10(Pmax) + math.log10(Pmin)))
Plist[i] = P
self.Plist = (Plist*1e-5,"bar")
else:
# Distribute pressures evenly on a log domain
Plist = 10.0 ** numpy.linspace(math.log10(Pmin), math.log10(Pmax), Pcount)
self.Plist = (Plist*1e-5,"bar")
return self.Plist.value_si
def fitInterpolationModels(self):
configurations = []
configurations.extend(self.network.isomers)
configurations.extend(self.network.reactants)
configurations.extend(self.network.products)
self.network.netReactions = []
Nreac = self.network.Nisom + self.network.Nreac
Nprod = Nreac + self.network.Nprod
Tmin = self.Tmin.value_si
Tmax = self.Tmax.value_si
Tdata = self.Tlist.value_si
Pmin = self.Pmin.value_si
Pmax = self.Pmax.value_si
Pdata = self.Plist.value_si
for prod in range(Nprod):
for reac in range(Nreac):
if reac == prod: continue
reaction = Reaction(
reactants = configurations[reac].species,
products = configurations[prod].species,
)
kdata = self.K[:,:,prod,reac].copy()
order = len(reaction.reactants)
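# Convert from the SI m^3-per-mol basis to the cm^3-per-mol basis used in
# kunits below: 1 m^3 = 1e6 cm^3, applied once per extra reactant order.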
kdata *= 1e6 ** (order-1)
kunits = {1: 's^-1', 2: 'cm^3/(mol*s)', 3: 'cm^6/(mol^2*s)'}[order]
reaction.kinetics = self.fitInterpolationModel(Tdata, Pdata, kdata, kunits)
self.network.netReactions.append(reaction)
def fitInterpolationModel(self, Tdata, Pdata, kdata, kunits):
Tmin = self.Tmin.value_si
Tmax = self.Tmax.value_si
Pmin = self.Pmin.value_si
Pmax = self.Pmax.value_si
model = self.interpolationModel[0].lower()
if model == 'chebyshev':
kinetics = Chebyshev().fitToData(Tdata, Pdata, kdata, kunits,
self.interpolationModel[1], self.interpolationModel[2],
Tmin, Tmax, Pmin, Pmax,
)
elif model == 'pdeparrhenius':
kinetics = PDepArrhenius().fitToData(Tdata, Pdata, kdata, kunits)
else:
raise Exception('Invalid interpolation model {0!r}.'.format(self.interpolationModel[0]))
return kinetics
def save(self, outputFile):
logging.info('Saving pressure dependence results for {0} network...'.format(self.network.label))
f = open(outputFile, 'a')
Nreac = self.network.Nisom + self.network.Nreac
Nprod = Nreac + self.network.Nprod
Tlist = self.Tlist.value_si
Plist = self.Plist.value_si
Tcount = Tlist.shape[0]
Pcount = Plist.shape[0]
count = 0
for prod in range(Nprod):
for reac in range(Nreac):
if reac == prod: continue
reaction = self.network.netReactions[count]
count += 1
kdata = self.K[:,:,prod,reac].copy()
order = len(reaction.reactants)
kdata *= 1e6 ** (order-1)
kunits = {1: 's^-1', 2: 'cm^3/(mol*s)', 3: 'cm^6/(mol^2*s)'}[order]
f.write('# =========== ')
f.write('=========== ' * Pcount)
f.write('\n')
f.write('# T \ P ')
f.write(' '.join(['{0:11.3e}'.format(P*1e-5) for P in Plist]))
f.write('\n')
f.write('# =========== ')
f.write('=========== ' * Pcount)
f.write('\n')
for t in range(Tcount):
f.write('# {0:11g}'.format(Tlist[t]))
for p in range(Pcount):
f.write(' {0:11.3e}'.format(kdata[t,p]))
f.write('\n')
f.write('# =========== ')
f.write('=========== ' * Pcount)
f.write('\n')
string = 'pdepreaction(reactants={0!r}, products={1!r}, kinetics={2!r})'.format(
[reactant.label for reactant in reaction.reactants],
[product.label for product in reaction.products],
reaction.kinetics,
)
f.write('{0}\n\n'.format(prettify(string)))
f.close()
f = open(os.path.join(os.path.dirname(outputFile), 'chem.inp'), 'a')
count = 0
for prod in range(Nprod):
for reac in range(Nreac):
if reac == prod: continue
reaction = self.network.netReactions[count]
kinetics = reaction.kinetics
count += 1
string = '{0!s:51} 1.0 0.0 0.0\n'.format(reaction)
if isinstance(kinetics, PDepArrhenius):
for P, arrhenius in zip(kinetics.pressures.value_si, kinetics.arrhenius):
string += 'PLOG/ {0:<9.3f} {1:<11.3e} {2:<8.2f} {3:<8.2f}/\n'.format(P / 101325.,
arrhenius.A.value_si / (arrhenius.T0.value_si ** arrhenius.n.value_si) * 1e6 ** (len(reaction.reactants) - 1),
arrhenius.n.value_si,
arrhenius.Ea.value_si / 4184.
)
elif isinstance(kinetics, Chebyshev):
coeffs = kinetics.coeffs.value_si.copy()
coeffs[0,0] += 6 * (len(reaction.reactants) - 1)
string += 'TCHEB/ {0:<9.3f} {1:<9.3f}/\n'.format(kinetics.Tmin.value_si, kinetics.Tmax.value_si)
string += 'PCHEB/ {0:<9.3f} {1:<9.3f}/\n'.format(kinetics.Pmin.value_si / 101325., kinetics.Pmax.value_si / 101325.)
string += 'CHEB/ {0:d} {1:d}/\n'.format(kinetics.degreeT, kinetics.degreeP)
if kinetics.degreeP < 6:
for i in range(kinetics.degreeT):
string += 'CHEB/'
for j in range(kinetics.degreeP):
string += ' {0:<12.3e}'.format(coeffs[i,j])
string += '/\n'
else:
coeffs_list = []
for i in range(kinetics.degreeT):
for j in range(kinetics.degreeP):
coeffs_list.append(coeffs[i,j])
# Note: coeffs[0,0] was already adjusted for the rate units above, so no
# further adjustment of the first coefficient is needed here.
for i in range(len(coeffs_list)):
if i % 5 == 0: string += ' CHEB/'
string += ' {0:<12.3e}'.format(coeffs_list[i])
if i % 5 == 4: string += '/\n'
f.write('{0}\n'.format(string))
f.close()
def plot(self, outputDirectory):
# Skip this step if matplotlib is not installed
try:
import pylab
except ImportError:
return
import matplotlib.cm
cm = matplotlib.cm.jet
Nreac = self.network.Nisom + self.network.Nreac
Nprod = Nreac + self.network.Nprod
Tlist = self.Tlist.value_si
Plist = self.Plist.value_si
Tcount = Tlist.shape[0]
Pcount = Plist.shape[0]
K = self.K
count = 0
for prod in range(Nprod):
for reac in range(Nreac):
if reac == prod: continue
reaction = self.network.netReactions[count]
count += 1
reaction_str = '{0} {1} {2}'.format(
' + '.join([reactant.label for reactant in reaction.reactants]),
'<=>' if prod < Nreac else '-->',
' + '.join([product.label for product in reaction.products]),
)
fig = pylab.figure(figsize=(10,6))
K2 = numpy.zeros((Tcount, Pcount))
if reaction.kinetics is not None:
for t in range(Tcount):
for p in range(Pcount):
K2[t,p] = reaction.kinetics.getRateCoefficient(Tlist[t], Plist[p])
K = self.K[:,:,prod,reac].copy()
order = len(reaction.reactants)
K *= 1e6 ** (order-1)
K2 *= 1e6 ** (order-1)
kunits = {1: 's^-1', 2: 'cm^3/(mol*s)', 3: 'cm^6/(mol^2*s)'}[order]
pylab.subplot(1,2,1)
for p in range(Pcount):
pylab.semilogy(1000.0 / Tlist, K[:,p], color=cm(1.*p/(Pcount-1)), marker='o', linestyle='')
if reaction.kinetics is not None:
pylab.semilogy(1000.0 / Tlist, K2[:,p], color=cm(1.*p/(Pcount-1)), marker='', linestyle='-')
pylab.xlabel('1000 / Temperature (1000/K)')
pylab.ylabel('Rate coefficient ({0})'.format(kunits))
pylab.title(reaction_str)
pylab.subplot(1,2,2)
for t in range(Tcount):
pylab.loglog(Plist*1e-5, K[t,:], color=cm(1.*t/(Tcount-1)), marker='o', linestyle='')
pylab.loglog(Plist*1e-5, K2[t,:], color=cm(1.*t/(Tcount-1)), marker='', linestyle='-')
pylab.xlabel('Pressure (bar)')
pylab.ylabel('Rate coefficient ({0})'.format(kunits))
pylab.title(reaction_str)
fig.subplots_adjust(left=0.10, bottom=0.13, right=0.95, top=0.92, wspace=0.3, hspace=0.3)
pylab.savefig(os.path.join(outputDirectory, 'kinetics_{0:d}.pdf'.format(count)))
pylab.close()
def draw(self, outputDirectory):
"""
Generate a PDF drawing of the pressure-dependent reaction network.
This requires that Cairo and its Python wrapper be available; if not,
the drawing is not generated.
"""
# Skip this step if cairo is not installed
try:
import cairo
except ImportError:
return
from rmgpy.pdep.draw import NetworkDrawer
path = os.path.join(outputDirectory, 'network.pdf')
NetworkDrawer().draw(self.network, format='pdf', path=path)
def saveInputFile(self, path):
"""
Save a CanTherm input file for the pressure dependence job to `path`
on disk.
"""
speciesList = self.network.getAllSpecies()
# Add labels for species, reactions, transition states that don't have them
for i, spec in enumerate(speciesList):
if not spec.label:
spec.label = 'species{0:d}'.format(i+1)
for i, rxn in enumerate(self.network.pathReactions):
if not rxn.label:
rxn.label = 'reaction{0:d}'.format(i+1)
if not rxn.transitionState.label:
rxn.transitionState.label = 'TS{0:d}'.format(i+1)
if not self.network.label:
self.network.label = 'network'
with open(path, 'w') as f:
# Write species
for spec in speciesList:
f.write('species(\n')
f.write(' label = {0!r},\n'.format(str(spec)))
if len(spec.molecule) > 0:
f.write(' structure = SMILES({0!r}),\n'.format(spec.molecule[0].toSMILES()))
if spec.conformer is not None:
if spec.conformer.E0 is not None:
f.write(' E0 = {0!r},\n'.format(spec.conformer.E0))
if len(spec.conformer.modes) > 0:
f.write(' modes = [\n')
for mode in spec.conformer.modes:
f.write(' {0!r},\n'.format(mode))
f.write(' ],\n')
f.write(' spinMultiplicity = {0:d},\n'.format(spec.conformer.spinMultiplicity))
f.write(' opticalIsomers = {0:d},\n'.format(spec.conformer.opticalIsomers))
if spec.molecularWeight is not None:
f.write(' molecularWeight = {0!r},\n'.format(spec.molecularWeight))
if spec.transportData is not None:
f.write(' collisionModel = {0!r},\n'.format(spec.transportData))
if spec.energyTransferModel is not None:
f.write(' energyTransferModel = {0!r},\n'.format(spec.energyTransferModel))
if spec.thermo is not None:
f.write(' thermo = {0!r},\n'.format(spec.thermo))
f.write(')\n\n')
# Write transition states
for rxn in self.network.pathReactions:
ts = rxn.transitionState
f.write('transitionState(\n')
f.write(' label = {0!r},\n'.format(ts.label))
if ts.conformer is not None:
if ts.conformer.E0 is not None:
f.write(' E0 = {0!r},\n'.format(ts.conformer.E0))
if len(ts.conformer.modes) > 0:
f.write(' modes = [\n')
for mode in ts.conformer.modes:
f.write(' {0!r},\n'.format(mode))
f.write(' ],\n')
f.write(' spinMultiplicity = {0:d},\n'.format(ts.conformer.spinMultiplicity))
f.write(' opticalIsomers = {0:d},\n'.format(ts.conformer.opticalIsomers))
if ts.frequency is not None:
f.write(' frequency = {0!r},\n'.format(ts.frequency))
f.write(')\n\n')
# Write reactions
for rxn in self.network.pathReactions:
ts = rxn.transitionState
f.write('reaction(\n')
f.write(' label = {0!r},\n'.format(rxn.label))
f.write(' reactants = [{0}],\n'.format(', '.join([repr(str(spec)) for spec in rxn.reactants])))
f.write(' products = [{0}],\n'.format(', '.join([repr(str(spec)) for spec in rxn.products])))
f.write(' transitionState = {0!r},\n'.format(rxn.transitionState.label))
if rxn.kinetics is not None:
f.write(' kinetics = {0!r},\n'.format(rxn.kinetics))
if ts.tunneling is not None:
f.write(' tunneling = {0!r},\n'.format(ts.tunneling.__class__.__name__))
f.write(')\n\n')
# Write network
f.write('network(\n')
f.write(' label = {0!r},\n'.format(self.network.label))
f.write(' isomers = [\n')
for isomer in self.network.isomers:
f.write(' {0!r},\n'.format(str(isomer.species[0])))
f.write(' ],\n')
f.write(' reactants = [\n')
for reactants in self.network.reactants:
f.write(' ({0}),\n'.format(', '.join([repr(str(spec)) for spec in reactants.species])))
f.write(' ],\n')
f.write(' bathGas = {\n')
for spec, frac in self.network.bathGas.items():
f.write(' {0!r}: {1:g},\n'.format(str(spec), frac))
f.write(' },\n')
f.write(')\n\n')
# Write pressure dependence
f.write('pressureDependence(\n')
f.write(' label = {0!r},\n'.format(self.network.label))
f.write(' Tmin = {0!r},\n'.format(self.Tmin))
f.write(' Tmax = {0!r},\n'.format(self.Tmax))
f.write(' Tcount = {0:d},\n'.format(self.Tcount))
f.write(' Tlist = {0!r},\n'.format(self.Tlist))
f.write(' Pmin = {0!r},\n'.format(self.Pmin))
f.write(' Pmax = {0!r},\n'.format(self.Pmax))
f.write(' Pcount = {0:d},\n'.format(self.Pcount))
f.write(' Plist = {0!r},\n'.format(self.Plist))
if self.maximumGrainSize is not None:
f.write(' maximumGrainSize = {0!r},\n'.format(self.maximumGrainSize))
if self.minimumGrainCount != 0:
f.write(' minimumGrainCount = {0:d},\n'.format(self.minimumGrainCount))
f.write(' method = {0!r},\n'.format(self.method))
if self.interpolationModel is not None:
f.write(' interpolationModel = {0!r},\n'.format(self.interpolationModel))
f.write(' activeKRotor = {0!r},\n'.format(self.activeKRotor))
f.write(' activeJRotor = {0!r},\n'.format(self.activeJRotor))
if self.rmgmode:
f.write(' rmgmode = {0!r},\n'.format(self.rmgmode))
f.write(')\n\n')
|
|
#!/usr/bin/env python2
# coding=utf-8
# Welcome to {Game Title}
# This is a text based game inspired by Fallout written in Python
#
# @copyright 2015 Alexander Young, Noah Hayes
# @license MIT Licence <https://github.com/meun5/camel-game-python/blob/master/LICENSE>
# @link Github <https://github.com/meun5/camel-game-python/>
# @version alpha-dev 0.1.1
#
# Credits:
# @author Alexander Young <youngale@urbandaleschools.com>
# @designer Noah Hayes <hayesnoa@urbandaleschools.com>
#
# Fallout is Copyright Bethesda Game Studios
# Copyright infringement was not the intent of this game
from __future__ import print_function
from datetime import date, timedelta
from sys import exit
import random
import string
import time
import json
import os.path, os
import datetime
gameTitle = "sudo apt-get install a-virus.pyc#!=="
gameTitle += ''.join(random.choice(string.hexdigits) for i in range(36))
save_name = "save.json"
# The story begins on 23/10/2287
curr_date = datetime.date(2287, 10, 23)
maxTravel = {
"user": {
"max": 547,
"min": 120,
},
"haters": {
"max": 176,
"min": 15,
}
}
maxGain = 45
inv = {
"cola": 3,
"radroach": 3,
"kilometres": 46495,
"haters_back": 150,
"day": 1,
"stats": {
"health": 100,
"thirst": 75,
"bandwidth": 250,
},
"need_bandwidth": False,
}
limits = {
"eat": 3,
"drink": 300,
"deduct_min": {
"health": 2,
"thirst": 2,
"bandwidth": 4,
},
"deduct_max": {
"health": 7,
"thirst": 9,
"bandwidth": 8,
},
"generate": {
"health": "4|37",
"drink": "7|29",
}
}
senarios = {
"general": {
"haters": "The haters are {amount} kilometres behind you.",
"travel": "You traveled {amount} kilometres",
"call_isp": "You are out of Bandwidth. Call Bell Canada at 1-866-310-2355.",
"eat": "You ate {limit} radroach meat and gained {amount} health. Thats about it.",
"drink": "You drank {limit} litres of Nuka Cola and quenched {amount} thirst. Nothing else happened.",
},
"travel": {
0: {
"message": "Nothing interseting happened today.",
"gained": "You traveled {amount} kilometres",
"special": "kilometres|same",
"type": "kilometres",
"event": "none",
},
1: {
"message": "You got stuck in a boring conversation and it took all day to get out.",
"gained": "You didn't travel at all today",
"special": "kilometres|none",
"type": "kilometres",
"event": "none"
},
2: {
"message": "Whilst traveling you found a stash of food!",
"gained": "You gained {amount} radroach meats",
"special": "radroach|inc",
"type": "radroach",
"event": "none",
},
3: {
"message": "While you where traveling you happened to find a vending machine with Nuka Cola in it!",
"gained": "You gained {amount} litres of Nuka Cola",
"special": "cola|inc",
"type": "cola",
"event": "none",
},
4: {
"message": "Whilst traveling you where attacked by a deathclaw. In your confusion you traveled in the wrong direction.",
"gained": "You traveled {amount} kilometres back",
"special": "kilometres|half",
"type": "kilometres",
"event": "battle",
},
5: {
"message": "Whilst traveling you passed by a friendly internet cafe. Some generous trolls gave some Nuka Cola",
"gained": "You gain {amount} litres of Nuka Cola",
"special": "cola|inc",
"type": "cola",
"event": "none",
},
6: {
"message": "While you where traveling you where MITM'ed by a hatin' sysadmin. While you battled with him, the haters gained ground.",
"gained": "You traveled {amount} kilometres",
"special": "haters_back|inc",
"type": "kilometres",
"event": "battle",
},
},
}
isGame = True
def init():
print()
load()
printBlank(2)
print("Note: This game looks best when played in a fullscreen black and green terminal")
printBlank(3)
print("This game was heavily inspired by:")
printBlank(3)
printTitle()
printBlank(5)
print("Disclaimer: This game may contain one or more of the following:")
print()
print("1. Geroge Mush")
print("2. Memes")
print("3. Illuminate")
print("4. Disclaimers")
print("5. John Wayne")
print("7. Codes")
print("8. Serious Mispellings")
print("9. Fallout Refrences")
print("10. etc..")
time.sleep(2)
printBlank(2)
print("Good. Now that that's out of the way we can continue")
time.sleep(4)
printBlank(3)
print("Welcome to", gameTitle)
print()
print("This is a game about getting your C-- Cert.")
print("You will travel across the great Internet to a place called Univeristy of Idaho in Denmark, Russia, Canada 6089234.")
print("<{Insert Story Here}>")
print()
printStats()
healthCheck(True)
printMenu()
gameRunner()
def printTitle():
print(" ______ __ __ __ ")
print(" / ____/ ____ _ / / / / ____ __ __ / /_")
print(" / /_ / __ `/ / / / / / __ \ / / / / / __/")
print(" / __/ / /_/ / / / / / / /_/ // /_/ / / /_ ")
print(" /_/ \__,_/ /_/ /_/ \____/ \____/ \__/ ")
def printInv():
print()
print("Today's date:", curr_date)
print()
print("You have", "{:,}".format(inv["cola"]), "litres of Nuka Cola")
print("You have", "{:,}".format(inv["radroach"]), "radroach meats")
print("You have", "{:,}".format(inv["kilometres"]), "kilometres to go")
print("The Haters are", "{:,}".format(inv["haters_back"]), "kilometres behind you")
def printMenu():
printInv()
printBlank(2)
print("T: Travel")
print("D: Drink")
print("R: Eat")
if inv["need_bandwidth"]:
print("C: Call ISP")
print("#: Sleep/Save")
print("~: Reset")
print("E: Exit")
print()
def printStats():
print()
print("Your Stats:")
for i in inv["stats"]:
print(i.capitalize() + ":", inv["stats"][i], "GB" if i == "bandwidth" else "")
def printBlank(num):
if isinstance(num, int):
for i in range(num):
print()
def save(verbose):
with open(save_name, 'w') as fp:
json.dump(inv, fp)
if verbose:
print()
print("Save Successful")
print()
def load():
global curr_date
if os.path.isfile(save_name):
with open(save_name) as fp:
data = json.load(fp)
if data:
print("Load Successful")
curr_date += timedelta(days=data["day"])
for i in data:
inv[i] = data[i]
else:
print("Load file not found. Creating file.")
save(False)
def doReset():
print()
print("Are you absolutly certian you want to reset (All progress will be deleted!)?")
print("Notice: This will exit the game")
y_n = str(raw_input("(Y/N): ")).capitalize()
if y_n == "Y":
print()
print("Removing save file...")
os.remove(save_name)
print("Cleaning Up...")
isGame = False
print("Exiting...")
exit()
else:
print("Unknown option:", y_n)
exitGame()
def switch(thing):
if thing == "T":
travel()
elif thing == "#":
save(True)
return False
elif thing == "E":
exitGame()
return False
elif thing == "~":
doReset()
return False
elif thing == "R":
if not eat():
return False
elif thing == "D":
if not drink():
return False
if inv["need_bandwidth"]:
if thing == "C":
call_isp()
return True
def exitGame():
global isGame
isGame = False
save(True)
exit()
def call_isp():
printBlank(2)
print("Calling Bell Canada at 1-866-310-2355")
for i in range(550):
time.sleep(0.04)
print(".", end="")
printBlank(2)
print("Hello, My name is Mark from Bell Canada.")
print("How may I help you today?")
print()
print("O: Order Bandwidth")
print()
optionInvalid = True
while (optionInvalid):
option = str(raw_input("Select an Option: ")).capitalize()
if option == "O":
optionInvalid = False
print()
print("Ok, let me just check a few details", end="")
for i in range(3):
time.sleep(3)
print(".", end="")
print()
print("Thank you for your patience. How much bandwidth would you like to order?")
print()
amountLarge = True
while (amountLarge):
amount = int(raw_input("Enter a numerical amount: "))
if amount > 450:
print("Sorry, we can not currently offer that amount")
elif amount <= 450:
amountLarge = False
print("Ok, I'll add that to your account right away", end="")
inv["stats"]["bandwidth"] += amount
for i in range(3):
time.sleep(3)
print(".", end="")
print()
print("Thank you for calling Bell Canada. Your Bandwidth has been added to your account.")
if inv["stats"]["bandwidth"] >= 0:
inv["need_bandwidth"] = False
time.sleep(3)
printBlank(5)
else:
print("Sorry, I don't understand that option.")
def invControl(what, amount, mode = "add"):
if mode == "subtract":
inv[what] -= amount
elif mode == "add":
inv[what] += amount
elif mode == "double":
inv[what] *= 2
elif mode == "half":
inv[what] /= 2
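# doAction applies a scenario's "special" directive of the form
# "<inventory key>|<modifier>" (e.g. "cola|inc", "kilometres|half",
# "haters_back|inc"): the modifier scales or replaces the random amount,
# which is then added to that inventory entry via invControl above.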
def doAction(action, amount, doReturn = True):
if isinstance(action, list):
what = action[0].lower()
much = action[1].lower()
mode = "add"
if much == "half":
amount /= 2
elif much == "none":
amount = 0
elif much == "inc":
amount += random.randrange(maxGain)
invControl(what, amount, mode)
if doReturn:
return amount
else:
return False
def healthCheck(isInit):
for i in inv["stats"]:
if inv["stats"][i] <= 0:
print()
if i == "health":
print("You died due to the lack of health.")
print()
if isInit:
doReset()
else:
print("Start the game and reset to try again.")
exitGame()
elif i == "thirst":
print("You died due to the lack of proper hydration")
print()
if isInit:
doReset()
else:
print("Start the game and reset to try again.")
exitGame()
elif i == "bandwidth":
print("You ran out of bandwidth. Call you ISP to get more.")
print()
inv["need_bandwidth"] = True
def dayTick(event = False):
for i in inv["stats"]:
amount = random.randint(limits["deduct_min"][i], limits["deduct_max"][i])
if isinstance(event, str):
if event == "battle":
amount *= 3
inv["stats"][i] -= amount
print("You used", amount, "of", i.capitalize())
def travel():
global curr_date
if inv["need_bandwidth"]:
print("You don't have any bandwidth. You need to call Bell Canada to get more.")
if str(raw_input("Do you want to call them now? (Y/N): ")) == "Y":
call_isp()
else:
kilo = random.randint(maxTravel["user"]["min"], maxTravel["user"]["max"])
amount = random.randint(maxTravel["user"]["min"], maxTravel["user"]["max"])
kilo_hater = random.randint(maxTravel["haters"]["min"], maxTravel["haters"]["max"])
_local = senarios["travel"][random.randrange(1, len(senarios["travel"]))]
printBlank(7)
print(_local["message"])
amount = doAction(_local["special"].split("|"), amount)
if (_local["special"].split("|")[0] == "kilometres" and _local["special"].split("|")[1] == "none"):
kilo = 0
if _local["type"] is not "kilometres":
print(senarios["general"]["travel"].format(amount = kilo))
inv["kilometres"] -= kilo
print(_local["gained"].format(amount = amount))
print(senarios["general"]["haters"].format(amount = kilo_hater))
printBlank(3)
curr_date += timedelta(days=1)
inv["day"] += 1
inv["haters_back"] = kilo_hater
dayTick(_local["event"])
def eat():
print()
amount = random.randint(int(limits["generate"]["health"].split("|")[0]), int(limits["generate"]["health"].split("|")[1]))
inv["stats"]["health"] += amount
if inv["radroach"] >= limits["drink"]:
inv["radroach"] -= limits["eat"]
print(senarios["general"]["eat"].format(amount = amount, limit = limits["eat"]))
else:
print("You don't have enough radroach to eat")
print()
return False
print()
return True
def drink():
print()
amount = random.randint(int(limits["generate"]["drink"].split("|")[0]), int(limits["generate"]["drink"].split("|")[1]))
inv["stats"]["thirst"] += amount
if inv["cola"] >= limits["drink"]:
inv["cola"] -= limits["drink"]
print(senarios["general"]["drink"].format(amount = amount, limit = limits["drink"]))
else:
print("You don't have enough cola to drink")
print()
return False
print()
return True
def gameRunner():
while (isGame):
what = str(raw_input("What would you like to do?: ")).capitalize()
if switch(what):
printStats()
healthCheck(False)
printMenu()
init()
exit()
|
|
###############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2014 Justin Lovinger
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
"""Gravitational search algorithm"""
import random
import math
import numpy
from optimal import optimize, common
EPSILON = 1e-10
# TODO: Optimize to use numpy array operations wherever possible
class GSA(optimize.StandardOptimizer):
"""Gravitational Search Algorithm
Perform gravitational search algorithm optimization with a given fitness function.
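    A rough usage sketch (hedged: it assumes the surrounding ``optimal``
    package exposes ``optimize.Problem`` and ``StandardOptimizer.optimize``;
    ``my_fitness`` is a hypothetical fitness function):
        # optimizer = GSA(solution_size=2,
        #                 lower_bounds=[-5.0, -5.0], upper_bounds=[5.0, 5.0])
        # best = optimizer.optimize(optimize.Problem(my_fitness))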
"""
def __init__(self,
solution_size,
lower_bounds,
upper_bounds,
population_size=20,
grav_initial=100.0,
grav_reduction_rate=20.0):
"""Create an object that optimizes a given fitness function with GSA.
Args:
solution_size: The number of real values in each solution.
lower_bounds: list, each value is a lower bound for the corresponding
component of the solution.
        upper_bounds: list, each value is an upper bound for the corresponding
            component of the solution.
        population_size: The number of potential solutions in every generation.
        grav_initial: Initial value of the gravitational constant (G_0 in the GSA paper).
        grav_reduction_rate: Rate at which the gravitational constant decays over iterations.
"""
super(GSA, self).__init__(solution_size, population_size)
# set parameters for users problem
self._lower_bounds = numpy.array(lower_bounds)
self._upper_bounds = numpy.array(upper_bounds)
# GSA variables
self._grav_initial = grav_initial # G_i in GSA paper
self._grav_reduction_rate = grav_reduction_rate
self._velocity_matrix = None
self.initialize()
# Hyperparameter definitions
self._hyperparameters['_grav_initial'] = {
'type': 'float',
'min': 1e-10,
'max': 200.0
}
self._hyperparameters['_grav_reduction_rate'] = {
'type': 'float',
'min': 1e-10,
'max': 40.0
}
def initialize(self):
# Initialize GSA variables
self._velocity_matrix = numpy.zeros((self._population_size,
self._solution_size))
def initial_population(self):
return _initial_gsa_population(self._population_size,
self._solution_size, self._lower_bounds,
self._upper_bounds)
def next_population(self, population, fitnesses):
new_pop, self._velocity_matrix = _new_population_gsa(
population, fitnesses, self._velocity_matrix, self._lower_bounds,
self._upper_bounds, self._grav_initial, self._grav_reduction_rate,
self.iteration, self._max_iterations)
return new_pop
def _initial_gsa_population(population_size, solution_size, lower_bounds,
upper_bounds):
"""Create a random initial population of floating point values.
Args:
population_size: an integer representing the number of solutions in the population.
solution_size: the number of values in each solution.
lower_bounds: array; each value is a lower bound for the corresponding
part of the solution.
upper_bounds: array; each value is an upper bound for the corresponding
part of the solution.
Returns:
list; A list of random solutions.
"""
if len(lower_bounds) != solution_size or len(upper_bounds) != solution_size:
raise ValueError(
"Lower and upper bounds much have a length equal to the problem size."
)
# population_size rows
# solution_size columns
# Each column in range of corresponding lower and upper bounds
return numpy.random.uniform(lower_bounds, upper_bounds, (population_size,
solution_size))
def _new_population_gsa(population, fitnesses, velocity_matrix, lower_bounds,
upper_bounds, grav_initial, grav_reduction_rate,
iteration, max_iterations):
"""Generate a new population as given by GSA algorithm.
In GSA paper, grav_initial is G_0
"""
# Make sure population is a numpy array
if not isinstance(population, numpy.ndarray):
population = numpy.array(population)
# Update the gravitational constant, and the best and worst of the population
# Calculate the mass and acceleration for each solution
# Update the velocity and position of each solution
population_size = population.shape[0]
solution_size = population.shape[1]
# In GSA paper, grav is G
grav = _next_grav(grav_initial, grav_reduction_rate, iteration,
max_iterations)
mass_vector = _get_masses(fitnesses)
# Get the force on each solution
# Only the best K solutions apply force
# K linearly decreases to 1
num_best = int(population_size - (population_size - 1) *
(iteration / float(max_iterations)))
force_matrix = _get_force_matrix(grav, population, mass_vector, num_best)
# Get the acceleration of each solution
# By dividing each force vector by corresponding mass
acceleration_matrix = force_matrix / mass_vector.reshape(force_matrix.shape[0], 1)
# Update the velocity of each solution
# The GSA algorithm specifies that the new velocity for each dimension
# is a sum of a random fraction of its current velocity in that dimension,
# and its acceleration in that dimension
new_velocity_matrix = numpy.random.random(
velocity_matrix.shape) * velocity_matrix + acceleration_matrix
# Create the new population
new_population = numpy.clip(
# Move each position by its velocity vector
population + new_velocity_matrix,
# Clip to constrain to bounds
lower_bounds, upper_bounds)
return new_population, new_velocity_matrix
def _next_grav(grav_initial, grav_reduction_rate, iteration, max_iterations):
"""Calculate G as given by GSA algorithm.
In GSA paper, grav is G
"""
return grav_initial * math.exp(
-grav_reduction_rate * iteration / float(max_iterations))
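# For example, with grav_initial=100, grav_reduction_rate=20 and
# iteration = max_iterations / 2, G = 100 * exp(-10) ~= 4.5e-3.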
def _get_masses(fitnesses):
"""Convert fitnesses into masses, as given by GSA algorithm."""
# Make sure fitnesses is a numpy array
if not isinstance(fitnesses, numpy.ndarray):
fitnesses = numpy.array(fitnesses)
# Obtain constants
best_fitness = numpy.max(fitnesses)
worst_fitness = numpy.min(fitnesses)
fitness_range = best_fitness - worst_fitness
# Calculate raw masses for each solution
# By scaling each fitness to a positive value
masses = (fitnesses - worst_fitness) / (fitness_range + EPSILON) + EPSILON
# Normalize to a sum of 1 to obtain final mass for each solution
masses /= numpy.sum(masses)
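# For example, fitnesses [1.0, 2.0, 3.0] scale to roughly [0, 0.5, 1.0]
# (plus EPSILON), which normalize to masses of about [0.0, 0.333, 0.667].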
return masses
def _get_force_matrix(grav, position_matrix, mass_vector, num_best):
"""Gives the force of solution j on solution i.
num_rows(position_matrix) == num_elements(mass_vector)
Each element in mass_vector corresponds to a row in position_matrix.
args:
grav: The gravitational constant. (G)
        position_matrix: Each row is a parameter vector,
            a.k.a. the position of a body.
        mass_vector: Each element is the mass of the corresponding
            parameter vector (row) in position_matrix.
num_best: How many bodies to apply their force
to each other body
returns:
numpy.array; Matrix of total force on each body.
Each row is a force vector corresponding
to corresponding row / body in position_matrix.
"""
# TODO: Refactor to avoid calculating per position vector
# Get index of num_best highest masses (corresponds to rows in population)
k_best_indices = numpy.argpartition(mass_vector, -num_best)[-num_best:]
# The GSA algorithm specifies that the total force in each dimension
# is a random sum of the individual forces in that dimension.
force_matrix = []
for mass, position_vector in zip(mass_vector, position_matrix):
# NOTE: We can ignore position_vector being in k_best because
# difference will just be 0 vector
diff_matrix = position_matrix[k_best_indices] - position_vector
force_matrix.append(
# Scale result by gravity constant
grav *
# Add force vector applied to this body by each other body
numpy.sum(
# Multiply each force scalar by a random number
# in range [0, 1)
numpy.random.random(diff_matrix.shape) *
# Multiply each position difference vector by
# product of corresponding masses
((mass_vector[k_best_indices] * mass) / (
# divided by distance
# (add EPSILON to prevent divide by 0)
numpy.linalg.norm(diff_matrix, ord=2) + EPSILON
)).reshape(diff_matrix.shape[0], 1) *
# All multiplied by matrix of position difference vectors,
# giving direction of force vectors
diff_matrix,
# Sum along each force vector
axis=0))
force_matrix = numpy.array(force_matrix)
return force_matrix
|
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
"""kubernetes check
Collects metrics from cAdvisor instance
"""
# stdlib
from collections import defaultdict
from fnmatch import fnmatch
import numbers
import re
import time
import calendar
# 3rd party
import requests
import simplejson as json
# project
from checks import AgentCheck
from config import _is_affirmative
from utils.kubernetes import KubeUtil
NAMESPACE = "kubernetes"
DEFAULT_MAX_DEPTH = 10
DEFAULT_USE_HISTOGRAM = False
DEFAULT_PUBLISH_ALIASES = False
DEFAULT_ENABLED_RATES = [
'diskio.io_service_bytes.stats.total',
'network.??_bytes',
'cpu.*.total']
DEFAULT_COLLECT_EVENTS = False
NET_ERRORS = ['rx_errors', 'tx_errors', 'rx_dropped', 'tx_dropped']
DEFAULT_ENABLED_GAUGES = [
'memory.usage',
'filesystem.usage']
GAUGE = AgentCheck.gauge
RATE = AgentCheck.rate
HISTORATE = AgentCheck.generate_historate_func(["container_name"])
HISTO = AgentCheck.generate_histogram_func(["container_name"])
FUNC_MAP = {
GAUGE: {True: HISTO, False: GAUGE},
RATE: {True: HISTORATE, False: RATE}
}
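# FUNC_MAP picks the publishing function for a metric type: the plain
# gauge/rate when use_histogram is False, the per-container
# histogram/historate variant when it is True.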
EVENT_TYPE = 'kubernetes'
# Suffixes per
# https://github.com/kubernetes/kubernetes/blob/8fd414537b5143ab039cb910590237cabf4af783/pkg/api/resource/suffix.go#L108
FACTORS = {
'n': float(1)/(1000*1000*1000),
'u': float(1)/(1000*1000),
'm': float(1)/1000,
'k': 1000,
'M': 1000*1000,
'G': 1000*1000*1000,
'T': 1000*1000*1000*1000,
'P': 1000*1000*1000*1000*1000,
'E': 1000*1000*1000*1000*1000*1000,
'Ki': 1024,
'Mi': 1024*1024,
'Gi': 1024*1024*1024,
'Ti': 1024*1024*1024*1024,
'Pi': 1024*1024*1024*1024*1024,
'Ei': 1024*1024*1024*1024*1024*1024,
}
QUANTITY_EXP = re.compile(r'[-+]?\d+[\.]?\d*[numkMGTPE]?i?')
class Kubernetes(AgentCheck):
""" Collect metrics and events from kubelet """
pod_names_by_container = {}
def __init__(self, name, init_config, agentConfig, instances=None):
if instances is not None and len(instances) > 1:
raise Exception('Kubernetes check only supports one configured instance.')
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
inst = instances[0] if instances is not None else None
self.kubeutil = KubeUtil(instance=inst)
if not self.kubeutil.host:
raise Exception('Unable to retrieve Docker hostname and host parameter is not set')
def _perform_kubelet_checks(self, url):
service_check_base = NAMESPACE + '.kubelet.check'
is_ok = True
try:
r = requests.get(url, params={'verbose': True})
for line in r.iter_lines():
# avoid noise; this check is expected to fail since we override the container hostname
if line.find('hostname') != -1:
continue
matches = re.match(r'\[(.)\]([^\s]+) (.*)?', line)
if not matches or len(matches.groups()) < 2:
continue
service_check_name = service_check_base + '.' + matches.group(2)
status = matches.group(1)
if status == '+':
self.service_check(service_check_name, AgentCheck.OK)
else:
self.service_check(service_check_name, AgentCheck.CRITICAL)
is_ok = False
except Exception as e:
self.log.warning('kubelet check %s failed: %s' % (url, str(e)))
self.service_check(service_check_base, AgentCheck.CRITICAL,
message='Kubelet check %s failed: %s' % (url, str(e)))
else:
if is_ok:
self.service_check(service_check_base, AgentCheck.OK)
else:
self.service_check(service_check_base, AgentCheck.CRITICAL)
def check(self, instance):
self.max_depth = instance.get('max_depth', DEFAULT_MAX_DEPTH)
enabled_gauges = instance.get('enabled_gauges', DEFAULT_ENABLED_GAUGES)
self.enabled_gauges = ["{0}.{1}".format(NAMESPACE, x) for x in enabled_gauges]
enabled_rates = instance.get('enabled_rates', DEFAULT_ENABLED_RATES)
self.enabled_rates = ["{0}.{1}".format(NAMESPACE, x) for x in enabled_rates]
self.publish_aliases = _is_affirmative(instance.get('publish_aliases', DEFAULT_PUBLISH_ALIASES))
self.use_histogram = _is_affirmative(instance.get('use_histogram', DEFAULT_USE_HISTOGRAM))
self.publish_rate = FUNC_MAP[RATE][self.use_histogram]
self.publish_gauge = FUNC_MAP[GAUGE][self.use_histogram]
# initialized by _filter_containers
self._filtered_containers = set()
pods_list = self.kubeutil.retrieve_pods_list()
# kubelet health checks
self._perform_kubelet_checks(self.kubeutil.kube_health_url)
# kubelet metrics
self._update_metrics(instance, pods_list)
# kubelet events
if _is_affirmative(instance.get('collect_events', DEFAULT_COLLECT_EVENTS)):
self._process_events(instance, pods_list)
def _publish_raw_metrics(self, metric, dat, tags, depth=0):
if depth >= self.max_depth:
self.log.warning('Reached max depth on metric=%s' % metric)
return
if isinstance(dat, numbers.Number):
if self.enabled_rates and any([fnmatch(metric, pat) for pat in self.enabled_rates]):
self.publish_rate(self, metric, float(dat), tags)
elif self.enabled_gauges and any([fnmatch(metric, pat) for pat in self.enabled_gauges]):
self.publish_gauge(self, metric, float(dat), tags)
elif isinstance(dat, dict):
for k, v in dat.iteritems():
self._publish_raw_metrics(metric + '.%s' % k.lower(), v, tags, depth + 1)
elif isinstance(dat, list):
self._publish_raw_metrics(metric, dat[-1], tags, depth + 1)
@staticmethod
def _shorten_name(name):
# shorten docker image id
return re.sub('([0-9a-fA-F]{64,})', lambda x: x.group(1)[0:12], name)
def _get_post_1_2_tags(self, cont_labels, subcontainer, kube_labels):
tags = []
pod_name = cont_labels[KubeUtil.POD_NAME_LABEL]
pod_namespace = cont_labels[KubeUtil.NAMESPACE_LABEL]
tags.append(u"pod_name:{0}/{1}".format(pod_namespace, pod_name))
tags.append(u"kube_namespace:{0}".format(pod_namespace))
kube_labels_key = "{0}/{1}".format(pod_namespace, pod_name)
pod_labels = kube_labels.get(kube_labels_key)
if pod_labels:
tags += list(pod_labels)
if "-" in pod_name:
replication_controller = "-".join(pod_name.split("-")[:-1])
tags.append("kube_replication_controller:%s" % replication_controller)
if self.publish_aliases and subcontainer.get("aliases"):
for alias in subcontainer['aliases'][1:]:
# we don't add the first alias as it will be the container_name
tags.append('container_alias:%s' % (self._shorten_name(alias)))
return tags
def _get_pre_1_2_tags(self, cont_labels, subcontainer, kube_labels):
tags = []
pod_name = cont_labels[KubeUtil.POD_NAME_LABEL]
tags.append(u"pod_name:{0}".format(pod_name))
pod_labels = kube_labels.get(pod_name)
if pod_labels:
tags.extend(list(pod_labels))
if "-" in pod_name:
replication_controller = "-".join(pod_name.split("-")[:-1])
if "/" in replication_controller:
namespace, replication_controller = replication_controller.split("/", 1)
tags.append(u"kube_namespace:%s" % namespace)
tags.append(u"kube_replication_controller:%s" % replication_controller)
if self.publish_aliases and subcontainer.get("aliases"):
for alias in subcontainer['aliases'][1:]:
# we don't add the first alias as it will be the container_name
tags.append(u"container_alias:%s" % (self._shorten_name(alias)))
return tags
def _update_container_metrics(self, instance, subcontainer, kube_labels):
"""Publish metrics for a subcontainer and handle filtering on tags"""
tags = list(instance.get('tags', [])) # add support for custom tags
if len(subcontainer.get('aliases', [])) >= 1:
# The first alias seems to always match the docker container name
container_name = subcontainer['aliases'][0]
else:
# We default to the container id
container_name = subcontainer['name']
tags.append('container_name:%s' % container_name)
container_image = subcontainer['spec'].get('image')
if container_image:
tags.append('container_image:%s' % container_image)
try:
cont_labels = subcontainer['spec']['labels']
except KeyError:
self.log.debug("Subcontainer, doesn't have any labels")
cont_labels = {}
# Collect pod names, namespaces, rc...
if KubeUtil.NAMESPACE_LABEL in cont_labels and KubeUtil.POD_NAME_LABEL in cont_labels:
# Kubernetes >= 1.2
tags += self._get_post_1_2_tags(cont_labels, subcontainer, kube_labels)
elif KubeUtil.POD_NAME_LABEL in cont_labels:
# Kubernetes <= 1.1
tags += self._get_pre_1_2_tags(cont_labels, subcontainer, kube_labels)
else:
# Those are containers that are not part of a pod.
# They are top aggregate views and don't have the previous metadata.
tags.append("pod_name:no_pod")
# if the container should be filtered we return its tags without publishing its metrics
is_filtered = self.kubeutil.are_tags_filtered(tags)
if is_filtered:
self._filtered_containers.add(subcontainer['id'])
return tags
stats = subcontainer['stats'][-1] # take the latest
self._publish_raw_metrics(NAMESPACE, stats, tags)
if subcontainer.get("spec", {}).get("has_filesystem"):
fs = stats['filesystem'][-1]
fs_utilization = float(fs['usage'])/float(fs['capacity'])
self.publish_gauge(self, NAMESPACE + '.filesystem.usage_pct', fs_utilization, tags)
if subcontainer.get("spec", {}).get("has_network"):
net = stats['network']
self.publish_rate(self, NAMESPACE + '.network_errors',
sum(float(net[x]) for x in NET_ERRORS),
tags)
return tags
def _update_metrics(self, instance, pods_list):
def parse_quantity(s):
number = ''
unit = ''
for c in s:
if c.isdigit() or c == '.':
number += c
else:
unit += c
return float(number) * FACTORS.get(unit, 1)
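# e.g. parse_quantity('100m') == 0.1, parse_quantity('2Gi') == 2 * 1024**3,
# parse_quantity('500k') == 500000.0; unknown suffixes fall back to a factor of 1.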
metrics = self.kubeutil.retrieve_metrics()
excluded_labels = instance.get('excluded_labels')
kube_labels = self.kubeutil.extract_kube_labels(pods_list, excluded_keys=excluded_labels)
if not metrics:
raise Exception('No metrics retrieved cmd=%s' % self.metrics_cmd)
# container metrics from Cadvisor
container_tags = {}
for subcontainer in metrics:
c_id = subcontainer.get('id')
try:
tags = self._update_container_metrics(instance, subcontainer, kube_labels)
if c_id:
container_tags[c_id] = tags
# also store tags for aliases
for alias in subcontainer.get('aliases', []):
container_tags[alias] = tags
except Exception, e:
self.log.error("Unable to collect metrics for container: {0} ({1}".format(c_id, e))
# container metrics from kubernetes API: limits and requests
for pod in pods_list['items']:
try:
containers = pod['spec']['containers']
name2id = {}
for cs in pod['status'].get('containerStatuses', []):
c_id = cs.get('containerID', '').split('//')[-1]
name = cs.get('name')
if name:
name2id[name] = c_id
except KeyError:
self.log.debug("Pod %s does not have containers specs, skipping...", pod['metadata'].get('name'))
continue
for container in containers:
c_name = container.get('name')
c_id = name2id.get(c_name)
if c_id in self._filtered_containers:
self.log.debug('Container {} is excluded'.format(c_name))
continue
_tags = container_tags.get(c_id, [])
# limits
try:
for limit, value_str in container['resources']['limits'].iteritems():
values = [parse_quantity(s) for s in QUANTITY_EXP.findall(value_str)]
if len(values) != 1:
self.log.warning("Error parsing limits value string: %s", value_str)
continue
self.publish_gauge(self, '{}.{}.limits'.format(NAMESPACE, limit), values[0], _tags)
except (KeyError, AttributeError) as e:
self.log.debug("Unable to retrieve container limits for %s: %s", c_name, e)
self.log.debug("Container object for {}: {}".format(c_name, container))
# requests
try:
for request, value_str in container['resources']['requests'].iteritems():
values = [parse_quantity(s) for s in QUANTITY_EXP.findall(value_str)]
if len(values) != 1:
self.log.warning("Error parsing requests value string: %s", value_str)
continue
self.publish_gauge(self, '{}.{}.requests'.format(NAMESPACE, request), values[0], _tags)
except (KeyError, AttributeError) as e:
self.log.error("Unable to retrieve container requests for %s: %s", c_name, e)
self.log.debug("Container object for {}: {}".format(c_name, container))
self._update_pods_metrics(instance, pods_list)
self._update_node(instance)
def _update_node(self, instance):
machine_info = self.kubeutil.retrieve_machine_info()
num_cores = machine_info.get('num_cores', 0)
memory_capacity = machine_info.get('memory_capacity', 0)
tags = instance.get('tags', [])
self.publish_gauge(self, NAMESPACE + '.cpu.capacity', float(num_cores), tags)
self.publish_gauge(self, NAMESPACE + '.memory.capacity', float(memory_capacity), tags)
# TODO(markine): Report 'allocatable' which is capacity minus capacity
# reserved for system/Kubernetes.
def _update_pods_metrics(self, instance, pods):
supported_kinds = [
"DaemonSet",
"Deployment",
"Job",
"ReplicationController",
"ReplicaSet",
]
# (created-by name, namespace): number of running pods
controllers_map = defaultdict(int)
for pod in pods['items']:
try:
created_by = json.loads(pod['metadata']['annotations']['kubernetes.io/created-by'])
kind = created_by['reference']['kind']
if kind in supported_kinds:
namespace = created_by['reference']['namespace']
controllers_map[(created_by['reference']['name'], namespace)] += 1
except (KeyError, ValueError) as e:
self.log.debug("Unable to retrieve pod kind for pod %s: %s", pod, e)
continue
tags = instance.get('tags', [])
for (ctrl, namespace), pod_count in controllers_map.iteritems():
_tags = tags[:] # copy base tags
_tags.append('kube_replication_controller:{0}'.format(ctrl))
_tags.append('kube_namespace:{0}'.format(namespace))
self.publish_gauge(self, NAMESPACE + '.pods.running', pod_count, _tags)
def _process_events(self, instance, pods_list):
"""
Retrieve a list of events from the kubernetes API.
At the moment (k8s v1.3) there is no support to select events based on a timestamp query, so we
go through the whole list every time. This should be fine for now as events
have a TTL of one hour[1] but logic needs to improve as soon as they provide
query capabilities or at least pagination, see [2][3].
[1] https://github.com/kubernetes/kubernetes/blob/release-1.3.0/cmd/kube-apiserver/app/options/options.go#L51
[2] https://github.com/kubernetes/kubernetes/issues/4432
[3] https://github.com/kubernetes/kubernetes/issues/1362
"""
node_ip, node_name = self.kubeutil.get_node_info()
self.log.debug('Processing events on {} [{}]'.format(node_name, node_ip))
k8s_namespace = instance.get('namespace', 'default')
events_endpoint = '{}/namespaces/{}/events'.format(self.kubeutil.kubernetes_api_url, k8s_namespace)
self.log.debug('Kubernetes API endpoint to query events: %s' % events_endpoint)
events = self.kubeutil.retrieve_json_auth(events_endpoint, self.kubeutil.get_auth_token())
event_items = events.get('items') or []
last_read = self.kubeutil.last_event_collection_ts[k8s_namespace]
most_recent_read = 0
self.log.debug('Found {} events, filtering out using timestamp: {}'.format(len(event_items), last_read))
for event in event_items:
# skip if the event is too old
event_ts = calendar.timegm(time.strptime(event.get('lastTimestamp'), '%Y-%m-%dT%H:%M:%SZ'))
if event_ts <= last_read:
continue
involved_obj = event.get('involvedObject', {})
tags = self.kubeutil.extract_event_tags(event)
# compute the most recently seen event, without relying on items order
if event_ts > most_recent_read:
most_recent_read = event_ts
title = '{} {} on {}'.format(involved_obj.get('name'), event.get('reason'), node_name)
message = event.get('message')
source = event.get('source')
if source:
message += '\nSource: {} {}\n'.format(source.get('component', ''), source.get('host', ''))
msg_body = "%%%\n{}\n```\n{}\n```\n%%%".format(title, message)
dd_event = {
'timestamp': event_ts,
'host': node_ip,
'event_type': EVENT_TYPE,
'msg_title': title,
'msg_text': msg_body,
'source_type_name': EVENT_TYPE,
'event_object': 'kubernetes:{}'.format(involved_obj.get('name')),
'tags': tags,
}
self.event(dd_event)
if most_recent_read > 0:
self.kubeutil.last_event_collection_ts[k8s_namespace] = most_recent_read
self.log.debug('_last_event_collection_ts is now {}'.format(most_recent_read))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import operator
import math
class graph(object):
def __init__(self, nodes, transitions, no_ancester=None):
"""Initialize graph's object
@param nodes list of ids of nodes in the graph
@param transitions list of edges in the graph in the form (source_node, destination_node)
@param no_ancester list of nodes with no incoming edges
"""
self.nodes = nodes or []
self.edges = transitions or []
self.no_ancester = no_ancester or {}
trans = {}
for t in transitions:
trans.setdefault(t[0], [])
trans[t[0]].append(t[1])
self.transitions = trans
self.result = {}
def init_rank(self):
"""Computes rank of the nodes of the graph by finding initial feasible tree
"""
self.edge_wt = {}
for link in self.links:
self.edge_wt[link] = self.result[link[1]]['x'] - self.result[link[0]]['x']
tot_node = len(self.partial_order)
#do until all the nodes in the component are searched
while self.tight_tree()<tot_node:
list_node = []
list_edge = []
for node in self.nodes:
if node not in self.reachable_nodes:
list_node.append(node)
for edge in self.edge_wt:
if edge not in self.tree_edges:
list_edge.append(edge)
slack = 100
for edge in list_edge:
if ((edge[0] in self.reachable_nodes and edge[1] not in self.reachable_nodes) or
(edge[1] in self.reachable_nodes and edge[0] not in self.reachable_nodes)):
if slack > self.edge_wt[edge]-1:
slack = self.edge_wt[edge]-1
new_edge = edge
if new_edge[0] not in self.reachable_nodes:
delta = -(self.edge_wt[new_edge]-1)
else:
delta = self.edge_wt[new_edge]-1
for node in self.result:
if node in self.reachable_nodes:
self.result[node]['x'] += delta
for edge in self.edge_wt:
self.edge_wt[edge] = self.result[edge[1]]['x'] - self.result[edge[0]]['x']
self.init_cutvalues()
def tight_tree(self):
self.reachable_nodes = []
self.tree_edges = []
self.reachable_node(self.start)
return len(self.reachable_nodes)
def reachable_node(self, node):
"""Find the nodes of the graph which are only 1 rank apart from each other
"""
if node not in self.reachable_nodes:
self.reachable_nodes.append(node)
for edge in self.edge_wt:
if edge[0]==node:
if self.edge_wt[edge]==1:
self.tree_edges.append(edge)
if edge[1] not in self.reachable_nodes:
self.reachable_nodes.append(edge[1])
self.reachable_node(edge[1])
def init_cutvalues(self):
"""Initailize cut values of edges of the feasible tree.
Edges with negative cut-values are removed from the tree to optimize rank assignment
"""
self.cut_edges = {}
self.head_nodes = []
i=0
for edge in self.tree_edges:
self.head_nodes = []
rest_edges = []
rest_edges += self.tree_edges
del rest_edges[i]
self.head_component(self.start, rest_edges)
i+=1
positive = 0
negative = 0
for source_node in self.transitions:
if source_node in self.head_nodes:
for dest_node in self.transitions[source_node]:
if dest_node not in self.head_nodes:
negative+=1
else:
for dest_node in self.transitions[source_node]:
if dest_node in self.head_nodes:
positive+=1
self.cut_edges[edge] = positive - negative
def head_component(self, node, rest_edges):
"""Find nodes which are reachable from the starting node, after removing an edge
"""
if node not in self.head_nodes:
self.head_nodes.append(node)
for edge in rest_edges:
if edge[0]==node:
self.head_component(edge[1],rest_edges)
def process_ranking(self, node, level=0):
"""Computes initial feasible ranking after making graph acyclic with depth-first search
"""
if node not in self.result:
self.result[node] = {'y': None, 'x':level, 'mark':0}
else:
if level > self.result[node]['x']:
self.result[node]['x'] = level
if self.result[node]['mark']==0:
self.result[node]['mark'] = 1
for sec_end in self.transitions.get(node, []):
self.process_ranking(sec_end, level+1)
def make_acyclic(self, parent, node, level, tree):
"""Computes Partial-order of the nodes with depth-first search
"""
if node not in self.partial_order:
self.partial_order[node] = {'level':level, 'mark':0}
if parent:
tree.append((parent, node))
if self.partial_order[node]['mark']==0:
self.partial_order[node]['mark'] = 1
for sec_end in self.transitions.get(node, []):
self.links.append((node, sec_end))
self.make_acyclic(node, sec_end, level+1, tree)
return tree
def rev_edges(self, tree):
"""reverse the direction of the edges whose source-node-partail_order> destination-node-partail_order
to make the graph acyclic
"""
Is_Cyclic = False
i=0
for link in self.links:
src = link[0]
des = link[1]
edge_len = self.partial_order[des]['level'] - self.partial_order[src]['level']
if edge_len < 0:
del self.links[i]
self.links.insert(i, (des, src))
self.transitions[src].remove(des)
self.transitions.setdefault(des, []).append(src)
Is_Cyclic = True
elif math.fabs(edge_len) > 1:
Is_Cyclic = True
i += 1
return Is_Cyclic
def exchange(self, e, f):
"""Exchange edges to make feasible-tree optimized
:param e: edge with negative cut-value
:param f: new edge with minimum slack-value
"""
del self.tree_edges[self.tree_edges.index(e)]
self.tree_edges.append(f)
self.init_cutvalues()
def enter_edge(self, edge):
"""Finds a new_edge with minimum slack value to replace an edge with negative cut-value
@param edge edge with negative cut-value
"""
self.head_nodes = []
rest_edges = []
rest_edges += self.tree_edges
del rest_edges[rest_edges.index(edge)]
self.head_component(self.start, rest_edges)
if edge[1] in self.head_nodes:
l = []
for node in self.result:
if node not in self.head_nodes:
l.append(node)
self.head_nodes = l
slack = 100
new_edge = edge
for source_node in self.transitions:
if source_node in self.head_nodes:
for dest_node in self.transitions[source_node]:
if dest_node not in self.head_nodes:
if slack>(self.edge_wt[edge]-1):
slack = self.edge_wt[edge]-1
new_edge = (source_node, dest_node)
return new_edge
def leave_edge(self):
"""Returns the edge with negative cut_value(if exists)
"""
if self.critical_edges:
for edge in self.critical_edges:
self.cut_edges[edge] = 0
for edge in self.cut_edges:
if self.cut_edges[edge]<0:
return edge
return None
def finalize_rank(self, node, level):
self.result[node]['x'] = level
for destination in self.optimal_edges.get(node, []):
self.finalize_rank(destination, level+1)
def normalize(self):
"""The ranks are normalized by setting the least rank to zero.
"""
least_rank = min(map(lambda x: x['x'], self.result.values()))
if least_rank!=0:
for node in self.result:
self.result[node]['x']-=least_rank
def make_chain(self):
"""Edges between nodes more than one rank apart are replaced by chains of unit
length edges between temporary nodes.
"""
for edge in self.edge_wt:
if self.edge_wt[edge]>1:
self.transitions[edge[0]].remove(edge[1])
start = self.result[edge[0]]['x']
end = self.result[edge[1]]['x']
for rank in range(start+1, end):
if not self.result.get((rank, 'temp'), False):
self.result[(rank, 'temp')] = {'y': None, 'x': rank, 'mark': 0}
for rank in range(start, end):
if start==rank:
self.transitions[edge[0]].append((rank+1, 'temp'))
elif rank==end-1:
self.transitions.setdefault((rank, 'temp'), []).append(edge[1])
else:
self.transitions.setdefault((rank, 'temp'), []).append((rank+1, 'temp'))
def init_order(self, node, level):
"""Initialize orders the nodes in each rank with depth-first search
"""
if not self.result[node]['y']:
self.result[node]['y'] = self.order[level]
self.order[level] += 1
for sec_end in self.transitions.get(node, []):
if node!=sec_end:
self.init_order(sec_end, self.result[sec_end]['x'])
def order_heuristic(self):
for i in range(12):
self.wmedian()
def wmedian(self):
"""Applies median heuristic to find optimzed order of the nodes with in their ranks
"""
for level in self.levels:
node_median = []
nodes = self.levels[level]
for node in nodes:
node_median.append((node, self.median_value(node, level-1)))
sort_list = sorted(node_median, key=operator.itemgetter(1))
new_list = [item[0] for item in sort_list]
self.levels[level] = new_list
order = 0
for node in nodes:
self.result[node]['y'] = order
order +=1
def median_value(self, node, adj_rank):
"""Returns median value of a vertex , defined as the median position of the adjacent vertices
@param node node to process
@param adj_rank rank 1 less than the node's rank
"""
adj_nodes = self.adj_position(node, adj_rank)
l = len(adj_nodes)
m = l/2
if l==0:
return -1.0
elif l%2 == 1:
return adj_nodes[m] # odd number of adjacent nodes: the middle element is the median
elif l==2:
return (adj_nodes[0]+adj_nodes[1])/2
else:
left = adj_nodes[m-1] - adj_nodes[0]
right = adj_nodes[l-1] - adj_nodes[m]
return ((adj_nodes[m-1]*right) + (adj_nodes[m]*left))/(left+right)
def adj_position(self, node, adj_rank):
"""Returns list of the present positions of the nodes adjacent to node in the given adjacent rank.
@param node node to process
@param adj_rank rank 1 less than the node's rank
"""
pre_level_nodes = self.levels.get(adj_rank, [])
adj_nodes = []
if pre_level_nodes:
for src in pre_level_nodes:
if self.transitions.get(src) and node in self.transitions[src]:
adj_nodes.append(self.result[src]['y'])
return adj_nodes
def preprocess_order(self):
levels = {}
for r in self.partial_order:
l = self.result[r]['x']
levels.setdefault(l,[])
levels[l].append(r)
self.levels = levels
def graph_order(self):
"""Finds actual-order of the nodes with respect to maximum number of nodes in a rank in component
"""
mid_pos = 0.0
max_level = max(map(lambda x: len(x), self.levels.values()))
for level in self.levels:
if level:
no = len(self.levels[level])
factor = (max_level - no) * 0.10
list = self.levels[level]
list.reverse()
if no%2==0:
first_half = list[no/2:]
factor = -factor
else:
first_half = list[no/2+1:]
if max_level==1: # special case: a horizontal graph with only one node per rank
self.result[list[no/2]]['y'] = mid_pos + (self.result[list[no/2]]['x']%2 * 0.5)
else:
self.result[list[no/2]]['y'] = mid_pos + factor
last_half = list[:no/2]
i=1
for node in first_half:
self.result[node]['y'] = mid_pos - (i + factor)
i += 1
i=1
for node in last_half:
self.result[node]['y'] = mid_pos + (i + factor)
i += 1
else:
self.max_order += max_level+1
mid_pos = self.result[self.start]['y']
def tree_order(self, node, last=0):
mid_pos = self.result[node]['y']
l = self.transitions.get(node, [])
l.reverse()
no = len(l)
rest = no%2
first_half = l[no/2+rest:]
last_half = l[:no/2]
for i, child in enumerate(first_half):
self.result[child]['y'] = mid_pos - (i+1 - (0 if rest else 0.5))
if self.transitions.get(child, False):
if last:
self.result[child]['y'] = last + len(self.transitions[child])/2 + 1
last = self.tree_order(child, last)
if rest:
mid_node = l[no/2]
self.result[mid_node]['y'] = mid_pos
if self.transitions.get(mid_node, False):
if last:
self.result[mid_node]['y'] = last + len(self.transitions[mid_node])/2 + 1
if node!=mid_node:
last = self.tree_order(mid_node)
else:
if last:
self.result[mid_node]['y'] = last + 1
self.result[node]['y'] = self.result[mid_node]['y']
mid_pos = self.result[node]['y']
i=1
last_child = None
for child in last_half:
self.result[child]['y'] = mid_pos + (i - (0 if rest else 0.5))
last_child = child
i += 1
if self.transitions.get(child, False):
if last:
self.result[child]['y'] = last + len(self.transitions[child])/2 + 1
if node!=child:
last = self.tree_order(child, last)
if last_child:
last = self.result[last_child]['y']
return last
def process_order(self):
"""Finds actual-order of the nodes with respect to maximum number of nodes in a rank in component
"""
if self.Is_Cyclic:
max_level = max(map(lambda x: len(x), self.levels.values()))
if max_level%2:
self.result[self.start]['y'] = (max_level+1)/2 + self.max_order + (self.max_order and 1)
else:
self.result[self.start]['y'] = max_level /2 + self.max_order + (self.max_order and 1)
self.graph_order()
else:
self.result[self.start]['y'] = 0
self.tree_order(self.start, 0)
min_order = math.fabs(min(map(lambda x: x['y'], self.result.values())))
index = self.start_nodes.index(self.start)
same = False
roots = []
if index>0:
for start in self.start_nodes[:index]:
same = True
for edge in self.tree_list[start][1:]:
if edge in self.tree_list[self.start]:
continue
else:
same = False
break
if same:
roots.append(start)
if roots:
min_order += self.max_order
else:
min_order += self.max_order + 1
for level in self.levels:
for node in self.levels[level]:
self.result[node]['y'] += min_order
if roots:
roots.append(self.start)
one_level_el = self.tree_list[self.start][0][1]
base = self.result[one_level_el]['y']# * 2 / (index + 2)
no = len(roots)
first_half = roots[:no/2]
if no%2==0:
last_half = roots[no/2:]
else:
last_half = roots[no/2+1:]
factor = -math.floor(no/2)
for start in first_half:
self.result[start]['y'] = base + factor
factor += 1
if no%2:
self.result[roots[no/2]]['y'] = base + factor
factor +=1
for start in last_half:
self.result[start]['y'] = base + factor
factor += 1
self.max_order = max(map(lambda x: x['y'], self.result.values()))
def find_starts(self):
"""Finds other start nodes of the graph in the case when graph is disconneted
"""
rem_nodes = []
for node in self.nodes:
if not self.partial_order.get(node):
rem_nodes.append(node)
cnt = 0
while True:
if len(rem_nodes)==1:
self.start_nodes.append(rem_nodes[0])
break
else:
count = 0
new_start = rem_nodes[0]
largest_tree = []
for node in rem_nodes:
self.partial_order = {}
tree = self.make_acyclic(None, node, 0, [])
if len(tree)+1 > count:
count = len(tree) + 1
new_start = node
largest_tree = tree
else:
if not largest_tree:
new_start = rem_nodes[0]
rem_nodes.remove(new_start)
self.start_nodes.append(new_start)
for edge in largest_tree:
if edge[0] in rem_nodes:
rem_nodes.remove(edge[0])
if edge[1] in rem_nodes:
rem_nodes.remove(edge[1])
if not rem_nodes:
break
def rank(self):
"""Finds the optimized rank of the nodes using Network-simplex algorithm
"""
self.levels = {}
self.critical_edges = []
self.partial_order = {}
self.links = []
self.Is_Cyclic = False
self.tree_list[self.start] = self.make_acyclic(None, self.start, 0, [])
self.Is_Cyclic = self.rev_edges(self.tree_list[self.start])
self.process_ranking(self.start)
self.init_rank()
# repeatedly replace tree edges that have negative cut values to optimize the feasible tree
e = self.leave_edge()
while e :
f = self.enter_edge(e)
if e==f:
self.critical_edges.append(e)
else:
self.exchange(e,f)
e = self.leave_edge()
#finalize rank using optimum feasible tree
# self.optimal_edges = {}
# for edge in self.tree_edges:
# source = self.optimal_edges.setdefault(edge[0], [])
# source.append(edge[1])
# self.finalize_rank(self.start, 0)
#normalization
self.normalize()
for edge in self.edge_wt:
self.edge_wt[edge] = self.result[edge[1]]['x'] - self.result[edge[0]]['x']
def order_in_rank(self):
"""Finds optimized order of the nodes within their ranks using median heuristic
"""
self.make_chain()
self.preprocess_order()
self.order = {}
max_rank = max(map(lambda x: x, self.levels.keys()))
for i in range(max_rank+1):
self.order[i] = 0
self.init_order(self.start, self.result[self.start]['x'])
for level in self.levels:
self.levels[level].sort(lambda x, y: cmp(self.result[x]['y'], self.result[y]['y']))
self.order_heuristic()
self.process_order()
def process(self, starting_node):
"""Process the graph to find ranks and order of the nodes
@param starting_node node from where to start the graph search
"""
self.start_nodes = starting_node or []
self.partial_order = {}
self.links = []
self.tree_list = {}
if self.nodes:
if self.start_nodes:
# add dummy edges to the nodes which do not have any incoming edges
tree = self.make_acyclic(None, self.start_nodes[0], 0, [])
for node in self.no_ancester:
for sec_node in self.transitions.get(node, []):
if sec_node in self.partial_order.keys():
self.transitions[self.start_nodes[0]].append(node)
break
self.partial_order = {}
tree = self.make_acyclic(None, self.start_nodes[0], 0, [])
# if the graph is disconnected or no start node is given,
# find a starting node for each component of the graph
if len(self.nodes) > len(self.partial_order):
self.find_starts()
self.max_order = 0
#for each component of the graph find ranks and order of the nodes
for s in self.start_nodes:
self.start = s
self.rank() # First step: network simplex algorithm
self.order_in_rank() # Second step: ordering nodes within ranks
def __str__(self):
result = ''
for l in self.levels:
result += 'PosY: ' + str(l) + '\n'
for node in self.levels[l]:
result += '\tPosX: '+ str(self.result[node]['y']) + ' - Node:' + str(node) + "\n"
return result
def scale(self, maxx, maxy, nwidth=0, nheight=0, margin=20):
"""Computes actual co-ordiantes of the nodes
"""
# for flat edges, i.e. source and destination nodes are on the same rank
for src in self.transitions:
for des in self.transitions[src]:
if self.result[des]['x'] - self.result[src]['x'] == 0:
self.result[src]['x'] += 0.08
self.result[des]['x'] -= 0.08
factorX = maxx + nheight
factorY = maxy + nwidth
for node in self.result:
self.result[node]['y'] = (self.result[node]['y']) * factorX + margin
self.result[node]['x'] = (self.result[node]['x']) * factorY + margin
def result_get(self):
return self.result
if __name__=='__main__':
starting_node = ['profile'] # put here nodes with flow_start=True
nodes = ['project','account','hr','base','product','mrp','test','profile']
transitions = [
('profile','mrp'),
('mrp','project'),
('project','product'),
('mrp','hr'),
('mrp','test'),
('project','account'),
('project','hr'),
('product','base'),
('account','product'),
('account','test'),
('account','base'),
('hr','base'),
('test','base')
]
radius = 20
g = graph(nodes, transitions)
g.process(starting_node)
g.scale(radius*3,radius*3, radius, radius)
from PIL import Image
from PIL import ImageDraw
img = Image.new("RGB", (800, 600), "#ffffff")
draw = ImageDraw.Draw(img)
result = g.result_get()
node_res = {}
for node in nodes:
node_res[node] = result[node]
for name,node in node_res.items():
draw.arc( (int(node['y']-radius), int(node['x']-radius),int(node['y']+radius), int(node['x']+radius) ), 0, 360, (128,128,128))
draw.text( (int(node['y']), int(node['x'])), str(name), (128,128,128))
for t in transitions:
draw.line( (int(node_res[t[0]]['y']), int(node_res[t[0]]['x']),int(node_res[t[1]]['y']),int(node_res[t[1]]['x'])),(128,128,128) )
img.save("graph.png", "PNG")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine.clients import progress
from heat.engine import constraints
from heat.engine import function
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.openstack.neutron import subnet
from heat.engine.resources.openstack.nova import server_network_mixin
from heat.engine.resources import scheduler_hints as sh
from heat.engine.resources import stack_user
from heat.engine import support
from heat.rpc import api as rpc_api
cfg.CONF.import_opt('default_software_config_transport', 'heat.common.config')
LOG = logging.getLogger(__name__)
class Server(stack_user.StackUser, sh.SchedulerHintsMixin,
server_network_mixin.ServerNetworkMixin):
PROPERTIES = (
NAME, IMAGE, BLOCK_DEVICE_MAPPING, BLOCK_DEVICE_MAPPING_V2,
FLAVOR, FLAVOR_UPDATE_POLICY, IMAGE_UPDATE_POLICY, KEY_NAME,
ADMIN_USER, AVAILABILITY_ZONE, SECURITY_GROUPS, NETWORKS,
SCHEDULER_HINTS, METADATA, USER_DATA_FORMAT, USER_DATA,
RESERVATION_ID, CONFIG_DRIVE, DISK_CONFIG, PERSONALITY,
ADMIN_PASS, SOFTWARE_CONFIG_TRANSPORT
) = (
'name', 'image', 'block_device_mapping', 'block_device_mapping_v2',
'flavor', 'flavor_update_policy', 'image_update_policy', 'key_name',
'admin_user', 'availability_zone', 'security_groups', 'networks',
'scheduler_hints', 'metadata', 'user_data_format', 'user_data',
'reservation_id', 'config_drive', 'diskConfig', 'personality',
'admin_pass', 'software_config_transport'
)
_BLOCK_DEVICE_MAPPING_KEYS = (
BLOCK_DEVICE_MAPPING_DEVICE_NAME, BLOCK_DEVICE_MAPPING_VOLUME_ID,
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID,
BLOCK_DEVICE_MAPPING_VOLUME_SIZE,
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM,
) = (
'device_name', 'volume_id',
'snapshot_id',
'volume_size',
'delete_on_termination',
)
_BLOCK_DEVICE_MAPPING_V2_KEYS = (
BLOCK_DEVICE_MAPPING_DEVICE_NAME,
BLOCK_DEVICE_MAPPING_VOLUME_ID,
BLOCK_DEVICE_MAPPING_IMAGE_ID,
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID,
BLOCK_DEVICE_MAPPING_SWAP_SIZE,
BLOCK_DEVICE_MAPPING_DEVICE_TYPE,
BLOCK_DEVICE_MAPPING_DISK_BUS,
BLOCK_DEVICE_MAPPING_BOOT_INDEX,
BLOCK_DEVICE_MAPPING_VOLUME_SIZE,
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM,
) = (
'device_name',
'volume_id',
'image_id',
'snapshot_id',
'swap_size',
'device_type',
'disk_bus',
'boot_index',
'volume_size',
'delete_on_termination',
)
_NETWORK_KEYS = (
NETWORK_UUID, NETWORK_ID, NETWORK_FIXED_IP, NETWORK_PORT,
NETWORK_SUBNET
) = (
'uuid', 'network', 'fixed_ip', 'port',
'subnet'
)
_SOFTWARE_CONFIG_FORMATS = (
HEAT_CFNTOOLS, RAW, SOFTWARE_CONFIG
) = (
'HEAT_CFNTOOLS', 'RAW', 'SOFTWARE_CONFIG'
)
_SOFTWARE_CONFIG_TRANSPORTS = (
POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE
) = (
'POLL_SERVER_CFN', 'POLL_SERVER_HEAT', 'POLL_TEMP_URL', 'ZAQAR_MESSAGE'
)
ATTRIBUTES = (
NAME_ATTR, ADDRESSES, NETWORKS_ATTR, FIRST_ADDRESS,
INSTANCE_NAME, ACCESSIPV4, ACCESSIPV6, CONSOLE_URLS,
) = (
'name', 'addresses', 'networks', 'first_address',
'instance_name', 'accessIPv4', 'accessIPv6', 'console_urls',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Server name.'),
update_allowed=True
),
IMAGE: properties.Schema(
properties.Schema.STRING,
_('The ID or name of the image to boot with.'),
constraints=[
constraints.CustomConstraint('glance.image')
],
update_allowed=True
),
BLOCK_DEVICE_MAPPING: properties.Schema(
properties.Schema.LIST,
_('Block device mappings for this server.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
BLOCK_DEVICE_MAPPING_DEVICE_NAME: properties.Schema(
properties.Schema.STRING,
_('A device name where the volume will be '
'attached in the system at /dev/device_name. '
'This value is typically vda.'),
required=True
),
BLOCK_DEVICE_MAPPING_VOLUME_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the volume to boot from. Only one '
'of volume_id or snapshot_id should be '
'provided.'),
constraints=[
constraints.CustomConstraint('cinder.volume')
]
),
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the snapshot to create a volume '
'from.'),
constraints=[
constraints.CustomConstraint('cinder.snapshot')
]
),
BLOCK_DEVICE_MAPPING_VOLUME_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('The size of the volume, in GB. It is safe to '
'leave this blank and have the Compute service '
'infer the size.')
),
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM: properties.Schema(
properties.Schema.BOOLEAN,
_('Indicate whether the volume should be deleted '
'when the server is terminated.')
),
},
)
),
BLOCK_DEVICE_MAPPING_V2: properties.Schema(
properties.Schema.LIST,
_('Block device mappings v2 for this server.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
BLOCK_DEVICE_MAPPING_DEVICE_NAME: properties.Schema(
properties.Schema.STRING,
_('A device name where the volume will be '
'attached in the system at /dev/device_name. '
'This value is typically vda.'),
),
BLOCK_DEVICE_MAPPING_VOLUME_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the volume to attach; it can be used as either a boot '
'or a non-boot device for the server.'),
constraints=[
constraints.CustomConstraint('cinder.volume')
]
),
BLOCK_DEVICE_MAPPING_IMAGE_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the image to create a volume from.'),
constraints=[
constraints.CustomConstraint('glance.image')
],
),
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the snapshot to create a volume '
'from.'),
constraints=[
constraints.CustomConstraint('cinder.snapshot')
]
),
BLOCK_DEVICE_MAPPING_SWAP_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('The size of the swap, in MB.')
),
BLOCK_DEVICE_MAPPING_DEVICE_TYPE: properties.Schema(
properties.Schema.STRING,
_('Device type: at the moment we can make distinction'
' only between disk and cdrom.'),
constraints=[
constraints.AllowedValues(['cdrom', 'disk']),
],
),
BLOCK_DEVICE_MAPPING_DISK_BUS: properties.Schema(
properties.Schema.STRING,
_('Bus of the device: hypervisor driver chooses a '
'suitable default if omitted.'),
constraints=[
constraints.AllowedValues(['ide', 'lame_bus',
'scsi', 'usb',
'virtio']),
],
),
BLOCK_DEVICE_MAPPING_BOOT_INDEX: properties.Schema(
properties.Schema.INTEGER,
_('Integer used for ordering the boot disks.'),
),
BLOCK_DEVICE_MAPPING_VOLUME_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('Size of the block device in GB. If it is omitted, '
'hypervisor driver calculates size.'),
),
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM: properties.Schema(
properties.Schema.BOOLEAN,
_('Indicate whether the volume should be deleted '
'when the server is terminated.')
),
},
),
support_status=support.SupportStatus(version='2015.1')
),
FLAVOR: properties.Schema(
properties.Schema.STRING,
_('The ID or name of the flavor to boot onto.'),
required=True,
update_allowed=True,
constraints=[
constraints.CustomConstraint('nova.flavor')
]
),
FLAVOR_UPDATE_POLICY: properties.Schema(
properties.Schema.STRING,
_('Policy on how to apply a flavor update; either by requesting '
'a server resize or by replacing the entire server.'),
default='RESIZE',
constraints=[
constraints.AllowedValues(['RESIZE', 'REPLACE']),
],
update_allowed=True
),
IMAGE_UPDATE_POLICY: properties.Schema(
properties.Schema.STRING,
_('Policy on how to apply an image-id update; either by '
'requesting a server rebuild or by replacing the entire server'),
default='REBUILD',
constraints=[
constraints.AllowedValues(['REBUILD', 'REPLACE',
'REBUILD_PRESERVE_EPHEMERAL']),
],
update_allowed=True
),
KEY_NAME: properties.Schema(
properties.Schema.STRING,
_('Name of keypair to inject into the server.'),
constraints=[
constraints.CustomConstraint('nova.keypair')
]
),
ADMIN_USER: properties.Schema(
properties.Schema.STRING,
_('Name of the administrative user to use on the server.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
message=_('The default cloud-init user set up for each image '
'(e.g. "ubuntu" for Ubuntu 12.04+, "fedora" for '
'Fedora 19+ and "cloud-user" for CentOS/RHEL 6.5).'),
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='2014.1',
previous_status=support.SupportStatus(version='2013.2')
)
)
),
AVAILABILITY_ZONE: properties.Schema(
properties.Schema.STRING,
_('Name of the availability zone for server placement.')
),
SECURITY_GROUPS: properties.Schema(
properties.Schema.LIST,
_('List of security group names or IDs. Cannot be used if '
'neutron ports are associated with this server; assign '
'security groups to the ports instead.'),
default=[]
),
NETWORKS: properties.Schema(
properties.Schema.LIST,
_('An ordered list of nics to be added to this server, with '
'information about connected networks, fixed ips, port etc.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
NETWORK_UUID: properties.Schema(
properties.Schema.STRING,
_('ID of network to create a port on.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
previous_status=support.SupportStatus(
status=support.DEPRECATED,
message=_('Use property %s.') % NETWORK_ID,
version='2014.1'
)
),
constraints=[
constraints.CustomConstraint('neutron.network')
]
),
NETWORK_ID: properties.Schema(
properties.Schema.STRING,
_('Name or ID of network to create a port on.'),
constraints=[
constraints.CustomConstraint('neutron.network')
]
),
NETWORK_FIXED_IP: properties.Schema(
properties.Schema.STRING,
_('Fixed IP address to specify for the port '
'created on the requested network.'),
constraints=[
constraints.CustomConstraint('ip_addr')
]
),
NETWORK_PORT: properties.Schema(
properties.Schema.STRING,
_('ID of an existing port to associate with this '
'server.'),
constraints=[
constraints.CustomConstraint('neutron.port')
]
),
NETWORK_SUBNET: properties.Schema(
properties.Schema.STRING,
_('Subnet in which to allocate the IP address for '
'port. Used only if port property is not specified '
'for creating port, based on derived properties.'),
support_status=support.SupportStatus(version='5.0.0'),
implemented=False
)
},
),
update_allowed=True
),
SCHEDULER_HINTS: properties.Schema(
properties.Schema.MAP,
_('Arbitrary key-value pairs specified by the client to help '
'boot a server.')
),
METADATA: properties.Schema(
properties.Schema.MAP,
_('Arbitrary key/value metadata to store for this server. Both '
'keys and values must be 255 characters or less. Non-string '
'values will be serialized to JSON (and the serialized '
'string must be 255 characters or less).'),
update_allowed=True
),
USER_DATA_FORMAT: properties.Schema(
properties.Schema.STRING,
_('How the user_data should be formatted for the server. For '
'HEAT_CFNTOOLS, the user_data is bundled as part of the '
'heat-cfntools cloud-init boot configuration data. For RAW '
'the user_data is passed to Nova unmodified. '
'For SOFTWARE_CONFIG user_data is bundled as part of the '
'software config data, and metadata is derived from any '
'associated SoftwareDeployment resources.'),
default=HEAT_CFNTOOLS,
constraints=[
constraints.AllowedValues(_SOFTWARE_CONFIG_FORMATS),
]
),
SOFTWARE_CONFIG_TRANSPORT: properties.Schema(
properties.Schema.STRING,
_('How the server should receive the metadata required for '
'software configuration. POLL_SERVER_CFN will allow calls to '
'the cfn API action DescribeStackResource authenticated with '
'the provided keypair. POLL_SERVER_HEAT will allow calls to '
'the Heat API resource-show using the provided keystone '
'credentials. POLL_TEMP_URL will create and populate a '
'Swift TempURL with metadata for polling.'),
default=cfg.CONF.default_software_config_transport,
constraints=[
constraints.AllowedValues(_SOFTWARE_CONFIG_TRANSPORTS),
]
),
USER_DATA: properties.Schema(
properties.Schema.STRING,
_('User data script to be executed by cloud-init.'),
default=''
),
RESERVATION_ID: properties.Schema(
properties.Schema.STRING,
_('A UUID for the set of servers being requested.')
),
CONFIG_DRIVE: properties.Schema(
properties.Schema.BOOLEAN,
_('If True, enable config drive on the server.')
),
DISK_CONFIG: properties.Schema(
properties.Schema.STRING,
_('Control how the disk is partitioned when the server is '
'created.'),
constraints=[
constraints.AllowedValues(['AUTO', 'MANUAL']),
]
),
PERSONALITY: properties.Schema(
properties.Schema.MAP,
_('A map of files to create/overwrite on the server upon boot. '
'Keys are file names and values are the file contents.'),
default={}
),
ADMIN_PASS: properties.Schema(
properties.Schema.STRING,
_('The administrator password for the server.'),
update_allowed=True
),
}
attributes_schema = {
NAME_ATTR: attributes.Schema(
_('Name of the server.'),
type=attributes.Schema.STRING
),
ADDRESSES: attributes.Schema(
_('A dict of all network addresses with corresponding port_id. '
'Each network will have two keys in dict, they are network '
'name and network id. '
'The port ID may be obtained through the following expression: '
'"{get_attr: [<server>, addresses, <network name_or_id>, 0, '
'port]}".'),
type=attributes.Schema.MAP
),
NETWORKS_ATTR: attributes.Schema(
_('A dict of assigned network addresses of the form: '
'{"public": [ip1, ip2...], "private": [ip3, ip4], '
'"public_uuid": [ip1, ip2...], "private_uuid": [ip3, ip4]}. '
'Each network will have two keys in dict, they are network '
'name and network id. '),
type=attributes.Schema.MAP
),
FIRST_ADDRESS: attributes.Schema(
_('Convenience attribute to fetch the first assigned network '
'address, or an empty string if nothing has been assigned at '
'this time. Result may not be predictable if the server has '
'addresses from more than one network.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
message=_('Use the networks attribute instead of '
'first_address. For example: "{get_attr: '
'[<server name>, networks, <network name>, 0]}"'),
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='2014.2',
previous_status=support.SupportStatus(version='2013.2')
)
)
),
INSTANCE_NAME: attributes.Schema(
_('AWS compatible instance name.'),
type=attributes.Schema.STRING
),
ACCESSIPV4: attributes.Schema(
_('The manually assigned alternative public IPv4 address '
'of the server.'),
type=attributes.Schema.STRING
),
ACCESSIPV6: attributes.Schema(
_('The manually assigned alternative public IPv6 address '
'of the server.'),
type=attributes.Schema.STRING
),
CONSOLE_URLS: attributes.Schema(
_("URLs of server's consoles. "
"To get a specific console type, the requested type "
"can be specified as parameter to the get_attr function, "
"e.g. get_attr: [ <server>, console_urls, novnc ]. "
"Currently supported types are "
"novnc, xvpvnc, spice-html5, rdp-html5, serial."),
support_status=support.SupportStatus(version='2015.1'),
type=attributes.Schema.MAP
),
}
# Server host name is limited to 53 characters due to the typical default
# linux HOST_NAME_MAX of 64, minus the '.novalocal' suffix appended to the name
physical_resource_name_limit = 53
default_client_name = 'nova'
entity = 'servers'
def translation_rules(self):
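# Copy the value of the deprecated 'uuid' network key into the 'network'
# key (REPLACE rule), so templates written against either property are
# handled the same way.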
return [properties.TranslationRule(
self.properties,
properties.TranslationRule.REPLACE,
source_path=[self.NETWORKS, self.NETWORK_ID],
value_name=self.NETWORK_UUID)]
def __init__(self, name, json_snippet, stack):
super(Server, self).__init__(name, json_snippet, stack)
if self.user_data_software_config():
self._register_access_key()
def _server_name(self):
name = self.properties[self.NAME]
if name:
return name
return self.physical_resource_name()
def _config_drive(self):
# This method is overridden by the derived CloudServer resource
return self.properties[self.CONFIG_DRIVE]
def _populate_deployments_metadata(self, meta):
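# Fill in the os-collect-config section of the server metadata according
# to the selected software config transport (Heat API, Zaqar queue, CFN
# polling or Swift TempURL), then persist the merged metadata.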
meta['deployments'] = meta.get('deployments', [])
if self.transport_poll_server_heat():
meta['os-collect-config'] = {'heat': {
'user_id': self._get_user_id(),
'password': self.password,
'auth_url': self.context.auth_url,
'project_id': self.stack.stack_user_project_id,
'stack_id': self.stack.identifier().stack_path(),
'resource_name': self.name}
}
if self.transport_zaqar_message():
queue_id = self.physical_resource_name()
self.data_set('metadata_queue_id', queue_id)
zaqar_plugin = self.client_plugin('zaqar')
zaqar = zaqar_plugin.create_for_tenant(
self.stack.stack_user_project_id)
queue = zaqar.queue(queue_id)
queue.post({'body': meta, 'ttl': zaqar_plugin.DEFAULT_TTL})
meta['os-collect-config'] = {'zaqar': {
'user_id': self._get_user_id(),
'password': self.password,
'auth_url': self.context.auth_url,
'project_id': self.stack.stack_user_project_id,
'queue_id': queue_id}
}
elif self.transport_poll_server_cfn():
meta['os-collect-config'] = {'cfn': {
'metadata_url': '%s/v1/' % cfg.CONF.heat_metadata_server_url,
'access_key_id': self.access_key,
'secret_access_key': self.secret_key,
'stack_name': self.stack.name,
'path': '%s.Metadata' % self.name}
}
elif self.transport_poll_temp_url():
container = self.physical_resource_name()
object_name = str(uuid.uuid4())
self.client('swift').put_container(container)
url = self.client_plugin('swift').get_temp_url(
container, object_name, method='GET')
put_url = self.client_plugin('swift').get_temp_url(
container, object_name)
self.data_set('metadata_put_url', put_url)
self.data_set('metadata_object_name', object_name)
meta['os-collect-config'] = {'request': {
'metadata_url': url}
}
self.client('swift').put_object(
container, object_name, jsonutils.dumps(meta))
self.metadata_set(meta)
def _register_access_key(self):
'''
Access is limited to this resource, which created the keypair
'''
def access_allowed(resource_name):
return resource_name == self.name
if self.transport_poll_server_cfn():
self.stack.register_access_allowed_handler(
self.access_key, access_allowed)
elif self.transport_poll_server_heat():
self.stack.register_access_allowed_handler(
self._get_user_id(), access_allowed)
def _create_transport_credentials(self):
if self.transport_poll_server_cfn():
self._create_user()
self._create_keypair()
elif (self.transport_poll_server_heat() or
self.transport_zaqar_message()):
self.password = uuid.uuid4().hex
self._create_user()
self._register_access_key()
@property
def access_key(self):
return self.data().get('access_key')
@property
def secret_key(self):
return self.data().get('secret_key')
@property
def password(self):
return self.data().get('password')
@password.setter
def password(self, password):
if password is None:
self.data_delete('password')
else:
self.data_set('password', password, True)
def user_data_raw(self):
return self.properties[self.USER_DATA_FORMAT] == self.RAW
def user_data_software_config(self):
return self.properties[
self.USER_DATA_FORMAT] == self.SOFTWARE_CONFIG
def transport_poll_server_cfn(self):
return self.properties[
self.SOFTWARE_CONFIG_TRANSPORT] == self.POLL_SERVER_CFN
def transport_poll_server_heat(self):
return self.properties[
self.SOFTWARE_CONFIG_TRANSPORT] == self.POLL_SERVER_HEAT
def transport_poll_temp_url(self):
return self.properties[
self.SOFTWARE_CONFIG_TRANSPORT] == self.POLL_TEMP_URL
def transport_zaqar_message(self):
return self.properties.get(
self.SOFTWARE_CONFIG_TRANSPORT) == self.ZAQAR_MESSAGE
def get_software_config(self, ud_content):
try:
sc = self.rpc_client().show_software_config(
self.context, ud_content)
return sc[rpc_api.SOFTWARE_CONFIG_CONFIG]
except Exception as ex:
self.rpc_client().ignore_error_named(ex, 'NotFound')
return ud_content
def handle_create(self):
security_groups = self.properties[self.SECURITY_GROUPS]
user_data_format = self.properties[self.USER_DATA_FORMAT]
ud_content = self.properties[self.USER_DATA]
if self.user_data_software_config() or self.user_data_raw():
if uuidutils.is_uuid_like(ud_content):
# attempt to load the userdata from software config
ud_content = self.get_software_config(ud_content)
metadata = self.metadata_get(True) or {}
if self.user_data_software_config():
self._create_transport_credentials()
self._populate_deployments_metadata(metadata)
userdata = self.client_plugin().build_userdata(
metadata,
ud_content,
instance_user=None,
user_data_format=user_data_format)
flavor = self.properties[self.FLAVOR]
availability_zone = self.properties[self.AVAILABILITY_ZONE]
image = self.properties[self.IMAGE]
if image:
image = self.client_plugin('glance').get_image_id(image)
flavor_id = self.client_plugin().get_flavor_id(flavor)
instance_meta = self.properties[self.METADATA]
if instance_meta is not None:
instance_meta = self.client_plugin().meta_serialize(
instance_meta)
scheduler_hints = self._scheduler_hints(
self.properties[self.SCHEDULER_HINTS])
nics = self._build_nics(self.properties[self.NETWORKS])
block_device_mapping = self._build_block_device_mapping(
self.properties[self.BLOCK_DEVICE_MAPPING])
block_device_mapping_v2 = self._build_block_device_mapping_v2(
self.properties[self.BLOCK_DEVICE_MAPPING_V2])
reservation_id = self.properties[self.RESERVATION_ID]
disk_config = self.properties[self.DISK_CONFIG]
admin_pass = self.properties[self.ADMIN_PASS] or None
personality_files = self.properties[self.PERSONALITY]
key_name = self.properties[self.KEY_NAME]
server = None
try:
server = self.client().servers.create(
name=self._server_name(),
image=image,
flavor=flavor_id,
key_name=key_name,
security_groups=security_groups,
userdata=userdata,
meta=instance_meta,
scheduler_hints=scheduler_hints,
nics=nics,
availability_zone=availability_zone,
block_device_mapping=block_device_mapping,
block_device_mapping_v2=block_device_mapping_v2,
reservation_id=reservation_id,
config_drive=self._config_drive(),
disk_config=disk_config,
files=personality_files,
admin_pass=admin_pass)
finally:
# Avoid a race condition where the thread could be canceled
# before the ID is stored
if server is not None:
self.resource_id_set(server.id)
return server.id
def check_create_complete(self, server_id):
return self.client_plugin()._check_active(server_id)
def handle_check(self):
server = self.client().servers.get(self.resource_id)
status = self.client_plugin().get_status(server)
checks = [{'attr': 'status', 'expected': 'ACTIVE', 'current': status}]
self._verify_check_conditions(checks)
@classmethod
def _build_block_device_mapping(cls, bdm):
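# Build the legacy nova block_device_mapping dict of the form
# {device_name: "<id>:<'snap' or ''>:<volume_size>:<delete_on_termination>"},
# e.g. {'vda': '1234abcd:snap:10:True'} (illustrative values only).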
if not bdm:
return None
bdm_dict = {}
for mapping in bdm:
mapping_parts = []
snapshot_id = mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
if snapshot_id:
mapping_parts.append(snapshot_id)
mapping_parts.append('snap')
else:
volume_id = mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID)
mapping_parts.append(volume_id)
mapping_parts.append('')
volume_size = mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_SIZE)
delete = mapping.get(cls.BLOCK_DEVICE_MAPPING_DELETE_ON_TERM)
if volume_size:
mapping_parts.append(str(volume_size))
else:
mapping_parts.append('')
if delete:
mapping_parts.append(str(delete))
device_name = mapping.get(cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME)
bdm_dict[device_name] = ':'.join(mapping_parts)
return bdm_dict
@classmethod
def _build_block_device_mapping_v2(cls, bdm_v2):
if not bdm_v2:
return None
bdm_v2_list = []
for mapping in bdm_v2:
bmd_dict = None
if mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID):
bmd_dict = {
'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID),
'source_type': 'volume',
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': False,
}
elif mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID):
bmd_dict = {
'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID),
'source_type': 'snapshot',
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': False,
}
elif mapping.get(cls.BLOCK_DEVICE_MAPPING_IMAGE_ID):
bmd_dict = {
'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_IMAGE_ID),
'source_type': 'image',
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': False,
}
elif mapping.get(cls.BLOCK_DEVICE_MAPPING_SWAP_SIZE):
bmd_dict = {
'source_type': 'blank',
'destination_type': 'local',
'boot_index': -1,
'delete_on_termination': True,
'guest_format': 'swap',
'volume_size': mapping.get(
cls.BLOCK_DEVICE_MAPPING_SWAP_SIZE),
}
update_props = (cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME,
cls.BLOCK_DEVICE_MAPPING_DEVICE_TYPE,
cls.BLOCK_DEVICE_MAPPING_DISK_BUS,
cls.BLOCK_DEVICE_MAPPING_BOOT_INDEX,
cls.BLOCK_DEVICE_MAPPING_VOLUME_SIZE,
cls.BLOCK_DEVICE_MAPPING_DELETE_ON_TERM)
for update_prop in update_props:
if mapping.get(update_prop) is not None:
bmd_dict[update_prop] = mapping.get(update_prop)
if bmd_dict:
bdm_v2_list.append(bmd_dict)
return bdm_v2_list
def _add_port_for_address(self, server):
"""Method adds port id to list of addresses.
This method is used only for resolving attributes.
"""
nets = copy.deepcopy(server.addresses)
ifaces = server.interface_list()
ip_mac_mapping_on_port_id = dict(((iface.fixed_ips[0]['ip_address'],
iface.mac_addr), iface.port_id)
for iface in ifaces)
for net_name in nets:
for addr in nets[net_name]:
addr['port'] = ip_mac_mapping_on_port_id.get(
(addr['addr'], addr['OS-EXT-IPS-MAC:mac_addr']))
return self._extend_networks(nets)
def _extend_networks(self, networks):
"""Method adds same networks with replaced name on network id.
This method is used only for resolving attributes.
"""
nets = copy.deepcopy(networks)
for key in list(nets.keys()):
try:
net_id = self.client_plugin().get_net_id_by_label(key)
except (exception.NovaNetworkNotFound,
exception.PhysicalResourceNameAmbiguity):
net_id = None
if net_id:
nets[net_id] = nets[key]
return nets
def _resolve_attribute(self, name):
if name == self.FIRST_ADDRESS:
return self.client_plugin().server_to_ipaddress(
self.resource_id) or ''
if name == self.NAME_ATTR:
return self._server_name()
try:
server = self.client().servers.get(self.resource_id)
except Exception as e:
self.client_plugin().ignore_not_found(e)
return ''
if name == self.ADDRESSES:
return self._add_port_for_address(server)
if name == self.NETWORKS_ATTR:
return self._extend_networks(server.networks)
if name == self.INSTANCE_NAME:
return getattr(server, 'OS-EXT-SRV-ATTR:instance_name', None)
if name == self.ACCESSIPV4:
return server.accessIPv4
if name == self.ACCESSIPV6:
return server.accessIPv6
if name == self.CONSOLE_URLS:
return self.client_plugin('nova').get_console_urls(server)
def add_dependencies(self, deps):
super(Server, self).add_dependencies(deps)
# Depend on any Subnet in this template with the same
# network_id as the networks attached to this server.
# It is not known which subnet a server might be assigned
# to so all subnets in a network should be created before
# the servers in that network.
nets = self.properties[self.NETWORKS]
if not nets:
return
for res in six.itervalues(self.stack):
if res.has_interface('OS::Neutron::Subnet'):
subnet_net = (res.properties.get(subnet.Subnet.NETWORK_ID)
or res.properties.get(subnet.Subnet.NETWORK))
for net in nets:
# worry about network_id because that could be the match
# assigned to the subnet as well and could have been
# created by this stack. Regardless, the server should
# still wait on the subnet.
net_id = (net.get(self.NETWORK_ID) or
net.get(self.NETWORK_UUID))
if net_id and net_id == subnet_net:
deps += (self, res)
break
def _update_flavor(self, prop_diff):
flavor_update_policy = (
prop_diff.get(self.FLAVOR_UPDATE_POLICY) or
self.properties[self.FLAVOR_UPDATE_POLICY])
flavor = prop_diff[self.FLAVOR]
if flavor_update_policy == 'REPLACE':
raise resource.UpdateReplace(self.name)
flavor_id = self.client_plugin().get_flavor_id(flavor)
handler_args = {'args': (flavor_id,)}
checker_args = {'args': (flavor_id, flavor)}
prg_resize = progress.ServerUpdateProgress(self.resource_id,
'resize',
handler_extra=handler_args,
checker_extra=checker_args)
prg_verify = progress.ServerUpdateProgress(self.resource_id,
'verify_resize')
return prg_resize, prg_verify
def _update_image(self, prop_diff):
image_update_policy = (
prop_diff.get(self.IMAGE_UPDATE_POLICY) or
self.properties[self.IMAGE_UPDATE_POLICY])
if image_update_policy == 'REPLACE':
raise resource.UpdateReplace(self.name)
image = prop_diff[self.IMAGE]
image_id = self.client_plugin('glance').get_image_id(image)
preserve_ephemeral = (
image_update_policy == 'REBUILD_PRESERVE_EPHEMERAL')
password = (prop_diff.get(self.ADMIN_PASS) or
self.properties[self.ADMIN_PASS])
kwargs = {'password': password,
'preserve_ephemeral': preserve_ephemeral}
prg = progress.ServerUpdateProgress(self.resource_id,
'rebuild',
handler_extra={'args': (image_id,),
'kwargs': kwargs})
return prg
def _update_networks(self, server, prop_diff):
updaters = []
new_networks = prop_diff.get(self.NETWORKS)
old_networks = self.properties[self.NETWORKS]
if not server:
server = self.client().servers.get(self.resource_id)
interfaces = server.interface_list()
remove_ports, add_nets = self.calculate_networks(
old_networks, new_networks, interfaces)
for port in remove_ports:
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_detach',
complete=True,
handler_extra={'args': (port,)})
)
for args in add_nets:
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_attach',
complete=True,
handler_extra={'kwargs': args})
)
return updaters
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if 'Metadata' in tmpl_diff:
self.metadata_set(tmpl_diff['Metadata'])
updaters = []
server = None
if self.METADATA in prop_diff:
server = self.client().servers.get(self.resource_id)
self.client_plugin().meta_update(server,
prop_diff[self.METADATA])
if self.FLAVOR in prop_diff:
updaters.extend(self._update_flavor(prop_diff))
if self.IMAGE in prop_diff:
updaters.append(self._update_image(prop_diff))
elif self.ADMIN_PASS in prop_diff:
if not server:
server = self.client().servers.get(self.resource_id)
server.change_password(prop_diff[self.ADMIN_PASS])
if self.NAME in prop_diff:
if not server:
server = self.client().servers.get(self.resource_id)
self.client_plugin().rename(server, prop_diff[self.NAME])
if self.NETWORKS in prop_diff:
updaters.extend(self._update_networks(server, prop_diff))
# NOTE(pas-ha) optimization is possible (starting first task
# right away), but we'd rather not, as this method already might
# have called several APIs
return updaters
def check_update_complete(self, updaters):
'''Push all updaters to completion in list order.'''
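# Updaters are driven strictly in list order: the first not-yet-started
# updater is started and False is returned; otherwise the first incomplete
# one is polled. True is returned only once every updater is complete.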
for prg in updaters:
if not prg.called:
handler = getattr(self.client_plugin(), prg.handler)
prg.called = handler(*prg.handler_args,
**prg.handler_kwargs)
return False
if not prg.complete:
check_complete = getattr(self.client_plugin(), prg.checker)
prg.complete = check_complete(*prg.checker_args,
**prg.checker_kwargs)
break
return all(prg.complete for prg in updaters)
def metadata_update(self, new_metadata=None):
'''
Refresh the metadata if new_metadata is None
'''
if new_metadata is None:
# Re-resolve the template metadata and merge it with the
# current resource metadata. This is necessary because the
# attributes referenced in the template metadata may change
# and the resource itself adds keys to the metadata which
# are not specified in the template (e.g the deployments data)
meta = self.metadata_get(refresh=True) or {}
tmpl_meta = self.t.metadata()
meta.update(tmpl_meta)
self.metadata_set(meta)
@staticmethod
def _check_maximum(count, maximum, msg):
'''
Check a count against a maximum, unless maximum is -1 which indicates
that there is no limit
'''
if maximum != -1 and count > maximum:
raise exception.StackValidationFailed(message=msg)
def _validate_block_device_mapping(self):
# either volume_id or snapshot_id needs to be specified, but not both
# for block device mapping.
bdm = self.properties[self.BLOCK_DEVICE_MAPPING] or []
bootable_vol = False
for mapping in bdm:
device_name = mapping[self.BLOCK_DEVICE_MAPPING_DEVICE_NAME]
if device_name == 'vda':
bootable_vol = True
volume_id = mapping.get(self.BLOCK_DEVICE_MAPPING_VOLUME_ID)
snapshot_id = mapping.get(self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
if volume_id is not None and snapshot_id is not None:
raise exception.ResourcePropertyConflict(
self.BLOCK_DEVICE_MAPPING_VOLUME_ID,
self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
if volume_id is None and snapshot_id is None:
msg = _('Either volume_id or snapshot_id must be specified for'
' device mapping %s') % device_name
raise exception.StackValidationFailed(message=msg)
bdm_v2 = self.properties[self.BLOCK_DEVICE_MAPPING_V2] or []
if bdm and bdm_v2:
raise exception.ResourcePropertyConflict(
self.BLOCK_DEVICE_MAPPING, self.BLOCK_DEVICE_MAPPING_V2)
for mapping in bdm_v2:
volume_id = mapping.get(self.BLOCK_DEVICE_MAPPING_VOLUME_ID)
snapshot_id = mapping.get(self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
image_id = mapping.get(self.BLOCK_DEVICE_MAPPING_IMAGE_ID)
swap_size = mapping.get(self.BLOCK_DEVICE_MAPPING_SWAP_SIZE)
property_tuple = (volume_id, snapshot_id, image_id, swap_size)
if property_tuple.count(None) < 3:
raise exception.ResourcePropertyConflict(
self.BLOCK_DEVICE_MAPPING_VOLUME_ID,
self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID,
self.BLOCK_DEVICE_MAPPING_IMAGE_ID,
self.BLOCK_DEVICE_MAPPING_SWAP_SIZE)
if property_tuple.count(None) == 4:
msg = _('Either volume_id, snapshot_id, image_id or '
'swap_size must be specified.')
raise exception.StackValidationFailed(message=msg)
if any((volume_id, snapshot_id, image_id)):
bootable_vol = True
return bootable_vol
def _validate_network(self, network):
if (network.get(self.NETWORK_ID) is None
and network.get(self.NETWORK_PORT) is None
and network.get(self.NETWORK_UUID) is None):
msg = _('One of the properties "%(id)s", "%(port_id)s", '
'"%(uuid)s" should be set for the '
'specified network of server "%(server)s".'
'') % dict(id=self.NETWORK_ID,
port_id=self.NETWORK_PORT,
uuid=self.NETWORK_UUID,
server=self.name)
raise exception.StackValidationFailed(message=msg)
if network.get(self.NETWORK_UUID) and network.get(self.NETWORK_ID):
msg = _('Properties "%(uuid)s" and "%(id)s" are both set '
'to the network "%(network)s" for the server '
'"%(server)s". The "%(uuid)s" property is deprecated. '
'Use only "%(id)s" property.'
'') % dict(uuid=self.NETWORK_UUID,
id=self.NETWORK_ID,
network=network[self.NETWORK_ID],
server=self.name)
raise exception.StackValidationFailed(message=msg)
elif network.get(self.NETWORK_UUID):
LOG.info(_LI('For the server "%(server)s" the "%(uuid)s" '
'property is set to network "%(network)s". '
'"%(uuid)s" property is deprecated. Use '
'"%(id)s" property instead.'),
dict(uuid=self.NETWORK_UUID,
id=self.NETWORK_ID,
network=network[self.NETWORK_ID],
server=self.name))
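# Illustrative network entries (a sketch; key names assumed from the
# NETWORK_* constants used above):
#
#   {'network': 'net-1'}                   # accepted
#   {'port': 'port-1'}                     # accepted
#   {}                                     # rejected: nothing set
#   {'uuid': 'net-1', 'network': 'net-1'}  # rejected: both uuid and network
#   {'uuid': 'net-1'}                      # accepted, with a deprecation log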
def validate(self):
'''
Validate any of the provided params
'''
super(Server, self).validate()
bootable_vol = self._validate_block_device_mapping()
# make sure the image exists if specified.
image = self.properties[self.IMAGE]
if not image and not bootable_vol:
msg = _('Neither image nor bootable volume is specified for'
' instance %s') % self.name
raise exception.StackValidationFailed(message=msg)
# the network properties 'uuid' and 'network' should not be used
# together for any of the networks
networks = self.properties[self.NETWORKS] or []
# record if any networks include explicit ports
networks_with_port = False
for network in networks:
networks_with_port = (networks_with_port or
network.get(self.NETWORK_PORT))
self._validate_network(network)
# retrieve the provider's absolute limits if they will be needed
metadata = self.properties[self.METADATA]
personality = self.properties[self.PERSONALITY]
if metadata is not None or personality:
limits = self.client_plugin().absolute_limits()
# if 'security_groups' is present for the server and an explicit 'port'
# is set in one or more entries in 'networks', raise a validation error
if networks_with_port and self.properties[self.SECURITY_GROUPS]:
raise exception.ResourcePropertyConflict(
self.SECURITY_GROUPS,
"/".join([self.NETWORKS, self.NETWORK_PORT]))
# verify that the number of metadata entries is not greater
# than the maximum number allowed in the provider's absolute
# limits
if metadata is not None:
msg = _('Instance metadata must not contain greater than %s '
'entries. This is the maximum number allowed by your '
'service provider') % limits['maxServerMeta']
self._check_maximum(len(metadata),
limits['maxServerMeta'], msg)
# verify the number of personality files and the size of each
# personality file against the provider's absolute limits
if personality:
msg = _("The personality property may not contain "
"greater than %s entries.") % limits['maxPersonality']
self._check_maximum(len(personality),
limits['maxPersonality'], msg)
for path, contents in personality.items():
msg = (_("The contents of personality file \"%(path)s\" "
"is larger than the maximum allowed personality "
"file size (%(max_size)s bytes).") %
{'path': path,
'max_size': limits['maxPersonalitySize']})
self._check_maximum(len(bytes(contents.encode('utf-8'))),
limits['maxPersonalitySize'], msg)
def _delete_temp_url(self):
object_name = self.data().get('metadata_object_name')
if not object_name:
return
try:
container = self.physical_resource_name()
swift = self.client('swift')
swift.delete_object(container, object_name)
headers = swift.head_container(container)
if int(headers['x-container-object-count']) == 0:
swift.delete_container(container)
except Exception as ex:
self.client_plugin('swift').ignore_not_found(ex)
def _delete_queue(self):
queue_id = self.data().get('metadata_queue_id')
if not queue_id:
return
client_plugin = self.client_plugin('zaqar')
zaqar = client_plugin.create_for_tenant(
self.stack.stack_user_project_id)
try:
zaqar.queue(queue_id).delete()
except Exception as ex:
client_plugin.ignore_not_found(ex)
self.data_delete('metadata_queue_id')
def handle_snapshot_delete(self, state):
if state[0] != self.FAILED:
image_id = self.client().servers.create_image(
self.resource_id, self.physical_resource_name())
return progress.ServerDeleteProgress(
self.resource_id, image_id, False)
return self.handle_delete()
def handle_delete(self):
if self.resource_id is None:
return
if self.user_data_software_config():
self._delete_user()
self._delete_temp_url()
self._delete_queue()
try:
self.client().servers.delete(self.resource_id)
except Exception as e:
self.client_plugin().ignore_not_found(e)
return
return progress.ServerDeleteProgress(self.resource_id)
def check_delete_complete(self, prg):
if not prg:
return True
if not prg.image_complete:
image = self.client().images.get(prg.image_id)
if image.status in ('DELETED', 'ERROR'):
raise exception.Error(image.status)
elif image.status == 'ACTIVE':
prg.image_complete = True
if not self.handle_delete():
return True
return False
return self.client_plugin().check_delete_server_complete(
prg.server_id)
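# Delete-flow sketch (illustrative, not from the source): when
# handle_snapshot_delete() first requests an image of the server, this method
# waits for that image to reach ACTIVE, then re-runs handle_delete() to remove
# the server itself, and finally polls the client plugin until the server is
# gone, e.g.
#
#   prg = self.handle_snapshot_delete(state)  # 'state' as passed by the engine
#   while not self.check_delete_complete(prg):
#       pass  # poll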
def handle_suspend(self):
'''
Suspend a server. Note that we do not wait for the SUSPENDED state;
it is polled for by check_suspend_complete, in a similar way to the
create logic, so we can take advantage of coroutines.
'''
if self.resource_id is None:
raise exception.Error(_('Cannot suspend %s, resource_id not set') %
self.name)
try:
server = self.client().servers.get(self.resource_id)
except Exception as e:
if self.client_plugin().is_not_found(e):
raise exception.NotFound(_('Failed to find server %s') %
self.resource_id)
else:
raise
else:
# if the server has already been suspended successfully,
# no need to suspend again
if self.client_plugin().get_status(server) != 'SUSPENDED':
LOG.debug('suspending server %s' % self.resource_id)
server.suspend()
return server.id
def check_suspend_complete(self, server_id):
cp = self.client_plugin()
server = cp.fetch_server(server_id)
if not server:
return False
status = cp.get_status(server)
LOG.debug('%(name)s check_suspend_complete status = %(status)s'
% {'name': self.name, 'status': status})
if status in list(cp.deferred_server_statuses + ['ACTIVE']):
return status == 'SUSPENDED'
else:
exc = resource.ResourceUnknownStatus(
result=_('Suspend of server %s failed') % server.name,
resource_status=status)
raise exc
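# Polling sketch (illustrative only): the engine repeatedly calls
# check_suspend_complete() with the id returned by handle_suspend() until it
# returns True, e.g.
#
#   server_id = self.handle_suspend()
#   while not self.check_suspend_complete(server_id):
#       pass  # the engine yields here between polls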
def handle_resume(self):
'''
Resume a server. Note that we do not wait for the ACTIVE state;
it is polled for by check_resume_complete, in a similar way to the
create logic, so we can take advantage of coroutines.
'''
if self.resource_id is None:
raise exception.Error(_('Cannot resume %s, resource_id not set') %
self.name)
try:
server = self.client().servers.get(self.resource_id)
except Exception as e:
if self.client_plugin().is_not_found(e):
raise exception.NotFound(_('Failed to find server %s') %
self.resource_id)
else:
raise
else:
# if the server has already been resumed successfully,
# no need to resume again
if self.client_plugin().get_status(server) != 'ACTIVE':
LOG.debug('resuming server %s' % self.resource_id)
server.resume()
return server.id
def check_resume_complete(self, server_id):
return self.client_plugin()._check_active(server_id)
def handle_snapshot(self):
image_id = self.client().servers.create_image(
self.resource_id, self.physical_resource_name())
self.data_set('snapshot_image_id', image_id)
return image_id
def check_snapshot_complete(self, image_id):
image = self.client().images.get(image_id)
if image.status == 'ACTIVE':
return True
elif image.status == 'ERROR' or image.status == 'DELETED':
raise exception.Error(image.status)
return False
def handle_delete_snapshot(self, snapshot):
image_id = snapshot['resource_data'].get('snapshot_image_id')
try:
self.client().images.delete(image_id)
except Exception as e:
self.client_plugin().ignore_not_found(e)
def handle_restore(self, defn, restore_data):
image_id = restore_data['resource_data']['snapshot_image_id']
props = function.resolve(self.properties.data)
props[self.IMAGE] = image_id
return defn.freeze(properties=props)
def resource_mapping():
return {
'OS::Nova::Server': Server,
}
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Cudnn RNN operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
CUDNN_RNN_UNIDIRECTION = cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION
CUDNN_RNN_BIDIRECTION = cudnn_rnn_ops.CUDNN_RNN_BIDIRECTION
CUDNN_LSTM = cudnn_rnn_ops.CUDNN_LSTM
CUDNN_GRU = cudnn_rnn_ops.CUDNN_GRU
CUDNN_RNN_RELU = cudnn_rnn_ops.CUDNN_RNN_RELU
CUDNN_RNN_TANH = cudnn_rnn_ops.CUDNN_RNN_TANH
# Half for cell input, half for hidden states.
CUDNN_LSTM_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_LSTM_PARAMS_PER_LAYER
CUDNN_GRU_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_GRU_PARAMS_PER_LAYER
CUDNN_RNN_TANH_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_RNN_TANH_PARAMS_PER_LAYER
CUDNN_RNN_RELU_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_RNN_RELU_PARAMS_PER_LAYER
CUDNN_INPUT_LINEAR_MODE = cudnn_rnn_ops.CUDNN_INPUT_LINEAR_MODE
CUDNN_INPUT_SKIP_MODE = cudnn_rnn_ops.CUDNN_INPUT_SKIP_MODE
CUDNN_INPUT_AUTO_MODE = cudnn_rnn_ops.CUDNN_INPUT_AUTO_MODE
class _CudnnRNN(base_layer.Layer):
# pylint:disable=line-too-long
"""Abstract class for RNN layers with Cudnn implementation.
Cudnn RNNs have two major differences from other platform-independent RNNs tf
provides:
* Cudnn LSTM and GRU are mathematically different from their tf counterparts.
(e.g. @{tf.contrib.rnn.LSTMBlockCell} and @{tf.nn.rnn_cell.GRUCell}).
* Cudnn-trained checkpoints are not directly compatible with tf RNNs:
* They use a single opaque parameter buffer for the entire (possibly)
multi-layer, multi-directional RNN, whereas tf RNN weights are per-cell and
per-layer.
* The size and layout of the parameter buffers may change between
CUDA/CuDNN/GPU generations. Because of that, the opaque parameter variable
does not have a static shape and is not partitionable. Instead of using
partitioning to alleviate the PS's traffic load, try building a
multi-tower model and do gradient aggregation locally within the host
before updating the PS. See https://www.tensorflow.org/performance/performance_models#parameter_server_variables
for a detailed performance guide.
Consequently, if one plans to use Cudnn trained models on both GPU and CPU
for inference and training, one needs to:
* Create a CudnnOpaqueParamsSaveable subclass object to save RNN params in
canonical format. (This is done for you automatically during layer building
process.)
* When not using a Cudnn RNN class, use CudnnCompatibleRNN classes to load the
checkpoints. These classes are platform-independent and perform the same
computation as Cudnn for training and inference.
Similarly, CudnnCompatibleRNN-trained checkpoints can be loaded by CudnnRNN
classes seamlessly.
Below is a typical workflow (using LSTM as an example):
# Use Cudnn-trained checkpoints with CudnnCompatibleRNNs
```python
with tf.Graph().as_default():
lstm = CudnnLSTM(num_layers, num_units, direction, ...)
outputs, output_states = lstm(inputs, initial_states, training=True)
# If user plans to delay calling the cell with inputs, one can do
# lstm.build(input_shape)
saver = Saver()
# training subgraph
...
# Once in a while save the model.
saver.save(save_path)
# Inference subgraph for unidirectional RNN on, e.g., CPU or mobile.
with tf.Graph().as_default():
single_cell = lambda: tf.contrib.cudnn_rnn.CudnnCompatibleLSTM(num_units)
# NOTE: Even if there's only one layer, the cell needs to be wrapped in
# MultiRNNCell.
cell = tf.nn.rnn_cell.MultiRNNCell(
[single_cell() for _ in range(num_layers)])
# Leave the scope arg unset.
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state, ...)
saver = Saver()
# Create session
sess = ...
# Restores
saver.restore(sess, save_path)
# Inference subgraph for bidirectional RNN
with tf.Graph().as_default():
single_cell = lambda: tf.contrib.cudnn_rnn.CudnnCompatibleLSTM(num_units)
cells_fw = [single_cell() for _ in range(num_layers)]
cells_bw = [single_cell() for _ in range(num_layers)]
# Leave the scope arg unset.
(outputs, output_state_fw,
output_state_bw) = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
cells_fw, cells_bw, inputs, ...)
saver = Saver()
# Create session
sess = ...
# Restores
saver.restore(sess, save_path)
```
"""
# pylint:enable=line-too-long
# The following are constants defined by subclasses.
# Type of RNN cell.
_rnn_mode = None
# Number of cell weights (or biases) per layer.
_num_params_per_layer = None
# Custom SaveableObject class for the CudnnRNN class.
_saveable_cls = None
# TODO(jamesqin): support float16 CuDNN RNN
def __init__(self,
num_layers,
num_units,
input_mode=CUDNN_INPUT_LINEAR_MODE,
direction=CUDNN_RNN_UNIDIRECTION,
dropout=0.,
seed=None,
dtype=dtypes.float32,
kernel_initializer=None,
bias_initializer=None,
name=None):
"""Creates a CudnnRNN model from model spec.
Args:
num_layers: the number of layers for the RNN model.
num_units: the number of units within the RNN model.
input_mode: indicate whether there is a linear projection between the
input and the actual computation before the first layer. It can be
'linear_input', 'skip_input' or 'auto_select'.
'linear_input' (default) always applies a linear projection of input
onto RNN hidden state. (standard RNN behavior).
'skip_input' is only allowed when input_size == num_units;
'auto_select' implies 'skip_input' when input_size == num_units;
otherwise, it implies 'linear_input'.
direction: the direction model that the model operates. Can be either
'unidirectional' or 'bidirectional'
dropout: dropout rate, a number in [0, 1]. Dropout is applied on
inputs of each layer. When set to 0, dropout is disabled.
seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
for behavior.
dtype: tf.float32 or tf.float64
kernel_initializer: starting value to initialize the weight.
bias_initializer: starting value to initialize the bias
(default is all zeros).
name: VariableScope for the created subgraph; defaults to class name.
This only serves the default scope if later no scope is specified when
invoking __call__().
Raises:
ValueError: if direction is invalid. Or dtype is not supported.
"""
super(_CudnnRNN, self).__init__(dtype=dtype, name=name)
cudnn_rnn_ops.check_direction(direction)
cudnn_rnn_ops.check_input_mode(input_mode)
if dtype not in [dtypes.float32, dtypes.float64]:
raise ValueError("Only support float32, float64, provided %s" % dtype)
# The layer's self.dtype is a type name; the original DType object is kept here.
self._plain_dtype = dtype
self._num_layers = num_layers
self._num_units = num_units
self._input_mode = input_mode
self._direction = direction
self._dropout = dropout
self._seed = seed
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
# Init input_size to None, which will be set after build().
self._input_size = None
self._saveable = None
@property
def num_layers(self):
return self._num_layers
@property
def num_units(self):
return self._num_units
@property
def input_mode(self):
"""Input mode of first layer.
Indicates whether there is a linear projection between the input and the
actual computation before the first layer. It can be
* 'linear_input': (default) always applies a linear projection of input
onto RNN hidden state. (standard RNN behavior)
* 'skip_input': only allowed when input_size == num_units.
* 'auto_select': implies 'skip_input' when input_size == num_units;
otherwise, it implies 'linear_input'.
Returns:
'linear_input', 'skip_input' or 'auto_select'.
"""
return self._input_mode
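# Example of the mode selection described above (a sketch; values assumed):
# with num_units == 128,
#   input_size 128 + 'auto_select'  -> behaves like 'skip_input'
#   input_size 100 + 'auto_select'  -> behaves like 'linear_input'
#   input_size 100 + 'skip_input'   -> invalid, requires input_size == num_units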
@property
def input_size(self):
if not self._input_size:
raise ValueError(
"\'input_size\' is unknown since layer has not been built.")
return self._input_size
@property
def rnn_mode(self):
"""Type of RNN cell used.
Returns:
`lstm`, `gru`, `rnn_relu` or `rnn_tanh`.
"""
return self._rnn_mode
@property
def direction(self):
"""Returns `unidirectional` or `bidirectional`."""
return self._direction
@property
def num_dirs(self):
return 1 if self._direction == CUDNN_RNN_UNIDIRECTION else 2
@property
def saveable(self):
return self._saveable
@property
def canonical_weight_shapes(self):
"""Shapes of Cudnn canonical weight tensors."""
if not self._input_size:
raise RuntimeError(
"%s.canonical_weight_shapes invoked before input shape is known" %
type(self).__name__)
shapes = []
for i in range(self._num_layers):
shapes.extend(self._canonical_weight_shape(i))
return shapes
@property
def canonical_bias_shapes(self):
"""Shapes of Cudnn canonical bias tensors."""
return self._canonical_bias_shape(0) * self._num_layers
def _update_trainable_weights(self, getter, *args, **kwargs):
"""Custom getter for layer variables."""
# Add variables to layer's `(non_)trainable_weights` list(s).
variable = getter(*args, **kwargs)
trainable = kwargs.get("trainable", True)
if trainable and variable not in self._trainable_weights:
self._trainable_weights.append(variable)
elif not trainable and variable not in self._non_trainable_weights:
self._non_trainable_weights.append(variable)
return variable
def build(self, input_shape):
"""Create variables of the Cudnn RNN.
It can be called manually before `__call__()` or automatically through
`__call__()`. In the former case, subsequent `__call__()`s will skip
creating variables.
Args:
input_shape: network input tensor shape, a python list or a TensorShape
object with 3 dimensions.
Raises:
ValueError: if input_shape has wrong dimension or unknown 3rd dimension.
"""
if self.built:
return
input_shape = tensor_shape.TensorShape(input_shape)
if input_shape.ndims != 3:
raise ValueError("Expecting input_shape with 3 dims, got %d" %
input_shape.ndims)
if input_shape[-1].value is None:
raise ValueError("The last dimension of the inputs to `CudnnRNN` "
"should be defined. Found `None`.")
self._input_size = input_shape[-1].value
self.input_spec = base_layer.InputSpec(ndim=3, axes={-1: self._input_size})
self._set_scope(None)
# Not using the base class `add_variable()` since it calls
# `tf.get_variable()` with a callable initializer, whereas here the
# initializer is a tensor. The difference is needed to support
# forward compatibility with Cudnn.
with vs.variable_scope(
self._scope,
reuse=self.built,
custom_getter=self._update_trainable_weights):
if self._kernel_initializer is None:
self._kernel_initializer = init_ops.glorot_uniform_initializer(
seed=self._seed, dtype=self._plain_dtype)
if self._bias_initializer is None:
self._bias_initializer = init_ops.constant_initializer(
0.0, dtype=self._plain_dtype)
weights = [
self._kernel_initializer(sp, dtype=self._plain_dtype)
for sp in self.canonical_weight_shapes
]
biases = [
self._bias_initializer(sp, dtype=self._plain_dtype)
for sp in self.canonical_bias_shapes
]
opaque_params_t = self._canonical_to_opaque(weights, biases)
if vs.get_variable_scope().partitioner is not None:
logging.warn(
"Partitioner is not supported for Cudnn RNN layer variables, using "
"it will create forward-compatibility issues with future "
"CUDA/CuDNN generations.")
# Initialize opaque params with a tensor.
self.kernel = vs.get_variable(
"opaque_kernel", initializer=opaque_params_t, validate_shape=False)
# Create the saveable in the outer scope of the cudnn subgraph, so that
# an alternative subgraph with platform-independent rnn cells can load the
# checkpoints directly.
if not (self.built or vs.get_variable_scope().reuse):
self._create_saveable()
self.built = True
def call(self, inputs, initial_state=None, training=True):
"""Runs the forward step for the RNN model.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`.
initial_state: a tuple of tensor(s) of shape
`[num_layers * num_dirs, batch_size, num_units]`. If not provided, use
zero initial states. The tuple size is 2 for LSTM and 1 for other RNNs.
training: whether this operation will be used in training or inference.
Returns:
output: a tensor of shape `[time_len, batch_size, num_dirs * num_units]`.
It is a `concat([fwd_output, bak_output], axis=2)`.
output_states: a tuple of tensor(s) of the same shape and structure as
`initial_state`.
Raises:
ValueError: initial_state is not a tuple.
"""
if initial_state is not None and not isinstance(initial_state, tuple):
raise ValueError("Invalid initial_state type: %s, expecting tuple.",
type(initial_state))
dtype = self.dtype
inputs = ops.convert_to_tensor(inputs, dtype=dtype)
batch_size = array_ops.shape(inputs)[1]
if initial_state is None:
initial_state = self._zero_state(batch_size)
if self._rnn_mode == CUDNN_LSTM:
h, c = initial_state # pylint:disable=unbalanced-tuple-unpacking,unpacking-non-sequence
else:
h, = initial_state # pylint:disable=unbalanced-tuple-unpacking,unpacking-non-sequence
h = ops.convert_to_tensor(h, dtype=dtype)
if self._rnn_mode == CUDNN_LSTM:
c = ops.convert_to_tensor(c, dtype=dtype)
else:
# For model that doesn't take input_c, replace with a dummy tensor.
c = array_ops.constant([], dtype=dtype)
outputs, (output_h, output_c) = self._forward(inputs, h, c, self.kernel,
training)
if self._rnn_mode == CUDNN_LSTM:
return outputs, (output_h, output_c)
else:
return outputs, (output_h,)
def state_shape(self, batch_size):
raise NotImplementedError
def _zero_state(self, batch_size):
res = []
for sp in self.state_shape(batch_size):
res.append(array_ops.zeros(sp, dtype=self.dtype))
return tuple(res)
def _canonical_weight_shape(self, layer):
"""Shapes of Cudnn canonical weight tensors for given layer."""
if layer < 0 or layer >= self._num_layers:
raise ValueError("\'layer\' is not valid, got %s, expecting [%d, %d]" %
(layer, 0, self._num_layers-1))
if not self._input_size:
raise RuntimeError(
"%s._canonical_weight_shape invoked before input shape is known" %
type(self).__name__)
input_size = self._input_size
num_units = self._num_units
num_gates = self._num_params_per_layer // 2
is_bidi = self._direction == CUDNN_RNN_BIDIRECTION
if layer == 0:
wts_applied_on_inputs = [(num_units, input_size)] * num_gates
else:
if is_bidi:
wts_applied_on_inputs = [(num_units, 2 * num_units)] * num_gates
else:
wts_applied_on_inputs = [(num_units, num_units)] * num_gates
wts_applied_on_hidden_states = [(num_units, num_units)] * num_gates
tf_wts = wts_applied_on_inputs + wts_applied_on_hidden_states
return tf_wts if not is_bidi else tf_wts * 2
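# Worked example (a sketch, not from the source): a unidirectional CudnnLSTM
# with num_units=128 and input_size=64 has num_gates = 8 // 2 = 4, so layer 0
# yields 4 input matrices of shape (128, 64) followed by 4 recurrent matrices
# of shape (128, 128); deeper layers replace (128, 64) with (128, 128), or
# with (128, 256) when bidirectional, and the whole list is doubled for the
# backward direction.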
def _canonical_bias_shape(self, unused_layer):
"""Shapes of Cudnn canonical bias tensors for given layer."""
num_dirs = 1 if self._direction == CUDNN_RNN_UNIDIRECTION else 2
return [[self._num_units]] * num_dirs * self._num_params_per_layer
def _canonical_to_opaque(self, cu_weights, cu_biases):
if not self._input_size:
raise RuntimeError(
"%s._canonical_to_opaque invoked before input shape is known" %
type(self).__name__)
return cudnn_rnn_ops.cudnn_rnn_canonical_to_opaque_params(
rnn_mode=self._rnn_mode,
num_layers=self._num_layers,
num_units=self._num_units,
input_size=self._input_size,
weights=cu_weights,
biases=cu_biases,
input_mode=self._input_mode,
direction=self._direction)
def _forward(self, inputs, h, c, opaque_params, training):
output, output_h, output_c = cudnn_rnn_ops._cudnn_rnn( # pylint:disable=protected-access
inputs,
h,
c,
opaque_params,
training,
self._rnn_mode,
input_mode=self._input_mode,
direction=self._direction,
dropout=self._dropout,
seed=self._seed)
return output, (output_h, output_c)
def _create_saveable(self):
"""Create custom saveable for the Cudnn layer.
Called during layer building process to make sharing checkpoints between
Cudnn and Cudnn-compatible RNNs easy.
Returns:
a `CudnnOpaqueParamsSaveable` object.
Raises:
RuntimeError: if any custom saveable is already created for this layer.
"""
if self._saveable is not None:
raise RuntimeError("Cudnn saveable already created.")
self._saveable = self._saveable_cls( # pylint:disable=not-callable
self.trainable_variables[0],
self.num_layers,
self.num_units,
self.input_size,
self.input_mode,
self.direction,
scope=vs.get_variable_scope(),
name="%s_saveable" % self.trainable_variables[0].op.name)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, self._saveable)
class CudnnLSTM(_CudnnRNN):
"""Cudnn implementation of LSTM layer."""
_rnn_mode = CUDNN_LSTM
_num_params_per_layer = CUDNN_LSTM_PARAMS_PER_LAYER
_saveable_cls = cudnn_rnn_ops.CudnnLSTMSaveable
def state_shape(self, batch_size):
"""Shape of Cudnn LSTM states.
Shape is a 2-element tuple. Each is
[num_layers * num_dirs, batch_size, num_units]
Args:
batch_size: an int
Returns:
a tuple of python arrays.
"""
return ([self.num_layers * self.num_dirs, batch_size, self.num_units],
[self.num_layers * self.num_dirs, batch_size, self.num_units])
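# Shape example (illustrative): a 2-layer bidirectional CudnnLSTM with
# num_units=64 called with batch_size=32 returns
#   ([4, 32, 64], [4, 32, 64])   # (h, c) shapes, 4 = num_layers * num_dirs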
class _CudnnRNNNoInputC(_CudnnRNN):
"""Abstract simple CudnnRNN layer without input_c."""
def state_shape(self, batch_size):
"""Shape of the state of Cudnn RNN cells w/o. input_c.
Shape is a 1-element tuple,
[num_layers * num_dirs, batch_size, num_units]
Args:
batch_size: an int
Returns:
a tuple of python arrays.
"""
return [self.num_layers * self.num_dirs, batch_size, self.num_units],
class CudnnGRU(_CudnnRNNNoInputC):
"""Cudnn implementation of the GRU layer."""
_rnn_mode = CUDNN_GRU
_num_params_per_layer = CUDNN_GRU_PARAMS_PER_LAYER
_saveable_cls = cudnn_rnn_ops.CudnnGRUSaveable
class CudnnRNNTanh(_CudnnRNNNoInputC):
"""Cudnn implementation of the RNN-tanh layer."""
_rnn_mode = CUDNN_RNN_TANH
_num_params_per_layer = CUDNN_RNN_TANH_PARAMS_PER_LAYER
_saveable_cls = cudnn_rnn_ops.CudnnRNNTanhSaveable
class CudnnRNNRelu(_CudnnRNNNoInputC):
"""Cudnn implementation of the RNN-relu layer."""
_rnn_mode = CUDNN_RNN_RELU
_num_params_per_layer = CUDNN_RNN_RELU_PARAMS_PER_LAYER
_saveable_cls = cudnn_rnn_ops.CudnnRNNReluSaveable
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for baseline.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.estimator.canned import baseline
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import optimizer
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
# pylint rules which are disabled by default for test files.
# pylint: disable=invalid-name,protected-access,missing-docstring
# Names of variables created by model.
BIAS_NAME = 'baseline/bias'
def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
expected = ops.convert_to_tensor(expected, name='expected')
actual = ops.convert_to_tensor(actual, name='actual')
rdiff = math_ops.abs(expected - actual, 'diff') / math_ops.abs(expected)
rtol = ops.convert_to_tensor(rtol, name='rtol')
return check_ops.assert_less(
rdiff,
rtol,
data=('Condition expected =~ actual did not hold element-wise:'
'expected = ', expected, 'actual = ', actual, 'rdiff = ', rdiff,
'rtol = ', rtol,),
name=scope)
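# Numeric sketch (not part of the original tests) of the relative check above:
# expected=4.0, actual=4.0002 gives rdiff = |4.0 - 4.0002| / 4.0 = 5e-5, which
# passes with the default rtol of 1e-4, while actual=4.01 (rdiff 2.5e-3) fails.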
def save_variables_to_ckpt(model_dir):
init_all_op = [variables.global_variables_initializer()]
with tf_session.Session() as sess:
sess.run(init_all_op)
saver.Saver().save(sess, os.path.join(model_dir, 'model.ckpt'))
def queue_parsed_features(feature_map):
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(input_queue,
[input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
def sorted_key_dict(unsorted_dict):
return {k: unsorted_dict[k] for k in sorted(unsorted_dict)}
def sigmoid(x):
return 1 / (1 + np.exp(-1.0 * x))
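# Worked example (illustrative) of the loss constants used in the tests below:
# with logits = -1 and label = 1, sigmoid cross-entropy is
#   -log(sigmoid(-1)) = -log(0.2689) ~= 1.3133
# and for label = 0 it is -log(1 - sigmoid(-1)) ~= 0.3133.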
def _baseline_regressor_fn(*args, **kwargs):
return baseline.BaselineRegressor(*args, **kwargs)
def _baseline_classifier_fn(*args, **kwargs):
return baseline.BaselineClassifier(*args, **kwargs)
# Tests for Baseline Regressor.
# TODO(b/36813849): Add tests with dynamic shape inputs using placeholders.
class BaselineRegressorEvaluationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_evaluation_for_simple_data(self):
with ops.Graph().as_default():
variables.Variable([13.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
eval_metrics = baseline_regressor.evaluate(
input_fn=lambda: ({'age': ((1,),)}, ((10.,),)), steps=1)
# Logit is bias = 13, while label is 10. Loss is 3**2 = 9.
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 9.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_batch(self):
"""Tests evaluation for batch_size==2."""
with ops.Graph().as_default():
variables.Variable([13.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
eval_metrics = baseline_regressor.evaluate(
input_fn=lambda: ({'age': ((1,), (1,))}, ((10.,), (10.,))), steps=1)
# Logit is bias = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the sum over batch = 9 + 9 = 18
# Average loss is the average over batch = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 18.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_weights(self):
"""Tests evaluation with weights."""
with ops.Graph().as_default():
variables.Variable([13.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
def _input_fn():
features = {'age': ((1,), (1,)), 'weights': ((1.,), (2.,))}
labels = ((10.,), (10.,))
return features, labels
baseline_regressor = _baseline_regressor_fn(
weight_column='weights',
model_dir=self._model_dir)
eval_metrics = baseline_regressor.evaluate(input_fn=_input_fn, steps=1)
# Logit is bias = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the weighted sum over the batch = 9 + 2*9 = 27
# Average loss is the weighted average = (9 + 2*9) / (1 + 2) = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 27.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_for_multi_dimensions(self):
label_dim = 2
with ops.Graph().as_default():
variables.Variable([46.0, 58.0], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(
label_dimension=label_dim,
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={
'age': np.array([[2., 4., 5.]]),
},
y=np.array([[46., 58.]]),
batch_size=1,
num_epochs=None,
shuffle=False)
eval_metrics = baseline_regressor.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
metric_keys.MetricKeys.PREDICTION_MEAN,
metric_keys.MetricKeys.LABEL_MEAN, ops.GraphKeys.GLOBAL_STEP),
eval_metrics.keys())
# Logit is bias which is [46, 58]
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
class BaselineRegressorPredictTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_1d(self):
"""Tests predict when all variables are one-dimensional."""
with ops.Graph().as_default():
variables.Variable([.2], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[2.]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = baseline_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# BaselineRegressor ignores the features, so prediction = bias = 0.2
self.assertAllClose([[.2]], predicted_scores)
def testMultiDim(self):
"""Tests predict when all variables are multi-dimenstional."""
batch_size = 2
label_dimension = 3
with ops.Graph().as_default():
variables.Variable( # shape=[label_dimension]
[.2, .4, .6], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(
label_dimension=label_dimension,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
# x shape=[batch_size, x_dim]
x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predictions = baseline_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# score = bias, shape=[batch_size, label_dimension]
self.assertAllClose([[0.2, 0.4, 0.6], [0.2, 0.4, 0.6]],
predicted_scores)
class BaselineRegressorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, label_dimension, prediction_length):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(input_dimension,))
]
est = _baseline_regressor_fn(
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['predictions'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
# Pandas DataFrame naturally supports 1 dim data only.
label_dimension = 1
input_dimension = label_dimension
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum[:label_dimension])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
class BaselineRegressorTrainingTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s:0' % BIAS_NAME
]
def _minimize(loss, global_step=None, var_list=None):
trainable_vars = var_list or ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
if global_step is not None:
return distribute_lib.increment_var(global_step)
return control_flow_ops.no_op()
assert_loss = assert_close(
math_ops.to_float(expected_loss, name='expected'),
loss,
name='assert_loss')
with ops.control_dependencies((assert_loss,)):
if global_step is not None:
return distribute_lib.increment_var(global_step)
return control_flow_ops.no_op()
mock_optimizer = test.mock.NonCallableMock(
spec=optimizer.Optimizer,
wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(self,
label_dimension,
expected_global_step,
expected_bias=None):
shapes = {
name: shape
for (name, shape) in checkpoint_utils.list_variables(self._model_dir)
}
self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
self.assertEqual(expected_global_step,
checkpoint_utils.load_variable(self._model_dir,
ops.GraphKeys.GLOBAL_STEP))
self.assertEqual([label_dimension], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertEqual(expected_bias,
checkpoint_utils.load_variable(self._model_dir,
BIAS_NAME))
def testFromScratchWithDefaultOptimizer(self):
# Create BaselineRegressor.
label = 5.
age = 17
baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
baseline_regressor.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self._assert_checkpoint(label_dimension=1, expected_global_step=num_steps)
def testTrainWithOneDimLabel(self):
label_dimension = 1
batch_size = 20
est = _baseline_regressor_fn(
label_dimension=label_dimension,
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(label_dimension=1, expected_global_step=200)
def testTrainWithOneDimWeight(self):
label_dimension = 1
batch_size = 20
est = _baseline_regressor_fn(
label_dimension=label_dimension,
weight_column='w',
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1,
'w': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(label_dimension=1, expected_global_step=200)
def testFromScratch(self):
# Create BaselineRegressor.
label = 5.
age = 17
# loss = (logits - label)^2 = (0 - 5.)^2 = 25.
mock_optimizer = self._mock_optimizer(expected_loss=25.)
baseline_regressor = _baseline_regressor_fn(
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
baseline_regressor.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
label_dimension=1,
expected_global_step=num_steps,
expected_bias=[0.])
def testFromCheckpoint(self):
# Create initial checkpoint.
bias = 7.0
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable([bias], name=BIAS_NAME)
variables.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = bias = 7.
# loss = (logits - label)^2 = (7 - 5)^2 = 4
mock_optimizer = self._mock_optimizer(expected_loss=4.)
baseline_regressor = _baseline_regressor_fn(
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
baseline_regressor.train(
input_fn=lambda: ({'age': ((17,),)}, ((5.,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
label_dimension=1,
expected_global_step=initial_global_step + num_steps,
expected_bias=[bias])
def testFromCheckpointMultiBatch(self):
# Create initial checkpoint.
bias = 5.0
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable([bias], name=BIAS_NAME)
variables.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = bias
# logits[0] = 5.
# logits[1] = 5.
# loss = sum((logits - label)^2) = (5 - 5)^2 + (5 - 3)^2 = 4
mock_optimizer = self._mock_optimizer(expected_loss=4.)
baseline_regressor = _baseline_regressor_fn(
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
baseline_regressor.train(
input_fn=lambda: ({'age': ((17,), (15,))}, ((5.,), (3.,))),
steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
label_dimension=1,
expected_global_step=initial_global_step + num_steps,
expected_bias=bias)
# Tests for Baseline Classifier.
class BaselineClassifierTrainingTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s:0' % BIAS_NAME
]
def _minimize(loss, global_step):
trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(
expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
return distribute_lib.increment_var(global_step)
assert_loss = assert_close(
math_ops.to_float(expected_loss, name='expected'),
loss,
name='assert_loss')
with ops.control_dependencies((assert_loss,)):
return distribute_lib.increment_var(global_step)
mock_optimizer = test.mock.NonCallableMock(
spec=optimizer.Optimizer,
wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(
self, n_classes, expected_global_step, expected_bias=None):
logits_dimension = n_classes if n_classes > 2 else 1
shapes = {
name: shape for (name, shape) in
checkpoint_utils.list_variables(self._model_dir)
}
self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
self.assertEqual(
expected_global_step,
checkpoint_utils.load_variable(
self._model_dir, ops.GraphKeys.GLOBAL_STEP))
self.assertEqual([logits_dimension], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertAllEqual(expected_bias,
checkpoint_utils.load_variable(
self._model_dir, BIAS_NAME))
def _testFromScratchWithDefaultOptimizer(self, n_classes):
label = 0
age = 17
est = baseline.BaselineClassifier(
n_classes=n_classes,
model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self._assert_checkpoint(n_classes, num_steps)
def testBinaryClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=2)
def testMultiClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=4)
def _testTrainWithTwoDimsLabel(self, n_classes):
batch_size = 20
est = baseline.BaselineClassifier(
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_2,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=2)
def testMultiClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=4)
def _testTrainWithOneDimLabel(self, n_classes):
batch_size = 20
est = baseline.BaselineClassifier(
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=2)
def testMultiClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=4)
def _testTrainWithTwoDimsWeight(self, n_classes):
batch_size = 20
est = baseline.BaselineClassifier(
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1, 'w': data_rank_2}, y=data_rank_1,
batch_size=batch_size, num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=2)
def testMultiClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=4)
def _testTrainWithOneDimWeight(self, n_classes):
batch_size = 20
est = baseline.BaselineClassifier(
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1, 'w': data_rank_1}, y=data_rank_1,
batch_size=batch_size, num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=2)
def testMultiClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=4)
def _testFromScratch(self, n_classes):
label = 1
age = 17
# For binary classifier:
# loss = sigmoid_cross_entropy(logits, label) where logits=0 (weights are
# all zero initially) and label = 1 so,
# loss = 1 * -log ( sigmoid(logits) ) = 0.69315
# For multi class classifier:
# loss = cross_entropy(logits, label) where logits are all 0s (weights are
# all zero initially) and label = 1 so,
# loss = 1 * -log ( 1.0 / n_classes )
# For this particular test case, as logits are same, the formula
# 1 * -log ( 1.0 / n_classes ) covers both binary and multi class cases.
mock_optimizer = self._mock_optimizer(
expected_loss=-1 * math.log(1.0/n_classes))
est = baseline.BaselineClassifier(
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=num_steps,
expected_bias=[0.] if n_classes == 2 else [.0] * n_classes)
def testBinaryClassesFromScratch(self):
self._testFromScratch(n_classes=2)
def testMultiClassesFromScratch(self):
self._testFromScratch(n_classes=4)
def _testFromCheckpoint(self, n_classes):
# Create initial checkpoint.
label = 1
age = 17
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# For binary classifier:
# logits = bias = -1.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss = 1 * -log ( sigmoid(-1) ) = 1.3133
# For multi class classifier:
# loss = cross_entropy(logits, label)
# where logits = bias and label = 1
# so, loss = 1 * -log ( softmax(logits)[1] )
if n_classes == 2:
expected_loss = 1.3133
else:
logits = bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[label])
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = baseline.BaselineClassifier(
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_bias=bias)
def testBinaryClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=2)
def testMultiClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=4)
def _testFromCheckpointFloatLabels(self, n_classes):
"""Tests float labels for binary classification."""
# Create initial checkpoint.
if n_classes > 2:
return
label = 0.8
age = 17
bias = [-1.0]
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = bias = -1.
# loss = sigmoid_cross_entropy(logits, label)
# => loss = -0.8 * log(sigmoid(-1)) -0.2 * log(sigmoid(+1)) = 1.1132617
mock_optimizer = self._mock_optimizer(expected_loss=1.1132617)
est = baseline.BaselineClassifier(
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
def testBinaryClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=2)
def testMultiClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=4)
def _testFromCheckpointMultiBatch(self, n_classes):
# Create initial checkpoint.
label = [1, 0]
age = [17, 18.5]
# For the binary case, the bias has shape (1,). For the multi class case,
# the shape is (n_classes,). Initialize every entry to -1.0 so the expected
# loss below can be computed from the bias alone.
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# For binary classifier:
# logits = bias
# logits[0] = -1.
# logits[1] = -1.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss[0] = 1 * -log ( sigmoid(-1) ) = 1.3133
# loss[1] = (1 - 0) * -log ( 1- sigmoid(-1) ) = 0.3132
# For multi class classifier:
# loss = cross_entropy(logits, label)
# where logits = bias and label = [1, 0]
# so, loss = 1 * -log ( softmax(logits)[label] )
if n_classes == 2:
expected_loss = (1.3133 + 0.3132)
else:
# Expand logits since batch_size=2
logits = bias * np.ones(shape=(2, 1))
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = expected_loss_0 + expected_loss_1
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = baseline.BaselineClassifier(
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': (age)}, (label)),
steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_bias=bias)
def testBinaryClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=2)
def testMultiClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=4)
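# A small numeric sketch (the helper name below is illustrative, not part of
# the test suite) reproducing the hard-coded loss constants used in the
# comments above: -log(sigmoid(-1)) ~= 1.3133 and -log(1 - sigmoid(-1)) ~= 0.3132.
def _expected_sigmoid_ce_losses():
  p = 1. / (1. + math.exp(1.))          # sigmoid(-1) ~= 0.2689
  loss_label_1 = -1 * math.log(p)       # label 1 => ~1.3133
  loss_label_0 = -1 * math.log(1. - p)  # label 0 => ~0.3132
  return loss_label_1, loss_label_0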
class BaselineClassifierEvaluationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_evaluation_for_simple_data(self, n_classes):
label = 1
age = 1.
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = _baseline_classifier_fn(
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=1)
if n_classes == 2:
# Binary classes: loss = -log(sigmoid(-1)) = 1.3133
# Prediction = sigmoid(-1) = 0.2689
expected_metrics = {
metric_keys.MetricKeys.LOSS: 1.3133,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: 1.3133,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.2689,
metric_keys.MetricKeys.LABEL_MEAN: 1.,
metric_keys.MetricKeys.ACCURACY_BASELINE: 1,
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 1.,
}
else:
# Multi classes: loss = 1 * -log ( softmax(logits)[label] )
logits = bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[label])
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=2)
def test_multi_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=4)
def _test_evaluation_batch(self, n_classes):
"""Tests evaluation for batch_size==2."""
label = [1, 0]
age = [17., 18.]
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = _baseline_classifier_fn(
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': (age)}, (label)), steps=1)
if n_classes == 2:
# Logits are (-1., -1.) labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(-1)) = 0.3132
# Prediction = sigmoid(-1) = 0.2689
expected_loss = 1.3133 + 0.3132
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
metric_keys.MetricKeys.ACCURACY: 0.5,
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.2689,
metric_keys.MetricKeys.LABEL_MEAN: 0.5,
metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5,
metric_keys.MetricKeys.AUC: 0.5,
metric_keys.MetricKeys.AUC_PR: 0.75,
}
else:
# Expand logits since batch_size=2
logits = bias * np.ones(shape=(2, 1))
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = expected_loss_0 + expected_loss_1
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
metric_keys.MetricKeys.ACCURACY: 0.5,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=2)
def test_multi_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=4)
def _test_evaluation_weights(self, n_classes):
"""Tests evaluation with weights."""
label = [1, 0]
age = [17., 18.]
weights = [1., 2.]
    # For the binary case, the bias has shape (1,). For the multi-class case,
    # the shape is (n_classes,). Initialize every bias entry to -1.0.
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = _baseline_classifier_fn(
n_classes=n_classes,
weight_column='w',
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': (age), 'w': (weights)}, (label)), steps=1)
if n_classes == 2:
# Logits are (-1., -1.) labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(-1)) = 0.3132
# weights = [1., 2.]
expected_loss = 1.3133 * 1. + 0.3132 * 2.
loss_mean = expected_loss / (1.0 + 2.0)
label_mean = np.average(label, weights=weights)
logits = [-1, -1]
logistics = sigmoid(np.array(logits))
predictions_mean = np.average(logistics, weights=weights)
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
metric_keys.MetricKeys.ACCURACY: 2. / (1. + 2.),
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: predictions_mean,
metric_keys.MetricKeys.LABEL_MEAN: label_mean,
metric_keys.MetricKeys.ACCURACY_BASELINE: (
max(label_mean, 1-label_mean)),
metric_keys.MetricKeys.AUC: 0.5,
metric_keys.MetricKeys.AUC_PR: 2. / (1. + 2.),
}
else:
# Multi classes: unweighted_loss = 1 * -log ( soft_max(logits)[label] )
# Expand logits since batch_size=2
logits = bias * np.ones(shape=(2, 1))
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
loss_mean = np.average([expected_loss_0, expected_loss_1],
weights=weights)
expected_loss = loss_mean * np.sum(weights)
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
metric_keys.MetricKeys.ACCURACY: 2. / (1. + 2.),
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_weights(self):
self._test_evaluation_weights(n_classes=2)
def test_multi_classes_evaluation_weights(self):
self._test_evaluation_weights(n_classes=4)
class BaselineClassifierPredictTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _testPredictions(self, n_classes, label_vocabulary, label_output_fn):
"""Tests predict when all variables are one-dimensional."""
age = 1.
bias = [10.0] if n_classes == 2 else [10.0] * n_classes
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = _baseline_classifier_fn(
label_vocabulary=label_vocabulary,
n_classes=n_classes,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'age': np.array([[age]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = list(est.predict(input_fn=predict_input_fn))
if n_classes == 2:
scalar_logits = bias[0]
two_classes_logits = [0, scalar_logits]
two_classes_logits_exp = np.exp(two_classes_logits)
softmax = two_classes_logits_exp / two_classes_logits_exp.sum()
expected_predictions = {
'class_ids': [1],
'classes': [label_output_fn(1)],
'logistic': [sigmoid(np.array(scalar_logits))],
'logits': [scalar_logits],
'probabilities': softmax,
}
else:
onedim_logits = np.array(bias)
class_ids = onedim_logits.argmax()
logits_exp = np.exp(onedim_logits)
softmax = logits_exp / logits_exp.sum()
expected_predictions = {
'class_ids': [class_ids],
'classes': [label_output_fn(class_ids)],
'logits': onedim_logits,
'probabilities': softmax,
}
self.assertEqual(1, len(predictions))
# assertAllClose cannot handle byte type.
self.assertEqual(expected_predictions['classes'], predictions[0]['classes'])
expected_predictions.pop('classes')
predictions[0].pop('classes')
self.assertAllClose(sorted_key_dict(expected_predictions),
sorted_key_dict(predictions[0]))
def testBinaryClassesWithoutLabelVocabulary(self):
n_classes = 2
self._testPredictions(n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testBinaryClassesWithLabelVocabulary(self):
n_classes = 2
self._testPredictions(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i)
for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
def testMultiClassesWithoutLabelVocabulary(self):
n_classes = 4
self._testPredictions(
n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testMultiClassesWithLabelVocabulary(self):
n_classes = 4
self._testPredictions(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i)
for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
class BaselineClassifierIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, n_classes, train_input_fn, eval_input_fn,
predict_input_fn, input_dimension, prediction_length):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(input_dimension,))
]
est = _baseline_classifier_fn(
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['classes'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, 1), predictions.shape)
# EXPORT
feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def _test_numpy_input_fn(self, n_classes):
"""Tests complete flow with numpy_input_fn."""
input_dimension = 4
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=2)
def test_multi_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=4)
def _test_pandas_input_fn(self, n_classes):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
    # Pandas DataFrame naturally supports 1-dim data only.
input_dimension = 1
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
target = np.array([1, 0, 1, 0], dtype=np.int32)
x = pd.DataFrame({'x': data})
y = pd.Series(target)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=2)
def test_multi_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=4)
def _test_input_fn_from_parse_example(self, n_classes):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size, dtype=np.int64)
serialized_examples = []
for x, y in zip(data, target):
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=x)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[y])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=2)
def test_multi_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=4)
# Tests for Baseline logit_fn.
class BaselineLogitFnTest(test.TestCase):
def test_basic_logit_correctness(self):
"""baseline_logit_fn simply returns the bias variable."""
with ops.Graph().as_default():
logit_fn = baseline._baseline_logit_fn_builder(num_outputs=2)
logits = logit_fn(features={'age': [[23.], [31.]]})
with variable_scope.variable_scope('baseline', reuse=True):
bias_var = variable_scope.get_variable('bias')
with tf_session.Session() as sess:
sess.run([variables.global_variables_initializer()])
self.assertAllClose([[0., 0.], [0., 0.]], logits.eval())
sess.run(bias_var.assign([10., 5.]))
self.assertAllClose([[10., 5.], [10., 5.]], logits.eval())
if __name__ == '__main__':
test.main()
|
|
# Copyright 2015 AirPlug Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# views.py
from flask import abort, Blueprint, flash, redirect, render_template, request, url_for, g, jsonify
import re
import urllib
from sqlalchemy.exc import IntegrityError
from server.chartviewer.models import *
from server.mapviewer.models import *
# from server.mapviewer.models import _query_by_str
from server.mapviewer.forms import *
from server.util import *
from server.users.views import auth
import simplejson as json
import server
mapviewer = Blueprint("mapviewer", __name__)
@mapviewer.route("/list", methods=['GET', 'POST', 'OPTIONS'])
@crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
@auth.login_required
def dataset():
userid = g.user.opid
selection = list_query_for_map(userid) # after add userid parameter
return json.dumps(selection)
@mapviewer.route("/menus/sqlid/<string:sqlid>", methods=['GET', 'POST', 'OPTIONS'])
@crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
@auth.login_required
def menusBysqlid(sqlid):
userid = g.user.opid
selection = listMenus(sqlid, userid)
return json.dumps(selection)
@mapviewer.route("/menus/where/sqlid/<string:sqlid>", methods=['GET', 'POST', 'OPTIONS'])
@crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
@auth.login_required
def whereMenusBysqlid(sqlid):
selection = list_where_menus(sqlid)
return json.dumps(selection)
@mapviewer.route("/menus/select/sqlid/<string:sqlid>", methods=['GET', 'POST', 'OPTIONS'])
@crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
@auth.login_required
def selectMenusBysqlid1(sqlid):
selection = list_select_menus(sqlid)
return json.dumps(selection)
@mapviewer.route("/templates/sqlid/<string:sqlid>/selid/<string:selid>", methods=['GET', 'POST', 'OPTIONS'])
@crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
@auth.login_required
def tmplsBysqlidByselid(sqlid, selid):
userid = g.user.opid
selection = listTmpl(sqlid, selid, userid)
return json.dumps(selection)
@mapviewer.route("/execute/sqlid/<string:sqlid>/selid/<string:selid>/tmplid/<string:tmplid>", methods=['GET', 'POST', 'OPTIONS'])
@crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
@auth.login_required
def execute(sqlid, selid, tmplid):
userid = g.user.opid
strparam = request.args.get('whvalid')
strparam = urllib.unquote(strparam)
whvalid = json.loads(strparam)
sdt = request.args.get('sdt')
edt = request.args.get('edt')
limit = request.args.get('limit')
bounds = request.args.get('bounds')
roundlevel = request.args.get('roundlevel')
if whvalid is None:
whvalid = []
elif whvalid == '[]':
whvalid = []
if bounds:
print '--- bounds is ??? '
print bounds
bounds = bounds.split(',')
print roundlevel
selection = executeQuery(getExeQuery(sqlid, selid, whvalid, tmplid, userid, sdt, edt, limit, bounds, roundlevel))
print json.dumps(selection, use_decimal=True)
return json.dumps(selection, use_decimal=True)
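# Minimal client-side sketch (illustrative only; the host, credentials and the
# 'requests' dependency are assumptions, not part of this module). It shows the
# query-string shape execute() expects: 'whvalid' is a URL-encoded JSON list,
# plus optional sdt/edt/limit/bounds/roundlevel parameters.
def _example_execute_request():
    import requests  # assumed to be available on the client side
    whvalid = urllib.quote(json.dumps([]))
    url = ('http://localhost:5000/execute/sqlid/1/selid/2/tmplid/3'
           '?whvalid=%s&sdt=20150101&edt=20150131&limit=100' % whvalid)
    return requests.get(url, auth=('user', 'password'))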
@mapviewer.route("/description/sqlid/<string:sqlid>", methods=['GET', 'POST', 'OPTIONS'])
@crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
@auth.login_required
def descriptionById(sqlid):
userid = g.user.opid
selection = descriptions(sqlid, userid)
return json.dumps(selection)
@mapviewer.route("/", methods=("GET", "POST"))
def index():
return jsonify({'index': ''})
@mapviewer.route("/item/execute", methods=['GET', 'POST', 'OPTIONS']) # remove 'ds'
@crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
@auth.login_required
def execute_mapboard_item_by_query():
userid = g.user.id
query_str = request.form['query']
bounds = request.form['bounds']
roundlevel = request.form['roundlevel']
template = request.form['template']
data = map_query_by_str(query_str, bounds, roundlevel, template)
return json.dumps(data)
################################################################################################################
# about MapViewer REST url
################################################################################################################
@mapviewer.route("/board/query/list", methods=['GET', 'POST', 'OPTIONS']) # remove 'ds'
@crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
@auth.login_required
def list_qurey_for_map():
userid = g.user.id
data = q_list_query_for_map(g.user.id)
return json.dumps(data)
@mapviewer.route("/board/templates/list", methods=['GET', 'POST', 'OPTIONS']) # remove 'ds'
@crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
@auth.login_required
def list_templates_for_map():
userid = g.user.id
data = q_list_templates_for_map()
return json.dumps(data)
@mapviewer.route("/board/list", methods=['GET', 'POST', 'OPTIONS']) # remove 'ds'
@crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
@auth.login_required
def list_mapboard():
userid = g.user.id
data = list_mapboard_from_model(g.user.id)
return json.dumps(data)
@mapviewer.route("/board/view/<int:mapboard_id>", methods=['GET', 'POST', 'OPTIONS']) # remove 'ds'
@crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
@auth.login_required
def view_mapboard(mapboard_id):
userid = g.user.id
query = Mapboard.query.filter_by(adminid=userid).filter_by(id=mapboard_id)
data = query_to_list_json(query)
return json.dumps(data)
@mapviewer.route("/board/add", methods=['GET', 'POST', 'OPTIONS']) # remove 'ds'
@crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
@auth.login_required
def add_mapboard():
userid = g.user.id
print 'add_mapboard called '
print 'request.form in all is ' + str(request.form)
form = MapboardForm(csrf_enabled=False)
if request.method == 'POST' and form.validate():
form.adminid.data = userid
print 'form.data is ' + str(form.data)
instance = Mapboard.create(**form.data)
print 'add result instance -------- '
print instance
print form.errors
# query = Mapboard.query.order_by(Mapboard.id.desc()).filter_by(id=instance.id).filter_by(adminid=userid).limit(1)
data = list_mapboard_from_model(g.user.id)
return json.dumps(data)
@mapviewer.route("/board/edit/<int:mapboard_id>", methods=['GET', 'POST', 'OPTIONS'])
@crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
@auth.login_required
def edit_mapboard(mapboard_id):
userid = g.user.id
print 'edit_mapboard called '
print 'request.form in all is ' + str(request.form)
form = MapboardForm(csrf_enabled=False)
print 'form.data is ' + str(form.data)
instance = Mapboard.get_or_404(mapboard_id)
if instance and request.method == 'POST' and form.validate():
instance.update(**form.data)
# query = Mapboard.query.filter(Mapboard.id == mapboard_id).filter_by(adminid=userid)
data = list_mapboard_from_model(g.user.id)
return json.dumps(data)
@mapviewer.route("/board/delete/<int:mapboard_id>", methods=['GET', 'POST', 'OPTIONS'])
@crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
@auth.login_required
def delete_mapboard(mapboard_id):
userid = g.user.opid
print '----- delete_mapboard ------'
form = MapboardForm(csrf_enabled=False)
print 'form.data:' + str(form)
items = query_to_list_json(MapboardItem.query.filter_by(mapboard_id=mapboard_id))
for item in items:
inst = MapboardItem.get(item['id'])
if inst:
inst.delete()
instance = Mapboard.get_or_404(mapboard_id)
# print str(dsquery)
if instance:
instance.delete()
data = list_mapboard_from_model(g.user.id)
return json.dumps(data)
# ################################################################################################################
# # about Dashboard Item REST url
# ################################################################################################################
@mapviewer.route("/board/item/list/<int:mapboard_id>", methods=['GET', 'POST', 'OPTIONS']) # remove 'ds'
@crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
@auth.login_required
def list_mapboard_item(mapboard_id):
userid = g.user.id
query = MapboardItem.query.filter_by(mapboard_id=mapboard_id)
# data = q_list_mapboard_items(mapboard_id)
data = query_to_list_json(query)
# print data
return json.dumps(data)
@mapviewer.route("/board/item/view/<int:mapboard_id>/<int:item_id>", methods=['GET', 'POST', 'OPTIONS']) # remove 'ds'
@crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
@auth.login_required
def view_mapboard_item(mapboard_id, item_id):
userid = g.user.id
query = MapboardItem.query.filter_by(mapboard_id=mapboard_id).filter_by(id=item_id)
data = query_to_list_json(query)
return json.dumps(data)
@mapviewer.route("/board/item/add/<int:mapboard_id>", methods=['GET', 'POST', 'OPTIONS']) # remove 'ds'
@crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
@auth.login_required
def add_mapboard_item(mapboard_id):
userid = g.user.id
print 'add_mapboard_item called '
print 'request.form in all is ' + str(request.form)
# print 'request.form.get in sqlid is ' + str(request.form.get('sqlid',None))
form = MapboardItemForm(csrf_enabled=False)
if request.method == 'POST' and form.validate():
print 'form.data is ' + str(form.data)
instance = MapboardItem.create(**form.data)
urlstr = request.form['url']
asqlid = re.search(r'\/sqlid\/(\d)*', urlstr, flags=0).group()[len('/sqlid/'):]
aselid = re.search(r'\/selid\/(\d)*', urlstr, flags=0).group()[len('/selid/'):]
atmplid = re.search(r'\/tmplid\/(\d)*', urlstr, flags=0).group()[len('/tmplid/'):]
args = {'sqlid': int(asqlid), 'selid': int(aselid), 'tmplid': int(atmplid)}
try:
DsViewTmpl.get_or_create(**args)
except (RuntimeError, TypeError, NameError, IntegrityError) as e:
print e
pass
print form.errors
query = MapboardItem.query.filter_by(mapboard_id=mapboard_id).order_by(MapboardItem.id.desc()).limit(1)
data = query_to_list_json(query)
print '-------- add data return value end ----------'
print data
return json.dumps(data)
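# Small sketch (the stored URL value is an assumption) of what the re.search
# calls above extract from a mapboard item 'url':
def _example_extract_ids():
    urlstr = '/execute/sqlid/12/selid/3/tmplid/7'
    asqlid = re.search(r'\/sqlid\/(\d)*', urlstr, flags=0).group()[len('/sqlid/'):]
    aselid = re.search(r'\/selid\/(\d)*', urlstr, flags=0).group()[len('/selid/'):]
    atmplid = re.search(r'\/tmplid\/(\d)*', urlstr, flags=0).group()[len('/tmplid/'):]
    return {'sqlid': int(asqlid), 'selid': int(aselid), 'tmplid': int(atmplid)}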
@mapviewer.route("/board/item/edit/<int:mapboard_id>/<int:item_id>", methods=['GET', 'POST', 'OPTIONS'])
@crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
@auth.login_required
def edit_mapboard_item(mapboard_id, item_id):
userid = g.user.id
print 'edit_mapboard_item called request.form in all is ' + str(request.form)
form = MapboardItemForm(csrf_enabled=False)
print 'form.data is ' + str(form.data)
instance = MapboardItem.get_or_404(item_id)
if instance and request.method == 'POST' and form.validate():
instance.update(**form.data)
query = MapboardItem.query.filter(MapboardItem.mapboard_id == mapboard_id).filter(MapboardItem.id == item_id)
data = query_to_list_json(query)
return json.dumps(data)
@mapviewer.route("/board/item/delete/<int:mapboard_id>/<int:item_id>", methods=['GET', 'POST', 'OPTIONS'])
@crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
@auth.login_required
def delete_mapboard_item(mapboard_id, item_id):
userid = g.user.opid
print '----- delete_mapboard_item ------'
form = MapboardItemForm(csrf_enabled=False)
print 'form.data:' + str(form)
print 'delete_mapboard_item is %s' % (item_id)
instance = MapboardItem.get_or_404(item_id)
print str(instance)
if instance:
instance.delete()
return jsonify({'result': 'success', 'item_id': item_id})
return jsonify({'result': 'error', 'item_id': item_id})
@mapviewer.route("/board/item/execute/<int:mapboard_id>/<int:item_id>", methods=['GET', 'POST', 'OPTIONS']) # remove 'ds'
@crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
@auth.login_required
def execute_mapboard_item_by_query2(mapboard_id, item_id):
# userid = g.user.id
#
# query_str = request.form['query']
# bounds = request.form['bounds']
# roundlevel = request.form['roundlevel']
# template = request.form['template']
#
# data = _query_by_str(query_str, bounds, roundlevel, template)
# return json.dumps(data)
# userid = g.user.id
query_str = request.form['query']
bounds = request.form['bounds']
roundlevel = request.form['roundlevel']
template = request.form['template']
data = map_query_by_str(query_str, bounds, roundlevel, template)
return json.dumps(data)
# @mapviewer.route("/execute/sqlid/<string:sqlid>/selid/<string:selid>/tmplid/<string:tmplid>", methods=['GET', 'POST', 'OPTIONS'])
# @crossdomain(origin=server.app.config['CROSSDOMAIN_ORIGIN'], credentials=True, headers='Authorization')
# @auth.login_required
# def execute(sqlid, selid, tmplid):
# userid = g.user.opid
# strparam = request.args.get('whvalid')
# strparam = urllib.unquote(strparam)
# whvalid = json.loads(strparam)
#
# sdt = request.args.get('sdt')
# edt = request.args.get('edt')
# limit = request.args.get('limit')
# bounds = request.args.get('bounds')
# roundlevel = request.args.get('roundlevel')
#
# if whvalid is None:
# whvalid = []
# elif whvalid == '[]':
# whvalid = []
#
# if bounds:
# print '--- bounds is ??? '
# print bounds
# bounds = bounds.split(',')
#
# print roundlevel
#
# selection = executeQuery(getExeQuery(sqlid, selid, whvalid, tmplid, userid, sdt, edt, limit, bounds, roundlevel))
# print json.dumps(selection, use_decimal=True)
# return json.dumps(selection, use_decimal=True)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from ..compat import dict_items
from ..compat import dict_keys
class PathEngine(object):
"""
This module is responsible for computing the next-hop for every router in the domain
based on the collection of link states that have been gathered.
"""
def __init__(self, container):
self.container = container
self.id = self.container.id
def _calculate_tree_from_root(self, root, collection):
##
# Make a copy of the current collection of link-states that contains
# a fake link-state for nodes that are known-peers but are not in the
# collection currently. This is needed to establish routes to those nodes
# so we can trade link-state information with them.
##
link_states = {}
for _id, ls in collection.items():
link_states[_id] = ls.peers
for p in ls.peers:
if p not in link_states:
link_states[p] = {_id: 1}
##
# Setup Dijkstra's Algorithm
##
hops = {}
cost = {}
prev = {}
for _id in link_states:
hops[_id] = None # infinite
cost[_id] = None # infinite
prev[_id] = None # undefined
hops[root] = 0 # no hops to the root node
cost[root] = 0 # no cost to the root node
unresolved = NodeSet(cost)
##
# Process unresolved nodes until lowest cost paths to all reachable nodes have been found.
##
while not unresolved.empty():
u = unresolved.lowest_cost()
if cost[u] is None:
# There are no more reachable nodes in unresolved
break
for v, v_cost in link_states[u].items():
if unresolved.contains(v):
alt = cost[u] + v_cost
if cost[v] is None or alt < cost[v]:
hops[v] = hops[u] + 1
cost[v] = alt
prev[v] = u
unresolved.set_cost(v, alt)
##
# Remove unreachable nodes from the maps. Note that this will also remove the
# root node (has no previous node) from the map.
##
for u, val in dict_items(prev):
if not val:
prev.pop(u)
hops.pop(u)
cost.pop(u)
##
# Return previous-node and cost maps. Prev is a map of all reachable, remote nodes to
# their predecessor node. Cost is a map of all reachable nodes and their costs.
##
return prev, cost, hops
def _calculate_valid_origins(self, nodeset, collection):
##
# Calculate the tree from each origin, determine the set of origins-per-dest
# for which the path from origin to dest passes through us. This is the set
# of valid origins for forwarding to the destination.
##
valid_origin = {} # Map of destination => List of Valid Origins
for node in nodeset:
if node != self.id:
valid_origin[node] = []
for root in dict_keys(valid_origin):
prev, cost, hops = self._calculate_tree_from_root(root, collection)
nodes = dict_keys(prev)
while len(nodes) > 0:
u = nodes[0]
path = [u]
nodes.remove(u)
v = prev[u]
while v != root:
if v in nodes:
if v != self.id:
path.append(v)
nodes.remove(v)
if v == self.id:
for dest in path:
valid_origin[dest].append(root)
u = v
v = prev[u]
return valid_origin
def calculate_routes(self, collection):
##
# Generate the shortest-path tree with the local node as root
##
prev, cost, hops = self._calculate_tree_from_root(self.id, collection)
nodes = dict_keys(prev)
##
# We will also compute the radius of the topology. This is the number of
# hops (not cost) to the most distant router from the local node
##
radius = max(hops.values()) if len(hops) > 0 else 0
##
# Distill the path tree into a map of next hops for each node
##
next_hops = {}
while len(nodes) > 0:
u = nodes[0] # pick any destination
path = [u]
nodes.remove(u)
v = prev[u]
while v != self.id: # build a list of nodes in the path back to the root
if v in nodes:
path.append(v)
nodes.remove(v)
u = v
v = prev[u]
for w in path: # mark each node in the path as reachable via the next hop
next_hops[w] = u
##
# Calculate the valid origins for remote routers
##
valid_origins = self._calculate_valid_origins(dict_keys(prev), collection)
return (next_hops, cost, valid_origins, radius)
class NodeSet(object):
"""
This data structure is an ordered list of node IDs, sorted in increasing order by their cost.
Equal cost nodes are secondarily sorted by their ID in order to provide deterministic and
repeatable ordering.
"""
def __init__(self, cost_map):
self.nodes = []
for _id, cost in cost_map.items():
##
# Assume that nodes are either unreachable (cost = None) or local (cost = 0)
# during this initialization.
##
if cost == 0:
self.nodes.insert(0, (_id, cost))
else:
##
# There is no need to sort unreachable nodes by ID
##
self.nodes.append((_id, cost))
def __repr__(self):
return self.nodes.__repr__()
def empty(self):
return len(self.nodes) == 0
def contains(self, _id):
for a, b in self.nodes:
if a == _id:
return True
return False
def lowest_cost(self):
"""
Remove and return the lowest cost node ID.
"""
_id, cost = self.nodes.pop(0)
return _id
def set_cost(self, _id, new_cost):
"""
Set the cost for an ID in the NodeSet and re-insert the ID so that the list
remains sorted in increasing cost order.
"""
index = 0
for i, c in self.nodes:
if i == _id:
break
index += 1
self.nodes.pop(index)
index = 0
for i, c in self.nodes:
if c is None or new_cost < c or (new_cost == c and _id < i):
break
index += 1
self.nodes.insert(index, (_id, new_cost))
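# Minimal usage sketch (the fake container and link-state classes are
# illustrative stand-ins, not part of this module). It computes routes for a
# tiny three-router chain A - B - C from the point of view of router A.
def _example_calculate_routes():
    class _FakeContainer(object):
        id = 'A'
    class _FakeLinkState(object):
        def __init__(self, peers):
            self.peers = peers  # map of peer-id => link cost
    engine = PathEngine(_FakeContainer())
    collection = {
        'A': _FakeLinkState({'B': 1}),
        'B': _FakeLinkState({'A': 1, 'C': 1}),
        'C': _FakeLinkState({'B': 1}),
    }
    next_hops, cost, valid_origins, radius = engine.calculate_routes(collection)
    # Expected: next_hops == {'B': 'B', 'C': 'B'}, cost == {'B': 1, 'C': 2}, radius == 2
    return next_hops, cost, valid_origins, radius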
|
|
from __future__ import unicode_literals
import keyword
import re
from collections import OrderedDict
from click import BaseCommand, BaseException
from config import DEFAULT_DB_ALIAS
from ibu import connection as connections
class Command(BaseCommand):
help = "Introspects the database tables in the given database and outputs a Django model module."
requires_system_checks = False
db_module = 'ibu.backends'
def add_arguments(self, parser):
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to '
'introspect. Defaults to using the "default" database.')
def handle(self, **options):
try:
for line in self.handle_inspection(options):
self.stdout.write("%s\n" % line)
except NotImplementedError:
raise BaseException(
"Database inspection isn't supported for the currently selected database backend.")
def handle_inspection(self, options):
connection = connections[options['database']]
# 'table_name_filter' is a stealth option
table_name_filter = options.get('table_name_filter')
def table2model(table_name):
return re.sub(r'[^a-zA-Z0-9]', '', table_name.title())
def strip_prefix(s):
return s[1:] if s.startswith("u'") else s
with connection.cursor() as cursor:
yield "# This is an auto-generated Django model module."
yield "# You'll have to do the following manually to clean this up:"
yield "# * Rearrange models' order"
yield "# * Make sure each model has one field with primary_key=True"
yield "# * Make sure each ForeignKey has `on_delete` set to the desired behavior."
yield (
"# * Remove `managed = False` lines if you wish to allow "
"Django to create, modify, and delete the table"
)
yield "# Feel free to rename the models, but don't rename db_table values or field names."
yield "from __future__ import unicode_literals"
yield ''
yield 'from %s import models' % self.db_module
known_models = []
for table_name in connection.introspection.table_names(cursor):
if table_name_filter is not None and callable(table_name_filter):
if not table_name_filter(table_name):
continue
yield ''
yield ''
yield 'class %s(models.Model):' % table2model(table_name)
known_models.append(table2model(table_name))
try:
relations = connection.introspection.get_relations(
cursor, table_name)
except NotImplementedError:
relations = {}
try:
indexes = connection.introspection.get_indexes(
cursor, table_name)
except NotImplementedError:
indexes = {}
try:
constraints = connection.introspection.get_constraints(
cursor, table_name)
except NotImplementedError:
constraints = {}
# Holds column names used in the table so far
used_column_names = []
# Maps column names to names of model fields
column_to_field_name = {}
for row in connection.introspection.get_table_description(cursor, table_name):
# Holds Field notes, to be displayed in a Python comment.
comment_notes = []
# Holds Field parameters such as 'db_column'.
extra_params = OrderedDict()
column_name = row[0]
is_relation = column_name in relations
att_name, params, notes = self.normalize_col_name(
column_name, used_column_names, is_relation)
extra_params.update(params)
comment_notes.extend(notes)
used_column_names.append(att_name)
column_to_field_name[column_name] = att_name
# Add primary_key and unique, if necessary.
if column_name in indexes:
if indexes[column_name]['primary_key']:
extra_params['primary_key'] = True
elif indexes[column_name]['unique']:
extra_params['unique'] = True
if is_relation:
rel_to = (
"self" if relations[column_name][1] == table_name
else table2model(relations[column_name][1])
)
if rel_to in known_models:
field_type = 'ForeignKey(%s' % rel_to
else:
field_type = "ForeignKey('%s'" % rel_to
else:
# Calling `get_field_type` to get the field type string and any
# additional parameters and notes.
field_type, field_params, field_notes = self.get_field_type(
connection, table_name, row)
extra_params.update(field_params)
comment_notes.extend(field_notes)
field_type += '('
# Don't output 'id = meta.AutoField(primary_key=True)', because
# that's assumed if it doesn't exist.
if att_name == 'id' and extra_params == {'primary_key': True}:
if field_type == 'AutoField(':
continue
elif field_type == 'IntegerField(' and not connection.features.can_introspect_autofield:
comment_notes.append('AutoField?')
# Add 'null' and 'blank', if the 'null_ok' flag was present in the
# table description.
if row[6]: # If it's NULL...
if field_type == 'BooleanField(':
field_type = 'NullBooleanField('
else:
extra_params['blank'] = True
extra_params['null'] = True
field_desc = '%s = %s%s' % (
att_name,
# Custom fields will have a dotted path
'' if '.' in field_type else 'models.',
field_type,
)
if field_type.startswith('ForeignKey('):
field_desc += ', models.DO_NOTHING'
if extra_params:
if not field_desc.endswith('('):
field_desc += ', '
field_desc += ', '.join(
'%s=%s' % (k, strip_prefix(repr(v)))
for k, v in extra_params.items())
field_desc += ')'
if comment_notes:
field_desc += ' # ' + ' '.join(comment_notes)
yield ' %s' % field_desc
for meta_line in self.get_meta(table_name, constraints, column_to_field_name):
yield meta_line
def normalize_col_name(self, col_name, used_column_names, is_relation):
"""
Modify the column name to make it Python-compatible as a field name
"""
field_params = {}
field_notes = []
new_name = col_name.lower()
if new_name != col_name:
field_notes.append('Field name made lowercase.')
if is_relation:
if new_name.endswith('_id'):
new_name = new_name[:-3]
else:
field_params['db_column'] = col_name
new_name, num_repl = re.subn(r'\W', '_', new_name)
if num_repl > 0:
field_notes.append(
'Field renamed to remove unsuitable characters.')
if new_name.find('__') >= 0:
while new_name.find('__') >= 0:
new_name = new_name.replace('__', '_')
if col_name.lower().find('__') >= 0:
# Only add the comment if the double underscore was in the
# original name
field_notes.append(
"Field renamed because it contained more than one '_' in a row.")
if new_name.startswith('_'):
new_name = 'field%s' % new_name
field_notes.append("Field renamed because it started with '_'.")
if new_name.endswith('_'):
new_name = '%sfield' % new_name
field_notes.append("Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += '_field'
field_notes.append(
'Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = 'number_%s' % new_name
field_notes.append(
"Field renamed because it wasn't a valid Python identifier.")
if new_name in used_column_names:
num = 0
while '%s_%d' % (new_name, num) in used_column_names:
num += 1
new_name = '%s_%d' % (new_name, num)
field_notes.append('Field renamed because of name conflict.')
if col_name != new_name and field_notes:
field_params['db_column'] = col_name
return new_name, field_params, field_notes
def get_field_type(self, connection, table_name, row):
"""
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
"""
field_params = OrderedDict()
field_notes = []
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for data_types_reverse to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = int(row[3])
if field_type == 'DecimalField':
if row[4] is None or row[5] is None:
field_notes.append(
'max_digits and decimal_places have been guessed, as this '
'database handles decimal fields as float')
field_params['max_digits'] = row[
4] if row[4] is not None else 10
field_params['decimal_places'] = row[
5] if row[5] is not None else 5
else:
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
return field_type, field_params, field_notes
def get_meta(self, table_name, constraints, column_to_field_name):
"""
Return a sequence comprising the lines of code necessary
to construct the inner Meta class for the model corresponding
to the given database table name.
"""
unique_together = []
for index, params in constraints.items():
if params['unique']:
columns = params['columns']
if len(columns) > 1:
# we do not want to include the u"" or u'' prefix
# so we build the string rather than interpolate the tuple
tup = '(' + ', '.join("'%s'" %
column_to_field_name[c] for c in columns) + ')'
unique_together.append(tup)
meta = ["",
" class Meta:",
" managed = False",
" db_table = '%s'" % table_name]
if unique_together:
tup = '(' + ', '.join(unique_together) + ',)'
meta += [" unique_together = %s" % tup]
return meta
|
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
import re
import six
from autobahn.wamp.types import SubscribeOptions
__all__ = (
'Pattern',
'register',
'subscribe',
'error',
'convert_starred_uri'
)
def convert_starred_uri(uri):
"""
Convert a starred URI to a standard WAMP URI and a detected matching
policy. A starred URI is one that may contain the character '*' used
to mark URI wildcard components or URI prefixes. Starred URIs are
more comfortable / intuitive to use at the user/API level, but need
to be converted for use on the wire (WAMP protocol level).
This function takes a possibly starred URI, detects the matching policy
implied by stars, and returns a pair (uri, match) with any stars
removed from the URI and the detected matching policy.
An URI like 'com.example.topic1' (without any stars in it) is
detected as an exact-matching URI.
An URI like 'com.example.*' (with exactly one star at the very end)
is detected as a prefix-matching URI on 'com.example.'.
An URI like 'com.*.foobar.*' (with more than one star anywhere) is
detected as a wildcard-matching URI on 'com..foobar.' (in this example,
there are two wildcard URI components).
Note that an URI like 'com.example.*' is always detected as
a prefix-matching URI 'com.example.'. You cannot express a wildcard-matching
URI 'com.example.' using the starred URI notation! A wildcard matching on
'com.example.' is different from prefix-matching on 'com.example.' (which
matches a strict superset of the former!). This is one reason we don't use
starred URIs for WAMP at the protocol level.
"""
assert(type(uri) == six.text_type)
cnt_stars = uri.count(u'*')
if cnt_stars == 0:
match = u'exact'
elif cnt_stars == 1 and uri[-1] == u'*':
match = u'prefix'
uri = uri[:-1]
else:
match = u'wildcard'
uri = uri.replace(u'*', u'')
return uri, match
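# Quick illustrative checks derived from the docstring above (the helper name
# is an assumption, not part of the public API):
def _convert_starred_uri_examples():
    assert convert_starred_uri(u'com.example.topic1') == (u'com.example.topic1', u'exact')
    assert convert_starred_uri(u'com.example.*') == (u'com.example.', u'prefix')
    assert convert_starred_uri(u'com.*.foobar.*') == (u'com..foobar.', u'wildcard')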
class Pattern(object):
"""
A WAMP URI Pattern.
.. todo::
* suffix matches
* args + kwargs
* uuid converter
* multiple URI patterns per decorated object
* classes: Pattern, EndpointPattern, ..
"""
URI_TARGET_ENDPOINT = 1
URI_TARGET_HANDLER = 2
URI_TARGET_EXCEPTION = 3
URI_TYPE_EXACT = 1
URI_TYPE_PREFIX = 2
URI_TYPE_WILDCARD = 3
_URI_COMPONENT = re.compile(r"^[a-z0-9][a-z0-9_\-]*$")
"""
Compiled regular expression for a WAMP URI component.
"""
_URI_NAMED_COMPONENT = re.compile(r"^<([a-z][a-z0-9_]*)>$")
"""
Compiled regular expression for a named WAMP URI component.
.. note::
This pattern is stricter than a general WAMP URI component since a valid Python identifier is required.
"""
_URI_NAMED_CONVERTED_COMPONENT = re.compile(r"^<([a-z][a-z0-9_]*):([a-z]*)>$")
"""
Compiled regular expression for a named and type-converted WAMP URI component.
.. note::
This pattern is stricter than a general WAMP URI component since a valid Python identifier is required.
"""
def __init__(self, uri, target):
"""
:param uri: The URI or URI pattern, e.g. ``"com.myapp.product.<product:int>.update"``.
:type uri: unicode
:param target: The target for this pattern: a procedure endpoint (a callable),
an event handler (a callable) or an exception (a class).
:type target: callable or obj
"""
assert(type(uri) == six.text_type)
assert(target in [Pattern.URI_TARGET_ENDPOINT,
Pattern.URI_TARGET_HANDLER,
Pattern.URI_TARGET_EXCEPTION])
components = uri.split('.')
pl = []
nc = {}
for i in range(len(components)):
component = components[i]
match = Pattern._URI_NAMED_CONVERTED_COMPONENT.match(component)
if match:
ctype = match.groups()[1]
if ctype not in ['string', 'int', 'suffix']:
raise Exception("invalid URI")
if ctype == 'suffix' and i != len(components) - 1:
raise Exception("invalid URI")
name = match.groups()[0]
if name in nc:
raise Exception("invalid URI")
if ctype in ['string', 'suffix']:
nc[name] = str
elif ctype == 'int':
nc[name] = int
else:
# should not arrive here
raise Exception("logic error")
pl.append("(?P<{0}>[a-z0-9_]+)".format(name))
continue
match = Pattern._URI_NAMED_COMPONENT.match(component)
if match:
name = match.groups()[0]
if name in nc:
raise Exception("invalid URI")
nc[name] = str
pl.append("(?P<{0}>[a-z0-9_]+)".format(name))
continue
match = Pattern._URI_COMPONENT.match(component)
if match:
pl.append(component)
continue
raise Exception("invalid URI")
if nc:
# URI pattern
self._type = Pattern.URI_TYPE_WILDCARD
p = "^" + "\.".join(pl) + "$"
self._pattern = re.compile(p)
self._names = nc
else:
# exact URI
self._type = Pattern.URI_TYPE_EXACT
self._pattern = None
self._names = None
self._uri = uri
self._target = target
def uri(self):
"""
Returns the original URI (pattern) for this pattern.
:returns: The URI (pattern), e.g. ``"com.myapp.product.<product:int>.update"``.
:rtype: unicode
"""
return self._uri
def subscribe_options(self):
if self._type == Pattern.URI_TYPE_WILDCARD:
return SubscribeOptions(match=u"wildcard")
else:
return SubscribeOptions(match=u"exact")
def match(self, uri):
"""
Match the given (fully qualified) URI according to this pattern
and return extracted args and kwargs.
:param uri: The URI to match, e.g. ``"com.myapp.product.123456.update"``.
:type uri: unicode
:returns: A tuple ``(args, kwargs)``
:rtype: tuple
"""
args = []
kwargs = {}
if self._type == Pattern.URI_TYPE_EXACT:
return args, kwargs
elif self._type == Pattern.URI_TYPE_WILDCARD:
match = self._pattern.match(uri)
if match:
for key in self._names:
val = match.group(key)
val = self._names[key](val)
kwargs[key] = val
return args, kwargs
else:
raise Exception("no match")
def is_endpoint(self):
"""
Check if this pattern is for a procedure endpoint.
:returns: ``True``, iff this pattern is for a procedure endpoint.
:rtype: bool
"""
return self._target == Pattern.URI_TARGET_ENDPOINT
def is_handler(self):
"""
Check if this pattern is for an event handler.
:returns: ``True``, iff this pattern is for an event handler.
:rtype: bool
"""
return self._target == Pattern.URI_TARGET_HANDLER
def is_exception(self):
"""
Check if this pattern is for an exception.
:returns: ``True``, iff this pattern is for an exception.
:rtype: bool
"""
return self._target == Pattern.URI_TARGET_EXCEPTION
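# Illustrative sketch (the URI values are assumptions): matching against a
# pattern with a typed component extracts keyword arguments.
def _pattern_match_example():
    p = Pattern(u"com.myapp.product.<product:int>.update", Pattern.URI_TARGET_ENDPOINT)
    args, kwargs = p.match(u"com.myapp.product.123456.update")
    # args == [], kwargs == {'product': 123456}
    return args, kwargs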
def register(uri):
"""
Decorator for WAMP procedure endpoints.
"""
def decorate(f):
assert(callable(f))
if not hasattr(f, '_wampuris'):
f._wampuris = []
f._wampuris.append(Pattern(uri, Pattern.URI_TARGET_ENDPOINT))
return f
return decorate
def subscribe(uri):
"""
Decorator for WAMP event handlers.
"""
def decorate(f):
assert(callable(f))
if not hasattr(f, '_wampuris'):
f._wampuris = []
f._wampuris.append(Pattern(uri, Pattern.URI_TARGET_HANDLER))
return f
return decorate
def error(uri):
"""
Decorator for WAMP error classes.
"""
def decorate(cls):
assert(issubclass(cls, Exception))
if not hasattr(cls, '_wampuris'):
cls._wampuris = []
cls._wampuris.append(Pattern(uri, Pattern.URI_TARGET_EXCEPTION))
return cls
return decorate
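# Minimal usage sketch (the application URI is an assumption): the decorators
# simply attach Pattern objects to the decorated callable or exception class.
@register(u"com.myapp.product.<product:int>.update")
def _update_product_example(product=None):
    return product
# _update_product_example._wampuris[0].is_endpoint() evaluates to True.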
|
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.api.v1.validators.base import BasicValidator
from nailgun.consts import NETWORK_INTERFACE_TYPES
from nailgun.consts import OVS_BOND_MODES
from nailgun import objects
from nailgun.db import db
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import Node
from nailgun.errors import errors
class NetworkConfigurationValidator(BasicValidator):
@classmethod
def validate_networks_update(cls, data):
d = cls.validate_json(data)
if not d:
raise errors.InvalidData(
"No valid data received",
log_message=True
)
networks = d.get('networks')
if not isinstance(networks, list):
raise errors.InvalidData(
"'networks' is expected to be an array",
log_message=True
)
for i in networks:
if 'id' not in i:
raise errors.InvalidData(
"No 'id' param presents for '{0}' network".format(i),
log_message=True
)
return d
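# Minimal illustration (the payload shape and values are assumptions) of the
# contract enforced by validate_networks_update above: the JSON body must hold
# a 'networks' list and every entry must carry an 'id'.
def _example_networks_payload():
    import json
    payload = json.dumps({'networks': [{'id': 1, 'cidr': '10.0.0.0/24'}]})
    return NetworkConfigurationValidator.validate_networks_update(payload)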
class NovaNetworkConfigurationValidator(NetworkConfigurationValidator):
@classmethod
def validate_dns_servers_update(cls, data):
d = cls.validate_json(data)
dns_servers = d['dns_nameservers'].get("nameservers", [])
if not isinstance(dns_servers, list):
raise errors.InvalidData(
"It's expected to receive array of DNS servers, "
"not a single object",
log_message=True
)
if len(dns_servers) < 2:
raise errors.InvalidData(
"There should be at least two DNS servers",
log_message=True
)
return d
class NeutronNetworkConfigurationValidator(NetworkConfigurationValidator):
@classmethod
def validate_neutron_params(cls, data, **kwargs):
d = cls.validate_json(data)
np = d.get('networking_parameters')
cluster_id = kwargs.get("cluster_id")
if cluster_id:
cluster = db().query(Cluster).get(cluster_id)
if cluster and cluster.network_config:
cfg = cluster.network_config
for k in ("segmentation_type", "net_l23_provider"):
if k in np and getattr(cfg, k) != np[k]:
raise errors.InvalidData(
"Change of '{0}' is prohibited".format(k),
log_message=True
)
return d
class NetAssignmentValidator(BasicValidator):
@classmethod
def validate(cls, node):
if not isinstance(node, dict):
raise errors.InvalidData(
"Each node should be dict",
log_message=True
)
if 'id' not in node:
raise errors.InvalidData(
"Each node should have ID",
log_message=True
)
if 'interfaces' not in node or \
not isinstance(node['interfaces'], list):
raise errors.InvalidData(
"Node '{0}': there is no 'interfaces' list".format(
node['id']),
log_message=True
)
net_ids = set()
for iface in node['interfaces']:
if not isinstance(iface, dict):
raise errors.InvalidData(
"Node '{0}': each interface should be a dict "
"(got '{1}')".format(node['id'], iface),
log_message=True
)
if 'type' not in iface:
raise errors.InvalidData(
"Node '{0}': each interface must have a type".format(
node['id']),
log_message=True
)
if iface['type'] not in NETWORK_INTERFACE_TYPES:
raise errors.InvalidData(
"Node '{0}': unknown interface type".format(node['id']),
log_message=True
)
if iface['type'] == NETWORK_INTERFACE_TYPES.ether \
and 'id' not in iface:
raise errors.InvalidData(
"Node '{0}': each HW interface must have ID".format(
node['id']),
log_message=True
)
if iface['type'] == NETWORK_INTERFACE_TYPES.bond:
if 'name' not in iface:
raise errors.InvalidData(
"Node '{0}': each bond interface must have "
"name".format(node['id']),
log_message=True
)
if 'mode' not in iface:
raise errors.InvalidData(
"Node '{0}': each bond interface must have "
"mode".format(node['id']),
log_message=True
)
if iface['mode'] not in OVS_BOND_MODES:
raise errors.InvalidData(
"Node '{0}': bond interface '{1}' has unknown "
"mode '{2}'".format(
node['id'], iface['name'], iface['mode']),
log_message=True
)
if 'slaves' not in iface \
or not isinstance(iface['slaves'], list) \
or len(iface['slaves']) < 2:
raise errors.InvalidData(
"Node '{0}': each bond interface must have two or more"
" slaves".format(node['id']),
log_message=True
)
for slave in iface['slaves']:
if 'name' not in slave:
raise errors.InvalidData(
"Node '{0}', interface '{1}': each bond slave "
"must have name".format(node['id'], iface['name']),
log_message=True
)
if 'assigned_networks' not in iface or \
not isinstance(iface['assigned_networks'], list):
raise errors.InvalidData(
"Node '{0}', interface '{1}':"
" there is no 'assigned_networks' list".format(
node['id'], iface.get('id') or iface.get('name')),
log_message=True
)
for net in iface['assigned_networks']:
if not isinstance(net, dict):
raise errors.InvalidData(
"Node '{0}', interface '{1}':"
" each assigned network should be a dict".format(
node['id'], iface.get('id') or iface.get('name')),
log_message=True
)
if 'id' not in net:
raise errors.InvalidData(
"Node '{0}', interface '{1}':"
" each assigned network should have ID".format(
node['id'], iface.get('id') or iface.get('name')),
log_message=True
)
if net['id'] in net_ids:
raise errors.InvalidData(
"Node '{0}': there is a duplicated network '{1}' in"
" assigned networks (second occurrence is in "
"interface '{2}')".format(
node['id'], net['id'],
iface.get('id') or iface.get('name')),
log_message=True
)
net_ids.add(net['id'])
return node
@classmethod
def validate_structure(cls, webdata):
node_data = cls.validate_json(webdata)
return cls.validate(node_data)
@classmethod
def validate_collection_structure_and_data(cls, webdata):
data = cls.validate_json(webdata)
if not isinstance(data, list):
raise errors.InvalidData(
"Data should be list of nodes",
log_message=True
)
for node_data in data:
cls.validate(node_data)
cls.verify_data_correctness(node_data)
return data
@classmethod
def validate_structure_and_data(cls, webdata, node_id):
interfaces_data = cls.validate_json(webdata)
node_data = {'id': node_id, 'interfaces': interfaces_data}
cls.validate(node_data)
cls.verify_data_correctness(node_data)
return interfaces_data
@classmethod
def verify_data_correctness(cls, node):
db_node = db().query(Node).filter_by(id=node['id']).first()
if not db_node:
raise errors.InvalidData(
"There is no node with ID '{0}' in DB".format(node['id']),
log_message=True
)
interfaces = node['interfaces']
db_interfaces = db_node.nic_interfaces
network_group_ids = objects.Node.get_network_manager(
db_node
).get_node_networkgroups_ids(db_node)
bonded_eth_ids = set()
for iface in interfaces:
if iface['type'] == NETWORK_INTERFACE_TYPES.ether:
db_iface = filter(
lambda i: i.id == iface['id'],
db_interfaces
)
if not db_iface:
raise errors.InvalidData(
"Node '{0}': there is no interface with ID '{1}'"
" in DB".format(node['id'], iface['id']),
log_message=True
)
elif iface['type'] == NETWORK_INTERFACE_TYPES.bond:
for slave in iface['slaves']:
iface_id = [i.id for i in db_interfaces
if i.name == slave['name']]
if iface_id:
if iface_id[0] in bonded_eth_ids:
raise errors.InvalidData(
"Node '{0}': interface '{1}' is used in bonds "
"more than once".format(
node['id'], iface_id[0]),
log_message=True
)
bonded_eth_ids.add(iface_id[0])
else:
raise errors.InvalidData(
"Node '{0}': there is no interface '{1}' found "
"for bond '{2}' in DB".format(
node['id'], slave['name'], iface['name']),
log_message=True
)
for net in iface['assigned_networks']:
if net['id'] not in network_group_ids:
raise errors.InvalidData(
"Network '{0}' doesn't exist for node {1}".format(
net['id'], node['id']),
log_message=True
)
network_group_ids.remove(net['id'])
if network_group_ids:
str_ng_ids = ["'" + str(ng_id) + "'"
for ng_id in network_group_ids]
raise errors.InvalidData(
"Node '{0}': {1} network(s) are left unassigned".format(
node['id'], ",".join(str_ng_ids)),
log_message=True
)
for iface in interfaces:
if iface['type'] == NETWORK_INTERFACE_TYPES.ether \
and iface['id'] in bonded_eth_ids \
and len(iface['assigned_networks']) > 0:
raise errors.InvalidData(
"Node '{0}': interface '{1}' cannot have "
"assigned networks as it is used in "
"bond".format(node['id'], iface['id']),
log_message=True
)
|
|
# pylint: disable-all
import json
import os
import pprint
import unittest
from unittest.mock import MagicMock
from circleci.api import Api
from circleci.error import BadKeyError, BadVerbError, InvalidFilterError
class TestCircleCIApi(unittest.TestCase):
def setUp(self):
self.c = Api(os.getenv('CIRCLE_TOKEN'))
def loadMock(self, filename):
"""helper function to open mock responses"""
filename = 'tests/mocks/{0}'.format(filename)
with open(filename, 'r') as f:
self.c._request = MagicMock(return_value=f.read())
def test_bad_verb(self):
with self.assertRaises(BadVerbError) as e:
self.c._request('BAD', 'dummy')
self.assertEqual('BAD', e.exception.argument)
self.assertIn('DELETE', e.exception.message)
def test_get_user_info(self):
self.loadMock('mock_user_info_response')
resp = json.loads(self.c.get_user_info())
self.assertEqual(resp["selected_email"], 'mock+ccie-tester@circleci.com')
def test_get_projects(self):
self.loadMock('mock_get_projects_response')
resp = json.loads(self.c.get_projects())
self.assertEqual(resp[0]['vcs_url'], 'MOCK+https://ghe-dev.circleci.com/ccie-tester/testing')
def test_follow_project(self):
self.loadMock('mock_follow_project_response')
resp = json.loads(self.c.follow_project('ccie-tester', 'testing'))
self.assertEqual(resp["mock+following"], True)
def test_get_project_build_summary(self):
self.loadMock('mock_project_build_summary_response')
resp = json.loads(self.c.get_project_build_summary('ccie-tester', 'testing'))
self.assertEqual(len(resp), 6)
self.assertEqual(resp[0]['username'], 'MOCK+ccie-tester')
# with invalid status filter
with self.assertRaises(InvalidFilterError) as e:
json.loads(self.c.get_project_build_summary('ccie-tester', 'testing', status_filter='dummy'))
self.assertEqual('dummy', e.exception.argument)
self.assertIn('running', e.exception.message)
# with branch
resp = json.loads(self.c.get_project_build_summary('ccie-tester', 'testing', branch='master'))
self.assertEqual(len(resp), 6)
self.assertEqual(resp[0]['username'], 'MOCK+ccie-tester')
def test_get_recent_builds(self):
self.loadMock('mock_get_recent_builds_response')
resp = json.loads(self.c.get_recent_builds())
self.assertEqual(len(resp), 7)
self.assertEqual(resp[0]['reponame'], 'MOCK+testing')
def test_get_build_info(self):
self.loadMock('mock_get_build_info_response')
resp = json.loads(self.c.get_build_info('ccie-tester', 'testing', '1'))
self.assertEqual(resp['reponame'], 'MOCK+testing')
def test_get_artifacts(self):
self.loadMock('mock_get_artifacts_response')
resp = json.loads(self.c.get_artifacts('ccie-tester', 'testing', '1'))
self.assertEqual(resp[0]['path'], 'MOCK+raw-test-output/go-test-report.xml')
def test_retry_build(self):
self.loadMock('mock_retry_build_response')
resp = json.loads(self.c.retry_build('ccie-tester', 'testing', '1'))
self.assertEqual(resp['reponame'], 'MOCK+testing')
# with SSH
resp = json.loads(self.c.retry_build('ccie-tester', 'testing', '1', ssh=True))
self.assertEqual(resp['reponame'], 'MOCK+testing')
def test_cancel_build(self):
self.loadMock('mock_cancel_build_response')
resp = json.loads(self.c.cancel_build('ccie-tester', 'testing', '11'))
self.assertEqual(resp['reponame'], 'MOCK+testing')
self.assertEqual(resp['build_num'], 11)
self.assertTrue(resp['canceled'])
def test_add_ssh_user(self):
self.loadMock('mock_add_ssh_user_response')
resp = json.loads(self.c.add_ssh_user('ccie-tester', 'testing', '11'))
self.assertEqual(resp['reponame'], 'MOCK+testing')
self.assertEqual(resp['ssh_users'][0]['login'], 'ccie-tester')
def test_trigger_build(self):
self.loadMock('mock_trigger_build_response')
resp = json.loads(self.c.trigger_build('ccie-tester', 'testing'))
self.assertEqual(resp['reponame'], 'MOCK+testing')
def test_list_checkout_keys(self):
self.loadMock('mock_list_checkout_keys_response')
resp = json.loads(self.c.list_checkout_keys('levlaz', 'circleci-sandbox'))
self.assertEqual(resp[0]['type'], 'deploy-key')
self.assertIn('public_key', resp[0])
def test_create_checkout_key(self):
with self.assertRaises(BadKeyError) as e:
self.c.create_checkout_key('levlaz', 'test', 'bad')
self.assertEqual('bad', e.exception.argument)
self.assertIn('deploy-key', e.exception.message)
self.loadMock('mock_create_checkout_key_response')
resp = json.loads(self.c.create_checkout_key('levlaz', 'test', 'deploy-key'))
self.assertEqual(resp['type'], 'deploy-key')
self.assertIn('public_key', resp)
def test_get_checkout_key(self):
self.loadMock('mock_get_checkout_key_response')
resp = json.loads(self.c.get_checkout_key('levlaz', 'circleci-sandbox', '94:19:ab:a9:f4:2b:21:1c:a5:87:dd:ee:3d:c2:90:4e'))
self.assertEqual(resp['type'], 'deploy-key')
self.assertIn('public_key', resp)
def test_delete_checkout_key(self):
self.loadMock('mock_delete_checkout_key_response')
resp = json.loads(self.c.delete_checkout_key('levlaz', 'circleci-sandbox', '94:19:ab:a9:f4:2b:21:1c:a5:87:dd:ee:3d:c2:90:4e'))
self.assertEqual(resp['message'], 'ok')
def test_get_test_metadata(self):
self.loadMock('mock_get_test_metadata_response')
resp = json.loads(self.c.get_test_metadata('levlaz', 'circleci-demo-javascript-express', 127))
self.assertEqual(len(resp), 2)
self.assertIn('tests', resp)
def test_list_envvars(self):
self.loadMock('mock_list_envvars_response')
resp = json.loads(self.c.list_envvars('levlaz', 'circleci-sandbox'))
self.assertEqual(len(resp), 4)
self.assertEqual(resp[0]['name'], 'BAR')
def test_add_envvar(self):
self.loadMock('mock_add_envvar_response')
resp = json.loads(self.c.add_envvar('levlaz', 'circleci-sandbox', 'foo', 'bar'))
self.assertEqual(resp['name'], 'foo')
self.assertNotEqual(resp['value'], 'bar')
def test_get_envvar(self):
self.loadMock('mock_get_envvar_response')
resp = json.loads(self.c.get_envvar('levlaz', 'circleci-sandbox', 'foo'))
self.assertEqual(resp['name'], 'foo')
self.assertNotEqual(resp['value'], 'bar')
def test_delete_envvar(self):
self.loadMock('mock_delete_envvar_response')
resp = json.loads(self.c.delete_envvar('levlaz', 'circleci-sandbox', 'foo'))
self.assertEqual(resp['message'], 'ok')
def test_get_latest_artifact(self):
self.loadMock('mock_get_latest_artifacts_response')
resp = json.loads(self.c.get_latest_artifact('levlaz', 'circleci-sandbox'))
self.assertEqual(resp[0]['path'],'circleci-docs/index.html')
resp = json.loads(self.c.get_latest_artifact('levlaz', 'circleci-sandbox', 'master'))
self.assertEqual(resp[0]['path'],'circleci-docs/index.html')
with self.assertRaises(InvalidFilterError):
self.c.get_latest_artifact('levlaz', 'circleci-sandbox', 'master', 'invalid')
# def test_helper(self):
# resp = self.c.get_latest_artifact('circleci', 'circleci-docs')
# print(resp)
# with open('tests/mocks/mock_get_latest_artifacts_response', 'w') as f:
# json.dump(resp, f)
|
|
# Version: 0.10
"""
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, and 3.2, 3.3
[Build Status](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* run `versioneer-installer` in your source tree: this installs `versioneer.py`
* follow the instructions below (also in the `versioneer.py` docstring)
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS variable ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example 'git describe --tags --dirty --always' reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time. However,
when you use "setup.py build" or "setup.py sdist", `_version.py` in the new
copy is replaced by a small static file that contains just the generated
version data.
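For illustration, the small static replacement written by 'build'/'sdist' looks
roughly like this (the version values below are only an example):

    version_version = '1.2.0'
    version_full = '1076c978a8d3cfc70f408fe5974aa6c092c949ac'
    def get_versions(default={}, verbose=False):
        return {'version': version_version, 'full': version_full}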
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the "git archive" command. As a result, generated tarballs will
contain enough information to get the proper version.
## Installation
First, decide on values for the following configuration variables:
* `versionfile_source`:
A project-relative pathname into which the generated version strings should
be written. This is usually a `_version.py` next to your project's main
`__init__.py` file. If your project uses `src/myproject/__init__.py`, this
should be `src/myproject/_version.py`. This file should be checked in to
your VCS as usual: the copy created below by `setup.py versioneer` will
include code that parses expanded VCS keywords in generated tarballs. The
'build' and 'sdist' commands will replace it with a copy that has just the
calculated version string.
* `versionfile_build`:
Like `versionfile_source`, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
then you will probably have `versionfile_build='myproject/_version.py'` and
`versionfile_source='src/myproject/_version.py'`.
* `tag_prefix`:
a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
If your tags look like 'myproject-1.2.0', then you should use
tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
should be an empty string.
* `parentdir_prefix`:
a string, frequently the same as tag_prefix, which appears at the start of
all unpacked tarball filenames. If your tarball unpacks into
'myproject-1.2.0', this should be 'myproject-'.
This tool provides one script, named `versioneer-installer`. That script does
one thing: write a copy of `versioneer.py` into the current directory.
To versioneer-enable your project:
* 1: Run `versioneer-installer` to copy `versioneer.py` into the top of your
source tree.
* 2: add the following lines to the top of your `setup.py`, with the
configuration values you decided earlier:
import versioneer
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
* 3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
* 4: now run `setup.py versioneer`, which will create `_version.py`, and
will modify your `__init__.py` to define `__version__` (by calling a
function from `_version.py`). It will also modify your `MANIFEST.in` to
include both `versioneer.py` and the generated `_version.py` in sdist
tarballs.
* 5: commit these changes to your VCS. To make sure you won't forget,
`setup.py versioneer` will mark everything it touched for addition.
## Post-Installation Usage
Once established, all uses of your tree from a VCS checkout should get the
current version string. All generated tarballs should include an embedded
version string (so users who unpack them will not need a VCS tool installed).
If you distribute your project through PyPI, then the release process should
boil down to two steps:
* 1: git tag 1.0
* 2: python setup.py register sdist upload
If you distribute it through github (i.e. users use github to generate
tarballs with `git archive`), the process is:
* 1: git tag 1.0
* 2: git push; git push --tags
Currently, all version strings must be based upon a tag. Versioneer will
report "unknown" until your tree has at least one tag in its history. This
restriction will be fixed eventually (see issue #12).
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different keys for different flavors
of the version string:
* `['version']`: condensed tag+distance+shortid+dirty identifier. For git,
this uses the output of `git describe --tags --dirty --always` but strips
the tag_prefix. For example "0.11-2-g1076c97-dirty" indicates that the tree
is like the "1076c97" commit but has uncommitted changes ("-dirty"), and
that this commit is two revisions ("-2-") beyond the "0.11" tag. For
released software (exactly equal to a known tag), the identifier will only
contain the stripped tag, e.g. "0.11".
* `['full']`: detailed revision identifier. For Git, this is the full SHA1
commit id, followed by "-dirty" if the tree contains uncommitted changes,
e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac-dirty".
Some variants are more useful than others. Including `full` in a bug report
should allow developers to reconstruct the exact code being tested (or
indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
In the future, this will also include a
[PEP-0440](http://legacy.python.org/dev/peps/pep-0440/) -compatible flavor
(e.g. `1.2.post0.dev123`). This loses a lot of information (and has no room
for a hash-based revision id), but is safe to use in a `setup.py`
"`version=`" argument. It also enables tools like *pip* to compare version
strings and evaluate compatibility constraint declarations.
The `setup.py versioneer` command adds the following text to your
`__init__.py` to place a basic version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* re-run `versioneer-installer` in your source tree to replace `versioneer.py`
* edit `setup.py`, if necessary, to include any new configuration settings indicated by the release notes
* re-run `setup.py versioneer` to replace `SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is hereby released into the
public domain. The `_version.py` that it creates is also in the public
domain.
"""
import os, sys, re
from distutils.core import Command
from distutils.command.sdist import sdist as _sdist
from distutils.command.build import build as _build
versionfile_source = None
versionfile_build = None
tag_prefix = None
parentdir_prefix = None
VCS = "git"
LONG_VERSION_PY = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.10 (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
import subprocess
import sys
import errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
f = open(versionfile_abs,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' variables were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded variables.
variables = { "refnames": git_refnames, "full": git_full }
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.abspath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
except NameError:
return default
return (versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)
'''
import subprocess
import sys
import errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
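# For reference, the expanded lines this scan looks for resemble the following
# (values are illustrative only):
#   git_refnames = " (HEAD, tag: myproject-1.2.0)"
#   git_full = "1076c978a8d3cfc70f408fe5974aa6c092c949ac"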
variables = {}
try:
f = open(versionfile_abs,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' variables were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
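# For example (illustrative values only): "git describe --tags --dirty --always"
# might print "myproject-1.2.0-1-g574ab98-dirty"; with tag_prefix "myproject-"
# this returns {"version": "1.2.0-1-g574ab98-dirty", "full": "<full sha>-dirty"}.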
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
import os.path
import sys
# os.path.relpath only appeared in Python-2.6. Define it here for 2.5.
def os_path_relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = [x for x in os.path.abspath(start).split(os.path.sep) if x]
path_list = [x for x in os.path.abspath(path).split(os.path.sep) if x]
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
def do_vcs_install(manifest_in, versionfile_source, ipy):
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source, ipy]
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os_path_relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.10) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
versions = {}
try:
f = open(filename)
except EnvironmentError:
return versions
for line in f.readlines():
mo = re.match("version_version = '([^']+)'", line)
if mo:
versions["version"] = mo.group(1)
mo = re.match("version_full = '([^']+)'", line)
if mo:
versions["full"] = mo.group(1)
f.close()
return versions
def write_to_version_file(filename, versions):
f = open(filename, "w")
f.write(SHORT_VERSION_PY % versions)
f.close()
print("set %s to '%s'" % (filename, versions["version"]))
def get_root():
try:
return os.path.dirname(os.path.abspath(__file__))
except NameError:
return os.path.dirname(os.path.abspath(sys.argv[0]))
def get_versions(default=DEFAULT, verbose=False):
# returns dict with two keys: 'version' and 'full'
assert versionfile_source is not None, "please set versioneer.versionfile_source"
assert tag_prefix is not None, "please set versioneer.tag_prefix"
assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
# I am in versioneer.py, which must live at the top of the source tree,
# which we use to compute the root directory. py2exe/bbfreeze/non-CPython
# don't have __file__, in which case we fall back to sys.argv[0] (which
# ought to be the setup.py script). We prefer __file__ since that's more
# robust in cases where setup.py was invoked in some weird way (e.g. pip)
root = get_root()
versionfile_abs = os.path.join(root, versionfile_source)
# extract version from first of _version.py, 'git describe', parentdir.
# This is meant to work for developers using a source checkout, for users
# of a tarball created by 'setup.py sdist', and for users of a
# tarball/zipball created by 'git archive' or github's download-from-tag
# feature.
variables = get_expanded_variables(versionfile_abs)
if variables:
ver = versions_from_expanded_variables(variables, tag_prefix)
if ver:
if verbose: print("got version from expanded variable %s" % ver)
return ver
ver = versions_from_file(versionfile_abs)
if ver:
if verbose: print("got version from file %s %s" % (versionfile_abs,ver))
return ver
ver = versions_from_vcs(tag_prefix, root, verbose)
if ver:
if verbose: print("got version from git %s" % ver)
return ver
ver = versions_from_parentdir(parentdir_prefix, root, verbose)
if ver:
if verbose: print("got version from parentdir %s" % ver)
return ver
if verbose: print("got version from default %s" % ver)
return default
def get_version(verbose=False):
return get_versions(verbose=verbose)["version"]
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ver = get_version(verbose=True)
print("Version is currently: %s" % ver)
class cmd_build(_build):
def run(self):
versions = get_versions(verbose=True)
_build.run(self)
# now locate _version.py in the new build/ directory and replace it
# with an updated value
target_versionfile = os.path.join(self.build_lib, versionfile_build)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
f = open(target_versionfile, "w")
f.write(SHORT_VERSION_PY % versions)
f.close()
if 'cx_Freeze' in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
class cmd_build_exe(_build_exe):
def run(self):
versions = get_versions(verbose=True)
target_versionfile = versionfile_source
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
f = open(target_versionfile, "w")
f.write(SHORT_VERSION_PY % versions)
f.close()
_build_exe.run(self)
os.unlink(target_versionfile)
f = open(versionfile_source, "w")
f.write(LONG_VERSION_PY % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
})
f.close()
class cmd_sdist(_sdist):
def run(self):
versions = get_versions(verbose=True)
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory (remembering
# that it may be a hardlink) and replace it with an updated value
target_versionfile = os.path.join(base_dir, versionfile_source)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
f = open(target_versionfile, "w")
f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
f.close()
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
class cmd_update_files(Command):
description = "install/upgrade Versioneer files: __init__.py SRC/_version.py"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
print(" creating %s" % versionfile_source)
f = open(versionfile_source, "w")
f.write(LONG_VERSION_PY % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
})
f.close()
ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
try:
old = open(ipy, "r").read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
f = open(ipy, "a")
f.write(INIT_PY_SNIPPET)
f.close()
else:
print(" %s unmodified" % ipy)
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(get_root(), "MANIFEST.in")
simple_includes = set()
try:
for line in open(manifest_in, "r").readlines():
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
f = open(manifest_in, "a")
f.write("include versioneer.py\n")
f.close()
else:
print(" 'versioneer.py' already in MANIFEST.in")
if versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
versionfile_source)
f = open(manifest_in, "a")
f.write("include %s\n" % versionfile_source)
f.close()
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-time keyword
# substitution.
do_vcs_install(manifest_in, versionfile_source, ipy)
def get_cmdclass():
cmds = {'version': cmd_version,
'versioneer': cmd_update_files,
'build': cmd_build,
'sdist': cmd_sdist,
}
if 'cx_Freeze' in sys.modules: # cx_freeze enabled?
cmds['build_exe'] = cmd_build_exe
del cmds['build']
return cmds
|
|
import hashlib
import logging
from flask import flash, redirect, render_template, request, url_for
from flask_login import current_user, login_required, login_user, logout_user
from redash import __version__, limiter, models, settings
from redash.authentication import current_org, get_login_url
from redash.authentication.account import (BadSignature, SignatureExpired,
send_password_reset_email,
validate_token)
from redash.handlers import routes
from redash.handlers.base import json_response, org_scoped_rule
from redash.version_check import get_latest_version
from sqlalchemy.orm.exc import NoResultFound
logger = logging.getLogger(__name__)
def get_google_auth_url(next_path):
if settings.MULTI_ORG:
google_auth_url = url_for('google_oauth.authorize_org', next=next_path, org_slug=current_org.slug)
else:
google_auth_url = url_for('google_oauth.authorize', next=next_path)
return google_auth_url
def render_token_login_page(template, org_slug, token):
try:
user_id = validate_token(token)
org = current_org._get_current_object()
user = models.User.get_by_id_and_org(user_id, org)
except NoResultFound:
logger.exception("Bad user id in token. Token= , User id= %s, Org=%s", user_id, token, org_slug)
return render_template("error.html", error_message="Invalid invite link. Please ask for a new one."), 400
except (SignatureExpired, BadSignature):
logger.exception("Failed to verify invite token: %s, org=%s", token, org_slug)
return render_template("error.html",
error_message="Your invite link has expired. Please ask for a new one."), 400
status_code = 200
if request.method == 'POST':
if 'password' not in request.form:
flash('Bad Request')
status_code = 400
elif not request.form['password']:
flash('Cannot use empty password.')
status_code = 400
elif len(request.form['password']) < 6:
flash('Password length is too short (<6).')
status_code = 400
else:
# TODO: set active flag
user.hash_password(request.form['password'])
models.db.session.add(user)
login_user(user)
models.db.session.commit()
return redirect(url_for('redash.index', org_slug=org_slug))
if settings.GOOGLE_OAUTH_ENABLED:
google_auth_url = get_google_auth_url(url_for('redash.index', org_slug=org_slug))
else:
google_auth_url = ''
return render_template(template, google_auth_url=google_auth_url, user=user), status_code
@routes.route(org_scoped_rule('/invite/<token>'), methods=['GET', 'POST'])
def invite(token, org_slug=None):
return render_token_login_page("invite.html", org_slug, token)
@routes.route(org_scoped_rule('/reset/<token>'), methods=['GET', 'POST'])
def reset(token, org_slug=None):
return render_token_login_page("reset.html", org_slug, token)
@routes.route(org_scoped_rule('/forgot'), methods=['GET', 'POST'])
def forgot_password(org_slug=None):
submitted = False
if request.method == 'POST' and request.form['email']:
submitted = True
email = request.form['email']
try:
org = current_org._get_current_object()
user = models.User.get_by_email_and_org(email, org)
send_password_reset_email(user)
except NoResultFound:
logging.error("No user found for forgot password: %s", email)
return render_template("forgot.html", submitted=submitted)
@routes.route(org_scoped_rule('/login'), methods=['GET', 'POST'])
@limiter.limit(settings.THROTTLE_LOGIN_PATTERN)
def login(org_slug=None):
# We intentionally use == as otherwise it won't actually use the proxy. So weird :O
# noinspection PyComparisonWithNone
if current_org == None and not settings.MULTI_ORG:
return redirect('/setup')
elif current_org == None:
return redirect('/')
index_url = url_for("redash.index", org_slug=org_slug)
next_path = request.args.get('next', index_url)
if current_user.is_authenticated:
return redirect(next_path)
if not settings.PASSWORD_LOGIN_ENABLED:
if settings.REMOTE_USER_LOGIN_ENABLED:
return redirect(url_for("remote_user_auth.login", next=next_path))
elif settings.SAML_LOGIN_ENABLED:
return redirect(url_for("saml_auth.sp_initiated", next=next_path))
else:
return redirect(url_for("google_oauth.authorize", next=next_path))
if request.method == 'POST':
try:
org = current_org._get_current_object()
user = models.User.get_by_email_and_org(request.form['email'], org)
if user and user.verify_password(request.form['password']):
remember = ('remember' in request.form)
login_user(user, remember=remember)
return redirect(next_path)
else:
flash("Wrong email or password.")
except NoResultFound:
flash("Wrong email or password.")
google_auth_url = get_google_auth_url(next_path)
return render_template("login.html",
org_slug=org_slug,
next=next_path,
username=request.form.get('username', ''),
show_google_openid=settings.GOOGLE_OAUTH_ENABLED,
google_auth_url=google_auth_url,
show_saml_login=settings.SAML_LOGIN_ENABLED,
show_remote_user_login=settings.REMOTE_USER_LOGIN_ENABLED)
@routes.route(org_scoped_rule('/logout'))
def logout(org_slug=None):
logout_user()
return redirect(get_login_url(next=None))
def base_href():
if settings.MULTI_ORG:
base_href = url_for('redash.index', _external=True, org_slug=current_org.slug)
else:
base_href = url_for('redash.index', _external=True)
return base_href
def client_config():
if not current_user.is_api_user() and current_user.is_authenticated:
client_config = {
'newVersionAvailable': get_latest_version(),
'version': __version__
}
else:
client_config = {}
client_config.update(settings.COMMON_CLIENT_CONFIG)
client_config.update({
'basePath': base_href()
})
return client_config
@routes.route('/api/config', methods=['GET'])
def config(org_slug=None):
return json_response({
'org_slug': current_org.slug,
'client_config': client_config()
})
@routes.route(org_scoped_rule('/api/session'), methods=['GET'])
@login_required
def session(org_slug=None):
if current_user.is_api_user():
user = {
'permissions': [],
'apiKey': current_user.id
}
else:
email_md5 = hashlib.md5(current_user.email.lower()).hexdigest()
gravatar_url = "https://www.gravatar.com/avatar/%s?s=40" % email_md5
user = {
'gravatar_url': gravatar_url,
'id': current_user.id,
'name': current_user.name,
'email': current_user.email,
'groups': current_user.group_ids,
'permissions': current_user.permissions
}
return json_response({
'user': user,
'org_slug': current_org.slug,
'client_config': client_config()
})
|
|
#-*- coding: utf-8 -*-
# stino/menu.py
import os
import json
from . import constant
from . import fileutil
from . import serial
class MainMenu:
def __init__(self, language, arduino_info, file_name):
self.language = language
self.file = os.path.join(constant.stino_root, file_name)
self.arduino_info = arduino_info
self.menu = MenuItem('Main Menu')
self.refresh()
def getMenu(self):
return self.menu
def printMenu(self):
printMenu(self.menu)
def buildMenu(self):
self.menu = buildMainMenu(self.language, self.arduino_info)
def genFile(self):
data = convertMenuToData(self.menu)
text = json.dumps(data, indent = 4)
# opened_file = open(self.file, 'w')
# opened_file.write(text)
# opened_file.close()
fileutil.writeFile(self.file, text)
def refresh(self):
self.buildMenu()
self.genFile()
class MenuItem:
def __init__(self, caption = '-'):
self.caption = caption
self.id = caption.lower()
self.mnemonic = None
self.children = []
self.command = None
self.checkbox = False
self.args = None
def hasSubmenu(self):
state = False
if self.children:
state = True
return state
def getCaption(self):
return self.caption
def getMnemonic(self):
return self.mnemonic
def getId(self):
return self.id
def getCommand(self):
return self.command
def getCheckbox(self):
return self.checkbox
def getArgs(self):
return self.args
def getSubmenu(self):
return self.children
def addMenuItem(self, menu_item):
self.children.append(menu_item)
def addMenuGroup(self, menu_group):
if self.hasSubmenu():
separator = MenuItem()
self.addMenuItem(separator)
self.children += menu_group.getGroup()
def setCaption(self, caption):
self.caption = caption
def setMnemonic(self, mnemonic):
self.mnemonic = mnemonic
def setId(self, ID):
self.id = ID
def setCommand(self, command):
self.command = command
def setCheckbox(self):
self.checkbox = True
def setArgs(self, args):
self.args = args
def getSubmenuItem(self, caption):
subitem = None
for item in self.children:
if item.getCaption() == caption:
subitem = item
return subitem
class MenuItemGroup:
def __init__(self):
self.group = []
def clear(self):
self.group = []
def hasMenuItem(self):
state = False
if self.group:
state = True
return state
def addMenuItem(self, menu_item):
self.group.append(menu_item)
def removeMenuItem(self, menu_item):
if menu_item in self.group:
self.group.remove(menu_item)
def getGroup(self):
return self.group
def printMenu(menu, level = 0):
caption = menu.getCaption()
if level > 0:
caption = '\t' * level + '|__' + caption
print(caption)
if menu.hasSubmenu():
for submenu in menu.getSubmenu():
printMenu(submenu, level+1)
def buildMenuFromSketch(sketch):
name = sketch.getName()
cur_menu = MenuItem(name)
if sketch.hasSubItem():
for sub_sketch in sketch.getSubItemList():
sub_menu_item = buildMenuFromSketch(sub_sketch)
cur_menu.addMenuItem(sub_menu_item)
else:
folder = sketch.getFolder()
if folder:
command = 'open_sketch'
args = {'folder' : folder}
cur_menu.setCommand(command)
cur_menu.setArgs(args)
return cur_menu
def buildSketchbookMenu(language, arduino_info):
sketchbook = arduino_info.getSketchbook()
sketchbook.setName(language.translate('Sketchbook'))
sketchbook_menu = buildMenuFromSketch(sketchbook)
return sketchbook_menu
def buildLibraryMenu(language, arduino_info):
library_menu = MenuItem(language.translate('Import Library'))
platform_list = arduino_info.getPlatformList()
for platform in platform_list:
name = platform.getName()
sub_menu_item = MenuItem(name)
lib_list = platform.getLibList()
for lib in lib_list:
lib_name = lib.getName()
lib_menu_item = MenuItem(lib_name)
lib_folder = lib.getFolder()
command = 'import_library'
lib_args = {'folder' : lib_folder}
lib_menu_item.setCommand(command)
lib_menu_item.setArgs(lib_args)
sub_menu_item.addMenuItem(lib_menu_item)
library_menu.addMenuItem(sub_menu_item)
return library_menu
def buildExampleMenu(language, arduino_info):
example_menu = MenuItem(language.translate('Examples'))
platform_list = arduino_info.getPlatformList()
for platform in platform_list:
cur_example = platform.getExample()
sub_menu_item = buildMenuFromSketch(cur_example)
example_menu.addMenuItem(sub_menu_item)
return example_menu
def buildBoardMenuList(arduino_info):
board_menu_list = []
platform_list = arduino_info.getPlatformList()
for platform in platform_list:
platform_id = platform_list.index(platform)
name = platform.getName()
board_menu = MenuItem(name)
board_list = platform.getBoardList()
if board_list:
for cur_board in board_list:
board_id = board_list.index(cur_board)
board_name = cur_board.getName()
board_menu_item = MenuItem(board_name)
command = 'choose_board'
board_args = {'platform' : platform_id, 'board': board_id}
board_menu_item.setCommand(command)
board_menu_item.setArgs(board_args)
board_menu_item.setCheckbox()
board_menu.addMenuItem(board_menu_item)
board_menu_list.append(board_menu)
return board_menu_list
def buildBoardOptionMenuList(arduino_info):
board_option_menu_list = []
platform_id = constant.sketch_settings.get('platform', -1)
board_id = constant.sketch_settings.get('board', -1)
if platform_id > -1:
platform_list = arduino_info.getPlatformList()
if platform_id < len(platform_list):
platform = platform_list[platform_id]
board_list = platform.getBoardList()
if board_id < len(board_list):
board = board_list[board_id]
board_option_list = board.getOptionList()
for board_option in board_option_list:
board_option_id = board_option_list.index(board_option)
board_option_name = board_option.getName()
board_option_menu = MenuItem(board_option_name)
board_option_item_list = board_option.getItemList()
for board_option_item in board_option_item_list:
board_option_item_id = board_option_item_list.index(board_option_item)
board_option_item_name = board_option_item.getName()
board_option_item_menu = MenuItem(board_option_item_name)
command = 'choose_board_option'
args = {'board_option' : board_option_id, 'board_option_item' : board_option_item_id}
board_option_item_menu.setCommand(command)
board_option_item_menu.setArgs(args)
board_option_item_menu.setCheckbox()
board_option_menu.addMenuItem(board_option_item_menu)
board_option_menu_list.append(board_option_menu)
return board_option_menu_list
def buildProgrammerMenu(language, arduino_info):
programmer_menu = MenuItem(language.translate('Programmer'))
platform_id = constant.sketch_settings.get('platform', -1)
if platform_id > -1:
platform_list = arduino_info.getPlatformList()
if platform_id < len(platform_list):
platform = platform_list[platform_id]
programmer_list = platform.getProgrammerList()
if programmer_list:
for cur_programmer in programmer_list:
programmer_id = programmer_list.index(cur_programmer)
programmer_name = cur_programmer.getName()
programmer_menu_item = MenuItem(programmer_name)
command = 'choose_programmer'
programmer_args = {'platform' : platform_id, 'programmer': programmer_id}
programmer_menu_item.setCommand(command)
programmer_menu_item.setArgs(programmer_args)
programmer_menu_item.setCheckbox()
programmer_menu.addMenuItem(programmer_menu_item)
return programmer_menu
def buildSerialPortMenu(language):
serial_port_menu = MenuItem(language.translate('Serial Port'))
serial_port_list = serial.getSerialPortList()
for serial_port in serial_port_list:
serial_port_item = MenuItem(serial_port)
index = serial_port_list.index(serial_port)
args = {'serial_port' : index}
serial_port_item.setCommand('choose_serial_port')
serial_port_item.setArgs(args)
serial_port_item.setCheckbox()
serial_port_menu.addMenuItem(serial_port_item)
return serial_port_menu
def buildLineEndingMenu(language):
line_ending_menu = MenuItem(language.translate('Line Ending'))
for line_ending_caption in constant.line_ending_caption_list:
sub_menu = MenuItem(line_ending_caption)
line_ending_caption_id = constant.line_ending_caption_list.index(line_ending_caption)
args = {'line_ending' : line_ending_caption_id}
sub_menu.setCommand('choose_line_ending')
sub_menu.setArgs(args)
sub_menu.setCheckbox()
line_ending_menu.addMenuItem(sub_menu)
return line_ending_menu
def buildDisplayModeMenu(language):
display_mode_menu = MenuItem(language.translate('Display as'))
for display_mode in constant.display_mode_list:
sub_menu = MenuItem(display_mode)
display_mode_id = constant.display_mode_list.index(display_mode)
args = {'display_mode' : display_mode_id}
sub_menu.setCommand('choose_display_mode')
sub_menu.setArgs(args)
sub_menu.setCheckbox()
display_mode_menu.addMenuItem(sub_menu)
return display_mode_menu
def buildBaudrateMenu(language):
baudrate_menu = MenuItem(language.translate('Baudrate'))
for baudrate in constant.baudrate_list:
sub_menu = MenuItem(baudrate)
baudrate_id = constant.baudrate_list.index(baudrate)
args = {'baudrate' : baudrate_id}
sub_menu.setCommand('choose_baudrate')
sub_menu.setArgs(args)
sub_menu.setCheckbox()
baudrate_menu.addMenuItem(sub_menu)
return baudrate_menu
def buildSerialMonitorMenu(language):
serial_monitor_menu = MenuItem(language.translate('Serial Monitor'))
start_menu = MenuItem(language.translate('Start'))
start_menu.setCommand('start_serial_monitor')
stop_menu = MenuItem(language.translate('Stop'))
stop_menu.setCommand('stop_serial_monitor')
send_menu = MenuItem(language.translate('Send'))
send_menu.setCommand('send_serial_text')
line_ending_menu = buildLineEndingMenu(language)
display_mode_menu = buildDisplayModeMenu(language)
baudrate_menu = buildBaudrateMenu(language)
serial_monitor_menu.addMenuItem(start_menu)
serial_monitor_menu.addMenuItem(stop_menu)
serial_monitor_menu.addMenuItem(send_menu)
serial_monitor_menu.addMenuItem(baudrate_menu)
serial_monitor_menu.addMenuItem(line_ending_menu)
serial_monitor_menu.addMenuItem(display_mode_menu)
return serial_monitor_menu
def buildLanguageMenu(language):
language_menu = MenuItem(language.translate('Language'))
language_item_list = language.getLanguageItemList()
for language_item in language_item_list:
caption = language_item.getCaption()
language_menu_item = MenuItem(caption)
index = language_item_list.index(language_item)
args = {'language' : index}
language_menu_item.setCommand('choose_language')
language_menu_item.setArgs(args)
language_menu_item.setCheckbox()
language_menu.addMenuItem(language_menu_item)
return language_menu
def buildSettingMenu(language):
setting_menu = MenuItem(language.translate('Preferences'))
select_arduino_folder_menu = MenuItem(language.translate('Select Arduino Application Folder'))
select_arduino_folder_menu.setCommand('choose_arduino_folder')
change_sketchbook_folder_menu = MenuItem(language.translate('Change Sketchbook Folder'))
change_sketchbook_folder_menu.setCommand('change_sketchbook_folder')
change_build_folder_menu = MenuItem(language.translate('Select Build Folder'))
change_build_folder_menu.setCommand('choose_build_folder')
language_menu = buildLanguageMenu(language)
setting_menu.addMenuItem(select_arduino_folder_menu)
setting_menu.addMenuItem(change_sketchbook_folder_menu)
setting_menu.addMenuItem(change_build_folder_menu)
setting_menu.addMenuItem(language_menu)
return setting_menu
def buildReferenceMenu(language):
references_menu = MenuItem(language.translate('References'))
getting_started_menu = MenuItem(language.translate('Getting Started'))
getting_started_menu.setCommand('open_ref')
args = {'url': 'Guide_index'}
getting_started_menu.setArgs(args)
troubleshooting_menu = MenuItem(language.translate('Troubleshooting'))
troubleshooting_menu.setCommand('open_ref')
args = {'url': 'Guide_Troubleshooting'}
troubleshooting_menu.setArgs(args)
ref_menu = MenuItem(language.translate('Reference'))
ref_menu.setCommand('open_ref')
args = {'url': 'index'}
ref_menu.setArgs(args)
find_menu = MenuItem(language.translate('Find in Reference'))
find_menu.setCommand('find_in_reference')
faq_menu = MenuItem(language.translate('Frequently Asked Questions'))
faq_menu.setCommand('open_ref')
args = {'url': 'FAQ'}
faq_menu.setArgs(args)
website_menu = MenuItem(language.translate('Visit Arduino Website'))
website_menu.setCommand('open_url')
args = {'url': 'http://arduino.cc'}
website_menu.setArgs(args)
references_menu.addMenuItem(getting_started_menu)
references_menu.addMenuItem(troubleshooting_menu)
references_menu.addMenuItem(ref_menu)
references_menu.addMenuItem(find_menu)
references_menu.addMenuItem(faq_menu)
references_menu.addMenuItem(website_menu)
return references_menu
def buildSketchMenuGroup(language, arduino_info):
new_sketch_menu = MenuItem(language.translate('New Sketch'))
new_sketch_menu.setCommand('new_sketch')
sketch_menu_group = MenuItemGroup()
sketchbook_menu = buildSketchbookMenu(language, arduino_info)
examples_menu = buildExampleMenu(language, arduino_info)
sketch_menu_group.addMenuItem(new_sketch_menu)
sketch_menu_group.addMenuItem(sketchbook_menu)
sketch_menu_group.addMenuItem(examples_menu)
return sketch_menu_group
def buildLibraryMenuGroup(language, arduino_info):
library_menu_group = MenuItemGroup()
import_lib_menu = buildLibraryMenu(language, arduino_info)
show_sketch_folder_menu = MenuItem(language.translate('Show Sketch Folder'))
show_sketch_folder_menu.setCommand('show_sketch_folder')
library_menu_group.addMenuItem(import_lib_menu)
library_menu_group.addMenuItem(show_sketch_folder_menu)
return library_menu_group
def buildDebugMenuGroup(language):
debug_menu_group = MenuItemGroup()
extra_flag_menu = MenuItem(language.translate('Extra Flags'))
extra_flag_menu.setCommand('set_extra_flag')
compile_menu = MenuItem(language.translate('Verify/Compile'))
compile_menu.setCommand('compile_sketch')
upload_menu = MenuItem(language.translate('Upload'))
upload_menu.setCommand('upload_sketch')
programmer_upload_menu = MenuItem(language.translate('Upload by Using Programmer'))
programmer_upload_menu.setCommand('upload_using_programmer')
debug_menu_group.addMenuItem(extra_flag_menu)
debug_menu_group.addMenuItem(compile_menu)
debug_menu_group.addMenuItem(upload_menu)
debug_menu_group.addMenuItem(programmer_upload_menu)
return debug_menu_group
def buildBoardMenuGroup(arduino_info):
board_menu_group = MenuItemGroup()
board_menu_list = buildBoardMenuList(arduino_info)
board_option_menu_list = buildBoardOptionMenuList(arduino_info)
sub_menu_list = board_menu_list + board_option_menu_list
for sub_menu in sub_menu_list:
board_menu_group.addMenuItem(sub_menu)
return board_menu_group
def buildProgrammerMenuGroup(language, arduino_info):
programmer_menu_group = MenuItemGroup()
programmer_menu = buildProgrammerMenu(language, arduino_info)
programmer_menu_group.addMenuItem(programmer_menu)
burn_bootloader_menu = MenuItem(language.translate('Burn Bootloader'))
burn_bootloader_menu.setCommand('burn_bootloader')
programmer_menu_group.addMenuItem(burn_bootloader_menu)
return programmer_menu_group
def buildSerialMenuGroup(language):
serial_menu_group = MenuItemGroup()
serial_port_menu = buildSerialPortMenu(language)
serial_monitor_menu = buildSerialMonitorMenu(language)
serial_menu_group.addMenuItem(serial_port_menu)
serial_menu_group.addMenuItem(serial_monitor_menu)
return serial_menu_group
def buildToolsMenuGroup(language):
tools_menu_group = MenuItemGroup()
auto_format_menu = MenuItem(language.translate('Auto Format'))
auto_format_menu.setCommand('auto_format')
archive_sketch_menu = MenuItem(language.translate('Archive Sketch'))
archive_sketch_menu.setCommand('archive_sketch')
tools_menu_group.addMenuItem(auto_format_menu)
tools_menu_group.addMenuItem(archive_sketch_menu)
return tools_menu_group
def buildSettingMenuGroup(language):
setting_menu_group = MenuItemGroup()
setting_menu = buildSettingMenu(language)
global_setting_menu = MenuItem(language.translate('Global Setting'))
global_setting_menu.setCommand('set_global_setting')
global_setting_menu.setCheckbox()
full_compilation_menu = MenuItem(language.translate('Full Compilation'))
full_compilation_menu.setCommand('set_full_compilation')
full_compilation_menu.setCheckbox()
bare_gcc_only_menu = MenuItem(language.translate('Bare GCC Build (No Arduino code-munging)'))
bare_gcc_only_menu.setCommand('set_bare_gcc_only')
bare_gcc_only_menu.setCheckbox()
show_compilation_menu = MenuItem(language.translate('Compilation'))
show_compilation_menu.setCommand('show_compilation_output')
show_compilation_menu.setCheckbox()
show_upload_menu = MenuItem(language.translate('Upload'))
show_upload_menu.setCommand('show_upload_output')
show_upload_menu.setCheckbox()
show_verbose_output_menu = MenuItem(language.translate('Show Verbose Output'))
show_verbose_output_menu.addMenuItem(show_compilation_menu)
show_verbose_output_menu.addMenuItem(show_upload_menu)
verify_code_menu = MenuItem(language.translate('Verify Code after Upload'))
verify_code_menu.setCommand('verify_code')
verify_code_menu.setCheckbox()
setting_menu_group.addMenuItem(setting_menu)
setting_menu_group.addMenuItem(global_setting_menu)
setting_menu_group.addMenuItem(bare_gcc_only_menu)
setting_menu_group.addMenuItem(full_compilation_menu)
setting_menu_group.addMenuItem(show_verbose_output_menu)
setting_menu_group.addMenuItem(verify_code_menu)
return setting_menu_group
def buildHelpMenuGroup(language):
help_menu_group = MenuItemGroup()
references_menu = buildReferenceMenu(language)
about_menu = MenuItem(language.translate('About Stino'))
about_menu.setCommand('about_stino')
help_menu_group.addMenuItem(references_menu)
help_menu_group.addMenuItem(about_menu)
return help_menu_group
# Build Main Menu
def buildPreferenceMenu(language):
preference_menu = MenuItem('Preferences')
preference_menu.setMnemonic('n')
show_arduino_menu = MenuItem(language.translate('Show Arduino Menu'))
show_arduino_menu.setCommand('show_arduino_menu')
show_arduino_menu.setCheckbox()
preference_menu.addMenuItem(show_arduino_menu)
return preference_menu
def buildArduinoMenu(language, arduino_info):
arduino_menu = MenuItem('Arduino')
sketch_menu_group = buildSketchMenuGroup(language, arduino_info)
library_menu_group = buildLibraryMenuGroup(language, arduino_info)
debug_menu_group = buildDebugMenuGroup(language)
board_menu_group = buildBoardMenuGroup(arduino_info)
programmer_menu_group = buildProgrammerMenuGroup(language, arduino_info)
serial_menu_group = buildSerialMenuGroup(language)
tools_menu_group = buildToolsMenuGroup(language)
setting_menu_group = buildSettingMenuGroup(language)
help_menu_group = buildHelpMenuGroup(language)
arduino_menu.addMenuGroup(sketch_menu_group)
arduino_menu.addMenuGroup(library_menu_group)
arduino_menu.addMenuGroup(debug_menu_group)
arduino_menu.addMenuGroup(board_menu_group)
arduino_menu.addMenuGroup(programmer_menu_group)
arduino_menu.addMenuGroup(serial_menu_group)
arduino_menu.addMenuGroup(tools_menu_group)
arduino_menu.addMenuGroup(setting_menu_group)
arduino_menu.addMenuGroup(help_menu_group)
return arduino_menu
def buildMainMenu(language, arduino_info):
main_menu = MenuItem('Main Menu')
show_arduino_menu = constant.global_settings.get('show_arduino_menu', True)
preference_menu = buildPreferenceMenu(language)
main_menu.addMenuItem(preference_menu)
if show_arduino_menu:
arduino_menu = buildArduinoMenu(language, arduino_info)
main_menu.addMenuItem(arduino_menu)
return main_menu
def convertMenuToData(cur_menu, level = 0):
caption = cur_menu.getCaption()
sub_menu_list = cur_menu.getSubmenu()
if sub_menu_list:
sub_data_list = []
for sub_menu in sub_menu_list:
sub_data = convertMenuToData(sub_menu, level + 1)
if sub_data:
sub_data_list.append(sub_data)
if level > 0:
menu_id = cur_menu.getId()
menu_mnemonic = cur_menu.getMnemonic()
data = {}
data['caption'] = caption
data['id'] = menu_id
data['mnemonic'] = menu_mnemonic
data['children'] = sub_data_list
else:
data = sub_data_list
else:
data = {}
command = cur_menu.getCommand()
if command or caption == '-':
args = cur_menu.getArgs()
checkbox = cur_menu.getCheckbox()
data['caption'] = caption
data['command'] = command
if args:
data['args'] = args
if checkbox:
data['checkbox'] = checkbox
return data
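# Illustrative sketch (not part of the original module): the dictionaries
# produced by convertMenuToData() mirror Sublime Text's menu JSON. For a
# hypothetical submenu the result would look roughly like:
#
#   {
#       "caption": "Baudrate",
#       "id": "<menu id>",
#       "mnemonic": "<mnemonic or None>",
#       "children": [
#           {"caption": "9600", "command": "choose_baudrate",
#            "args": {"baudrate": 3}, "checkbox": True},
#       ],
#   }
#
# A top-level call (level == 0) returns a plain list of such dictionaries
# instead of wrapping them in a parent entry.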
|
|
import os
import stat
from typing import Union, Optional, AnyStr, Iterator, Tuple
from ._root import NO_ROOT, Root
from ..types import AnyPath
class PathAdapter(object):
"""A recreation of os.DirEntry which can be constructed from a path"""
def __init__(self, path):
if isinstance(path, (bytes, str, os.PathLike)):
self.path = os.fspath(path)
self.dirname, self.name = os.path.split(self.path)
else:
self.dirname, self.name = path
self.path = os.path.join(self.dirname, self.name)
self._stat = { }
def __str__(self):
return str(self.path)
def __repr__(self):
return "%s(%r)" % (type(self).__name__, self.path)
def __fspath__(self):
return self.path
def inode(self):
return self.stat(follow_symlinks=False).st_ino
def is_dir(self, *, follow_symlinks=True):
return stat.S_ISDIR(self.stat(follow_symlinks=follow_symlinks).st_mode)
def is_file(self, *, follow_symlinks=True):
return stat.S_ISREG(self.stat(follow_symlinks=follow_symlinks).st_mode)
def is_symlink(self):
return stat.S_ISLNK(self.stat(follow_symlinks=False).st_mode)
def stat(self, *, follow_symlinks=True):
follow = bool(follow_symlinks)
if follow in self._stat:
return self._stat[follow]
result = os.stat(self.path, follow_symlinks=follow)
self._stat[follow] = result
return result
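# Usage sketch (illustrative, not part of the original module): a PathAdapter
# can be built either from a single path or from a (dirname, name) pair, and
# it caches stat() results per follow_symlinks flag, much like os.DirEntry:
#
#   entry = PathAdapter("/tmp/example.txt")        # from a full path
#   entry = PathAdapter(("/tmp", "example.txt"))   # from (dirname, name)
#   entry.is_file()                                # stat() called once, then cached
#   entry.stat().st_size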
DirEntryLike = Union[os.DirEntry, PathAdapter]
# Similar in spirit to pathlib.Path, but the needs of this app go a little
# beyond what it provides, and it is hard (arguably impossible) to subclass,
# so this is a partial rewrite of the functionality we need, with extra
# features added on. It benefits from the caching behavior that os.DirEntry
# offers and can use one as a delegate; otherwise it can be constructed from
# a plain path. Use the from_* classmethods to create instances.
class FileEntry(os.PathLike):
def __init__(self, dirname: AnyPath, resource: DirEntryLike, root=NO_ROOT):
if not isinstance(resource, (os.DirEntry, PathAdapter)):
raise TypeError("resource must be an instance of os.DirEntry or PathAdapter")
self._dirname = os.fspath(dirname)
self._resource = resource
self._splitext: Optional[Tuple[AnyStr, AnyStr]] = None
self._root: Root = NO_ROOT if root is None else root
self._parent: Optional['FileEntry'] = None
def __str__(self) -> str:
return str(self._resource.path)
def __repr__(self) -> str:
return "%s(%r, %r, %r)" % (
type(self).__name__,
self._dirname,
self._resource,
self._root
)
def __fspath__(self) -> AnyStr:
return self._resource.path
def __hash__(self):
return (
hash(self._dirname) ^
hash(self._resource) ^
hash(self._root)
)
def __eq__(self, other) -> bool:
if not isinstance(other, FileEntry):
return NotImplemented
return (
self._dirname == other._dirname and
self._resource == other._resource and
self._root == other._root
)
def _copy_with_path(self, path) -> 'FileEntry':
return FileEntry.from_path(path, self._root)
@classmethod
def from_path(cls, path, root=NO_ROOT) -> 'FileEntry':
resource = PathAdapter(path)
return cls(resource.dirname, resource, root)
@classmethod
def from_dir_and_name(cls, parent, name, root=NO_ROOT) -> 'FileEntry':
return cls(parent, PathAdapter((parent, name)), root)
@classmethod
def from_dir_entry(cls, parent, entry, root=NO_ROOT) -> 'FileEntry':
return cls(parent, entry, root)
@property
def path(self) -> AnyStr:
return self._resource.path
@property
def root(self) -> Root:
return self._root
@property
def parent(self) -> 'FileEntry':
if self._parent is None:
self._parent = self._copy_with_path(self._dirname)
return self._parent
def join(self, name: AnyPath) -> 'FileEntry':
name = os.fspath(name)
joined = self._copy_with_path(os.path.join(self._resource.path, name))
if (
name != os.curdir and
name != os.pardir and
os.sep not in name and (
os.altsep is None or
os.altsep not in name
)
):
joined._parent = self
return joined
def dir_content(self) -> Iterator['FileEntry']:
with os.scandir(self._resource.path) as entries:
for entry in entries:
child_obj = FileEntry.from_dir_entry(
self._resource.path,
entry,
self._root
)
child_obj._parent = self
yield child_obj
def __truediv__(self, rhs) -> 'FileEntry':
if isinstance(rhs, (str, bytes, os.PathLike)):
return self.join(rhs)
return NotImplemented
def __rtruediv__(self, lhs) -> 'FileEntry':
return self._copy_with_path(os.path.join(os.fspath(lhs), self._resource.path))
@property
def basename(self) -> AnyStr:
return self._resource.name
@property
def dirname(self) -> AnyStr:
return self._dirname
@property
def barename(self) -> AnyStr:
if self._splitext is None:
self._splitext = os.path.splitext(self._resource.name)
return self._splitext[0]
@property
def extension(self) -> AnyStr:
if self._splitext is None:
self._splitext = os.path.splitext(self._resource.name)
return self._splitext[1]
@property
def stat(self) -> os.stat_result:
return self._resource.stat()
@property
def size(self):
return self.stat.st_size
@property
def atime(self):
return self._resource.stat().st_atime
@property
def ctime(self):
return self._resource.stat().st_ctime
@property
def mtime(self):
return self._resource.stat().st_mtime
@property
def is_file(self) -> bool:
return self._resource.is_file()
@property
def is_dir(self) -> bool:
return self._resource.is_dir()
@property
def is_symlink(self) -> bool:
return self._resource.is_symlink()
@property
def inode(self):
return self._resource.inode()
@property
def dev(self):
return self._resource.stat(follow_symlinks=False).st_dev
@property
def uid(self):
inode = self._resource.inode()
dev = self._resource.stat(follow_symlinks=False).st_dev
# quirk: per documentation, on windows, the stat returned from os.DirEntry.stat() has
# st_ino and st_dev set to 0
if inode == 0 or dev == 0:
current_stat = os.stat(self._resource.path, follow_symlinks=False)
inode = current_stat.st_ino
dev = current_stat.st_dev
return dev, inode
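# Usage sketch (illustrative, not part of the original module):
#
#   root_entry = FileEntry.from_path("/var/log")
#   for child in root_entry.dir_content():
#       if child.is_file and child.extension == ".log":
#           print(child.basename, child.size)
#
#   # The "/" operator joins a path segment; when the segment contains no
#   # separators, the child keeps a direct link back to its parent:
#   sub = root_entry / "nginx"
#   assert sub.parent is root_entry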
|
|
"""Support for Vallox ventilation units."""
from datetime import timedelta
import ipaddress
import logging
from vallox_websocket_api import PROFILE as VALLOX_PROFILE, Vallox
from vallox_websocket_api.constants import vlxDevConstants
from vallox_websocket_api.exceptions import ValloxApiException
import voluptuous as vol
from homeassistant.const import CONF_HOST, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
_LOGGER = logging.getLogger(__name__)
DOMAIN = "vallox"
DEFAULT_NAME = "Vallox"
SIGNAL_VALLOX_STATE_UPDATE = "vallox_state_update"
SCAN_INTERVAL = timedelta(seconds=60)
# Various metric keys that are reused between profiles.
METRIC_KEY_MODE = "A_CYC_MODE"
METRIC_KEY_PROFILE_FAN_SPEED_HOME = "A_CYC_HOME_SPEED_SETTING"
METRIC_KEY_PROFILE_FAN_SPEED_AWAY = "A_CYC_AWAY_SPEED_SETTING"
METRIC_KEY_PROFILE_FAN_SPEED_BOOST = "A_CYC_BOOST_SPEED_SETTING"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): vol.All(ipaddress.ip_address, cv.string),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
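# Example configuration.yaml entry accepted by CONFIG_SCHEMA above (the host
# address is a placeholder):
#
#   vallox:
#     host: 192.168.1.50
#     name: Ventilation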
# pylint: disable=no-member
PROFILE_TO_STR_SETTABLE = {
VALLOX_PROFILE.HOME: "Home",
VALLOX_PROFILE.AWAY: "Away",
VALLOX_PROFILE.BOOST: "Boost",
VALLOX_PROFILE.FIREPLACE: "Fireplace",
}
STR_TO_PROFILE = {v: k for (k, v) in PROFILE_TO_STR_SETTABLE.items()}
# pylint: disable=no-member
PROFILE_TO_STR_REPORTABLE = {
**{VALLOX_PROFILE.NONE: "None", VALLOX_PROFILE.EXTRA: "Extra"},
**PROFILE_TO_STR_SETTABLE,
}
ATTR_PROFILE = "profile"
ATTR_PROFILE_FAN_SPEED = "fan_speed"
SERVICE_SCHEMA_SET_PROFILE = vol.Schema(
{vol.Required(ATTR_PROFILE): vol.All(cv.string, vol.In(STR_TO_PROFILE))}
)
SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED = vol.Schema(
{
vol.Required(ATTR_PROFILE_FAN_SPEED): vol.All(
vol.Coerce(int), vol.Clamp(min=0, max=100)
)
}
)
SERVICE_SET_PROFILE = "set_profile"
SERVICE_SET_PROFILE_FAN_SPEED_HOME = "set_profile_fan_speed_home"
SERVICE_SET_PROFILE_FAN_SPEED_AWAY = "set_profile_fan_speed_away"
SERVICE_SET_PROFILE_FAN_SPEED_BOOST = "set_profile_fan_speed_boost"
SERVICE_TO_METHOD = {
SERVICE_SET_PROFILE: {
"method": "async_set_profile",
"schema": SERVICE_SCHEMA_SET_PROFILE,
},
SERVICE_SET_PROFILE_FAN_SPEED_HOME: {
"method": "async_set_profile_fan_speed_home",
"schema": SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED,
},
SERVICE_SET_PROFILE_FAN_SPEED_AWAY: {
"method": "async_set_profile_fan_speed_away",
"schema": SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED,
},
SERVICE_SET_PROFILE_FAN_SPEED_BOOST: {
"method": "async_set_profile_fan_speed_boost",
"schema": SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED,
},
}
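# Example service call routed through SERVICE_TO_METHOD (values are
# placeholders):
#
#   service: vallox.set_profile
#   data:
#     profile: Boost
#
# ValloxServiceHandler.async_handle() looks up the service name and forwards
# the validated data to the corresponding async_set_* method.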
DEFAULT_FAN_SPEED_HOME = 50
DEFAULT_FAN_SPEED_AWAY = 25
DEFAULT_FAN_SPEED_BOOST = 65
async def async_setup(hass, config):
"""Set up the client and boot the platforms."""
conf = config[DOMAIN]
host = conf.get(CONF_HOST)
name = conf.get(CONF_NAME)
client = Vallox(host)
state_proxy = ValloxStateProxy(hass, client)
service_handler = ValloxServiceHandler(client, state_proxy)
hass.data[DOMAIN] = {"client": client, "state_proxy": state_proxy, "name": name}
for vallox_service in SERVICE_TO_METHOD:
schema = SERVICE_TO_METHOD[vallox_service]["schema"]
hass.services.async_register(
DOMAIN, vallox_service, service_handler.async_handle, schema=schema
)
# The Vallox hardware expects quite strict timings for websocket requests,
# timings that machines with less processing power, like a Raspberry Pi,
# cannot live up to during the busy start phase of Home Assistant. Hence,
# async_add_entities() for fan and sensor in the respective platform code
# is called with update_before_add=False to intentionally delay the first
# request, increasing the chance that it is issued only when the machine
# is less busy again.
hass.async_create_task(async_load_platform(hass, "sensor", DOMAIN, {}, config))
hass.async_create_task(async_load_platform(hass, "fan", DOMAIN, {}, config))
async_track_time_interval(hass, state_proxy.async_update, SCAN_INTERVAL)
return True
class ValloxStateProxy:
"""Helper class to reduce websocket API calls."""
def __init__(self, hass, client):
"""Initialize the proxy."""
self._hass = hass
self._client = client
self._metric_cache = {}
self._profile = None
self._valid = False
def fetch_metric(self, metric_key):
"""Return cached state value."""
_LOGGER.debug("Fetching metric key: %s", metric_key)
if not self._valid:
raise OSError("Device state out of sync.")
if metric_key not in vlxDevConstants.__dict__:
raise KeyError(f"Unknown metric key: {metric_key}")
return self._metric_cache[metric_key]
def get_profile(self):
"""Return cached profile value."""
_LOGGER.debug("Returning profile")
if not self._valid:
raise OSError("Device state out of sync.")
return PROFILE_TO_STR_REPORTABLE[self._profile]
async def async_update(self, event_time):
"""Fetch state update."""
_LOGGER.debug("Updating Vallox state cache")
try:
self._metric_cache = await self._client.fetch_metrics()
self._profile = await self._client.get_profile()
self._valid = True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error during state cache update: %s", err)
self._valid = False
async_dispatcher_send(self._hass, SIGNAL_VALLOX_STATE_UPDATE)
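# Illustrative sketch (not part of this module): entities in the fan and
# sensor platforms typically subscribe to the signal dispatched above, for
# example in their async_added_to_hass(); _update_callback is a hypothetical
# entity method:
#
#   from homeassistant.helpers.dispatcher import async_dispatcher_connect
#
#   async_dispatcher_connect(
#       self.hass, SIGNAL_VALLOX_STATE_UPDATE, self._update_callback
#   )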
class ValloxServiceHandler:
"""Services implementation."""
def __init__(self, client, state_proxy):
"""Initialize the proxy."""
self._client = client
self._state_proxy = state_proxy
async def async_set_profile(self, profile: str = "Home") -> bool:
"""Set the ventilation profile."""
_LOGGER.debug("Setting ventilation profile to: %s", profile)
try:
await self._client.set_profile(STR_TO_PROFILE[profile])
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting ventilation profile: %s", err)
return False
async def async_set_profile_fan_speed_home(
self, fan_speed: int = DEFAULT_FAN_SPEED_HOME
) -> bool:
"""Set the fan speed in percent for the Home profile."""
_LOGGER.debug("Setting Home fan speed to: %d%%", fan_speed)
try:
await self._client.set_values(
{METRIC_KEY_PROFILE_FAN_SPEED_HOME: fan_speed}
)
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting fan speed for Home profile: %s", err)
return False
async def async_set_profile_fan_speed_away(
self, fan_speed: int = DEFAULT_FAN_SPEED_AWAY
) -> bool:
"""Set the fan speed in percent for the Home profile."""
_LOGGER.debug("Setting Away fan speed to: %d%%", fan_speed)
try:
await self._client.set_values(
{METRIC_KEY_PROFILE_FAN_SPEED_AWAY: fan_speed}
)
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting fan speed for Away profile: %s", err)
return False
async def async_set_profile_fan_speed_boost(
self, fan_speed: int = DEFAULT_FAN_SPEED_BOOST
) -> bool:
"""Set the fan speed in percent for the Boost profile."""
_LOGGER.debug("Setting Boost fan speed to: %d%%", fan_speed)
try:
await self._client.set_values(
{METRIC_KEY_PROFILE_FAN_SPEED_BOOST: fan_speed}
)
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting fan speed for Boost profile: %s", err)
return False
async def async_handle(self, service):
"""Dispatch a service call."""
method = SERVICE_TO_METHOD.get(service.service)
params = service.data.copy()
if not hasattr(self, method["method"]):
_LOGGER.error("Service not implemented: %s", method["method"])
return
result = await getattr(self, method["method"])(**params)
# Force state_proxy to refresh device state, so that updates are
# propagated to platforms.
if result:
await self._state_proxy.async_update(None)
|
|
#------------------------------------------------------------------------------
# Copyright 2016, 2017, Oracle and/or its affiliates. All rights reserved.
#
# Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved.
#
# Portions Copyright 2001-2007, Computronix (Canada) Ltd., Edmonton, Alberta,
# Canada. All rights reserved.
#------------------------------------------------------------------------------
"""Module for testing connections."""
import random
import threading
class TestConnection(TestCase):
def __ConnectAndDrop(self):
"""Connect to the database, perform a query and drop the connection."""
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry, threaded = True)
cursor = connection.cursor()
cursor.execute(u"select count(*) from TestNumbers")
count, = cursor.fetchone()
self.assertEqual(count, 10)
def __VerifyAttributes(self, connection, attrName, value, sql):
setattr(connection, attrName, value)
cursor = connection.cursor()
cursor.execute(sql)
result, = cursor.fetchone()
self.assertEqual(result, value, "%s value mismatch" % attrName)
def setUp(self):
self.username = USERNAME
self.password = PASSWORD
self.tnsentry = TNSENTRY
def verifyArgs(self, connection):
self.assertEqual(connection.username, self.username,
"user name differs")
self.assertEqual(connection.tnsentry, self.tnsentry,
"tnsentry differs")
self.assertEqual(connection.dsn, self.tnsentry, "dsn differs")
def testAllArgs(self):
"connection to database with user, password, TNS separate"
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry)
self.verifyArgs(connection)
def testAppContext(self):
"test use of application context"
namespace = "CLIENTCONTEXT"
appContextEntries = [
( namespace, "ATTR1", "VALUE1" ),
( namespace, "ATTR2", "VALUE2" ),
( namespace, "ATTR3", "VALUE3" )
]
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry, appcontext = appContextEntries)
cursor = connection.cursor()
for namespace, name, value in appContextEntries:
cursor.execute("select sys_context(:1, :2) from dual",
(namespace, name))
actualValue, = cursor.fetchone()
self.assertEqual(actualValue, value)
def testAppContextNegative(self):
"test invalid use of application context"
self.assertRaises(TypeError, cx_Oracle.connect, self.username,
self.password, self.tnsentry,
appcontext = [('userenv', 'action')])
def testAttributes(self):
"test connection end-to-end tracing attributes"
connection = cx_Oracle.connect(USERNAME, PASSWORD, TNSENTRY)
if CLIENT_VERSION >= (12, 1):
self.__VerifyAttributes(connection, "dbop", "cx_OracleTest_DBOP",
"select dbop_name from v$sql_monitor "
"where sid = sys_context('userenv', 'sid')")
self.__VerifyAttributes(connection, "action", "cx_OracleTest_Action",
"select sys_context('userenv', 'action') from dual")
self.__VerifyAttributes(connection, "module", "cx_OracleTest_Module",
"select sys_context('userenv', 'module') from dual")
self.__VerifyAttributes(connection, "clientinfo",
"cx_OracleTest_CInfo",
"select sys_context('userenv', 'client_info') from dual")
self.__VerifyAttributes(connection, "client_identifier",
"cx_OracleTest_CID",
"select sys_context('userenv', 'client_identifier') from dual")
def testAutoCommit(self):
"test use of autocommit"
connection = cx_Oracle.connect(USERNAME, PASSWORD, TNSENTRY)
cursor = connection.cursor()
otherConnection = cx_Oracle.connect(USERNAME, PASSWORD, TNSENTRY)
otherCursor = otherConnection.cursor()
cursor.execute("truncate table TestTempTable")
cursor.execute("""
insert into TestTempTable (IntCol, StringCol)
values (1, null)""")
otherCursor.execute("select * from TestTempTable")
rows = otherCursor.fetchall()
self.assertEqual(rows, [])
connection.autocommit = True
cursor.execute("""
insert into TestTempTable (IntCol, StringCol)
values (2, null)""")
otherCursor.execute("select * from TestTempTable order by IntCol")
rows = otherCursor.fetchall()
self.assertEqual(rows, [(1, None), (2, None)])
def testBadConnectString(self):
"connection to database with bad connect string"
self.assertRaises(cx_Oracle.DatabaseError, cx_Oracle.connect,
self.username)
self.assertRaises(cx_Oracle.DatabaseError, cx_Oracle.connect,
self.username + u"@" + self.tnsentry)
self.assertRaises(cx_Oracle.DatabaseError, cx_Oracle.connect,
self.username + "@" + self.tnsentry + "/" + self.password)
def testBadPassword(self):
"connection to database with bad password"
self.assertRaises(cx_Oracle.DatabaseError, cx_Oracle.connect,
self.username, self.password + u"X", self.tnsentry)
def testChangePassword(self):
"test changing password"
newPassword = "NEW_PASSWORD"
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry)
connection.changepassword(self.password, newPassword)
newPasswordConnection = cx_Oracle.connect(self.username, newPassword,
self.tnsentry)
connection.changepassword(newPassword, self.password)
def testChangePasswordNegative(self):
"test changing password to an invalid value"
newPassword = "1" * 150
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry)
self.assertRaises(cx_Oracle.DatabaseError, connection.changepassword,
self.password, newPassword)
def testEncodings(self):
"connection with only encoding or nencoding specified should work"
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry)
encoding = connection.encoding
nencoding = connection.nencoding
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry, encoding = "UTF-8")
self.assertEqual(connection.encoding, "UTF-8")
self.assertEqual(connection.nencoding, nencoding)
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry, nencoding = "UTF-8")
self.assertEqual(connection.encoding, encoding)
self.assertEqual(connection.nencoding, "UTF-8")
def testDifferentEncodings(self):
"connection with different encoding and nencoding specified"
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry, encoding = "UTF-8", nencoding = "UTF-16")
value = u"\u03b4\u4e2a"
cursor = connection.cursor()
ncharVar = cursor.var(cx_Oracle.NCHAR, 100)
ncharVar.setvalue(0, value)
cursor.execute("select :value from dual", value = ncharVar)
result, = cursor.fetchone()
self.assertEqual(result, value)
def testExceptionOnClose(self):
"confirm an exception is raised after closing a connection"
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry)
connection.close()
self.assertRaises(cx_Oracle.DatabaseError, connection.rollback)
def testConnectWithHandle(self):
"test creating a connection using a handle"
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry)
cursor = connection.cursor()
cursor.execute("truncate table TestTempTable")
intValue = random.randint(1, 32768)
cursor.execute("insert into TestTempTable values (:val, null)",
val = intValue)
connection2 = cx_Oracle.connect(handle = connection.handle)
cursor = connection2.cursor()
cursor.execute("select IntCol from TestTempTable")
fetchedIntValue, = cursor.fetchone()
self.assertEqual(fetchedIntValue, intValue)
cursor.close()
self.assertRaises(cx_Oracle.DatabaseError, connection2.close)
connection.close()
cursor = connection2.cursor()
self.assertRaises(cx_Oracle.DatabaseError, cursor.execute,
"select count(*) from TestTempTable")
def testMakeDSN(self):
"test making a data source name from host, port and sid"
formatString = u"(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)" + \
"(HOST=%s)(PORT=%d))(CONNECT_DATA=(SID=%s)))"
args = ("hostname", 1521, "TEST")
result = cx_Oracle.makedsn(*args)
self.assertEqual(result, formatString % args)
args = (u"hostname", 1521, u"TEST")
result = cx_Oracle.makedsn(*args)
self.assertEqual(result, formatString % args)
def testSingleArg(self):
"connection to database with user, password, TNS together"
connection = cx_Oracle.connect("%s/%s@%s" % \
(self.username, self.password, self.tnsentry))
self.verifyArgs(connection)
def testVersion(self):
"connection version is a string"
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry)
self.assertTrue(isinstance(connection.version, str))
def testRollbackOnClose(self):
"connection rolls back before close"
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry)
cursor = connection.cursor()
cursor.execute("truncate table TestTempTable")
otherConnection = cx_Oracle.connect(self.username, self.password,
self.tnsentry)
otherCursor = otherConnection.cursor()
otherCursor.execute("insert into TestTempTable (IntCol) values (1)")
otherCursor.close()
otherConnection.close()
cursor.execute("select count(*) from TestTempTable")
count, = cursor.fetchone()
self.assertEqual(count, 0)
def testRollbackOnDel(self):
"connection rolls back before destruction"
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry)
cursor = connection.cursor()
cursor.execute("truncate table TestTempTable")
otherConnection = cx_Oracle.connect(self.username, self.password,
self.tnsentry)
otherCursor = otherConnection.cursor()
otherCursor.execute("insert into TestTempTable (IntCol) values (1)")
del otherCursor
del otherConnection
cursor.execute("select count(*) from TestTempTable")
count, = cursor.fetchone()
self.assertEqual(count, 0)
def testThreading(self):
"connection to database with multiple threads"
threads = []
for i in range(20):
thread = threading.Thread(None, self.__ConnectAndDrop)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
def testStringFormat(self):
"test string format of connection"
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry)
expectedValue = "<cx_Oracle.Connection to %s@%s>" % \
(self.username, self.tnsentry)
self.assertEqual(str(connection), expectedValue)
def testCtxMgrClose(self):
"test context manager - close"
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry)
cx_Oracle.__future__.ctx_mgr_close = True
try:
with connection:
cursor = connection.cursor()
cursor.execute("truncate table TestTempTable")
cursor.execute("insert into TestTempTable values (1, null)")
connection.commit()
cursor.execute("insert into TestTempTable values (2, null)")
finally:
cx_Oracle.__future__.ctx_mgr_close = False
self.assertRaises(cx_Oracle.DatabaseError, connection.ping)
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry)
cursor = connection.cursor()
cursor.execute("select count(*) from TestTempTable")
count, = cursor.fetchone()
self.assertEqual(count, 1)
def testCtxMgrCommitOnSuccess(self):
"test context manager - commit on success"
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry)
cursor = connection.cursor()
cursor.execute("truncate table TestTempTable")
with connection:
cursor.execute("""
insert into TestTempTable (IntCol, StringCol)
values (1, null)""")
connection.rollback()
cursor.execute("select count(*) from TestTempTable")
count, = cursor.fetchone()
self.assertEqual(count, 1)
def testCtxMgrRollbackOnFailure(self):
"test context manager - rollback on failure"
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry)
cursor = connection.cursor()
cursor.execute("truncate table TestTempTable")
cursor.execute("""
insert into TestTempTable (IntCol, StringCol)
values (1, null)""")
try:
with connection:
1 / 0
except:
pass
cursor.execute("select count(*) from TestTempTable")
count, = cursor.fetchone()
self.assertEqual(count, 0)
def testConnectionAttributes(self):
"test connection attribute values"
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry, encoding = "ASCII")
self.assertEqual(connection.maxBytesPerCharacter, 1)
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry, encoding = "UTF-8")
self.assertEqual(connection.maxBytesPerCharacter, 4)
if CLIENT_VERSION >= (12, 1):
self.assertEqual(connection.ltxid, b'')
self.assertEqual(connection.current_schema, None)
connection.current_schema = "test_schema"
self.assertEqual(connection.current_schema, "test_schema")
self.assertEqual(connection.edition, None)
connection.external_name = "test_external"
self.assertEqual(connection.external_name, "test_external")
connection.internal_name = "test_internal"
self.assertEqual(connection.internal_name, "test_internal")
connection.stmtcachesize = 30
self.assertEqual(connection.stmtcachesize, 30)
self.assertRaises(TypeError, setattr, connection, "stmtcachesize", 20.5)
self.assertRaises(TypeError, setattr, connection, "stmtcachesize", "value")
def testClosedConnectionAttributes(self):
"test closed connection attribute values"
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry)
connection.close()
attrNames = ["current_schema", "edition", "external_name",
"internal_name", "stmtcachesize"]
if CLIENT_VERSION >= (12, 1):
attrNames.append("ltxid")
for name in attrNames:
self.assertRaises(cx_Oracle.DatabaseError, getattr, connection,
name)
def testPing(self):
"test connection ping"
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry)
connection.ping()
def testTransactionBegin(self):
"test begin, prepare, cancel transaction"
connection = cx_Oracle.connect(self.username, self.password,
self.tnsentry)
cursor = connection.cursor()
cursor.execute("truncate table TestTempTable")
connection.begin(10, 'trxnId', 'branchId')
self.assertEqual(connection.prepare(), False)
connection.begin(10, 'trxnId', 'branchId')
cursor.execute("""
insert into TestTempTable (IntCol, StringCol)
values (1, 'testName')""")
self.assertEqual(connection.prepare(), True)
connection.cancel()
connection.rollback()
cursor.execute("select count(*) from TestTempTable")
count, = cursor.fetchone()
self.assertEqual(count, 0)
|
|
# Copyright 2016 Uri Laserson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import gzip
import json
import os
import re
import sys
from collections import Counter, OrderedDict
from functools import reduce
from glob import glob
from os import path as osp
from os.path import join as pjoin
from subprocess import PIPE, Popen
import numpy as np
import pandas as pd
from click import Choice, Path, command, group, option
from tqdm import tqdm
from phip.utils import DEFAULT_FDR, compute_size_factors, readfq
# handle gzipped or uncompressed files
def open_maybe_compressed(*args, **kwargs):
if args[0].endswith(".gz"):
# gzip modes are different from default open modes
if len(args[1]) == 1:
args = (args[0], args[1] + "t") + args[2:]
compresslevel = kwargs.pop("compresslevel", 6)
return gzip.open(*args, **kwargs, compresslevel=compresslevel)
else:
return open(*args, **kwargs)
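# Usage sketch (illustrative): the helper lets callers treat plain and
# gzip-compressed files the same way, e.g. together with readfq:
#
#   with open_maybe_compressed("reads.fastq.gz", "r") as ip:
#       for (name, seq, qual) in readfq(ip):
#           ...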
@group(context_settings={"help_option_names": ["-h", "--help"]})
def cli():
"""phip -- PhIP-seq analysis tools"""
pass
@cli.command(name="truncate-fasta")
@option(
"-i",
"--input",
required=True,
type=Path(exists=True, dir_okay=False),
help="input fasta",
)
@option("-o", "--output", required=True, type=Path(exists=False), help="output fasta")
@option(
"-k",
"--length",
required=True,
type=int,
help="length of starting subsequence to extract",
)
def truncate_fasta(input, output, length):
"""Truncate each sequence of a fasta file."""
with open(input, "r") as ip, open(output, "w") as op:
for (n, s, q) in readfq(ip):
print(f">{n}\n{s[:length]}", file=op)
@cli.command(name="merge-kallisto-tpm")
@option(
"-i",
"--input",
required=True,
type=Path(exists=True, file_okay=False),
help="input dir containing kallisto results",
)
@option("-o", "--output", required=True, type=Path(exists=False), help="output path")
def merge_kallisto_tpm(input, output):
"""Merge kallisto abundance results.
Input directory should contain sample-named subdirectories, each containing
an abundance.tsv file. This command will generate a single tab-delim
output file with each column containing the tpm values for that sample.
"""
samples = os.listdir(input)
iterators = [open(pjoin(input, s, "abundance.tsv"), "r") for s in samples]
with open(output, "w") as op:
it = zip(*iterators)
# burn headers of input files and write header of output file
_ = next(it)
print("id\t{}".format("\t".join(samples)), file=op)
for lines in it:
fields_array = [line.split("\t") for line in lines]
# check that join column is the same
assert all([fields[0] == fields_array[0][0] for fields in fields_array])
merged_fields = [fields_array[0][0]] + [f[4].strip() for f in fields_array]
print("\t".join(merged_fields), file=op)
@cli.command(name="gamma-poisson-model")
@option(
"-i",
"--input",
required=True,
type=Path(exists=True, dir_okay=False),
help="input counts file (tab-delim)",
)
@option(
"-o", "--output", required=True, type=Path(exists=False), help="output directory"
)
@option(
"-t",
"--trim-percentile",
default=99.9,
help="lower percent of data to keep for model fitting",
)
@option(
"-d", "--index-cols", default=1, help="number of columns to use as index/row-key"
)
def gamma_poisson_model(input, output, trim_percentile, index_cols):
"""Fit a gamma-poisson model.
Compute -log10(pval) for each (possibly-normalized) count.
"""
from phip.gampois import gamma_poisson_model as model
counts = pd.read_csv(input, sep="\t", header=0, index_col=list(range(index_cols)))
os.makedirs(output, exist_ok=True)
alpha, beta, rates, mlxp = model(counts, trim_percentile)
with open(pjoin(output, "parameters.json"), "w") as op:
json.dump(
{
"alpha": alpha,
"beta": beta,
"trim_percentile": trim_percentile,
"background_rates": list(rates),
},
op,
)
mlxp.to_csv(pjoin(output, "mlxp.tsv"), sep="\t", float_format="%.2f")
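# Example invocation (assuming the package's "phip" entry point; paths are
# placeholders):
#
#   phip gamma-poisson-model -i counts.tsv -o gampois -t 99.9 -d 1
#
# The output directory will contain parameters.json and mlxp.tsv.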
@cli.command(name="clipped-factorization-model")
@option(
"-i",
"--input",
required=True,
type=Path(exists=True, dir_okay=False),
help="input counts file (tab-delim)",
)
@option(
"-o",
"--output",
required=False,
type=Path(exists=False),
help="output file or directory. If ends in .tsv, will be treated as file",
)
@option(
"-d", "--index-cols", default=1, help="number of columns to use as index/row-key"
)
@option("--rank", default=3, show_default=True, help="matrix rank")
@option(
"--clip-percentile",
default=99.9,
show_default=True,
help="percentile thershold to clip at",
)
@option(
"--learning-rate",
default=1.0,
show_default=True,
help="learning rate for Adam optimizer",
)
@option(
"--minibatch-size", default=1024 * 32, show_default=True, help="rows per minibatch"
)
@option(
"--patience",
default=5,
show_default=True,
help="number of epochs of no improvement to wait before early stopping",
)
@option("--max-epochs", default=1000, show_default=True, help="maximum epochs")
@option(
"--discard-sample-reads-fraction",
default=0.01,
show_default=True,
help="Discard samples with fewer than X * m reads, where m is the median "
"number of reads across samples",
)
@option(
"--no-normalize-to-reads-per-million",
is_flag=True,
help="Work directly on read counts, not counts divided by sample totals",
)
@option(
"--log-every-seconds",
default=1,
show_default=True,
help="write progress no more often than every N seconds",
)
def clipped_factorization_model(
input,
output,
index_cols,
rank,
clip_percentile,
learning_rate,
minibatch_size,
patience,
max_epochs,
discard_sample_reads_fraction,
no_normalize_to_reads_per_million,
log_every_seconds,
):
"""Fit matrix factorization model.
Computes residuals from a matrix factorization model. Specifically, attempts
to detect and correct for clone and sample batch effects by subtracting off
a learned low-rank reconstruction of the given counts matrix.
The result is the (clones x samples) matrix of residuals after correcting for
batch effects. A few additional rows and columns (named _background_0,
_background_1, ...) giving the learned effects are also included.
"""
from phip.clipped_factorization import do_clipped_factorization
counts = pd.read_csv(input, sep="\t", header=0, index_col=list(range(index_cols)))
total_reads = counts.sum()
expected_reads = total_reads.median()
for sample in counts.columns:
if total_reads[sample] / expected_reads < discard_sample_reads_fraction:
print(
"[!!] EXCLUDING SAMPLE %s DUE TO INSUFFICIENT READS "
"(%d vs. sample median %d)"
% (sample, total_reads[sample], expected_reads)
)
del counts[sample]
result_df = do_clipped_factorization(
counts,
rank=rank,
clip_percentile=clip_percentile,
learning_rate=learning_rate,
minibatch_size=minibatch_size,
patience=patience,
max_epochs=max_epochs,
normalize_to_reads_per_million=not no_normalize_to_reads_per_million,
log_every_seconds=log_every_seconds,
)
if output.endswith(".tsv"):
output_path = output
else:
os.makedirs(output)
output_path = pjoin(output, "mixture.tsv")
result_df.to_csv(output_path, sep="\t", float_format="%.2f")
print("Wrote: %s" % output_path)
@cli.command(name="call-hits")
@option(
"-i",
"--input",
required=True,
type=Path(exists=True, dir_okay=False),
help="input counts file (tab-delim)",
)
@option(
"-o",
"--output",
required=False,
type=Path(exists=False),
help="output file or directory. If ends in .tsv, will be treated as file",
)
@option(
"-d", "--index-cols", default=1, help="number of columns to use as index/row-key"
)
@option(
"--beads-regex",
default=".*beads.*",
show_default=True,
help="samples with names matching this regex are considered beads-only",
)
@option(
"--ignore-columns-regex",
default="^_background.*",
show_default=True,
help="ignore columns matching the given regex (evaluated in case-insensitive"
" mode.) Ignored columns are passed through to output without processing.",
)
@option(
"--ignore-rows-regex",
default="^_background.*",
show_default=True,
help="ignore rows matching the given regex (evaluated in case-insensitive "
"mode). Ignored rows are passed through to output without processing.",
)
@option(
"--fdr", default=DEFAULT_FDR, show_default=True, help="target false discovery rate"
)
@option(
"--normalize-to-reads-per-million",
type=Choice(["always", "never", "guess"]),
default="guess",
show_default=True,
help="Divide counts by totals per sample. Recommended "
"when running directly on raw read counts (as opposed to matrix "
'factorization residuals). If set to "guess" then the counts matrix '
"will be left as-is if it contains negative entries, and otherwise "
"will be normalized.",
)
@option(
"--verbosity",
default=2,
show_default=True,
help="verbosity: no output (0), result summary only (1), or progress (2)",
)
def call_hits(
input,
output,
index_cols,
beads_regex,
ignore_columns_regex,
ignore_rows_regex,
fdr,
normalize_to_reads_per_million,
verbosity,
):
"""Call hits at specified FDR using a heuristic.
Either raw read counts or the result of the clipped-factorization-model
sub-command can be provided.
The result is a matrix of shape (clones x samples). Entries above 1.0 in
this matrix indicate hits. Higher values indicate more evidence for a
hit, but there is no simple interpretation of these values beyond whether
they are below/above 1.0.
See the documentation for `hit_calling.do_hit_calling()` for details on
the implementation.
"""
from phip.hit_calling import do_hit_calling
original_counts = pd.read_csv(
input, sep="\t", header=0, index_col=list(range(index_cols))
)
counts = original_counts
print("Read input matrix: %d clones x %d samples." % counts.shape)
print("Columns: %s" % " ".join(counts.columns))
columns_to_ignore = [
s
for s in counts.columns
if ignore_columns_regex
and re.match(ignore_columns_regex, s, flags=re.IGNORECASE)
]
if columns_to_ignore:
print(
"Ignoring %d columns matching regex '%s': %s"
% (
len(columns_to_ignore),
ignore_columns_regex,
" ".join(columns_to_ignore),
)
)
counts = counts[[c for c in counts.columns if c not in columns_to_ignore]]
rows_to_ignore = [
s
for s in counts.index
if ignore_rows_regex
and index_cols == 1
and re.match(ignore_rows_regex, s, flags=re.IGNORECASE)
]
if rows_to_ignore:
print(
"Ignoring %d rows matching regex '%s': %s"
% (len(rows_to_ignore), ignore_rows_regex, " ".join(rows_to_ignore))
)
counts = counts.loc[~counts.index.isin(rows_to_ignore)]
beads_only_samples = [
s for s in counts.columns if re.match(beads_regex, s, flags=re.IGNORECASE)
]
print(
"Beads-only regex '%s' matched %d samples: %s"
% (beads_regex, len(beads_only_samples), " ".join(beads_only_samples))
)
result_df = do_hit_calling(
counts,
beads_only_samples=beads_only_samples,
fdr=fdr,
normalize_to_reads_per_million={"always": True, "never": False, "guess": None}[
normalize_to_reads_per_million
],
verbosity=verbosity,
)
full_result_df = original_counts.copy()
for column in result_df.columns:
full_result_df.loc[result_df.index, column] = result_df[column]
if output.endswith(".tsv"):
output_path = output
else:
os.makedirs(output)
output_path = pjoin(output, "hits.tsv")
full_result_df.to_csv(output_path, sep="\t", float_format="%.4f")
print("Wrote: %s" % output_path)
# TOOLS THAT SHOULD BE USED RARELY
@cli.command(name="zip-reads-and-barcodes")
@option(
"-i",
"--input",
type=Path(exists=True, dir_okay=False),
required=True,
help="reads fastq file",
)
@option(
"-b",
"--barcodes",
type=Path(exists=True, dir_okay=False),
required=True,
help="indexes/barcodes fastq file",
)
@option(
"-m",
"--mapping",
type=Path(exists=True, dir_okay=False),
required=True,
help="barcode to sample mapping (tab-delim, no header line)",
)
@option(
"-o", "--output", type=Path(exists=False), required=True, help="output directory"
)
@option(
"-z", "--compress-output", is_flag=True, help="gzip-compress output fastq files"
)
@option(
"-n",
"--no-wrap",
is_flag=True,
help="fastq inputs are not wrapped (i.e., 4 lines per record)",
)
def zip_reads_barcodes(input, barcodes, mapping, output, compress_output, no_wrap):
"""Zip reads with barcodes and split into files.
Some older versions of the Illumina pipeline would not annotate the reads
with their corresponding barcodes, but would leave the barcode reads in a
separate fastq file. This tool will take both fastq files and will modify
the main fastq record to add the barcode to the header line (in the same
place Illumina would put it). It will then write one file per sample as
provided in the mapping.
This should only be necessary on older data files. Newer pipelines that use
bcl2fastq2 or the "generate fastq" pipeline in Basespace (starting 9/2016)
should not require this.
This tool requires that the reads are presented in the same order in the
two input files (which should be the case).
This tool should be used very rarely.
"""
from .utils import load_mapping, edit1_mapping
if no_wrap:
from .utils import read_fastq_nowrap as fastq_parser
else:
from .utils import readfq as fastq_parser
os.makedirs(output, mode=0o755)
input = osp.abspath(input)
barcodes = osp.abspath(barcodes)
# generate all possible edit-1 BCs
bc2sample = edit1_mapping(load_mapping(mapping))
with open_maybe_compressed(input, "r") as r_h, open_maybe_compressed(
barcodes, "r"
) as b_h:
# open file handles for each sample
ext = "fastq.gz" if compress_output else "fastq"
output_handles = {
s: open_maybe_compressed(
pjoin(output, "{s}.{ext}".format(s=s, ext=ext)), "w"
)
for s in set(bc2sample.values())
}
try:
for (r_n, r_s, r_q), (b_n, b_s, b_q) in zip(
tqdm(fastq_parser(r_h)), fastq_parser(b_h)
):
assert r_n.split(maxsplit=1)[0] == b_n.split(maxsplit=1)[0]
try:
print(
"@{r_n}\n{r_s}\n+\n{r_q}".format(r_n=r_n, r_s=r_s, r_q=r_q),
file=output_handles[bc2sample[b_s]],
)
except KeyError:
continue
finally:
for h in output_handles.values():
h.close()
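# Example barcode-to-sample mapping file (tab-delimited, no header line;
# values are placeholders, and the barcode is assumed to be in the first
# column with the sample name in the second):
#
#   ACGTACGT<TAB>sample_1
#   TTGCAATC<TAB>sample_2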
@cli.command(name="merge-columns")
@option(
"-i", "--input", required=True, help="input path (directory of tab-delim files)"
)
@option("-o", "--output", required=True, help="output path")
@option(
"-m",
"--method",
type=Choice(["iter", "outer"]),
default="iter",
help="merge/join method",
)
@option(
"-p",
"--position",
type=int,
default=1,
help="the field position to merge (0-indexed)",
)
@option(
"-d", "--index-cols", default=1, help="number of columns to use as index/row-key"
)
def merge_columns(input, output, method, position, index_cols):
"""Merge tab-delimited files.
input must be a directory containing `.tsv` files to merge.
method: iter -- concurrently iterate over lines of all files; assumes
row-keys are identical in each file
method: outer -- bona fide outer join of data in each file; loads all files
into memory and joins using pandas
"""
input_dir = os.path.abspath(input)
output_file = os.path.abspath(output)
input_files = glob(pjoin(input_dir, "*.tsv"))
if method == "iter":
file_iterators = [open(f, "r") for f in input_files]
file_headers = [osp.splitext(osp.basename(f))[0] for f in input_files]
with open(output_file, "w") as op:
# iterate through lines
for lines in zip(*file_iterators):
fields_array = [
[field.strip() for field in line.split("\t")] for line in lines
]
# check that join column is the same
for fields in fields_array[1:]:
assert fields_array[0][:index_cols] == fields[:index_cols]
merged_fields = fields_array[0][:index_cols] + [
f[position] for f in fields_array
]
print("\t".join(merged_fields), file=op)
elif method == "outer":
def load(path):
icols = list(range(index_cols))
ucols = icols + [position]
return pd.read_csv(
path, sep="\t", header=0, dtype=str, index_col=icols, usecols=ucols
)
dfs = [load(path) for path in input_files]
merge = lambda l, r: pd.merge(
l, r, how="outer", left_index=True, right_index=True
)
df = reduce(merge, dfs).fillna(0)
df.to_csv(output, sep="\t", float_format="%.2f")
@cli.command(name="normalize-counts")
@option("-i", "--input", required=True, help="input counts (tab-delim)")
@option("-o", "--output", required=True, help="output path")
@option(
"-m",
"--method",
type=Choice(["col-sum", "size-factors"]),
default="size-factors",
help="normalization method",
)
@option(
"-d", "--index-cols", default=1, help="number of columns to use as index/row-key"
)
def normalize_counts(input, output, method, index_cols):
"""Normalize count matrix.
Two methods for normalizing are available:
* Size factors from Anders and Huber 2010 (similar to TMM)
* Normalize to constant column-sum of 1e6
"""
df = pd.read_csv(input, sep="\t", header=0, index_col=list(range(index_cols)))
if method == "col-sum":
normalized = df / (df.sum() / 1e6)
elif method == "size-factors":
factors = compute_size_factors(df.values)
normalized = df / factors
normalized.to_csv(output, sep="\t", float_format="%.2f")
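# Sketch of the Anders & Huber (2010) "size factors" idea used above
# (illustrative only; the actual implementation lives in
# phip.utils.compute_size_factors and may differ in detail, e.g. in how
# zero counts are handled):
#
#   log_counts = np.log(counts)                  # counts: clones x samples
#   log_geo_means = log_counts.mean(axis=1)      # per-clone log geometric mean
#   ratios = log_counts - log_geo_means[:, np.newaxis]
#   factors = np.exp(np.median(ratios, axis=0))  # one size factor per sample
#   normalized = counts / factors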
@cli.command(name="count-exact-matches")
@option(
"-i",
"--input",
required=True,
type=Path(exists=True, dir_okay=False),
help="input fastq (gzipped ok)",
)
@option(
"-o",
"--output",
required=True,
type=Path(exists=False),
help="output counts file (tab-delim)",
)
@option(
"-r",
"--reference",
required=True,
type=Path(exists=True, dir_okay=False),
help="path to reference (input) counts file (tab-delim)",
)
@option(
"-l",
"--read-length",
required=True,
type=int,
help="read length (or, number of bases to use for matching)",
metavar="<read-length>",
)
def count_exact_matches(input, output, reference, read_length):
"""Match reads to reference exactly.
Takes the first <read-length> bases of each read and attempts to match it
exactly to the reference sequences. Computes the number of matches for each
reference.
"""
# load reference
seq_to_ref = OrderedDict()
with open(reference, "r") as ip:
for (ref_name, seq, _) in readfq(ip):
seq_to_ref[seq[:read_length]] = ref_name
num_reads = 0
num_matched = 0
counts = Counter()
with gzip.open(input, "rt") as ip:
for (name, seq, _) in tqdm(readfq(ip)):
num_reads += 1
refname = seq_to_ref.get(seq)
if refname is not None:
num_matched += 1
counts[refname] += 1
print(
"num_reads: {}\nnum_matched: {}\nfrac_matched: {}".format(
num_reads, num_matched, num_matched / num_reads
),
file=sys.stderr,
)
with open(output[0], "w") as op:
print("id\t{}".format(wildcards.sample), file=op)
for (_, refname) in seq_to_ref.items():
print("{}\t{}".format(refname, counts[refname]), file=op)
# DEPRECATED TOOLS
@cli.command(name="split-fastq", deprecated=True)
@option("-i", "--input", required=True, help="input path (fastq file)")
@option("-o", "--output", required=True, help="output path (directory)")
@option("-n", "--chunk-size", type=int, required=True, help="number of reads per chunk")
def split_fastq(input, output, chunk_size):
"""Split fastq files into smaller chunks."""
input_file = osp.abspath(input)
output_dir = osp.abspath(output)
os.makedirs(output_dir, mode=0o755)
# convenience functions
output_file = lambda i: pjoin(output_dir, "part.{0}.fastq".format(i))
with open_maybe_compressed(input_file, "r") as input_handle:
num_processed = 0
file_num = 1
for (name, seq, qual) in readfq(input_handle):
if num_processed == 0:
op = open_maybe_compressed(output_file(file_num), "w")
print(f"@{name}\n{seq}\n+\n{qual}", file=op)
num_processed += 1
if num_processed == chunk_size:
op.close()
num_processed = 0
file_num += 1
if not op.closed:
op.close()
@cli.command(name="align-parts", deprecated=True)
@option("-i", "--input", required=True, help="input path (directory of fastq parts)")
@option("-o", "--output", required=True, help="output path (directory)")
@option(
"-x", "--index", required=True, help="bowtie index (e.g., as specified to bowtie2)"
)
@option(
"-b",
"--batch-submit",
default="",
help="batch submit command to prefix bowtie command invocation",
)
@option(
"-p",
"--threads",
default=1,
help="Number of threads to specify for each invocation of bowtie",
)
@option(
"-3",
"--trim3",
default=0,
help="Number of bases to trim off of 3-end (passed to bowtie)",
)
@option("-d", "--dry-run", is_flag=True, help="Dry run; print out commands to execute")
def align_parts(input, output, index, batch_submit, threads, trim3, dry_run):
"""Align fastq files to peptide reference."""
input_dir = osp.abspath(input)
output_dir = osp.abspath(output)
if not dry_run:
os.makedirs(output_dir, mode=0o755)
bowtie_cmd_template = (
"bowtie -n 3 -l 100 --best --nomaqround --norc -k 1 -p {threads} "
"-3 {trim3} --quiet {index} {input} {output}"
)
for input_file in glob(pjoin(input_dir, "*.fastq")):
output_file = pjoin(
output_dir, osp.splitext(osp.basename(input_file))[0] + ".aln"
)
bowtie_cmd = bowtie_cmd_template.format(
index=index,
input=input_file,
output=output_file,
threads=threads,
trim3=trim3,
)
submit_cmd = "{batch_cmd} {app_cmd}".format(
batch_cmd=batch_submit, app_cmd=bowtie_cmd
)
if dry_run:
print(submit_cmd.strip())
else:
p = Popen(
submit_cmd.strip(), shell=True, stdout=PIPE, universal_newlines=True
)
print(p.communicate()[0])
@cli.command(name="compute-counts", deprecated=True)
@option("-i", "--input", required=True, help="input path (directory of aln files)")
@option("-o", "--output", required=True, help="output path (directory)")
@option(
"-r",
"--reference",
required=True,
help="path to reference (input) counts file (tab-delim)",
)
def compute_counts(input, output, reference):
"""Compute counts from aligned bam file."""
input_dir = osp.abspath(input)
output_dir = osp.abspath(output)
os.makedirs(output_dir, mode=0o755)
# load reference (i.e., input) counts
ref_names = []
ref_counts = []
with open(reference, "r") as ip:
# burn header
_ = next(ip)
for line in ip:
fields = line.split("\t")
ref_names.append(fields[0].strip())
ref_counts.append(round(float(fields[1])))
# compute count dicts
for input_file in glob(pjoin(input_dir, "*.aln")):
print(input_file)
sys.stdout.flush()
counts = {}
sample = osp.splitext(osp.basename(input_file))[0]
# accumulate counts
with open(input_file, "r") as ip:
for line in ip:
ref_clone = line.split("\t")[2].strip()
counts[ref_clone] = counts.get(ref_clone, 0) + 1
# write counts
output_file = pjoin(output_dir, sample + ".tsv")
with open(output_file, "w") as op:
print("id\tinput\t{0}".format(sample), file=op)
for (ref_name, ref_count) in zip(ref_names, ref_counts):
record = "{0}\t{1}\t{2}".format(
ref_name, ref_count, counts.get(ref_name, 0)
)
print(record, file=op)
@cli.command(name="gen-covariates", deprecated=True)
@option("-i", "--input", required=True, help="input path to merged count file")
@option(
"-s", "--substring", required=True, help="substring to match against column names"
)
@option("-o", "--output", required=True, help="output file (recommend .tsv extension)")
def gen_covariates(input, substring, output):
"""Compute covariates for input to stat model.
The input (`-i`) should be the merged counts file. Each column name is
matched against the given substring. The median coverage-normalized value
of each row from the matching columns will be output into a tab-delim file.
This file can be used as the "reference" values for computing p-values.
"""
input_file = osp.abspath(input)
output_file = osp.abspath(output)
counts = pd.read_csv(input_file, sep="\t", header=0, index_col=0)
matched_columns = [col for col in counts.columns if substring in col]
sums = counts[matched_columns].sum()
normed = counts[matched_columns] / sums * sums.median()
medians = normed.median(axis=1)
medians.name = "input"
medians.to_csv(output_file, sep="\t", header=True, index_label="id")
@cli.command(name="compute-pvals", deprecated=True)
@option("-i", "--input", required=True, help="input path")
@option("-o", "--output", required=True, help="output path")
@option(
"-b",
"--batch-submit",
help="batch submit command to prefix pval command invocation",
)
@option(
"-d",
"--dry-run",
is_flag=True,
help="Dry run; print out commands to execute for batch submit",
)
def compute_pvals(input, output, batch_submit, dry_run):
"""Compute p-values from counts."""
from .genpois import (
estimate_GP_distributions,
lambda_theta_regression,
precompute_pvals,
)
if batch_submit is not None:
# run compute-pvals on each file using batch submit command
input_dir = osp.abspath(input)
output_dir = osp.abspath(output)
if not dry_run:
os.makedirs(output_dir, mode=0o755)
pval_cmd_template = "phip compute-pvals -i {input} -o {output}"
for input_file in glob(pjoin(input_dir, "*.tsv")):
sample = osp.splitext(osp.basename(input_file))[0]
output_file = pjoin(output_dir, "{0}.pvals.tsv".format(sample))
pval_cmd = pval_cmd_template.format(input=input_file, output=output_file)
submit_cmd = "{batch_cmd} {app_cmd}".format(
batch_cmd=batch_submit, app_cmd=pval_cmd
)
if dry_run:
print(submit_cmd.strip())
else:
p = Popen(
submit_cmd.strip(), shell=True, stdout=PIPE, universal_newlines=True
)
print(p.communicate()[0])
else:
# actually compute p-vals on single file
input_file = osp.abspath(input)
output_file = osp.abspath(output)
clones = []
samples = None
input_counts = []
output_counts = []
with open(input_file, "r") as ip:
header_fields = next(ip).split("\t")
samples = [f.strip() for f in header_fields[2:]]
for line in tqdm(ip, desc="Loading data"):
fields = line.split("\t")
clones.append(fields[0].strip())
input_counts.append(int(fields[1]))
output_counts.append(np.int_(fields[2:]))
input_counts = np.asarray(input_counts)
# pseudocounts to combat negative regressed theta:
output_counts = np.asarray(output_counts) + 1
uniq_input_values = list(set(input_counts))
# Estimate generalized Poisson distributions for every input count
(lambdas, thetas, idxs) = estimate_GP_distributions(
input_counts, output_counts, uniq_input_values
)
# Regression on all of the theta and lambda values computed
(lambda_fits, theta_fits) = lambda_theta_regression(lambdas, thetas, idxs)
# Precompute CDF for possible input-output combinations
uniq_combos = []
for i in range(output_counts.shape[1]):
uniq_combos.append(set(zip(input_counts, output_counts[:, i])))
log10pval_hash = precompute_pvals(lambda_fits, theta_fits, uniq_combos)
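        # log10pval_hash maps (sample index, input count, output count)
        # tuples to the precomputed scores, so the per-clone loop below
        # reduces to dictionary lookups.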
# Compute p-values for each clone using regressed GP parameters
with open(output_file, "w") as op:
header = "\t".join(["id"] + samples)
print(header, file=op)
for (clone, ic, ocs) in zip(
tqdm(clones, desc="Writing scores"), input_counts, output_counts
):
fields = [clone]
for (i, oc) in enumerate(ocs):
fields.append("{:.2f}".format(log10pval_hash[(i, ic, oc)]))
print("\t".join(fields), file=op)
|
|
# coding=utf-8
# Copyright 2022 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensor2tensor.utils.metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.utils import metrics
import tensorflow.compat.v1 as tf
class MetricsTest(tf.test.TestCase):
def testAccuracyMetric(self):
predictions = np.random.randint(1, 5, size=(12, 12, 12, 1))
targets = np.random.randint(1, 5, size=(12, 12, 12, 1))
expected = np.mean((predictions == targets).astype(float))
with self.test_session() as session:
scores, _ = metrics.padded_accuracy(
tf.one_hot(predictions, depth=5, dtype=tf.float32),
tf.constant(targets, dtype=tf.int32))
a = tf.reduce_mean(scores)
session.run(tf.global_variables_initializer())
actual = session.run(a)
self.assertAlmostEqual(actual, expected)
def testAccuracyTopKMetric(self):
predictions = np.random.randint(1, 5, size=(12, 12, 12, 1))
targets = np.random.randint(1, 5, size=(12, 12, 12, 1))
expected = np.mean((predictions == targets).astype(float))
with self.test_session() as session:
predicted = tf.one_hot(predictions, depth=5, dtype=tf.float32)
scores1, _ = metrics.padded_accuracy_topk(
predicted, tf.constant(targets, dtype=tf.int32), k=1)
scores2, _ = metrics.padded_accuracy_topk(
predicted, tf.constant(targets, dtype=tf.int32), k=7)
a1 = tf.reduce_mean(scores1)
a2 = tf.reduce_mean(scores2)
session.run(tf.global_variables_initializer())
actual1, actual2 = session.run([a1, a2])
self.assertAlmostEqual(actual1, expected)
self.assertAlmostEqual(actual2, 1.0)
def testPrefixAccuracy(self):
vocab_size = 10
predictions = tf.one_hot(
tf.constant([[[1], [2], [3], [4], [9], [6], [7], [8]],
[[1], [2], [3], [4], [5], [9], [7], [8]],
[[1], [2], [3], [4], [5], [9], [7], [0]]]),
vocab_size)
labels = tf.expand_dims(
tf.constant([[[1], [2], [3], [4], [5], [6], [7], [8]],
[[1], [2], [3], [4], [5], [6], [7], [8]],
[[1], [2], [3], [4], [5], [6], [7], [0]]]),
axis=-1)
expected_accuracy = np.average([4.0 / 8.0,
5.0 / 8.0,
5.0 / 7.0])
accuracy, _ = metrics.prefix_accuracy(predictions, labels)
with self.test_session() as session:
accuracy_value = session.run(accuracy)
self.assertAlmostEqual(expected_accuracy, accuracy_value)
def testSequenceAccuracyMetric(self):
predictions = np.random.randint(4, size=(12, 12, 12, 1))
targets = np.random.randint(4, size=(12, 12, 12, 1))
expected = np.mean(
np.prod((predictions == targets).astype(float), axis=(1, 2)))
with self.test_session() as session:
scores, _ = metrics.padded_sequence_accuracy(
tf.one_hot(predictions, depth=4, dtype=tf.float32),
tf.constant(targets, dtype=tf.int32))
a = tf.reduce_mean(scores)
session.run(tf.global_variables_initializer())
actual = session.run(a)
self.assertEqual(actual, expected)
def testTwoClassAccuracyMetric(self):
predictions = tf.constant([0.0, 0.2, 0.4, 0.6, 0.8, 1.0], dtype=tf.float32)
targets = tf.constant([0, 0, 1, 0, 1, 1], dtype=tf.int32)
expected = 2.0 / 3.0
with self.test_session() as session:
accuracy, _ = metrics.two_class_accuracy(predictions, targets)
session.run(tf.global_variables_initializer())
session.run(tf.local_variables_initializer())
actual = session.run(accuracy)
self.assertAlmostEqual(actual, expected)
def testTwoClassLogLikelihood(self):
predictions = np.array([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
targets = np.array([0, 0, 1, 0, 1, 1])
expected = (2.0 * np.log(0.8) + 2.0 * np.log(0.4)) / 6.0
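    # Hand check: targets 0,0,1,0,1,1 with predictions 0.0,0.2,0.4,0.6,0.8,1.0
    # give likelihoods 1.0, 0.8, 0.4, 0.4, 0.8, 1.0; the two log(1.0) terms
    # vanish, leaving (2.0 * log(0.8) + 2.0 * log(0.4)) / 6.0.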
with self.test_session() as session:
avg_log_likelihood, _ = metrics.two_class_log_likelihood(
predictions, targets)
actual = session.run(avg_log_likelihood)
self.assertAlmostEqual(actual, expected)
def testTwoClassLogLikelihoodVersusOldImplementation(self):
def alt_two_class_log_likelihood_impl(predictions, labels):
float_labels = tf.cast(labels, dtype=tf.float64)
float_predictions = tf.cast(tf.squeeze(predictions), dtype=tf.float64)
# likelihood should be just p for class 1, and 1 - p for class 0.
# signs is 1 for class 1, and -1 for class 0
signs = 2 * float_labels - tf.ones_like(float_labels)
# constant_term is 1 for class 0, and 0 for class 1.
constant_term = tf.ones_like(float_labels) - float_labels
likelihoods = constant_term + signs * float_predictions
log_likelihoods = tf.log(likelihoods)
avg_log_likelihood = tf.reduce_mean(log_likelihoods)
return avg_log_likelihood
predictions = np.random.rand(1, 10, 1)
targets = np.random.randint(2, size=10)
with self.test_session() as session:
new_log_likelihood, _ = metrics.two_class_log_likelihood(
predictions, targets)
alt_log_likelihood = alt_two_class_log_likelihood_impl(
predictions, targets)
new_impl, alt_impl = session.run([new_log_likelihood, alt_log_likelihood])
self.assertAlmostEqual(new_impl, alt_impl)
def testRMSEMetric(self):
predictions = np.full((10, 1), 1) # All 1's
targets = np.full((10, 1), 3) # All 3's
expected = np.sqrt(np.mean((predictions - targets)**2)) # RMSE = 2.0
with self.test_session() as session:
rmse, _ = metrics.padded_rmse(
tf.constant(predictions, dtype=tf.int32),
tf.constant(targets, dtype=tf.int32))
session.run(tf.global_variables_initializer())
actual = session.run(rmse)
self.assertEqual(actual, expected)
def testUnpaddedRMSEMetric(self):
predictions = np.full((10, 1), 1) # All 1's
targets = np.full((10, 1), 3) # All 3's
expected = np.mean((predictions - targets)**2) # MSE = 4.0
with self.test_session() as session:
mse, _ = metrics.unpadded_mse(
tf.constant(predictions, dtype=tf.int32),
tf.constant(targets, dtype=tf.int32))
session.run(tf.global_variables_initializer())
actual = session.run(mse)
self.assertEqual(actual, expected)
def testSequenceEditDistanceMetric(self):
predictions = np.array([[3, 4, 5, 1, 0, 0],
[2, 1, 3, 4, 0, 0],
[2, 1, 3, 4, 0, 0]])
# Targets are just a bit different:
# - first sequence has a different prediction
# - second sequence has a different prediction and one extra step
# - third sequence is identical
targets = np.array([[5, 4, 5, 1, 0, 0],
[2, 5, 3, 4, 1, 0],
[2, 1, 3, 4, 0, 0]])
# Reshape to match expected input format by metric fns.
predictions = np.reshape(predictions, [3, 6, 1, 1])
targets = np.reshape(targets, [3, 6, 1, 1])
with self.test_session() as session:
scores, weight = metrics.sequence_edit_distance(
tf.one_hot(predictions, depth=6, dtype=tf.float32),
tf.constant(targets, dtype=tf.int32))
session.run(tf.global_variables_initializer())
actual_scores, actual_weight = session.run([scores, weight])
self.assertAlmostEqual(actual_scores, 3.0 / 13)
self.assertEqual(actual_weight, 13)
def testWordErrorRateMetric(self):
# Ensure availability of the WER metric function in the dictionary.
assert metrics.Metrics.WORD_ERROR_RATE in metrics.METRICS_FNS
# Test if WER is computed correctly.
ref = np.asarray([
# a b c
[97, 34, 98, 34, 99],
[97, 34, 98, 34, 99],
[97, 34, 98, 34, 99],
[97, 34, 98, 34, 99],
])
hyp = np.asarray([
[97, 34, 98, 34, 99], # a b c
[97, 34, 98, 0, 0], # a b
[97, 34, 98, 34, 100], # a b d
[0, 0, 0, 0, 0] # empty
])
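    # Expected WER worked out by hand: each of the four references is the
    # three-word sequence "a b c" (12 reference words total), and the
    # hypotheses contain 0, 1, 1 and 3 word errors, so WER = 5 / 12 ~ 0.417.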
labels = np.reshape(ref, ref.shape + (1, 1))
predictions = np.zeros((len(ref), np.max([len(s) for s in hyp]), 1, 1, 256))
for i, sample in enumerate(hyp):
for j, idx in enumerate(sample):
predictions[i, j, 0, 0, idx] = 1
with self.test_session() as session:
actual_wer, unused_actual_ref_len = session.run(
metrics.word_error_rate(predictions, labels))
expected_wer = 0.417
places = 3
self.assertAlmostEqual(round(actual_wer, places), expected_wer, places)
def testNegativeLogPerplexity(self):
predictions = np.random.randint(4, size=(12, 12, 12, 1))
targets = np.random.randint(4, size=(12, 12, 12, 1))
with self.test_session() as session:
scores, _ = metrics.padded_neg_log_perplexity(
tf.one_hot(predictions, depth=4, dtype=tf.float32),
tf.constant(targets, dtype=tf.int32))
a = tf.reduce_mean(scores)
session.run(tf.global_variables_initializer())
actual = session.run(a)
self.assertEqual(actual.shape, ())
def testNegativeLogPerplexityMasked(self):
predictions = np.random.randint(4, size=(12, 12, 12, 1))
targets = np.random.randint(4, size=(12, 12, 12, 1))
features = {
'targets_mask': tf.to_float(tf.ones([12, 12]))
}
with self.test_session() as session:
scores, _ = metrics.padded_neg_log_perplexity_with_masking(
tf.one_hot(predictions, depth=4, dtype=tf.float32),
tf.constant(targets, dtype=tf.int32),
features)
a = tf.reduce_mean(scores)
session.run(tf.global_variables_initializer())
actual = session.run(a)
self.assertEqual(actual.shape, ())
def testNegativeLogPerplexityMaskedAssert(self):
predictions = np.random.randint(4, size=(12, 12, 12, 1))
targets = np.random.randint(4, size=(12, 12, 12, 1))
features = {}
with self.assertRaisesRegexp(
ValueError,
'masked_neg_log_perplexity requires targets_mask feature'):
with self.test_session() as session:
scores, _ = metrics.padded_neg_log_perplexity_with_masking(
tf.one_hot(predictions, depth=4, dtype=tf.float32),
tf.constant(targets, dtype=tf.int32),
features)
a = tf.reduce_mean(scores)
session.run(tf.global_variables_initializer())
_ = session.run(a)
def testSigmoidAccuracyOneHot(self):
logits = np.array([
[-1., 1.],
[1., -1.],
[-1., 1.],
[1., -1.]
])
labels = np.array([
[0, 1],
[1, 0],
[1, 0],
[0, 1]
])
logits = np.expand_dims(np.expand_dims(logits, 1), 1)
labels = np.expand_dims(np.expand_dims(labels, 1), 1)
with self.test_session() as session:
score, _ = metrics.sigmoid_accuracy_one_hot(logits, labels)
session.run(tf.global_variables_initializer())
session.run(tf.local_variables_initializer())
s = session.run(score)
self.assertEqual(s, 0.5)
def testSigmoidAccuracy(self):
logits = np.array([
[-1., 1.],
[1., -1.],
[-1., 1.],
[1., -1.]
])
labels = np.array([1, 0, 0, 1])
with self.test_session() as session:
score, _ = metrics.sigmoid_accuracy(logits, labels)
session.run(tf.global_variables_initializer())
session.run(tf.local_variables_initializer())
s = session.run(score)
self.assertEqual(s, 0.5)
def testSigmoidPrecisionOneHot(self):
logits = np.array([
[-1., 1.],
[1., -1.],
[1., -1.],
[1., -1.]
])
labels = np.array([
[0, 1],
[0, 1],
[0, 1],
[0, 1]
])
logits = np.expand_dims(np.expand_dims(logits, 1), 1)
labels = np.expand_dims(np.expand_dims(labels, 1), 1)
with self.test_session() as session:
score, _ = metrics.sigmoid_precision_one_hot(logits, labels)
session.run(tf.global_variables_initializer())
session.run(tf.local_variables_initializer())
s = session.run(score)
self.assertEqual(s, 0.25)
def testSigmoidRecallOneHot(self):
logits = np.array([
[-1., 1.],
[1., -1.],
[1., -1.],
[1., -1.]
])
labels = np.array([
[0, 1],
[0, 1],
[0, 1],
[0, 1]
])
logits = np.expand_dims(np.expand_dims(logits, 1), 1)
labels = np.expand_dims(np.expand_dims(labels, 1), 1)
with self.test_session() as session:
score, _ = metrics.sigmoid_recall_one_hot(logits, labels)
session.run(tf.global_variables_initializer())
session.run(tf.local_variables_initializer())
s = session.run(score)
self.assertEqual(s, 0.25)
def testSigmoidCrossEntropyOneHot(self):
logits = np.array([
[-1., 1.],
[1., -1.],
[1., -1.],
[1., -1.]
])
labels = np.array([
[0, 1],
[1, 0],
[0, 0],
[0, 1]
])
logits = np.expand_dims(np.expand_dims(logits, 1), 1)
labels = np.expand_dims(np.expand_dims(labels, 1), 1)
with self.test_session() as session:
score, _ = metrics.sigmoid_cross_entropy_one_hot(logits, labels)
session.run(tf.global_variables_initializer())
session.run(tf.local_variables_initializer())
s = session.run(score)
self.assertAlmostEqual(s, 0.688, places=3)
def testRocAuc(self):
logits = np.array([
[-1., 1.],
[1., -1.],
[1., -1.],
[1., -1.]
])
labels = np.array([
[1],
[0],
[1],
[0]
])
logits = np.expand_dims(np.expand_dims(logits, 1), 1)
labels = np.expand_dims(np.expand_dims(labels, 1), 1)
with self.test_session() as session:
score, _ = metrics.roc_auc(logits, labels)
session.run(tf.global_variables_initializer())
session.run(tf.local_variables_initializer())
s = session.run(score)
self.assertAlmostEqual(s, 0.750, places=3)
def testMultilabelMatch3(self):
predictions = np.random.randint(1, 5, size=(100, 1, 1, 1))
targets = np.random.randint(1, 5, size=(100, 10, 1, 1))
weights = np.random.randint(0, 2, size=(100, 1, 1, 1))
targets *= weights
predictions_repeat = np.repeat(predictions, 10, axis=1)
expected = (predictions_repeat == targets).astype(float)
expected = np.sum(expected, axis=(1, 2, 3))
expected = np.minimum(expected / 3.0, 1.)
expected = np.sum(expected * weights[:, 0, 0, 0]) / weights.shape[0]
with self.test_session() as session:
scores, weights_ = metrics.multilabel_accuracy_match3(
tf.one_hot(predictions, depth=5, dtype=tf.float32),
tf.constant(targets, dtype=tf.int32))
a, a_op = tf.metrics.mean(scores, weights_)
session.run(tf.local_variables_initializer())
session.run(tf.global_variables_initializer())
_ = session.run(a_op)
actual = session.run(a)
self.assertAlmostEqual(actual, expected, places=6)
def testPearsonCorrelationCoefficient(self):
predictions = np.random.rand(12, 1)
targets = np.random.rand(12, 1)
expected = np.corrcoef(np.squeeze(predictions), np.squeeze(targets))[0][1]
with self.test_session() as session:
pearson, _ = metrics.pearson_correlation_coefficient(
tf.constant(predictions, dtype=tf.float32),
tf.constant(targets, dtype=tf.float32))
session.run(tf.global_variables_initializer())
session.run(tf.local_variables_initializer())
actual = session.run(pearson)
self.assertAlmostEqual(actual, expected)
if __name__ == '__main__':
tf.test.main()
|
|
# Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from concurrent.futures import ThreadPoolExecutor
from mock import Mock, ANY, call
from cassandra import OperationTimedOut, SchemaTargetType, SchemaChangeType
from cassandra.protocol import ResultMessage, RESULT_KIND_ROWS
from cassandra.cluster import ControlConnection, _Scheduler
from cassandra.pool import Host
from cassandra.policies import (SimpleConvictionPolicy, RoundRobinPolicy,
ConstantReconnectionPolicy)
PEER_IP = "foobar"
class MockMetadata(object):
def __init__(self):
self.hosts = {
"192.168.1.0": Host("192.168.1.0", SimpleConvictionPolicy),
"192.168.1.1": Host("192.168.1.1", SimpleConvictionPolicy),
"192.168.1.2": Host("192.168.1.2", SimpleConvictionPolicy)
}
for host in self.hosts.values():
host.set_up()
self.cluster_name = None
self.partitioner = None
self.token_map = {}
def get_host(self, rpc_address):
return self.hosts.get(rpc_address)
def all_hosts(self):
return self.hosts.values()
def rebuild_token_map(self, partitioner, token_map):
self.partitioner = partitioner
self.token_map = token_map
class MockCluster(object):
max_schema_agreement_wait = 5
load_balancing_policy = RoundRobinPolicy()
reconnection_policy = ConstantReconnectionPolicy(2)
down_host = None
contact_points = []
is_shutdown = False
def __init__(self):
self.metadata = MockMetadata()
self.added_hosts = []
self.removed_hosts = []
self.scheduler = Mock(spec=_Scheduler)
self.executor = Mock(spec=ThreadPoolExecutor)
def add_host(self, address, datacenter, rack, signal=False, refresh_nodes=True):
host = Host(address, SimpleConvictionPolicy, datacenter, rack)
self.added_hosts.append(host)
return host
def remove_host(self, host):
self.removed_hosts.append(host)
def on_up(self, host):
pass
def on_down(self, host, is_host_addition):
self.down_host = host
class MockConnection(object):
is_defunct = False
def __init__(self):
self.host = "192.168.1.0"
self.local_results = [
["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"],
[["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]]
]
self.peer_results = [
["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens"],
[["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"]],
["192.168.1.2", "10.0.0.2", "a", "dc1", "rack1", ["2", "102", "202"]]]
]
local_response = ResultMessage(
kind=RESULT_KIND_ROWS, results=self.local_results)
peer_response = ResultMessage(
kind=RESULT_KIND_ROWS, results=self.peer_results)
self.wait_for_responses = Mock(return_value=(peer_response, local_response))
class FakeTime(object):
def __init__(self):
self.clock = 0
def time(self):
return self.clock
def sleep(self, amount):
self.clock += amount
class ControlConnectionTest(unittest.TestCase):
def setUp(self):
self.cluster = MockCluster()
self.connection = MockConnection()
self.time = FakeTime()
self.control_connection = ControlConnection(self.cluster, 1, 0, 0)
self.control_connection._connection = self.connection
self.control_connection._time = self.time
def _get_matching_schema_preloaded_results(self):
local_results = [
["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"],
[["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]]
]
local_response = ResultMessage(kind=RESULT_KIND_ROWS, results=local_results)
peer_results = [
["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens"],
[["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"]],
["192.168.1.2", "10.0.0.2", "a", "dc1", "rack1", ["2", "102", "202"]]]
]
peer_response = ResultMessage(kind=RESULT_KIND_ROWS, results=peer_results)
return (peer_response, local_response)
def _get_nonmatching_schema_preloaded_results(self):
local_results = [
["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"],
[["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]]
]
local_response = ResultMessage(kind=RESULT_KIND_ROWS, results=local_results)
peer_results = [
["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens"],
[["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"]],
["192.168.1.2", "10.0.0.2", "b", "dc1", "rack1", ["2", "102", "202"]]]
]
peer_response = ResultMessage(kind=RESULT_KIND_ROWS, results=peer_results)
return (peer_response, local_response)
def test_wait_for_schema_agreement(self):
"""
Basic test with all schema versions agreeing
"""
self.assertTrue(self.control_connection.wait_for_schema_agreement())
# the control connection should not have slept at all
self.assertEqual(self.time.clock, 0)
def test_wait_for_schema_agreement_uses_preloaded_results_if_given(self):
"""
wait_for_schema_agreement uses preloaded results if given for shared table queries
"""
preloaded_results = self._get_matching_schema_preloaded_results()
self.assertTrue(self.control_connection.wait_for_schema_agreement(preloaded_results=preloaded_results))
# the control connection should not have slept at all
self.assertEqual(self.time.clock, 0)
# the connection should not have made any queries if given preloaded results
self.assertEqual(self.connection.wait_for_responses.call_count, 0)
def test_wait_for_schema_agreement_falls_back_to_querying_if_schemas_dont_match_preloaded_result(self):
"""
        wait_for_schema_agreement requeries if schemas do not match in the preloaded results
"""
preloaded_results = self._get_nonmatching_schema_preloaded_results()
self.assertTrue(self.control_connection.wait_for_schema_agreement(preloaded_results=preloaded_results))
# the control connection should not have slept at all
self.assertEqual(self.time.clock, 0)
self.assertEqual(self.connection.wait_for_responses.call_count, 1)
def test_wait_for_schema_agreement_fails(self):
"""
Make sure the control connection sleeps and retries
"""
# change the schema version on one node
self.connection.peer_results[1][1][2] = 'b'
self.assertFalse(self.control_connection.wait_for_schema_agreement())
# the control connection should have slept until it hit the limit
self.assertGreaterEqual(self.time.clock, self.cluster.max_schema_agreement_wait)
def test_wait_for_schema_agreement_skipping(self):
"""
If rpc_address or schema_version isn't set, the host should be skipped
"""
# an entry with no schema_version
self.connection.peer_results[1].append(
["192.168.1.3", "10.0.0.3", None, "dc1", "rack1", ["3", "103", "203"]]
)
# an entry with a different schema_version and no rpc_address
self.connection.peer_results[1].append(
[None, None, "b", "dc1", "rack1", ["4", "104", "204"]]
)
# change the schema version on one of the existing entries
self.connection.peer_results[1][1][3] = 'c'
self.cluster.metadata.get_host('192.168.1.1').is_up = False
self.assertTrue(self.control_connection.wait_for_schema_agreement())
self.assertEqual(self.time.clock, 0)
def test_wait_for_schema_agreement_rpc_lookup(self):
"""
If the rpc_address is 0.0.0.0, the "peer" column should be used instead.
"""
self.connection.peer_results[1].append(
["0.0.0.0", PEER_IP, "b", "dc1", "rack1", ["3", "103", "203"]]
)
host = Host("0.0.0.0", SimpleConvictionPolicy)
self.cluster.metadata.hosts[PEER_IP] = host
host.is_up = False
# even though the new host has a different schema version, it's
# marked as down, so the control connection shouldn't care
self.assertTrue(self.control_connection.wait_for_schema_agreement())
self.assertEqual(self.time.clock, 0)
# but once we mark it up, the control connection will care
host.is_up = True
self.assertFalse(self.control_connection.wait_for_schema_agreement())
self.assertGreaterEqual(self.time.clock, self.cluster.max_schema_agreement_wait)
def test_refresh_nodes_and_tokens(self):
self.control_connection.refresh_node_list_and_token_map()
meta = self.cluster.metadata
self.assertEqual(meta.partitioner, 'Murmur3Partitioner')
self.assertEqual(meta.cluster_name, 'foocluster')
# check token map
self.assertEqual(sorted(meta.all_hosts()), sorted(meta.token_map.keys()))
for token_list in meta.token_map.values():
self.assertEqual(3, len(token_list))
# check datacenter/rack
for host in meta.all_hosts():
self.assertEqual(host.datacenter, "dc1")
self.assertEqual(host.rack, "rack1")
self.assertEqual(self.connection.wait_for_responses.call_count, 1)
def test_refresh_nodes_and_tokens_uses_preloaded_results_if_given(self):
"""
refresh_nodes_and_tokens uses preloaded results if given for shared table queries
"""
preloaded_results = self._get_matching_schema_preloaded_results()
self.control_connection._refresh_node_list_and_token_map(self.connection, preloaded_results=preloaded_results)
meta = self.cluster.metadata
self.assertEqual(meta.partitioner, 'Murmur3Partitioner')
self.assertEqual(meta.cluster_name, 'foocluster')
# check token map
self.assertEqual(sorted(meta.all_hosts()), sorted(meta.token_map.keys()))
for token_list in meta.token_map.values():
self.assertEqual(3, len(token_list))
# check datacenter/rack
for host in meta.all_hosts():
self.assertEqual(host.datacenter, "dc1")
self.assertEqual(host.rack, "rack1")
# the connection should not have made any queries if given preloaded results
self.assertEqual(self.connection.wait_for_responses.call_count, 0)
def test_refresh_nodes_and_tokens_no_partitioner(self):
"""
Test handling of an unknown partitioner.
"""
# set the partitioner column to None
self.connection.local_results[1][0][4] = None
self.control_connection.refresh_node_list_and_token_map()
meta = self.cluster.metadata
self.assertEqual(meta.partitioner, None)
self.assertEqual(meta.token_map, {})
def test_refresh_nodes_and_tokens_add_host(self):
self.connection.peer_results[1].append(
["192.168.1.3", "10.0.0.3", "a", "dc1", "rack1", ["3", "103", "203"]]
)
self.cluster.scheduler.schedule = lambda delay, f, *args, **kwargs: f(*args, **kwargs)
self.control_connection.refresh_node_list_and_token_map()
self.assertEqual(1, len(self.cluster.added_hosts))
self.assertEqual(self.cluster.added_hosts[0].address, "192.168.1.3")
self.assertEqual(self.cluster.added_hosts[0].datacenter, "dc1")
self.assertEqual(self.cluster.added_hosts[0].rack, "rack1")
def test_refresh_nodes_and_tokens_remove_host(self):
del self.connection.peer_results[1][1]
self.control_connection.refresh_node_list_and_token_map()
self.assertEqual(1, len(self.cluster.removed_hosts))
self.assertEqual(self.cluster.removed_hosts[0].address, "192.168.1.2")
def test_refresh_nodes_and_tokens_timeout(self):
def bad_wait_for_responses(*args, **kwargs):
self.assertEqual(kwargs['timeout'], self.control_connection._timeout)
raise OperationTimedOut()
self.connection.wait_for_responses = bad_wait_for_responses
self.control_connection.refresh_node_list_and_token_map()
self.cluster.executor.submit.assert_called_with(self.control_connection._reconnect)
def test_refresh_schema_timeout(self):
def bad_wait_for_responses(*args, **kwargs):
self.time.sleep(kwargs['timeout'])
raise OperationTimedOut()
self.connection.wait_for_responses = Mock(side_effect=bad_wait_for_responses)
self.control_connection.refresh_schema()
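        # With the 1 second timeout passed to ControlConnection in setUp and
        # the mock cluster's 5 second max_schema_agreement_wait, five timed
        # out attempts are expected before giving up.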
self.assertEqual(self.connection.wait_for_responses.call_count, self.cluster.max_schema_agreement_wait / self.control_connection._timeout)
self.assertEqual(self.connection.wait_for_responses.call_args[1]['timeout'], self.control_connection._timeout)
def test_handle_topology_change(self):
event = {
'change_type': 'NEW_NODE',
'address': ('1.2.3.4', 9000)
}
self.cluster.scheduler.reset_mock()
self.control_connection._handle_topology_change(event)
self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection.refresh_node_list_and_token_map)
event = {
'change_type': 'REMOVED_NODE',
'address': ('1.2.3.4', 9000)
}
self.cluster.scheduler.reset_mock()
self.control_connection._handle_topology_change(event)
self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.cluster.remove_host, None)
event = {
'change_type': 'MOVED_NODE',
'address': ('1.2.3.4', 9000)
}
self.cluster.scheduler.reset_mock()
self.control_connection._handle_topology_change(event)
self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection.refresh_node_list_and_token_map)
def test_handle_status_change(self):
event = {
'change_type': 'UP',
'address': ('1.2.3.4', 9000)
}
self.cluster.scheduler.reset_mock()
self.control_connection._handle_status_change(event)
self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection.refresh_node_list_and_token_map)
# do the same with a known Host
event = {
'change_type': 'UP',
'address': ('192.168.1.0', 9000)
}
self.cluster.scheduler.reset_mock()
self.control_connection._handle_status_change(event)
host = self.cluster.metadata.hosts['192.168.1.0']
self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.cluster.on_up, host)
self.cluster.scheduler.schedule.reset_mock()
event = {
'change_type': 'DOWN',
'address': ('1.2.3.4', 9000)
}
self.control_connection._handle_status_change(event)
self.assertFalse(self.cluster.scheduler.schedule.called)
# do the same with a known Host
event = {
'change_type': 'DOWN',
'address': ('192.168.1.0', 9000)
}
self.control_connection._handle_status_change(event)
host = self.cluster.metadata.hosts['192.168.1.0']
self.assertIs(host, self.cluster.down_host)
def test_handle_schema_change(self):
change_types = [getattr(SchemaChangeType, attr) for attr in vars(SchemaChangeType) if attr[0] != '_']
for change_type in change_types:
event = {
'target_type': SchemaTargetType.TABLE,
'change_type': change_type,
'keyspace': 'ks1',
'table': 'table1'
}
self.cluster.scheduler.reset_mock()
self.control_connection._handle_schema_change(event)
self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection.refresh_schema, **event)
self.cluster.scheduler.reset_mock()
event['target_type'] = SchemaTargetType.KEYSPACE
del event['table']
self.control_connection._handle_schema_change(event)
self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection.refresh_schema, **event)
def test_refresh_disabled(self):
cluster = MockCluster()
schema_event = {
'target_type': SchemaTargetType.TABLE,
'change_type': SchemaChangeType.CREATED,
'keyspace': 'ks1',
'table': 'table1'
}
status_event = {
'change_type': 'UP',
'address': ('1.2.3.4', 9000)
}
topo_event = {
'change_type': 'MOVED_NODE',
'address': ('1.2.3.4', 9000)
}
cc_no_schema_refresh = ControlConnection(cluster, 1, -1, 0)
cluster.scheduler.reset_mock()
# no call on schema refresh
cc_no_schema_refresh._handle_schema_change(schema_event)
self.assertFalse(cluster.scheduler.schedule.called)
self.assertFalse(cluster.scheduler.schedule_unique.called)
# topo and status changes as normal
cc_no_schema_refresh._handle_status_change(status_event)
cc_no_schema_refresh._handle_topology_change(topo_event)
cluster.scheduler.schedule_unique.assert_has_calls([call(ANY, cc_no_schema_refresh.refresh_node_list_and_token_map),
call(ANY, cc_no_schema_refresh.refresh_node_list_and_token_map)])
cc_no_topo_refresh = ControlConnection(cluster, 1, 0, -1)
cluster.scheduler.reset_mock()
# no call on topo refresh
cc_no_topo_refresh._handle_topology_change(topo_event)
self.assertFalse(cluster.scheduler.schedule.called)
self.assertFalse(cluster.scheduler.schedule_unique.called)
# schema and status change refresh as normal
cc_no_topo_refresh._handle_status_change(status_event)
cc_no_topo_refresh._handle_schema_change(schema_event)
cluster.scheduler.schedule_unique.assert_has_calls([call(ANY, cc_no_topo_refresh.refresh_node_list_and_token_map),
call(0.0, cc_no_topo_refresh.refresh_schema,
**schema_event)])
class EventTimingTest(unittest.TestCase):
"""
A simple test to validate that event scheduling happens in order
Added for PYTHON-358
"""
def setUp(self):
self.cluster = MockCluster()
self.connection = MockConnection()
self.time = FakeTime()
# Use 2 for the schema_event_refresh_window which is what we would normally default to.
self.control_connection = ControlConnection(self.cluster, 1, 2, 0)
self.control_connection._connection = self.connection
self.control_connection._time = self.time
def test_event_delay_timing(self):
"""
        Submits a wide array of events and makes sure that each is scheduled to occur in the order it was received
"""
prior_delay = 0
for _ in range(100):
for change_type in ('CREATED', 'DROPPED', 'UPDATED'):
event = {
'change_type': change_type,
'keyspace': '1',
'table': 'table1'
}
# This is to increment the fake time, we don't actually sleep here.
self.time.sleep(.001)
self.cluster.scheduler.reset_mock()
self.control_connection._handle_schema_change(event)
self.cluster.scheduler.mock_calls
# Grabs the delay parameter from the scheduler invocation
current_delay = self.cluster.scheduler.mock_calls[0][1][0]
self.assertLess(prior_delay, current_delay)
prior_delay = current_delay
|
|
import copy
import shelve
import logging
from hashlib import sha256
from urllib import quote
from urllib import unquote
from saml2 import SAMLError
from saml2.s_utils import rndstr
from saml2.s_utils import PolicyError
from saml2.saml import NameID
from saml2.saml import NAMEID_FORMAT_PERSISTENT
from saml2.saml import NAMEID_FORMAT_TRANSIENT
from saml2.saml import NAMEID_FORMAT_EMAILADDRESS
__author__ = 'rolandh'
logger = logging.getLogger(__name__)
ATTR = ["name_qualifier", "sp_name_qualifier", "format", "sp_provided_id",
"text"]
class Unknown(SAMLError):
pass
def code(item):
"""
    Turn a NameID class instance into a quoted string of comma-separated
    attribute=value pairs, where each attribute name is replaced with its
    positional digit. Decoding therefore depends on knowing the specific
    attribute order used for the class.
:param item: The class instance
:return: A quoted string
"""
_res = []
i = 0
for attr in ATTR:
val = getattr(item, attr)
if val:
_res.append("%d=%s" % (i, quote(val)))
i += 1
return ",".join(_res)
def decode(txt):
"""Turns a coded string by code() into a NameID class instance.
:param txt: The coded string
"""
_nid = NameID()
for part in txt.split(","):
if part.find("=") != -1:
i, val = part.split("=")
try:
setattr(_nid, ATTR[int(i)], unquote(val))
except:
pass
return _nid
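# Illustrative round trip (hypothetical values), assuming the positional
# encoding above, where "format" is index 2 and "text" is index 4 in ATTR:
#
#   nid = NameID(format=NAMEID_FORMAT_TRANSIENT, text="abc123")
#   code(nid)          -> "2=<quoted format>,4=abc123"
#   decode(code(nid))  -> NameID with the same format and text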
class IdentDB(object):
""" A class that handles identifiers of entities
Keeps a list of all nameIDs returned per SP
"""
def __init__(self, db, domain="", name_qualifier=""):
if isinstance(db, basestring):
self.db = shelve.open(db)
else:
self.db = db
self.domain = domain
self.name_qualifier = name_qualifier
def _create_id(self, nformat, name_qualifier="", sp_name_qualifier=""):
_id = sha256(rndstr(32))
_id.update(nformat)
if name_qualifier:
_id.update(name_qualifier)
if sp_name_qualifier:
_id.update(sp_name_qualifier)
return _id.hexdigest()
def create_id(self, nformat, name_qualifier="", sp_name_qualifier=""):
_id = self._create_id(nformat, name_qualifier, sp_name_qualifier)
while _id in self.db:
_id = self._create_id(nformat, name_qualifier, sp_name_qualifier)
return _id
def store(self, ident, name_id):
if isinstance(ident, unicode):
ident = ident.encode("utf-8")
try:
val = self.db[ident].split(" ")
except KeyError:
val = []
_cn = code(name_id)
val.append(_cn)
self.db[ident] = " ".join(val)
self.db[_cn] = ident
def remove_remote(self, name_id):
_cn = code(name_id)
_id = self.db[_cn]
try:
vals = self.db[_id].split(" ")
vals.remove(_cn)
self.db[_id] = " ".join(vals)
except KeyError:
pass
del self.db[_cn]
def remove_local(self, sid):
if isinstance(sid, unicode):
sid = sid.encode("utf-8")
try:
for val in self.db[sid].split(" "):
try:
del self.db[val]
except KeyError:
pass
del self.db[sid]
except KeyError:
pass
def get_nameid(self, userid, nformat, sp_name_qualifier, name_qualifier):
_id = self.create_id(nformat, name_qualifier, sp_name_qualifier)
if nformat == NAMEID_FORMAT_EMAILADDRESS:
if not self.domain:
raise SAMLError("Can't issue email nameids, unknown domain")
_id = "%s@%s" % (_id, self.domain)
if nformat == NAMEID_FORMAT_PERSISTENT:
_id = userid
nameid = NameID(format=nformat, sp_name_qualifier=sp_name_qualifier,
name_qualifier=name_qualifier, text=_id)
self.store(userid, nameid)
return nameid
def find_nameid(self, userid, **kwargs):
res = []
try:
_vals = self.db[userid]
except KeyError:
logger.debug("failed to find userid %s in IdentDB" % userid)
return res
for val in _vals.split(" "):
nid = decode(val)
if kwargs:
for key, val in kwargs.items():
if getattr(nid, key, None) != val:
break
else:
res.append(nid)
else:
res.append(nid)
return res
def nim_args(self, local_policy=None, sp_name_qualifier="",
name_id_policy=None, name_qualifier=""):
"""
:param local_policy:
:param sp_name_qualifier:
:param name_id_policy:
:param name_qualifier:
:return:
"""
logger.debug("local_policy: %s, name_id_policy: %s" % (local_policy,
name_id_policy))
if name_id_policy and name_id_policy.sp_name_qualifier:
sp_name_qualifier = name_id_policy.sp_name_qualifier
else:
sp_name_qualifier = sp_name_qualifier
if name_id_policy and name_id_policy.format:
nameid_format = name_id_policy.format
elif local_policy:
nameid_format = local_policy.get_nameid_format(sp_name_qualifier)
else:
raise SAMLError("Unknown NameID format")
if not name_qualifier:
name_qualifier = self.name_qualifier
return {"nformat": nameid_format,
"sp_name_qualifier": sp_name_qualifier,
"name_qualifier": name_qualifier}
def construct_nameid(self, userid, local_policy=None,
sp_name_qualifier=None, name_id_policy=None,
name_qualifier=""):
""" Returns a name_id for the object. How the name_id is
constructed depends on the context.
:param local_policy: The policy the server is configured to follow
:param userid: The local permanent identifier of the object
        :param sp_name_qualifier: The 'user(s)' of the name_id
:param name_id_policy: The policy the server on the other side wants
us to follow.
:param name_qualifier: A domain qualifier
:return: NameID instance precursor
"""
args = self.nim_args(local_policy, sp_name_qualifier, name_id_policy)
if name_qualifier:
args["name_qualifier"] = name_qualifier
else:
args["name_qualifier"] = self.name_qualifier
return self.get_nameid(userid, **args)
def transient_nameid(self, userid, sp_name_qualifier="", name_qualifier=""):
return self.get_nameid(userid, NAMEID_FORMAT_TRANSIENT,
sp_name_qualifier, name_qualifier)
def persistent_nameid(self, userid, sp_name_qualifier="",
name_qualifier=""):
nameid = self.match_local_id(userid, sp_name_qualifier, name_qualifier)
if nameid:
return nameid
else:
return self.get_nameid(userid, NAMEID_FORMAT_PERSISTENT,
sp_name_qualifier, name_qualifier)
def find_local_id(self, name_id):
"""
Only find persistent IDs
:param name_id:
:return:
"""
try:
return self.db[code(name_id)]
except KeyError:
logger.debug("name: %s" % code(name_id))
logger.debug("id keys: %s" % self.db.keys())
return None
def match_local_id(self, userid, sp_name_qualifier, name_qualifier):
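        # A stored NameID matches when it is not transient and its
        # sp_name_qualifier/name_qualifier either equal the requested values
        # or are unset together with the corresponding requested value.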
try:
for val in self.db[userid].split(" "):
nid = decode(val)
if nid.format == NAMEID_FORMAT_TRANSIENT:
continue
snq = getattr(nid, "sp_name_qualifier", "")
if snq and snq == sp_name_qualifier:
nq = getattr(nid, "name_qualifier", None)
if nq and nq == name_qualifier:
return nid
elif not nq and not name_qualifier:
return nid
elif not snq and not sp_name_qualifier:
nq = getattr(nid, "name_qualifier", None)
if nq and nq == name_qualifier:
return nid
elif not nq and not name_qualifier:
return nid
except KeyError:
pass
return None
def handle_name_id_mapping_request(self, name_id, name_id_policy):
"""
:param name_id: The NameID that specifies the principal
:param name_id_policy: The NameIDPolicy of the requester
        :return: If an old name_id exists that matches the name-id policy,
            it is returned. Otherwise, if a new one is allowed to be
            created, it is created and returned. If no match exists and
            creating a new one is not allowed, None is returned.
"""
_id = self.find_local_id(name_id)
if not _id:
raise Unknown("Unknown entity")
# return an old one if present
for val in self.db[_id].split(" "):
_nid = decode(val)
if _nid.format == name_id_policy.format:
if _nid.sp_name_qualifier == name_id_policy.sp_name_qualifier:
return _nid
if name_id_policy.allow_create == "false":
raise PolicyError("Not allowed to create new identifier")
# else create and return a new one
return self.construct_nameid(_id, name_id_policy=name_id_policy)
def handle_manage_name_id_request(self, name_id, new_id=None,
new_encrypted_id="", terminate=""):
"""
        Requests from the SP concern the SPProvidedID attribute, so this is
        about adding, replacing, and removing that attribute.
:param name_id: NameID instance
:param new_id: NewID instance
:param new_encrypted_id: NewEncryptedID instance
:param terminate: Terminate instance
:return: The modified name_id
"""
_id = self.find_local_id(name_id)
orig_name_id = copy.copy(name_id)
if new_id:
name_id.sp_provided_id = new_id.text
elif new_encrypted_id:
# TODO
pass
elif terminate:
name_id.sp_provided_id = None
else:
#NOOP
return name_id
self.remove_remote(orig_name_id)
self.store(_id, name_id)
return name_id
def close(self):
self.db.close()
|
|
# Author : Antoine Broyelle
# Licence : MIT
# inspired by : KTH - DD2432 : Artificial Neural Networks and Other Learning Systems
# https://www.kth.se/student/kurser/kurs/DD2432?l=en
import numpy as np
import sys
class MLP:
    '''Multi-layer perceptron (MLP).'''
def __init__(self, inputs, targets, nbNodes=1, outputType='logic'):
'''
Constructor
        :param inputs: set of data points as row vectors
        :param targets: target values, one row per data point
        :param nbNodes: number of hidden nodes
:param outputType: can be 'logic' with a sigmoid, 'linear', or 'softmax'
'''
# Prerequisites
if np.ndim(inputs) > 2:
            raise Exception('[mlp][__init__] The input should be a matrix with at most 2 dimensions')
        if np.shape(inputs)[0] != np.shape(targets)[0]:
            raise Exception('[mlp][__init__] The input and target matrices do not have the same number of samples')
# Parameters
dimensions = np.shape(inputs)
self.nbSamples = dimensions[0]
self.dimIn = 1 if np.ndim(inputs) == 1 else dimensions[1]
self.dimOut = 1 if np.ndim(targets) <= 1 else np.shape(targets)[1]
self.nbNodes = nbNodes
self.outputType = outputType
# Data
self.targets = targets
self.inputs = np.concatenate((inputs, np.ones((self.nbSamples, 1))), axis=1)
# Initialise network
        # uniform distribution of weights in [-1/sqrt(n), 1/sqrt(n)], with n the number of input nodes
self.w1 = 2*(np.random.rand(self.dimIn + 1, self.nbNodes) - 0.5) / np.sqrt(self.dimIn)
self.w2 = 2*(np.random.rand(self.nbNodes + 1, self.dimOut) - 0.5) / np.sqrt(self.nbNodes)
def __addColumn(self, inputs):
return np.concatenate((inputs, np.ones((np.shape(inputs)[0],1))),axis=1)
def __phi(self,x):
'''Sigmoid function for activation'''
return 1.0 / (1.0 + np.exp(-0.8 * x))
def __deltaPhi(self,x):
'''Derivative of the Sigmoid function phi'''
        return 0.8 * np.exp(-0.8 * x) * self.__phi(x)**2
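    # Sanity check (by hand): with phi(x) = 1 / (1 + exp(-0.8 x)), the
    # derivative is 0.8 * exp(-0.8 x) * phi(x)**2, which is what __deltaPhi
    # returns; at x = 0 this gives 0.8 * 1 * 0.25 = 0.2.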
def predict(self, inputs=None, training=False):
'''
Recall/Forward step of the back-propagation algorithm
:param inputs:
:param training: if called with training = True, temporary calculations are returned
:return: In case training = True :
oout: output of the network. oout = phi(oin)
oin: input of output nodes. oin = hout*W2
hout : output of the first layer. hout = phi(hin)
                 hin : input of the hidden nodes. hin = inputs*W1
Otherwise : oout
:warn: be careful with matrix dimensions due to the bias terms
'''
if inputs is None:
inputs = self.inputs
else:
inputs = self.__addColumn(inputs)
hin = np.dot(inputs, self.w1)
hout = self.__phi(hin)
oin = np.dot(self.__addColumn(hout), self.w2)
if self.outputType == 'linear':
result = oin, oin, hout, hin
elif self.outputType == 'logic':
result = self.__phi(oin), oin, hout, hin
elif self.outputType == 'softmax':
result = np.exp(oin)/np.sum(np.exp(oin)), oin, hout, hin
else:
raise Exception('[mlp][fwd] outputType not valid')
if training:
return result
else:
return result[0]
def train(self, eta=0.1, beta=None, nbIte=100, momentum=0.7, validData=None,
validTargets=None, eps=10**(-6)):
'''
Training using back-propagation
:param eta: learning rate for the hidden layer.
:param beta: learning rate for the output layer.
:param nbIte: number of iterations.
:param momentum: update inertia. If no momentum is required it should be equal to 0.
In case of an early stop, the momentum will be set to 0.
:param validData: validation set for early stopping.
:param validTargets: target values for the validation set for early stopping.
        :param eps: early stop criterion. Stop training if the two previous
                    updates each reduced the validation sum of squared errors
                    by less than eps.
'''
if beta is None:
beta = eta
updatew1 = np.zeros(self.w1.shape)
updatew2 = np.zeros(self.w2.shape)
earlyStop = True if validData is not None and validTargets is not None else False
momentum = 0 if earlyStop else momentum
valErr0 = 0 if not earlyStop else np.sum((self.predict(validData) - validTargets )**2) # current
valErr1 = valErr0+2*eps # previous error
valErr2 = valErr1+2*eps
for n in range(nbIte):
if earlyStop and n > 10 and (valErr1 - valErr0) < eps and (valErr2 - valErr1) < eps:
break
outputs, oin, hout, hin = self.predict(training=True)
if np.mod(n,100) == 0:
print >> sys.stderr, "Iter: ",n, " error(SSE): ", np.sum((outputs-self.targets)**2)
if self.outputType == 'linear':
deltaO = (outputs - self.targets)
elif self.outputType == 'logic':
deltaO = (outputs - self.targets) * self.__deltaPhi(oin)
elif self.outputType == 'softmax':
deltaO = beta * (outputs - self.targets) * outputs * (1.0 - outputs)
else:
raise Exception('[mlp][train] outputType not valid')
deltaH = np.dot(deltaO, np.transpose(self.w2[:-1,:])) * self.__deltaPhi(hin)
updatew1 = eta * np.dot(np.transpose(self.inputs), deltaH) + momentum * updatew1
updatew2 = beta * np.dot(np.transpose(self.__addColumn(hout)), deltaO) + momentum * updatew2
self.w1 -= updatew1
self.w2 -= updatew2
if earlyStop:
valErr2 = valErr1
valErr1 = valErr0
valErr0 = np.sum((self.predict(validData) - validTargets )**2)
print >> sys.stderr, "Iter: ", n, " error(SSE): ", np.sum((outputs - self.targets) ** 2)
return self.predict()
if __name__ == "__main__":
'''Logic Tests'''
eta = 0.1
inputs = np.array([[0,0], [0,1], [1,0], [1,1]])
ANDtargets = np.array([[0], [0], [0], [1]])
ORtargets = np.array([0, 1, 1, 1]) # second format for 1 dimensional targets
XORtargets = np.array([[0], [1], [1], [0]]) # non linearly separable
print "XOR"
mlp = MLP(inputs, XORtargets, nbNodes=3)
output = mlp.train(eta, eta, 2000)
print "Perceptron learning rule"
print output
'''2D test'''
import matplotlib.pyplot as plt
# Data parameters
n = 150
sigma = 0.8
cov = [[sigma, 0], [0, sigma]]
p = 2
# Data generation
dataA = np.random.multivariate_normal([p, -p], cov, n)
dataB = np.random.multivariate_normal([-p, p], cov, n)
dataC = np.random.multivariate_normal([p, p], cov, n)
dataD = np.random.multivariate_normal([-p, -p], cov, n)
targetA = np.repeat(np.array([[1,0,0,0]]), n, axis=0)
targetB = np.repeat(np.array([[0,1,0,0]]), n, axis=0)
targetC = np.repeat(np.array([[0,0,1,0]]), n, axis=0)
targetD = np.repeat(np.array([[0,0,0,1]]), n, axis=0)
data = np.concatenate((dataA, dataB, dataC, dataD))
target = np.concatenate((targetA, targetB, targetC, targetD))
# Shuffle
p = np.random.permutation(np.shape(data)[0])
data = data[p]
target = target[p]
# Normalize
#data = (data - np.mean(data, axis=0)) / np.var(data, axis=0)
# Split
trainData = data[::2]
validData = data[1::4]
testData = data[3::4]
trainTarget = target[::2]
validTarget = target[1::4]
testTarget = target[3::4]
# Learning
mlp = MLP(trainData, trainTarget, nbNodes=2)
out = mlp.train(nbIte=100000, eta=0.1, validData=validData, validTargets=validTarget)
c = np.argmax(out, axis=1)
plt.scatter(trainData[:,0], trainData[:,1], c=c, s=120, marker='.')
# Evaluation
x = np.arange(-6, 6, 0.01)
y = np.arange(-4, 4, 0.01)
xx0, yy0 = np.meshgrid(x, y)
xx = np.reshape(xx0, (xx0.shape[0]*xx0.shape[1],1))
yy = np.reshape(yy0, (yy0.shape[0]*yy0.shape[1],1))
grid = np.concatenate((xx,yy), axis=1)
area = mlp.predict(grid)
plt.scatter(validData[:, 0], validData[:, 1], c=np.argmax(validTarget, axis=1), s=120,marker='*')
plt.contour(xx0, yy0, np.argmax(area, axis=1).reshape(xx0.shape))
plt.show()
|
|
import collections
import contextlib
import errno
import hashlib
import json
import os
import io
import re
import shutil
import stat
import struct
import tarfile
import tempfile
import zipfile
from datetime import datetime, timedelta
from django import forms
from django.conf import settings
from django.core.files.storage import (
File as DjangoFile, default_storage as storage)
from django.template.defaultfilters import filesizeformat
from django.utils.encoding import force_text
from django.utils.jslex import JsLexer
from django.utils.translation import ugettext
import flufl.lock
import rdflib
from xml.parsers.expat import ExpatError
from defusedxml import minidom
from defusedxml.common import DefusedXmlException
import olympia.core.logger
from olympia import amo, core
from olympia.access import acl
from olympia.addons.utils import verify_mozilla_trademark
from olympia.amo.utils import decode_json, find_language, rm_local_tmp_dir
from olympia.applications.models import AppVersion
from olympia.lib.crypto.signing import get_signer_organizational_unit_name
from olympia.lib import unicodehelper
from olympia.users.utils import (
mozilla_signed_extension_submission_allowed,
system_addon_submission_allowed)
from olympia.versions.compare import version_int as vint
log = olympia.core.logger.getLogger('z.files.utils')
class ParseError(forms.ValidationError):
pass
VERSION_RE = re.compile(r'^[-+*.\w]{,32}$')
SIGNED_RE = re.compile(r'^META\-INF/(\w+)\.(rsa|sf)$')
# This is essentially what Firefox matches
# (see toolkit/components/extensions/ExtensionUtils.jsm)
MSG_RE = re.compile(r'__MSG_(?P<msgid>[a-zA-Z0-9@_]+?)__')
# The default update URL.
default = (
'https://versioncheck.addons.mozilla.org/update/VersionCheck.php?'
'reqVersion=%REQ_VERSION%&id=%ITEM_ID%&version=%ITEM_VERSION%&'
'maxAppVersion=%ITEM_MAXAPPVERSION%&status=%ITEM_STATUS%&appID=%APP_ID%&'
'appVersion=%APP_VERSION%&appOS=%APP_OS%&appABI=%APP_ABI%&'
    'locale=%APP_LOCALE%&currentAppVersion=%CURRENT_APP_VERSION%&'
'updateType=%UPDATE_TYPE%'
)
# number of times this lock has been acquired and not yet released
# could be helpful to debug potential race-conditions and multiple-locking
# scenarios.
_lock_count = {}
def get_filepath(fileorpath):
"""Resolve the actual file path of `fileorpath`.
This supports various input formats, a path, a django `File` object,
`olympia.files.File`, a `FileUpload` or just a regular file-like object.
"""
if isinstance(fileorpath, str):
return fileorpath
elif isinstance(fileorpath, DjangoFile):
return fileorpath
elif hasattr(fileorpath, 'file_path'): # File
return fileorpath.file_path
elif hasattr(fileorpath, 'path'): # FileUpload
return fileorpath.path
elif hasattr(fileorpath, 'name'): # file-like object
return fileorpath.name
return fileorpath
def id_to_path(pk):
"""
Generate a path from an id, to distribute folders in the file system.
1 => 1/1/1
12 => 2/12/12
123456 => 6/56/123456
"""
pk = str(pk)
path = [pk[-1]]
if len(pk) >= 2:
path.append(pk[-2:])
else:
path.append(pk)
path.append(pk)
return os.path.join(*path)
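# Illustrative sketch (not part of the original module): the mapping described
# in the docstring above, spelled out as assertions.
def _id_to_path_examples():
    assert id_to_path(1) == os.path.join('1', '1', '1')
    assert id_to_path(12) == os.path.join('2', '12', '12')
    assert id_to_path(123456) == os.path.join('6', '56', '123456')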
def get_file(fileorpath):
"""Get a file-like object, whether given a FileUpload object or a path."""
if hasattr(fileorpath, 'path'): # FileUpload
return storage.open(fileorpath.path, 'rb')
if hasattr(fileorpath, 'name'):
return fileorpath
return storage.open(fileorpath, 'rb')
def make_xpi(files):
file_obj = io.BytesIO()
zip_file = zipfile.ZipFile(file_obj, 'w')
for path, data in files.items():
zip_file.writestr(path, data)
zip_file.close()
file_obj.seek(0)
return file_obj
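# Illustrative sketch (not part of the original module): make_xpi() returns an
# in-memory zip, so it can be inspected directly with zipfile. The manifest
# content below is made up for the example.
def _make_xpi_example():
    xpi = make_xpi({'manifest.json': '{"name": "demo", "version": "1.0"}'})
    with zipfile.ZipFile(xpi) as zf:
        return zf.namelist()  # ['manifest.json']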
class UnsupportedFileType(forms.ValidationError):
pass
class NoManifestFound(forms.ValidationError):
pass
class InvalidManifest(forms.ValidationError):
pass
class Extractor(object):
"""Extract add-on info from a manifest file."""
App = collections.namedtuple('App', 'appdata id min max')
@classmethod
def parse(cls, xpi_fobj, minimal=False):
zip_file = SafeZip(xpi_fobj)
certificate = os.path.join('META-INF', 'mozilla.rsa')
certificate_info = None
if zip_file.exists(certificate):
certificate_info = SigningCertificateInformation(
zip_file.read(certificate))
if zip_file.exists('manifest.json'):
data = ManifestJSONExtractor(
zip_file, certinfo=certificate_info).parse(minimal=minimal)
elif zip_file.exists('install.rdf'):
# Note that RDFExtractor is a misnomer, it receives the zip_file
# object because it might need to read other files than just
# the rdf to deal with dictionaries, complete themes etc.
data = RDFExtractor(
zip_file, certinfo=certificate_info).parse(minimal=minimal)
else:
raise NoManifestFound(
'No install.rdf or manifest.json found')
return data
def get_appversions(app, min_version, max_version):
"""Return the `AppVersion`s that correspond to the given versions."""
qs = AppVersion.objects.filter(application=app.id)
min_appver = qs.get(version=min_version)
max_appver = qs.get(version=max_version)
return min_appver, max_appver
def get_simple_version(version_string):
"""Extract the version number without the ><= requirements.
This simply strips the >, < and = operators from the requirement, so
it will not be accurate for version requirements that are not >=, <= or
= to a version.
>>> get_simple_version('>=33.0a1')
'33.0a1'
"""
if not version_string:
return ''
return re.sub('[<=>]', '', version_string)
class RDFExtractor(object):
"""Extract add-on info from an install.rdf."""
# https://developer.mozilla.org/en-US/Add-ons/Install_Manifests#type
TYPES = {
'2': amo.ADDON_EXTENSION,
'4': amo.ADDON_THEME,
'8': amo.ADDON_LPAPP,
'64': amo.ADDON_DICT,
'128': amo.ADDON_EXTENSION, # Telemetry Experiment
'256': amo.ADDON_EXTENSION, # WebExtension Experiment
}
# Langpacks and dictionaries, if the type is properly set, are always
# considered restartless.
ALWAYS_RESTARTLESS_TYPES = ('8', '64', '128', '256')
# Telemetry and Web Extension Experiments types.
# See: bug 1220097 and https://github.com/mozilla/addons-server/issues/3315
EXPERIMENT_TYPES = ('128', '256')
manifest = u'urn:mozilla:install-manifest'
is_experiment = False # Experiment extensions: bug 1220097.
def __init__(self, zip_file, certinfo=None):
self.zip_file = zip_file
self.certinfo = certinfo
self.rdf = rdflib.Graph().parse(
data=force_text(zip_file.read('install.rdf')))
self.package_type = None
self.find_root() # Will set self.package_type
def parse(self, minimal=False):
data = {
'guid': self.find('id'),
'type': self.find_type(),
'version': self.find('version'),
'is_webextension': False,
'name': self.find('name'),
'summary': self.find('description'),
}
# Populate certificate information (e.g. signed by Mozilla or not)
# early on to be able to verify compatibility based on it
if self.certinfo is not None:
data.update(self.certinfo.parse())
if not minimal:
data.update({
'homepage': self.find('homepageURL'),
'is_restart_required': (
self.find('bootstrap') != 'true' and
self.find('type') not in self.ALWAYS_RESTARTLESS_TYPES),
'apps': self.apps(),
})
# We used to simply use the value of 'strictCompatibility' in the
# rdf to set strict_compatibility, but now we enable it or not for
# all legacy add-ons depending on their type. This will prevent
# them from being marked as compatible with Firefox 57.
# This is not true for legacy add-ons already signed by Mozilla.
# For these add-ons we just re-use whatever
# `strictCompatibility` is set to.
if data['type'] not in amo.NO_COMPAT:
if self.certinfo and self.certinfo.is_mozilla_signed_ou:
data['strict_compatibility'] = (
self.find('strictCompatibility') == 'true')
else:
data['strict_compatibility'] = True
else:
data['strict_compatibility'] = False
# `experiment` is detected in `find_type`.
data['is_experiment'] = self.is_experiment
return data
def find_type(self):
# If the extension declares a type that we know about, use
# that.
# https://developer.mozilla.org/en-US/Add-ons/Install_Manifests#type
self.package_type = self.find('type')
if self.package_type and self.package_type in self.TYPES:
# If it's an experiment, we need to store that for later.
self.is_experiment = self.package_type in self.EXPERIMENT_TYPES
return self.TYPES[self.package_type]
# Look for Complete Themes.
is_complete_theme = self.find('internalName')
if is_complete_theme:
return amo.ADDON_THEME
# Look for dictionaries.
is_dictionary = (
self.zip_file.exists('dictionaries/') and
any(fname.endswith('.dic') for fname in self.zip_file.namelist())
)
if is_dictionary:
return amo.ADDON_DICT
# Consult <em:type>.
return self.TYPES.get(self.package_type, amo.ADDON_EXTENSION)
def uri(self, name):
namespace = 'http://www.mozilla.org/2004/em-rdf'
return rdflib.term.URIRef('%s#%s' % (namespace, name))
def find_root(self):
# If the install-manifest root is well-defined, it'll show up when we
# search for triples with it. If not, we have to find the context that
# defines the manifest and use that as our root.
# http://www.w3.org/TR/rdf-concepts/#section-triples
manifest = rdflib.term.URIRef(self.manifest)
if list(self.rdf.triples((manifest, None, None))):
self.root = manifest
else:
self.root = next(self.rdf.subjects(None, self.manifest))
def find(self, name, ctx=None):
"""Like $() for install.rdf, where name is the selector."""
if ctx is None:
ctx = self.root
# The predicate maps to <em:{name}>.
match = list(self.rdf.objects(ctx, predicate=self.uri(name)))
# These come back as rdflib.Literal, which subclasses unicode.
if match:
return str(match[0])
def apps(self):
rv = []
seen_apps = set()
for ctx in self.rdf.objects(None, self.uri('targetApplication')):
app = amo.APP_GUIDS.get(self.find('id', ctx))
if not app:
continue
if app.guid not in amo.APP_GUIDS or app.id in seen_apps:
continue
if app not in amo.APP_USAGE:
# Ignore compatibility entries for non-Firefox applications.
continue
seen_apps.add(app.id)
try:
min_appver_text = self.find('minVersion', ctx)
max_appver_text = self.find('maxVersion', ctx)
# Rewrite '*' as '56.*' in legacy extensions, since they
# are not compatible with higher versions.
# We don't do that for legacy add-ons that are already
# signed by Mozilla to allow them for Firefox 57 onwards.
needs_max_56_star = (
app.id in (amo.FIREFOX.id, amo.ANDROID.id) and
max_appver_text == '*' and
not (self.certinfo and self.certinfo.is_mozilla_signed_ou)
)
if needs_max_56_star:
max_appver_text = '56.*'
min_appver, max_appver = get_appversions(
app, min_appver_text, max_appver_text)
except AppVersion.DoesNotExist:
continue
rv.append(Extractor.App(
appdata=app, id=app.id, min=min_appver, max=max_appver))
return rv
class ManifestJSONExtractor(object):
def __init__(self, zip_file, data='', certinfo=None):
self.zip_file = zip_file
self.certinfo = certinfo
if not data:
data = zip_file.read('manifest.json')
# Remove BOM if present.
data = unicodehelper.decode(data)
# Run through the JSON and remove all comments, then try to read
# the manifest file.
# Note that Firefox and the WebExtension spec only allow for
# line comments (starting with `//`), not block comments (starting with
# `/*`). We strip out both here; block-level comments are still flagged
# explicitly as an error by the linter, so the developer can change them
# to line-level comments.
json_string = ''
lexer = JsLexer()
for name, token in lexer.lex(data):
if name not in ('blockcomment', 'linecomment'):
json_string += token
try:
self.data = json.loads(json_string)
except Exception:
raise InvalidManifest(
ugettext('Could not parse the manifest file.'))
def get(self, key, default=None):
return self.data.get(key, default)
@property
def is_experiment(self):
"""Return whether or not the webextension uses
experiments or theme experiments API.
In legacy extensions this is a different type, but for webextensions
we just look at the manifest."""
experiment_keys = ('experiment_apis', 'theme_experiment')
return any(bool(self.get(key)) for key in experiment_keys)
@property
def gecko(self):
"""Return the "applications|browser_specific_settings["gecko"]" part
of the manifest."""
parent_block = self.get(
'browser_specific_settings', self.get('applications', {}))
return parent_block.get('gecko', {})
@property
def guid(self):
return self.gecko.get('id', None)
@property
def type(self):
return (
amo.ADDON_LPAPP if 'langpack_id' in self.data
else amo.ADDON_STATICTHEME if 'theme' in self.data
else amo.ADDON_DICT if 'dictionaries' in self.data
else amo.ADDON_EXTENSION
)
@property
def strict_max_version(self):
return get_simple_version(self.gecko.get('strict_max_version'))
@property
def strict_min_version(self):
return get_simple_version(self.gecko.get('strict_min_version'))
def apps(self):
"""Get `AppVersion`s for the application."""
type_ = self.type
if type_ == amo.ADDON_LPAPP:
# Langpacks are only compatible with Firefox desktop at the moment.
# https://github.com/mozilla/addons-server/issues/8381
# They are all strictly compatible with a specific version, so
# the default min version here doesn't matter much.
apps = (
(amo.FIREFOX, amo.DEFAULT_WEBEXT_MIN_VERSION),
)
elif type_ == amo.ADDON_STATICTHEME:
# Static themes are only compatible with Firefox desktop >= 53
# and Firefox for Android >=65.
apps = (
(amo.FIREFOX, amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX),
(amo.ANDROID, amo.DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID),
)
elif type_ == amo.ADDON_DICT:
# WebExt dicts are only compatible with Firefox desktop >= 61.
apps = (
(amo.FIREFOX, amo.DEFAULT_WEBEXT_DICT_MIN_VERSION_FIREFOX),
)
else:
webext_min = (
amo.DEFAULT_WEBEXT_MIN_VERSION
if self.get('browser_specific_settings', None) is None
else amo.DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC)
# amo.DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC should be 48.0,
# which is the same as amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID, so
# no specific treatment for Android.
apps = (
(amo.FIREFOX, webext_min),
(amo.ANDROID, amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID),
)
doesnt_support_no_id = (
self.strict_min_version and
(vint(self.strict_min_version) <
vint(amo.DEFAULT_WEBEXT_MIN_VERSION_NO_ID))
)
if self.guid is None and doesnt_support_no_id:
raise forms.ValidationError(
ugettext('GUID is required for Firefox 47 and below.')
)
# If a minimum strict version is specified, it needs to be higher
# than the version when Firefox started supporting WebExtensions.
unsupported_no_matter_what = (
self.strict_min_version and vint(self.strict_min_version) <
vint(amo.DEFAULT_WEBEXT_MIN_VERSION))
if unsupported_no_matter_what:
msg = ugettext('Lowest supported "strict_min_version" is 42.0.')
raise forms.ValidationError(msg)
for app, default_min_version in apps:
if self.guid is None and not self.strict_min_version:
strict_min_version = max(amo.DEFAULT_WEBEXT_MIN_VERSION_NO_ID,
default_min_version)
else:
# strict_min_version for this app shouldn't be lower than the
# default min version for this app.
strict_min_version = max(
self.strict_min_version, default_min_version)
strict_max_version = (
self.strict_max_version or amo.DEFAULT_WEBEXT_MAX_VERSION)
if vint(strict_max_version) < vint(strict_min_version):
strict_max_version = strict_min_version
qs = AppVersion.objects.filter(application=app.id)
try:
min_appver = qs.get(version=strict_min_version)
except AppVersion.DoesNotExist:
# If the specified strict_min_version can't be found, raise an
# error, we can't guess an appropriate one.
msg = ugettext(
u'Unknown "strict_min_version" {appver} for {app}'.format(
app=app.pretty, appver=strict_min_version))
raise forms.ValidationError(msg)
try:
max_appver = qs.get(version=strict_max_version)
except AppVersion.DoesNotExist:
# If the specified strict_max_version can't be found, this is
# less of a problem, ignore and replace with '*'.
# https://github.com/mozilla/addons-server/issues/7160
max_appver = qs.get(version=amo.DEFAULT_WEBEXT_MAX_VERSION)
yield Extractor.App(
appdata=app, id=app.id, min=min_appver, max=max_appver)
def target_locale(self):
"""Guess target_locale for a dictionary from manifest contents."""
try:
dictionaries = self.get('dictionaries', {})
key = force_text(list(dictionaries.keys())[0])
return key[:255]
except (IndexError, UnicodeDecodeError):
# This shouldn't happen: the linter should prevent it, but
# just in case, handle the error (without bothering with
# translations as users should never see this).
raise forms.ValidationError('Invalid dictionaries object.')
def parse(self, minimal=False):
data = {
'guid': self.guid,
'type': self.type,
'version': self.get('version', ''),
'is_webextension': True,
'name': self.get('name'),
'summary': self.get('description'),
'homepage': self.get('homepage_url'),
'default_locale': self.get('default_locale'),
}
# Populate certificate information (e.g. signed by Mozilla or not)
# early on to be able to verify compatibility based on it
if self.certinfo is not None:
data.update(self.certinfo.parse())
if self.type == amo.ADDON_STATICTHEME:
data['theme'] = self.get('theme', {})
if not minimal:
data.update({
'is_restart_required': False,
'apps': list(self.apps()),
# Langpacks have strict compatibility enabled, rest of
# webextensions don't.
'strict_compatibility': data['type'] == amo.ADDON_LPAPP,
'is_experiment': self.is_experiment,
})
if self.type == amo.ADDON_EXTENSION:
# Only extensions have permissions and content scripts
data.update({
'permissions': self.get('permissions', []),
'content_scripts': self.get('content_scripts', []),
})
elif self.type == amo.ADDON_DICT:
data['target_locale'] = self.target_locale()
return data
class SigningCertificateInformation(object):
"""Process the signature to determine the addon is a Mozilla Signed
extension, so is signed already with a special certificate. We want to
know this so we don't write over it later, and stop unauthorised people
from submitting them to AMO."""
def __init__(self, certificate_data):
pkcs7 = certificate_data
self.cert_ou = get_signer_organizational_unit_name(pkcs7)
@property
def is_mozilla_signed_ou(self):
return self.cert_ou == 'Mozilla Extensions'
def parse(self):
return {'is_mozilla_signed_extension': self.is_mozilla_signed_ou}
def extract_search(content):
def _text(tag):
try:
return dom.getElementsByTagName(tag)[0].childNodes[0].wholeText
except (IndexError, AttributeError):
raise forms.ValidationError(
ugettext('Could not parse uploaded file, missing or empty '
'<%s> element') % tag)
# Only catch basic errors, most of that validation already happened in
# devhub.tasks:annotate_search_plugin_validation
try:
dom = minidom.parse(content)
except DefusedXmlException:
raise forms.ValidationError(
ugettext('OpenSearch: XML Security error.'))
except ExpatError:
raise forms.ValidationError(ugettext('OpenSearch: XML Parse Error.'))
return {
'name': _text('ShortName'),
'description': _text('Description')
}
def parse_search(fileorpath, addon=None):
try:
f = get_file(fileorpath)
data = extract_search(f)
except forms.ValidationError:
raise
except Exception:
log.error('OpenSearch parse error', exc_info=True)
raise forms.ValidationError(ugettext('Could not parse uploaded file.'))
return {'guid': None,
'type': amo.ADDON_SEARCH,
'name': data['name'],
'is_restart_required': False,
'is_webextension': False,
'summary': data['description'],
'version': datetime.now().strftime('%Y%m%d')}
class FSyncMixin(object):
"""Mixin that implements fsync for file extractions.
This mixin uses the `_extract_member` interface used by `zipfile` and
`tarfile`, so it's somewhat universal.
We need this to make sure that on EFS / NFS all data is immediately
written to avoid any data loss on the way.
"""
def _fsync_dir(self, path):
descriptor = os.open(path, os.O_DIRECTORY)
try:
os.fsync(descriptor)
except OSError as exc:
# On some filesystems doing an fsync on a directory
# raises an EINVAL error. Ignoring it is usually safe.
if exc.errno != errno.EINVAL:
raise
os.close(descriptor)
def _fsync_file(self, path):
descriptor = os.open(path, os.O_RDONLY)
os.fsync(descriptor)
os.close(descriptor)
def _extract_member(self, member, targetpath, *args, **kwargs):
"""Extends `ZipFile._extract_member` to call fsync().
For every extracted file we are ensuring that its data has been
written to disk. We are doing this to avoid any data inconsistencies
that we have seen in the past.
To do this correctly we are fsync()ing all directories as well;
only that will ensure we have a durable write for that specific file.
This is inspired by https://github.com/2ndquadrant-it/barman/
(see backup.py -> backup_fsync_and_set_sizes and utils.py)
"""
super(FSyncMixin, self)._extract_member(
member, targetpath, *args, **kwargs)
parent_dir = os.path.dirname(os.path.normpath(targetpath))
if parent_dir:
self._fsync_dir(parent_dir)
self._fsync_file(targetpath)
class FSyncedZipFile(FSyncMixin, zipfile.ZipFile):
"""Subclass of ZipFile that calls `fsync` for file extractions."""
pass
class FSyncedTarFile(FSyncMixin, tarfile.TarFile):
"""Subclass of TarFile that calls `fsync` for file extractions."""
pass
def archive_member_validator(archive, member):
"""Validate a member of an archive member (TarInfo or ZipInfo)."""
filename = getattr(member, 'filename', getattr(member, 'name', None))
filesize = getattr(member, 'file_size', getattr(member, 'size', None))
_validate_archive_member_name_and_size(filename, filesize)
def _validate_archive_member_name_and_size(filename, filesize):
if filename is None or filesize is None:
raise forms.ValidationError(ugettext('Unsupported archive type.'))
try:
force_text(filename)
except UnicodeDecodeError:
# We can't log the filename unfortunately since its encoding
# is obviously broken :-/
log.error('Extraction error, invalid file name encoding')
msg = ugettext('Invalid file name in archive. Please make sure '
'all filenames are utf-8 or latin1 encoded.')
raise forms.ValidationError(msg)
if '../' in filename or '..' == filename or filename.startswith('/'):
log.error('Extraction error, invalid file name: %s' % (filename))
# L10n: {0} is the name of the invalid file.
msg = ugettext('Invalid file name in archive: {0}')
raise forms.ValidationError(msg.format(filename))
if filesize > settings.FILE_UNZIP_SIZE_LIMIT:
log.error('Extraction error, file too big (%s): '
'%s' % (filename, filesize))
# L10n: {0} is the name of the invalid file.
msg = ugettext('File exceeding size limit in archive: {0}')
raise forms.ValidationError(msg.format(filename))
class SafeZip(object):
def __init__(self, source, mode='r', force_fsync=False):
self.source = source
self.info_list = None
self.mode = mode
self.force_fsync = force_fsync
self.is_valid = self.initialize_and_validate()
def initialize_and_validate(self):
"""
Runs some overall archive checks.
"""
# Shortcut to avoid expensive check over and over again
if getattr(self, 'is_valid', False):
return True
if self.force_fsync:
zip_file = FSyncedZipFile(self.source, self.mode)
else:
zip_file = zipfile.ZipFile(self.source, self.mode)
info_list = zip_file.infolist()
total_file_size = 0
for info in info_list:
total_file_size += info.file_size
archive_member_validator(self.source, info)
if total_file_size >= settings.MAX_ZIP_UNCOMPRESSED_SIZE:
raise forms.ValidationError(ugettext(
'Uncompressed size is too large'))
self.info_list = info_list
self.zip_file = zip_file
return True
def is_signed(self):
"""Tells us if an addon is signed."""
finds = []
for info in self.info_list:
match = SIGNED_RE.match(info.filename)
if match:
name, ext = match.groups()
# If it's rsa or sf, just look for the opposite.
if (name, {'rsa': 'sf', 'sf': 'rsa'}[ext]) in finds:
return True
finds.append((name, ext))
def extract_from_manifest(self, manifest):
"""
Extracts a file given a manifest such as:
jar:chrome/de.jar!/locale/de/browser/
or
locale/de/browser
"""
type, path = manifest.split(':')
jar = self
if type == 'jar':
parts = path.split('!')
for part in parts[:-1]:
jar = self.__class__(io.BytesIO(jar.zip_file.read(part)))
path = parts[-1]
return jar.read(path[1:] if path.startswith('/') else path)
def extract_info_to_dest(self, info, dest):
"""Extracts the given info to a directory and checks the file size."""
self.zip_file.extract(info, dest)
dest = os.path.join(dest, info.filename)
if not os.path.isdir(dest):
# Directories consistently report their size incorrectly.
size = os.stat(dest)[stat.ST_SIZE]
if size != info.file_size:
log.error('Extraction error, uncompressed size: %s, %s not %s'
% (self.source, size, info.file_size))
raise forms.ValidationError(ugettext('Invalid archive.'))
def extract_to_dest(self, dest):
"""Extracts the zip file to a directory."""
for info in self.info_list:
self.extract_info_to_dest(info, dest)
def close(self):
self.zip_file.close()
@property
def filelist(self):
return self.zip_file.filelist
@property
def namelist(self):
return self.zip_file.namelist
def exists(self, path):
try:
return self.zip_file.getinfo(path)
except KeyError:
return False
def read(self, path):
return self.zip_file.read(path)
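# Illustrative sketch (not part of the original module): how a chrome manifest
# entry such as 'jar:chrome/de.jar!/locale/de/browser/' is decomposed by
# SafeZip.extract_from_manifest() into nested jar archives and a final path.
def _split_jar_manifest(manifest):
    type_, path = manifest.split(':')
    if type_ != 'jar':
        return [], path
    parts = path.split('!')
    return parts[:-1], parts[-1].lstrip('/')
# _split_jar_manifest('jar:chrome/de.jar!/locale/de/browser/')
# == (['chrome/de.jar'], 'locale/de/browser/')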
def extract_zip(source, remove=False, force_fsync=False, tempdir=None):
"""Extracts the zip file. If remove is given, removes the source file."""
if tempdir is None:
tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
try:
zip_file = SafeZip(source, force_fsync=force_fsync)
zip_file.extract_to_dest(tempdir)
except Exception:
rm_local_tmp_dir(tempdir)
raise
if remove:
os.remove(source)
return tempdir
def extract_extension_to_dest(source, dest=None, force_fsync=False):
"""Extract `source` to `dest`.
`source` can be an extension or extension source: a zip, tar
(gzip, bzip2) or a search provider (.xml file).
Note that this doesn't verify the contents of `source` except for
that it requires something valid to be extracted.
:returns: Extraction target directory; if `dest` is `None` it'll be a
temporary directory.
"""
target, tempdir = None, None
if dest is None:
target = tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
else:
target = dest
try:
source = force_text(source)
if source.endswith((u'.zip', u'.xpi')):
with open(source, 'rb') as source_file:
zip_file = SafeZip(source_file, force_fsync=force_fsync)
zip_file.extract_to_dest(target)
elif source.endswith((u'.tar.gz', u'.tar.bz2', u'.tgz')):
tarfile_class = (
tarfile.TarFile
if not force_fsync else FSyncedTarFile)
with tarfile_class.open(source) as archive:
archive.extractall(target)
elif source.endswith(u'.xml'):
shutil.copy(source, target)
if force_fsync:
FSyncMixin()._fsync_file(target)
except (zipfile.BadZipfile, tarfile.ReadError, IOError):
if tempdir is not None:
rm_local_tmp_dir(tempdir)
raise forms.ValidationError(
ugettext('Invalid or broken archive.'))
return target
def copy_over(source, dest):
"""
Copies from the source to the destination, removing the destination
if it exists and is a directory.
"""
if os.path.exists(dest) and os.path.isdir(dest):
shutil.rmtree(dest)
shutil.copytree(source, dest)
# mkdtemp will set the directory permissions to 700;
# for the webserver to read them, we need 755.
os.chmod(dest, stat.S_IRWXU | stat.S_IRGRP |
stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
shutil.rmtree(source)
def get_all_files(folder, strip_prefix='', prefix=None):
"""Return all files in a file/directory tree.
:param folder: The folder of which to return the file-tree.
:param strip_prefix: A prefix to strip from each path when a custom
`prefix` is being added. Has no effect if
`prefix` isn't given.
:param prefix: A custom prefix to add to all files and folders.
"""
all_files = []
# Not using os.path.walk so we get just the right order.
def iterate(path):
path_dirs, path_files = storage.listdir(path)
for dirname in sorted(path_dirs):
full = os.path.join(path, force_text(dirname))
all_files.append(full)
iterate(full)
for filename in sorted(path_files):
full = os.path.join(path, force_text(filename))
all_files.append(full)
iterate(folder)
if prefix is not None:
# Strip `strip_prefix` (e.g. /tmp/) from each path and prepend `prefix`.
all_files = [
os.path.join(prefix, fname[len(strip_prefix) + 1:])
for fname in all_files]
return all_files
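# Illustrative sketch (not part of the original module): the prefix remapping
# performed at the end of get_all_files(). The paths below are made up for
# the example.
def _remap_prefix_example():
    all_files = ['/tmp/extract-abc/manifest.json', '/tmp/extract-abc/icons']
    strip_prefix = '/tmp/extract-abc'
    prefix = 'addon-1'
    return [os.path.join(prefix, fname[len(strip_prefix) + 1:])
            for fname in all_files]  # ['addon-1/manifest.json', 'addon-1/icons']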
def extract_xpi(xpi, path):
"""Extract all files from `xpi` to `path`.
This can be removed in favour of our already extracted git-repositories
once we have landed and tested them in production.
"""
tempdir = extract_zip(xpi)
all_files = get_all_files(tempdir)
copy_over(tempdir, path)
return all_files
def parse_xpi(xpi, addon=None, minimal=False, user=None):
"""Extract and parse an XPI. Returns a dict with various properties
describing the xpi.
Will raise ValidationError if something went wrong while parsing.
If minimal is True, it avoids validation as much as possible (still raising
ValidationError for hard errors like I/O or invalid json/rdf) and returns
only the minimal set of properties needed to decide what to do with the
add-on: guid, version and is_webextension.
"""
try:
xpi = get_file(xpi)
xpi_info = Extractor.parse(xpi, minimal=minimal)
except forms.ValidationError:
raise
except IOError as e:
if len(e.args) < 2:
err, strerror = None, e.args[0]
else:
err, strerror = e.args
log.error('I/O error({0}): {1}'.format(err, strerror))
# Note: we don't really know what happened, so even though we return a
# generic message about the manifest, don't raise InvalidManifest. We
# want the validation to stop there.
raise forms.ValidationError(ugettext(
'Could not parse the manifest file.'))
except Exception:
# As above, don't raise InvalidManifest here.
log.error('XPI parse error', exc_info=True)
raise forms.ValidationError(ugettext(
'Could not parse the manifest file.'))
if minimal:
return xpi_info
return check_xpi_info(xpi_info, addon, xpi, user=user)
def check_xpi_info(xpi_info, addon=None, xpi_file=None, user=None):
from olympia.addons.models import Addon, DeniedGuid
guid = xpi_info['guid']
is_webextension = xpi_info.get('is_webextension', False)
# If we allow the guid to be omitted we assume that one was generated
# or existed before and use that one.
# An example is WebExtensions, which don't require a guid but we generate
# one once they're uploaded. Now, if you update that WebExtension we
# just use the original guid.
if addon and not guid and is_webextension:
xpi_info['guid'] = guid = addon.guid
if not guid and not is_webextension:
raise forms.ValidationError(ugettext('Could not find an add-on ID.'))
if guid:
current_user = core.get_user()
if current_user:
deleted_guid_clashes = Addon.unfiltered.exclude(
authors__id=current_user.id).filter(guid=guid)
else:
deleted_guid_clashes = Addon.unfiltered.filter(guid=guid)
if addon and addon.guid != guid:
msg = ugettext(
'The add-on ID in your manifest.json or install.rdf (%s) '
'does not match the ID of your add-on on AMO (%s)')
raise forms.ValidationError(msg % (guid, addon.guid))
if (not addon and
# Non-deleted add-ons.
(Addon.objects.filter(guid=guid).exists() or
# DeniedGuid objects for deletions for Mozilla disabled add-ons
DeniedGuid.objects.filter(guid=guid).exists() or
# Deleted add-ons that don't belong to the uploader.
deleted_guid_clashes.exists())):
raise forms.ValidationError(ugettext('Duplicate add-on ID found.'))
if len(xpi_info['version']) > 32:
raise forms.ValidationError(
ugettext('Version numbers should have fewer than 32 characters.'))
if not VERSION_RE.match(xpi_info['version']):
raise forms.ValidationError(
ugettext('Version numbers should only contain letters, numbers, '
'and these punctuation characters: +*.-_.'))
if is_webextension and xpi_info.get('type') == amo.ADDON_STATICTHEME:
max_size = settings.MAX_STATICTHEME_SIZE
if xpi_file and os.path.getsize(xpi_file.name) > max_size:
raise forms.ValidationError(
ugettext(u'Maximum size for WebExtension themes is {0}.')
.format(filesizeformat(max_size)))
if xpi_file:
# Make sure we pass in a copy of `xpi_info` since
# `resolve_webext_translations` modifies data in-place
translations = Addon.resolve_webext_translations(
xpi_info.copy(), xpi_file)
verify_mozilla_trademark(translations['name'], core.get_user())
# Parse the file to get and validate package data with the addon.
if not acl.experiments_submission_allowed(user, xpi_info):
raise forms.ValidationError(
ugettext(u'You cannot submit this type of add-on'))
if not addon and not system_addon_submission_allowed(
user, xpi_info):
guids = ' or '.join(
'"' + guid + '"' for guid in amo.SYSTEM_ADDON_GUIDS)
raise forms.ValidationError(
ugettext(u'You cannot submit an add-on with a guid ending '
u'%s' % guids))
if not mozilla_signed_extension_submission_allowed(user, xpi_info):
raise forms.ValidationError(
ugettext(u'You cannot submit a Mozilla Signed Extension'))
if not acl.langpack_submission_allowed(user, xpi_info):
raise forms.ValidationError(
ugettext('You cannot submit a language pack'))
return xpi_info
def parse_addon(pkg, addon=None, user=None, minimal=False):
"""
Extract and parse a file path, UploadedFile or FileUpload. Returns a dict
with various properties describing the add-on.
Will raise ValidationError if something went wrong while parsing.
`addon` parameter is mandatory if the file being parsed is going to be
attached to an existing Addon instance.
`user` parameter is mandatory unless the `minimal` parameter is True. It should
point to the UserProfile responsible for the upload.
If `minimal` parameter is True, it avoids validation as much as possible
(still raising ValidationError for hard errors like I/O or invalid
json/rdf) and returns only the minimal set of properties needed to decide
what to do with the add-on (the exact set depends on the add-on type, but
it should always contain at least guid, type, version and is_webextension).
"""
name = getattr(pkg, 'name', pkg)
if name.endswith('.xml'):
parsed = parse_search(pkg, addon)
elif name.endswith(amo.VALID_ADDON_FILE_EXTENSIONS):
parsed = parse_xpi(pkg, addon, minimal=minimal, user=user)
else:
valid_extensions_string = u'(%s)' % u', '.join(
amo.VALID_ADDON_FILE_EXTENSIONS)
raise UnsupportedFileType(
ugettext(
'Unsupported file type, please upload a supported '
'file {extensions}.'.format(
extensions=valid_extensions_string)))
if not minimal:
if user is None:
# This should never happen and means there is a bug in
# addons-server itself.
raise forms.ValidationError(ugettext('Unexpected error.'))
# FIXME: do the checks depending on user here.
if addon and addon.type != parsed['type']:
msg = ugettext(
'The type (%s) does not match the type of your add-on on '
'AMO (%s)')
raise forms.ValidationError(msg % (parsed['type'], addon.type))
return parsed
def get_sha256(file_obj, block_size=io.DEFAULT_BUFFER_SIZE):
"""Calculate a sha256 hash for `file_obj`.
`file_obj` must be an open file-like object. The caller needs to take
care of closing it properly.
"""
hash_ = hashlib.sha256()
for chunk in iter(lambda: file_obj.read(block_size), b''):
hash_.update(chunk)
return hash_.hexdigest()
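# Illustrative sketch (not part of the original module): get_sha256() accepts
# any binary file-like object, so an in-memory buffer works as well.
def _sha256_example():
    # Same digest as hashlib.sha256(b'hello').hexdigest().
    return get_sha256(io.BytesIO(b'hello'))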
def update_version_number(file_obj, new_version_number):
"""Update the manifest to have the new version number."""
# Create a new xpi with the updated version.
updated = u'{0}.updated_version_number'.format(file_obj.file_path)
# Copy the original XPI, with an updated manifest.json.
with zipfile.ZipFile(file_obj.file_path, 'r') as source:
file_list = source.infolist()
with zipfile.ZipFile(updated, 'w', zipfile.ZIP_DEFLATED) as dest:
for file_ in file_list:
content = source.read(file_.filename)
if file_.filename == 'manifest.json':
content = _update_version_in_json_manifest(
content, new_version_number)
dest.writestr(file_, content)
# Move the updated file to the original file.
shutil.move(updated, file_obj.file_path)
def write_crx_as_xpi(chunks, target):
"""Extract and strip the header from the CRX, convert it to a regular ZIP
archive, then write it to `target`. Read more about the CRX file format:
https://developer.chrome.com/extensions/crx
"""
# First we open the uploaded CRX so we can see how much we need
# to trim from the header of the file to make it a valid ZIP.
with tempfile.NamedTemporaryFile('w+b', dir=settings.TMP_PATH) as tmp:
for chunk in chunks:
tmp.write(chunk)
tmp.seek(0)
header = tmp.read(16)
header_info = struct.unpack('4cHxII', header)
public_key_length = header_info[5]
signature_length = header_info[6]
# This is how far forward we need to seek to extract only a
# ZIP file from this CRX.
start_position = 16 + public_key_length + signature_length
hash = hashlib.sha256()
tmp.seek(start_position)
# Now we open the Django storage and write our real XPI file.
with storage.open(target, 'wb') as file_destination:
bytes = tmp.read(65536)
# Keep reading bytes and writing them to the XPI.
while bytes:
hash.update(bytes)
file_destination.write(bytes)
bytes = tmp.read(65536)
return hash
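# Illustrative sketch (not part of the original module): the CRX2 header read
# above consists of four 32-bit little-endian fields (magic 'Cr24', format
# version, public key length, signature length); the embedded ZIP starts right
# after the two variable-length blobs. This sketch uses an explicit '<4sIII'
# layout instead of the '4cHxII' format string used above.
def _crx_zip_offset(header):
    magic, version, key_len, sig_len = struct.unpack('<4sIII', header[:16])
    assert magic == b'Cr24'
    return 16 + key_len + sig_len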
def _update_version_in_json_manifest(content, new_version_number):
"""Change the version number in the json manifest file provided."""
updated = json.loads(content)
if 'version' in updated:
updated['version'] = new_version_number
return json.dumps(updated)
def extract_translations(file_obj):
"""Extract all translation messages from `file_obj`.
Returns a dict mapping each supported locale to the decoded contents of its
messages.json file.
"""
xpi = get_filepath(file_obj)
messages = {}
try:
with zipfile.ZipFile(xpi, 'r') as source:
file_list = source.namelist()
# Fetch all locales the add-on supports
# see https://developer.chrome.com/extensions/i18n#overview-locales
# for more details on the format.
locales = {
name.split('/')[1] for name in file_list
if name.startswith('_locales/') and
name.endswith('/messages.json')}
for locale in locales:
corrected_locale = find_language(locale)
# Filter out languages we don't support.
if not corrected_locale:
continue
fname = '_locales/{0}/messages.json'.format(locale)
try:
data = source.read(fname)
messages[corrected_locale] = decode_json(data)
except (ValueError, KeyError):
# `ValueError` thrown by `decode_json` if the json is
# invalid and `KeyError` thrown by `source.read`
# usually means the file doesn't exist for some reason;
# we fail silently
continue
except IOError:
pass
return messages
def resolve_i18n_message(message, messages, locale, default_locale=None):
"""Resolve a translatable string in an add-on.
This matches ``__MSG_extensionName__`` like names and returns the correct
translation for `locale`.
:param locale: The locale to fetch the translation for. If ``None``
(default) ``settings.LANGUAGE_CODE`` is used.
:param messages: A dictionary of messages, e.g the return value
of `extract_translations`.
"""
if not message or not isinstance(message, str):
# Don't even attempt to extract invalid data.
# See https://github.com/mozilla/addons-server/issues/3067
# for more details
return message
match = MSG_RE.match(message)
if match is None:
return message
locale = find_language(locale)
if default_locale:
default_locale = find_language(default_locale)
msgid = match.group('msgid')
default = {'message': message}
if locale in messages:
message = messages[locale].get(msgid, default)
elif default_locale in messages:
message = messages[default_locale].get(msgid, default)
if not isinstance(message, dict):
# Fallback for invalid message format, should be caught by
# addons-linter in the future but we'll have to handle it.
# See https://github.com/mozilla/addons-server/issues/3485
return default['message']
return message['message']
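# Illustrative sketch (not part of the original module): how a '__MSG_*__'
# placeholder maps onto the locale -> msgid -> entry structure returned by
# extract_translations(). The data below is made up for the example.
def _i18n_lookup_example():
    messages = {'de': {'extensionName': {'message': 'Beispiel'}}}
    msgid = MSG_RE.match('__MSG_extensionName__').group('msgid')
    return messages['de'][msgid]['message']  # 'Beispiel'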
def get_background_images(file_obj, theme_data, header_only=False):
"""Extract static theme header image from `file_obj` and return in dict."""
xpi = get_filepath(file_obj)
if not theme_data:
# we might already have theme_data, but otherwise get it from the xpi.
try:
parsed_data = parse_xpi(xpi, minimal=True)
theme_data = parsed_data.get('theme', {})
except forms.ValidationError:
# If we can't parse the existing manifest, return safely.
return {}
images_dict = theme_data.get('images', {})
# Get the reference in the manifest. headerURL is the deprecated variant.
header_url = images_dict.get(
'theme_frame', images_dict.get('headerURL'))
# And any additional backgrounds too.
additional_urls = (
images_dict.get('additional_backgrounds', []) if not header_only
else [])
image_urls = [header_url] + additional_urls
images = {}
try:
with zipfile.ZipFile(xpi, 'r') as source:
for url in image_urls:
_, file_ext = os.path.splitext(str(url).lower())
if file_ext not in amo.THEME_BACKGROUND_EXTS:
# Just extract image files.
continue
try:
images[url] = source.read(url)
except KeyError:
pass
except IOError as ioerror:
log.debug(ioerror)
return images
@contextlib.contextmanager
def atomic_lock(lock_dir, lock_name, lifetime=60):
"""A atomic, NFS safe implementation of a file lock.
Uses `flufl.lock` under the hood. Can be used as a context manager::
with atomic_lock(settings.TMP_PATH, 'extraction-1234'):
extract_xpi(...)
:return: `True` if the lock was attained (we own the lock),
`False` if there is an already existing lock.
"""
lock_name = lock_name + '.lock'
count = _lock_count.get(lock_name, 0)
log.debug('Acquiring lock %s, count is %d.' % (lock_name, count))
lock_name = os.path.join(lock_dir, lock_name)
lock = flufl.lock.Lock(lock_name, lifetime=timedelta(seconds=lifetime))
try:
# set `timeout=0` to avoid any process blocking but catch the
# TimeOutError raised instead.
lock.lock(timeout=timedelta(seconds=0))
except flufl.lock.AlreadyLockedError:
# This process already holds the lock
yield False
except flufl.lock.TimeOutError:
# Some other process holds the lock.
# Let's break the lock if it has expired. Unfortunately
# there's a bug in flufl.lock so let's do this manually.
# Bug: https://gitlab.com/warsaw/flufl.lock/merge_requests/1
release_time = lock._releasetime
max_release_time = release_time + flufl.lock._lockfile.CLOCK_SLOP
if (release_time != -1 and datetime.now() > max_release_time):
# Break the lock and try to acquire it again
lock._break()
lock.lock(timeout=timedelta(seconds=0))
yield lock.is_locked
else:
# Already locked
yield False
else:
# This is usually `True`, but in case some weird `lifetime` values
# were set we return whether we really attained the lock.
yield lock.is_locked
if lock.is_locked:
log.debug('Releasing lock %s.' % lock.details[2])
lock.unlock()
|
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
from django.test import TestCase
from transifex.languages.models import Language
from transifex.resources.formats.validators import *
class TestValidators(TestCase):
def test_empty_translation(self):
old = 'old'
new = ''
v = BaseValidator()
v(old, new)
def test_spaces(self):
v = SpaceValidator()
old = "as"
new = " "
self.assertRaises(ValidationError, v, old, new)
new = " \t"
self.assertRaises(ValidationError, v, old, new)
def test_brackets(self):
v = MatchingBracketsValidator()
for c in MatchingBracketsValidator.bracket_chars:
old = c + "string"
new = c + "trans"
v(old, new)
new = "trans" + c
v(old, new)
new = "tr" + c + "ans"
v(old, new)
new = "trans"
self.assertRaises(ValidationError, v, old, new)
for c1 in MatchingBracketsValidator.bracket_chars:
for c2 in MatchingBracketsValidator.bracket_chars:
old = 'a' + c1 + 'b' + c2 + 'c'
new = c1 + 'abc' + c2
v(old, new)
new = c2 + 'abc' + c1
v(old, new)
new = c1 + 'abc'
self.assertRaises(ValidationError, v, old, new)
new = c2 + 'abc'
self.assertRaises(ValidationError, v, old, new)
def test_urls(self):
v = UrlsValidator()
old = "blah http://www.transifex.net blah"
new = "blah http://www.transifex.net blah"
v(old, new)
new = "blah www.transifex.net blah"
self.assertRaises(ValidationError, v, old, new)
new = "blah http://www.tranisfex.net blah"
self.assertRaises(ValidationError, v, old, new)
new = "blah-blah"
self.assertRaises(ValidationError, v, old, new)
old = "blah http://www.transifex.net blah https://indifex.com"
new = "blah http://www.transifex.net blah https://indifex.com"
v(old, new)
new = "blah https://indifex.com"
self.assertRaises(ValidationError, v, old, new)
def test_emails(self):
v = EmailAddressesValidator()
old = "blah me@indifex.com"
new = "blah me@indifex.com blah"
v(old, new)
new = "blah you@indifex.com blah"
self.assertRaises(ValidationError, v, old, new)
old = "blah me@indifex.com and me@gmail.com blah"
new = "blah me@indifex.com and me@gmail.com blah"
v(old, new)
new = "blah you@indifex.com blah"
self.assertRaises(ValidationError, v, old, new)
def test_start_newlines(self):
v = NewLineAtBeginningValidator()
old = "asdaf"
new = "asdasa"
v(old, new)
old = "\n asdasa"
self.assertRaises(ValidationError, v, old, new)
new = "\nasdasdsaafsaf"
v(old, new)
old = "asadaaf"
self.assertRaises(ValidationError, v, old, new)
def test_end_newlines(self):
v = NewLineAtEndValidator()
old = "asdaf"
new = "asdasa"
v(old, new)
old = "asdasa\n"
self.assertRaises(ValidationError, v, old, new)
new = "asdasdsaafsaf\n"
v(old, new)
old = "asadaaf"
self.assertRaises(ValidationError, v, old, new)
def test_numbers(self):
v = NumbersValidator()
old = "asa0asda1as+2afd-3asdas0.12asda"
new = "asa0asda1as+2afd-3asdas0.12asda"
v(old, new)
new = "asa0asda1as+2afd-3asdas0,12asda"
v(old, new)
new = "asa0asda1as+2afd-3asdas012asda"
self.assertRaises(ValidationError, v, old, new)
new = "asaasda1as+2afd-3asdas012asda"
self.assertRaises(ValidationError, v, old, new)
new = "asa0asda1as-2afd-3asdas0.12asda"
self.assertRaises(ValidationError, v, old, new)
old = "as as das dsa "
new = "agre dsg fs sa d"
v(old, new)
def test_printf_formats(self):
class Language(object):
pass
sl = Language()
sl.nplurals = 2
tl = Language()
tl.nplurals = 2
v = PrintfFormatNumberValidator(sl, tl)
old = "%s %d"
new = "%s %d"
v(old, new)
new = "%f"
self.assertRaises(ValidationError, v, old, new)
tl.nplurals = 3
new = "%f %s %x"
v(old, new)
def test_source_printf_format(self):
v = PrintfFormatSourceValidator()
old = "%s %d asda"
new = "%d %s asagsfdsf %f"
v(old, new)
new = "%d"
self.assertRaises(ValidationError, v, old, new)
new = "%s"
self.assertRaises(ValidationError, v, old, new)
old = "%s %d"
new = "%2$d %1$s"
v(old, new)
old = "%(foo)s %(bar)s"
new = "%(fo0)s %(bar)s"
with self.assertRaises(ValidationError) as cm:
v(old, new)
self.assertIn('foo', unicode(cm.exception))
new = "%(foo)s"
with self.assertRaises(ValidationError) as cm:
v(old, new)
self.assertIn('bar', unicode(cm.exception))
new = "%(bar)s"
with self.assertRaises(ValidationError) as cm:
v(old, new)
self.assertIn('foo', unicode(cm.exception))
new = "%(bar)s %(foo)s"
v(old, new)
def test_translation_printf_format(self):
v = PrintfFormatTranslationValidator()
old = "%s %d asda %f"
new = "%d %s asagsfdsf %f"
v(old, new)
old = "%d %s"
self.assertRaises(ValidationError, v, old, new)
old = "%s %d asda %k"
self.assertRaises(ValidationError, v, old, new)
old = "%s %d"
new = "%2$d %1$s"
v(old, new)
old = "%(foo)s %(bar)s"
new = "%(fo0)s %(bar)s"
with self.assertRaises(ValidationError) as cm:
v(old, new)
self.assertIn('fo0', unicode(cm.exception))
new = "%(baz)s"
with self.assertRaises(ValidationError) as cm:
v(old, new)
self.assertIn('baz', unicode(cm.exception))
new = "%(bar)s %(foo)s"
v(old, new)
def test_singular_printf_number(self):
class Language(object):
pass
sl = Language()
sl.nplurals = 2
tl = Language()
tl.nplurals = 2
v = PrintfFormatPluralizedNumberValidator(sl, tl, rule=5)
old = "%s apples"
new = "apples"
self.assertRaises(ValidationError, v, old, new)
v.rule = 1
new = "apple"
v(old, new)
v.rule = 5
tl.nplurals = 5
v(old, new)
def test_singular_printf_source(self):
v = PrintfFormatPluralizedSourceValidator(rule=5)
old = "%s apples"
new = "apples"
self.assertRaises(ValidationError, v, old, new)
v.rule = 1
new = "apple"
v(old, new)
|
|
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import time
import mock
from oslo_config import cfg
from oslo_log import log
import oslo_messaging
import testtools
from neutron.agent.common import ovs_lib
from neutron.agent.common import utils
from neutron.agent.linux import async_process
from neutron.agent.linux import ip_lib
from neutron.common import constants as n_const
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \
import ovs_test_base
NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi'
OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0"
FAKE_MAC = '00:11:22:33:44:55'
FAKE_IP1 = '10.0.0.1'
FAKE_IP2 = '10.0.0.2'
class FakeVif(object):
ofport = 99
port_name = 'name'
class MockFixedIntervalLoopingCall(object):
def __init__(self, f):
self.f = f
def start(self, interval=0):
self.f()
class CreateAgentConfigMap(ovs_test_base.OVSAgentConfigTestBase):
def test_create_agent_config_map_succeeds(self):
self.assertTrue(self.mod_agent.create_agent_config_map(cfg.CONF))
def test_create_agent_config_map_fails_for_invalid_tunnel_config(self):
# An ip address is required for tunneling but there is no default,
# verify this for both gre and vxlan tunnels.
cfg.CONF.set_override('tunnel_types', [p_const.TYPE_GRE],
group='AGENT')
with testtools.ExpectedException(ValueError):
self.mod_agent.create_agent_config_map(cfg.CONF)
cfg.CONF.set_override('tunnel_types', [p_const.TYPE_VXLAN],
group='AGENT')
with testtools.ExpectedException(ValueError):
self.mod_agent.create_agent_config_map(cfg.CONF)
def test_create_agent_config_map_fails_no_local_ip(self):
# An ip address is required for tunneling but there is no default
cfg.CONF.set_override('tunnel_types', [p_const.TYPE_VXLAN],
group='AGENT')
with testtools.ExpectedException(ValueError):
self.mod_agent.create_agent_config_map(cfg.CONF)
def test_create_agent_config_map_fails_for_invalid_tunnel_type(self):
cfg.CONF.set_override('tunnel_types', ['foobar'], group='AGENT')
with testtools.ExpectedException(ValueError):
self.mod_agent.create_agent_config_map(cfg.CONF)
def test_create_agent_config_map_multiple_tunnel_types(self):
cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS')
cfg.CONF.set_override('tunnel_types', [p_const.TYPE_GRE,
p_const.TYPE_VXLAN], group='AGENT')
cfgmap = self.mod_agent.create_agent_config_map(cfg.CONF)
self.assertEqual(cfgmap['tunnel_types'],
[p_const.TYPE_GRE, p_const.TYPE_VXLAN])
def test_create_agent_config_map_enable_distributed_routing(self):
self.addCleanup(cfg.CONF.reset)
# Verify enable_distributed_routing is passed through to the config map
cfg.CONF.set_override('enable_distributed_routing', True,
group='AGENT')
cfgmap = self.mod_agent.create_agent_config_map(cfg.CONF)
self.assertEqual(cfgmap['enable_distributed_routing'], True)
class TestOvsNeutronAgent(object):
def setUp(self):
super(TestOvsNeutronAgent, self).setUp()
notifier_p = mock.patch(NOTIFIER)
notifier_cls = notifier_p.start()
self.notifier = mock.Mock()
notifier_cls.return_value = self.notifier
cfg.CONF.set_default('firewall_driver',
'neutron.agent.firewall.NoopFirewallDriver',
group='SECURITYGROUP')
cfg.CONF.set_default('quitting_rpc_timeout', 10, 'AGENT')
cfg.CONF.set_default('prevent_arp_spoofing', False, 'AGENT')
kwargs = self.mod_agent.create_agent_config_map(cfg.CONF)
mock.patch('neutron.agent.common.ovs_lib.OVSBridge.db_list',
return_value=[]).start()
with mock.patch.object(self.mod_agent.OVSNeutronAgent,
'setup_integration_br'),\
mock.patch.object(self.mod_agent.OVSNeutronAgent,
'setup_ancillary_bridges',
return_value=[]),\
mock.patch('neutron.agent.linux.utils.get_interface_mac',
return_value='00:00:00:00:00:01'),\
mock.patch(
'neutron.agent.common.ovs_lib.BaseOVS.get_bridges'),\
mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=MockFixedIntervalLoopingCall),\
mock.patch(
'neutron.agent.common.ovs_lib.OVSBridge.get_vif_ports',
return_value=[]):
self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(),
**kwargs)
# set back to true because initial report state will succeed due
# to mocked out RPC calls
self.agent.use_call = True
self.agent.tun_br = self.br_tun_cls(br_name='br-tun')
self.agent.sg_agent = mock.Mock()
def _mock_port_bound(self, ofport=None, new_local_vlan=None,
old_local_vlan=None):
port = mock.Mock()
port.ofport = ofport
net_uuid = 'my-net-uuid'
fixed_ips = [{'subnet_id': 'my-subnet-uuid',
'ip_address': '1.1.1.1'}]
if old_local_vlan is not None:
self.agent.local_vlan_map[net_uuid] = (
self.mod_agent.LocalVLANMapping(
old_local_vlan, None, None, None))
with mock.patch.object(self.agent, 'int_br', autospec=True) as int_br:
int_br.db_get_val.return_value = {}
int_br.set_db_attribute.return_value = True
self.agent.port_bound(port, net_uuid, 'local', None, None,
fixed_ips, "compute:None", False)
vlan_mapping = {'net_uuid': net_uuid,
'network_type': 'local',
'physical_network': None,
'segmentation_id': None}
int_br.set_db_attribute.assert_called_once_with(
"Port", mock.ANY, "other_config", vlan_mapping)
def _test_restore_local_vlan_maps(self, tag):
port = mock.Mock()
port.port_name = 'fake_port'
local_vlan_map = {'net_uuid': 'fake_network_id',
'network_type': 'vlan',
'physical_network': 'fake_network',
'segmentation_id': 1}
with mock.patch.object(self.agent, 'int_br') as int_br, \
mock.patch.object(self.agent, 'provision_local_vlan') as \
provision_local_vlan:
int_br.get_vif_ports.return_value = [port]
int_br.db_list.return_value = [{
'name': port.port_name, 'other_config': local_vlan_map,
'tag': tag
}]
self.agent._restore_local_vlan_map()
if tag:
self.assertTrue(provision_local_vlan.called)
else:
self.assertFalse(provision_local_vlan.called)
def test_restore_local_vlan_map_with_device_has_tag(self):
self._test_restore_local_vlan_maps(2)
def test_restore_local_vlan_map_with_device_no_tag(self):
self._test_restore_local_vlan_maps([])
def test_check_agent_configurations_for_dvr_raises(self):
self.agent.enable_distributed_routing = True
self.agent.enable_tunneling = True
self.agent.l2_pop = False
self.assertRaises(ValueError,
self.agent._check_agent_configurations)
def test_check_agent_configurations_for_dvr(self):
self.agent.enable_distributed_routing = True
self.agent.enable_tunneling = True
self.agent.l2_pop = True
self.assertIsNone(self.agent._check_agent_configurations())
def test_check_agent_configurations_for_dvr_with_vlan(self):
self.agent.enable_distributed_routing = True
self.agent.enable_tunneling = False
self.agent.l2_pop = False
self.assertIsNone(self.agent._check_agent_configurations())
def test_port_bound_deletes_flows_for_valid_ofport(self):
self._mock_port_bound(ofport=1, new_local_vlan=1)
def test_port_bound_ignores_flows_for_invalid_ofport(self):
self._mock_port_bound(ofport=-1, new_local_vlan=1)
def test_port_bound_does_not_rewire_if_already_bound(self):
self._mock_port_bound(ofport=-1, new_local_vlan=1, old_local_vlan=1)
def _test_port_dead(self, cur_tag=None):
port = mock.Mock()
port.ofport = 1
with mock.patch.object(self.agent, 'int_br') as int_br:
int_br.db_get_val.return_value = cur_tag
self.agent.port_dead(port)
if cur_tag == self.mod_agent.DEAD_VLAN_TAG:
self.assertFalse(int_br.set_db_attribute.called)
self.assertFalse(int_br.drop_port.called)
else:
int_br.assert_has_calls([
mock.call.set_db_attribute("Port", mock.ANY, "tag",
self.mod_agent.DEAD_VLAN_TAG,
log_errors=True),
mock.call.drop_port(in_port=port.ofport),
])
def test_port_dead(self):
self._test_port_dead()
def test_port_dead_with_port_already_dead(self):
self._test_port_dead(self.mod_agent.DEAD_VLAN_TAG)
def mock_scan_ports(self, vif_port_set=None, registered_ports=None,
updated_ports=None, port_tags_dict=None):
if port_tags_dict is None: # Because empty dicts evaluate as False.
port_tags_dict = {}
with mock.patch.object(self.agent.int_br,
'get_vif_port_set',
return_value=vif_port_set),\
mock.patch.object(self.agent.int_br,
'get_port_tag_dict',
return_value=port_tags_dict):
return self.agent.scan_ports(registered_ports, updated_ports)
def test_scan_ports_returns_current_only_for_unchanged_ports(self):
vif_port_set = set([1, 3])
registered_ports = set([1, 3])
expected = {'current': vif_port_set}
actual = self.mock_scan_ports(vif_port_set, registered_ports)
self.assertEqual(expected, actual)
def test_scan_ports_returns_port_changes(self):
vif_port_set = set([1, 3])
registered_ports = set([1, 2])
expected = dict(current=vif_port_set, added=set([3]), removed=set([2]))
actual = self.mock_scan_ports(vif_port_set, registered_ports)
self.assertEqual(expected, actual)
def _test_scan_ports_with_updated_ports(self, updated_ports):
vif_port_set = set([1, 3, 4])
registered_ports = set([1, 2, 4])
expected = dict(current=vif_port_set, added=set([3]),
removed=set([2]), updated=set([4]))
actual = self.mock_scan_ports(vif_port_set, registered_ports,
updated_ports)
self.assertEqual(expected, actual)
def test_scan_ports_finds_known_updated_ports(self):
self._test_scan_ports_with_updated_ports(set([4]))
def test_scan_ports_ignores_unknown_updated_ports(self):
# the port '5' was not seen on current ports. Hence it has either
# never been wired or already removed and should be ignored
self._test_scan_ports_with_updated_ports(set([4, 5]))
def test_scan_ports_ignores_updated_port_if_removed(self):
vif_port_set = set([1, 3])
registered_ports = set([1, 2])
updated_ports = set([1, 2])
expected = dict(current=vif_port_set, added=set([3]),
removed=set([2]), updated=set([1]))
actual = self.mock_scan_ports(vif_port_set, registered_ports,
updated_ports)
self.assertEqual(expected, actual)
def test_scan_ports_no_vif_changes_returns_updated_port_only(self):
vif_port_set = set([1, 2, 3])
registered_ports = set([1, 2, 3])
updated_ports = set([2])
expected = dict(current=vif_port_set, updated=set([2]))
actual = self.mock_scan_ports(vif_port_set, registered_ports,
updated_ports)
self.assertEqual(expected, actual)
def test_update_ports_returns_changed_vlan(self):
br = self.br_int_cls('br-int')
mac = "ca:fe:de:ad:be:ef"
port = ovs_lib.VifPort(1, 1, 1, mac, br)
lvm = self.mod_agent.LocalVLANMapping(
1, '1', None, 1, {port.vif_id: port})
local_vlan_map = {'1': lvm}
vif_port_set = set([1, 3])
registered_ports = set([1, 2])
port_tags_dict = {1: []}
expected = dict(
added=set([3]), current=vif_port_set,
removed=set([2]), updated=set([1])
)
with mock.patch.dict(self.agent.local_vlan_map, local_vlan_map),\
mock.patch.object(self.agent, 'tun_br', autospec=True):
actual = self.mock_scan_ports(
vif_port_set, registered_ports, port_tags_dict=port_tags_dict)
self.assertEqual(expected, actual)
def test_bind_devices(self):
devices_up = ['tap1']
devices_down = ['tap2']
self.agent.local_vlan_map["net1"] = mock.Mock()
port_details = [
{'network_id': 'net1', 'vif_port': mock.Mock(),
'device': devices_up[0],
'admin_state_up': True},
{'network_id': 'net1', 'vif_port': mock.Mock(),
'device': devices_down[0],
'admin_state_up': False}]
with mock.patch.object(
self.agent.plugin_rpc, 'update_device_list',
return_value={'devices_up': devices_up,
'devices_down': devices_down,
'failed_devices_up': [],
'failed_devices_down': []}) as update_devices, \
mock.patch.object(self.agent,
'int_br') as int_br:
int_br.db_list.return_value = []
self.agent._bind_devices(port_details)
update_devices.assert_called_once_with(mock.ANY, devices_up,
devices_down,
mock.ANY, mock.ANY)
def _mock_treat_devices_added_updated(self, details, port, func_name):
"""Mock treat devices added or updated.
:param details: the details to return for the device
:param port: the port that get_vif_port_by_id should return
:param func_name: the function that should be called
:returns: whether the named function was called
"""
with mock.patch.object(self.agent.plugin_rpc,
'get_devices_details_list_and_failed_devices',
return_value={'devices': [details],
'failed_devices': None}),\
mock.patch.object(self.agent.int_br,
'get_vifs_by_ids',
return_value={details['device']: port}),\
mock.patch.object(self.agent.plugin_rpc, 'update_device_list',
return_value={'devices_up': [],
'devices_down': details,
'failed_devices_up': [],
'failed_devices_down': []}),\
mock.patch.object(self.agent, func_name) as func:
skip_devs, need_bound_devices = (
self.agent.treat_devices_added_or_updated([{}], False))
# No devices should have been skipped
self.assertFalse(skip_devs)
return func.called
def test_treat_devices_added_updated_ignores_invalid_ofport(self):
port = mock.Mock()
port.ofport = -1
self.assertFalse(self._mock_treat_devices_added_updated(
mock.MagicMock(), port, 'port_dead'))
def test_treat_devices_added_updated_marks_unknown_port_as_dead(self):
port = mock.Mock()
port.ofport = 1
self.assertTrue(self._mock_treat_devices_added_updated(
mock.MagicMock(), port, 'port_dead'))
def test_treat_devices_added_does_not_process_missing_port(self):
with mock.patch.object(
self.agent.plugin_rpc,
'get_devices_details_list_and_failed_devices') as get_dev_fn,\
mock.patch.object(self.agent.int_br,
'get_vif_port_by_id',
return_value=None):
self.assertFalse(get_dev_fn.called)
def test_treat_devices_added_updated_updates_known_port(self):
details = mock.MagicMock()
details.__contains__.side_effect = lambda x: True
self.assertTrue(self._mock_treat_devices_added_updated(
details, mock.Mock(), 'treat_vif_port'))
def test_treat_devices_added_updated_skips_if_port_not_found(self):
dev_mock = mock.MagicMock()
dev_mock.__getitem__.return_value = 'the_skipped_one'
with mock.patch.object(self.agent.plugin_rpc,
'get_devices_details_list_and_failed_devices',
return_value={'devices': [dev_mock],
'failed_devices': None}),\
mock.patch.object(self.agent.int_br,
'get_vifs_by_ids',
return_value={}),\
mock.patch.object(self.agent,
'treat_vif_port') as treat_vif_port:
skip_devs = self.agent.treat_devices_added_or_updated([{}], False)
# The unresolved device should be reported as skipped and no devices
# should need binding
self.assertEqual((['the_skipped_one'], []), skip_devs)
self.assertFalse(treat_vif_port.called)
def test_treat_devices_added_updated_put_port_down(self):
fake_details_dict = {'admin_state_up': False,
'port_id': 'xxx',
'device': 'xxx',
'network_id': 'yyy',
'physical_network': 'foo',
'segmentation_id': 'bar',
'network_type': 'baz',
'fixed_ips': [{'subnet_id': 'my-subnet-uuid',
'ip_address': '1.1.1.1'}],
'device_owner': 'compute:None'
}
with mock.patch.object(self.agent.plugin_rpc,
'get_devices_details_list_and_failed_devices',
return_value={'devices': [fake_details_dict],
'failed_devices': None}),\
mock.patch.object(self.agent.int_br,
'get_vifs_by_ids',
return_value={'xxx': mock.MagicMock()}),\
mock.patch.object(self.agent,
'treat_vif_port') as treat_vif_port:
skip_devs, need_bound_devices = (
self.agent.treat_devices_added_or_updated([{}], False))
# No devices should have been skipped
self.assertFalse(skip_devs)
self.assertTrue(treat_vif_port.called)
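# Helper for the treat_devices_removed tests: with update_device_list
# mocked out, the call should not request a resync and should always
# unbind the port via port_unbound.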
def _mock_treat_devices_removed(self, port_exists):
details = dict(exists=port_exists)
with mock.patch.object(self.agent.plugin_rpc,
'update_device_list',
return_value={'devices_up': [],
'devices_down': details,
'failed_devices_up': [],
'failed_devices_down': []}):
with mock.patch.object(self.agent, 'port_unbound') as port_unbound:
self.assertFalse(self.agent.treat_devices_removed([{}]))
self.assertTrue(port_unbound.called)
def test_treat_devices_removed_unbinds_port(self):
self._mock_treat_devices_removed(True)
def test_treat_devices_removed_ignores_missing_port(self):
self._mock_treat_devices_removed(False)
def test_bind_port_with_missing_network(self):
self.agent._bind_devices([{'network_id': 'non-existent'}])
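# Drive process_network_ports with the given port_info and verify that
# the security group filters and the added/updated/removed handlers are
# invoked with the expected device sets.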
def _test_process_network_ports(self, port_info):
with mock.patch.object(self.agent.sg_agent,
"setup_port_filters") as setup_port_filters,\
mock.patch.object(
self.agent,
"treat_devices_added_or_updated",
return_value=([], [])) as device_added_updated,\
mock.patch.object(self.agent.int_br, "db_list",
return_value=[]),\
mock.patch.object(self.agent,
"treat_devices_removed",
return_value=False) as device_removed:
self.assertFalse(self.agent.process_network_ports(port_info,
False))
setup_port_filters.assert_called_once_with(
port_info.get('added', set()),
port_info.get('updated', set()))
devices_added_updated = (port_info.get('added', set()) |
port_info.get('updated', set()))
if devices_added_updated:
device_added_updated.assert_called_once_with(
devices_added_updated, False)
if port_info.get('removed', set()):
device_removed.assert_called_once_with(port_info['removed'])
def test_process_network_ports(self):
self._test_process_network_ports(
{'current': set(['tap0']),
'removed': set(['eth0']),
'added': set(['eth1'])})
def test_process_network_port_with_updated_ports(self):
self._test_process_network_ports(
{'current': set(['tap0', 'tap1']),
'updated': set(['tap1', 'eth1']),
'removed': set(['eth0']),
'added': set(['eth1'])})
def test_process_network_port_with_empty_port(self):
self._test_process_network_ports({})
def test_report_state(self):
with mock.patch.object(self.agent.state_rpc,
"report_state") as report_st:
self.agent.int_br_device_count = 5
self.agent._report_state()
report_st.assert_called_with(self.agent.context,
self.agent.agent_state, True)
self.assertNotIn("start_flag", self.agent.agent_state)
self.assertFalse(self.agent.use_call)
self.assertEqual(
self.agent.agent_state["configurations"]["devices"],
self.agent.int_br_device_count
)
self.agent._report_state()
report_st.assert_called_with(self.agent.context,
self.agent.agent_state, False)
def test_report_state_fail(self):
with mock.patch.object(self.agent.state_rpc,
"report_state") as report_st:
report_st.side_effect = Exception()
self.agent._report_state()
report_st.assert_called_with(self.agent.context,
self.agent.agent_state, True)
self.agent._report_state()
report_st.assert_called_with(self.agent.context,
self.agent.agent_state, True)
def test_port_update(self):
port = {"id": "123",
"network_id": "124",
"admin_state_up": False}
self.agent.port_update("unused_context",
port=port,
network_type="vlan",
segmentation_id="1",
physical_network="physnet")
self.assertEqual(set(['123']), self.agent.updated_ports)
def test_port_delete(self):
vif = FakeVif()
with mock.patch.object(self.agent, 'int_br') as int_br:
int_br.get_vif_by_port_id.return_value = vif.port_name
int_br.get_vif_port_by_id.return_value = vif
self.agent.port_delete("unused_context",
port_id='id')
self.agent.process_deleted_ports(port_info={})
# the main things we care about are that it gets put in the
# dead vlan and gets blocked
int_br.set_db_attribute.assert_any_call(
'Port', vif.port_name, 'tag', self.mod_agent.DEAD_VLAN_TAG,
log_errors=False)
int_br.drop_port.assert_called_once_with(in_port=vif.ofport)
def test_port_delete_removed_port(self):
with mock.patch.object(self.agent, 'int_br') as int_br:
self.agent.port_delete("unused_context",
port_id='id')
# if it was removed from the bridge, we shouldn't be processing it
self.agent.process_deleted_ports(port_info={'removed': {'id', }})
self.assertFalse(int_br.set_db_attribute.called)
self.assertFalse(int_br.drop_port.called)
def test_setup_physical_bridges(self):
with mock.patch.object(ip_lib, "device_exists") as devex_fn,\
mock.patch.object(sys, "exit"),\
mock.patch.object(utils, "execute"),\
mock.patch.object(self.agent, 'br_phys_cls') as phys_br_cls,\
mock.patch.object(self.agent, 'int_br') as int_br:
devex_fn.return_value = True
parent = mock.MagicMock()
phys_br = phys_br_cls()
parent.attach_mock(phys_br_cls, 'phys_br_cls')
parent.attach_mock(phys_br, 'phys_br')
parent.attach_mock(int_br, 'int_br')
phys_br.add_patch_port.return_value = "phy_ofport"
int_br.add_patch_port.return_value = "int_ofport"
self.agent.setup_physical_bridges({"physnet1": "br-eth"})
expected_calls = [
mock.call.phys_br_cls('br-eth'),
mock.call.phys_br.setup_controllers(mock.ANY),
mock.call.phys_br.setup_default_table(),
mock.call.int_br.delete_port('int-br-eth'),
mock.call.phys_br.delete_port('phy-br-eth'),
mock.call.int_br.add_patch_port('int-br-eth',
constants.NONEXISTENT_PEER),
mock.call.phys_br.add_patch_port('phy-br-eth',
constants.NONEXISTENT_PEER),
mock.call.int_br.drop_port(in_port='int_ofport'),
mock.call.phys_br.drop_port(in_port='phy_ofport'),
mock.call.int_br.set_db_attribute('Interface', 'int-br-eth',
'options:peer',
'phy-br-eth'),
mock.call.phys_br.set_db_attribute('Interface', 'phy-br-eth',
'options:peer',
'int-br-eth'),
]
parent.assert_has_calls(expected_calls)
self.assertEqual(self.agent.int_ofports["physnet1"],
"int_ofport")
self.assertEqual(self.agent.phys_ofports["physnet1"],
"phy_ofport")
def test_setup_physical_bridges_using_veth_interconnection(self):
self.agent.use_veth_interconnection = True
with mock.patch.object(ip_lib, "device_exists") as devex_fn,\
mock.patch.object(sys, "exit"),\
mock.patch.object(utils, "execute") as utilsexec_fn,\
mock.patch.object(self.agent, 'br_phys_cls') as phys_br_cls,\
mock.patch.object(self.agent, 'int_br') as int_br,\
mock.patch.object(ip_lib.IPWrapper, "add_veth") as addveth_fn,\
mock.patch.object(ip_lib.IpLinkCommand,
"delete") as linkdel_fn,\
mock.patch.object(ip_lib.IpLinkCommand, "set_up"),\
mock.patch.object(ip_lib.IpLinkCommand, "set_mtu"),\
mock.patch.object(ovs_lib.BaseOVS, "get_bridges") as get_br_fn:
devex_fn.return_value = True
parent = mock.MagicMock()
parent.attach_mock(utilsexec_fn, 'utils_execute')
parent.attach_mock(linkdel_fn, 'link_delete')
parent.attach_mock(addveth_fn, 'add_veth')
addveth_fn.return_value = (ip_lib.IPDevice("int-br-eth1"),
ip_lib.IPDevice("phy-br-eth1"))
phys_br = phys_br_cls()
phys_br.add_port.return_value = "phys_veth_ofport"
int_br.add_port.return_value = "int_veth_ofport"
get_br_fn.return_value = ["br-eth"]
self.agent.setup_physical_bridges({"physnet1": "br-eth"})
expected_calls = [mock.call.link_delete(),
mock.call.utils_execute(['udevadm',
'settle',
'--timeout=10']),
mock.call.add_veth('int-br-eth',
'phy-br-eth')]
parent.assert_has_calls(expected_calls, any_order=False)
self.assertEqual(self.agent.int_ofports["physnet1"],
"int_veth_ofport")
self.assertEqual(self.agent.phys_ofports["physnet1"],
"phys_veth_ofport")
def test_get_peer_name(self):
bridge1 = "A_REALLY_LONG_BRIDGE_NAME1"
bridge2 = "A_REALLY_LONG_BRIDGE_NAME2"
self.agent.use_veth_interconnection = True
self.assertEqual(len(self.agent.get_peer_name('int-', bridge1)),
n_const.DEVICE_NAME_MAX_LEN)
self.assertEqual(len(self.agent.get_peer_name('int-', bridge2)),
n_const.DEVICE_NAME_MAX_LEN)
self.assertNotEqual(self.agent.get_peer_name('int-', bridge1),
self.agent.get_peer_name('int-', bridge2))
def test_setup_tunnel_br(self):
self.tun_br = mock.Mock()
with mock.patch.object(self.agent.int_br,
"add_patch_port",
return_value=1) as intbr_patch_fn,\
mock.patch.object(self.agent,
'tun_br',
autospec=True) as tun_br,\
mock.patch.object(sys, "exit"):
tun_br.add_patch_port.return_value = 2
self.agent.reset_tunnel_br(None)
self.agent.setup_tunnel_br()
self.assertTrue(intbr_patch_fn.called)
def test_setup_tunnel_port(self):
self.agent.tun_br = mock.Mock()
self.agent.l2_pop = False
self.agent.udp_vxlan_port = 8472
self.agent.tun_br_ofports['vxlan'] = {}
with mock.patch.object(self.agent.tun_br,
"add_tunnel_port",
return_value='6') as add_tun_port_fn,\
mock.patch.object(self.agent.tun_br, "add_flow"):
self.agent._setup_tunnel_port(self.agent.tun_br, 'portname',
'1.2.3.4', 'vxlan')
self.assertTrue(add_tun_port_fn.called)
def test_port_unbound(self):
with mock.patch.object(self.agent, "reclaim_local_vlan") as reclvl_fn:
self.agent.enable_tunneling = True
lvm = mock.Mock()
lvm.network_type = "gre"
lvm.vif_ports = {"vif1": mock.Mock()}
self.agent.local_vlan_map["netuid12345"] = lvm
self.agent.port_unbound("vif1", "netuid12345")
self.assertTrue(reclvl_fn.called)
lvm.vif_ports = {}
self.agent.port_unbound("vif1", "netuid12345")
self.assertEqual(reclvl_fn.call_count, 2)
lvm.vif_ports = {"vif1": mock.Mock()}
self.agent.port_unbound("vif3", "netuid12345")
self.assertEqual(reclvl_fn.call_count, 2)
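# Prepare two GRE networks with local VLAN mappings and tunnel ofports
# (ARP responder enabled) for the l2-population fdb_* tests below.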
def _prepare_l2_pop_ofports(self):
lvm1 = mock.Mock()
lvm1.network_type = 'gre'
lvm1.vlan = 'vlan1'
lvm1.segmentation_id = 'seg1'
lvm1.tun_ofports = set(['1'])
lvm2 = mock.Mock()
lvm2.network_type = 'gre'
lvm2.vlan = 'vlan2'
lvm2.segmentation_id = 'seg2'
lvm2.tun_ofports = set(['1', '2'])
self.agent.local_vlan_map = {'net1': lvm1, 'net2': lvm2}
self.agent.tun_br_ofports = {'gre':
{'1.1.1.1': '1', '2.2.2.2': '2'}}
self.agent.arp_responder_enabled = True
def test_fdb_ignore_network(self):
self._prepare_l2_pop_ofports()
fdb_entry = {'net3': {}}
with mock.patch.object(self.agent.tun_br, 'add_flow') as add_flow_fn,\
mock.patch.object(self.agent.tun_br,
'delete_flows') as del_flow_fn,\
mock.patch.object(self.agent,
'_setup_tunnel_port') as add_tun_fn,\
mock.patch.object(self.agent,
'cleanup_tunnel_port') as clean_tun_fn:
self.agent.fdb_add(None, fdb_entry)
self.assertFalse(add_flow_fn.called)
self.assertFalse(add_tun_fn.called)
self.agent.fdb_remove(None, fdb_entry)
self.assertFalse(del_flow_fn.called)
self.assertFalse(clean_tun_fn.called)
def test_fdb_ignore_self(self):
self._prepare_l2_pop_ofports()
self.agent.local_ip = 'agent_ip'
fdb_entry = {'net2':
{'network_type': 'gre',
'segment_id': 'tun2',
'ports':
{'agent_ip':
[l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1),
n_const.FLOODING_ENTRY]}}}
with mock.patch.object(self.agent.tun_br,
"deferred") as defer_fn:
self.agent.fdb_add(None, fdb_entry)
self.assertFalse(defer_fn.called)
self.agent.fdb_remove(None, fdb_entry)
self.assertFalse(defer_fn.called)
def test_fdb_add_flows(self):
self._prepare_l2_pop_ofports()
fdb_entry = {'net1':
{'network_type': 'gre',
'segment_id': 'tun1',
'ports':
{'2.2.2.2':
[l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1),
n_const.FLOODING_ENTRY]}}}
with mock.patch.object(self.agent, 'tun_br', autospec=True) as tun_br,\
mock.patch.object(self.agent,
'_setup_tunnel_port',
autospec=True) as add_tun_fn:
self.agent.fdb_add(None, fdb_entry)
self.assertFalse(add_tun_fn.called)
deferred_br_call = mock.call.deferred().__enter__()
expected_calls = [
deferred_br_call.install_arp_responder('vlan1', FAKE_IP1,
FAKE_MAC),
deferred_br_call.install_unicast_to_tun('vlan1', 'seg1', '2',
FAKE_MAC),
deferred_br_call.install_flood_to_tun('vlan1', 'seg1',
set(['1', '2'])),
]
tun_br.assert_has_calls(expected_calls)
def test_fdb_del_flows(self):
self._prepare_l2_pop_ofports()
fdb_entry = {'net2':
{'network_type': 'gre',
'segment_id': 'tun2',
'ports':
{'2.2.2.2':
[l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1),
n_const.FLOODING_ENTRY]}}}
with mock.patch.object(self.agent, 'tun_br', autospec=True) as br_tun:
self.agent.fdb_remove(None, fdb_entry)
deferred_br_call = mock.call.deferred().__enter__()
expected_calls = [
mock.call.deferred(),
mock.call.deferred().__enter__(),
deferred_br_call.delete_arp_responder('vlan2', FAKE_IP1),
deferred_br_call.delete_unicast_to_tun('vlan2', FAKE_MAC),
deferred_br_call.install_flood_to_tun('vlan2', 'seg2',
set(['1'])),
deferred_br_call.delete_port('gre-02020202'),
deferred_br_call.cleanup_tunnel_port('2'),
mock.call.deferred().__exit__(None, None, None),
]
br_tun.assert_has_calls(expected_calls)
def test_fdb_add_port(self):
self._prepare_l2_pop_ofports()
fdb_entry = {'net1':
{'network_type': 'gre',
'segment_id': 'tun1',
'ports': {'1.1.1.1': [l2pop_rpc.PortInfo(FAKE_MAC,
FAKE_IP1)]}}}
with mock.patch.object(self.agent, 'tun_br', autospec=True) as tun_br,\
mock.patch.object(self.agent,
'_setup_tunnel_port') as add_tun_fn:
self.agent.fdb_add(None, fdb_entry)
self.assertFalse(add_tun_fn.called)
fdb_entry['net1']['ports']['10.10.10.10'] = [
l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1)]
self.agent.fdb_add(None, fdb_entry)
deferred_br = tun_br.deferred().__enter__()
add_tun_fn.assert_called_with(
deferred_br, 'gre-0a0a0a0a', '10.10.10.10', 'gre')
def test_fdb_del_port(self):
self._prepare_l2_pop_ofports()
fdb_entry = {'net2':
{'network_type': 'gre',
'segment_id': 'tun2',
'ports': {'2.2.2.2': [n_const.FLOODING_ENTRY]}}}
with mock.patch.object(self.agent.tun_br, 'deferred') as defer_fn,\
mock.patch.object(self.agent.tun_br,
'delete_port') as delete_port_fn:
self.agent.fdb_remove(None, fdb_entry)
deferred_br = defer_fn().__enter__()
deferred_br.delete_port.assert_called_once_with('gre-02020202')
self.assertFalse(delete_port_fn.called)
def test_fdb_update_chg_ip(self):
self._prepare_l2_pop_ofports()
fdb_entries = {'chg_ip':
{'net1':
{'agent_ip':
{'before': [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1)],
'after': [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP2)]}}}}
with mock.patch.object(self.agent.tun_br, 'deferred') as deferred_fn:
self.agent.fdb_update(None, fdb_entries)
deferred_br = deferred_fn().__enter__()
deferred_br.assert_has_calls([
mock.call.install_arp_responder('vlan1', FAKE_IP2, FAKE_MAC),
mock.call.delete_arp_responder('vlan1', FAKE_IP1)
])
def test_del_fdb_flow_idempotency(self):
lvm = mock.Mock()
lvm.network_type = 'gre'
lvm.vlan = 'vlan1'
lvm.segmentation_id = 'seg1'
lvm.tun_ofports = set(['1', '2'])
with mock.patch.object(self.agent.tun_br, 'mod_flow') as mod_flow_fn,\
mock.patch.object(self.agent.tun_br,
'delete_flows') as delete_flows_fn:
self.agent.del_fdb_flow(self.agent.tun_br, n_const.FLOODING_ENTRY,
'1.1.1.1', lvm, '3')
self.assertFalse(mod_flow_fn.called)
self.assertFalse(delete_flows_fn.called)
def test_recl_lv_port_to_preserve(self):
self._prepare_l2_pop_ofports()
self.agent.l2_pop = True
self.agent.enable_tunneling = True
with mock.patch.object(self.agent, 'tun_br', autospec=True) as tun_br:
self.agent.reclaim_local_vlan('net1')
self.assertFalse(tun_br.cleanup_tunnel_port.called)
def test_recl_lv_port_to_remove(self):
self._prepare_l2_pop_ofports()
self.agent.l2_pop = True
self.agent.enable_tunneling = True
with mock.patch.object(self.agent, 'tun_br', autospec=True) as tun_br:
self.agent.reclaim_local_vlan('net2')
tun_br.delete_port.assert_called_once_with('gre-02020202')
def test_daemon_loop_uses_polling_manager(self):
with mock.patch(
'neutron.agent.common.polling.get_polling_manager') as mock_get_pm:
with mock.patch.object(self.agent, 'rpc_loop') as mock_loop:
self.agent.daemon_loop()
mock_get_pm.assert_called_with(True,
constants.DEFAULT_OVSDBMON_RESPAWN)
mock_loop.assert_called_once_with(polling_manager=mock.ANY)
def test_setup_tunnel_port_invalid_ofport(self):
with mock.patch.object(
self.agent.tun_br,
'add_tunnel_port',
return_value=ovs_lib.INVALID_OFPORT) as add_tunnel_port_fn,\
mock.patch.object(self.mod_agent.LOG, 'error') as log_error_fn:
ofport = self.agent._setup_tunnel_port(
self.agent.tun_br, 'gre-1', 'remote_ip', p_const.TYPE_GRE)
add_tunnel_port_fn.assert_called_once_with(
'gre-1', 'remote_ip', self.agent.local_ip, p_const.TYPE_GRE,
self.agent.vxlan_udp_port, self.agent.dont_fragment)
log_error_fn.assert_called_once_with(
_("Failed to set-up %(type)s tunnel port to %(ip)s"),
{'type': p_const.TYPE_GRE, 'ip': 'remote_ip'})
self.assertEqual(ofport, 0)
def test_setup_tunnel_port_error_negative_df_disabled(self):
with mock.patch.object(
self.agent.tun_br,
'add_tunnel_port',
return_value=ovs_lib.INVALID_OFPORT) as add_tunnel_port_fn,\
mock.patch.object(self.mod_agent.LOG, 'error') as log_error_fn:
self.agent.dont_fragment = False
ofport = self.agent._setup_tunnel_port(
self.agent.tun_br, 'gre-1', 'remote_ip', p_const.TYPE_GRE)
add_tunnel_port_fn.assert_called_once_with(
'gre-1', 'remote_ip', self.agent.local_ip, p_const.TYPE_GRE,
self.agent.vxlan_udp_port, self.agent.dont_fragment)
log_error_fn.assert_called_once_with(
_("Failed to set-up %(type)s tunnel port to %(ip)s"),
{'type': p_const.TYPE_GRE, 'ip': 'remote_ip'})
self.assertEqual(ofport, 0)
def test_tunnel_sync_with_ml2_plugin(self):
fake_tunnel_details = {'tunnels': [{'ip_address': '100.101.31.15'}]}
with mock.patch.object(self.agent.plugin_rpc,
'tunnel_sync',
return_value=fake_tunnel_details),\
mock.patch.object(
self.agent,
'_setup_tunnel_port') as _setup_tunnel_port_fn:
self.agent.tunnel_types = ['vxlan']
self.agent.tunnel_sync()
expected_calls = [mock.call(self.agent.tun_br, 'vxlan-64651f0f',
'100.101.31.15', 'vxlan')]
_setup_tunnel_port_fn.assert_has_calls(expected_calls)
def test_tunnel_sync_invalid_ip_address(self):
fake_tunnel_details = {'tunnels': [{'ip_address': '300.300.300.300'},
{'ip_address': '100.100.100.100'}]}
with mock.patch.object(self.agent.plugin_rpc,
'tunnel_sync',
return_value=fake_tunnel_details),\
mock.patch.object(
self.agent,
'_setup_tunnel_port') as _setup_tunnel_port_fn:
self.agent.tunnel_types = ['vxlan']
self.agent.tunnel_sync()
_setup_tunnel_port_fn.assert_called_once_with(self.agent.tun_br,
'vxlan-64646464',
'100.100.100.100',
'vxlan')
def test_tunnel_update(self):
kwargs = {'tunnel_ip': '10.10.10.10',
'tunnel_type': 'gre'}
self.agent._setup_tunnel_port = mock.Mock()
self.agent.enable_tunneling = True
self.agent.tunnel_types = ['gre']
self.agent.l2_pop = False
self.agent.tunnel_update(context=None, **kwargs)
expected_calls = [
mock.call(self.agent.tun_br, 'gre-0a0a0a0a', '10.10.10.10', 'gre')]
self.agent._setup_tunnel_port.assert_has_calls(expected_calls)
def test_tunnel_delete(self):
kwargs = {'tunnel_ip': '10.10.10.10',
'tunnel_type': 'gre'}
self.agent.enable_tunneling = True
self.agent.tunnel_types = ['gre']
self.agent.tun_br_ofports = {'gre': {'10.10.10.10': '1'}}
with mock.patch.object(
self.agent, 'cleanup_tunnel_port'
) as clean_tun_fn:
self.agent.tunnel_delete(context=None, **kwargs)
self.assertTrue(clean_tun_fn.called)
def _test_ovs_status(self, *args):
reply2 = {'current': set(['tap0']),
'added': set(['tap2']),
'removed': set([])}
reply3 = {'current': set(['tap2']),
'added': set([]),
'removed': set(['tap0'])}
with mock.patch.object(async_process.AsyncProcess, "_spawn"),\
mock.patch.object(log.KeywordArgumentAdapter,
'exception') as log_exception,\
mock.patch.object(self.mod_agent.OVSNeutronAgent,
'scan_ports') as scan_ports,\
mock.patch.object(
self.mod_agent.OVSNeutronAgent,
'process_network_ports') as process_network_ports,\
mock.patch.object(self.mod_agent.OVSNeutronAgent,
'check_ovs_status') as check_ovs_status,\
mock.patch.object(self.mod_agent.OVSNeutronAgent,
'setup_integration_br') as setup_int_br,\
mock.patch.object(self.mod_agent.OVSNeutronAgent,
'setup_physical_bridges') as setup_phys_br,\
mock.patch.object(time, 'sleep'),\
mock.patch.object(self.mod_agent.OVSNeutronAgent,
'update_stale_ofport_rules') as update_stale:
log_exception.side_effect = Exception(
'Fake exception to get out of the loop')
scan_ports.side_effect = [reply2, reply3]
process_network_ports.side_effect = [
False, Exception('Fake exception to get out of the loop')]
check_ovs_status.side_effect = args
try:
self.agent.daemon_loop()
except Exception:
pass
scan_ports.assert_has_calls([
mock.call(set(), set()),
mock.call(set(), set())
])
process_network_ports.assert_has_calls([
mock.call(reply2, False),
mock.call(reply3, True)
])
self.assertTrue(update_stale.called)
# Verify that the OVS restart we triggered in the loop
# re-set up the bridges
setup_int_br.assert_has_calls([mock.call()])
setup_phys_br.assert_has_calls([mock.call({})])
def test_ovs_status(self):
self._test_ovs_status(constants.OVS_NORMAL,
constants.OVS_DEAD,
constants.OVS_RESTARTED)
# OVS is not detected as DEAD for some exceptions, such as DBConnectionError.
self._test_ovs_status(constants.OVS_NORMAL,
constants.OVS_RESTARTED)
def test_set_rpc_timeout(self):
self.agent._handle_sigterm(None, None)
for rpc_client in (self.agent.plugin_rpc.client,
self.agent.sg_plugin_rpc.client,
self.agent.dvr_plugin_rpc.client,
self.agent.state_rpc.client):
self.assertEqual(10, rpc_client.timeout)
def test_set_rpc_timeout_no_value(self):
self.agent.quitting_rpc_timeout = None
with mock.patch.object(self.agent, 'set_rpc_timeout') as mock_set_rpc:
self.agent._handle_sigterm(None, None)
self.assertFalse(mock_set_rpc.called)
def test_arp_spoofing_disabled(self):
self.agent.prevent_arp_spoofing = False
# all of this is required just to get to the part of
# treat_devices_added_or_updated that checks the prevent_arp_spoofing
# flag
self.agent.int_br = mock.create_autospec(self.agent.int_br)
self.agent.treat_vif_port = mock.Mock()
self.agent.get_vif_port_by_id = mock.Mock(return_value=FakeVif())
self.agent.plugin_rpc = mock.Mock()
plist = [{a: a for a in ('port_id', 'network_id', 'network_type',
'physical_network', 'segmentation_id',
'admin_state_up', 'fixed_ips', 'device',
'device_owner')}]
(self.agent.plugin_rpc.get_devices_details_list_and_failed_devices.
return_value) = {'devices': plist, 'failed_devices': []}
self.agent.plugin_rpc.update_device_list.return_value = {
'devices_up': plist, 'devices_down': [], 'failed_devices_up': [],
'failed_devices_down': []}
self.agent.setup_arp_spoofing_protection = mock.Mock()
self.agent.treat_devices_added_or_updated([], False)
self.assertFalse(self.agent.setup_arp_spoofing_protection.called)
def test_arp_spoofing_port_security_disabled(self):
int_br = mock.create_autospec(self.agent.int_br)
self.agent.setup_arp_spoofing_protection(
int_br, FakeVif(), {'port_security_enabled': False})
self.assertTrue(int_br.delete_arp_spoofing_protection.called)
self.assertFalse(int_br.install_arp_spoofing_protection.called)
def test_arp_spoofing_basic_rule_setup(self):
vif = FakeVif()
fake_details = {'fixed_ips': []}
self.agent.prevent_arp_spoofing = True
int_br = mock.create_autospec(self.agent.int_br)
self.agent.setup_arp_spoofing_protection(int_br, vif, fake_details)
self.assertEqual(
[mock.call(port=vif.ofport)],
int_br.delete_arp_spoofing_protection.mock_calls)
self.assertEqual(
[mock.call(ip_addresses=set(), port=vif.ofport)],
int_br.install_arp_spoofing_protection.mock_calls)
def test_arp_spoofing_fixed_and_allowed_addresses(self):
vif = FakeVif()
fake_details = {
'fixed_ips': [{'ip_address': '192.168.44.100'},
{'ip_address': '192.168.44.101'}],
'allowed_address_pairs': [{'ip_address': '192.168.44.102/32'},
{'ip_address': '192.168.44.103/32'}]
}
self.agent.prevent_arp_spoofing = True
int_br = mock.create_autospec(self.agent.int_br)
self.agent.setup_arp_spoofing_protection(int_br, vif, fake_details)
# make sure all addresses are allowed
addresses = {'192.168.44.100', '192.168.44.101', '192.168.44.102/32',
'192.168.44.103/32'}
self.assertEqual(
[mock.call(port=vif.ofport, ip_addresses=addresses)],
int_br.install_arp_spoofing_protection.mock_calls)
def test__get_ofport_moves(self):
previous = {'port1': 1, 'port2': 2}
current = {'port1': 5, 'port2': 2}
# we expect it to tell us port1 moved
expected = ['port1']
self.assertEqual(expected,
self.agent._get_ofport_moves(current, previous))
def test_update_stale_ofport_rules_clears_old(self):
self.agent.prevent_arp_spoofing = True
self.agent.vifname_to_ofport_map = {'port1': 1, 'port2': 2}
self.agent.int_br = mock.Mock()
# simulate port1 was removed
newmap = {'port2': 2}
self.agent.int_br.get_vif_port_to_ofport_map.return_value = newmap
self.agent.update_stale_ofport_rules()
# rules matching port 1 should have been deleted
self.assertEqual(
[mock.call(port=1)],
self.agent.int_br.delete_arp_spoofing_protection.mock_calls)
# make sure the state was updated with the new map
self.assertEqual(self.agent.vifname_to_ofport_map, newmap)
def test_update_stale_ofport_rules_treats_moved(self):
self.agent.prevent_arp_spoofing = True
self.agent.vifname_to_ofport_map = {'port1': 1, 'port2': 2}
self.agent.treat_devices_added_or_updated = mock.Mock()
self.agent.int_br = mock.Mock()
# simulate port1 was moved
newmap = {'port2': 2, 'port1': 90}
self.agent.int_br.get_vif_port_to_ofport_map.return_value = newmap
self.agent.update_stale_ofport_rules()
self.agent.treat_devices_added_or_updated.assert_called_with(
['port1'], ovs_restarted=False)
def test__setup_tunnel_port_while_new_mapping_is_added(self):
"""
Test that _setup_tunnel_port doesn't fail if a new VLAN mapping is
added from a different coroutine while iterating over existing mappings.
See bug 1449944 for more info.
"""
def add_new_vlan_mapping(*args, **kwargs):
self.agent.local_vlan_map['bar'] = (
self.mod_agent.LocalVLANMapping(1, 2, 3, 4))
bridge = mock.Mock()
tunnel_type = 'vxlan'
self.agent.tun_br_ofports = {tunnel_type: dict()}
self.agent.l2_pop = False
self.agent.local_vlan_map = {
'foo': self.mod_agent.LocalVLANMapping(4, tunnel_type, 2, 1)}
bridge.install_flood_to_tun.side_effect = add_new_vlan_mapping
self.agent._setup_tunnel_port(bridge, 1, 2, tunnel_type=tunnel_type)
self.assertIn('bar', self.agent.local_vlan_map)
class TestOvsNeutronAgentOFCtl(TestOvsNeutronAgent,
ovs_test_base.OVSOFCtlTestBase):
pass
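# Mixin covering discovery and scanning of ancillary (external) bridges;
# it is combined with a concrete bridge implementation by the test
# classes below.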
class AncillaryBridgesTest(object):
def setUp(self):
super(AncillaryBridgesTest, self).setUp()
notifier_p = mock.patch(NOTIFIER)
notifier_cls = notifier_p.start()
self.notifier = mock.Mock()
notifier_cls.return_value = self.notifier
cfg.CONF.set_default('firewall_driver',
'neutron.agent.firewall.NoopFirewallDriver',
group='SECURITYGROUP')
cfg.CONF.set_override('report_interval', 0, 'AGENT')
self.kwargs = self.mod_agent.create_agent_config_map(cfg.CONF)
def _test_ancillary_bridges(self, bridges, ancillary):
device_ids = ancillary[:]
def pullup_side_effect(*args):
# Check that the device_id exists; if it does, return it,
# otherwise return None
try:
device_ids.remove(args[0])
return args[0]
except Exception:
return None
with mock.patch.object(self.mod_agent.OVSNeutronAgent,
'setup_integration_br'),\
mock.patch('neutron.agent.linux.utils.get_interface_mac',
return_value='00:00:00:00:00:01'),\
mock.patch('neutron.agent.common.ovs_lib.BaseOVS.get_bridges',
return_value=bridges),\
mock.patch('neutron.agent.common.ovs_lib.BaseOVS.'
'get_bridge_external_bridge_id',
side_effect=pullup_side_effect),\
mock.patch(
'neutron.agent.common.ovs_lib.OVSBridge.' 'db_list',
return_value=[]),\
mock.patch(
'neutron.agent.common.ovs_lib.OVSBridge.' 'get_vif_ports',
return_value=[]):
self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(),
**self.kwargs)
self.assertEqual(len(ancillary), len(self.agent.ancillary_brs))
if ancillary:
bridges = [br.br_name for br in self.agent.ancillary_brs]
for br in ancillary:
self.assertIn(br, bridges)
def test_ancillary_bridges_single(self):
bridges = ['br-int', 'br-ex']
self._test_ancillary_bridges(bridges, ['br-ex'])
def test_ancillary_bridges_none(self):
bridges = ['br-int']
self._test_ancillary_bridges(bridges, [])
def test_ancillary_bridges_multiple(self):
bridges = ['br-int', 'br-ex1', 'br-ex2']
self._test_ancillary_bridges(bridges, ['br-ex1', 'br-ex2'])
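# Build an agent with mocked-out bridges and return the result of
# scan_ancillary_ports for the given vif/registered port sets.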
def mock_scan_ancillary_ports(self, vif_port_set=None,
registered_ports=None):
bridges = ['br-int', 'br-ex']
ancillary = ['br-ex']
with mock.patch.object(self.mod_agent.OVSNeutronAgent,
'setup_integration_br'), \
mock.patch.object(self.mod_agent.OVSNeutronAgent,
'_restore_local_vlan_map'), \
mock.patch('neutron.agent.common.ovs_lib.BaseOVS.get_bridges',
return_value=bridges), \
mock.patch('neutron.agent.common.ovs_lib.BaseOVS.'
'get_bridge_external_bridge_id',
side_effect=ancillary), \
mock.patch('neutron.agent.common.ovs_lib.OVSBridge.'
'get_vif_port_set',
return_value=vif_port_set):
self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(),
**self.kwargs)
return self.agent.scan_ancillary_ports(registered_ports)
def test_scan_ancillary_ports_returns_cur_only_for_unchanged_ports(self):
vif_port_set = set([1, 2])
registered_ports = set([1, 2])
expected = dict(current=vif_port_set)
actual = self.mock_scan_ancillary_ports(vif_port_set, registered_ports)
self.assertEqual(expected, actual)
def test_scan_ancillary_ports_returns_port_changes(self):
vif_port_set = set([1, 3])
registered_ports = set([1, 2])
expected = dict(current=vif_port_set, added=set([3]), removed=set([2]))
actual = self.mock_scan_ancillary_ports(vif_port_set, registered_ports)
self.assertEqual(expected, actual)
class AncillaryBridgesTestOFCtl(AncillaryBridgesTest,
ovs_test_base.OVSOFCtlTestBase):
pass
class TestOvsDvrNeutronAgent(object):
def setUp(self):
super(TestOvsDvrNeutronAgent, self).setUp()
notifier_p = mock.patch(NOTIFIER)
notifier_cls = notifier_p.start()
self.notifier = mock.Mock()
notifier_cls.return_value = self.notifier
cfg.CONF.set_default('firewall_driver',
'neutron.agent.firewall.NoopFirewallDriver',
group='SECURITYGROUP')
kwargs = self.mod_agent.create_agent_config_map(cfg.CONF)
with mock.patch.object(self.mod_agent.OVSNeutronAgent,
'setup_integration_br'),\
mock.patch.object(self.mod_agent.OVSNeutronAgent,
'setup_ancillary_bridges',
return_value=[]),\
mock.patch('neutron.agent.linux.utils.get_interface_mac',
return_value='00:00:00:00:00:01'),\
mock.patch(
'neutron.agent.common.ovs_lib.BaseOVS.get_bridges'),\
mock.patch('oslo_service.loopingcall.'
'FixedIntervalLoopingCall',
new=MockFixedIntervalLoopingCall),\
mock.patch(
'neutron.agent.common.ovs_lib.OVSBridge.' 'db_list',
return_value=[]),\
mock.patch(
'neutron.agent.common.ovs_lib.OVSBridge.' 'get_vif_ports',
return_value=[]):
self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(),
**kwargs)
# set back to True because the initial report_state will succeed due
# to the mocked-out RPC calls
self.agent.use_call = True
self.agent.tun_br = self.br_tun_cls(br_name='br-tun')
self.agent.sg_agent = mock.Mock()
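# Common DVR fixture: a tunneling- and DVR-enabled agent with a single
# physical bridge mapping, a router interface port and a compute port.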
def _setup_for_dvr_test(self):
self._port = mock.Mock()
self._port.ofport = 10
self._port.vif_id = "1234-5678-90"
self._physical_network = 'physeth1'
self._old_local_vlan = None
self._segmentation_id = 2001
self.agent.enable_distributed_routing = True
self.agent.enable_tunneling = True
self.agent.patch_tun_ofport = 1
self.agent.patch_int_ofport = 2
self.agent.dvr_agent.local_ports = {}
self.agent.local_vlan_map = {}
self.agent.dvr_agent.enable_distributed_routing = True
self.agent.dvr_agent.enable_tunneling = True
self.agent.dvr_agent.patch_tun_ofport = 1
self.agent.dvr_agent.patch_int_ofport = 2
self.agent.dvr_agent.tun_br = mock.Mock()
self.agent.dvr_agent.phys_brs[self._physical_network] = mock.Mock()
self.agent.dvr_agent.bridge_mappings = {self._physical_network:
'br-eth1'}
self.agent.dvr_agent.int_ofports[self._physical_network] = 30
self.agent.dvr_agent.phys_ofports[self._physical_network] = 40
self.agent.dvr_agent.local_dvr_map = {}
self.agent.dvr_agent.registered_dvr_macs = set()
self.agent.dvr_agent.dvr_mac_address = 'aa:22:33:44:55:66'
self._net_uuid = 'my-net-uuid'
self._fixed_ips = [{'subnet_id': 'my-subnet-uuid',
'ip_address': '1.1.1.1'}]
self._compute_port = mock.Mock()
self._compute_port.ofport = 20
self._compute_port.vif_id = "1234-5678-91"
self._compute_fixed_ips = [{'subnet_id': 'my-subnet-uuid',
'ip_address': '1.1.1.3'}]
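# Integration bridge calls expected from port_bound for a single port;
# DVR ports additionally trigger a get_vifs_by_ids lookup.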
@staticmethod
def _expected_port_bound(port, lvid, is_dvr=True):
resp = [
mock.call.db_get_val('Port', port.port_name, 'other_config'),
mock.call.set_db_attribute('Port', port.port_name, 'other_config',
mock.ANY),
]
if is_dvr:
resp = [mock.call.get_vifs_by_ids([])] + resp
return resp
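# Calls expected on the tunnel or physical bridge when the DVR process
# flows are installed; the first call depends on the subnet IP version.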
def _expected_install_dvr_process(self, lvid, port, ip_version,
gateway_ip, gateway_mac):
if ip_version == 4:
ipvx_calls = [
mock.call.install_dvr_process_ipv4(
vlan_tag=lvid,
gateway_ip=gateway_ip),
]
else:
ipvx_calls = [
mock.call.install_dvr_process_ipv6(
vlan_tag=lvid,
gateway_mac=gateway_mac),
]
return ipvx_calls + [
mock.call.install_dvr_process(
vlan_tag=lvid,
dvr_mac_address=self.agent.dvr_agent.dvr_mac_address,
vif_mac=port.vif_mac,
),
]
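# Bind a DVR router interface and then a second port (compute, DHCP or
# LBaaS VIP) on a VLAN network and check the flows installed on the
# integration and physical bridges.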
def _test_port_bound_for_dvr_on_vlan_network(self, device_owner,
ip_version=4):
self._setup_for_dvr_test()
if ip_version == 4:
gateway_ip = '1.1.1.1'
cidr = '1.1.1.0/24'
else:
gateway_ip = '2001:100::1'
cidr = '2001:100::0/64'
self._port.vif_mac = gateway_mac = 'aa:bb:cc:11:22:33'
self._compute_port.vif_mac = '77:88:99:00:11:22'
physical_network = self._physical_network
segmentation_id = self._segmentation_id
network_type = p_const.TYPE_VLAN
int_br = mock.create_autospec(self.agent.int_br)
tun_br = mock.create_autospec(self.agent.tun_br)
phys_br = mock.create_autospec(self.br_phys_cls('br-phys'))
int_br.set_db_attribute.return_value = True
int_br.db_get_val.return_value = {}
with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_subnet_for_dvr',
return_value={'gateway_ip': gateway_ip,
'cidr': cidr,
'ip_version': ip_version,
'gateway_mac': gateway_mac}),\
mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_ports_on_host_by_subnet',
return_value=[]),\
mock.patch.object(self.agent.dvr_agent.int_br,
'get_vif_port_by_id',
return_value=self._port),\
mock.patch.object(self.agent, 'int_br', new=int_br),\
mock.patch.object(self.agent, 'tun_br', new=tun_br),\
mock.patch.dict(self.agent.phys_brs,
{physical_network: phys_br}),\
mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\
mock.patch.dict(self.agent.dvr_agent.phys_brs,
{physical_network: phys_br}):
self.agent.port_bound(
self._port, self._net_uuid, network_type,
physical_network, segmentation_id, self._fixed_ips,
n_const.DEVICE_OWNER_DVR_INTERFACE, False)
phy_ofp = self.agent.dvr_agent.phys_ofports[physical_network]
int_ofp = self.agent.dvr_agent.int_ofports[physical_network]
lvid = self.agent.local_vlan_map[self._net_uuid].vlan
expected_on_phys_br = [
mock.call.provision_local_vlan(
port=phy_ofp,
lvid=lvid,
segmentation_id=segmentation_id,
distributed=True,
),
] + self._expected_install_dvr_process(
port=self._port,
lvid=lvid,
ip_version=ip_version,
gateway_ip=gateway_ip,
gateway_mac=gateway_mac)
expected_on_int_br = [
mock.call.provision_local_vlan(
port=int_ofp,
lvid=lvid,
segmentation_id=segmentation_id,
),
] + self._expected_port_bound(self._port, lvid)
self.assertEqual(expected_on_int_br, int_br.mock_calls)
self.assertEqual([], tun_br.mock_calls)
self.assertEqual(expected_on_phys_br, phys_br.mock_calls)
int_br.reset_mock()
tun_br.reset_mock()
phys_br.reset_mock()
self.agent.port_bound(self._compute_port, self._net_uuid,
network_type, physical_network,
segmentation_id,
self._compute_fixed_ips,
device_owner, False)
expected_on_int_br = [
mock.call.install_dvr_to_src_mac(
network_type=network_type,
gateway_mac=gateway_mac,
dst_mac=self._compute_port.vif_mac,
dst_port=self._compute_port.ofport,
vlan_tag=segmentation_id,
),
] + self._expected_port_bound(self._compute_port, lvid, False)
self.assertEqual(expected_on_int_br, int_br.mock_calls)
self.assertEqual([], tun_br.mock_calls)
self.assertEqual([], phys_br.mock_calls)
def _test_port_bound_for_dvr_on_vxlan_network(self, device_owner,
ip_version=4):
self._setup_for_dvr_test()
if ip_version == 4:
gateway_ip = '1.1.1.1'
cidr = '1.1.1.0/24'
else:
gateway_ip = '2001:100::1'
cidr = '2001:100::0/64'
network_type = p_const.TYPE_VXLAN
self._port.vif_mac = gateway_mac = 'aa:bb:cc:11:22:33'
self._compute_port.vif_mac = '77:88:99:00:11:22'
physical_network = self._physical_network
segmentation_id = self._segmentation_id
int_br = mock.create_autospec(self.agent.int_br)
tun_br = mock.create_autospec(self.agent.tun_br)
phys_br = mock.create_autospec(self.br_phys_cls('br-phys'))
int_br.set_db_attribute.return_value = True
int_br.db_get_val.return_value = {}
with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_subnet_for_dvr',
return_value={'gateway_ip': gateway_ip,
'cidr': cidr,
'ip_version': ip_version,
'gateway_mac': gateway_mac}),\
mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_ports_on_host_by_subnet',
return_value=[]),\
mock.patch.object(self.agent.dvr_agent.int_br,
'get_vif_port_by_id',
return_value=self._port),\
mock.patch.object(self.agent, 'int_br', new=int_br),\
mock.patch.object(self.agent, 'tun_br', new=tun_br),\
mock.patch.dict(self.agent.phys_brs,
{physical_network: phys_br}),\
mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\
mock.patch.dict(self.agent.dvr_agent.phys_brs,
{physical_network: phys_br}):
self.agent.port_bound(
self._port, self._net_uuid, network_type,
physical_network, segmentation_id, self._fixed_ips,
n_const.DEVICE_OWNER_DVR_INTERFACE, False)
lvid = self.agent.local_vlan_map[self._net_uuid].vlan
expected_on_int_br = self._expected_port_bound(
self._port, lvid)
expected_on_tun_br = [
mock.call.provision_local_vlan(
network_type=network_type,
segmentation_id=segmentation_id,
lvid=lvid,
distributed=True),
] + self._expected_install_dvr_process(
port=self._port,
lvid=lvid,
ip_version=ip_version,
gateway_ip=gateway_ip,
gateway_mac=gateway_mac)
self.assertEqual(expected_on_int_br, int_br.mock_calls)
self.assertEqual(expected_on_tun_br, tun_br.mock_calls)
self.assertEqual([], phys_br.mock_calls)
int_br.reset_mock()
tun_br.reset_mock()
phys_br.reset_mock()
self.agent.port_bound(self._compute_port, self._net_uuid,
network_type, physical_network,
segmentation_id,
self._compute_fixed_ips,
device_owner, False)
expected_on_int_br = [
mock.call.install_dvr_to_src_mac(
network_type=network_type,
gateway_mac=gateway_mac,
dst_mac=self._compute_port.vif_mac,
dst_port=self._compute_port.ofport,
vlan_tag=lvid,
),
] + self._expected_port_bound(self._compute_port, lvid, False)
self.assertEqual(expected_on_int_br, int_br.mock_calls)
self.assertEqual([], tun_br.mock_calls)
self.assertEqual([], phys_br.mock_calls)
def test_port_bound_for_dvr_with_compute_ports(self):
self._test_port_bound_for_dvr_on_vlan_network(
device_owner="compute:None")
self._test_port_bound_for_dvr_on_vlan_network(
device_owner="compute:None", ip_version=6)
self._test_port_bound_for_dvr_on_vxlan_network(
device_owner="compute:None")
self._test_port_bound_for_dvr_on_vxlan_network(
device_owner="compute:None", ip_version=6)
def test_port_bound_for_dvr_with_lbaas_vip_ports(self):
self._test_port_bound_for_dvr_on_vlan_network(
device_owner=n_const.DEVICE_OWNER_LOADBALANCER)
self._test_port_bound_for_dvr_on_vlan_network(
device_owner=n_const.DEVICE_OWNER_LOADBALANCER, ip_version=6)
self._test_port_bound_for_dvr_on_vxlan_network(
device_owner=n_const.DEVICE_OWNER_LOADBALANCER)
self._test_port_bound_for_dvr_on_vxlan_network(
device_owner=n_const.DEVICE_OWNER_LOADBALANCER, ip_version=6)
def test_port_bound_for_dvr_with_dhcp_ports(self):
self._test_port_bound_for_dvr_on_vlan_network(
device_owner=n_const.DEVICE_OWNER_DHCP)
self._test_port_bound_for_dvr_on_vlan_network(
device_owner=n_const.DEVICE_OWNER_DHCP, ip_version=6)
self._test_port_bound_for_dvr_on_vxlan_network(
device_owner=n_const.DEVICE_OWNER_DHCP)
self._test_port_bound_for_dvr_on_vxlan_network(
device_owner=n_const.DEVICE_OWNER_DHCP, ip_version=6)
def test_port_bound_for_dvr_with_csnat_ports(self):
self._setup_for_dvr_test()
int_br = mock.create_autospec(self.agent.int_br)
tun_br = mock.create_autospec(self.agent.tun_br)
int_br.set_db_attribute.return_value = True
int_br.db_get_val.return_value = {}
with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_subnet_for_dvr',
return_value={'gateway_ip': '1.1.1.1',
'cidr': '1.1.1.0/24',
'ip_version': 4,
'gateway_mac': 'aa:bb:cc:11:22:33'}),\
mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_ports_on_host_by_subnet',
return_value=[]),\
mock.patch.object(self.agent.dvr_agent.int_br,
'get_vif_port_by_id',
return_value=self._port),\
mock.patch.object(self.agent, 'int_br', new=int_br),\
mock.patch.object(self.agent, 'tun_br', new=tun_br),\
mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br):
self.agent.port_bound(
self._port, self._net_uuid, 'vxlan',
None, None, self._fixed_ips,
n_const.DEVICE_OWNER_ROUTER_SNAT,
False)
lvid = self.agent.local_vlan_map[self._net_uuid].vlan
expected_on_int_br = [
mock.call.install_dvr_to_src_mac(
network_type='vxlan',
gateway_mac='aa:bb:cc:11:22:33',
dst_mac=self._port.vif_mac,
dst_port=self._port.ofport,
vlan_tag=lvid,
),
] + self._expected_port_bound(self._port, lvid, is_dvr=False)
self.assertEqual(expected_on_int_br, int_br.mock_calls)
expected_on_tun_br = [
mock.call.provision_local_vlan(
network_type='vxlan',
lvid=lvid,
segmentation_id=None,
distributed=True,
),
]
self.assertEqual(expected_on_tun_br, tun_br.mock_calls)
def test_treat_devices_removed_for_dvr_interface(self):
self._test_treat_devices_removed_for_dvr_interface()
self._test_treat_devices_removed_for_dvr_interface(ip_version=6)
def _test_treat_devices_removed_for_dvr_interface(self, ip_version=4):
self._setup_for_dvr_test()
if ip_version == 4:
gateway_ip = '1.1.1.1'
cidr = '1.1.1.0/24'
else:
gateway_ip = '2001:100::1'
cidr = '2001:100::0/64'
gateway_mac = 'aa:bb:cc:11:22:33'
int_br = mock.create_autospec(self.agent.int_br)
tun_br = mock.create_autospec(self.agent.tun_br)
int_br.set_db_attribute.return_value = True
int_br.db_get_val.return_value = {}
with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_subnet_for_dvr',
return_value={'gateway_ip': gateway_ip,
'cidr': cidr,
'ip_version': ip_version,
'gateway_mac': gateway_mac}),\
mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_ports_on_host_by_subnet',
return_value=[]),\
mock.patch.object(self.agent, 'int_br', new=int_br),\
mock.patch.object(self.agent, 'tun_br', new=tun_br),\
mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\
mock.patch.object(self.agent.dvr_agent.int_br,
'get_vif_port_by_id',
return_value=self._port):
self.agent.port_bound(
self._port, self._net_uuid, 'vxlan',
None, None, self._fixed_ips,
n_const.DEVICE_OWNER_DVR_INTERFACE,
False)
lvid = self.agent.local_vlan_map[self._net_uuid].vlan
self.assertEqual(self._expected_port_bound(self._port, lvid),
int_br.mock_calls)
expected_on_tun_br = [
mock.call.provision_local_vlan(network_type='vxlan',
lvid=lvid, segmentation_id=None, distributed=True),
] + self._expected_install_dvr_process(
port=self._port,
lvid=lvid,
ip_version=ip_version,
gateway_ip=gateway_ip,
gateway_mac=gateway_mac)
self.assertEqual(expected_on_tun_br, tun_br.mock_calls)
int_br.reset_mock()
tun_br.reset_mock()
with mock.patch.object(self.agent, 'reclaim_local_vlan'),\
mock.patch.object(self.agent.plugin_rpc, 'update_device_list',
return_value={
'devices_up': [],
'devices_down': [self._port.vif_id],
'failed_devices_up': [],
'failed_devices_down': []}),\
mock.patch.object(self.agent, 'int_br', new=int_br),\
mock.patch.object(self.agent, 'tun_br', new=tun_br),\
mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br):
self.agent.treat_devices_removed([self._port.vif_id])
if ip_version == 4:
expected = [
mock.call.delete_dvr_process_ipv4(
vlan_tag=lvid,
gateway_ip=gateway_ip),
]
else:
expected = [
mock.call.delete_dvr_process_ipv6(
vlan_tag=lvid,
gateway_mac=gateway_mac),
]
expected.extend([
mock.call.delete_dvr_process(
vlan_tag=lvid,
vif_mac=self._port.vif_mac),
])
self.assertEqual([], int_br.mock_calls)
self.assertEqual(expected, tun_br.mock_calls)
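# Bind a DVR router interface and a second port on a VXLAN network, then
# remove the second port and verify that its DVR source-MAC flow is
# deleted from the integration bridge.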
def _test_treat_devices_removed_for_dvr(self, device_owner, ip_version=4):
self._setup_for_dvr_test()
if ip_version == 4:
gateway_ip = '1.1.1.1'
cidr = '1.1.1.0/24'
else:
gateway_ip = '2001:100::1'
cidr = '2001:100::0/64'
gateway_mac = 'aa:bb:cc:11:22:33'
int_br = mock.create_autospec(self.agent.int_br)
tun_br = mock.create_autospec(self.agent.tun_br)
int_br.set_db_attribute.return_value = True
int_br.db_get_val.return_value = {}
with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_subnet_for_dvr',
return_value={'gateway_ip': gateway_ip,
'cidr': cidr,
'ip_version': ip_version,
'gateway_mac': gateway_mac}),\
mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_ports_on_host_by_subnet',
return_value=[]),\
mock.patch.object(self.agent.dvr_agent.int_br,
'get_vif_port_by_id',
return_value=self._port),\
mock.patch.object(self.agent, 'int_br', new=int_br),\
mock.patch.object(self.agent, 'tun_br', new=tun_br),\
mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br):
self.agent.port_bound(
self._port, self._net_uuid, 'vxlan',
None, None, self._fixed_ips,
n_const.DEVICE_OWNER_DVR_INTERFACE,
False)
lvid = self.agent.local_vlan_map[self._net_uuid].vlan
self.assertEqual(
self._expected_port_bound(self._port, lvid),
int_br.mock_calls)
expected_on_tun_br = [
mock.call.provision_local_vlan(
network_type='vxlan',
segmentation_id=None,
lvid=lvid,
distributed=True),
] + self._expected_install_dvr_process(
port=self._port,
lvid=lvid,
ip_version=ip_version,
gateway_ip=gateway_ip,
gateway_mac=gateway_mac)
self.assertEqual(expected_on_tun_br, tun_br.mock_calls)
int_br.reset_mock()
tun_br.reset_mock()
self.agent.port_bound(self._compute_port,
self._net_uuid, 'vxlan',
None, None,
self._compute_fixed_ips,
device_owner, False)
self.assertEqual(
[
mock.call.install_dvr_to_src_mac(
network_type='vxlan',
gateway_mac='aa:bb:cc:11:22:33',
dst_mac=self._compute_port.vif_mac,
dst_port=self._compute_port.ofport,
vlan_tag=lvid,
),
] + self._expected_port_bound(self._compute_port, lvid, False),
int_br.mock_calls)
self.assertEqual([], tun_br.mock_calls)
int_br.reset_mock()
tun_br.reset_mock()
with mock.patch.object(self.agent, 'reclaim_local_vlan'),\
mock.patch.object(self.agent.plugin_rpc, 'update_device_list',
return_value={
'devices_up': [],
'devices_down': [
self._compute_port.vif_id],
'failed_devices_up': [],
'failed_devices_down': []}),\
mock.patch.object(self.agent, 'int_br', new=int_br),\
mock.patch.object(self.agent, 'tun_br', new=tun_br),\
mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br):
self.agent.treat_devices_removed([self._compute_port.vif_id])
int_br.assert_has_calls([
mock.call.delete_dvr_to_src_mac(
network_type='vxlan',
vlan_tag=lvid,
dst_mac=self._compute_port.vif_mac,
),
])
self.assertEqual([], tun_br.mock_calls)
def test_treat_devices_removed_for_dvr_with_compute_ports(self):
self._test_treat_devices_removed_for_dvr(
device_owner="compute:None")
self._test_treat_devices_removed_for_dvr(
device_owner="compute:None", ip_version=6)
def test_treat_devices_removed_for_dvr_with_lbaas_vip_ports(self):
self._test_treat_devices_removed_for_dvr(
device_owner=n_const.DEVICE_OWNER_LOADBALANCER)
self._test_treat_devices_removed_for_dvr(
device_owner=n_const.DEVICE_OWNER_LOADBALANCER, ip_version=6)
def test_treat_devices_removed_for_dvr_with_dhcp_ports(self):
self._test_treat_devices_removed_for_dvr(
device_owner=n_const.DEVICE_OWNER_DHCP)
self._test_treat_devices_removed_for_dvr(
device_owner=n_const.DEVICE_OWNER_DHCP, ip_version=6)
def test_treat_devices_removed_for_dvr_csnat_port(self):
self._setup_for_dvr_test()
gateway_mac = 'aa:bb:cc:11:22:33'
int_br = mock.create_autospec(self.agent.int_br)
tun_br = mock.create_autospec(self.agent.tun_br)
int_br.set_db_attribute.return_value = True
int_br.db_get_val.return_value = {}
with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_subnet_for_dvr',
return_value={'gateway_ip': '1.1.1.1',
'cidr': '1.1.1.0/24',
'ip_version': 4,
'gateway_mac': gateway_mac}),\
mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_ports_on_host_by_subnet',
return_value=[]),\
mock.patch.object(self.agent.dvr_agent.int_br,
'get_vif_port_by_id',
return_value=self._port),\
mock.patch.object(self.agent, 'int_br', new=int_br),\
mock.patch.object(self.agent, 'tun_br', new=tun_br),\
mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br):
self.agent.port_bound(
self._port, self._net_uuid, 'vxlan',
None, None, self._fixed_ips,
n_const.DEVICE_OWNER_ROUTER_SNAT,
False)
lvid = self.agent.local_vlan_map[self._net_uuid].vlan
expected_on_int_br = [
mock.call.install_dvr_to_src_mac(
network_type='vxlan',
gateway_mac=gateway_mac,
dst_mac=self._port.vif_mac,
dst_port=self._port.ofport,
vlan_tag=lvid,
),
] + self._expected_port_bound(self._port, lvid, is_dvr=False)
self.assertEqual(expected_on_int_br, int_br.mock_calls)
expected_on_tun_br = [
mock.call.provision_local_vlan(
network_type='vxlan',
lvid=lvid,
segmentation_id=None,
distributed=True,
),
]
self.assertEqual(expected_on_tun_br, tun_br.mock_calls)
int_br.reset_mock()
tun_br.reset_mock()
with mock.patch.object(self.agent, 'reclaim_local_vlan'),\
mock.patch.object(self.agent.plugin_rpc, 'update_device_list',
return_value={
'devices_up': [],
'devices_down': [self._port.vif_id],
'failed_devices_up': [],
'failed_devices_down': []}),\
mock.patch.object(self.agent, 'int_br', new=int_br),\
mock.patch.object(self.agent, 'tun_br', new=tun_br),\
mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br):
self.agent.treat_devices_removed([self._port.vif_id])
expected_on_int_br = [
mock.call.delete_dvr_to_src_mac(
network_type='vxlan',
dst_mac=self._port.vif_mac,
vlan_tag=lvid,
),
]
self.assertEqual(expected_on_int_br, int_br.mock_calls)
expected_on_tun_br = []
self.assertEqual(expected_on_tun_br, tun_br.mock_calls)
def test_setup_dvr_flows_on_int_br(self):
self._setup_for_dvr_test()
int_br = mock.create_autospec(self.agent.int_br)
tun_br = mock.create_autospec(self.agent.tun_br)
with mock.patch.object(self.agent, 'int_br', new=int_br),\
mock.patch.object(self.agent, 'tun_br', new=tun_br),\
mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\
mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_dvr_mac_address_list',
return_value=[{'host': 'cn1',
'mac_address': 'aa:bb:cc:dd:ee:ff'},
{'host': 'cn2',
'mac_address': '11:22:33:44:55:66'}]):
self.agent.dvr_agent.setup_dvr_flows_on_integ_br()
self.assertTrue(self.agent.dvr_agent.in_distributed_mode())
physical_networks = list(
self.agent.dvr_agent.bridge_mappings.keys())
ioport = self.agent.dvr_agent.int_ofports[physical_networks[0]]
expected_on_int_br = [
# setup_dvr_flows_on_integ_br
mock.call.delete_flows(),
mock.call.setup_canary_table(),
mock.call.install_drop(table_id=constants.DVR_TO_SRC_MAC,
priority=1),
mock.call.install_drop(table_id=constants.DVR_TO_SRC_MAC_VLAN,
priority=1),
mock.call.install_normal(table_id=constants.LOCAL_SWITCHING,
priority=1),
mock.call.install_drop(table_id=constants.LOCAL_SWITCHING,
priority=2,
in_port=ioport),
]
self.assertEqual(expected_on_int_br, int_br.mock_calls)
self.assertEqual([], tun_br.mock_calls)
def test_get_dvr_mac_address(self):
self._setup_for_dvr_test()
self.agent.dvr_agent.dvr_mac_address = None
with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_dvr_mac_address_by_host',
return_value={'host': 'cn1',
'mac_address': 'aa:22:33:44:55:66'}):
self.agent.dvr_agent.get_dvr_mac_address()
self.assertEqual('aa:22:33:44:55:66',
self.agent.dvr_agent.dvr_mac_address)
self.assertTrue(self.agent.dvr_agent.in_distributed_mode())
def test_get_dvr_mac_address_exception(self):
self._setup_for_dvr_test()
self.agent.dvr_agent.dvr_mac_address = None
int_br = mock.create_autospec(self.agent.int_br)
with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_dvr_mac_address_by_host',
side_effect=oslo_messaging.RemoteError),\
mock.patch.object(self.agent, 'int_br', new=int_br),\
mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br):
self.agent.dvr_agent.get_dvr_mac_address()
self.assertIsNone(self.agent.dvr_agent.dvr_mac_address)
self.assertFalse(self.agent.dvr_agent.in_distributed_mode())
self.assertEqual([mock.call.install_normal()], int_br.mock_calls)
def test_get_dvr_mac_address_retried(self):
valid_entry = {'host': 'cn1', 'mac_address': 'aa:22:33:44:55:66'}
raise_timeout = oslo_messaging.MessagingTimeout()
# Raise a timeout the first 2 times it calls get_dvr_mac_address()
self._setup_for_dvr_test()
self.agent.dvr_agent.dvr_mac_address = None
with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_dvr_mac_address_by_host',
side_effect=(raise_timeout, raise_timeout,
valid_entry)):
self.agent.dvr_agent.get_dvr_mac_address()
self.assertEqual('aa:22:33:44:55:66',
self.agent.dvr_agent.dvr_mac_address)
self.assertTrue(self.agent.dvr_agent.in_distributed_mode())
self.assertEqual(self.agent.dvr_agent.plugin_rpc.
get_dvr_mac_address_by_host.call_count, 3)
def test_get_dvr_mac_address_retried_max(self):
raise_timeout = oslo_messaging.MessagingTimeout()
# Raise a timeout every time until we give up, currently 5 tries
self._setup_for_dvr_test()
self.agent.dvr_agent.dvr_mac_address = None
int_br = mock.create_autospec(self.agent.int_br)
with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
'get_dvr_mac_address_by_host',
side_effect=raise_timeout),\
mock.patch.object(utils, "execute"),\
mock.patch.object(self.agent, 'int_br', new=int_br),\
mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br):
self.agent.dvr_agent.get_dvr_mac_address()
self.assertIsNone(self.agent.dvr_agent.dvr_mac_address)
self.assertFalse(self.agent.dvr_agent.in_distributed_mode())
self.assertEqual(self.agent.dvr_agent.plugin_rpc.
get_dvr_mac_address_by_host.call_count, 5)
def test_dvr_mac_address_update(self):
self._setup_for_dvr_test()
newhost = 'cn2'
newmac = 'aa:bb:cc:dd:ee:ff'
int_br = mock.create_autospec(self.agent.int_br)
tun_br = mock.create_autospec(self.agent.tun_br)
phys_br = mock.create_autospec(self.br_phys_cls('br-phys'))
physical_network = 'physeth1'
with mock.patch.object(self.agent, 'int_br', new=int_br),\
mock.patch.object(self.agent, 'tun_br', new=tun_br),\
mock.patch.dict(self.agent.phys_brs,
{physical_network: phys_br}),\
mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\
mock.patch.dict(self.agent.dvr_agent.phys_brs,
{physical_network: phys_br}):
self.agent.dvr_agent.\
dvr_mac_address_update(
dvr_macs=[{'host': newhost,
'mac_address': newmac}])
expected_on_int_br = [
mock.call.add_dvr_mac_vlan(
mac=newmac,
port=self.agent.int_ofports[physical_network]),
mock.call.add_dvr_mac_tun(
mac=newmac,
port=self.agent.patch_tun_ofport),
]
expected_on_tun_br = [
mock.call.add_dvr_mac_tun(
mac=newmac,
port=self.agent.patch_int_ofport),
]
expected_on_phys_br = [
mock.call.add_dvr_mac_vlan(
mac=newmac,
port=self.agent.phys_ofports[physical_network]),
]
self.assertEqual(expected_on_int_br, int_br.mock_calls)
self.assertEqual(expected_on_tun_br, tun_br.mock_calls)
self.assertEqual(expected_on_phys_br, phys_br.mock_calls)
int_br.reset_mock()
tun_br.reset_mock()
phys_br.reset_mock()
with mock.patch.object(self.agent, 'int_br', new=int_br),\
mock.patch.object(self.agent, 'tun_br', new=tun_br),\
mock.patch.dict(self.agent.phys_brs,
{physical_network: phys_br}),\
mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\
mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\
mock.patch.dict(self.agent.dvr_agent.phys_brs,
{physical_network: phys_br}):
self.agent.dvr_agent.dvr_mac_address_update(dvr_macs=[])
expected_on_int_br = [
mock.call.remove_dvr_mac_vlan(
mac=newmac),
mock.call.remove_dvr_mac_tun(
mac=newmac,
port=self.agent.patch_tun_ofport),
]
expected_on_tun_br = [
mock.call.remove_dvr_mac_tun(
mac=newmac),
]
expected_on_phys_br = [
mock.call.remove_dvr_mac_vlan(
mac=newmac),
]
self.assertEqual(expected_on_int_br, int_br.mock_calls)
self.assertEqual(expected_on_tun_br, tun_br.mock_calls)
self.assertEqual(expected_on_phys_br, phys_br.mock_calls)
def test_ovs_restart(self):
self._setup_for_dvr_test()
reset_methods = (
'reset_ovs_parameters', 'reset_dvr_parameters',
'setup_dvr_flows_on_integ_br', 'setup_dvr_flows_on_tun_br',
'setup_dvr_flows_on_phys_br', 'setup_dvr_mac_flows_on_all_brs')
reset_mocks = [mock.patch.object(self.agent.dvr_agent, method).start()
for method in reset_methods]
tun_br = mock.create_autospec(self.agent.tun_br)
with mock.patch.object(self.agent,
'check_ovs_status',
return_value=constants.OVS_RESTARTED),\
mock.patch.object(self.agent,
'_agent_has_updates',
side_effect=TypeError('loop exit')),\
mock.patch.object(self.agent, 'tun_br', new=tun_br):
# block RPC calls and bridge calls
self.agent.setup_physical_bridges = mock.Mock()
self.agent.setup_integration_br = mock.Mock()
self.agent.reset_tunnel_br = mock.Mock()
self.agent.state_rpc = mock.Mock()
try:
self.agent.rpc_loop(polling_manager=mock.Mock())
except TypeError:
pass
self.assertTrue(all([x.called for x in reset_mocks]))
class TestOvsDvrNeutronAgentOFCtl(TestOvsDvrNeutronAgent,
ovs_test_base.OVSOFCtlTestBase):
pass
|
|
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some code for creating Google Chart URI's."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import itertools
import math
import re
import urllib
# external dependencies (from nb_third_party)
from graphy import common
from graphy.backends import google_chart_api
CHART_URI = 'http://chart.apis.google.com/chart'
BASE_COLORS = ('ff9900', '1a00ff', 'ff00e6', '80ff00', '00e6ff', 'fae30a',
'BE81F7', '9f5734', '000000', 'ff0000', '3090c0', '477248f',
'ababab', '7b9f34', '00ff00', '0000ff', '9900ff', '405090',
'051290', 'f3e000', '9030f0', 'f03060', 'e0a030', '4598cd')
CHART_WIDTH = 720
CHART_HEIGHT = 415
def DarkenHexColorCode(color, shade=1):
"""Given a color in hex format (for HTML), darken it X shades."""
rgb_values = [int(x, 16) for x in re.findall(r'\w\w', color)]
new_color = []
for value in rgb_values:
value -= shade*32
if value <= 0:
new_color.append('00')
elif value <= 16:
# Google Chart API requires that color values be 0-padded.
new_color.append('0' + hex(value)[2:])
else:
new_color.append(hex(value)[2:])
return ''.join(new_color)
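# Worked example (illustrative comment only, not executed): each RGB byte is
# reduced by shade*32 and re-encoded, so darkening the base color 'ff9900' by
# one shade gives 'df7900' (0xff -> 0xdf, 0x99 -> 0x79, 0x00 clamps to '00').
#
#   >>> DarkenHexColorCode('ff9900', shade=1)
#   'df7900'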
def _GoodTicks(max_value, tick_size=2.5, num_ticks=10.0):
"""Find a good round tick size to use in graphs."""
try_tick = tick_size
while try_tick < max_value:
if (max_value / try_tick) > num_ticks:
try_tick *= 2
else:
return int(round(try_tick))
# Fallback
print "Could not find good tick size for %s (size=%s, num=%s)" % (max_value, tick_size, num_ticks)
simple_value = int(max_value / num_ticks)
if simple_value > 0:
return simple_value
else:
return 1
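# Illustrative comment: the tick size starts at `tick_size` and doubles until
# no more than `num_ticks` ticks are needed. For example, a max_value of 100
# with the defaults tries 2.5 -> 5 -> 10 and returns 10 (100 / 10 == 10 ticks).
#
#   >>> _GoodTicks(100)
#   10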
def _BarGraphHeight(bar_count):
# TODO(tstromberg): Fix hardcoding.
proposed_height = 52 + (bar_count*13)
if proposed_height > CHART_HEIGHT:
return CHART_HEIGHT
else:
return proposed_height
def PerRunDurationBarGraph(run_data, scale=None):
"""Output a Google Chart API URL showing per-run durations."""
chart = google_chart_api.BarChart()
chart.vertical = False
chart.bottom.label_gridlines = True
chart.bottom.label_positions = chart.bottom.labels
max_run_avg = -1
runs = {}
for (ns, run_averages) in run_data:
chart.left.labels.append(ns)
for run_num, run_avg in enumerate(run_averages):
if run_num not in runs:
runs[run_num] = []
runs[run_num].append(run_avg)
if run_avg > max_run_avg:
max_run_avg = run_avg
if max_run_avg < 0:
print "No decent data to graph: %s" % run_data
return None
if not scale:
scale = int(math.ceil(max_run_avg / 5) * 5)
if len(runs) == 1:
bar_count = len(runs[0])
chart.AddBars(runs[0])
else:
bar_count = 0
for run_num in sorted(runs):
bar_count += len(runs[run_num])
chart.AddBars(runs[run_num], label='Run %s' % (run_num+1),
color=DarkenHexColorCode('4684ee', run_num*3))
tick = _GoodTicks(scale, num_ticks=15.0)
labels = range(0, scale, tick) + [scale]
chart.bottom.min = 0
chart.display.enhanced_encoding = True
bottom_axis = chart.AddAxis('x', common.Axis())
bottom_axis.labels = ['Duration in ms.']
bottom_axis.label_positions = [int((max_run_avg/2.0)*.9)]
chart.bottom.labels = labels
chart.bottom.max = labels[-1]
return chart.display.Url(CHART_WIDTH, _BarGraphHeight(bar_count))
def MinimumDurationBarGraph(fastest_data, scale=None):
"""Output a Google Chart API URL showing minimum-run durations."""
chart = google_chart_api.BarChart()
chart.vertical = False
chart.bottom.label_gridlines = True
chart.bottom.label_positions = chart.bottom.labels
chart.AddBars([x[1] for x in fastest_data])
chart.left.labels = [x[0].name for x in fastest_data]
slowest_time = fastest_data[-1][1]
if not scale:
scale = int(math.ceil(slowest_time / 5) * 5)
tick = _GoodTicks(scale, num_ticks=15.0)
labels = range(0, scale, tick) + [scale]
chart.bottom.min = 0
chart.bottom.max = scale
chart.display.enhanced_encoding = True
bottom_axis = chart.AddAxis('x', common.Axis())
bottom_axis.labels = ['Duration in ms.']
bottom_axis.label_positions = [int((scale/2.0)*.9)]
chart.bottom.labels = labels
return chart.display.Url(CHART_WIDTH, _BarGraphHeight(len(chart.left.labels)))
def _MakeCumulativeDistribution(run_data, x_chunk=1.5, percent_chunk=3.5):
"""Given run data, generate a cumulative distribution (X in Xms).
Args:
run_data: a tuple of nameserver and query durations
x_chunk: How much value should be chunked together on the x-axis
percent_chunk: How much percentage should be chunked together on y-axis.
Returns:
A list of tuples of tuples: [(ns_name, ((percentage, time),))]
We chunk the data together to intelligently minimize the number of points
that need to be passed to the Google Chart API later (URL limitation!)
"""
# TODO(tstromberg): Use a more efficient algorithm. Pop values out each iter?
dist = []
for (ns, results) in run_data:
if not results:
continue
host_dist = [(0, 0)]
max_result = max(results)
chunk_max = min(results)
# Why such a low value? To make sure the delta for the first coordinate is
# always >percent_chunk. We always want to store the first coordinate.
last_percent = -99
while chunk_max < max_result:
values = [x for x in results if x <= chunk_max]
percent = float(len(values)) / float(len(results)) * 100
if (percent - last_percent) > percent_chunk:
host_dist.append((percent, max(values)))
last_percent = percent
# TODO(tstromberg): Think about using multipliers to degrade precision.
chunk_max += x_chunk
# Make sure the final coordinate is exact.
host_dist.append((100, max_result))
dist.append((ns, host_dist))
return dist
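# Illustrative comment (values approximate): for a single nameserver with query
# durations [10, 12, 30] and the default chunk sizes, the cumulative
# distribution comes out roughly as
#   [('ns', [(0, 0), (33.3, 10), (66.7, 12), (100, 30)])]
# i.e. about a third of the queries finished within 10ms, two thirds within
# 12ms, and all of them within 30ms.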
def _MaximumRunDuration(run_data):
"""For a set of run data, return the longest duration.
Args:
run_data: a tuple of nameserver and query durations
Returns:
longest duration found in runs_data (float)
"""
times = [x[1] for x in run_data]
return max(itertools.chain(*times))
def _SortDistribution(a, b):
"""Sort distribution graph by nameserver name."""
sys_pos_cmp = cmp(b[0].system_position, a[0].system_position)
if sys_pos_cmp:
return sys_pos_cmp
preferred_cmp = cmp(b[0].is_keeper, a[0].is_keeper)
if preferred_cmp:
return preferred_cmp
return cmp(a[0].name, b[0].name)
def DistributionLineGraph(run_data, scale=None, sort_by=None):
"""Return a Google Chart API URL showing duration distribution per ns."""
# TODO(tstromberg): Rewrite this method using graphy. Graphy does not
# support setting explicit x values for line graphs, which makes things
# difficult.
distribution = _MakeCumulativeDistribution(run_data)
datasets = []
labels = []
# TODO(tstromberg): Find a way to make colors consistent between runs.
colors = BASE_COLORS[0:len(distribution)]
if not sort_by:
sort_by = _SortDistribution
max_value = _MaximumRunDuration(run_data)
if not scale:
scale = max_value
elif scale < max_value:
max_value = scale
scale = max_value / 100.0
for (ns, xy_pairs) in sorted(distribution, cmp=sort_by):
if len(ns.name) > 1:
labels.append(urllib.quote_plus(ns.name))
else:
labels.append(urllib.quote_plus(ns.ip))
x = []
y = []
for (percentage, duration) in xy_pairs:
scaled_duration = int(round(duration/scale))
x.append(scaled_duration)
y.append(int(round(percentage)))
# Only append one point past the scale max.
if scaled_duration >= 100:
break
# TODO(tstromberg): Use google_chart_api.util.EnhancedEncoder
datasets.append(','.join(map(str, x)))
datasets.append(','.join(map(str, y)))
# TODO(tstromberg): See if we can get the % sign in the labels!
uri = (('%(uri)s?cht=lxy&chs=%(x)sx%(y)s&chxt=x,y&chg=10,20'
'&chxr=0,0,%(max)s|1,0,100&chd=t:%(datasets)s&chco=%(colors)s'
'&chxt=x,y,x,y&chxl=2:||Duration+in+ms||3:||%%25|'
'&chdl=%(labels)s') %
{'uri': CHART_URI, 'datasets': '|'.join(map(str, datasets)),
'max': int(round(max_value)), 'x': CHART_WIDTH, 'y': CHART_HEIGHT,
'colors': ','.join(colors), 'labels': '|'.join(labels)})
return uri
|
|
# Copyright 2014 Koert van der Veer
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import unittest
import logshipper.context
import logshipper.filters
class Tests(unittest.TestCase):
def test_drop(self):
handler = logshipper.filters.prepare_drop(None)
self.assertEqual(handler({}, None),
logshipper.filters.DROP_MESSAGE)
def test_match_1(self):
handler = logshipper.filters.prepare_match("t(.st)")
message = {"message": "This is a test."}
context = logshipper.context.Context(message, None)
result = handler(message, context)
self.assertEqual(result, None)
self.assertEqual(context.match_field, "message")
self.assertEqual(context.backreferences, ['test', 'est'])
message = {"message": "This is not a match."}
context = logshipper.context.Context(message, None)
result = handler(message, context)
self.assertEqual(result, logshipper.filters.SKIP_STEP)
self.assertEqual(context.match_field, None)
self.assertEqual(context.backreferences, [])
def test_match_n(self):
handler = logshipper.filters.prepare_match({"message": "(t.st)",
"foo": "(?P<boo>b.r)"})
message = {"message": "This is a test.", "foo": "barbar"}
context = logshipper.context.Context(message, None)
result = handler(message, context)
self.assertEqual(result, None)
self.assertEqual(context.match_field, None)
self.assertEqual(context.backreferences, [])
self.assertEqual(message['boo'], 'bar')
def test_extract1(self):
handler = logshipper.filters.prepare_extract({"message": "(t.st)",
"foo": "(?P<boo>b.r)"})
message = {"message": "This is a test.", "foo": "barbar"}
context = logshipper.context.Context(message, None)
result = handler(message, context)
self.assertEqual(result, None)
self.assertEqual(context.match_field, None)
self.assertEqual(context.backreferences, [])
self.assertEqual(message['boo'], 'bar')
self.assertEqual(message['foo'], 'bar')
self.assertEqual(message['message'], 'This is a .')
def test_extract2(self):
handler = logshipper.filters.prepare_extract({"message": "(t.st)"})
message = {"message": "This is a fail."}
context = logshipper.context.Context(message, None)
result = handler(message, context)
self.assertEqual(result, logshipper.filters.SKIP_STEP)
def test_edge1(self):
h = logshipper.filters.prepare_edge("{foo}")
handler = lambda m: h(m, logshipper.context.Context(m, None))
result = handler({"foo": "1"})
self.assertNotEqual(result, logshipper.filters.SKIP_STEP)
result = handler({"foo": "1"})
self.assertEqual(result, logshipper.filters.SKIP_STEP)
result = handler({"foo": "2"})
self.assertNotEqual(result, logshipper.filters.SKIP_STEP)
result = handler({"foo": "1"})
self.assertNotEqual(result, logshipper.filters.SKIP_STEP)
def test_edge2(self):
h = logshipper.filters.prepare_edge({"trigger": "{foo}",
"backlog": 2})
handler = lambda m: h(m, logshipper.context.Context(m, None))
result = handler({"foo": "1"})
self.assertNotEqual(result, logshipper.filters.SKIP_STEP)
result = handler({"foo": "2"})
self.assertNotEqual(result, logshipper.filters.SKIP_STEP)
result = handler({"foo": "1"})
self.assertEqual(result, logshipper.filters.SKIP_STEP)
result = handler({"foo": "2"})
self.assertEqual(result, logshipper.filters.SKIP_STEP)
result = handler({"foo": "3"})
self.assertNotEqual(result, logshipper.filters.SKIP_STEP)
result = handler({"foo": "1"})
self.assertNotEqual(result, logshipper.filters.SKIP_STEP)
def test_replace(self):
match_handler = logshipper.filters.prepare_match("t(.st)")
replace_handler = logshipper.filters.prepare_replace("T{1}")
message = {"message": "This is a test."}
context = logshipper.context.Context(message, None)
match_handler(message, context)
replace_handler(message, context)
self.assertEqual(message['message'], 'This is a Test.')
def test_set(self):
handler = logshipper.filters.prepare_set({"baz": "l{1}{foo}r"})
message = {"foo": "shippe"}
context = logshipper.context.Context(message, None)
context.backreferences = ("", "og",)
result = handler(message, context)
self.assertEqual(result, None)
self.assertEqual(message['baz'], "logshipper")
def test_unset(self):
handler = logshipper.filters.prepare_unset("foo, bar")
message = {"foo": "shippe", "baz": "yeah"}
context = logshipper.context.Context(message, None)
context.backreferences = ("", "og",)
result = handler(message, context)
self.assertEqual(result, None)
self.assertEqual(message, {"baz": "yeah"})
def test_unset_multiple(self):
handler = logshipper.filters.prepare_unset(["foo", "bar"])
message = {"foo": "shippe", "baz": "yeah"}
context = logshipper.context.Context(message, None)
context.backreferences = ("", "og",)
result = handler(message, context)
self.assertEqual(result, None)
self.assertEqual(message, {"baz": "yeah"})
def test_python(self):
handler = logshipper.filters.prepare_python("message['a'] = 4")
message = {}
context = logshipper.context.Context(message, None)
result = handler(message, context)
self.assertEqual(result, None)
self.assertEqual(message, {"a": 4})
@unittest.skip("Travis-ci has some env where the timezone doesn't parse")
def test_strptime_parse_tz(self):
handler = logshipper.filters.prepare_strptime({
"field": "foo",
})
now = datetime.datetime.now()
message = {"foo": "Nov 13 01:22:22 CEST"}
context = logshipper.context.Context(message, None)
result = handler(message, context)
self.assertEqual(result, None)
date = datetime.datetime(now.year, 11, 13, 0, 22, 22, 0)
self.assertEqual(message['foo'], date)
def test_strptime_parse(self):
handler = logshipper.filters.prepare_strptime({
"field": "foo",
})
message = {"foo": "2014 Nov 13 01:22:22"}
context = logshipper.context.Context(message, None)
result = handler(message, context)
self.assertEqual(result, None)
date = datetime.datetime(2014, 11, 13, 1, 22, 22, 0)
self.assertEqual(message['foo'], date)
def test_strptime2(self):
handler = logshipper.filters.prepare_strptime({
"format": "%Y %b %d %H:%M:%S",
"field": "foo",
"timezone": "Europe/Amsterdam"
})
message = {"foo": "2014 Nov 13 01:22:22"}
context = logshipper.context.Context(message, None)
result = handler(message, context)
self.assertEqual(result, None)
date = datetime.datetime(2014, 11, 13, 1, 22, 22, 0)
self.assertEqual(message, {"foo": date})
def test_parse_timedelta(self):
self.assertEqual(logshipper.filters.parse_timedelta('1d2h 5m '),
datetime.timedelta(days=1, hours=2, minutes=5))
self.assertEqual(logshipper.filters.parse_timedelta('1.5d'),
datetime.timedelta(days=1, hours=12))
with self.assertRaises(ValueError):
logshipper.filters.parse_timedelta('1d2h5r')
def test_timewindow1(self):
handler = logshipper.filters.prepare_timewindow("1m")
now = datetime.datetime.utcnow()
message = {"timestamp": now}
context = logshipper.context.Context(message, None)
result = handler(message, context)
self.assertEqual(result, None)
message["timestamp"] = now - datetime.timedelta(minutes=2)
result = handler(message, context)
self.assertEqual(result, logshipper.filters.SKIP_STEP)
message["timestamp"] = now + datetime.timedelta(minutes=2)
result = handler(message, context)
self.assertEqual(result, logshipper.filters.SKIP_STEP)
def test_timewindow2(self):
handler = logshipper.filters.prepare_timewindow("1m-3m")
now = datetime.datetime.utcnow()
message = {"timestamp": now}
context = logshipper.context.Context(message, None)
result = handler(message, context)
self.assertEqual(result, None)
message["timestamp"] = now - datetime.timedelta(minutes=2)
result = handler(message, context)
self.assertEqual(result, logshipper.filters.SKIP_STEP)
message["timestamp"] = now + datetime.timedelta(minutes=2)
result = handler(message, context)
self.assertEqual(result, None)
|
|
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import links
from chainer import testing
from chainer.testing import attr
# TODO(hvy): Remove the following import once testing.backend is imported
# in testing/__init__.py
import chainer.testing.backend
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
't': [[0, 2], [-1, 1, 2]],
'reduce': ['sum', 'no'],
}))
@testing.backend.inject_backend_tests(
['test_forward', 'test_return_samples'],
[
# CPU test
{},
# CUDA test
{'use_cuda': True},
])
class TestNegativeSampling(unittest.TestCase):
in_size = 3
sample_size = 2
def setUp(self):
self._config_user = chainer.using_config('dtype', self.dtype)
self._config_user.__enter__()
batch = len(self.t)
x_shape = (batch, self.in_size)
self.x = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
self.t = numpy.array(self.t).astype(numpy.int32)
if self.reduce == 'no':
g_shape = self.t.shape
elif self.reduce == 'sum':
g_shape = ()
self.gy = numpy.random.uniform(-1, 1, g_shape).astype(self.dtype)
if self.dtype == numpy.float16:
self.test_forward_options = {'atol': 1e-2}
self.test_backward_options = {'atol': 5e-3}
else:
self.test_forward_options = {}
self.test_backward_options = {'atol': 1e-4}
def tearDown(self):
self._config_user.__exit__(None, None, None)
def create_link(self, rng=None):
if rng is None:
rng = numpy.random.RandomState()
link = links.NegativeSampling(
self.in_size, [10, 5, 2, 5, 2], self.sample_size)
link.cleargrads()
# W is initialized with zero. Inject random values for meaningful test.
link.W.array[:] = rng.uniform(-1, 1, link.W.shape)
return link
def call_link_with_samples(self, samples, func):
# Call the link with given `samples` array.
# `func` is a function in which the link is called.
# mock sampler that returns the saved samples
def mock_sample(shape):
assert samples.shape == shape
return samples.copy()
# Wrap F.negative_sampling to replace sampler with the mock
orig_negative_sampling = chainer.functions.negative_sampling
def wrap_negative_sampling(*args, **kwargs):
args = args[:3] + (mock_sample,) + args[4:]
return orig_negative_sampling(*args, **kwargs)
with testing.patch(
'chainer.functions.loss.negative_sampling.negative_sampling',
wraps=wrap_negative_sampling) as m:
ret = func()
assert m.call_count == 1
return ret
def test_forward(self, backend_config):
x_data = backend_config.get_array(self.x)
t_data = backend_config.get_array(self.t)
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
link = self.create_link()
if backend_config.use_cuda:
link.to_gpu()
y, samples = link(x, t, reduce=self.reduce, return_samples=True)
self.assertEqual(y.shape, self.gy.shape)
W = cuda.to_cpu(link.W.data)
samples = cuda.to_cpu(samples)
loss = numpy.empty((len(self.x),), self.dtype)
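# The loop below recomputes the negative-sampling loss by hand for each
# sample: scores f = W[samples] . x, the positive-example score is negated,
# and the loss is sum_k log(1 + exp(f_k)); ignored targets (t == -1)
# contribute zero. The link's output is then compared against this reference.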
for i in range(len(self.x)):
ix = self.x[i]
it = self.t[i]
if it == -1:
loss[i] = 0
else:
w = W[samples[i]]
f = w.dot(ix)
# first one is positive example
f[0] *= -1
loss[i] = numpy.logaddexp(f, 0).sum()
if self.reduce == 'sum':
loss = loss.sum()
testing.assert_allclose(y.data, loss, **self.test_forward_options)
@attr.gpu
def test_to_cpu(self):
link = self.create_link()
link.to_device((cuda.cupy, 0))
self.assertEqual(
link.sampler.device, chainer.get_device((cuda.cupy, 0)))
link.to_device(numpy)
self.assertEqual(link.sampler.device, backend.CpuDevice())
def test_return_samples(self, backend_config):
batch_size = self.t.shape[0]
link = self.create_link()
if backend_config.use_cuda:
link.to_gpu()
x_data = backend_config.get_array(self.x)
t_data = backend_config.get_array(self.t)
x = chainer.Variable(x_data)
t = chainer.Variable(t_data, requires_grad=False)
# return_samples=True
y, samples = link(x, t, reduce=self.reduce, return_samples=True)
assert isinstance(samples, backend_config.xp.ndarray)
assert samples.shape == (batch_size, self.sample_size + 1)
assert samples.dtype == numpy.int32
# return_samples=False, with saved samples
y_ = self.call_link_with_samples(
samples,
lambda: link(x, t, reduce=self.reduce))
# y and y_ should equal
numpy.testing.assert_array_equal(
cuda.to_cpu(y.array), cuda.to_cpu(y_.array))
@attr.gpu
def test_backward_cpu_gpu(self):
# This test compares gradients of CPU and GPU modes.
rng = numpy.random.RandomState()
rng_state = rng.get_state()
# Call CPU mode link and save samples
x = chainer.Variable(self.x)
t = chainer.Variable(self.t)
link = self.create_link(rng)
y, samples = link(x, t, return_samples=True)
y.backward()
assert t.grad is None
gw_cpu = link.W.grad
gx_cpu = x.grad
# Call GPU mode link
rng.set_state(rng_state)
link = self.create_link(rng)
link.to_gpu()
x = chainer.Variable(cuda.to_gpu(self.x))
t = chainer.Variable(cuda.to_gpu(self.t))
y = self.call_link_with_samples(
cuda.to_gpu(samples), lambda: link(x, t))
y.backward()
assert t.grad is None
gw_gpu = link.W.grad
gx_gpu = x.grad
# Compare gradients from CPU and GPU modes
testing.assert_allclose(gx_cpu, gx_gpu, **self.test_backward_options)
testing.assert_allclose(gw_cpu, gw_gpu, **self.test_backward_options)
testing.run_module(__name__, __file__)
|
|
"""Spectral Embedding"""
# Author: Marina Meila <mmp@stat.washington.edu>
# James McQueen <jmcq@u.washington.edu>
# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
#
# after the scikit-learn version by
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Wei LI <kuantkid@gmail.com>
#
# diffusion maps portion after:
# Satrajit Ghosh <satra@mit.edu> https://github.com/satra/mapalign/blob/master/mapalign/embed.py
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from scipy.sparse.csgraph import connected_components
from ..embedding.base import BaseEmbedding
from ..utils.validation import check_random_state
from ..utils.eigendecomp import eigen_decomposition, check_eigen_solver
from ..geometry.complete_adjacency_matrix import complete_adjacency_matrix
from ..geometry.affinity import compute_affinity_matrix
from ..geometry.laplacian import compute_laplacian_matrix
from ..utils.nystrom_extension import nystrom_extension
def _graph_connected_component(graph, node_id):
"""
Find the connected component of the graph that contains the given node
Parameters
----------
graph : array-like, shape: (n_samples, n_samples)
adjacency matrix of the graph, non-zero weight means an edge
between the nodes
node_id : int
The index of the query node of the graph
Returns
-------
connected_components : array-like, shape: (n_samples,)
A boolean array indicating which nodes belong to the connected
component containing the given query node
"""
connected_components = np.zeros(shape=(graph.shape[0]), dtype=np.bool)
connected_components[node_id] = True
n_node = graph.shape[0]
for i in range(n_node):
last_num_component = connected_components.sum()
_, node_to_add = np.where(graph[connected_components] != 0)
connected_components[node_to_add] = True
if last_num_component >= connected_components.sum():
break
return connected_components
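# Illustrative comment: for a 3-node graph where only nodes 0 and 1 are joined
# (adjacency [[0, 1, 0], [1, 0, 0], [0, 0, 0]]), starting from node 0 the
# component mask grows to [True, True, False] and the loop stops once no new
# nodes are added.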
def _graph_is_connected(graph):
"""
Return whether the graph is connected (True) or not (False)
Parameters
----------
graph : array-like or sparse matrix, shape: (n_samples, n_samples)
adjacency matrix of the graph, non-zero weight means an edge
between the nodes
Returns
-------
is_connected : bool
True means the graph is fully connected and False means not
"""
if sparse.isspmatrix(graph):
# sparse graph, find all the connected components
n_connected_components, _ = connected_components(graph)
return n_connected_components == 1
else:
# dense graph, find all connected components start from node 0
return _graph_connected_component(graph, 0).sum() == graph.shape[0]
def compute_diffusion_maps(lapl_type, diffusion_map, lambdas, diffusion_time):
""" Credit to Satrajit Ghosh (http://satra.cogitatum.org/) for final steps """
# Check that diffusion maps is using the correct laplacian, warn otherwise
if lapl_type not in ['geometric', 'renormalized']:
warnings.warn("for correct diffusion maps embedding use laplacian type 'geometric' or 'renormalized'.")
# Step 5 of diffusion maps:
vectors = diffusion_map.copy()
psi = vectors/vectors[:,[0]]
diffusion_times = diffusion_time
if diffusion_time == 0:
lambdas = np.abs(lambdas)
diffusion_times = np.exp(1. - np.log(1 - lambdas[1:])/np.log(lambdas[1:]))
lambdas = lambdas / (1 - lambdas)
else:
lambdas = np.abs(lambdas)
lambdas = lambdas ** float(diffusion_time)
diffusion_map = psi * lambdas
return diffusion_map
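# Illustrative note on the rescaling above: every eigenvector is first divided
# elementwise by the leading eigenvector (psi = vectors / vectors[:, [0]]);
# column k of psi is then multiplied by |lambda_k| ** diffusion_time when
# diffusion_time > 0, or by lambda_k / (1 - lambda_k) (after taking absolute
# values) when diffusion_time == 0.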
def spectral_embedding(geom, n_components=8, eigen_solver='auto',
random_state=None, drop_first=True,
diffusion_maps = False, diffusion_time = 0, solver_kwds = None):
"""
Project the sample on the first eigen vectors of the graph Laplacian.
The adjacency matrix is used to compute a normalized graph Laplacian
whose principal eigenvectors (associated to the
smallest eigen values) represent the embedding coordinates of the data.
The ``adjacency`` variable is not strictly the adjacency matrix of a graph but more generally
an affinity or similarity matrix between samples (for instance the
heat kernel of a euclidean distance matrix or a k-NN matrix).
The Laplacian must be symmetric so that the eigen vector decomposition works as expected.
This is ensured by the default setting (for more details,
see the documentation in geometry.py).
The data and generic geometric parameters are passed via a Geometry object, which also
computes the Laplacian. By default, the 'geometric' Laplacian (or "debiased", or "renormalized" with
alpha=1) is used. This is the Laplacian construction defined in [Coifman and Lafon, 2006] (see also
documentation in laplacian.py). Thus, with diffusion_maps=False, spectral embedding is a modification
of the Laplacian Eigenmaps algorithm of [Belkin and Niyogi, 2002]; with diffusion_maps=False and
geom.laplacian_method='symmetricnormalized' it is exactly Laplacian Eigenmaps; with diffusion_maps=True
and diffusion_time>0 it is the Diffusion Maps algorithm of [Coifman and Lafon, 2006]; and
diffusion_maps=True with diffusion_time=0 is the same as diffusion_maps=False with the default
geom.laplacian_method.
Parameters
----------
geom : a Geometry object from megaman.embedding.geometry
n_components : integer, optional
The dimension of the projection subspace.
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
By default, arpack is used.
drop_first : bool, optional, default=True
Whether to drop the first eigenvector. For spectral embedding, this
should be True as the first eigenvector should be constant vector for
connected graph, but for spectral clustering, this should be kept as
False to retain the first eigenvector.
diffusion_maps : boolean, optional. Whether to return the diffusion map
version by re-scaling the embedding coordinates by the eigenvalues to the power
diffusion_time.
diffusion_time : if diffusion_maps=True, the eigenvectors of the Laplacian are rescaled by
(1-lambda)^diffusion_time, where lambda is the corresponding eigenvalue.
diffusion_time has the role of scale parameter. One of the main ideas of diffusion framework is
that running the diffusion forward in time (taking larger and larger
powers of the Laplacian/transition matrix) reveals the geometric structure of X at larger and
larger scales (the diffusion process).
diffusion_time = 0 empirically provides a reasonable balance from a clustering
perspective. Specifically, the notion of a cluster in the data set
is quantified as a region in which the probability of escaping this
region is low (within a certain time t).
Credit to Satrajit Ghosh (http://satra.cogitatum.org/) for description
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
Returns
-------
embedding : array, shape=(n_samples, n_components)
The reduced samples.
Notes
-----
Spectral embedding is most useful when the graph has one connected
component. If the graph has many components, the first few eigenvectors
will simply uncover the connected components of the graph.
References
----------
* http://en.wikipedia.org/wiki/LOBPCG
* Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method
Andrew V. Knyazev
http://dx.doi.org/10.1137%2FS1064827500366124
"""
random_state = check_random_state(random_state)
if geom.affinity_matrix is None:
geom.compute_affinity_matrix()
if not _graph_is_connected(geom.affinity_matrix):
warnings.warn("Graph is not fully connected: "
"spectral embedding may not work as expected.")
if geom.laplacian_matrix is None:
laplacian = geom.compute_laplacian_matrix(copy=False,
return_lapsym=True)
else:
laplacian = geom.laplacian_matrix
n_nodes = laplacian.shape[0]
lapl_type = geom.laplacian_method
eigen_solver, solver_kwds = check_eigen_solver(eigen_solver,solver_kwds,
size=laplacian.shape[0],
nvec=n_components + 1)
re_normalize = False
PD_solver = False
if eigen_solver in ['amg', 'lobpcg']: # these methods require a symmetric positive definite matrix!
epsilon = 2
PD_solver = True
if lapl_type not in ['symmetricnormalized', 'unnormalized']:
re_normalize = True
# If lobpcg (or amg with lobpcg) is chosen and
# If the Laplacian is non-symmetric then we need to extract:
# the w (weight) vector from geometry
# and the symmetric Laplacian = S.
# The actual Laplacian is L = W^{-1}S (Where W is the diagonal matrix of w)
# Which has the same spectrum as: L* = W^{-1/2}SW^{-1/2} which is symmetric
# We calculate the eigen-decomposition of L*: [D, V]
# then use W^{-1/2}V to compute the eigenvectors of L
# See (Handbook for Cluster Analysis Chapter 2 Proposition 1).
# However, since we censor the affinity matrix A at a radius it is not guaranteed
# to be positive definite. But since L = W^{-1}S has maximum eigenvalue 1 (stochastic matrix)
# and L* has the same spectrum it also has largest e-value of 1.
# therefore if we look at I - L* then this has smallest eigenvalue of 0 and so
# must be positive semi-definite. It also has the same spectrum as L* but
# lambda(I - L*) = 1 - lambda(L*).
# Finally, since we want positive definite not semi-definite we use (1+epsilon)*I
# instead of I to make the smallest eigenvalue epsilon.
if geom.laplacian_weights is None: # a laplacian existed but it wasn't called with return_lapsym = True
geom.compute_laplacian_matrix(copy = False, return_lapsym = True)
w = np.array(geom.laplacian_weights)
symmetrized_laplacian = geom.laplacian_symmetric.copy()
if sparse.isspmatrix(symmetrized_laplacian):
symmetrized_laplacian.data /= np.sqrt(w[symmetrized_laplacian.row])
symmetrized_laplacian.data /= np.sqrt(w[symmetrized_laplacian.col])
symmetrized_laplacian = (1+epsilon)*sparse.identity(n_nodes) - symmetrized_laplacian
else:
symmetrized_laplacian /= np.sqrt(w)
symmetrized_laplacian /= np.sqrt(w[:,np.newaxis])
symmetrized_laplacian = (1+epsilon)*np.identity(n_nodes) - symmetrized_laplacian
else: # using a symmetric laplacian but adjust to avoid positive definite errors
symmetrized_laplacian = geom.laplacian_matrix.copy()
if sparse.isspmatrix(symmetrized_laplacian):
symmetrized_laplacian = (1+epsilon)*sparse.identity(n_nodes) - symmetrized_laplacian
else:
symmetrized_laplacian = (1+epsilon)*np.identity(n_nodes) - symmetrized_laplacian
if PD_solver: # then eI - L was used, fix the eigenvalues
lambdas, diffusion_map = eigen_decomposition(symmetrized_laplacian, n_components+1, eigen_solver=eigen_solver,
random_state=random_state, drop_first=drop_first, largest = False,
solver_kwds=solver_kwds)
lambdas = -lambdas + epsilon
else:
lambdas, diffusion_map = eigen_decomposition(laplacian, n_components+1, eigen_solver=eigen_solver,
random_state=random_state, drop_first=drop_first, largest = True,
solver_kwds=solver_kwds)
if re_normalize:
diffusion_map /= np.sqrt(w[:, np.newaxis]) # put back on original Laplacian space
diffusion_map /= np.linalg.norm(diffusion_map, axis = 0) # norm 1 vectors
# sort the eigenvalues
ind = np.argsort(lambdas); ind = ind[::-1]
lambdas = lambdas[ind]; lambdas[0] = 0
diffusion_map = diffusion_map[:, ind]
eigenvalues = lambdas.copy()
eigenvectors = diffusion_map.copy()
if diffusion_maps:
diffusion_map = compute_diffusion_maps(lapl_type, diffusion_map, lambdas, diffusion_time)
if drop_first:
embedding = diffusion_map[:, 1:(n_components+1)]
eigenvectors = eigenvectors[:, 1:(n_components+1)]
eigenvalues = eigenvalues[1:(n_components+1)]
else:
embedding = diffusion_map[:, :n_components]
eigenvectors = eigenvectors[:, :(n_components)]
eigenvalues = eigenvalues[:(n_components)]
return embedding, eigenvalues, eigenvectors
class SpectralEmbedding(BaseEmbedding):
"""
Spectral embedding for non-linear dimensionality reduction.
Forms an affinity matrix given by the specified function and
applies spectral decomposition to the corresponding graph laplacian.
The resulting transformation is given by the value of the
eigenvectors for each data point.
Parameters
-----------
n_components : integer
number of coordinates for the manifold.
radius : float (optional)
radius for adjacency and affinity calculations. Will be overridden if
either is set in `geom`
geom : dict or megaman.geometry.Geometry object
specification of geometry parameters: keys are
["adjacency_method", "adjacency_kwds", "affinity_method",
"affinity_kwds", "laplacian_method", "laplacian_kwds"]
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue
decomposition. Uses a dense data array, and thus should be avoided
for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.RandomState
drop_first : bool, optional, default=True
Whether to drop the first eigenvector. For spectral embedding, this
should be True as the first eigenvector should be constant vector for
connected graph, but for spectral clustering, this should be kept as
False to retain the first eigenvector.
diffusion_maps : boolean, optional. Whether to return the diffusion map
version by re-scaling the embedding by the eigenvalues.
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
References
----------
.. [1] A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
.. [2] On Spectral Clustering: Analysis and an algorithm, 2011
Andrew Y. Ng, Michael I. Jordan, Yair Weiss
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.8100
.. [3] Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
"""
def __init__(self, n_components=2, radius=None, geom=None,
eigen_solver='auto', random_state=None,
drop_first=True, diffusion_maps=False, diffusion_time=0,solver_kwds=None):
self.n_components = n_components
self.radius = radius
self.geom = geom
self.eigen_solver = eigen_solver
self.random_state = random_state
self.drop_first = drop_first
self.diffusion_maps = diffusion_maps
self.diffusion_time = diffusion_time
self.solver_kwds = solver_kwds
def fit(self, X, y=None, input_type='data'):
"""
Fit the model from data in X.
Parameters
----------
input_type : string, one of: 'data', 'distance' or 'affinity'.
The values of input data X. (default = 'data')
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If self.input_type is distance, or affinity:
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed distance or adjacency graph
computed from samples.
Returns
-------
self : object
Returns the instance itself.
"""
X = self._validate_input(X, input_type)
self.fit_geometry(X, input_type)
random_state = check_random_state(self.random_state)
self.embedding_, self.eigenvalues_, self.eigenvectors_ = spectral_embedding(self.geom_,
n_components = self.n_components,
eigen_solver = self.eigen_solver,
random_state = random_state,
drop_first = self.drop_first,
diffusion_maps = self.diffusion_maps,
diffusion_time = self.diffusion_time,
solver_kwds = self.solver_kwds)
self.affinity_matrix_ = self.geom_.affinity_matrix
self.laplacian_matrix_ = self.geom_.laplacian_matrix
self.laplacian_matrix_type_ = self.geom_.laplacian_method
return self
def predict(self, X_test, y=None):
"""
Predict embedding on new data X_test given the existing embedding on training data
Uses the Nystrom Extension to estimate the eigenvectors.
Currently only works with input_type data (i.e. not affinity or distance)
"""
if not hasattr(self, 'geom_'):
raise RuntimeError('the .fit() function must be called before the .predict() function')
if self.geom_.X is None:
raise NotImplementedError('method only implemented when X passed as data')
# Complete the adjacency matrix
adjacency_kwds = self.geom_.adjacency_kwds
if self.geom_.adjacency_method == 'cyflann':
if 'cyflann_kwds' in adjacency_kwds.keys():
cyflann_kwds = adjacency_kwds['cyflann_kwds']
else:
cyflann_kwds = {}
total_adjacency_matrix = complete_adjacency_matrix(self.geom_.adjacency_matrix,
self.geom_.X,
X_test,adjacency_kwds)
# Compute the affinity matrix, check method and kwds
if self.geom_.affinity_kwds is not None:
affinity_kwds = self.geom_.affinity_kwds
else:
affinity_kwds = {}
if self.geom_.affinity_method is not None:
affinity_method = self.geom_.affinity_method
else:
affinity_method = 'auto'
total_affinity_matrix = compute_affinity_matrix(total_adjacency_matrix, affinity_method,
**affinity_kwds)
# Compute the laplacian matrix, check method and kwds
if self.geom_.laplacian_kwds is not None:
laplacian_kwds = self.geom_.laplacian_kwds
else:
laplacian_kwds = {}
if self.geom_.laplacian_method is not None:
laplacian_method = self.geom_.laplacian_method
else:
laplacian_method = 'auto'
total_laplacian_matrix = compute_laplacian_matrix(total_affinity_matrix, laplacian_method,
**laplacian_kwds)
# Take the columns of Laplacian and existing embedding and pass to Nystrom Extension
(n_sample_train) = self.geom_.adjacency_matrix.shape[0]
total_laplacian_matrix = total_laplacian_matrix.tocsr()
C = total_laplacian_matrix[:, :n_sample_train]
# warnings.warn(str(C.shape))
eigenvalues, eigenvectors = nystrom_extension(C, self.eigenvectors_, self.eigenvalues_)
# If diffusion maps compute diffusion time etc
if self.diffusion_maps:
embedding = compute_diffusion_maps(laplacian_method, eigenvectors, eigenvalues, self.diffusion_time)
else:
embedding = eigenvectors
(n_sample_test) = X_test.shape[0]
embedding_test=embedding[-n_sample_test:, :]
return embedding_test, embedding
|
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
import os
from pyface.constant import OK
from pyface.file_dialog import FileDialog
from pychron.entry.edit_irradiation_geometry import EditIrradiationGeometry
from pychron.envisage.resources import icon
from pychron.envisage.tasks.actions import PAction as Action, PTaskAction as TaskAction
from pychron.pychron_constants import DVC_PROTOCOL
class AddFluxMonitorAction(Action):
name = 'Add/Edit Flux Monitors'
def perform(self, event):
app = event.task.window.application
s = app.get_service('pychron.entry.editors.flux_monitor_editor.FluxMonitorEditor')
s.add_flux_monitor()
class SensitivityEntryAction(Action):
name = 'Sensitivity'
# accelerator = 'Ctrl+Shift+\\'
id = 'pychron.sensitivity'
def perform(self, event):
pid = 'pychron.entry.sensitivity.task'
app = event.task.window.application
app.get_task(pid)
class SaveSensitivityAction(TaskAction):
name = 'Save'
image = icon('database_save')
method = 'save'
class AddSensitivityAction(TaskAction):
name = 'Add'
image = icon('database_add')
method = 'add'
class DatabaseSaveAction(TaskAction):
name = 'Database Save'
description = 'Save current changes to the database'
method = 'save_to_db'
image = icon('database_save')
class ClearSelectionAction(TaskAction):
name = 'Clear Selection'
image = icon('table_lightning')
method = 'clear_selection'
class RecoverAction(TaskAction):
name = 'Recover'
method = 'recover'
class SavePDFAction(TaskAction):
name = 'Save PDF'
image = icon('file_pdf')
method = 'save_pdf'
class MakeIrradiationBookPDFAction(TaskAction):
name = 'Make Irradiation Book'
image = icon('file_pdf')
method = 'make_irradiation_book_pdf'
class GenerateIdentifiersAction(TaskAction):
name = 'Generate Identifiers'
image = icon('table_lightning')
method = 'generate_identifiers'
description = 'Automatically generate labnumbers (aka identifiers) for each irradiation position in the ' \
'currently selected irradiation.'
class PreviewGenerateIdentifiersAction(TaskAction):
name = 'Preview Generate Identifiers'
image = icon('table_lightning')
method = 'preview_generate_identifiers'
class ImportIrradiationAction(TaskAction):
name = 'Import Irradiation...'
def perform(self, event):
app = event.task.window.application
mdb = 'pychron.mass_spec.database.massspec_database_adapter.MassSpecDatabaseAdapter'
mssource = app.get_service(mdb)
mssource.bind_preferences()
from pychron.data_mapper import do_import_irradiation
dvc = app.get_service('pychron.dvc.dvc.DVC')
plugin = app.get_plugin('pychron.entry.plugin')
sources = {obj: name for name, obj in plugin.data_sources}
sources['Mass Spec'] = mssource
do_import_irradiation(dvc=dvc, sources=sources, default_source='Mass Spec')
class ImportAnalysesAction(Action):
name = 'Import Analyses...'
def perform(self, event):
app = event.task.window.application
dvc = app.get_service('pychron.dvc.dvc.DVC')
from pychron.data_mapper import do_import_analyses
# sources = {}
# usgsvsc = app.get_service('pychron.data_mapper.sources.usgs_vsc_source.ViewUSGSVSCSource')
# sources[usgsvsc] = 'USGS VSC'
plugin = app.get_plugin('pychron.entry.plugin')
sources = {obj: name for name, obj in plugin.data_sources}
do_import_analyses(dvc, sources)
class GenerateTrayAction(Action):
name = 'Generate Tray Image'
image = icon('table_lightning')
# method = 'generate_tray'
description = 'Make an irradiation tray image from an irradiation tray text file.'
def perform(self, event):
# p='/Users/ross/Sandbox/entry_tray'
# p = self.open_file_dialog()
p = None
from pychron.paths import paths
dlg = FileDialog(action='open', default_directory=os.path.join(paths.meta_root, 'irradiation_holders'))
if dlg.open() == OK:
p = dlg.path
if p is not None:
from pychron.entry.graphic_generator import open_txt
bounds = (2.54, 2.54)
radius = 0.03 * 2.54
gcc, gm = open_txt(p,
bounds,
radius,
convert_mm=True,
make=True,
rotate=0)
info = gcc.edit_traits(kind='livemodal')
# from pychron.entry.graphic_generator import GraphicModel
# from pychron.entry.graphic_generator import GraphicGeneratorController
#
# gm = GraphicModel()
#
# # op='/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/newtrays/26_no_spokes.txt'
#
# gm.srcpath = p
# # gm.xmlpath=p
# # p = make_xml(p,
# # default_radius=radius,
# # default_bounds=bounds,
# # convert_mm=convert_mm,
# # use_label=use_label,
# # make=make,
# # rotate=rotate)
# #
# # # p = '/Users/ross/Sandbox/graphic_gen_from_csv.xml'
# # gm.load(p)
# gcc = GraphicGeneratorController(model=gm)
# info = gcc.edit_traits(kind='livemodal')
# if info.result:
# if self.confirmation_dialog(
# 'Do you want to save this tray to the database. Saving tray as "{}"'.format(gm.name)):
# self.manager.save_tray_to_db(gm.srcpath, gm.name)
class ImportIrradiationFileAction(TaskAction):
name = 'Import Irradiation File'
image = icon('file_xls')
method = 'import_irradiation_load_xls'
description = 'Import irradiation information from an Excel file. Use "Irradiation Template" ' \
'to generate a boilerplate irradiation template'
class MakeIrradiationTemplateAction(Action):
name = 'Irradiation Template'
image = icon('file_xls')
description = 'Make an Excel irradiation template that can be used to import irradiation information.'
def perform(self, event):
from pyface.file_dialog import FileDialog
dialog = FileDialog(action='save as', default_filename='IrradiationTemplate.xls')
from pyface.constant import OK
if dialog.open() == OK:
path = dialog.path
if path:
from pychron.core.helpers.filetools import add_extension
path = add_extension(path, '.xls')
from pychron.entry.loaders.irradiation_template import IrradiationTemplate
i = IrradiationTemplate()
i.make_template(path)
from pyface.confirmation_dialog import confirm
if confirm(None, 'Template saved to {}.\n\nWould you like to open the template?'.format(path)):
from pychron.core.helpers.filetools import view_file
application = 'Microsoft Office 2011/Microsoft Excel'
view_file(path, application=application)
# from pyface.message_dialog import information
# information(None, 'Template saved to {}'.format(path))
# class ImportSampleMetadataAction(TaskAction):
# name = 'Import Sample Metadata...'
# method = 'import_sample_metadata'
class ExportIrradiationAction(TaskAction):
name = 'Export Irradiation...'
method = 'export_irradiation'
class GenerateIrradiationTableAction(TaskAction):
name = 'Generate Irradiation Table'
accelerator = 'Ctrl+0'
# ddescription = 'Do not use!'
def perform(self, event):
# from pychron.entry.irradiation_table_writer import IrradiationTableWriter
# a = IrradiationTableWriter()
# a.make()
from pychron.entry.irradiation_xls_writer import IrradiationXLSTableWriter
dvc = self.task.window.application.get_service(DVC_PROTOCOL)
if dvc is not None:
if dvc.db.connect():
names = dvc.get_irradiation_names()
a = IrradiationXLSTableWriter(dvc=dvc)
a.make(names)
else:
from pyface.message_dialog import warning
warning(None, 'DVC Plugin is required. Please enable')
class ImportIrradiationGeometryAction(Action):
name = 'Import Irradiation Geometry'
def perform(self, event):
dvc = event.task.application.get_service(DVC_PROTOCOL)
if dvc is not None:
dialog = FileDialog(action='open', default_directory=os.path.join(os.path.expanduser('~'), 'Desktop'))
if dialog.open() == OK:
if dialog.path:
dvc.meta_repo.add_irradiation_geometry_file(dialog.path)
class EditIrradiationGeometryAction(Action):
name = 'Edit Irradiation Geometry'
def perform(self, event):
dvc = event.task.application.get_service(DVC_PROTOCOL)
if dvc is not None:
eiv = EditIrradiationGeometry(dvc=dvc)
eiv.edit_traits()
class TransferJAction(TaskAction):
name = 'Transfer J Data...'
method = 'transfer_j'
class GetIGSNAction(TaskAction):
name = 'Get IGSNs'
method = 'get_igsns'
class GenerateStatusReportAction(TaskAction):
name = 'Status Report...'
method = 'generate_status_report'
class SyncMetaDataAction(TaskAction):
name = 'Sync Repo/DB Metadata'
method = 'sync_metadata'
# def perform(self, event):
# app = event.task.window.application
# app.information_dialog('Sync Repo disabled')
# return
#
# dvc = app.get_service('pychron.dvc.dvc.DVC')
# if dvc:
# dvc.repository_db_sync('IR986', dry_run=False)
# ============= EOF =============================================
|
|
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for the API /ports/ methods.
"""
import datetime
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six.moves.urllib import parse as urlparse
from testtools.matchers import HasLength
from wsme import types as wtypes
from ironic.api.controllers import base as api_controller
from ironic.api.controllers.v1 import port as api_port
from ironic.api.controllers.v1 import utils as api_utils
from ironic.common import exception
from ironic.conductor import rpcapi
from ironic.tests.api import base as api_base
from ironic.tests.api import utils as apiutils
from ironic.tests import base
from ironic.tests.db import utils as dbutils
from ironic.tests.objects import utils as obj_utils
# NOTE(lucasagomes): When creating a port via API (POST)
# we have to use node_uuid
def post_get_test_port(**kw):
port = apiutils.port_post_data(**kw)
node = dbutils.get_test_node()
del port['node_id']
port['node_uuid'] = kw.get('node_uuid', node['uuid'])
return port
class TestPortObject(base.TestCase):
def test_port_init(self):
port_dict = apiutils.port_post_data(node_id=None)
del port_dict['extra']
port = api_port.Port(**port_dict)
self.assertEqual(wtypes.Unset, port.extra)
class TestListPorts(api_base.FunctionalTest):
def setUp(self):
super(TestListPorts, self).setUp()
self.node = obj_utils.create_test_node(self.context)
def test_empty(self):
data = self.get_json('/ports')
self.assertEqual([], data['ports'])
def test_one(self):
port = obj_utils.create_test_port(self.context, node_id=self.node.id)
data = self.get_json('/ports')
self.assertEqual(port.uuid, data['ports'][0]["uuid"])
self.assertNotIn('extra', data['ports'][0])
self.assertNotIn('node_uuid', data['ports'][0])
# never expose the node_id
self.assertNotIn('node_id', data['ports'][0])
def test_get_one(self):
port = obj_utils.create_test_port(self.context, node_id=self.node.id)
data = self.get_json('/ports/%s' % port.uuid)
self.assertEqual(port.uuid, data['uuid'])
self.assertIn('extra', data)
self.assertIn('node_uuid', data)
# never expose the node_id
self.assertNotIn('node_id', data)
def test_detail(self):
port = obj_utils.create_test_port(self.context, node_id=self.node.id)
data = self.get_json('/ports/detail')
self.assertEqual(port.uuid, data['ports'][0]["uuid"])
self.assertIn('extra', data['ports'][0])
self.assertIn('node_uuid', data['ports'][0])
# never expose the node_id
self.assertNotIn('node_id', data['ports'][0])
def test_detail_against_single(self):
port = obj_utils.create_test_port(self.context, node_id=self.node.id)
response = self.get_json('/ports/%s/detail' % port.uuid,
expect_errors=True)
self.assertEqual(404, response.status_int)
def test_many(self):
ports = []
for id_ in range(5):
port = obj_utils.create_test_port(self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
address='52:54:00:cf:2d:3%s' % id_)
ports.append(port.uuid)
data = self.get_json('/ports')
self.assertEqual(len(ports), len(data['ports']))
uuids = [n['uuid'] for n in data['ports']]
six.assertCountEqual(self, ports, uuids)
def test_links(self):
uuid = uuidutils.generate_uuid()
obj_utils.create_test_port(self.context,
uuid=uuid,
node_id=self.node.id)
data = self.get_json('/ports/%s' % uuid)
self.assertIn('links', data.keys())
self.assertEqual(2, len(data['links']))
self.assertIn(uuid, data['links'][0]['href'])
for l in data['links']:
bookmark = l['rel'] == 'bookmark'
self.assertTrue(self.validate_link(l['href'], bookmark=bookmark))
def test_collection_links(self):
ports = []
for id_ in range(5):
port = obj_utils.create_test_port(self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
address='52:54:00:cf:2d:3%s' % id_)
ports.append(port.uuid)
data = self.get_json('/ports/?limit=3')
self.assertEqual(3, len(data['ports']))
next_marker = data['ports'][-1]['uuid']
self.assertIn(next_marker, data['next'])
def test_collection_links_default_limit(self):
cfg.CONF.set_override('max_limit', 3, 'api')
ports = []
for id_ in range(5):
port = obj_utils.create_test_port(self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
address='52:54:00:cf:2d:3%s' % id_)
ports.append(port.uuid)
data = self.get_json('/ports')
self.assertEqual(3, len(data['ports']))
next_marker = data['ports'][-1]['uuid']
self.assertIn(next_marker, data['next'])
def test_port_by_address(self):
address_template = "aa:bb:cc:dd:ee:f%d"
for id_ in range(3):
obj_utils.create_test_port(self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
address=address_template % id_)
target_address = address_template % 1
data = self.get_json('/ports?address=%s' % target_address)
self.assertThat(data['ports'], HasLength(1))
self.assertEqual(target_address, data['ports'][0]['address'])
def test_port_by_address_non_existent_address(self):
# non-existent address
data = self.get_json('/ports?address=%s' % 'aa:bb:cc:dd:ee:ff')
self.assertThat(data['ports'], HasLength(0))
def test_port_by_address_invalid_address_format(self):
obj_utils.create_test_port(self.context, node_id=self.node.id)
invalid_address = 'invalid-mac-format'
response = self.get_json('/ports?address=%s' % invalid_address,
expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn(invalid_address, response.json['error_message'])
def test_sort_key(self):
ports = []
for id_ in range(3):
port = obj_utils.create_test_port(self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
address='52:54:00:cf:2d:3%s' % id_)
ports.append(port.uuid)
data = self.get_json('/ports?sort_key=uuid')
uuids = [n['uuid'] for n in data['ports']]
self.assertEqual(sorted(ports), uuids)
def test_sort_key_invalid(self):
invalid_key = 'foo'
response = self.get_json('/ports?sort_key=%s' % invalid_key,
expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn(invalid_key, response.json['error_message'])
@mock.patch.object(api_utils, 'get_rpc_node')
def test_get_all_by_node_name_ok(self, mock_get_rpc_node):
# GET /v1/ports specifying node_name - success
mock_get_rpc_node.return_value = self.node
for i in range(5):
if i < 3:
node_id = self.node.id
else:
node_id = 100000 + i
obj_utils.create_test_port(self.context,
node_id=node_id,
uuid=uuidutils.generate_uuid(),
address='52:54:00:cf:2d:3%s' % i)
data = self.get_json("/ports?node=%s" % 'test-node',
headers={api_controller.Version.string: '1.5'})
self.assertEqual(3, len(data['ports']))
@mock.patch.object(api_utils, 'get_rpc_node')
def test_get_all_by_node_uuid_and_name(self, mock_get_rpc_node):
# GET /v1/ports specifying node and uuid - should only use node_uuid
mock_get_rpc_node.return_value = self.node
obj_utils.create_test_port(self.context, node_id=self.node.id)
self.get_json('/ports/detail?node_uuid=%s&node=%s' %
(self.node.uuid, 'node-name'))
mock_get_rpc_node.assert_called_once_with(self.node.uuid)
@mock.patch.object(api_utils, 'get_rpc_node')
def test_get_all_by_node_name_not_supported(self, mock_get_rpc_node):
# GET /v1/ports specifying node_name - name not supported
mock_get_rpc_node.side_effect = exception.InvalidUuidOrName(
name=self.node.uuid)
for i in range(3):
obj_utils.create_test_port(self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
address='52:54:00:cf:2d:3%s' % i)
data = self.get_json("/ports?node=%s" % 'test-node',
expect_errors=True)
self.assertEqual(0, mock_get_rpc_node.call_count)
self.assertEqual(406, data.status_int)
@mock.patch.object(api_utils, 'get_rpc_node')
def test_detail_by_node_name_ok(self, mock_get_rpc_node):
# GET /v1/ports/detail specifying node_name - success
mock_get_rpc_node.return_value = self.node
port = obj_utils.create_test_port(self.context, node_id=self.node.id)
data = self.get_json('/ports/detail?node=%s' % 'test-node',
headers={api_controller.Version.string: '1.5'})
self.assertEqual(port.uuid, data['ports'][0]['uuid'])
self.assertEqual(self.node.uuid, data['ports'][0]['node_uuid'])
@mock.patch.object(api_utils, 'get_rpc_node')
def test_detail_by_node_name_not_supported(self, mock_get_rpc_node):
# GET /v1/ports/detail specifying node_name - name not supported
mock_get_rpc_node.side_effect = exception.InvalidUuidOrName(
name=self.node.uuid)
obj_utils.create_test_port(self.context, node_id=self.node.id)
data = self.get_json('/ports/detail?node=%s' % 'test-node',
expect_errors=True)
self.assertEqual(0, mock_get_rpc_node.call_count)
self.assertEqual(406, data.status_int)
@mock.patch.object(api_port.PortsController, '_get_ports_collection')
def test_detail_with_incorrect_api_usage(self, mock_gpc):
# GET /v1/ports/detail specifying node and node_uuid. In this case
# we expect the node_uuid interface to be used.
self.get_json('/ports/detail?node=%s&node_uuid=%s' %
('test-node', self.node.uuid))
mock_gpc.assert_called_once_with(self.node.uuid, mock.ANY, mock.ANY,
mock.ANY, mock.ANY, mock.ANY,
mock.ANY, mock.ANY)
@mock.patch.object(rpcapi.ConductorAPI, 'update_port')
class TestPatch(api_base.FunctionalTest):
def setUp(self):
super(TestPatch, self).setUp()
self.node = obj_utils.create_test_node(self.context)
self.port = obj_utils.create_test_port(self.context,
node_id=self.node.id)
p = mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for')
self.mock_gtf = p.start()
self.mock_gtf.return_value = 'test-topic'
self.addCleanup(p.stop)
def test_update_byid(self, mock_upd):
extra = {'foo': 'bar'}
mock_upd.return_value = self.port
mock_upd.return_value.extra = extra
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/extra/foo',
'value': 'bar',
'op': 'add'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
self.assertEqual(extra, response.json['extra'])
kargs = mock_upd.call_args[0][1]
self.assertEqual(extra, kargs.extra)
def test_update_byaddress_not_allowed(self, mock_upd):
extra = {'foo': 'bar'}
mock_upd.return_value = self.port
mock_upd.return_value.extra = extra
response = self.patch_json('/ports/%s' % self.port.address,
[{'path': '/extra/foo',
'value': 'bar',
'op': 'add'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertIn(self.port.address, response.json['error_message'])
self.assertFalse(mock_upd.called)
def test_update_not_found(self, mock_upd):
uuid = uuidutils.generate_uuid()
response = self.patch_json('/ports/%s' % uuid,
[{'path': '/extra/foo',
'value': 'bar',
'op': 'add'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(404, response.status_int)
self.assertTrue(response.json['error_message'])
self.assertFalse(mock_upd.called)
def test_replace_singular(self, mock_upd):
address = 'aa:bb:cc:dd:ee:ff'
mock_upd.return_value = self.port
mock_upd.return_value.address = address
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/address',
'value': address,
'op': 'replace'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
self.assertEqual(address, response.json['address'])
self.assertTrue(mock_upd.called)
kargs = mock_upd.call_args[0][1]
self.assertEqual(address, kargs.address)
def test_replace_address_already_exist(self, mock_upd):
address = 'aa:aa:aa:aa:aa:aa'
mock_upd.side_effect = exception.MACAlreadyExists(mac=address)
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/address',
'value': address,
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(409, response.status_code)
self.assertTrue(response.json['error_message'])
self.assertTrue(mock_upd.called)
kargs = mock_upd.call_args[0][1]
self.assertEqual(address, kargs.address)
def test_replace_node_uuid(self, mock_upd):
mock_upd.return_value = self.port
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/node_uuid',
'value': self.node.uuid,
'op': 'replace'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
def test_add_node_uuid(self, mock_upd):
mock_upd.return_value = self.port
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/node_uuid',
'value': self.node.uuid,
'op': 'add'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
def test_add_node_id(self, mock_upd):
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/node_id',
'value': '1',
'op': 'add'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertFalse(mock_upd.called)
def test_replace_node_id(self, mock_upd):
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/node_id',
'value': '1',
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertFalse(mock_upd.called)
def test_remove_node_id(self, mock_upd):
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/node_id',
'op': 'remove'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertFalse(mock_upd.called)
def test_replace_non_existent_node_uuid(self, mock_upd):
node_uuid = '12506333-a81c-4d59-9987-889ed5f8687b'
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/node_uuid',
'value': node_uuid,
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertIn(node_uuid, response.json['error_message'])
self.assertFalse(mock_upd.called)
def test_replace_multi(self, mock_upd):
extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"}
self.port.extra = extra
self.port.save()
# mutate extra so we replace all of them
extra = dict((k, extra[k] + 'x') for k in extra.keys())
patch = []
for k in extra.keys():
patch.append({'path': '/extra/%s' % k,
'value': extra[k],
'op': 'replace'})
mock_upd.return_value = self.port
mock_upd.return_value.extra = extra
response = self.patch_json('/ports/%s' % self.port.uuid,
patch)
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
self.assertEqual(extra, response.json['extra'])
kargs = mock_upd.call_args[0][1]
self.assertEqual(extra, kargs.extra)
def test_remove_multi(self, mock_upd):
extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"}
self.port.extra = extra
self.port.save()
# Removing one item from the collection
extra.pop('foo1')
mock_upd.return_value = self.port
mock_upd.return_value.extra = extra
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/extra/foo1',
'op': 'remove'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
self.assertEqual(extra, response.json['extra'])
kargs = mock_upd.call_args[0][1]
self.assertEqual(extra, kargs.extra)
# Removing the collection
extra = {}
mock_upd.return_value.extra = extra
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/extra', 'op': 'remove'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
self.assertEqual({}, response.json['extra'])
kargs = mock_upd.call_args[0][1]
self.assertEqual(extra, kargs.extra)
# Assert nothing else was changed
self.assertEqual(self.port.uuid, response.json['uuid'])
self.assertEqual(self.port.address, response.json['address'])
def test_remove_non_existent_property_fail(self, mock_upd):
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/extra/non-existent',
'op': 'remove'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertTrue(response.json['error_message'])
self.assertFalse(mock_upd.called)
def test_remove_mandatory_field(self, mock_upd):
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/address',
'op': 'remove'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertTrue(response.json['error_message'])
self.assertFalse(mock_upd.called)
def test_add_root(self, mock_upd):
address = 'aa:bb:cc:dd:ee:ff'
mock_upd.return_value = self.port
mock_upd.return_value.address = address
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/address',
'value': address,
'op': 'add'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
self.assertEqual(address, response.json['address'])
self.assertTrue(mock_upd.called)
kargs = mock_upd.call_args[0][1]
self.assertEqual(address, kargs.address)
def test_add_root_non_existent(self, mock_upd):
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/foo',
'value': 'bar',
'op': 'add'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['error_message'])
self.assertFalse(mock_upd.called)
def test_add_multi(self, mock_upd):
extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"}
patch = []
for k in extra.keys():
patch.append({'path': '/extra/%s' % k,
'value': extra[k],
'op': 'add'})
mock_upd.return_value = self.port
mock_upd.return_value.extra = extra
response = self.patch_json('/ports/%s' % self.port.uuid,
patch)
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
self.assertEqual(extra, response.json['extra'])
kargs = mock_upd.call_args[0][1]
self.assertEqual(extra, kargs.extra)
def test_remove_uuid(self, mock_upd):
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/uuid',
'op': 'remove'}],
expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
self.assertFalse(mock_upd.called)
def test_update_address_invalid_format(self, mock_upd):
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/address',
'value': 'invalid-format',
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['error_message'])
self.assertFalse(mock_upd.called)
def test_update_port_address_normalized(self, mock_upd):
address = 'AA:BB:CC:DD:EE:FF'
mock_upd.return_value = self.port
mock_upd.return_value.address = address.lower()
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/address',
'value': address,
'op': 'replace'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
self.assertEqual(address.lower(), response.json['address'])
kargs = mock_upd.call_args[0][1]
self.assertEqual(address.lower(), kargs.address)
class TestPost(api_base.FunctionalTest):
def setUp(self):
super(TestPost, self).setUp()
self.node = obj_utils.create_test_node(self.context)
@mock.patch.object(timeutils, 'utcnow')
def test_create_port(self, mock_utcnow):
pdict = post_get_test_port()
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.post_json('/ports', pdict)
self.assertEqual(201, response.status_int)
result = self.get_json('/ports/%s' % pdict['uuid'])
self.assertEqual(pdict['uuid'], result['uuid'])
self.assertFalse(result['updated_at'])
return_created_at = timeutils.parse_isotime(
result['created_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_created_at)
# Check location header
self.assertIsNotNone(response.location)
expected_location = '/v1/ports/%s' % pdict['uuid']
self.assertEqual(urlparse.urlparse(response.location).path,
expected_location)
def test_create_port_doesnt_contain_id(self):
with mock.patch.object(self.dbapi, 'create_port',
wraps=self.dbapi.create_port) as cp_mock:
pdict = post_get_test_port(extra={'foo': 123})
self.post_json('/ports', pdict)
result = self.get_json('/ports/%s' % pdict['uuid'])
self.assertEqual(pdict['extra'], result['extra'])
cp_mock.assert_called_once_with(mock.ANY)
# Check that 'id' is not in first arg of positional args
self.assertNotIn('id', cp_mock.call_args[0][0])
def test_create_port_generate_uuid(self):
pdict = post_get_test_port()
del pdict['uuid']
response = self.post_json('/ports', pdict)
result = self.get_json('/ports/%s' % response.json['uuid'])
self.assertEqual(pdict['address'], result['address'])
self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
def test_create_port_valid_extra(self):
pdict = post_get_test_port(extra={'str': 'foo', 'int': 123,
'float': 0.1, 'bool': True,
'list': [1, 2], 'none': None,
'dict': {'cat': 'meow'}})
self.post_json('/ports', pdict)
result = self.get_json('/ports/%s' % pdict['uuid'])
self.assertEqual(pdict['extra'], result['extra'])
def test_create_port_no_mandatory_field_address(self):
pdict = post_get_test_port()
del pdict['address']
response = self.post_json('/ports', pdict, expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_create_port_no_mandatory_field_node_uuid(self):
pdict = post_get_test_port()
del pdict['node_uuid']
response = self.post_json('/ports', pdict, expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_create_port_invalid_addr_format(self):
pdict = post_get_test_port(address='invalid-format')
response = self.post_json('/ports', pdict, expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_create_port_address_normalized(self):
address = 'AA:BB:CC:DD:EE:FF'
pdict = post_get_test_port(address=address)
self.post_json('/ports', pdict)
result = self.get_json('/ports/%s' % pdict['uuid'])
self.assertEqual(address.lower(), result['address'])
def test_create_port_with_hyphens_delimiter(self):
pdict = post_get_test_port()
colonsMAC = pdict['address']
hyphensMAC = colonsMAC.replace(':', '-')
pdict['address'] = hyphensMAC
response = self.post_json('/ports', pdict, expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_create_port_invalid_node_uuid_format(self):
pdict = post_get_test_port(node_uuid='invalid-format')
response = self.post_json('/ports', pdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['error_message'])
def test_node_uuid_to_node_id_mapping(self):
pdict = post_get_test_port(node_uuid=self.node['uuid'])
self.post_json('/ports', pdict)
        # GET doesn't return the node_id; it's an internal value
port = self.dbapi.get_port_by_uuid(pdict['uuid'])
self.assertEqual(self.node['id'], port.node_id)
def test_create_port_node_uuid_not_found(self):
pdict = post_get_test_port(
node_uuid='1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e')
response = self.post_json('/ports', pdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['error_message'])
def test_create_port_address_already_exist(self):
address = 'AA:AA:AA:11:22:33'
pdict = post_get_test_port(address=address)
self.post_json('/ports', pdict)
pdict['uuid'] = uuidutils.generate_uuid()
response = self.post_json('/ports', pdict, expect_errors=True)
self.assertEqual(409, response.status_int)
self.assertEqual('application/json', response.content_type)
error_msg = response.json['error_message']
self.assertTrue(error_msg)
self.assertIn(address, error_msg.upper())
@mock.patch.object(rpcapi.ConductorAPI, 'destroy_port')
class TestDelete(api_base.FunctionalTest):
def setUp(self):
super(TestDelete, self).setUp()
self.node = obj_utils.create_test_node(self.context)
self.port = obj_utils.create_test_port(self.context,
node_id=self.node.id)
gtf = mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for')
self.mock_gtf = gtf.start()
self.mock_gtf.return_value = 'test-topic'
self.addCleanup(gtf.stop)
def test_delete_port_byaddress(self, mock_dpt):
response = self.delete('/ports/%s' % self.port.address,
expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn(self.port.address, response.json['error_message'])
def test_delete_port_byid(self, mock_dpt):
self.delete('/ports/%s' % self.port.uuid,
expect_errors=True)
self.assertTrue(mock_dpt.called)
def test_delete_port_node_locked(self, mock_dpt):
self.node.reserve(self.context, 'fake', self.node.uuid)
mock_dpt.side_effect = exception.NodeLocked(node='fake-node',
host='fake-host')
ret = self.delete('/ports/%s' % self.port.uuid, expect_errors=True)
self.assertEqual(409, ret.status_code)
self.assertTrue(ret.json['error_message'])
self.assertTrue(mock_dpt.called)
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible HTML table reader and writer.
html.py:
Classes to read and write HTML tables
`BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/>`_
must be installed to read HTML tables.
"""
from __future__ import absolute_import, division, print_function
import warnings
import numpy
from ...extern import six
from ...extern.six.moves import zip, range
from . import core
from ...table import Column
from ...utils.xml import writer
from copy import deepcopy
class SoupString(str):
"""
Allows for strings to hold BeautifulSoup data.
"""
def __new__(cls, *args, **kwargs):
return str.__new__(cls, *args, **kwargs)
def __init__(self, val):
self.soup = val
class ListWriter:
"""
Allows for XMLWriter to write to a list instead of a file.
"""
def __init__(self, out):
self.out = out
def write(self, data):
self.out.append(data)
def identify_table(soup, htmldict, numtable):
"""
Checks whether the given BeautifulSoup tag is the table
the user intends to process.
"""
if soup is None or soup.name != 'table':
return False # Tag is not a <table>
elif 'table_id' not in htmldict:
return numtable == 1
table_id = htmldict['table_id']
if isinstance(table_id, six.string_types):
return 'id' in soup.attrs and soup['id'] == table_id
elif isinstance(table_id, int):
return table_id == numtable
# Return False if an invalid parameter is given
return False
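# A minimal sketch (not in the original module) of how identify_table() is
# meant to be used. BeautifulSoup is assumed to be installed, and the HTML
# snippet plus the htmldict values are made up for illustration.
def _identify_table_demo():
    from bs4 import BeautifulSoup
    soup = BeautifulSoup('<table id="foo"><tr><th>A</th></tr>'
                         '<tr><td>1</td></tr></table>', 'html.parser')
    tag = soup.find_all('table')[0]
    assert identify_table(tag, {'table_id': 'foo'}, 1)      # match by HTML id
    assert identify_table(tag, {'table_id': 2}, 2)          # match by index
    assert identify_table(tag, {}, 1)                       # first table wins
    assert not identify_table(tag, {}, 2)                   # later ones do not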
class HTMLInputter(core.BaseInputter):
"""
Input lines of HTML in a valid form.
This requires `BeautifulSoup
<http://www.crummy.com/software/BeautifulSoup/>`_ to be installed.
"""
def process_lines(self, lines):
"""
Convert the given input into a list of SoupString rows
for further processing.
"""
try:
from bs4 import BeautifulSoup
except ImportError:
raise core.OptionalTableImportError('BeautifulSoup must be '
'installed to read HTML tables')
if 'parser' not in self.html:
with warnings.catch_warnings():
# Ignore bs4 parser warning #4550.
warnings.filterwarnings('ignore', '.*no parser was explicitly specified.*')
soup = BeautifulSoup('\n'.join(lines))
else: # use a custom backend parser
soup = BeautifulSoup('\n'.join(lines), self.html['parser'])
tables = soup.find_all('table')
for i, possible_table in enumerate(tables):
if identify_table(possible_table, self.html, i + 1):
table = possible_table # Find the correct table
break
else:
if isinstance(self.html['table_id'], int):
err_descr = 'number {0}'.format(self.html['table_id'])
else:
err_descr = "id '{0}'".format(self.html['table_id'])
raise core.InconsistentTableError(
'ERROR: HTML table {0} not found'.format(err_descr))
# Get all table rows
soup_list = [SoupString(x) for x in table.find_all('tr')]
return soup_list
class HTMLSplitter(core.BaseSplitter):
"""
Split HTML table data.
"""
def __call__(self, lines):
"""
Return HTML data from lines as a generator.
"""
for line in lines:
if not isinstance(line, SoupString):
raise TypeError('HTML lines should be of type SoupString')
soup = line.soup
header_elements = soup.find_all('th')
if header_elements:
# Return multicolumns as tuples for HTMLHeader handling
yield [(el.text.strip(), el['colspan']) if el.has_attr('colspan')
else el.text.strip() for el in header_elements]
data_elements = soup.find_all('td')
if data_elements:
yield [el.text.strip() for el in data_elements]
if len(lines) == 0:
raise core.InconsistentTableError('HTML tables must contain data '
'in a <table> tag')
class HTMLOutputter(core.TableOutputter):
"""
Output the HTML data as an ``astropy.table.Table`` object.
This subclass allows for the final table to contain
multidimensional columns (defined using the colspan attribute
of <th>).
"""
default_converters = [core.convert_numpy(numpy.int),
core.convert_numpy(numpy.float),
core.convert_numpy(numpy.str),
core.convert_numpy(numpy.unicode)]
def __call__(self, cols, meta):
"""
Process the data in multidimensional columns.
"""
new_cols = []
col_num = 0
while col_num < len(cols):
col = cols[col_num]
if hasattr(col, 'colspan'):
# Join elements of spanned columns together into list of tuples
span_cols = cols[col_num:col_num + col.colspan]
new_col = core.Column(col.name)
new_col.str_vals = list(zip(*[x.str_vals for x in span_cols]))
new_cols.append(new_col)
col_num += col.colspan
else:
new_cols.append(col)
col_num += 1
return super(HTMLOutputter, self).__call__(new_cols, meta)
class HTMLHeader(core.BaseHeader):
splitter_class = HTMLSplitter
def start_line(self, lines):
"""
Return the line number at which header data begins.
"""
for i, line in enumerate(lines):
if not isinstance(line, SoupString):
raise TypeError('HTML lines should be of type SoupString')
soup = line.soup
if soup.th is not None:
return i
return None
def _set_cols_from_names(self):
"""
Set columns from header names, handling multicolumns appropriately.
"""
self.cols = []
new_names = []
for name in self.names:
if isinstance(name, tuple):
col = core.Column(name=name[0])
col.colspan = int(name[1])
self.cols.append(col)
new_names.append(name[0])
for i in range(1, int(name[1])):
# Add dummy columns
self.cols.append(core.Column(''))
new_names.append('')
else:
self.cols.append(core.Column(name=name))
new_names.append(name)
self.names = new_names
class HTMLData(core.BaseData):
splitter_class = HTMLSplitter
def start_line(self, lines):
"""
Return the line number at which table data begins.
"""
for i, line in enumerate(lines):
if not isinstance(line, SoupString):
raise TypeError('HTML lines should be of type SoupString')
soup = line.soup
if soup.td is not None:
if soup.th is not None:
raise core.InconsistentTableError('HTML tables cannot '
'have headings and data in the same row')
return i
raise core.InconsistentTableError('No start line found for HTML data')
def end_line(self, lines):
"""
Return the line number at which table data ends.
"""
last_index = -1
for i, line in enumerate(lines):
if not isinstance(line, SoupString):
raise TypeError('HTML lines should be of type SoupString')
soup = line.soup
if soup.td is not None:
last_index = i
if last_index == -1:
return None
return last_index + 1
class HTML(core.BaseReader):
"""Read and write HTML tables.
In order to customize input and output, a dict of parameters may
be passed to this class holding specific customizations.
**htmldict** : Dictionary of parameters for HTML input/output.
* css : Customized styling
If present, this parameter will be included in a <style>
tag and will define stylistic attributes of the output.
* table_id : ID for the input table
If a string, this defines the HTML id of the table to be processed.
If an integer, this specifies the index of the input table in the
available tables. Unless this parameter is given, the reader will
use the first table found in the input file.
* multicol : Use multi-dimensional columns for output
The writer will output tuples as elements of multi-dimensional
columns if this parameter is true, and if not then it will
use the syntax 1.36583e-13 .. 1.36583e-13 for output. If not
present, this parameter will be true by default.
* raw_html_cols : column name or list of names with raw HTML content
This allows one to include raw HTML content in the column output,
for instance to include link references in a table. This option
requires that the bleach package be installed. Only whitelisted
tags are allowed through for security reasons (see the
raw_html_clean_kwargs arg).
* raw_html_clean_kwargs : dict of keyword args controlling HTML cleaning
Raw HTML will be cleaned to prevent unsafe HTML from ending up in
the table output. This is done by calling ``bleach.clean(data,
**raw_html_clean_kwargs)``. For details on the available options
(e.g. tag whitelist) see:
http://bleach.readthedocs.io/en/latest/clean.html
* parser : Specific HTML parsing library to use
If specified, this specifies which HTML parsing library
BeautifulSoup should use as a backend. The options to choose
from are 'html.parser' (the standard library parser), 'lxml'
(the recommended parser), 'xml' (lxml's XML parser), and
'html5lib'. html5lib is a highly lenient parser and therefore
might work correctly for unusual input if a different parser
fails.
* jsfiles : list of js files to include when writing table.
* cssfiles : list of css files to include when writing table.
* js : js script to include in the body when writing table.
* table_class : css class for the table
"""
_format_name = 'html'
_io_registry_format_aliases = ['html']
_io_registry_suffix = '.html'
_description = 'HTML table'
header_class = HTMLHeader
data_class = HTMLData
inputter_class = HTMLInputter
def __init__(self, htmldict={}):
"""
Initialize classes for HTML reading and writing.
"""
super(HTML, self).__init__()
self.html = deepcopy(htmldict)
if 'multicol' not in htmldict:
self.html['multicol'] = True
if 'table_id' not in htmldict:
self.html['table_id'] = 1
self.inputter.html = self.html
def read(self, table):
"""
Read the ``table`` in HTML format and return a resulting ``Table``.
"""
self.outputter = HTMLOutputter()
return core.BaseReader.read(self, table)
def write(self, table):
"""
Return data in ``table`` converted to HTML as a list of strings.
"""
cols = list(six.itervalues(table.columns))
self.data.header.cols = cols
if isinstance(self.data.fill_values, tuple):
self.data.fill_values = [self.data.fill_values]
self.data._set_fill_values(cols)
lines = []
# Set HTML escaping to False for any column in the raw_html_cols input
raw_html_cols = self.html.get('raw_html_cols', [])
if isinstance(raw_html_cols, six.string_types):
raw_html_cols = [raw_html_cols] # Allow for a single string as input
cols_escaped = [col.info.name not in raw_html_cols for col in cols]
# Kwargs that get passed on to bleach.clean() if that is available.
raw_html_clean_kwargs = self.html.get('raw_html_clean_kwargs', {})
# Use XMLWriter to output HTML to lines
w = writer.XMLWriter(ListWriter(lines))
with w.tag('html'):
with w.tag('head'):
# Declare encoding and set CSS style for table
with w.tag('meta', attrib={'charset':'utf-8'}):
pass
with w.tag('meta', attrib={'http-equiv':'Content-type',
'content':'text/html;charset=UTF-8'}):
pass
if 'css' in self.html:
with w.tag('style'):
w.data(self.html['css'])
if 'cssfiles' in self.html:
for filename in self.html['cssfiles']:
with w.tag('link', rel="stylesheet", href=filename, type='text/css'):
pass
if 'jsfiles' in self.html:
for filename in self.html['jsfiles']:
with w.tag('script', src=filename):
w.data('') # need this instead of pass to get <script></script>
with w.tag('body'):
if 'js' in self.html:
with w.xml_cleaning_method('none'):
with w.tag('script'):
w.data(self.html['js'])
if isinstance(self.html['table_id'], six.string_types):
html_table_id = self.html['table_id']
else:
html_table_id = None
if 'table_class' in self.html:
html_table_class = self.html['table_class']
attrib={"class":html_table_class}
else:
attrib={}
with w.tag('table', id=html_table_id, attrib=attrib):
with w.tag('thead'):
with w.tag('tr'):
for col in cols:
if len(col.shape) > 1 and self.html['multicol']:
# Set colspan attribute for multicolumns
w.start('th', colspan=col.shape[1])
else:
w.start('th')
w.data(col.info.name.strip())
w.end(indent=False)
col_str_iters = []
new_cols_escaped = []
for col, col_escaped in zip(cols, cols_escaped):
if len(col.shape) > 1 and self.html['multicol']:
span = col.shape[1]
for i in range(span):
# Split up multicolumns into separate columns
new_col = Column([el[i] for el in col])
new_col_iter_str_vals = self.fill_values(col, new_col.info.iter_str_vals())
col_str_iters.append(new_col_iter_str_vals)
new_cols_escaped.append(col_escaped)
else:
col_iter_str_vals = self.fill_values(col, col.info.iter_str_vals())
col_str_iters.append(col_iter_str_vals)
new_cols_escaped.append(col_escaped)
for row in zip(*col_str_iters):
with w.tag('tr'):
for el, col_escaped in zip(row, new_cols_escaped):
# Potentially disable HTML escaping for column
method = ('escape_xml' if col_escaped else 'bleach_clean')
with w.xml_cleaning_method(method, **raw_html_clean_kwargs):
w.start('td')
w.data(el.strip())
w.end(indent=False)
# Fixes XMLWriter's insertion of unwanted line breaks
return [''.join(lines)]
def fill_values(self, col, col_str_iters):
"""
Return an iterator of the values with replacements based on fill_values
"""
# check if the col is a masked column and has fill values
is_masked_column = hasattr(col, 'mask')
has_fill_values = hasattr(col, 'fill_values')
for idx, col_str in enumerate(col_str_iters):
if is_masked_column and has_fill_values:
if col.mask[idx]:
yield col.fill_values[core.masked]
continue
if has_fill_values:
if col_str in col.fill_values:
yield col.fill_values[col_str]
continue
yield col_str
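# Usage sketch (not part of the original module): the HTML class above can be
# driven directly with the htmldict options documented in its docstring.
# BeautifulSoup is assumed to be installed for the read-back step, and the
# table contents, id and CSS below are made up for illustration.
def _html_roundtrip_demo():
    from astropy.table import Table
    tbl = Table({'a': [1, 2], 'b': ['x', 'y']}, names=('a', 'b'))
    html_writer = HTML(htmldict={'table_id': 'demo',
                                 'css': 'td {padding: 2px}'})
    html_lines = html_writer.write(tbl)     # a list holding one HTML string
    html_reader = HTML(htmldict={'table_id': 'demo'})
    return html_reader.read(html_lines[0])  # back to an astropy Table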
|
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import io
from unittest import TestCase, main
from skbio import Protein, DNA, RNA, Sequence
from skbio.metadata import IntervalMetadata
from skbio.util import get_data_path
from skbio.io import GenBankFormatError
from skbio.io.format.genbank import (
_genbank_sniffer,
_genbank_to_generator, _genbank_to_sequence,
_genbank_to_dna, _genbank_to_rna, _genbank_to_protein,
_parse_locus, _parse_reference,
_generator_to_genbank, _sequence_to_genbank,
_protein_to_genbank, _rna_to_genbank, _dna_to_genbank,
_serialize_locus)
class SnifferTests(TestCase):
def setUp(self):
self.positive_fps = list(map(get_data_path, [
'genbank_5_blanks_start_of_file',
'genbank_single_record_upper',
'genbank_single_record_lower',
'genbank_multi_records']))
self.negative_fps = list(map(get_data_path, [
'empty',
'whitespace_only',
'genbank_6_blanks_start_of_file',
'genbank_w_beginning_whitespace',
'genbank_missing_locus_name']))
def test_positives(self):
for fp in self.positive_fps:
self.assertEqual(_genbank_sniffer(fp), (True, {}))
def test_negatives(self):
for fp in self.negative_fps:
self.assertEqual(_genbank_sniffer(fp), (False, {}))
class GenBankIOTests(TestCase):
# parent class to set up test data for the child class
def setUp(self):
# test locus line
self.locus = (
(['LOCUS NC_005816 9609 bp '
'DNA circular CON 07-FEB-2015'],
{'division': 'CON', 'mol_type': 'DNA', 'shape': 'circular',
'locus_name': 'NC_005816', 'date': '07-FEB-2015',
'unit': 'bp', 'size': 9609}),
(['LOCUS SCU49845 5028 bp '
'DNA PLN 21-JUN-1999'],
{'division': 'PLN', 'mol_type': 'DNA', 'shape': None,
'locus_name': 'SCU49845', 'date': '21-JUN-1999',
'unit': 'bp', 'size': 5028}),
(['LOCUS NP_001832 360 aa '
'linear PRI 18-DEC-2001'],
{'division': 'PRI', 'mol_type': None, 'shape': 'linear',
'locus_name': 'NP_001832', 'date': '18-DEC-2001',
'unit': 'aa', 'size': 360}))
# test single record and read uppercase sequence
self.single_upper_fp = get_data_path('genbank_single_record_upper')
self.single_lower_fp = get_data_path('genbank_single_record_lower')
self.single = (
'GSREILDFK',
{'LOCUS': {'date': '23-SEP-1994',
'division': 'BCT',
'locus_name': 'AAB29917',
'mol_type': None,
'shape': 'linear',
'size': 9,
'unit': 'aa'}},
None,
Protein)
self.single_rna_fp = get_data_path('genbank_single_record')
imd = IntervalMetadata(63)
imd.add([(0, 63)],
[(False, False)],
{'db_xref': '"taxon:562"',
'mol_type': '"mRNA"',
'organism': '"Escherichia coli"',
'type': 'source',
'strand': '+',
'__location': '1..63'})
imd.add([(0, 63)],
[(False, True)],
{'phase': 0,
'db_xref': ['"taxon:562"', '"taxon:561"'],
'__location': '1..>63',
'strand': '+',
'note': '"alkaline phosphatase signal peptide"',
'protein_id': '"AAA23431.1"',
'transl_table': '11',
'translation': '"MKQSTIALAVLPLLFTPVTKA"',
'type': 'CDS'})
self.single_rna = (
'gugaaacaaagcacuauugcacuggcugucuuaccguuacuguuuaccccugugacaaaagcc',
{'ACCESSION': 'M14399',
'COMMENT': 'Original source text: E.coli, cDNA to mRNA.',
'DEFINITION': "alkaline phosphatase signal mRNA, 5' end.",
'KEYWORDS': 'alkaline phosphatase; signal peptide.',
'LOCUS': {'date': '26-APR-1993',
'division': 'BCT',
'locus_name': 'ECOALKP',
'mol_type': 'mRNA',
'shape': 'linear',
'size': 63,
'unit': 'bp'},
'SOURCE': {'ORGANISM': 'Escherichia coli',
'taxonomy': 'Bacteria; Proteobacteria; '
'Gammaproteobacteria; Enterobacteriales; '
'Enterobacteriaceae; Escherichia.'},
'VERSION': 'M14399.1'},
imd,
RNA)
# test:
# 1. multiple records in one file
# 2. lowercase sequence
# 3. DNA, RNA, Protein type
# 4. variation of formats
self.multi_fp = get_data_path('genbank_multi_records')
imd_pro = IntervalMetadata(9)
imd_pro.add([(0, 9)], [(False, False)],
{'organism': '"Bacteria"',
'type': 'source',
'strand': '+',
'__location': '1..9'},)
imd_pro.add([(0, 9)], [(False, True)],
{'__location': '1..>9',
'product': '"L-carnitine amidase"',
'strand': '+',
'type': 'Protein'})
imd_dna = IntervalMetadata(9)
imd_dna.add([(0, 9)], [(False, False)],
{'country': '"Brazil: Parana, Paranavai"',
'type': 'source',
'strand': '+',
'__location': '1..9',
'environmental_sample': ''})
imd_dna.add([(1, 8)], [(True, True)],
{'__location': 'complement(<2..>8)',
'product': '"16S ribosomal RNA"',
'strand': '-',
'type': 'rRNA'})
self.multi = (
('gsreildfk',
{'ACCESSION': 'AAB29917',
'COMMENT': 'Method: direct peptide sequencing.',
'DBSOURCE': 'accession AAB29917.1',
'DEFINITION': 'L-carnitine amidase {N-terminal}',
'KEYWORDS': '.',
'LOCUS': {'date': '23-SEP-1994',
'division': 'BCT',
'locus_name': 'AAB29917',
'mol_type': None,
'shape': 'linear',
'size': 9,
'unit': 'aa'},
'REFERENCE': [{'AUTHORS': 'Joeres,U. and Kula,M.R.',
'JOURNAL': 'AMB 40 (5), 606-610 (1994)',
'PUBMED': '7764422',
'REFERENCE': '1 (residues 1 to 9)',
'REMARK': 'from the original journal article.',
'TITLE': 'a microbial L-carnitine amidase'},
{'AUTHORS': 'Joeres,U. and Kula,M.R.',
'JOURNAL': 'AMB 40 (5), 606-610 (1994)',
'PUBMED': '7764422',
'REFERENCE': '1 (residues 1 to 9)',
'TITLE': 'a microbial L-carnitine amidase'}],
'SOURCE': {'ORGANISM': 'Bacteria',
'taxonomy': 'Unclassified.'},
'VERSION': 'AAB29917.1 GI:545426'},
imd_pro,
Protein),
('catgcaggc',
{'ACCESSION': 'HQ018078',
'DEFINITION': 'Uncultured Xylanimonas sp.16S, partial',
'KEYWORDS': 'ENV.',
'LOCUS': {'date': '29-AUG-2010',
'division': 'ENV',
'locus_name': 'HQ018078',
'mol_type': 'DNA',
'shape': 'linear',
'size': 9,
'unit': 'bp'},
'SOURCE': {'ORGANISM': 'uncultured Xylanimonas sp.',
'taxonomy': 'Bacteria; Actinobacteria; '
'Micrococcales; Promicromonosporaceae; '
'Xylanimonas; environmental samples.'},
'VERSION': 'HQ018078.1 GI:304421728'},
imd_dna,
DNA))
class ReaderTests(GenBankIOTests):
def test_parse_reference(self):
lines = '''
REFERENCE 1 (bases 1 to 154478)
AUTHORS Sato,S., Nakamura,Y., Kaneko,T., and Tabata,S.
TITLE Complete structure of the chloroplast genome of
Arabidopsis thaliana
JOURNAL DNA Res. 6 (5), 283-290 (1999)
PUBMED 10574454'''.split('\n')
exp = {'AUTHORS': 'Sato,S., Nakamura,Y., Kaneko,T., and Tabata,S.',
'JOURNAL': 'DNA Res. 6 (5), 283-290 (1999)',
'PUBMED': '10574454',
'REFERENCE': '1 (bases 1 to 154478)',
'TITLE': ('Complete structure of the chloroplast genome of'
' Arabidopsis thaliana')}
self.assertEqual(_parse_reference(lines), exp)
def test_parse_locus(self):
for serialized, parsed in self.locus:
self.assertEqual(_parse_locus(serialized), parsed)
def test_parse_locus_invalid(self):
lines = [
# missing unit
['LOCUS NC_005816 9609 '
' DNA circular CON 07-FEB-2015'],
# missing division
['LOCUS SCU49845 5028 bp'
' DNA 21-JUN-1999'],
# wrong date format
['LOCUS NP_001832 360 aa'
' linear PRI 2001-12-18']]
for line in lines:
with self.assertRaisesRegex(GenBankFormatError,
r'Could not parse the LOCUS line:.*'):
_parse_locus(line)
def test_genbank_to_generator_single(self):
# test single record and uppercase sequence
for c in [Sequence, Protein]:
obs = next(_genbank_to_generator(
self.single_upper_fp, constructor=c))
exp = c(self.single[0], metadata=self.single[1],
positional_metadata=self.single[2])
self.assertEqual(exp, obs)
def test_genbank_to_generator(self):
for i, obs in enumerate(_genbank_to_generator(self.multi_fp)):
seq, md, imd, constructor = self.multi[i]
exp = constructor(seq, metadata=md, lowercase=True,
interval_metadata=imd)
self.assertEqual(exp, obs)
def test_genbank_to_sequence(self):
for i, exp in enumerate(self.multi):
obs = _genbank_to_sequence(self.multi_fp, seq_num=i+1)
exp = Sequence(exp[0], metadata=exp[1], lowercase=True,
interval_metadata=exp[2])
self.assertEqual(exp, obs)
def test_genbank_to_rna(self):
seq, md, imd, constructor = self.single_rna
obs = _genbank_to_rna(self.single_rna_fp)
exp = constructor(seq, metadata=md,
lowercase=True, interval_metadata=imd)
self.assertEqual(exp, obs)
def test_genbank_to_dna(self):
i = 1
exp = self.multi[i]
obs = _genbank_to_dna(self.multi_fp, seq_num=i+1)
exp = DNA(exp[0], metadata=exp[1], lowercase=True,
interval_metadata=exp[2])
self.assertEqual(exp, obs)
def test_genbank_to_protein(self):
i = 0
exp = self.multi[i]
obs = _genbank_to_protein(self.multi_fp, seq_num=i+1)
exp = Protein(exp[0], metadata=exp[1],
lowercase=True, interval_metadata=exp[2])
self.assertEqual(exp, obs)
class WriterTests(GenBankIOTests):
def test_serialize_locus(self):
for serialized, parsed in self.locus:
self.assertEqual(
_serialize_locus('LOCUS', parsed), serialized[0] + '\n')
def test_generator_to_genbank(self):
seq, md, imd, constructor = self.single
obj = constructor(seq, md, interval_metadata=imd)
with io.StringIO() as fh:
_generator_to_genbank([obj], fh)
obs = fh.getvalue()
with open(self.single_lower_fp) as fh:
exp = fh.read()
self.assertEqual(obs, exp)
def test_sequence_to_genbank(self):
with io.StringIO() as fh:
for i, (seq, md, imd, constructor) in enumerate(self.multi):
obj = Sequence(seq, md, interval_metadata=imd, lowercase=True)
_sequence_to_genbank(obj, fh)
obs = fh.getvalue()
with open(self.multi_fp) as fh:
exp = fh.read()
self.assertEqual(obs, exp)
def test_dna_protein_to_genbank(self):
writers = [_protein_to_genbank,
_dna_to_genbank]
with io.StringIO() as fh:
for i, (seq, md, imd, constructor) in enumerate(self.multi):
obj = constructor(
seq, md, interval_metadata=imd, lowercase=True)
writers[i](obj, fh)
obs = fh.getvalue()
with open(self.multi_fp) as fh:
exp = fh.read()
self.assertEqual(obs, exp)
def test_rna_to_genbank(self):
with io.StringIO() as fh:
seq, md, imd, constructor = self.single_rna
obj = constructor(seq, md, interval_metadata=imd, lowercase=True)
_rna_to_genbank(obj, fh)
obs = fh.getvalue()
with open(self.single_rna_fp) as fh:
exp = fh.read()
self.assertEqual(obs, exp)
class RoundtripTests(GenBankIOTests):
def test_roundtrip_generator(self):
with io.StringIO() as fh:
_generator_to_genbank(_genbank_to_generator(self.multi_fp), fh)
obs = fh.getvalue()
with open(self.multi_fp) as fh:
exp = fh.read()
self.assertEqual(obs, exp)
def test_roundtrip_rna(self):
with io.StringIO() as fh:
_rna_to_genbank(_genbank_to_rna(self.single_rna_fp), fh)
obs = fh.getvalue()
with open(self.single_rna_fp) as fh:
exp = fh.read()
self.assertEqual(obs, exp)
def test_roundtrip_dna(self):
with io.StringIO() as fh:
_dna_to_genbank(_genbank_to_dna(self.single_rna_fp), fh)
obs = fh.getvalue()
with open(self.single_rna_fp) as fh:
exp = fh.read()
self.assertEqual(obs, exp)
def test_roundtrip_protein(self):
with io.StringIO() as fh:
_protein_to_genbank(_genbank_to_protein(self.single_lower_fp), fh)
obs = fh.getvalue()
with open(self.single_lower_fp) as fh:
exp = fh.read()
self.assertEqual(obs, exp)
def test_roundtrip_sequence(self):
with io.StringIO() as fh:
_sequence_to_genbank(_genbank_to_sequence(self.single_rna_fp), fh)
obs = fh.getvalue()
with open(self.single_rna_fp) as fh:
exp = fh.read()
self.assertEqual(obs, exp)
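# Public-API sketch (not part of the original tests): the private readers and
# writers exercised above are normally reached through skbio's I/O registry.
# The file path is made up; a real single-record GenBank DNA file is assumed
# to exist there.
def _genbank_public_api_demo(path='my_record.gb'):
    import skbio
    seq = skbio.DNA.read(path, format='genbank')   # parse one record as DNA
    out = io.StringIO()
    seq.write(out, format='genbank')               # serialize it back
    return out.getvalue()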
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python3
# coding: utf-8
# Copyright 2013 The Font Bakery Authors. All Rights Reserved.
# Copyright 2017 The Google Font Tools Authors
# Copyright 2018 The Font Classification Tool Authors:
# - Felipe C. da S. Sanches
# - Dave Crossland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Initially authored by Google and contributed by Filip Zembowicz.
# Further improved by Dave Crossland and Felipe Sanches.
#
import collections
import errno
import glob
import os
import re
import sys
from fonts_public_pb2 import FamilyProto
from constants import (NAMEID_FONT_FAMILY_NAME,
NAMEID_FONT_SUBFAMILY_NAME)
try:
from fontTools.ttLib import TTFont
except ImportError:
sys.exit("Needs fontTools.\n\npip3 install fonttools")
try:
from google.protobuf import text_format
except ImportError:
    sys.exit("Needs protobuf.\n\npip3 install protobuf")

# Assumed default: the VERBOSE flag referenced in GFN_from_filename() below is
# not defined anywhere in this file, so give it a quiet fallback here.
VERBOSE = False
def get_FamilyProto_Message(path):
message = FamilyProto()
text_data = open(path, "rb").read()
text_format.Merge(text_data, message)
return message
# The canonical [to Google Fonts] name comes before any aliases
_KNOWN_WEIGHTS = collections.OrderedDict([
('Thin', 100),
('Hairline', 100),
('ExtraLight', 200),
('Light', 300),
('Regular', 400),
('', 400), # Family-Italic resolves to this
('Medium', 500),
('SemiBold', 600),
('Bold', 700),
('ExtraBold', 800),
('Black', 900)
])
FileFamilyStyleWeightTuple = collections.namedtuple(
'FileFamilyStyleWeightTuple', ['file', 'family', 'style', 'weight'])
def StyleWeight(styleweight):
"""Breaks apart a style/weight specifier into a 2-tuple of (style, weight).
Args:
styleweight: style/weight string, e.g. Bold, Regular, or ExtraLightItalic.
Returns:
2-tuple of style (normal or italic) and weight.
"""
if styleweight.endswith('Italic'):
return ('italic', _KNOWN_WEIGHTS[styleweight[:-6]])
return ('normal', _KNOWN_WEIGHTS[styleweight])
def FamilyName(fontname):
"""Attempts to build family name from font name.
For example, HPSimplifiedSans => HP Simplified Sans.
Args:
fontname: The name of a font.
Returns:
The name of the family that should be in this font.
"""
# SomethingUpper => Something Upper
fontname = re.sub('(.)([A-Z][a-z]+)', r'\1 \2', fontname)
# Font3 => Font 3
fontname = re.sub('([a-z])([0-9]+)', r'\1 \2', fontname)
# lookHere => look Here
return re.sub('([a-z0-9])([A-Z])', r'\1 \2', fontname)
class ParseError(Exception):
"""Exception used when parse failed."""
def FileFamilyStyleWeight(filename):
"""Extracts family, style, and weight from Google Fonts standard filename.
Args:
filename: Font filename, eg Lobster-Regular.ttf.
Returns:
FileFamilyStyleWeightTuple for file.
Raises:
ParseError: if file can't be parsed.
"""
m = re.search(r'([^/-]+)-(\w+)\.ttf$', filename) #FAMILY_WEIGHT_REGEX
if not m:
raise ParseError('Could not parse %s' % filename)
sw = StyleWeight(m.group(2))
return FileFamilyStyleWeightTuple(filename,
FamilyName(m.group(1)),
sw[0],
sw[1])
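# A small sketch (not part of the original tool) of the filename convention the
# helpers above implement; the path is made up for illustration.
def _filename_parsing_demo():
    entry = FileFamilyStyleWeight('fonts/HPSimplifiedSans-BoldItalic.ttf')
    # FamilyName() splits the camel-cased name into 'HP Simplified Sans' and
    # StyleWeight() maps 'BoldItalic' to ('italic', 700).
    return entry.family, entry.style, entry.weight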
def _FileFamilyStyleWeights(fontdir):
"""Extracts file, family, style, weight 4-tuples for each font in dir.
Args:
fontdir: Directory that supposedly contains font files for a family.
Returns:
List of FileFamilyStyleWeightTuple ordered by weight, style
(normal first).
Raises:
OSError: If the font directory doesn't exist (errno.ENOTDIR) or has no font
files (errno.ENOENT) in it.
RuntimeError: If the font directory appears to contain files from multiple
families.
"""
if not os.path.isdir(fontdir):
raise OSError(errno.ENOTDIR, 'No such directory', fontdir)
files = glob.glob(os.path.join(fontdir, '*.ttf'))
if not files:
raise OSError(errno.ENOENT, 'no font files found')
result = [FileFamilyStyleWeight(f) for f in files]
    # Sort by ascending weight, then 'normal' style before 'italic'. This
    # replaces a Python 2 cmp()-based comparator, which does not work under
    # the python3 shebang above.
    result = sorted(result, key=lambda r: (r.weight, r.style != 'normal'))
family_names = {i.family for i in result}
if len(family_names) > 1:
raise RuntimeError('Ambiguous family name; possibilities: %s'
% family_names)
return result
def GFN_from_filename(fontfile):
ttfont = TTFont(fontfile)
gfn = "unknown"
fontdir = os.path.dirname(fontfile)
metadata = os.path.join(fontdir, "METADATA.pb")
if os.path.exists(metadata):
family = get_FamilyProto_Message(metadata)
for font in family.fonts:
if font.filename in fontfile:
gfn = "{}:{}:{}".format(family.name, font.style, font.weight)
break
else:
try:
attributes = _FileFamilyStyleWeights(fontdir)
for (fontfname, family, style, weight) in attributes:
if fontfname in fontfile:
gfn = "{}:{}:{}".format(family, style, weight)
break
except:
pass
if gfn == 'unknown':
#This font lacks a METADATA.pb file and also failed
# to auto-detect the GFN value. As a last resort
# we'll try to extract the info from the NAME table entries.
try:
for entry in ttfont['name'].names:
if entry.nameID == NAMEID_FONT_FAMILY_NAME:
                    family = entry.string.decode(entry.getEncoding()).encode('ascii', 'ignore').decode('ascii').strip()
if entry.nameID == NAMEID_FONT_SUBFAMILY_NAME:
                    style, weight = StyleWeight(entry.string.decode(entry.getEncoding()).encode('ascii', 'ignore').decode('ascii').strip())
ttfont.close()
if family != "": #avoid empty string in cases of misbehaved family names in the name table
gfn = "{}:{}:{}".format(family, style, weight)
if VERBOSE:
print ("Detected GFN from name table entries: '{}' (file='{}')".format(gfn, fontfile))
except:
# print("This seems to be a really bad font file... ({})".format(fontfile))
pass
#if gfn == 'unknown':
# print ("Failed to detect GFN value for '{}'. Defaults to 'unknown'.".format(fontfile))
exceptions = [
("Bio Rhyme", "BioRhyme"),
]
for bad, good in exceptions:
if bad in gfn:
gfn = good.join(gfn.split(bad))
return gfn
def GFNs_from_filenames(filenames):
return {fname: GFN_from_filename(fname) for fname in filenames}
def get_GFNs_from_gfonts(apikey):
import requests
APIURL = 'https://www.googleapis.com/webfonts/v1/webfonts?key={}'.format
r = requests.get(APIURL(apikey))
GFNs = {}
for entry in r.json()["items"]:
family = entry["family"]
subsets = entry["subsets"]
for variant in entry["variants"]:
if variant == "italic":
style = "italic"
                weight = 400
elif "italic" in variant:
style = "italic"
weight = "".join(variant.split("italic"))
elif variant == 'regular':
style = "normal"
weight = 400
else:
style = "normal"
weight = variant
gfn = "{}:{}:{}".format(family, style, weight)
GFNs[gfn] = subsets
print (gfn)
return GFNs
|
|
# coding=utf-8
import logging, commands, os, pexpect, sys, time
from app.modules.common.pipelines import *
from app.modules.common.tasks import *
from app.modules.common.errors import *
from app.modules.common.utils import *
from pexpect import *
log = logging.getLogger(__name__)
class ImportOraclePipeLine(Pipeline):
def __init__(self, name):
Pipeline.__init__(self, name)
log.info(run('/bin/bash -c "echo $PATH"'))
self.add_task(CheckOracleImportParams())
self.add_task(CheckOracleProcessTask())
self.add_task(CleanTempPathTask())
self.add_task(CopyDumpFileTask())
self.add_task(UnzipDumpFileTask())
self.add_task(InitOracleUserTask())
self.add_task(AfterImportTask())
class CheckOracleImportParams(Task):
def __init__(self):
Task.__init__(self, 'CheckImportParams')
def paramExists(self, key, params):
keys = params.keys()
if (key not in keys) or (params[key] is None) or ((str(params[key]).lstrip()) == ''):
return False
else:
return True
def __do_execute__(self, params={'username': None,
'password': None,
'temp_path': None,
'dump_path': None,
'db_name': None,
'clean_script_path': None,
'tablespace': None}):
log.info('********************************************************')
log.info("=======> CheckImportParams:Entered execute()")
error_message=[]
param_valid = True
if not self.paramExists('username', params):
param_valid = False
            error_message.append('params[username] must not be empty')
else:
log.info("=======> params[username] is %s", params['username'])
if not self.paramExists('password', params):
param_valid = False
            error_message.append('params[password] must not be empty')
else:
log.info("=======> params[password] is %s", params['password'])
if not self.paramExists('temp_path', params):
param_valid = False
            error_message.append('params[temp_path] must not be empty')
else:
log.info("=======> params[temp_path] is %s", params['temp_path'])
if not self.paramExists('dump_path', params):
param_valid = False
            error_message.append('params[dump_path] must not be empty')
else:
log.info("=======> params[dump_path] is %s", params['dump_path'])
if not self.paramExists('db_name', params):
params['db_name'] = params['username']
log.info("=======> params[db_name] is %s", params['db_name'])
if not self.paramExists('clean_script_path', params):
param_valid = False
            error_message.append('params[clean_script_path] must not be empty')
else:
log.info("=======> params[clean_script_path] is %s", params['clean_script_path'])
if not self.paramExists('tablespace', params):
params['tablespace'] = params['username']
log.info("=======> params[tablespace] is empty, set to the username : %s", params['tablespace'])
else:
log.info("=======> params[tablespace] is %s", params['tablespace'])
if not self.paramExists('source_username', params):
params['source_username'] = 'adempiere'
log.info('=======> source username is : %s', params['source_username'])
if not self.paramExists('source_tablespace', params):
params['source_tablespace'] = 'TS_ADEMPIERE'
log.info('=======> source tablespace is : %s', params['source_tablespace'])
        if param_valid and not os.path.exists(params['dump_path']):
            param_valid = False
            error_message.append('dump path [%s] does not exist.' % params['dump_path'])
        if param_valid and not os.path.exists(params['temp_path']):
            param_valid = False
            error_message.append('temp path [%s] does not exist.' % params['temp_path'])
        if param_valid and not os.path.exists(params['clean_script_path']):
            param_valid = False
            error_message.append('clean script path [%s] does not exist.' % params['clean_script_path'])
        if param_valid and len(os.listdir(params['dump_path'])) <= 0:
            param_valid = False
            error_message.append('dump path [%s] is an empty directory.' % params['dump_path'])
        if param_valid and len(os.listdir(params['clean_script_path'])) <= 0:
            param_valid = False
            error_message.append('clean script path [%s] is an empty directory.' % params['clean_script_path'])
if not param_valid:
log.error("******** %s", error_message)
raise TaskParamsError(error_message)
log.info("=======> CheckImportParams:Exit execute()")
class CheckOracleProcessTask(Task):
def __init__(self):
        Task.__init__(self, 'CheckOracleProcess')
def __do_execute__(self, params={}):
log.info('********************************************************')
log.info("=======> CheckMysqlProcess:Entered execute()")
try:
sh = '/bin/bash -c "ps -ef | grep ora_ | grep -v grep | wc -l"'
            log.info('=======> start checking oracle process, shell : %s', sh)
output, status = run(sh, withexitstatus=1)
log.info('=======> shell status: %s, output: %s', status, output)
if status == 0 and int(output) != 0:
log.info('=======> check oracle process success.')
else:
log.error('=======> Oracle process does not exist')
log.info('=======> try start oracle')
sqlplus = pexpect.spawn('su - oracle', timeout=10)
sqlplus.logfile = sys.stdout
sqlplus.sendline('lsnrctl start')
sqlplus.sendline('sqlplus / as sysdba')
sqlplus.expect('SQL>')
sqlplus.sendline('set head off')
sqlplus.expect('SQL>')
sqlplus.sendline('set feedback off')
sqlplus.expect('SQL>')
sqlplus.sendline('startup')
sqlplus.expect('SQL>')
sqlplus.sendline('exit')
sqlplus.close()
log.info('=======> recheck oracle process, shell : %s', sh)
output, status = run(sh, withexitstatus=1)
log.info('=======> shell status: %s, output: %s', status, output)
if status == 0 and int(output) != 0:
log.info('=======> check oracle process success.')
else:
raise TaskExcecuteError('start oracle error')
except BaseException, e:
log.error('=======> ' + e.message)
raise TaskExcecuteError('execute shell failure, cause: %s.' % e.message)
log.info("=======> CheckMysqlProcess:Exit execute()")
class CleanTempPathTask(Task):
def __init__(self):
Task.__init__(self, 'cleanTempPathTask')
def __do_execute__(self, params={}):
log.info('********************************************************')
log.info("=======> cleanTempPathTask:Entered execute()")
path = '%s/%s' % (params['temp_path'], params['db_name'])
sh = 'mkdir -p %s' % path
log.info('=======> start execute shell : %s', sh)
try:
output, status = run(sh, withexitstatus=1)
log.info('=======> shell status: %s, output: %s', status, output)
except BaseException, e:
log.error('=======> ' + e.message)
raise TaskExcecuteError('execute shell failure.')
log.info('=======> end execute shell')
if status != 0:
raise TaskExcecuteError('create temp path[%s] failure.' % path)
else:
log.info('=======> create temp path[%s] success.', path)
log.info('********************************************************')
sh = '/bin/bash -c "rm -f %s/*.zip %s/import.log %s/*.dmp %s/*.sql %s/*.txt"' % (path, path, path, path, path)
log.info('=======> start execute shell : %s', sh)
try:
output, status = run(sh, withexitstatus=1)
log.info('=======> shell status: %s, output: %s', status, output)
except BaseException, e:
log.error('=======> ' + e.message)
raise TaskExcecuteError('execute shell failure, cause: %s.' % e.message)
log.info('=======> end execute shell')
if status != 0:
raise TaskExcecuteError('clean temp path[%s] failure.' % path)
else:
log.info('=======> clean temp path[%s] success.', path)
log.info("=======> cleanTempPathTask:Exit execute()")
class CopyDumpFileTask(Task):
def __init__(self):
Task.__init__(self, 'CopyDumpFileTask')
def __do_execute__(self, params={}):
log.info('********************************************************')
log.info("=======> CopyDumpFileTask:Entered execute()")
path = '%s/%s' % (params['temp_path'], params['db_name'])
sh = '/bin/bash -c "cp %s/* %s"' % (params['dump_path'], path)
log.info('=======> start execute shell : %s', sh)
try:
output, status = run(sh, withexitstatus=1, timeout=300)
log.info('=======> shell status: %s, output: %s', status, output)
except BaseException, e:
log.error('=======> ' + e.message)
raise TaskExcecuteError('execute shell failure, cause: %s.' % e.message)
log.info('=======> end execute shell')
if status != 0:
raise TaskExcecuteError('copy dump file failure.')
else:
log.info('=======> copy dump file success.')
log.info("=======> CopyDumpFileTask:Exit execute()")
class UnzipDumpFileTask(Task):
def __init__(self):
Task.__init__(self, 'UnzipDumpFileTask')
def __do_execute__(self, params={}):
log.info('********************************************************')
log.info("=======> UnzipDumpFileTask:Entered execute()")
path = '%s/%s' % (params['temp_path'], params['db_name'])
files = os.listdir(path)
if len(files) == 0:
log.error('dump file does not exist')
dump_zip = None
for file in files:
if file.endswith('.zip'):
dump_zip = file
if not dump_zip:
log.error('=======> Dump compressed file does not exist')
raise TaskExcecuteError('Dump compressed file does not exist')
params['import_dump_name'] = str(dump_zip).split('.zip')[0]
log.info("---------->dump name %s", params['import_dump_name'])
sh = '/bin/bash -c "unzip -o %s/%s -d %s"' % (path, dump_zip, path)
log.info('=======> start unzip, shell : %s', sh)
try:
output, status = run(sh, withexitstatus=1, timeout=600)
log.info('=======> shell status: %s, output: %s', status, output)
if status != 0:
raise TaskExcecuteError('unzip dump file failure.')
else:
log.info('=======> unzip dump file success.')
except BaseException, e:
log.error('=======> ' + e.message)
raise TaskExcecuteError('execute shell failure, cause: %s.' % e.message)
sh = '/bin/bash -c "rm -f %s/%s"' % (path, dump_zip)
log.info('=======> start clean dump zip, shell : %s', sh)
try:
output, status = run(sh, withexitstatus=1)
log.info('=======> shell status: %s, output: %s', status, output)
if status != 0:
raise TaskExcecuteError('clean dump zip failure.')
else:
log.info('=======> clean dump zip success.')
except BaseException, e:
log.error('=======> ' + e.message)
raise TaskExcecuteError('execute shell failure, cause: %s.' % e.message)
sh = '/bin/bash -c "chmod -R 777 %s"' % path
log.info('=======> start execute shell : %s', sh)
try:
output, status = run(sh, withexitstatus=1)
log.info('=======> shell status: %s, output: %s', status, output)
except BaseException, e:
log.error('=======> ' + e.message)
raise TaskExcecuteError('execute shell failure, cause: %s.' % e.message)
log.info('=======> end execute shell')
if status != 0:
raise TaskExcecuteError('chmod 777 path[%s] failure.' % path)
else:
log.info('=======> chmod 777 path[%s] success.', path)
log.info("=======> UnzipDumpFileTask:Exit execute()")
class InitOracleUserTask(Task):
def __init__(self):
Task.__init__(self, 'InitOracleUserTask')
def replaceInvalidateChars(self, temp_path, username, regex):
sh = '/bin/bash -c "sed -i \'%s\' %s/%s_disconnect.sql"' % (regex, temp_path, username)
log.info('=======> start Replace invalid character, shell : %s', sh)
try:
output, status = run(sh, withexitstatus=1)
log.info('=======> shell status: %s, output: %s', status, output)
if status != 0:
raise TaskExcecuteError('Replace invalid character failure.')
else:
log.info('=======> Replace invalid character success.')
except BaseException, e:
log.error('=======> ' + e.message)
raise TaskExcecuteError('execute shell failure, cause: %s.' % e.message)
def __do_execute__(self, params={}):
log.info('********************************************************')
log.info("=======> InitOracleUserTask:Entered execute()")
db_name = params['db_name']
temp_path = '%s/%s' % (params['temp_path'], params['db_name'])
username = str(params['username']).upper()
password = params['password']
tablespace = params['tablespace']
dump_path_name = username + '_dump_path'
source_username = params['source_username']
source_tablespace = params['source_tablespace']
try:
sqlplus = pexpect.spawn('su - oracle', timeout=None)
sqlplus.logfile = sys.stdout
sqlplus.expect('$')
sqlplus.sendline('sqlplus / as sysdba')
sqlplus.expect('SQL>')
sqlplus.sendline('set head off')
sqlplus.expect('SQL>')
sqlplus.sendline('set feedback off')
sqlplus.expect('SQL>')
sqlplus.sendline('SELECT USERNAME || \'_\' FROM ALL_USERS where USERNAME = \'%s\';' % username)
index = sqlplus.expect([username + '_', pexpect.TIMEOUT], timeout=None)
log.info('=====> index : %s', sqlplus.before)
log.info('=====> index : %s', index)
if index == 0:
sqlplus.sendline('alter user %s account lock;' % username)
sqlplus.expect('SQL>')
sqlplus.sendline('spool %s/%s_disconnect.sql;' % (temp_path, username))
sqlplus.sendline('select \'alter system kill session \'\'\' || sid ||\',\'||serial#||\'\'\' immediate;\' from v$session where username = UPPER(\'%s\');' % username)
sqlplus.expect('SQL>')
sqlplus.sendline('spool off')
sqlplus.expect('SQL>')
self.replaceInvalidateChars(temp_path, username, 's/SQL>.*//g')
self.replaceInvalidateChars(temp_path, username, 's/new\s\{3\}.*//g')
self.replaceInvalidateChars(temp_path, username, 's/old\s\{3\}.*//g')
sqlplus.sendline('@%s/%s_disconnect.sql;' % (temp_path, username))
sqlplus.expect('SQL>', timeout=None)
elif index == 1:
log.info('user[%s] does not exist.', username)
sqlplus.sendline('DROP USER %s CASCADE;' % username)
sqlplus.expect('SQL>', timeout=None)
sqlplus.sendline('DROP TABLESPACE %s INCLUDING CONTENTS AND DATAFILES;' % tablespace)
sqlplus.expect('SQL>', timeout=None)
sqlplus.sendline(
'create tablespace %s datafile \'/u01/app/oracle/oradata/orcldb/%s\' size 400M autoextend on next 10m maxsize unlimited;' % (
tablespace, tablespace))
sqlplus.expect('SQL>', timeout=None)
sqlplus.sendline('CREATE USER %s IDENTIFIED BY %s default tablespace %s account unlock;' % (username,
password,
tablespace))
sqlplus.expect('SQL>', timeout=None)
sqlplus.sendline('GRANT CONNECT,RESOURCE,DBA,UNLIMITED TABLESPACE,CREATE TABLE TO %s;' % username)
sqlplus.expect('SQL>', timeout=None)
sqlplus.sendline('ALTER USER %s DEFAULT ROLE CONNECT, RESOURCE, DBA;' % username)
sqlplus.expect('SQL>', timeout=None)
sqlplus.sendline('grant READ, WRITE ON directory erpdump TO %s;' % username)
sqlplus.expect('SQL>', timeout=None)
sqlplus.sendline('grant create any job to %s;' % username)
sqlplus.expect('SQL>', timeout=None)
sqlplus.sendline('DROP DIRECTORY %s;' % dump_path_name)
sqlplus.expect('SQL>', timeout=None)
sqlplus.sendline('create directory %s as \'%s\';' % (dump_path_name, temp_path))
sqlplus.expect('SQL>', timeout=None)
sqlplus.sendline('grant read,write on directory %s to %s;' % (dump_path_name, username))
sqlplus.expect('SQL>', timeout=None)
sqlplus.sendline('alter user %s account unlock;' % username)
sqlplus.expect('SQL>', timeout=None)
sqlplus.sendcontrol('d')
sqlplus.expect('$')
remap = 'remap_schema=%s:%s remap_tablespace=%s:%s' % (source_username,
username,
source_tablespace,
tablespace)
sh = 'impdp %s/%s dumpfile=%s.dmp DIRECTORY=%s %s' % (username,
password,
params['import_dump_name'],
dump_path_name,
remap)
log.info('======> Start execute oracle import : %s', sh)
sqlplus.sendline(sh)
sqlplus.expect('Job "%s"."(.*)" completed with' % username, timeout=None)
log.info('======> End execute oracle import ')
time.sleep(5)
sqlplus.close()
except BaseException, e:
log.error('=======> ' + e.message)
raise TaskExcecuteError('execute shell failure, cause: %s.' % e.message)
log.info("=======> InitOracleUserTask:Exit execute()")
class AfterImportTask(Task):
def __init__(self):
Task.__init__(self, 'AfterImportTask')
def __do_execute__(self, params={'after_imp_sql_files': None}):
log.info('********************************************************')
log.info("=======> AfterImportTask:Entered execute()")
try:
if 'after_imp_sql_files' in params.keys():
sqls = params['after_imp_sql_files']
if Converter.typeof_list_or_set(sqls) and len(sqls) > 0:
log.info('=======> Start read&write permissions to the script')
for sql in sqls:
sh = '/bin/bash -c "chmod 777 %s"' % sql
log.info('=======> execute shell : %s', sh)
run(sh, withexitstatus=1)
log.info('=======> end read&write permissions to the script')
sqlplus = pexpect.spawn('su - oracle', timeout=10)
sqlplus.logfile = sys.stdout
sqlplus.expect('$')
sqlplus.sendline('sqlplus / as sysdba')
sqlplus.expect('SQL>')
for sql in sqls:
log.info('=====> Start executing SQL script file: %s', sql)
sqlplus.sendline('@%s;' % sql)
sqlplus.expect('SQL>', timeout=None)
sqlplus.sendcontrol('d')
sqlplus.close()
else:
log.info('=======> There is no need to execute the SQL script file')
else:
log.info('=======> There is no need to execute the SQL script file')
except BaseException, e:
log.error('=======> ' + e.message)
raise TaskExcecuteError('execute shell failure, cause: %s.' % e.message)
log.info("=======> AfterImportTask:Exit execute()")
|
|
"""
Testing for the boost module (sklearn.ensemble.boost).
"""
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_classification_toy():
"""Check classification on a toy dataset."""
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
"""Check classification on a toy dataset."""
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
"""Check consistency on dataset iris."""
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
"""Check consistency on dataset boston house prices."""
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
"""Check staged predictions."""
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
"""Check that base trees can be grid-searched."""
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
"""Check pickability."""
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
"""Check variable importances."""
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
"""Test that it gives proper exception on deficient input."""
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
def test_base_estimator():
"""Test different base estimators."""
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
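# --- illustrative sketch (not part of the original test module) ---
# The staged_* API exercised in test_staged_predict is commonly used to pick a
# small, good-enough n_estimators after a single fit; the min_score threshold
# below is arbitrary and the helper name is hypothetical.
def smallest_sufficient_n_estimators(clf, X_eval, y_eval, min_score=0.9):
    # staged_score yields the ensemble score after each boosting iteration.
    for n, score in enumerate(clf.staged_score(X_eval, y_eval), start=1):
        if score >= min_score:
            return n
    return clf.n_estimators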
if __name__ == "__main__":
import nose
nose.runmodule()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pyowm.airpollutionapi30 import airpollution_client, coindex, no2index, ozone, so2index, airstatus
from pyowm.airpollutionapi30.uris import ROOT_POLLUTION_API_URL, NEW_ROOT_POLLUTION_API_URL
from pyowm.commons.http_client import HttpClient
from pyowm.constants import AIRPOLLUTION_API_VERSION
from pyowm.utils import geo, decorators, formatting, timestamps
class AirPollutionManager:
"""
A manager object that provides a full interface to the OWM Air Pollution API.
:param API_key: the OWM AirPollution API key
:type API_key: str
:param config: the configuration dictionary
:type config: dict
:returns: an *AirPollutionManager* instance
:raises: *AssertionError* when no API Key is provided
"""
def __init__(self, API_key, config):
assert API_key is not None, 'You must provide a valid API Key'
self.API_key = API_key
assert isinstance(config, dict)
self.ap_client = airpollution_client.AirPollutionHttpClient(
API_key,
HttpClient(API_key, config, ROOT_POLLUTION_API_URL))
self.new_ap_client = airpollution_client.AirPollutionHttpClient(
API_key,
HttpClient(API_key, config, NEW_ROOT_POLLUTION_API_URL))
def airpollution_api_version(self):
return AIRPOLLUTION_API_VERSION
@decorators.deprecated('removed', '4', 'coindex_around_coords')
def coindex_around_coords(self, lat, lon, start=None, interval=None):
"""
Queries the OWM AirPollution API for Carbon Monoxide values sampled in the
surroundings of the provided geocoordinates and in the specified time
interval.
A *COIndex* object instance is returned, encapsulating a
*Location* object and the list of CO samples
If `start` is not provided, the latest available CO samples are
retrieved
If `start` is provided but `interval` is not, then `interval` defaults
to the maximum extent, which is: `year`
:param lat: the location's latitude, must be between -90.0 and 90.0
:type lat: int/float
:param lon: the location's longitude, must be between -180.0 and 180.0
:type lon: int/float
:param start: the object conveying the start value of the search time
window start (defaults to ``None``). If not provided, the latest
available CO samples value are retrieved
:type start: int, ``datetime.datetime`` or ISO8601-formatted
string
:param interval: the length of the search time window starting at
`start` (defaults to ``None``). If not provided, 'year' is used
:type interval: str among: 'minute', 'hour', 'day', 'month', 'year'
:return: a *COIndex* instance or ``None`` if data is not available
:raises: *ParseResponseException* when OWM AirPollution API responses' data
cannot be parsed, *APICallException* when OWM AirPollution API can not be
reached, *ValueError* for wrong input values
"""
geo.assert_is_lon(lon)
geo.assert_is_lat(lat)
params = {'lon': lon, 'lat': lat, 'start': start, 'interval': interval}
json_data = self.ap_client.get_coi(params)
coi = coindex.COIndex.from_dict(json_data)
if interval is None:
interval = 'year'
coi.interval = interval
return coi
@decorators.deprecated('removed', '4', 'ozone_around_coords')
def ozone_around_coords(self, lat, lon, start=None, interval=None):
"""
Queries the OWM AirPollution API for Ozone (O3) value in Dobson Units sampled in
the surroundings of the provided geocoordinates and in the specified
time interval. An *Ozone* object instance is returned, encapsulating a
*Location* object and the UV intensity value.
If `start` is not provided, the latest available ozone value is
retrieved.
If `start` is provided but `interval` is not, then `interval` defaults
to the maximum extent, which is: `year`
:param lat: the location's latitude, must be between -90.0 and 90.0
:type lat: int/float
:param lon: the location's longitude, must be between -180.0 and 180.0
:type lon: int/float
:param start: the object conveying the start value of the search time
window start (defaults to ``None``). If not provided, the latest
available Ozone value is retrieved
:type start: int, ``datetime.datetime`` or ISO8601-formatted
string
:param interval: the length of the search time window starting at
`start` (defaults to ``None``). If not provided, 'year' is used
:type interval: str among: 'minute', 'hour', 'day', 'month', 'year'
:return: an *Ozone* instance or ``None`` if data is not available
:raises: *ParseResponseException* when OWM AirPollution API responses' data
cannot be parsed, *APICallException* when OWM AirPollution API can not be
reached, *ValueError* for wrong input values
"""
geo.assert_is_lon(lon)
geo.assert_is_lat(lat)
params = {'lon': lon, 'lat': lat, 'start': start, 'interval': interval}
json_data = self.ap_client.get_o3(params)
oz = ozone.Ozone.from_dict(json_data)
if interval is None:
interval = 'year'
oz.interval = interval
return oz
@decorators.deprecated('removed', '4', 'no2index_around_coords')
def no2index_around_coords(self, lat, lon, start=None, interval=None):
"""
Queries the OWM AirPollution API for Nitrogen Dioxide values sampled in the
surroundings of the provided geocoordinates and in the specified time
interval.
A *NO2Index* object instance is returned, encapsulating a
*Location* object and the list of NO2 samples
If `start` is not provided, the latest available NO2 samples are
retrieved
If `start` is provided but `interval` is not, then `interval` defaults
to the maximum extent, which is: `year`
:param lat: the location's latitude, must be between -90.0 and 90.0
:type lat: int/float
:param lon: the location's longitude, must be between -180.0 and 180.0
:type lon: int/float
:param start: the object conveying the start value of the search time
window start (defaults to ``None``). If not provided, the latest
available NO2 samples value are retrieved
:type start: int, ``datetime.datetime`` or ISO8601-formatted
string
:param interval: the length of the search time window starting at
`start` (defaults to ``None``). If not provided, 'year' is used
:type interval: str among: 'minute', 'hour', 'day', 'month', 'year'
:return: a *NO2Index* instance or ``None`` if data is not available
:raises: *ParseResponseException* when OWM AirPollution API responses' data
cannot be parsed, *APICallException* when OWM AirPollution API can not be
reached, *ValueError* for wrong input values
"""
geo.assert_is_lon(lon)
geo.assert_is_lat(lat)
params = {'lon': lon, 'lat': lat, 'start': start, 'interval': interval}
json_data = self.ap_client.get_no2(params)
no2 = no2index.NO2Index.from_dict(json_data)
if interval is None:
interval = 'year'
no2.interval = interval
return no2
@decorators.deprecated('removed', '4', 'so2index_around_coords')
def so2index_around_coords(self, lat, lon, start=None, interval=None):
"""
Queries the OWM AirPollution API for Sulphur Dioxide values sampled in the
surroundings of the provided geocoordinates and in the specified time
interval.
A *SO2Index* object instance is returned, encapsulating a
*Location* object and the list of SO2 samples
If `start` is not provided, the latest available SO2 samples are
retrieved
If `start` is provided but `interval` is not, then `interval` defaults
to the maximum extent, which is: `year`
:param lat: the location's latitude, must be between -90.0 and 90.0
:type lat: int/float
:param lon: the location's longitude, must be between -180.0 and 180.0
:type lon: int/float
:param start: the object conveying the start value of the search time
window start (defaults to ``None``). If not provided, the latest
available SO2 samples value are retrieved
:type start: int, ``datetime.datetime`` or ISO8601-formatted
string
:param interval: the length of the search time window starting at
`start` (defaults to ``None``). If not provided, 'year' is used
:type interval: str among: 'minute', 'hour', 'day', 'month', 'year'
:return: a *SO2Index* instance or ``None`` if data is not available
:raises: *ParseResponseException* when OWM AirPollution API responses' data
cannot be parsed, *APICallException* when OWM AirPollution API can not be
reached, *ValueError* for wrong input values
"""
geo.assert_is_lon(lon)
geo.assert_is_lat(lat)
params = {'lon': lon, 'lat': lat, 'start': start, 'interval': interval}
json_data = self.ap_client.get_so2(params)
so2 = so2index.SO2Index.from_dict(json_data)
if interval is None:
interval = 'year'
so2.interval = interval
return so2
def air_quality_at_coords(self, lat, lon):
"""
Queries the OWM AirPollution API for available air quality indicators around the specified coordinates.
:param lat: the location's latitude, must be between -90.0 and 90.0
:type lat: int/float
:param lon: the location's longitude, must be between -180.0 and 180.0
:type lon: int/float
:return: a *AirStatus* instance or ``None`` if data is not available
:raises: *ParseResponseException* when OWM AirPollution API responses' data
cannot be parsed, *APICallException* when OWM AirPollution API can not be
reached, *ValueError* for wrong input values
"""
geo.assert_is_lon(lon)
geo.assert_is_lat(lat)
params = {'lon': lon, 'lat': lat}
json_data = self.new_ap_client.get_air_pollution(params)
try:
return airstatus.AirStatus.from_dict(json_data)
except:
return None
def air_quality_forecast_at_coords(self, lat, lon):
"""
Queries the OWM AirPollution API for available forecasted air quality indicators around the specified coordinates.
:param lat: the location's latitude, must be between -90.0 and 90.0
:type lat: int/float
:param lon: the location's longitude, must be between -180.0 and 180.0
:type lon: int/float
:return: a `list` of *AirStatus* instances or an empty `list` if data is not available
:raises: *ParseResponseException* when OWM AirPollution API responses' data
cannot be parsed, *APICallException* when OWM AirPollution API can not be
reached, *ValueError* for wrong input values
"""
geo.assert_is_lon(lon)
geo.assert_is_lat(lat)
params = {'lon': lon, 'lat': lat}
json_data = self.new_ap_client.get_forecast_air_pollution(params)
try:
return airstatus.AirStatus.from_dict(json_data)
except:
return []
def air_quality_history_at_coords(self, lat, lon, start, end=None):
"""
Queries the OWM AirPollution API for available historical air quality indicators around the specified coordinates.
:param lat: the location's latitude, must be between -90.0 and 90.0
:type lat: int/float
:param lon: the location's longitude, must be between -180.0 and 180.0
:type lon: int/float
:param start: the object conveying the start value of the search time window
:type start: int, ``datetime.datetime`` or ISO8601-formatted string
:param end: the object conveying the end value of the search time window. Values in the future will be clipped
to the current timestamp. Defaults to the current UNIX timestamp.
:type end: int, ``datetime.datetime`` or ISO8601-formatted string
:return: a `list` of *AirStatus* instances or an empty `list` if data is not available
:raises: *ParseResponseException* when OWM AirPollution API responses' data
cannot be parsed, *APICallException* when OWM AirPollution API can not be
reached, *ValueError* for wrong input values
"""
geo.assert_is_lon(lon)
geo.assert_is_lat(lat)
now = timestamps.now(timeformat='unix')
assert start is not None
start = formatting.timeformat(start, 'unix')
if end is None:
end = now
else:
end = formatting.timeformat(end, 'unix')
if end > now:
end = now
params = {'lon': lon, 'lat': lat, 'start': start, 'end': end}
json_data = self.new_ap_client.get_historical_air_pollution(params)
try:
return airstatus.AirStatus.from_dict(json_data)
except:
return []
def __repr__(self):
return '<%s.%s>' % (__name__, self.__class__.__name__)
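# --- illustrative sketch (not part of the original module) ---
# Minimal direct use of AirPollutionManager. The API key and the config dict
# are placeholders (the assumed config shape may differ); in practice the
# manager is usually obtained from a top-level OWM object, which supplies its
# own configuration.
if __name__ == '__main__':
    placeholder_config = {'connection': {'verify_ssl_certs': True}}  # assumed shape
    mgr = AirPollutionManager('your-OWM-API-key', placeholder_config)
    print(mgr.air_quality_at_coords(45.07, 7.69))           # latest air quality near Turin
    print(mgr.air_quality_forecast_at_coords(45.07, 7.69))  # list of forecasted AirStatus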
|
|
"""Service calls related dependencies for LCN component."""
import pypck
import voluptuous as vol
from homeassistant.const import (
CONF_ADDRESS, CONF_BRIGHTNESS, CONF_STATE, CONF_UNIT_OF_MEASUREMENT)
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_CONNECTIONS, CONF_KEYS, CONF_LED, CONF_OUTPUT, CONF_PCK,
CONF_RELVARREF, CONF_ROW, CONF_SETPOINT, CONF_TABLE, CONF_TEXT, CONF_TIME,
CONF_TIME_UNIT, CONF_TRANSITION, CONF_VALUE, CONF_VARIABLE, DATA_LCN,
LED_PORTS, LED_STATUS, OUTPUT_PORTS, RELVARREF, SENDKEYCOMMANDS, SETPOINTS,
THRESHOLDS, TIME_UNITS, VAR_UNITS, VARIABLES)
from .helpers import (
get_connection, is_address, is_key_lock_states_string,
is_relays_states_string)
class LcnServiceCall():
"""Parent class for all LCN service calls."""
schema = vol.Schema({
vol.Required(CONF_ADDRESS): is_address
})
def __init__(self, hass):
"""Initialize service call."""
self.connections = hass.data[DATA_LCN][CONF_CONNECTIONS]
def get_address_connection(self, call):
"""Get address connection object."""
addr, connection_id = call.data[CONF_ADDRESS]
addr = pypck.lcn_addr.LcnAddr(*addr)
if connection_id is None:
connection = self.connections[0]
else:
connection = get_connection(self.connections, connection_id)
return connection.get_address_conn(addr)
class OutputAbs(LcnServiceCall):
"""Set absolute brightness of output port in percent."""
schema = LcnServiceCall.schema.extend({
vol.Required(CONF_OUTPUT): vol.All(vol.Upper, vol.In(OUTPUT_PORTS)),
vol.Required(CONF_BRIGHTNESS):
vol.All(vol.Coerce(int), vol.Range(min=0, max=100)),
vol.Optional(CONF_TRANSITION, default=0):
vol.All(vol.Coerce(float), vol.Range(min=0., max=486.))
})
def __call__(self, call):
"""Execute service call."""
output = pypck.lcn_defs.OutputPort[call.data[CONF_OUTPUT]]
brightness = call.data[CONF_BRIGHTNESS]
transition = pypck.lcn_defs.time_to_ramp_value(
call.data[CONF_TRANSITION] * 1000)
address_connection = self.get_address_connection(call)
address_connection.dim_output(output.value, brightness, transition)
class OutputRel(LcnServiceCall):
"""Set relative brightness of output port in percent."""
schema = LcnServiceCall.schema.extend({
vol.Required(CONF_OUTPUT): vol.All(vol.Upper, vol.In(OUTPUT_PORTS)),
vol.Required(CONF_BRIGHTNESS):
vol.All(vol.Coerce(int), vol.Range(min=-100, max=100))
})
def __call__(self, call):
"""Execute service call."""
output = pypck.lcn_defs.OutputPort[call.data[CONF_OUTPUT]]
brightness = call.data[CONF_BRIGHTNESS]
address_connection = self.get_address_connection(call)
address_connection.rel_output(output.value, brightness)
class OutputToggle(LcnServiceCall):
"""Toggle output port."""
schema = LcnServiceCall.schema.extend({
vol.Required(CONF_OUTPUT): vol.All(vol.Upper, vol.In(OUTPUT_PORTS)),
vol.Optional(CONF_TRANSITION, default=0):
vol.All(vol.Coerce(float), vol.Range(min=0., max=486.))
})
def __call__(self, call):
"""Execute service call."""
output = pypck.lcn_defs.OutputPort[call.data[CONF_OUTPUT]]
transition = pypck.lcn_defs.time_to_ramp_value(
call.data[CONF_TRANSITION] * 1000)
address_connection = self.get_address_connection(call)
address_connection.toggle_output(output.value, transition)
class Relays(LcnServiceCall):
"""Set the relays status."""
schema = LcnServiceCall.schema.extend({
vol.Required(CONF_STATE): is_relays_states_string})
def __call__(self, call):
"""Execute service call."""
states = [pypck.lcn_defs.RelayStateModifier[state]
for state in call.data[CONF_STATE]]
address_connection = self.get_address_connection(call)
address_connection.control_relays(states)
class Led(LcnServiceCall):
"""Set the led state."""
schema = LcnServiceCall.schema.extend({
vol.Required(CONF_LED): vol.All(vol.Upper, vol.In(LED_PORTS)),
vol.Required(CONF_STATE): vol.All(vol.Upper, vol.In(LED_STATUS))})
def __call__(self, call):
"""Execute service call."""
led = pypck.lcn_defs.LedPort[call.data[CONF_LED]]
led_state = pypck.lcn_defs.LedStatus[
call.data[CONF_STATE]]
address_connection = self.get_address_connection(call)
address_connection.control_led(led, led_state)
class VarAbs(LcnServiceCall):
"""Set absolute value of a variable or setpoint.
Variable has to be set as counter!
Regulator setpoints can also be set using R1VARSETPOINT, R2VARSETPOINT.
"""
schema = LcnServiceCall.schema.extend({
vol.Required(CONF_VARIABLE): vol.All(vol.Upper,
vol.In(VARIABLES + SETPOINTS)),
vol.Optional(CONF_VALUE, default=0):
vol.All(vol.Coerce(int), vol.Range(min=0)),
vol.Optional(CONF_UNIT_OF_MEASUREMENT, default='native'):
vol.All(vol.Upper, vol.In(VAR_UNITS))
})
def __call__(self, call):
"""Execute service call."""
var = pypck.lcn_defs.Var[call.data[CONF_VARIABLE]]
value = call.data[CONF_VALUE]
unit = pypck.lcn_defs.VarUnit.parse(
call.data[CONF_UNIT_OF_MEASUREMENT])
address_connection = self.get_address_connection(call)
address_connection.var_abs(var, value, unit)
class VarReset(LcnServiceCall):
"""Reset value of variable or setpoint."""
schema = LcnServiceCall.schema.extend({
vol.Required(CONF_VARIABLE): vol.All(vol.Upper,
vol.In(VARIABLES + SETPOINTS))
})
def __call__(self, call):
"""Execute service call."""
var = pypck.lcn_defs.Var[call.data[CONF_VARIABLE]]
address_connection = self.get_address_connection(call)
address_connection.var_reset(var)
class VarRel(LcnServiceCall):
"""Shift value of a variable, setpoint or threshold."""
schema = LcnServiceCall.schema.extend({
vol.Required(CONF_VARIABLE):
vol.All(vol.Upper, vol.In(VARIABLES + SETPOINTS + THRESHOLDS)),
vol.Optional(CONF_VALUE, default=0): int,
vol.Optional(CONF_UNIT_OF_MEASUREMENT, default='native'):
vol.All(vol.Upper, vol.In(VAR_UNITS)),
vol.Optional(CONF_RELVARREF, default='current'):
vol.All(vol.Upper, vol.In(RELVARREF))
})
def __call__(self, call):
"""Execute service call."""
var = pypck.lcn_defs.Var[call.data[CONF_VARIABLE]]
value = call.data[CONF_VALUE]
unit = pypck.lcn_defs.VarUnit.parse(
call.data[CONF_UNIT_OF_MEASUREMENT])
value_ref = pypck.lcn_defs.RelVarRef[
call.data[CONF_RELVARREF]]
address_connection = self.get_address_connection(call)
address_connection.var_rel(var, value, unit, value_ref)
class LockRegulator(LcnServiceCall):
"""Locks a regulator setpoint."""
schema = LcnServiceCall.schema.extend({
vol.Required(CONF_SETPOINT): vol.All(vol.Upper, vol.In(SETPOINTS)),
vol.Optional(CONF_STATE, default=False): bool,
})
def __call__(self, call):
"""Execute service call."""
setpoint = pypck.lcn_defs.Var[call.data[CONF_SETPOINT]]
state = call.data[CONF_STATE]
reg_id = pypck.lcn_defs.Var.to_set_point_id(setpoint)
address_connection = self.get_address_connection(call)
address_connection.lock_regulator(reg_id, state)
class SendKeys(LcnServiceCall):
"""Sends keys (which executes bound commands)."""
schema = LcnServiceCall.schema.extend({
vol.Required(CONF_KEYS): cv.matches_regex(r'^([a-dA-D][1-8])+$'),
vol.Optional(CONF_STATE, default='hit'):
vol.All(vol.Upper, vol.In(SENDKEYCOMMANDS)),
vol.Optional(CONF_TIME, default=0): vol.All(int, vol.Range(min=0)),
vol.Optional(CONF_TIME_UNIT, default='s'): vol.All(vol.Upper,
vol.In(TIME_UNITS))
})
def __call__(self, call):
"""Execute service call."""
address_connection = self.get_address_connection(call)
keys = [[False] * 8 for i in range(4)]
key_strings = zip(call.data[CONF_KEYS][::2],
call.data[CONF_KEYS][1::2])
for table, key in key_strings:
table_id = ord(table) - 65
key_id = int(key) - 1
keys[table_id][key_id] = True
delay_time = call.data[CONF_TIME]
if delay_time != 0:
hit = pypck.lcn_defs.SendKeyCommand.HIT
if pypck.lcn_defs.SendKeyCommand[
call.data[CONF_STATE]] != hit:
raise ValueError('Only hit command is allowed when sending'
' deferred keys.')
delay_unit = pypck.lcn_defs.TimeUnit.parse(
call.data[CONF_TIME_UNIT])
address_connection.send_keys_hit_deferred(
keys, delay_time, delay_unit)
else:
state = pypck.lcn_defs.SendKeyCommand[
call.data[CONF_STATE]]
address_connection.send_keys(keys, state)
class LockKeys(LcnServiceCall):
"""Lock keys."""
schema = LcnServiceCall.schema.extend({
vol.Optional(CONF_TABLE, default='a'): cv.matches_regex(r'^[a-dA-D]$'),
vol.Required(CONF_STATE): is_key_lock_states_string,
vol.Optional(CONF_TIME, default=0): vol.All(int, vol.Range(min=0)),
vol.Optional(CONF_TIME_UNIT, default='s'): vol.All(vol.Upper,
vol.In(TIME_UNITS))
})
def __call__(self, call):
"""Execute service call."""
address_connection = self.get_address_connection(call)
states = [pypck.lcn_defs.KeyLockStateModifier[state]
for state in call.data[CONF_STATE]]
table_id = ord(call.data[CONF_TABLE]) - 65
delay_time = call.data[CONF_TIME]
if delay_time != 0:
if table_id != 0:
raise ValueError('Only table A is allowed when locking keys'
' for a specific time.')
delay_unit = pypck.lcn_defs.TimeUnit.parse(
call.data[CONF_TIME_UNIT])
address_connection.lock_keys_tab_a_temporary(
delay_time, delay_unit, states)
else:
address_connection.lock_keys(table_id, states)
address_connection.request_status_locked_keys_timeout()
class DynText(LcnServiceCall):
"""Send dynamic text to LCN-GTxD displays."""
schema = LcnServiceCall.schema.extend({
vol.Required(CONF_ROW): vol.All(int, vol.Range(min=1, max=4)),
vol.Required(CONF_TEXT): vol.All(str, vol.Length(max=60))
})
def __call__(self, call):
"""Execute service call."""
row_id = call.data[CONF_ROW] - 1
text = call.data[CONF_TEXT]
address_connection = self.get_address_connection(call)
address_connection.dyn_text(row_id, text)
class Pck(LcnServiceCall):
"""Send arbitrary PCK command."""
schema = LcnServiceCall.schema.extend({
vol.Required(CONF_PCK): str
})
def __call__(self, call):
"""Execute service call."""
pck = call.data[CONF_PCK]
address_connection = self.get_address_connection(call)
address_connection.pck(pck)
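# --- illustrative note (not part of the original module) ---
# Each LcnServiceCall subclass above is a callable with its own voluptuous
# schema; the LCN component instantiates it with hass and registers the
# instance as the service handler, roughly:
#     hass.services.register('lcn', 'output_abs', OutputAbs(hass),
#                            schema=OutputAbs.schema)
# The exact service names and registration site live in the component setup
# and are assumed here.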
|
|
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PaymentGatewayAccountSetting(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_fields': 'str',
'authorization_code': 'str',
'credential_status': 'str',
'merchant_id': 'str'
}
attribute_map = {
'api_fields': 'apiFields',
'authorization_code': 'authorizationCode',
'credential_status': 'credentialStatus',
'merchant_id': 'merchantId'
}
def __init__(self, api_fields=None, authorization_code=None, credential_status=None, merchant_id=None): # noqa: E501
"""PaymentGatewayAccountSetting - a model defined in Swagger""" # noqa: E501
self._api_fields = None
self._authorization_code = None
self._credential_status = None
self._merchant_id = None
self.discriminator = None
if api_fields is not None:
self.api_fields = api_fields
if authorization_code is not None:
self.authorization_code = authorization_code
if credential_status is not None:
self.credential_status = credential_status
if merchant_id is not None:
self.merchant_id = merchant_id
@property
def api_fields(self):
"""Gets the api_fields of this PaymentGatewayAccountSetting. # noqa: E501
# noqa: E501
:return: The api_fields of this PaymentGatewayAccountSetting. # noqa: E501
:rtype: str
"""
return self._api_fields
@api_fields.setter
def api_fields(self, api_fields):
"""Sets the api_fields of this PaymentGatewayAccountSetting.
# noqa: E501
:param api_fields: The api_fields of this PaymentGatewayAccountSetting. # noqa: E501
:type: str
"""
self._api_fields = api_fields
@property
def authorization_code(self):
"""Gets the authorization_code of this PaymentGatewayAccountSetting. # noqa: E501
# noqa: E501
:return: The authorization_code of this PaymentGatewayAccountSetting. # noqa: E501
:rtype: str
"""
return self._authorization_code
@authorization_code.setter
def authorization_code(self, authorization_code):
"""Sets the authorization_code of this PaymentGatewayAccountSetting.
# noqa: E501
:param authorization_code: The authorization_code of this PaymentGatewayAccountSetting. # noqa: E501
:type: str
"""
self._authorization_code = authorization_code
@property
def credential_status(self):
"""Gets the credential_status of this PaymentGatewayAccountSetting. # noqa: E501
# noqa: E501
:return: The credential_status of this PaymentGatewayAccountSetting. # noqa: E501
:rtype: str
"""
return self._credential_status
@credential_status.setter
def credential_status(self, credential_status):
"""Sets the credential_status of this PaymentGatewayAccountSetting.
# noqa: E501
:param credential_status: The credential_status of this PaymentGatewayAccountSetting. # noqa: E501
:type: str
"""
self._credential_status = credential_status
@property
def merchant_id(self):
"""Gets the merchant_id of this PaymentGatewayAccountSetting. # noqa: E501
# noqa: E501
:return: The merchant_id of this PaymentGatewayAccountSetting. # noqa: E501
:rtype: str
"""
return self._merchant_id
@merchant_id.setter
def merchant_id(self, merchant_id):
"""Sets the merchant_id of this PaymentGatewayAccountSetting.
# noqa: E501
:param merchant_id: The merchant_id of this PaymentGatewayAccountSetting. # noqa: E501
:type: str
"""
self._merchant_id = merchant_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PaymentGatewayAccountSetting, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PaymentGatewayAccountSetting):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
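# --- illustrative sketch (not part of the generated module) ---
# Round trip through the generated model; all field values are placeholders,
# not real gateway credentials.
if __name__ == '__main__':
    setting = PaymentGatewayAccountSetting(
        api_fields='apiKey,apiSecret',
        authorization_code='auth-code-placeholder',
        credential_status='verified',
        merchant_id='merchant-123')
    print(setting.to_dict())
    print(setting == PaymentGatewayAccountSetting(**setting.to_dict()))  # True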
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model saving."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.training import training as training_module
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
class TestWeightSavingAndLoading(test.TestCase, parameterized.TestCase):
def test_weight_loading(self):
with self.test_session():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3)(a)
b = keras.layers.Dense(1)(x)
model = keras.models.Model(a, b)
x = np.random.random((3, 2))
ref_y = model.predict(x)
weights = model.get_weights()
model.set_weights(weights)
y = model.predict(x)
self.assertAllClose(ref_y, y)
with self.assertRaises(ValueError):
model.set_weights(weights[1:])
with self.assertRaises(ValueError):
model.set_weights(weights[::-1])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
no_extension_path = os.path.join(temp_dir, 'test')
model.save_weights(no_extension_path, save_format='tf')
model.load_weights(no_extension_path)
y = model.predict(x)
self.assertAllClose(ref_y, y)
if h5py is None:
return # Skip rest of test if H5py isn't available.
h5_path = os.path.join(temp_dir, 'test.h5')
model.save_weights(h5_path)
model.load_weights(h5_path)
y = model.predict(x)
self.assertAllClose(ref_y, y)
model.load_weights(h5_path, by_name=True)
y = model.predict(x)
self.assertAllClose(ref_y, y)
model.save_weights(no_extension_path, save_format='hdf5')
model.load_weights(no_extension_path)
y = model.predict(x)
self.assertAllClose(ref_y, y)
def test_weight_preprocessing(self):
input_dim = 3
output_dim = 3
size = 2
cases = [
[
(keras.layers.Bidirectional(keras.layers.SimpleRNN(2))),
[np.random.random((2, 1)), np.random.random((2, 1))],
(None, 3, 2),
],
[
(keras.layers.TimeDistributed(keras.layers.Dense(1))),
[np.random.random((2, 1)), np.random.random((1,))],
(None, 3, 2),
],
[
(keras.layers.Conv1D(output_dim, size, use_bias=False)),
[np.random.random((output_dim, input_dim, size, 1))],
(None, 4, input_dim),
],
[
(keras.layers.Conv2D(output_dim, size,
use_bias=False, data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size))],
(None, input_dim, 4, 4),
],
[
(keras.layers.Conv2DTranspose(output_dim, size,
use_bias=False,
data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size))],
(None, input_dim, 4, 4),
],
[
(keras.layers.Conv2DTranspose(output_dim, size,
use_bias=False,
data_format='channels_last')),
[np.random.random((size, size, input_dim, output_dim))],
(None, 4, 4, input_dim),
],
[
(keras.layers.Conv3D(output_dim, size,
use_bias=False, data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size, size))],
(None, input_dim, 4, 4, 4),
],
[
(keras.layers.GRU(output_dim)),
[np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,))],
(None, 4, input_dim),
],
[
(keras.layers.LSTM(output_dim)),
[np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,))],
(None, 4, input_dim),
],
]
for layer, weights, input_shape in cases:
layer.build(input_shape)
_ = keras.engine.saving.preprocess_weights_for_loading(
layer, weights, original_keras_version='1')
model = keras.models.Sequential([keras.layers.Dense(2, input_dim=2)])
_ = keras.engine.saving.preprocess_weights_for_loading(
model, model.weights, original_keras_version='1')
x = keras.Input((2,))
y = keras.layers.Dense(2)(x)
model = keras.models.Model(x, y)
_ = keras.engine.saving.preprocess_weights_for_loading(
model, model.weights, original_keras_version='1')
@parameterized.named_parameters(
('gru', keras.layers.GRU, {
'units': 2,
'input_shape': (3, 5)
}),
('gru_with_reset_after', keras.layers.GRU, {
'units': 2,
'input_shape': (3, 5),
'reset_after': True
}),
('lstm', keras.layers.LSTM, {
'units': 2,
'input_shape': (3, 5)
}),
('cudnngru', keras.layers.CuDNNGRU, {
'units': 2,
'input_shape': (3, 5)
}),
('cudnnlstm', keras.layers.CuDNNLSTM, {
'units': 2,
'input_shape': (3, 5)
}))
def test_preprocess_weights_for_loading_rnn_should_be_idempotent(
self, layer_class, layer_args):
with self.test_session():
layer = layer_class(**layer_args)
layer.build(input_shape=layer_args.get('input_shape'))
weights1 = layer.get_weights()
weights2 = keras.engine.saving.preprocess_weights_for_loading(
layer, weights1)
_ = [
self.assertAllClose(x, y, rtol=1e-05)
for (x, y) in zip(weights1, weights2)
]
def test_sequential_weight_loading(self):
if h5py is None:
return
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
num_hidden = 5
input_dim = 3
batch_size = 5
num_classes = 2
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
x = np.random.random((batch_size, input_dim))
ref_y = model.predict(x)
model.save_weights(h5_path)
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
model.load_weights(h5_path)
y = model.predict(x)
self.assertAllClose(y, ref_y)
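# --- illustrative sketch (not part of the original test module) ---
# Compact version of the HDF5 save/load round trip that the whole-model tests
# below exercise repeatedly; the helper name is hypothetical and the temp-file
# handling just mirrors the tempfile pattern used in the tests (requires h5py).
def _h5_round_trip_predictions(model, x):
    fd, fname = tempfile.mkstemp('.h5')
    try:
        keras.models.save_model(model, fname)
        restored = keras.models.load_model(fname)
        return model.predict(x), restored.predict(x)
    finally:
        os.close(fd)
        os.remove(fname)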
class TestWholeModelSaving(test.TestCase):
def test_sequential_model_saving(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
new_model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# test that new updates are the same with both models
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
new_model.train_on_batch(x, y)
out = model.predict(x)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_sequential_model_saving_2(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
# test with custom optimizer, loss
class CustomOp(keras.optimizers.RMSprop):
pass
def custom_loss(y_true, y_pred):
return keras.losses.mse(y_true, y_pred)
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss=custom_loss, optimizer=CustomOp(), metrics=['acc'])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(
fname,
custom_objects={'CustomOp': CustomOp,
'custom_loss': custom_loss})
os.close(fd)
os.remove(fname)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_functional_model_saving(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
inputs = keras.layers.Input(shape=(3,))
x = keras.layers.Dense(2)(inputs)
output = keras.layers.Dense(3)(x)
model = keras.models.Model(inputs, output)
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_saving_without_compilation(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
def test_saving_with_tf_optimizer(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse',
optimizer=training_module.AdadeltaOptimizer(0.1),
metrics=['acc'])
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
def test_saving_right_after_compilation(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
model._make_train_function()
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
def test_saving_lambda_numpy_array_arguments(self):
if h5py is None:
self.skipTest('h5py required to run this test')
mean = np.random.random((4, 2, 3))
std = np.abs(np.random.random((4, 2, 3))) + 1e-5
inputs = keras.layers.Input(shape=(4, 2, 3))
output = keras.layers.Lambda(lambda image, mu, std: (image - mu) / std,
arguments={'mu': mean, 'std': std})(inputs)
model = keras.models.Model(inputs, output)
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
self.assertAllClose(mean, model.layers[1].arguments['mu'])
self.assertAllClose(std, model.layers[1].arguments['std'])
def test_saving_model_with_long_layer_names(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
# This layer name will make the `layers_name` HDF5 attribute blow
# out of proportion. Note that it fits into the internal HDF5
# attribute memory limit on its own but because h5py converts
# the list of layer names into numpy array, which uses the same
# amount of memory for every item, it increases the memory
# requirements substantially.
x = keras.Input(shape=(2,), name='input_' + ('x' * (2**15)))
f = x
for i in range(4):
f = keras.layers.Dense(2, name='dense_%d' % (i,))(f)
model = keras.Model(inputs=[x], outputs=[f])
model.compile(loss='mse', optimizer='adam', metrics=['acc'])
x = np.random.random((1, 2))
y = np.random.random((1, 2))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
# Check that the HDF5 files contains chunked array
# of layer names.
with h5py.File(fname, 'r') as h5file:
num_names_arrays = len([attr for attr in h5file['model_weights'].attrs
if attr.startswith('layer_names')])
# The chunking of the layer names array should have happened.
self.assertGreater(num_names_arrays, 0)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Cleanup
os.close(fd)
os.remove(fname)
def test_saving_model_with_long_weights_names(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
x = keras.Input(shape=(2,), name='nested_model_input')
f = x
for i in range(4):
f = keras.layers.Dense(2, name='nested_model_dense_%d' % (i,))(f)
# This layer name will make the `weight_names`
# HDF5 attribute blow out of proportion.
f = keras.layers.Dense(2, name='nested_model_output' + ('x' * (2**14)))(f)
nested_model = keras.Model(inputs=[x], outputs=[f], name='nested_model')
x = keras.Input(shape=(2,), name='outer_model_input')
f = nested_model(x)
f = keras.layers.Dense(2, name='outer_model_output')(f)
model = keras.Model(inputs=[x], outputs=[f])
model.compile(loss='mse', optimizer='adam', metrics=['acc'])
x = np.random.random((1, 2))
y = np.random.random((1, 2))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
# Check that the HDF5 file contains a chunked array
# of weight names.
with h5py.File(fname, 'r') as h5file:
num_weight_arrays = len(
[attr for attr in h5file['model_weights']['nested_model'].attrs
if attr.startswith('weight_names')])
# The chunking of the weight names array should have happened.
self.assertGreater(num_weight_arrays, 0)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Cleanup
os.close(fd)
os.remove(fname)
def test_model_saving_to_pre_created_h5py_file(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
inputs = keras.Input(shape=(3,))
x = keras.layers.Dense(2)(inputs)
outputs = keras.layers.Dense(3)(x)
model = keras.Model(inputs, outputs)
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.Adam(),
metrics=[keras.metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
with h5py.File(fname, mode='r+') as h5file:
keras.models.save_model(model, h5file)
loaded_model = keras.models.load_model(h5file)
out2 = loaded_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Test non-default options in h5
with h5py.File('_', driver='core',
backing_store=False) as h5file:
keras.models.save_model(model, h5file)
loaded_model = keras.models.load_model(h5file)
out2 = loaded_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Cleanup
os.close(fd)
os.remove(fname)
class SubclassedModel(training.Model):
def __init__(self):
super(SubclassedModel, self).__init__()
self.x_layer = keras.layers.Dense(3)
self.b_layer = keras.layers.Dense(1)
def call(self, a):
return self.b_layer(self.x_layer(a))
class TestWeightSavingAndLoadingTFFormat(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_tensorflow_format_overwrite(self):
with self.test_session() as session:
model = SubclassedModel()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
executing_eagerly = context.executing_eagerly()
model(x) # pylint: disable=not-callable
if not executing_eagerly:
session.run([v.initializer for v in model.variables])
model.save_weights(prefix, save_format='tensorflow')
model.save_weights(prefix, save_format='tensorflow', overwrite=True)
with self.assertRaises(EOFError):
# Indirectly tests that the user is prompted
model.save_weights(prefix, save_format='tensorflow', overwrite=False)
def test_no_graph_pollution(self):
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.test_session(graph) as session:
model = SubclassedModel()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
model(x) # pylint: disable=not-callable
session.run([v.initializer for v in model.variables])
model.save_weights(prefix, save_format='tensorflow')
op_count = len(graph.get_operations())
model.save_weights(prefix, save_format='tensorflow')
self.assertEqual(len(graph.get_operations()), op_count)
model.load_weights(prefix)
op_count = len(graph.get_operations())
model.load_weights(prefix)
self.assertEqual(len(graph.get_operations()), op_count)
def _weight_loading_test_template(self, make_model_fn):
with self.test_session() as session:
model = make_model_fn()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
executing_eagerly = context.executing_eagerly()
ref_y_tensor = model(x)
if not executing_eagerly:
session.run([v.initializer for v in model.variables])
ref_y = self.evaluate(ref_y_tensor)
model.save_weights(prefix, save_format='tf')
for v in model.variables:
self.evaluate(
v.assign(random_ops.random_normal(shape=array_ops.shape(v))))
self.addCleanup(shutil.rmtree, temp_dir)
model.load_weights(prefix)
y = self.evaluate(model(x))
self.assertAllClose(ref_y, y)
# Test restore-on-create if this is a subclassed Model (graph Networks
# will have already created their variables).
load_model = make_model_fn()
load_model.load_weights(prefix)
restore_on_create_y_tensor = load_model(x)
restore_on_create_y = self.evaluate(restore_on_create_y_tensor)
self.assertAllClose(ref_y, restore_on_create_y)
@test_util.run_in_graph_and_eager_modes()
def test_weight_loading_graph_model(self):
def _make_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3)(a)
b = keras.layers.Dense(1)(x)
return keras.models.Model(a, b)
self._weight_loading_test_template(_make_graph_model)
@test_util.run_in_graph_and_eager_modes()
def test_weight_loading_subclassed_model(self):
self._weight_loading_test_template(SubclassedModel)
def _new_layer_weight_loading_test_template(
self, first_model_fn, second_model_fn, restore_init_fn):
with self.test_session() as session:
model = first_model_fn()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
executing_eagerly = context.executing_eagerly()
ref_y_tensor = model(x)
if not executing_eagerly:
session.run([v.initializer for v in model.variables])
ref_y = self.evaluate(ref_y_tensor)
model.save_weights(prefix)
for v in model.variables:
self.evaluate(
v.assign(random_ops.random_normal(shape=array_ops.shape(v))))
self.addCleanup(shutil.rmtree, temp_dir)
second_model = second_model_fn()
second_model.load_weights(prefix)
second_model(x)
self.evaluate(restore_init_fn(second_model))
second_model.save_weights(prefix)
# Check that the second model's checkpoint loads into the original model
model.load_weights(prefix)
y = self.evaluate(model(x))
self.assertAllClose(ref_y, y)
@test_util.run_in_graph_and_eager_modes()
def test_weight_loading_graph_model_added_layer(self):
def _save_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
b = keras.layers.Dense(1, name='second')(x)
return keras.models.Model(a, b)
def _restore_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
y = keras.layers.Dense(1, name='second')(x)
b = keras.layers.Dense(3, name='secondjr')(y)
return keras.models.Model(a, b)
def _restore_init_fn(restore_model):
return [v.initializer for v in restore_model.layers[-1].variables]
self._new_layer_weight_loading_test_template(
_save_graph_model, _restore_graph_model,
_restore_init_fn)
@test_util.run_in_graph_and_eager_modes()
def test_weight_loading_graph_model_added_no_weight_layer(self):
def _save_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
b = keras.layers.Dense(1, name='second')(x)
return keras.models.Model(a, b)
def _restore_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
y = keras.layers.Dropout(rate=0.1)(x)
b = keras.layers.Dense(1, name='second')(y)
return keras.models.Model(a, b)
def _restore_init_fn(restore_model):
del restore_model # unused
return []
self._new_layer_weight_loading_test_template(
_save_graph_model, _restore_graph_model,
_restore_init_fn)
@test_util.run_in_graph_and_eager_modes()
def test_weight_loading_subclassed_model_added_layer(self):
class SubclassedModelRestore(training.Model):
def __init__(self):
super(SubclassedModelRestore, self).__init__()
self.x_layer = keras.layers.Dense(3)
self.y_layer = keras.layers.Dense(3)
self.b_layer = keras.layers.Dense(1)
def call(self, a):
return self.b_layer(self.y_layer(self.x_layer(a)))
def _restore_init_fn(restore_model):
return [v.initializer for v in restore_model.y_layer.variables]
self._new_layer_weight_loading_test_template(
SubclassedModel, SubclassedModelRestore,
_restore_init_fn)
if __name__ == '__main__':
test.main()
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocessing functions for images."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import tensorflow as tf
from typing import List, Optional, Text, Tuple
from official.vision.image_classification import augment
# Calculated from the ImageNet training set
MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
IMAGE_SIZE = 224
CROP_PADDING = 32
def mean_image_subtraction(
image_bytes: tf.Tensor,
means: Tuple[float, ...],
num_channels: int = 3,
dtype: tf.dtypes.DType = tf.float32,
) -> tf.Tensor:
"""Subtracts the given means from each image channel.
For example:
means = [123.68, 116.779, 103.939]
image_bytes = mean_image_subtraction(image_bytes, means)
Note that the rank of `image` must be known.
Args:
image_bytes: a tensor of size [height, width, C].
means: a C-vector of values to subtract from each channel.
num_channels: number of color channels in the image that will be distorted.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
Returns:
the centered image.
Raises:
ValueError: If the rank of `image` is unknown, if `image` has a rank other
than three or if the number of channels in `image` doesn't match the
number of values in `means`.
"""
if image_bytes.get_shape().ndims != 3:
raise ValueError('Input must be of size [height, width, C>0]')
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
# We have a 1-D tensor of means; convert to 3-D.
# Note(b/130245863): we explicitly call `broadcast` instead of simply
# expanding dimensions for better performance.
means = tf.broadcast_to(means, tf.shape(image_bytes))
if dtype is not None:
means = tf.cast(means, dtype=dtype)
return image_bytes - means
def standardize_image(
image_bytes: tf.Tensor,
stddev: Tuple[float, ...],
num_channels: int = 3,
dtype: tf.dtypes.DType = tf.float32,
) -> tf.Tensor:
"""Divides the given stddev from each image channel.
For example:
stddev = [58.395, 57.12, 57.375]
image_bytes = standardize_image(image_bytes, stddev)
Note that the rank of `image` must be known.
Args:
image_bytes: a tensor of size [height, width, C].
stddev: a C-vector of values to divide from each channel.
num_channels: number of color channels in the image that will be distorted.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
Returns:
the standardized image.
Raises:
ValueError: If the rank of `image` is unknown, if `image` has a rank other
than three or if the number of channels in `image` doesn't match the
number of values in `stddev`.
"""
if image_bytes.get_shape().ndims != 3:
raise ValueError('Input must be of size [height, width, C>0]')
if len(stddev) != num_channels:
raise ValueError('len(stddev) must match the number of channels')
# We have a 1-D tensor of stddev; convert to 3-D.
# Note(b/130245863): we explicitly call `broadcast` instead of simply
# expanding dimensions for better performance.
stddev = tf.broadcast_to(stddev, tf.shape(image_bytes))
if dtype is not None:
stddev = tf.cast(stddev, dtype=dtype)
return image_bytes / stddev
def normalize_images(features: tf.Tensor,
mean_rgb: Tuple[float, ...] = MEAN_RGB,
stddev_rgb: Tuple[float, ...] = STDDEV_RGB,
num_channels: int = 3,
dtype: tf.dtypes.DType = tf.float32,
data_format: Text = 'channels_last') -> tf.Tensor:
"""Normalizes the input image channels with the given mean and stddev.
Args:
features: `Tensor` representing decoded images in float format.
mean_rgb: the mean of the channels to subtract.
stddev_rgb: the stddev of the channels to divide.
num_channels: the number of channels in the input image tensor.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
data_format: the format of the input image tensor
['channels_first', 'channels_last'].
Returns:
A normalized image `Tensor`.
"""
# TODO(allencwang) - figure out how to use mean_image_subtraction and
# standardize_image on batches of images and replace the following.
if data_format == 'channels_first':
stats_shape = [num_channels, 1, 1]
else:
stats_shape = [1, 1, num_channels]
if dtype is not None:
features = tf.image.convert_image_dtype(features, dtype=dtype)
if mean_rgb is not None:
mean_rgb = tf.constant(mean_rgb,
shape=stats_shape,
dtype=features.dtype)
mean_rgb = tf.broadcast_to(mean_rgb, tf.shape(features))
features = features - mean_rgb
if stddev_rgb is not None:
stddev_rgb = tf.constant(stddev_rgb,
shape=stats_shape,
dtype=features.dtype)
stddev_rgb = tf.broadcast_to(stddev_rgb, tf.shape(features))
features = features / stddev_rgb
return features
def decode_and_center_crop(image_bytes: tf.Tensor,
image_size: int = IMAGE_SIZE,
crop_padding: int = CROP_PADDING) -> tf.Tensor:
"""Crops to center of image with padding then scales image_size.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
image_size: image height/width dimension.
crop_padding: the padding size to use when centering the crop.
Returns:
A decoded and cropped image `Tensor`.
"""
decoded = image_bytes.dtype != tf.string
shape = (tf.shape(image_bytes) if decoded
else tf.image.extract_jpeg_shape(image_bytes))
image_height = shape[0]
image_width = shape[1]
padded_center_crop_size = tf.cast(
((image_size / (image_size + crop_padding)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)),
tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = tf.stack([offset_height, offset_width,
padded_center_crop_size, padded_center_crop_size])
if decoded:
image = tf.image.crop_to_bounding_box(
image_bytes,
offset_height=offset_height,
offset_width=offset_width,
target_height=padded_center_crop_size,
target_width=padded_center_crop_size)
else:
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
image = resize_image(image_bytes=image,
height=image_size,
width=image_size)
return image
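# Worked example of the padding math above (illustrative numbers only): with the
# defaults image_size=224 and crop_padding=32, the crop fraction is
# 224 / (224 + 32) = 0.875, so a 500x375 input gives
# padded_center_crop_size = int(0.875 * 375) = 328, i.e. a 328x328 center crop
# that is then resized to 224x224.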
def decode_crop_and_flip(image_bytes: tf.Tensor) -> tf.Tensor:
"""Crops an image to a random part of the image, then randomly flips.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
Returns:
A decoded and cropped image `Tensor`.
"""
decoded = image_bytes.dtype != tf.string
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
shape = (tf.shape(image_bytes) if decoded
else tf.image.extract_jpeg_shape(image_bytes))
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
shape,
bounding_boxes=bbox,
min_object_covered=0.1,
aspect_ratio_range=[0.75, 1.33],
area_range=[0.05, 1.0],
max_attempts=100,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
# Reassemble the bounding box in the format the crop op requires.
offset_height, offset_width, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_height, offset_width,
target_height, target_width])
if decoded:
cropped = tf.image.crop_to_bounding_box(
image_bytes,
offset_height=offset_height,
offset_width=offset_width,
target_height=target_height,
target_width=target_width)
else:
cropped = tf.image.decode_and_crop_jpeg(image_bytes,
crop_window,
channels=3)
# Flip to add a little more random distortion in.
cropped = tf.image.random_flip_left_right(cropped)
return cropped
def resize_image(image_bytes: tf.Tensor,
height: int = IMAGE_SIZE,
width: int = IMAGE_SIZE) -> tf.Tensor:
"""Resizes an image to a given height and width.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
height: image height dimension.
width: image width dimension.
Returns:
A tensor containing the resized image.
"""
return tf.compat.v1.image.resize(
image_bytes, [height, width], method=tf.image.ResizeMethod.BILINEAR,
align_corners=False)
def preprocess_for_eval(
image_bytes: tf.Tensor,
image_size: int = IMAGE_SIZE,
num_channels: int = 3,
mean_subtract: bool = False,
standardize: bool = False,
dtype: tf.dtypes.DType = tf.float32
) -> tf.Tensor:
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
image_size: image height/width dimension.
num_channels: number of image input channels.
mean_subtract: whether or not to apply mean subtraction.
standardize: whether or not to apply standardization.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
Returns:
A preprocessed and normalized image `Tensor`.
"""
images = decode_and_center_crop(image_bytes, image_size)
images = tf.reshape(images, [image_size, image_size, num_channels])
if mean_subtract:
images = mean_image_subtraction(image_bytes=images, means=MEAN_RGB)
if standardize:
images = standardize_image(image_bytes=images, stddev=STDDEV_RGB)
if dtype is not None:
images = tf.image.convert_image_dtype(images, dtype=dtype)
return images
def load_eval_image(filename: Text, image_size: int = IMAGE_SIZE) -> tf.Tensor:
"""Reads an image from the filesystem and applies image preprocessing.
Args:
filename: a filename path of an image.
image_size: image height/width dimension.
Returns:
A preprocessed and normalized image `Tensor`.
"""
image_bytes = tf.io.read_file(filename)
image = preprocess_for_eval(image_bytes, image_size)
return image
def build_eval_dataset(filenames: List[Text],
labels: Optional[List[int]] = None,
image_size: int = IMAGE_SIZE,
batch_size: int = 1) -> tf.data.Dataset:
"""Builds a tf.data.Dataset from a list of filenames and labels.
Args:
filenames: a list of filename paths of images.
labels: a list of labels corresponding to each image.
image_size: image height/width dimension.
batch_size: the batch size used by the dataset
Returns:
A batched tf.data.Dataset of preprocessed (image, label) pairs.
"""
if labels is None:
labels = [0] * len(filenames)
filenames = tf.constant(filenames)
labels = tf.constant(labels)
dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
dataset = dataset.map(
lambda filename, label: (load_eval_image(filename, image_size), label))
dataset = dataset.batch(batch_size)
return dataset
def preprocess_for_train(image_bytes: tf.Tensor,
image_size: int = IMAGE_SIZE,
augmenter: Optional[augment.ImageAugment] = None,
mean_subtract: bool = False,
standardize: bool = False,
dtype: tf.dtypes.DType = tf.float32) -> tf.Tensor:
"""Preprocesses the given image for training.
Args:
image_bytes: `Tensor` representing an image binary of
arbitrary size of dtype tf.uint8.
image_size: image height/width dimension.
augmenter: the image augmenter to apply.
mean_subtract: whether or not to apply mean subtraction.
standardize: whether or not to apply standardization.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
Returns:
A preprocessed and normalized image `Tensor`.
"""
images = decode_crop_and_flip(image_bytes=image_bytes)
images = resize_image(images, height=image_size, width=image_size)
if mean_subtract:
images = mean_image_subtraction(image_bytes=images, means=MEAN_RGB)
if standardize:
images = standardize_image(image_bytes=images, stddev=STDDEV_RGB)
if augmenter is not None:
images = augmenter.distort(images)
if dtype is not None:
images = tf.image.convert_image_dtype(images, dtype)
return images
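# A minimal sketch (not part of this module) of a training-side counterpart to
# build_eval_dataset() above; the filenames, labels and batch size are
# placeholders for illustration only.
def _example_train_dataset(filenames: List[Text],
                           labels: List[int],
                           image_size: int = IMAGE_SIZE,
                           batch_size: int = 32) -> tf.data.Dataset:
  """Builds a batched training dataset from image files (illustrative only)."""
  dataset = tf.data.Dataset.from_tensor_slices(
      (tf.constant(filenames), tf.constant(labels)))
  # Read raw JPEG bytes and apply the random-crop/flip training preprocessing.
  dataset = dataset.map(
      lambda filename, label: (preprocess_for_train(tf.io.read_file(filename),
                                                    image_size=image_size,
                                                    mean_subtract=True,
                                                    standardize=True), label))
  return dataset.batch(batch_size)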
|
|
#!/usr/bin/env python
import r2pipe
import sys
import os
import json
import re
import networkx as nx
from time import time
from datetime import datetime
from argparse import ArgumentParser
from base64 import b64decode
from graphityOut import toNeo, fromNeo, printGraph, printGraphInfo, plotSeGraph
from graphityUtils import gimmeDatApiName, sha1hash, getAllAttributes, is_ascii, Hvalue
import graphityFunc
# Works, takes its time, sometimes assigns wrong names to functions
def loadFlirts():
try:
# load FLIRT signatures from local flirt directory
flirtDir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'signatures')
sigFiles = [f for f in os.listdir(flirtDir) if os.path.isfile(os.path.join(flirtDir, f))]
for sigFile in sigFiles:
r2cmd = "zF %s" % os.path.join(flirtDir, sigFile)
R2PY.cmd(r2cmd)
except Exception as e:
print str(e) + " FAIL loading FLIRT sig file"
# Too slow for now, waiting for fix
# DEPRECATED
def loadZigs():
try:
# load directory of zigs
print('Loading msvcrt.zig {:%Y-%m-%d %H:%M:%S}'.format(datetime.now()))
zigfile = '/mnt/hgfs/projects/badcoding/R2PYpe/libs/msvcrt.zig'
r2cmd = ". %s" % zigfile
R2PY.cmd(r2cmd)
print('msvcrt.zig loaded {:%Y-%m-%d %H:%M:%S}'.format(datetime.now()))
toScan = getCodeSections()
for section in toScan:
r2cmd = ".z/ %d %d" % (section[0], section[1])
R2PY.cmd(r2cmd)
print('msvcrt.zig scan on code section(s) finished {:%Y-%m-%d %H:%M:%S}'.format(datetime.now()))
except Exception as e:
print str(e)
# Checks whether an address is located in an executable section
def isValidCode(callAddress, sectionsList):
# sectionsList contains executable sections as 2-element lists, containing start and end of each section
for execSection in sectionsList:
if int(callAddress, 16) >= execSection[0] and int(callAddress, 16) < execSection[1]:
return True
return False
# Returns a list of executable sections
def getCodeSections():
returnSections = []
# regular expression to pick out the executable section(s)
execSection = re.compile("perm=....x")
# will return the section table from radare2
sections = R2PY.cmd("iS")
sectionData = {}
for line in sections.splitlines():
if re.search(execSection, line):
for element in line.split():
items = element.split('=')
sectionData[items[0]] = items[1]
start = int(sectionData['vaddr'], 16)
end = start + int(sectionData['vsz'])
psize = int(sectionData['sz'])
returnSections.append([start, end, psize])
return returnSections
# Returns an executable's imports as a list
def getIat():
iatlist = []
cmd = "iij"
iatjson = json.loads(R2PY.cmd(cmd))
for item in iatjson:
iatlist.append(hex(item['plt']))
return iatlist
# Returns a dictionary of xrefs to symbols
def crossRefScan():
cmd = "axtj @@ sym.*"
finalCalls = {}
# fixing the JSON...
temp = R2PY.cmd(cmd).replace('\n', ',')
temp = "[" + temp + "]"
xrefj = json.loads(temp)
for xrefitem in xrefj:
for xreflevel2 in xrefitem:
# not data xref means its code or call
if xreflevel2['type'] != 'd':
finalCalls[hex(xreflevel2['from'])] = xreflevel2['opcode']
pass
# a data xref potentially means an API referenced by register; note these are rather uncommon in the long list of symbol refs
# thus, the bottleneck in parsing speed lies in the number of refs
if xreflevel2['type'] == 'd' and ( xreflevel2['opcode'].startswith('mov') or xreflevel2['opcode'].startswith('lea') ):
# 'grepping' out the register from mov/lea operation
register = xreflevel2['opcode'].split()[1].replace(',','')
# disassemble downwards; maybe smarter to disassemble until the end of the function, but it's possible that there is no function at all
# TODO find end of function, just in case
cmd = "pd 300 @ " + hex(xreflevel2['from'])
moreDisasm = R2PY.cmd(cmd)
# possible branches towards target
realCall = "call %s" % register
aJmp = "jmp %s" % register
for disasmLine in moreDisasm.splitlines()[1:]:
if realCall in disasmLine or aJmp in disasmLine:
#found a call!!
temp = disasmLine + ";" + xreflevel2['opcode'].split(',')[1].rstrip()
tempSplit = temp.split()
finalCalls[hex(int(tempSplit[0], 16))] = ' '.join(tempSplit[1:])
elif register in disasmLine:
# TODO if mov dword abc, reg is found -> follow abc?
# TODO could be parsed in more detail, e.g. mov dword, reg won't change the reg
#print disasmLine
break
#pass
return finalCalls
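# Illustrative shape of the register-based API call pattern resolved above
# (addresses and symbol names are made up for demonstration):
#   0x00401000   mov esi, dword [sym.imp.KERNEL32.dll_CreateFileA]   ; data xref to the symbol
#   ...
#   0x00401020   call esi                                            ; recorded as an API call site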
# Parses the binary for strings and their references to nodes
def stringScan(debugDict):
# Workflow is: get string, get xrefs to string if any, get functions of xrefs if any; fit node in graph with the string
allMyStrings = []
# izzj parses entire binary
stringCmd = "izzj"
strings = R2PY.cmd(stringCmd)
parsedStrings = json.loads(strings)
debugDict['stringsDangling'] = []
debugDict['stringsNoRef'] = []
i = 0
j = 1
while i < len(parsedStrings):
stringItem = parsedStrings[i]
# Strings when retrieved through izzj command are BASE64 encoded
thatOneString = b64decode(stringItem['string']).replace('\\',' \\\\ ')
thatOneString = thatOneString.replace('\'', '')
if is_ascii(thatOneString):
xrefCmd = "axtj @ " + hex(stringItem['vaddr'])
stringXrefsJ = R2PY.cmd(xrefCmd)
if stringXrefsJ:
stringXrefs = json.loads(stringXrefsJ)
# check whether string item is root of list of strings
j = 1
lastItem = stringItem
while (i+j) < len(parsedStrings):
nextStringItem = parsedStrings[i+j]
lastAddr = lastItem['vaddr']
lastSize = lastItem['size']
# string offsets are 4 byte aligned, TODO check whether this is always the case
padding = 4 - (lastSize % 4)
if padding == 4:
padding = 0
nextAddr = lastAddr + lastSize + padding
if nextAddr != nextStringItem['vaddr'] or hasXref(hex(nextStringItem['vaddr'])):
# end.. exit here
break
else:
thatOneString = thatOneString + "|" + b64decode(nextStringItem['string'])
j = j + 1
lastItem = nextStringItem
# iterate refs on string, if any
for ref in stringXrefs:
stringAddr = hex(ref['from'])
stringFuncRefCmd = "?v $FB @ " + stringAddr
stringFuncRef = R2PY.cmd(stringFuncRefCmd)
if stringFuncRef != '0x0':
allMyStrings.append([stringAddr, stringFuncRef, thatOneString])
else:
# TODO these are still potentially useful strings; see how to fit them into the graphs and db
print "DANGLING STRING NO FUNCREF %s %s" % (stringAddr, thatOneString)
debugDict['stringsDangling'].append(thatOneString)
else:
debugDict['stringsNoRef'].append(thatOneString)
if j > 1:
i = i + j
else:
i = i + 1
debugDict['stringsDanglingTotal'] = len(debugDict['stringsDangling'])
debugDict['stringsNoRefTotal'] = len(debugDict['stringsNoRef'])
return allMyStrings
# Tests whether xrefs exist for a given address
def hasXref(vaddr):
refs = R2PY.cmd("axtj @ " + vaddr)
if refs:
return True
else:
return False
# Creating the NetworkX graph, nodes are functions, edges are calls or callbacks
def createSeGraph():
graphity = nx.DiGraph()
debugDict = {}
functions = R2PY.cmd("aflj")
if functions:
functionList=json.loads(functions)
else:
functionList = []
sectionsList = getCodeSections()
xlen = 0
for execSec in sectionsList:
xlen = xlen + execSec[2]
debugDict['xsectionsize'] = xlen
# CREATING THE GRAPH
refsGlobalVar = 0
refsUnrecognized = 0
refsFunc = 0
debugDict['functions'] = len(functionList)
for item in functionList:
graphity.add_node(hex(item['offset']), size=item['size'], calltype=item['calltype'], calls=[], apicallcount=0, strings=[])
for item in functionList:
for xref in item['callrefs']:
if xref['type'] == 'C':
# If an added edge includes a non-existent node, the node will be added, but without the necessary attributes
# That's why we iterate twice; this could theoretically be sped up but needs testing
if hex(xref['addr']) in graphity:
graphity.add_edge(hex(item['offset']), hex(xref['addr']), pos=hex(xref['at']))
refsFunc = refsFunc + 1
elif hex(xref['addr']) in getIat():
pass
elif not isValidCode(hex(xref['addr']), sectionsList):
print "DANGLING call to address outside code section, glob var, dynamic API loading %s -> %s" % (hex(item['offset']), hex(xref['addr']))
refsGlobalVar = refsGlobalVar + 1
else:
print "FAIL: Call to code thats not a function, an import/symbol or otherwise recognized. Missed function perhaps. %s -> %s" % (hex(item['offset']), hex(xref['addr']))
refsUnrecognized = refsUnrecognized + 1
print '* %s Graph created with NetworkX ' % str(datetime.now())
debugDict['refsFunctions'] = refsFunc
debugDict['refsGlobalVar'] = refsGlobalVar
debugDict['refsUnrecognized'] = refsUnrecognized
#loadFlirts()
apiRefs = crossRefScan()
callNum = len(apiRefs)
missesNum = 0
# FITTING GRAPH WITH API REFS
for call in apiRefs:
# get the address of the function, that contains the call to a given symbol
refAddressCmd = "?v $FB @ " + call
funcAddress = R2PY.cmd(refAddressCmd)
if funcAddress in graphity:
# node(funcAddress) has attribute calls, which contains a list of API calls
api = gimmeDatApiName(apiRefs[call])
graphity.node[funcAddress]['calls'].append([call, api])
apicount = graphity.node[funcAddress]['apicallcount']
graphity.node[funcAddress]['apicallcount'] = apicount + 1
# detected API call reference does not resolve to a function offset, insert handling for this here
else:
print "DANGLING API CALL %s %s" % (call, apiRefs[call])
missesNum = missesNum+1
# debug: print total API refs and functionless API refs, maybe indicator for obfuscated code
print '* %s Graph extended with API calls, %d calls in total, %d dangling w/o function reference ' % (str(datetime.now()), callNum, missesNum)
debugDict['apiTotal'] = callNum
debugDict['apiMisses'] = missesNum
# FITTING GRAPH WITH STRING REFS
allTheStrings = stringScan(debugDict)
stringrefs = 0
for aString in allTheStrings:
stringAddr = aString[0]
stringFunc = aString[1]
stringData = aString[2]
# add string to respective function node in graph
if stringFunc in graphity:
graphity.node[stringFunc]['strings'].append([stringAddr, stringData])
stringrefs = stringrefs + 1
else:
print "\nFAIL: String's function not in graph %s %s" % (stringFunc, stringData)
print '* %s Graph extended with string references ' % (str(datetime.now()))
debugDict['stringsReferencedTotal'] = stringrefs
return graphity, debugDict
# Tag exports of DLLs
def analyzeExports(graphity):
exportsj = json.loads(R2PY.cmd("iEj"))
for item in exportsj:
export_address = hex(item['vaddr'])
export_name = item['name']
if export_address in graphity:
graphity.node[export_address]['type'] = 'Export'
graphity.node[export_address]['alias'] = export_name
# Removing thunks as they make my graphs fat, replace by API calls
def thunkPruning(graphity):
for aNode in graphity.nodes(data=True):
# most obvious thunks, other thunks exist too, len seen was 11, 13
# funclets that contain nothing but a jump to an import, and do not call other functions
if aNode[1]['apicallcount'] == 1 and aNode[1]['size'] == 6 and not graphity.successors(aNode[0]):
thunk = aNode[0]
thunkApi = aNode[1]['calls'][0]
# need to go on with radare from here, because graphity doesn't know all the addresses of the xrefs to thunks from within a function
# getting all xrefs on the thunk, then getting the function it's located in to get to the node of the graph
temp = R2PY.cmd("axtj " + thunk)
thunkRefs = []
if temp:
thunkRefs = json.loads(temp)
for aRef in thunkRefs:
thunkCallAddr = hex(aRef['from'])
thunkFuncRef = R2PY.cmd("?v $FB @ " + hex(aRef['from']))
# if thunk's xrefs include a detected function then add thunk as a regular API call to calls list of respective node
if thunkFuncRef != '0x0':
graphity.node[thunkFuncRef]['calls'].append([thunkCallAddr, thunkApi[1]])
# after xref to thunk has been added to all calling functions, remove thunk node from graph
graphity.remove_node(thunk)
# DEPRECATED
def fixCallbacks(apiname):
cmd = "axtj @@ sym.* | grep \"%s\"" % apiname
temp = R2PY.cmd(cmd).replace(']\n[', ',')
if temp:
callbackApis = json.loads(temp)
for item in callbackApis:
function = R2PY.cmd("?v $FB @ " + hex(item['from']))
R2PY.cmd("afr @ " + function)
# Adding edges to indirectly referenced functions, thread handlers and hook functions for now only
def tagCallbacks(graphity):
callbackList = []
for aNode in graphity.nodes(data=True):
for call in aNode[1]['calls']:
addr = ''
# TODO consider this bad practice, do something smarter, not sure yet what, consider _beginthread API etc.
# also, maybe this is fixed in radare later, so consider this code redundant by then
if 'CreateThread' in call[1]:
addr = getCallback(call[0], 3)
if 'SetWindowsHookEx' in call[1]:
addr = getCallback(call[0], 2)
if addr in graphity:
graphity.node[addr]['type'] = "Callback"
graphity.add_edge(aNode[0], addr, pos=call[0], calltype="callback")
# Parsing the handler offset out of the function arguments
def getCallback(call, argcount):
# simplistic: walk up the code until an xref to code is found; works as long as the API only receives one code ref, works well with Windows APIs
disasmMore = "pd -30 @" + call
upwards = R2PY.cmd(disasmMore)
for otherLine in reversed(upwards.splitlines()):
if 'push' in otherLine:
argcount = argcount - 1
if not argcount:
address = otherLine.split()[2]
if 'fcn.' in address:
return hex(int(address.split('.')[1], 16))
else:
return ''
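# Illustrative call-site shape handled by tagCallbacks/getCallback (made-up
# addresses); for CreateThread the thread routine is the 3rd argument, i.e. the
# 3rd push encountered when walking upwards from the call (earlier pushes omitted):
#   push fcn.00401500    ; lpStartAddress     (3rd push above the call, returned as 0x401500)
#   push 0x1000          ; dwStackSize        (2nd)
#   push 0x0             ; lpThreadAttributes (1st)
#   call dword [sym.imp.KERNEL32.dll_CreateThread]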
# searching nodes and nearby nodes for patterns defined by graphityFunc.py
def functionalityScan(graphity, pattern):
# the search is performed by defining an "anchor" node, where the initial pattern is found
# the search then moves from there one level out to the surrounding nodes (the number of levels could be increased)
# pattern lists for now are kept rather small
# TODO determine distance between found patterns to see which functionalities lie close to each other
patternNum = len(pattern)
anchorList = []
allCalls = nx.get_node_attributes(graphity, 'calls')
for function in allCalls:
for call in allCalls[function]:
api = call[1]
anchorpat = pattern[0]
if anchorpat in api:
if not filter(lambda daAnchor: daAnchor['address'] == function, anchorList):
# maintain a dict of patterns per anchor to keep track of found patterns
patternCheck = {}
for item in pattern:
patternCheck[item] = False
patternCheck[anchorpat] = function
anchorList.append({'address':function, 'patterns':patternCheck})
# anchor nodes found and more than one pattern searched for
if patternNum > 1 and len(anchorList) > 0:
for anchor in anchorList:
scanNodeForApi(anchor, anchor['address'], patternNum)
if False in anchor['patterns'].values():
anchorNeighbors = nx.all_neighbors(graphity, anchor['address'])
for neighbor in anchorNeighbors:
scanNodeForApi(anchor, neighbor, patternNum)
return anchorList
# Search for a specific pattern within a node, orient by anchor pattern
def scanNodeForApi(anchor, seNode, patternNum):
for patt in anchor['patterns']:
# anchor has a dict that saves which patterns were found already
for call in graphity.node[seNode]['calls']:
api = call[1]
# found a pattern in an API call that hasn't been found before
if patt in api and anchor['patterns'][patt] == False:
anchor['patterns'][patt] = seNode
if not False in anchor['patterns'].values():
# all patterns found - done
break
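# Illustrative sketch of the pattern format consumed by functionalityScan(); the
# real patterns live in graphityFunc.funcDict, and the behavior name and API
# substrings below are made-up examples rather than actual entries. The first
# list item acts as the anchor API; the remaining items are searched for in the
# anchor node and its direct neighbors.
_examplePatterns = {'fileWriting': ['CreateFile', 'WriteFile']}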
if __name__ == '__main__':
global R2PY
parser = ArgumentParser()
parser.add_argument("input", help="Tool requires an input file, batch processing not yet implemented")
parser.add_argument("-a", "--all", action="store_true", help="Perform all analysis options - graph creation, printing the graph, printing the graph info, plotting, behavior scanning and Neo4j parsing")
parser.add_argument("-p", "--printing", action="store_true", help="Print the graph as text, as in, nodes with respective content")
parser.add_argument("-i", "--info", action="store_true", help="Print info and stats of the graph")
parser.add_argument("-l", "--plotting", action="store_true", help="Plotting the graph via pyplot")
parser.add_argument("-b", "--behavior", action="store_true", help="Scan for behaviors listed in graphityFunc.py")
parser.add_argument("-n", "--neodump", action="store_true", help="Dump graph to Neo4j (configured to flush previous data from Neo, might wanna change that)")
args = parser.parse_args()
if args.input:
R2PY = r2pipe.open(args.input)
# benchmarking :P
bench = {}
allAtts = getAllAttributes(args.input)
print '* %s R2 started analysis ' % str(datetime.now())
bench['r2_start'] = time()
R2PY.cmd("e scr.color = false")
R2PY.cmd("e asm.bytes = false")
R2PY.cmd("e asm.lines = false")
R2PY.cmd("e asm.fcnlines = false")
R2PY.cmd("e asm.xrefs = false")
R2PY.cmd("e asm.lbytes = false")
R2PY.cmd("e asm.indentspace = 0")
R2PY.cmd("e anal.autoname= false")
R2PY.cmd("e anal.jmptbl = true")
R2PY.cmd("e anal.hasnext = true")
#loadZigs()
R2PY.cmd("aaa")
R2PY.cmd("afr")
R2PY.cmd("afr @@ sym*")
bench['r2_end'] = time()
print '* %s R2 finished analysis' % str(datetime.now())
# GRAPH CREATION
graphity, debug = createSeGraph()
# DLL PROCESSING
if 'DLL' in allAtts['filetype']:
analyzeExports(graphity)
# thunkPruning
thunkPruning(graphity)
# handler tagging
tagCallbacks(graphity)
bench['graph_end'] = time()
if args.printing:
# PRINT GRAPH TO CMDLINE
print "* %s Printing the graph - nodes and node attributes" % str(datetime.now())
bench['printing_start'] = time()
printGraph(graphity)
bench['printing_end'] = time()
if args.info:
# PRINT GRAPH INFO
bench['info_start'] = time()
printGraphInfo(graphity, debug)
bench['info_end'] = time()
if args.plotting:
# GRAPH PLOTTING STUFF
#try:
print '* %s Plotting routine starting ' % str(datetime.now())
bench['plotting_start'] = time()
plotSeGraph(graphity)
bench['plotting_end'] = time()
print '* %s Plotting routine finished ' % str(datetime.now())
#except:
# print '* %s Cant plot this with pydot, too big ' % str(datetime.now())
if args.neodump:
# TO NEO STUFF
bench['neo_start'] = time()
toNeo(graphity, allAtts['sha1'], allAtts['filesize'], allAtts['filetype'])
bench['neo_end'] = time()
print '* %s Dumped to Neo4J ' % str(datetime.now())
if args.behavior:
# BEHAVIOR
print '* %s Scanning for API patterns ' % str(datetime.now())
bench['behavior_start'] = time()
allThePatterns = graphityFunc.funcDict
for patty in allThePatterns:
findings = functionalityScan(graphity, allThePatterns[patty])
for hit in findings:
if not False in hit['patterns'].values():
print "For %s found %s" % (patty, str(hit['patterns']))
bench['behavior_end'] = time()
# TODO calculate dispersion for 2-n anchor addresses
# TODO handling of LoadLib/GetPAddr. for "hiding something" question, follow GetProc return value
print '* %s Stuffs all finished ' % str(datetime.now())
# TIME
print "\n__..--*** I WANNA BE A BENCHMARK WHEN I GROW UP ***--..__"
print "__ %5f R2 Analysis" % (bench['r2_end'] - bench['r2_start'])
print "__ %5f Graph construction" % (bench['graph_end'] - bench['r2_end'])
if 'printing_start' in bench:
print "__ %5f Printing" % (bench['printing_end'] - bench['printing_start'])
if 'info_start' in bench:
print "__ %5f Info" % (bench['info_end'] - bench['info_start'])
if 'plotting_start' in bench:
print "__ %5f Plotting" % (bench['plotting_end'] - bench['plotting_start'])
if 'behavior_start' in bench:
print "__ %5f Behavior" % (bench['behavior_end'] - bench['behavior_start'])
if 'neo_start' in bench:
print "__ %5f Neo4j" % (bench['neo_end'] - bench['neo_start'])
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import os.path
import re
import signal
import time
from ducktape.services.service import Service
from ducktape.utils.util import wait_until
from ducktape.cluster.remoteaccount import RemoteCommandError
from config import KafkaConfig
from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin
from kafkatest.services.kafka import config_property
from kafkatest.services.monitor.jmx import JmxMixin
from kafkatest.services.security.minikdc import MiniKdc
from kafkatest.services.security.listener_security_config import ListenerSecurityConfig
from kafkatest.services.security.security_config import SecurityConfig
from kafkatest.version import DEV_BRANCH, LATEST_0_10_0
class KafkaListener:
def __init__(self, name, port_number, security_protocol, open=False):
self.name = name
self.port_number = port_number
self.security_protocol = security_protocol
self.open = open
def listener(self):
return "%s://:%s" % (self.name, str(self.port_number))
def advertised_listener(self, node):
return "%s://%s:%s" % (self.name, node.account.hostname, str(self.port_number))
def listener_security_protocol(self):
return "%s:%s" % (self.name, self.security_protocol)
class KafkaService(KafkaPathResolverMixin, JmxMixin, Service):
PERSISTENT_ROOT = "/mnt/kafka"
STDOUT_STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "server-start-stdout-stderr.log")
LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "kafka-log4j.properties")
# Logs such as controller.log, server.log, etc all go here
OPERATIONAL_LOG_DIR = os.path.join(PERSISTENT_ROOT, "kafka-operational-logs")
OPERATIONAL_LOG_INFO_DIR = os.path.join(OPERATIONAL_LOG_DIR, "info")
OPERATIONAL_LOG_DEBUG_DIR = os.path.join(OPERATIONAL_LOG_DIR, "debug")
# Kafka log segments etc go here
DATA_LOG_DIR_PREFIX = os.path.join(PERSISTENT_ROOT, "kafka-data-logs")
DATA_LOG_DIR_1 = "%s-1" % (DATA_LOG_DIR_PREFIX)
DATA_LOG_DIR_2 = "%s-2" % (DATA_LOG_DIR_PREFIX)
CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "kafka.properties")
# Kafka Authorizer
ACL_AUTHORIZER = "kafka.security.authorizer.AclAuthorizer"
# Old Kafka Authorizer. This is deprecated but still supported.
SIMPLE_AUTHORIZER = "kafka.security.auth.SimpleAclAuthorizer"
HEAP_DUMP_FILE = os.path.join(PERSISTENT_ROOT, "kafka_heap_dump.bin")
INTERBROKER_LISTENER_NAME = 'INTERNAL'
logs = {
"kafka_server_start_stdout_stderr": {
"path": STDOUT_STDERR_CAPTURE,
"collect_default": True},
"kafka_operational_logs_info": {
"path": OPERATIONAL_LOG_INFO_DIR,
"collect_default": True},
"kafka_operational_logs_debug": {
"path": OPERATIONAL_LOG_DEBUG_DIR,
"collect_default": False},
"kafka_data_1": {
"path": DATA_LOG_DIR_1,
"collect_default": False},
"kafka_data_2": {
"path": DATA_LOG_DIR_2,
"collect_default": False},
"kafka_heap_dump_file": {
"path": HEAP_DUMP_FILE,
"collect_default": True}
}
def __init__(self, context, num_nodes, zk, security_protocol=SecurityConfig.PLAINTEXT, interbroker_security_protocol=SecurityConfig.PLAINTEXT,
client_sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI, interbroker_sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI,
authorizer_class_name=None, topics=None, version=DEV_BRANCH, jmx_object_names=None,
jmx_attributes=None, zk_connect_timeout=5000, zk_session_timeout=6000, server_prop_overides=None, zk_chroot=None,
listener_security_config=ListenerSecurityConfig(), per_node_server_prop_overrides=None, extra_kafka_opts=""):
"""
:param context: test context
:param ZookeeperService zk:
:param dict topics: which topics to create automatically
:param str security_protocol: security protocol for clients to use
:param str interbroker_security_protocol: security protocol to use for broker-to-broker communication
:param str client_sasl_mechanism: sasl mechanism for clients to use
:param str interbroker_sasl_mechanism: sasl mechanism to use for broker-to-broker communication
:param str authorizer_class_name: which authorizer class to use
:param str version: which kafka version to use. Defaults to "dev" branch
:param jmx_object_names:
:param jmx_attributes:
:param int zk_connect_timeout:
:param int zk_session_timeout:
:param dict server_prop_overides: overrides for kafka.properties file
:param zk_chroot:
:param ListenerSecurityConfig listener_security_config: listener config to use
:param dict per_node_server_prop_overrides:
:param str extra_kafka_opts: jvm args to add to KAFKA_OPTS variable
"""
Service.__init__(self, context, num_nodes)
JmxMixin.__init__(self, num_nodes=num_nodes, jmx_object_names=jmx_object_names, jmx_attributes=(jmx_attributes or []),
root=KafkaService.PERSISTENT_ROOT)
self.zk = zk
self.security_protocol = security_protocol
self.client_sasl_mechanism = client_sasl_mechanism
self.topics = topics
self.minikdc = None
self.authorizer_class_name = authorizer_class_name
self.zk_set_acl = False
if server_prop_overides is None:
self.server_prop_overides = []
else:
self.server_prop_overides = server_prop_overides
if per_node_server_prop_overrides is None:
self.per_node_server_prop_overrides = {}
else:
self.per_node_server_prop_overrides = per_node_server_prop_overrides
self.log_level = "DEBUG"
self.zk_chroot = zk_chroot
self.listener_security_config = listener_security_config
self.extra_kafka_opts = extra_kafka_opts
#
# In a heavily loaded and not very fast machine, it is
# sometimes necessary to give more time for the zk client
# to have its session established, especially if the client
# is authenticating and waiting for the SaslAuthenticated
# in addition to the SyncConnected event.
#
# The default value for zookeeper.connect.timeout.ms is
# 2 seconds and here we increase it to 5 seconds, but
# it can be overridden by setting the corresponding parameter
# for this constructor.
self.zk_connect_timeout = zk_connect_timeout
# Also allow the session timeout to be provided explicitly,
# primarily so that test cases can depend on it when waiting
# e.g. brokers to deregister after a hard kill.
self.zk_session_timeout = zk_session_timeout
self.port_mappings = {
'PLAINTEXT': KafkaListener('PLAINTEXT', 9092, 'PLAINTEXT', False),
'SSL': KafkaListener('SSL', 9093, 'SSL', False),
'SASL_PLAINTEXT': KafkaListener('SASL_PLAINTEXT', 9094, 'SASL_PLAINTEXT', False),
'SASL_SSL': KafkaListener('SASL_SSL', 9095, 'SASL_SSL', False),
KafkaService.INTERBROKER_LISTENER_NAME:
KafkaListener(KafkaService.INTERBROKER_LISTENER_NAME, 9099, None, False)
}
self.interbroker_listener = None
self.setup_interbroker_listener(interbroker_security_protocol, self.listener_security_config.use_separate_interbroker_listener)
self.interbroker_sasl_mechanism = interbroker_sasl_mechanism
for node in self.nodes:
node.version = version
node.config = KafkaConfig(**{config_property.BROKER_ID: self.idx(node)})
def set_version(self, version):
for node in self.nodes:
node.version = version
@property
def interbroker_security_protocol(self):
return self.interbroker_listener.security_protocol
# this is required for backwards compatibility - there are a lot of tests that set this property explicitly
# meaning 'use one of the existing listeners that match given security protocol, do not use custom listener'
@interbroker_security_protocol.setter
def interbroker_security_protocol(self, security_protocol):
self.setup_interbroker_listener(security_protocol, use_separate_listener=False)
def setup_interbroker_listener(self, security_protocol, use_separate_listener=False):
self.listener_security_config.use_separate_interbroker_listener = use_separate_listener
if self.listener_security_config.use_separate_interbroker_listener:
# do not close existing port here since it is not used exclusively for interbroker communication
self.interbroker_listener = self.port_mappings[KafkaService.INTERBROKER_LISTENER_NAME]
self.interbroker_listener.security_protocol = security_protocol
else:
# close dedicated interbroker port, so it's not dangling in 'listeners' and 'advertised.listeners'
self.close_port(KafkaService.INTERBROKER_LISTENER_NAME)
self.interbroker_listener = self.port_mappings[security_protocol]
@property
def security_config(self):
config = SecurityConfig(self.context, self.security_protocol, self.interbroker_listener.security_protocol,
zk_sasl=self.zk.zk_sasl,
client_sasl_mechanism=self.client_sasl_mechanism,
interbroker_sasl_mechanism=self.interbroker_sasl_mechanism,
listener_security_config=self.listener_security_config)
for port in self.port_mappings.values():
if port.open:
config.enable_security_protocol(port.security_protocol)
return config
def open_port(self, listener_name):
self.port_mappings[listener_name].open = True
def close_port(self, listener_name):
self.port_mappings[listener_name].open = False
def start_minikdc(self, add_principals=""):
if self.security_config.has_sasl:
if self.minikdc is None:
self.minikdc = MiniKdc(self.context, self.nodes, extra_principals = add_principals)
self.minikdc.start()
else:
self.minikdc = None
def alive(self, node):
return len(self.pids(node)) > 0
def start(self, add_principals=""):
self.open_port(self.security_protocol)
self.interbroker_listener.open = True
self.start_minikdc(add_principals)
self._ensure_zk_chroot()
Service.start(self)
self.logger.info("Waiting for brokers to register at ZK")
expected_broker_ids = set(self.nodes)
wait_until(lambda: {node for node in self.nodes if self.is_registered(node)} == expected_broker_ids,
timeout_sec=30, backoff_sec=1,
err_msg="Kafka servers didn't register at ZK within 30 seconds")
# Create topics if necessary
if self.topics is not None:
for topic, topic_cfg in self.topics.items():
if topic_cfg is None:
topic_cfg = {}
topic_cfg["topic"] = topic
self.create_topic(topic_cfg)
def _ensure_zk_chroot(self):
self.logger.info("Ensuring zk_chroot %s exists", self.zk_chroot)
if self.zk_chroot:
if not self.zk_chroot.startswith('/'):
raise Exception("Zookeeper chroot must start with '/' but found " + self.zk_chroot)
parts = self.zk_chroot.split('/')[1:]
for i in range(len(parts)):
self.zk.create('/' + '/'.join(parts[:i+1]))
def set_protocol_and_port(self, node):
listeners = []
advertised_listeners = []
protocol_map = []
for port in self.port_mappings.values():
if port.open:
listeners.append(port.listener())
advertised_listeners.append(port.advertised_listener(node))
protocol_map.append(port.listener_security_protocol())
self.listeners = ','.join(listeners)
self.advertised_listeners = ','.join(advertised_listeners)
self.listener_security_protocol_map = ','.join(protocol_map)
self.interbroker_bootstrap_servers = self.__bootstrap_servers(self.interbroker_listener, True)
def prop_file(self, node):
self.set_protocol_and_port(node)
#load template configs as dictionary
config_template = self.render('kafka.properties', node=node, broker_id=self.idx(node),
security_config=self.security_config, num_nodes=self.num_nodes,
listener_security_config=self.listener_security_config)
configs = dict( l.rstrip().split('=', 1) for l in config_template.split('\n')
if not l.startswith("#") and "=" in l )
#load specific test override configs
override_configs = KafkaConfig(**node.config)
override_configs[config_property.ADVERTISED_HOSTNAME] = node.account.hostname
override_configs[config_property.ZOOKEEPER_CONNECT] = self.zk_connect_setting()
for prop in self.server_prop_overides:
override_configs[prop[0]] = prop[1]
for prop in self.per_node_server_prop_overrides.get(self.idx(node), []):
override_configs[prop[0]] = prop[1]
#update template configs with test override configs
configs.update(override_configs)
prop_file = self.render_configs(configs)
return prop_file
def render_configs(self, configs):
"""Render self as a series of lines key=val\n, and do so in a consistent order. """
keys = [k for k in configs.keys()]
keys.sort()
s = ""
for k in keys:
s += "%s=%s\n" % (k, str(configs[k]))
return s
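# For example (illustrative values), configs = {'broker.id': 1, 'log.dirs': '/mnt/kafka/kafka-data-logs-1'}
# renders, in sorted key order, as:
#   broker.id=1
#   log.dirs=/mnt/kafka/kafka-data-logs-1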
def start_cmd(self, node):
cmd = "export JMX_PORT=%d; " % self.jmx_port
cmd += "export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % self.LOG4J_CONFIG
heap_kafka_opts = "-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=%s" % \
self.logs["kafka_heap_dump_file"]["path"]
security_kafka_opts = self.security_config.kafka_opts.strip('\"')
cmd += "export KAFKA_OPTS=\"%s %s %s\"; " % (heap_kafka_opts, security_kafka_opts, self.extra_kafka_opts)
cmd += "%s %s 1>> %s 2>> %s &" % \
(self.path.script("kafka-server-start.sh", node),
KafkaService.CONFIG_FILE,
KafkaService.STDOUT_STDERR_CAPTURE,
KafkaService.STDOUT_STDERR_CAPTURE)
return cmd
def start_node(self, node, timeout_sec=60):
node.account.mkdirs(KafkaService.PERSISTENT_ROOT)
prop_file = self.prop_file(node)
self.logger.info("kafka.properties:")
self.logger.info(prop_file)
node.account.create_file(KafkaService.CONFIG_FILE, prop_file)
node.account.create_file(self.LOG4J_CONFIG, self.render('log4j.properties', log_dir=KafkaService.OPERATIONAL_LOG_DIR))
self.security_config.setup_node(node)
self.security_config.setup_credentials(node, self.path, self.zk_connect_setting(), broker=True)
cmd = self.start_cmd(node)
self.logger.debug("Attempting to start KafkaService on %s with command: %s" % (str(node.account), cmd))
with node.account.monitor_log(KafkaService.STDOUT_STDERR_CAPTURE) as monitor:
node.account.ssh(cmd)
# Kafka 1.0.0 and higher don't have a space between "Kafka" and "Server"
monitor.wait_until("Kafka\s*Server.*started", timeout_sec=timeout_sec, backoff_sec=.25,
err_msg="Kafka server didn't finish startup in %d seconds" % timeout_sec)
# Credentials for inter-broker communication are created before starting Kafka.
# Client credentials are created after starting Kafka so that both loading of
# existing credentials from ZK and dynamic update of credentials in Kafka are tested.
self.security_config.setup_credentials(node, self.path, self.zk_connect_setting(), broker=False)
self.start_jmx_tool(self.idx(node), node)
if len(self.pids(node)) == 0:
raise Exception("No process ids recorded on node %s" % node.account.hostname)
def pids(self, node):
"""Return process ids associated with running processes on the given node."""
try:
cmd = "jcmd | grep -e %s | awk '{print $1}'" % self.java_class_name()
pid_arr = [pid for pid in node.account.ssh_capture(cmd, allow_fail=True, callback=int)]
return pid_arr
except (RemoteCommandError, ValueError) as e:
return []
def signal_node(self, node, sig=signal.SIGTERM):
pids = self.pids(node)
for pid in pids:
node.account.signal(pid, sig)
def signal_leader(self, topic, partition=0, sig=signal.SIGTERM):
leader = self.leader(topic, partition)
self.signal_node(leader, sig)
def stop_node(self, node, clean_shutdown=True, timeout_sec=60):
pids = self.pids(node)
sig = signal.SIGTERM if clean_shutdown else signal.SIGKILL
for pid in pids:
node.account.signal(pid, sig, allow_fail=False)
try:
wait_until(lambda: len(self.pids(node)) == 0, timeout_sec=timeout_sec,
err_msg="Kafka node failed to stop in %d seconds" % timeout_sec)
except Exception:
self.thread_dump(node)
raise
def thread_dump(self, node):
for pid in self.pids(node):
try:
node.account.signal(pid, signal.SIGQUIT, allow_fail=True)
except:
self.logger.warn("Could not dump threads on node")
def clean_node(self, node):
JmxMixin.clean_node(self, node)
self.security_config.clean_node(node)
node.account.kill_java_processes(self.java_class_name(),
clean_shutdown=False, allow_fail=True)
node.account.ssh("sudo rm -rf -- %s" % KafkaService.PERSISTENT_ROOT, allow_fail=False)
def create_topic(self, topic_cfg, node=None):
"""Run the admin tool create topic command.
Specifying node is optional, and may be done if different kafka nodes have different versions
and we care where the command gets run.
If the node is not specified, run the command from self.nodes[0]
"""
if node is None:
node = self.nodes[0]
self.logger.info("Creating topic %s with settings %s",
topic_cfg["topic"], topic_cfg)
kafka_topic_script = self.path.script("kafka-topics.sh", node)
cmd = kafka_topic_script + " "
cmd += "--zookeeper %(zk_connect)s --create --topic %(topic)s " % {
'zk_connect': self.zk_connect_setting(),
'topic': topic_cfg.get("topic"),
}
if 'replica-assignment' in topic_cfg:
cmd += " --replica-assignment %(replica-assignment)s" % {
'replica-assignment': topic_cfg.get('replica-assignment')
}
else:
cmd += " --partitions %(partitions)d --replication-factor %(replication-factor)d" % {
'partitions': topic_cfg.get('partitions', 1),
'replication-factor': topic_cfg.get('replication-factor', 1)
}
if topic_cfg.get('if-not-exists', False):
cmd += ' --if-not-exists'
if "configs" in topic_cfg.keys() and topic_cfg["configs"] is not None:
for config_name, config_value in topic_cfg["configs"].items():
cmd += " --config %s=%s" % (config_name, str(config_value))
self.logger.info("Running topic creation command...\n%s" % cmd)
node.account.ssh(cmd)
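# Illustrative usage (editor's sketch, not part of the original service code).
# The keys below mirror exactly what create_topic() reads above; the instance
# name `kafka` and the concrete values are hypothetical.
#
#     topic_cfg = {
#         "topic": "test_topic",
#         "partitions": 2,
#         "replication-factor": 2,
#         "if-not-exists": True,
#         "configs": {"min.insync.replicas": 1},
#     }
#     kafka.create_topic(topic_cfg)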
def delete_topic(self, topic, node=None):
"""
Delete a topic with the topics command
:param topic:
:param node:
:return:
"""
if node is None:
node = self.nodes[0]
self.logger.info("Deleting topic %s" % topic)
kafka_topic_script = self.path.script("kafka-topics.sh", node)
cmd = kafka_topic_script + " "
cmd += "--bootstrap-server %(bootstrap_servers)s --delete --topic %(topic)s " % {
'bootstrap_servers': self.bootstrap_servers(self.security_protocol),
'topic': topic
}
self.logger.info("Running topic delete command...\n%s" % cmd)
node.account.ssh(cmd)
def describe_topic(self, topic, node=None):
if node is None:
node = self.nodes[0]
cmd = "%s --zookeeper %s --topic %s --describe" % \
(self.path.script("kafka-topics.sh", node), self.zk_connect_setting(), topic)
output = ""
for line in node.account.ssh_capture(cmd):
output += line
return output
def list_topics(self, topic=None, node=None):
if node is None:
node = self.nodes[0]
cmd = "%s --zookeeper %s --list" % \
(self.path.script("kafka-topics.sh", node), self.zk_connect_setting())
for line in node.account.ssh_capture(cmd):
if not line.startswith("SLF4J"):
yield line.rstrip()
def alter_message_format(self, topic, msg_format_version, node=None):
if node is None:
node = self.nodes[0]
self.logger.info("Altering message format version for topic %s with format %s", topic, msg_format_version)
cmd = "%s --zookeeper %s --entity-name %s --entity-type topics --alter --add-config message.format.version=%s" % \
(self.path.script("kafka-configs.sh", node), self.zk_connect_setting(), topic, msg_format_version)
self.logger.info("Running alter message format command...\n%s" % cmd)
node.account.ssh(cmd)
def set_unclean_leader_election(self, topic, value=True, node=None):
if node is None:
node = self.nodes[0]
if value is True:
self.logger.info("Enabling unclean leader election for topic %s", topic)
else:
self.logger.info("Disabling unclean leader election for topic %s", topic)
cmd = "%s --zookeeper %s --entity-name %s --entity-type topics --alter --add-config unclean.leader.election.enable=%s" % \
(self.path.script("kafka-configs.sh", node), self.zk_connect_setting(), topic, str(value).lower())
self.logger.info("Running alter unclean leader command...\n%s" % cmd)
node.account.ssh(cmd)
def parse_describe_topic(self, topic_description):
"""Parse output of kafka-topics.sh --describe (or describe_topic() method above), which is a string of form
PartitionCount:2\tReplicationFactor:2\tConfigs:
Topic: test_topic\ttPartition: 0\tLeader: 3\tReplicas: 3,1\tIsr: 3,1
Topic: test_topic\tPartition: 1\tLeader: 1\tReplicas: 1,2\tIsr: 1,2
into a dictionary structure appropriate for use with reassign-partitions tool:
{
"partitions": [
{"topic": "test_topic", "partition": 0, "replicas": [3, 1]},
{"topic": "test_topic", "partition": 1, "replicas": [1, 2]}
]
}
"""
lines = map(lambda x: x.strip(), topic_description.split("\n"))
partitions = []
for line in lines:
m = re.match(".*Leader:.*", line)
if m is None:
continue
fields = line.split("\t")
# ["Partition: 4", "Leader: 0"] -> ["4", "0"]
fields = map(lambda x: x.split(" ")[1], fields)
partitions.append(
{"topic": fields[0],
"partition": int(fields[1]),
"replicas": map(int, fields[3].split(','))})
return {"partitions": partitions}
def verify_reassign_partitions(self, reassignment, node=None):
"""Run the reassign partitions admin tool in "verify" mode
"""
if node is None:
node = self.nodes[0]
json_file = "/tmp/%s_reassign.json" % str(time.time())
# reassignment to json
json_str = json.dumps(reassignment)
json_str = json.dumps(json_str)
# create command
cmd = "echo %s > %s && " % (json_str, json_file)
cmd += "%s " % self.path.script("kafka-reassign-partitions.sh", node)
cmd += "--zookeeper %s " % self.zk_connect_setting()
cmd += "--reassignment-json-file %s " % json_file
cmd += "--verify "
cmd += "&& sleep 1 && rm -f %s" % json_file
# send command
self.logger.info("Verifying parition reassignment...")
self.logger.debug(cmd)
output = ""
for line in node.account.ssh_capture(cmd):
output += line
self.logger.debug(output)
if re.match(".*Reassignment of partition.*failed.*",
output.replace('\n', '')) is not None:
return False
if re.match(".*is still in progress.*",
output.replace('\n', '')) is not None:
return False
return True
def execute_reassign_partitions(self, reassignment, node=None,
throttle=None):
"""Run the reassign partitions admin tool in "verify" mode
"""
if node is None:
node = self.nodes[0]
json_file = "/tmp/%s_reassign.json" % str(time.time())
# reassignment to json
json_str = json.dumps(reassignment)
json_str = json.dumps(json_str)
# create command
cmd = "echo %s > %s && " % (json_str, json_file)
cmd += "%s " % self.path.script( "kafka-reassign-partitions.sh", node)
cmd += "--zookeeper %s " % self.zk_connect_setting()
cmd += "--reassignment-json-file %s " % json_file
cmd += "--execute"
if throttle is not None:
cmd += " --throttle %d" % throttle
cmd += " && sleep 1 && rm -f %s" % json_file
# send command
self.logger.info("Executing parition reassignment...")
self.logger.debug(cmd)
output = ""
for line in node.account.ssh_capture(cmd):
output += line
self.logger.debug("Verify partition reassignment:")
self.logger.debug(output)
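# Illustrative usage (editor's sketch). parse_describe_topic() above produces
# the dictionary shape that this method expects, so a reassignment can be
# driven roughly as follows (the `kafka` instance and replica ids are
# hypothetical):
#
#     plan = kafka.parse_describe_topic(kafka.describe_topic("test_topic"))
#     plan["partitions"][0]["replicas"] = [2, 3]      # move partition 0
#     kafka.execute_reassign_partitions(plan, throttle=1000000)
#     wait_until(lambda: kafka.verify_reassign_partitions(plan),
#                timeout_sec=60, err_msg="Reassignment did not complete")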
def search_data_files(self, topic, messages):
"""Check if a set of messages made it into the Kakfa data files. Note that
this method takes no account of replication. It simply looks for the
payload in all the partition files of the specified topic. 'messages' should be
an array of numbers. The list of missing messages is returned.
"""
payload_match = "payload: " + "$|payload: ".join(str(x) for x in messages) + "$"
found = set([])
self.logger.debug("number of unique missing messages we will search for: %d",
len(messages))
for node in self.nodes:
# Grab all .log files in directories prefixed with this topic
files = node.account.ssh_capture("find %s* -regex '.*/%s-.*/[^/]*.log'" % (KafkaService.DATA_LOG_DIR_PREFIX, topic))
# Check each data file to see if it contains the messages we want
for log in files:
cmd = "%s kafka.tools.DumpLogSegments --print-data-log --files %s | grep -E \"%s\"" % \
(self.path.script("kafka-run-class.sh", node), log.strip(), payload_match)
for line in node.account.ssh_capture(cmd, allow_fail=True):
for val in messages:
if line.strip().endswith("payload: "+str(val)):
self.logger.debug("Found %s in data-file [%s] in line: [%s]" % (val, log.strip(), line.strip()))
found.add(val)
self.logger.debug("Number of unique messages found in the log: %d",
len(found))
missing = list(set(messages) - found)
if len(missing) > 0:
self.logger.warn("The following values were not found in the data files: " + str(missing))
return missing
def restart_cluster(self, clean_shutdown=True):
for node in self.nodes:
self.restart_node(node, clean_shutdown=clean_shutdown)
def restart_node(self, node, clean_shutdown=True):
"""Restart the given node."""
self.stop_node(node, clean_shutdown)
self.start_node(node)
def isr_idx_list(self, topic, partition=0):
""" Get in-sync replica list the given topic and partition.
"""
self.logger.debug("Querying zookeeper to find in-sync replicas for topic %s and partition %d" % (topic, partition))
zk_path = "/brokers/topics/%s/partitions/%d/state" % (topic, partition)
partition_state = self.zk.query(zk_path, chroot=self.zk_chroot)
if partition_state is None:
raise Exception("Error finding partition state for topic %s and partition %d." % (topic, partition))
partition_state = json.loads(partition_state)
self.logger.info(partition_state)
isr_idx_list = partition_state["isr"]
self.logger.info("Isr for topic %s and partition %d is now: %s" % (topic, partition, isr_idx_list))
return isr_idx_list
def replicas(self, topic, partition=0):
""" Get the assigned replicas for the given topic and partition.
"""
self.logger.debug("Querying zookeeper to find assigned replicas for topic %s and partition %d" % (topic, partition))
zk_path = "/brokers/topics/%s" % (topic)
assignment = self.zk.query(zk_path, chroot=self.zk_chroot)
if assignment is None:
raise Exception("Error finding partition state for topic %s and partition %d." % (topic, partition))
assignment = json.loads(assignment)
self.logger.info(assignment)
replicas = assignment["partitions"][str(partition)]
self.logger.info("Assigned replicas for topic %s and partition %d is now: %s" % (topic, partition, replicas))
return [self.get_node(replica) for replica in replicas]
def leader(self, topic, partition=0):
""" Get the leader replica for the given topic and partition.
"""
self.logger.debug("Querying zookeeper to find leader replica for topic %s and partition %d" % (topic, partition))
zk_path = "/brokers/topics/%s/partitions/%d/state" % (topic, partition)
partition_state = self.zk.query(zk_path, chroot=self.zk_chroot)
if partition_state is None:
raise Exception("Error finding partition state for topic %s and partition %d." % (topic, partition))
partition_state = json.loads(partition_state)
self.logger.info(partition_state)
leader_idx = int(partition_state["leader"])
self.logger.info("Leader for topic %s and partition %d is now: %d" % (topic, partition, leader_idx))
return self.get_node(leader_idx)
def cluster_id(self):
""" Get the current cluster id
"""
self.logger.debug("Querying ZooKeeper to retrieve cluster id")
cluster = self.zk.query("/cluster/id", chroot=self.zk_chroot)
try:
return json.loads(cluster)['id'] if cluster else None
except:
self.logger.debug("Data in /cluster/id znode could not be parsed. Data = %s" % cluster)
raise
def list_consumer_groups(self, node=None, command_config=None):
""" Get list of consumer groups.
"""
if node is None:
node = self.nodes[0]
consumer_group_script = self.path.script("kafka-consumer-groups.sh", node)
if command_config is None:
command_config = ""
else:
command_config = "--command-config " + command_config
cmd = "%s --bootstrap-server %s %s --list" % \
(consumer_group_script,
self.bootstrap_servers(self.security_protocol),
command_config)
output = ""
self.logger.debug(cmd)
for line in node.account.ssh_capture(cmd):
if not line.startswith("SLF4J"):
output += line
self.logger.debug(output)
return output
def describe_consumer_group(self, group, node=None, command_config=None):
""" Describe a consumer group.
"""
if node is None:
node = self.nodes[0]
consumer_group_script = self.path.script("kafka-consumer-groups.sh", node)
if command_config is None:
command_config = ""
else:
command_config = "--command-config " + command_config
cmd = "%s --bootstrap-server %s %s --group %s --describe" % \
(consumer_group_script,
self.bootstrap_servers(self.security_protocol),
command_config, group)
output = ""
self.logger.debug(cmd)
for line in node.account.ssh_capture(cmd):
if not (line.startswith("SLF4J") or line.startswith("TOPIC") or line.startswith("Could not fetch offset")):
output += line
self.logger.debug(output)
return output
def zk_connect_setting(self):
return self.zk.connect_setting(self.zk_chroot)
def __bootstrap_servers(self, port, validate=True, offline_nodes=[]):
if validate and not port.open:
raise ValueError("We are retrieving bootstrap servers for the port: %s which is not currently open. - " %
str(port.port_number))
return ','.join([node.account.hostname + ":" + str(port.port_number)
for node in self.nodes
if node not in offline_nodes])
def bootstrap_servers(self, protocol='PLAINTEXT', validate=True, offline_nodes=[]):
"""Return comma-delimited list of brokers in this cluster formatted as HOSTNAME1:PORT1,HOSTNAME:PORT2,...
This is the format expected by many config files.
"""
port_mapping = self.port_mappings[protocol]
self.logger.info("Bootstrap client port is: " + str(port_mapping.port_number))
return self.__bootstrap_servers(port_mapping, validate, offline_nodes)
def controller(self):
""" Get the controller node
"""
self.logger.debug("Querying zookeeper to find controller broker")
controller_info = self.zk.query("/controller", chroot=self.zk_chroot)
if controller_info is None:
raise Exception("Error finding controller info")
controller_info = json.loads(controller_info)
self.logger.debug(controller_info)
controller_idx = int(controller_info["brokerid"])
self.logger.info("Controller's ID: %d" % (controller_idx))
return self.get_node(controller_idx)
def is_registered(self, node):
"""
Check whether a broker is registered in Zookeeper
"""
self.logger.debug("Querying zookeeper to see if broker %s is registered", node)
broker_info = self.zk.query("/brokers/ids/%s" % self.idx(node), chroot=self.zk_chroot)
self.logger.debug("Broker info: %s", broker_info)
return broker_info is not None
def get_offset_shell(self, topic, partitions, max_wait_ms, offsets, time):
node = self.nodes[0]
cmd = self.path.script("kafka-run-class.sh", node)
cmd += " kafka.tools.GetOffsetShell"
cmd += " --topic %s --broker-list %s --max-wait-ms %s --offsets %s --time %s" % (topic, self.bootstrap_servers(self.security_protocol), max_wait_ms, offsets, time)
if partitions:
cmd += ' --partitions %s' % partitions
cmd += " 2>> %s/get_offset_shell.log" % KafkaService.PERSISTENT_ROOT
cmd += " | tee -a %s/get_offset_shell.log &" % KafkaService.PERSISTENT_ROOT
output = ""
self.logger.debug(cmd)
for line in node.account.ssh_capture(cmd):
output += line
self.logger.debug(output)
return output
def java_class_name(self):
return "kafka.Kafka"
|
|
# Copyright 2016 - Nokia Networks.
# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from osprofiler import profiler
import traceback as tb
from mistral.db import utils as db_utils
from mistral.db.v2 import api as db_api
from mistral.engine import action_queue
from mistral.engine import workflows
from mistral import exceptions as exc
from mistral.services import scheduler
from mistral.workflow import states
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
_CHECK_AND_COMPLETE_PATH = (
'mistral.engine.workflow_handler._check_and_complete'
)
@profiler.trace('workflow-handler-start-workflow', hide_args=True)
def start_workflow(wf_identifier, wf_namespace, wf_ex_id, wf_input, desc,
params):
wf = workflows.Workflow()
wf_def = db_api.get_workflow_definition(wf_identifier, wf_namespace)
if 'namespace' not in params:
params['namespace'] = wf_def.namespace
wf.start(
wf_def=wf_def,
wf_ex_id=wf_ex_id,
input_dict=wf_input,
desc=desc,
params=params
)
_schedule_check_and_complete(wf.wf_ex)
return wf.wf_ex
def stop_workflow(wf_ex, state, msg=None):
wf = workflows.Workflow(wf_ex=wf_ex)
# In this case we should not try to handle possible errors. Instead,
# we need to let them pop up since the typical way of failing objects
# doesn't work here. Failing a workflow is the same as stopping it
# with ERROR state.
wf.stop(state, msg)
# Cancels subworkflows.
if state == states.CANCELLED:
for task_ex in wf_ex.task_executions:
sub_wf_exs = db_api.get_workflow_executions(
task_execution_id=task_ex.id
)
for sub_wf_ex in sub_wf_exs:
if not states.is_completed(sub_wf_ex.state):
stop_workflow(sub_wf_ex, state, msg=msg)
def force_fail_workflow(wf_ex, msg=None):
stop_workflow(wf_ex, states.ERROR, msg)
def cancel_workflow(wf_ex, msg=None):
stop_workflow(wf_ex, states.CANCELLED, msg)
@db_utils.retry_on_db_error
@action_queue.process
@profiler.trace('workflow-handler-check-and-complete', hide_args=True)
def _check_and_complete(wf_ex_id):
# Note: This method can only be called via scheduler.
with db_api.transaction():
wf_ex = db_api.load_workflow_execution(wf_ex_id)
if not wf_ex or states.is_completed(wf_ex.state):
return
wf = workflows.Workflow(wf_ex=wf_ex)
incomplete_tasks_count = 0
try:
check_and_fix_integrity(wf_ex)
incomplete_tasks_count = wf.check_and_complete()
except exc.MistralException as e:
msg = (
"Failed to check and complete [wf_ex_id=%s, wf_name=%s]:"
" %s\n%s" % (wf_ex_id, wf_ex.name, e, tb.format_exc())
)
LOG.error(msg)
force_fail_workflow(wf.wf_ex, msg)
return
finally:
if states.is_completed(wf_ex.state):
return
# Let's assume that a task takes 0.01 sec on average to complete,
# and based on this assumption calculate the time of the next check.
# The estimate is very rough, but this delay will decrease as tasks
# complete, which gives a decent approximation.
# For example, if a workflow has 100 incomplete tasks then the
# next check call will happen in 1 second. For 500 tasks it will
# be 5 seconds. The larger the workflow is, the more beneficial
# this mechanism will be.
delay = (
int(incomplete_tasks_count * 0.01) if incomplete_tasks_count
else 4
)
_schedule_check_and_complete(wf_ex, delay)
@profiler.trace('workflow-handler-check-and-fix-integrity')
def check_and_fix_integrity(wf_ex):
check_after_seconds = CONF.engine.execution_integrity_check_delay
if check_after_seconds < 0:
# Never check integrity if it's a negative value.
return
# To break cyclic dependency.
from mistral.engine import task_handler
running_task_execs = db_api.get_task_executions(
workflow_execution_id=wf_ex.id,
state=states.RUNNING
)
for t_ex in running_task_execs:
# The idea is that we take the latest known timestamp of the task
# execution and consider it eligible for checking and fixing only
# if some minimum period of time elapsed since the last update.
timestamp = t_ex.updated_at or t_ex.created_at
delta = timeutils.delta_seconds(timestamp, timeutils.utcnow())
if delta < check_after_seconds:
continue
child_executions = t_ex.executions
if not child_executions:
continue
all_finished = all(
[states.is_completed(c_ex.state) for c_ex in child_executions]
)
if all_finished:
# Find the timestamp of the most recently finished child.
most_recent_child_timestamp = max(
[c_ex.updated_at or c_ex.created_at for c_ex in
child_executions]
)
interval = timeutils.delta_seconds(
most_recent_child_timestamp,
timeutils.utcnow()
)
if interval > check_after_seconds:
# We found a task execution in RUNNING state for which all
# child executions are finished. We need to call
# "schedule_on_action_complete" on the task handler for any of
# the child executions so that the task state is calculated and
# updated properly.
LOG.warning(
"Found a task execution that is likely stuck in RUNNING"
" state because all child executions are finished,"
" will try to recover [task_execution=%s]", t_ex.id
)
task_handler.schedule_on_action_complete(child_executions[-1])
def pause_workflow(wf_ex, msg=None):
# Pause subworkflows first.
for task_ex in wf_ex.task_executions:
sub_wf_exs = db_api.get_workflow_executions(
task_execution_id=task_ex.id
)
for sub_wf_ex in sub_wf_exs:
if not states.is_completed(sub_wf_ex.state):
pause_workflow(sub_wf_ex, msg=msg)
# If all subworkflows paused successfully, pause the main workflow.
# If any subworkflow failed to pause for a temporary reason, this
# allows pause to be executed again on the main workflow.
wf = workflows.Workflow(wf_ex=wf_ex)
wf.pause(msg=msg)
def rerun_workflow(wf_ex, task_ex, reset=True, env=None):
if wf_ex.state == states.PAUSED:
return wf_ex.get_clone()
wf = workflows.Workflow(wf_ex=wf_ex)
wf.rerun(task_ex, reset=reset, env=env)
_schedule_check_and_complete(wf_ex)
if wf_ex.task_execution_id:
_schedule_check_and_complete(wf_ex.task_execution.workflow_execution)
def resume_workflow(wf_ex, env=None):
if not states.is_paused_or_idle(wf_ex.state):
return wf_ex.get_clone()
# Resume subworkflows first.
for task_ex in wf_ex.task_executions:
sub_wf_exs = db_api.get_workflow_executions(
task_execution_id=task_ex.id
)
for sub_wf_ex in sub_wf_exs:
if not states.is_completed(sub_wf_ex.state):
resume_workflow(sub_wf_ex)
# Resume the current workflow here so as to trigger the continuation of
# the workflow only after all other subworkflows are placed back in the
# RUNNING state.
wf = workflows.Workflow(wf_ex=wf_ex)
wf.resume(env=env)
@profiler.trace('workflow-handler-set-state', hide_args=True)
def set_workflow_state(wf_ex, state, msg=None):
if states.is_completed(state):
stop_workflow(wf_ex, state, msg)
elif states.is_paused(state):
pause_workflow(wf_ex, msg)
else:
raise exc.MistralError(
'Invalid workflow execution state [wf_ex_id=%s, wf_name=%s, '
'state=%s]' % (wf_ex.id, wf_ex.name, state)
)
def _get_completion_check_key(wf_ex):
return 'wfh_on_c_a_c-%s' % wf_ex.id
@profiler.trace('workflow-handler-schedule-check-and-complete', hide_args=True)
def _schedule_check_and_complete(wf_ex, delay=0):
"""Schedules workflow completion check.
This method provides transactional decoupling of task completion from the
workflow completion check. It's needed in the non-locking model in order to
avoid the 'phantom read' phenomenon when reading the state of multiple tasks
to see if a workflow is completed. Just starting a separate transaction
without using the scheduler is not safe due to the concurrency window that
we'd have in this case (the time between transactions), whereas the scheduler
is a special component that is designed to be resistant to failures.
:param wf_ex: Workflow execution.
:param delay: Minimum amount of time before task completion check
should be made.
"""
key = _get_completion_check_key(wf_ex)
scheduler.schedule_call(
None,
_CHECK_AND_COMPLETE_PATH,
delay,
key=key,
wf_ex_id=wf_ex.id
)
|
|
"""
Functions
---------
.. autosummary::
:toctree: generated/
line_search_armijo
line_search_wolfe1
line_search_wolfe2
scalar_search_wolfe1
scalar_search_wolfe2
"""
from __future__ import division, print_function, absolute_import
from warnings import warn
from scipy.optimize import minpack2
import numpy as np
from scipy._lib.six import xrange
__all__ = ['LineSearchWarning', 'line_search_wolfe1', 'line_search_wolfe2',
'scalar_search_wolfe1', 'scalar_search_wolfe2',
'line_search_armijo']
class LineSearchWarning(RuntimeWarning):
pass
#------------------------------------------------------------------------------
# Minpack's Wolfe line and scalar searches
#------------------------------------------------------------------------------
def line_search_wolfe1(f, fprime, xk, pk, gfk=None,
old_fval=None, old_old_fval=None,
args=(), c1=1e-4, c2=0.9, amax=50, amin=1e-8,
xtol=1e-14):
"""
As `scalar_search_wolfe1` but do a line search along the direction `pk`.
Parameters
----------
f : callable
Function `f(x)`
fprime : callable
Gradient of `f`
xk : array_like
Current point
pk : array_like
Search direction
gfk : array_like, optional
Gradient of `f` at point `xk`
old_fval : float, optional
Value of `f` at point `xk`
old_old_fval : float, optional
Value of `f` at point preceding `xk`
The rest of the parameters are the same as for `scalar_search_wolfe1`.
Returns
-------
stp, f_count, g_count, fval, old_fval
As in `scalar_search_wolfe1`
gval : array
Gradient of `f` at the final point
"""
if gfk is None:
gfk = fprime(xk)
if isinstance(fprime, tuple):
eps = fprime[1]
fprime = fprime[0]
newargs = (f, eps) + args
gradient = False
else:
newargs = args
gradient = True
gval = [gfk]
gc = [0]
fc = [0]
def phi(s):
fc[0] += 1
return f(xk + s*pk, *args)
def derphi(s):
gval[0] = fprime(xk + s*pk, *newargs)
if gradient:
gc[0] += 1
else:
fc[0] += len(xk) + 1
return np.dot(gval[0], pk)
derphi0 = np.dot(gfk, pk)
stp, fval, old_fval = scalar_search_wolfe1(
phi, derphi, old_fval, old_old_fval, derphi0,
c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol)
return stp, fc[0], gc[0], fval, old_fval, gval[0]
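# Illustrative usage (editor's sketch, assuming a simple quadratic objective):
#
#     import numpy as np
#     f = lambda x: float(np.dot(x, x))        # f(x) = ||x||^2
#     fprime = lambda x: 2 * x
#     xk = np.array([1.0, 1.0])
#     pk = -fprime(xk)                         # steepest-descent direction
#     stp, fc, gc, fval, old_fval, gval = line_search_wolfe1(f, fprime, xk, pk)
#
# Here phi(s) = 2*(1 - 2*s)**2, so any step in roughly [0.05, 0.95] satisfies
# the strong Wolfe conditions; the exact one-dimensional minimizer is s = 0.5.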
def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None,
c1=1e-4, c2=0.9,
amax=50, amin=1e-8, xtol=1e-14):
"""
Scalar function search for alpha that satisfies strong Wolfe conditions
alpha > 0 is assumed to be a descent direction.
Parameters
----------
phi : callable phi(alpha)
Function at point `alpha`
derphi : callable dphi(alpha)
Derivative `d phi(alpha)/d alpha`. Returns a scalar.
phi0 : float, optional
Value of `f` at 0
old_phi0 : float, optional
Value of `f` at the previous point
derphi0 : float, optional
Value `derphi` at 0
c1, c2 : float, optional
Wolfe parameters
amax, amin : float, optional
Maximum and minimum step size
xtol : float, optional
Relative tolerance for an acceptable step.
Returns
-------
alpha : float
Step size, or None if no suitable step was found
phi : float
Value of `phi` at the new point `alpha`
phi0 : float
Value of `phi` at `alpha=0`
Notes
-----
Uses routine DCSRCH from MINPACK.
"""
if phi0 is None:
phi0 = phi(0.)
if derphi0 is None:
derphi0 = derphi(0.)
if old_phi0 is not None and derphi0 != 0:
alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)
if alpha1 < 0:
alpha1 = 1.0
else:
alpha1 = 1.0
phi1 = phi0
derphi1 = derphi0
isave = np.zeros((2,), np.intc)
dsave = np.zeros((13,), float)
task = b'START'
maxiter = 100
for i in xrange(maxiter):
stp, phi1, derphi1, task = minpack2.dcsrch(alpha1, phi1, derphi1,
c1, c2, xtol, task,
amin, amax, isave, dsave)
if task[:2] == b'FG':
alpha1 = stp
phi1 = phi(stp)
derphi1 = derphi(stp)
else:
break
else:
# maxiter reached, the line search did not converge
stp = None
if task[:5] == b'ERROR' or task[:4] == b'WARN':
stp = None # failed
return stp, phi1, phi0
line_search = line_search_wolfe1
#------------------------------------------------------------------------------
# Pure-Python Wolfe line and scalar searches
#------------------------------------------------------------------------------
def line_search_wolfe2(f, myfprime, xk, pk, gfk=None, old_fval=None,
old_old_fval=None, args=(), c1=1e-4, c2=0.9, amax=50,
extra_condition=None):
"""Find alpha that satisfies strong Wolfe conditions.
Parameters
----------
f : callable f(x,*args)
Objective function.
myfprime : callable f'(x,*args)
Objective function gradient.
xk : ndarray
Starting point.
pk : ndarray
Search direction.
gfk : ndarray, optional
Gradient value for x=xk (xk being the current parameter
estimate). Will be recomputed if omitted.
old_fval : float, optional
Function value for x=xk. Will be recomputed if omitted.
old_old_fval : float, optional
Function value for the point preceding x=xk
args : tuple, optional
Additional arguments passed to objective function.
c1 : float, optional
Parameter for Armijo condition rule.
c2 : float, optional
Parameter for curvature condition rule.
amax : float, optional
Maximum step size
extra_condition : callable, optional
A callable of the form ``extra_condition(alpha, x, f, g)``
returning a boolean. Arguments are the proposed step ``alpha``
and the corresponding ``x``, ``f`` and ``g`` values. The line search
accepts the value of ``alpha`` only if this
callable returns ``True``. If the callable returns ``False``
for the step length, the algorithm will continue with
new iterates. The callable is only called for iterates
satisfying the strong Wolfe conditions.
Returns
-------
alpha : float or None
Alpha for which ``x_new = x0 + alpha * pk``,
or None if the line search algorithm did not converge.
fc : int
Number of function evaluations made.
gc : int
Number of gradient evaluations made.
new_fval : float or None
New function value ``f(x_new)=f(x0+alpha*pk)``,
or None if the line search algorithm did not converge.
old_fval : float
Old function value ``f(x0)``.
new_slope : float or None
The local slope along the search direction at the
new value ``<myfprime(x_new), pk>``,
or None if the line search algorithm did not converge.
Notes
-----
Uses the line search algorithm to enforce strong Wolfe
conditions. See Wright and Nocedal, 'Numerical Optimization',
1999, pg. 59-60.
For the zoom phase it uses an algorithm by [...].
"""
fc = [0]
gc = [0]
gval = [None]
gval_alpha = [None]
def phi(alpha):
fc[0] += 1
return f(xk + alpha * pk, *args)
if isinstance(myfprime, tuple):
def derphi(alpha):
fc[0] += len(xk) + 1
eps = myfprime[1]
fprime = myfprime[0]
newargs = (f, eps) + args
gval[0] = fprime(xk + alpha * pk, *newargs) # store for later use
gval_alpha[0] = alpha
return np.dot(gval[0], pk)
else:
fprime = myfprime
def derphi(alpha):
gc[0] += 1
gval[0] = fprime(xk + alpha * pk, *args) # store for later use
gval_alpha[0] = alpha
return np.dot(gval[0], pk)
if gfk is None:
gfk = fprime(xk, *args)
derphi0 = np.dot(gfk, pk)
if extra_condition is not None:
# Add the current gradient as argument, to avoid needless
# re-evaluation
def extra_condition2(alpha, phi):
if gval_alpha[0] != alpha:
derphi(alpha)
x = xk + alpha * pk
return extra_condition(alpha, x, phi, gval[0])
else:
extra_condition2 = None
alpha_star, phi_star, old_fval, derphi_star = scalar_search_wolfe2(
phi, derphi, old_fval, old_old_fval, derphi0, c1, c2, amax,
extra_condition2)
if derphi_star is None:
warn('The line search algorithm did not converge', LineSearchWarning)
else:
# derphi_star is a number (derphi), so use the most recently
# calculated gradient that was used in computing it: derphi = gfk*pk.
# This is the gradient at the next step, so there is no need to
# compute it again in the outer loop.
derphi_star = gval[0]
return alpha_star, fc[0], gc[0], phi_star, old_fval, derphi_star
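# Illustrative usage (editor's sketch): the same quadratic as in the sketch
# after line_search_wolfe1 works here as well, but note the different return
# tuple and that this variant is pure Python:
#
#     alpha, fc, gc, new_fval, old_fval, new_slope = line_search_wolfe2(
#         f, fprime, xk, pk)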
def scalar_search_wolfe2(phi, derphi=None, phi0=None,
old_phi0=None, derphi0=None,
c1=1e-4, c2=0.9, amax=50,
extra_condition=None):
"""Find alpha that satisfies strong Wolfe conditions.
alpha > 0 is assumed to be a descent direction.
Parameters
----------
phi : callable f(x)
Objective scalar function.
derphi : callable f'(x), optional
Objective function derivative (can be None)
phi0 : float, optional
Value of phi at s=0
old_phi0 : float, optional
Value of phi at previous point
derphi0 : float, optional
Value of derphi at s=0
c1 : float, optional
Parameter for Armijo condition rule.
c2 : float, optional
Parameter for curvature condition rule.
amax : float, optional
Maximum step size
extra_condition : callable, optional
A callable of the form ``extra_condition(alpha, phi_value)``
returning a boolean. The line search accepts the value
of ``alpha`` only if this callable returns ``True``.
If the callable returns ``False`` for the step length,
the algorithm will continue with new iterates.
The callable is only called for iterates satisfying
the strong Wolfe conditions.
Returns
-------
alpha_star : float or None
Best alpha, or None if the line search algorithm did not converge.
phi_star : float
phi at alpha_star
phi0 : float
phi at 0
derphi_star : float or None
derphi at alpha_star, or None if the line search algorithm
did not converge.
Notes
-----
Uses the line search algorithm to enforce strong Wolfe
conditions. See Wright and Nocedal, 'Numerical Optimization',
1999, pg. 59-60.
For the zoom phase it uses an algorithm by [...].
"""
if phi0 is None:
phi0 = phi(0.)
if derphi0 is None and derphi is not None:
derphi0 = derphi(0.)
alpha0 = 0
if old_phi0 is not None and derphi0 != 0:
alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)
else:
alpha1 = 1.0
if alpha1 < 0:
alpha1 = 1.0
if alpha1 == 0:
# This shouldn't happen. Perhaps the increment has slipped below
# machine precision? For now, set the return variables, skip the
# useless while loop, and raise warnflag=2 due to possible imprecision.
alpha_star = None
phi_star = phi0
phi0 = old_phi0
derphi_star = None
phi_a1 = phi(alpha1)
#derphi_a1 = derphi(alpha1) evaluated below
phi_a0 = phi0
derphi_a0 = derphi0
if extra_condition is None:
extra_condition = lambda alpha, phi: True
i = 1
maxiter = 10
for i in xrange(maxiter):
if alpha1 == 0:
break
if (phi_a1 > phi0 + c1 * alpha1 * derphi0) or \
((phi_a1 >= phi_a0) and (i > 1)):
alpha_star, phi_star, derphi_star = \
_zoom(alpha0, alpha1, phi_a0,
phi_a1, derphi_a0, phi, derphi,
phi0, derphi0, c1, c2, extra_condition)
break
derphi_a1 = derphi(alpha1)
if (abs(derphi_a1) <= -c2*derphi0):
if extra_condition(alpha1, phi_a1):
alpha_star = alpha1
phi_star = phi_a1
derphi_star = derphi_a1
break
if (derphi_a1 >= 0):
alpha_star, phi_star, derphi_star = \
_zoom(alpha1, alpha0, phi_a1,
phi_a0, derphi_a1, phi, derphi,
phi0, derphi0, c1, c2, extra_condition)
break
alpha2 = 2 * alpha1 # increase by factor of two on each iteration
i = i + 1
alpha0 = alpha1
alpha1 = alpha2
phi_a0 = phi_a1
phi_a1 = phi(alpha1)
derphi_a0 = derphi_a1
else:
# stopping test maxiter reached
alpha_star = alpha1
phi_star = phi_a1
derphi_star = None
warn('The line search algorithm did not converge', LineSearchWarning)
return alpha_star, phi_star, phi0, derphi_star
def _cubicmin(a, fa, fpa, b, fb, c, fc):
"""
Finds the minimizer for a cubic polynomial that goes through the
points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa.
If no minimizer can be found return None
"""
# f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D
with np.errstate(divide='raise', over='raise', invalid='raise'):
try:
C = fpa
db = b - a
dc = c - a
denom = (db * dc) ** 2 * (db - dc)
d1 = np.empty((2, 2))
d1[0, 0] = dc ** 2
d1[0, 1] = -db ** 2
d1[1, 0] = -dc ** 3
d1[1, 1] = db ** 3
[A, B] = np.dot(d1, np.asarray([fb - fa - C * db,
fc - fa - C * dc]).flatten())
A /= denom
B /= denom
radical = B * B - 3 * A * C
xmin = a + (-B + np.sqrt(radical)) / (3 * A)
except ArithmeticError:
return None
if not np.isfinite(xmin):
return None
return xmin
def _quadmin(a, fa, fpa, b, fb):
"""
Finds the minimizer for a quadratic polynomial that goes through
the points (a,fa), (b,fb) with derivative at a of fpa,
"""
# f(x) = B*(x-a)^2 + C*(x-a) + D
with np.errstate(divide='raise', over='raise', invalid='raise'):
try:
D = fa
C = fpa
db = b - a * 1.0
B = (fb - D - C * db) / (db * db)
xmin = a - C / (2.0 * B)
except ArithmeticError:
return None
if not np.isfinite(xmin):
return None
return xmin
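# Worked check (editor's note): for f(x) = (x - 1)**2 sampled at a = 0
# (fa = 1, fpa = -2) and b = 2 (fb = 1), the code above computes
#     db   = b - a = 2
#     B    = (fb - fa - fpa*db) / db**2 = (1 - 1 + 4) / 4 = 1
#     xmin = a - fpa / (2*B) = 0 + 2 / 2 = 1.0
# which is the true minimizer, so _quadmin(0.0, 1.0, -2.0, 2.0, 1.0) -> 1.0.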
def _zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo,
phi, derphi, phi0, derphi0, c1, c2, extra_condition):
"""
Part of the optimization algorithm in `scalar_search_wolfe2`.
"""
maxiter = 10
i = 0
delta1 = 0.2 # cubic interpolant check
delta2 = 0.1 # quadratic interpolant check
phi_rec = phi0
a_rec = 0
while True:
# Interpolate to find a trial step length between a_lo and a_hi.
# We need to choose an interpolation method here: use cubic
# interpolation first, and if the result is within delta * dalpha
# of the end points or outside of the interval bounded by a_lo and
# a_hi, fall back to quadratic interpolation. If the result is
# still too close, use bisection.
dalpha = a_hi - a_lo
if dalpha < 0:
a, b = a_hi, a_lo
else:
a, b = a_lo, a_hi
# minimizer of cubic interpolant
# (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi)
#
# if the result is too close to the end points (or out of the
# interval) then use quadratic interpolation with phi_lo,
# derphi_lo and phi_hi; if the result is still too close to the
# end points (or out of the interval) then use bisection
if (i > 0):
cchk = delta1 * dalpha
a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi,
a_rec, phi_rec)
if (i == 0) or (a_j is None) or (a_j > b - cchk) or (a_j < a + cchk):
qchk = delta2 * dalpha
a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi)
if (a_j is None) or (a_j > b-qchk) or (a_j < a+qchk):
a_j = a_lo + 0.5*dalpha
# Check new value of a_j
phi_aj = phi(a_j)
if (phi_aj > phi0 + c1*a_j*derphi0) or (phi_aj >= phi_lo):
phi_rec = phi_hi
a_rec = a_hi
a_hi = a_j
phi_hi = phi_aj
else:
derphi_aj = derphi(a_j)
if abs(derphi_aj) <= -c2*derphi0 and extra_condition(a_j, phi_aj):
a_star = a_j
val_star = phi_aj
valprime_star = derphi_aj
break
if derphi_aj*(a_hi - a_lo) >= 0:
phi_rec = phi_hi
a_rec = a_hi
a_hi = a_lo
phi_hi = phi_lo
else:
phi_rec = phi_lo
a_rec = a_lo
a_lo = a_j
phi_lo = phi_aj
derphi_lo = derphi_aj
i += 1
if (i > maxiter):
# Failed to find a conforming step size
a_star = None
val_star = None
valprime_star = None
break
return a_star, val_star, valprime_star
#------------------------------------------------------------------------------
# Armijo line and scalar searches
#------------------------------------------------------------------------------
def line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):
"""Minimize over alpha, the function ``f(xk+alpha pk)``.
Parameters
----------
f : callable
Function to be minimized.
xk : array_like
Current point.
pk : array_like
Search direction.
gfk : array_like
Gradient of `f` at point `xk`.
old_fval : float
Value of `f` at point `xk`.
args : tuple, optional
Optional arguments.
c1 : float, optional
Value to control stopping criterion.
alpha0 : scalar, optional
Value of `alpha` at start of the optimization.
Returns
-------
alpha
f_count
f_val_at_alpha
Notes
-----
Uses the interpolation algorithm (Armijo backtracking) as suggested by
Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57
"""
xk = np.atleast_1d(xk)
fc = [0]
def phi(alpha1):
fc[0] += 1
return f(xk + alpha1*pk, *args)
if old_fval is None:
phi0 = phi(0.)
else:
phi0 = old_fval # compute f(xk) -- done in past loop
derphi0 = np.dot(gfk, pk)
alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1,
alpha0=alpha0)
return alpha, fc[0], phi1
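# Illustrative usage (editor's sketch, same quadratic objective as above):
#
#     import numpy as np
#     f = lambda x: float(np.dot(x, x))
#     xk = np.array([1.0, 1.0])
#     gfk = 2 * xk
#     pk = -gfk
#     alpha, fc, fval = line_search_armijo(f, xk, pk, gfk, old_fval=f(xk))
#
# The unit step fails the Armijo test (phi(1.0) == phi(0.0) == 2.0), so the
# quadratic interpolation step is taken instead: alpha == 0.5, fval == 0.0.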
def line_search_BFGS(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):
"""
Compatibility wrapper for `line_search_armijo`
"""
r = line_search_armijo(f, xk, pk, gfk, old_fval, args=args, c1=c1,
alpha0=alpha0)
return r[0], r[1], 0, r[2]
def scalar_search_armijo(phi, phi0, derphi0, c1=1e-4, alpha0=1, amin=0):
"""Minimize over alpha, the function ``phi(alpha)``.
Uses the interpolation algorithm (Armijo backtracking) as suggested by
Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57
alpha > 0 is assumed to be a descent direction.
Returns
-------
alpha
phi1
"""
phi_a0 = phi(alpha0)
if phi_a0 <= phi0 + c1*alpha0*derphi0:
return alpha0, phi_a0
# Otherwise compute the minimizer of a quadratic interpolant:
alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0)
phi_a1 = phi(alpha1)
if (phi_a1 <= phi0 + c1*alpha1*derphi0):
return alpha1, phi_a1
# Otherwise, loop with cubic interpolation until we find an alpha which
# satisfies the first Wolfe condition (since we are backtracking, we will
# assume that the value of alpha is not too small and satisfies the second
# condition).
while alpha1 > amin: # we are assuming alpha>0 is a descent direction
factor = alpha0**2 * alpha1**2 * (alpha1-alpha0)
a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \
alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0)
a = a / factor
b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \
alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0)
b = b / factor
alpha2 = (-b + np.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a)
phi_a2 = phi(alpha2)
if (phi_a2 <= phi0 + c1*alpha2*derphi0):
return alpha2, phi_a2
if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96:
alpha2 = alpha1 / 2.0
alpha0 = alpha1
alpha1 = alpha2
phi_a0 = phi_a1
phi_a1 = phi_a2
# Failed to find a suitable step length
return None, phi_a1
#------------------------------------------------------------------------------
# Non-monotone line search for DF-SANE
#------------------------------------------------------------------------------
def _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta,
gamma=1e-4, tau_min=0.1, tau_max=0.5):
"""
Nonmonotone backtracking line search as described in [1]_
Parameters
----------
f : callable
Function returning a tuple ``(f, F)`` where ``f`` is the value
of a merit function and ``F`` the residual.
x_k : ndarray
Initial position
d : ndarray
Search direction
prev_fs : list of floats
List of previous merit function values. Should have ``len(prev_fs) <= M``
where ``M`` is the nonmonotonicity window parameter.
eta : float
Allowed merit function increase, see [1]_
gamma, tau_min, tau_max : float, optional
Search parameters, see [1]_
Returns
-------
alpha : float
Step length
xp : ndarray
Next position
fp : float
Merit function value at next position
Fp : ndarray
Residual at next position
References
----------
[1] "Spectral residual method without gradient information for solving
large-scale nonlinear systems of equations." W. La Cruz,
J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006).
"""
f_k = prev_fs[-1]
f_bar = max(prev_fs)
alpha_p = 1
alpha_m = 1
alpha = 1
while True:
xp = x_k + alpha_p * d
fp, Fp = f(xp)
if fp <= f_bar + eta - gamma * alpha_p**2 * f_k:
alpha = alpha_p
break
alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k)
xp = x_k - alpha_m * d
fp, Fp = f(xp)
if fp <= f_bar + eta - gamma * alpha_m**2 * f_k:
alpha = -alpha_m
break
alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k)
alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p)
alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m)
return alpha, xp, fp, Fp
def _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta,
gamma=1e-4, tau_min=0.1, tau_max=0.5,
nu=0.85):
"""
Nonmonotone line search from [1]_
Parameters
----------
f : callable
Function returning a tuple ``(f, F)`` where ``f`` is the value
of a merit function and ``F`` the residual.
x_k : ndarray
Initial position
d : ndarray
Search direction
f_k : float
Initial merit function value
C, Q : float
Control parameters. On the first iteration, give values
Q=1.0, C=f_k
eta : float
Allowed merit function increase, see [1]_
nu, gamma, tau_min, tau_max : float, optional
Search parameters, see [1]_
Returns
-------
alpha : float
Step length
xp : ndarray
Next position
fp : float
Merit function value at next position
Fp : ndarray
Residual at next position
C : float
New value for the control parameter C
Q : float
New value for the control parameter Q
References
----------
.. [1] W. Cheng & D.-H. Li, ''A derivative-free nonmonotone line
search and its application to the spectral residual
method'', IMA J. Numer. Anal. 29, 814 (2009).
"""
alpha_p = 1
alpha_m = 1
alpha = 1
while True:
xp = x_k + alpha_p * d
fp, Fp = f(xp)
if fp <= C + eta - gamma * alpha_p**2 * f_k:
alpha = alpha_p
break
alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k)
xp = x_k - alpha_m * d
fp, Fp = f(xp)
if fp <= C + eta - gamma * alpha_m**2 * f_k:
alpha = -alpha_m
break
alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k)
alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p)
alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m)
# Update C and Q
Q_next = nu * Q + 1
C = (nu * Q * (C + eta) + fp) / Q_next
Q = Q_next
return alpha, xp, fp, Fp, C, Q
|
|
"""Class to hold all camera accessories."""
import asyncio
from datetime import timedelta
import logging
from haffmpeg.core import FFMPEG_STDERR, HAFFmpeg
from pyhap.camera import (
VIDEO_CODEC_PARAM_LEVEL_TYPES,
VIDEO_CODEC_PARAM_PROFILE_ID_TYPES,
Camera as PyhapCamera,
)
from pyhap.const import CATEGORY_CAMERA
from homeassistant.components.ffmpeg import DATA_FFMPEG
from homeassistant.const import STATE_ON
from homeassistant.core import callback
from homeassistant.helpers.event import (
async_track_state_change_event,
async_track_time_interval,
)
from homeassistant.util import get_local_ip
from .accessories import TYPES, HomeAccessory
from .const import (
CHAR_MOTION_DETECTED,
CHAR_MUTE,
CHAR_PROGRAMMABLE_SWITCH_EVENT,
CONF_AUDIO_CODEC,
CONF_AUDIO_MAP,
CONF_AUDIO_PACKET_SIZE,
CONF_LINKED_DOORBELL_SENSOR,
CONF_LINKED_MOTION_SENSOR,
CONF_MAX_FPS,
CONF_MAX_HEIGHT,
CONF_MAX_WIDTH,
CONF_STREAM_ADDRESS,
CONF_STREAM_COUNT,
CONF_STREAM_SOURCE,
CONF_SUPPORT_AUDIO,
CONF_VIDEO_CODEC,
CONF_VIDEO_MAP,
CONF_VIDEO_PACKET_SIZE,
DEFAULT_AUDIO_CODEC,
DEFAULT_AUDIO_MAP,
DEFAULT_AUDIO_PACKET_SIZE,
DEFAULT_MAX_FPS,
DEFAULT_MAX_HEIGHT,
DEFAULT_MAX_WIDTH,
DEFAULT_STREAM_COUNT,
DEFAULT_SUPPORT_AUDIO,
DEFAULT_VIDEO_CODEC,
DEFAULT_VIDEO_MAP,
DEFAULT_VIDEO_PACKET_SIZE,
SERV_DOORBELL,
SERV_MOTION_SENSOR,
SERV_SPEAKER,
SERV_STATELESS_PROGRAMMABLE_SWITCH,
)
from .img_util import scale_jpeg_camera_image
from .util import pid_is_alive
_LOGGER = logging.getLogger(__name__)
DOORBELL_SINGLE_PRESS = 0
DOORBELL_DOUBLE_PRESS = 1
DOORBELL_LONG_PRESS = 2
VIDEO_OUTPUT = (
"-map {v_map} -an "
"-c:v {v_codec} "
"{v_profile}"
"-tune zerolatency -pix_fmt yuv420p "
"-r {fps} "
"-b:v {v_max_bitrate}k -bufsize {v_bufsize}k -maxrate {v_max_bitrate}k "
"-payload_type 99 "
"-ssrc {v_ssrc} -f rtp "
"-srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params {v_srtp_key} "
"srtp://{address}:{v_port}?rtcpport={v_port}&"
"localrtcpport={v_port}&pkt_size={v_pkt_size}"
)
AUDIO_OUTPUT = (
"-map {a_map} -vn "
"-c:a {a_encoder} "
"{a_application}"
"-ac 1 -ar {a_sample_rate}k "
"-b:a {a_max_bitrate}k -bufsize {a_bufsize}k "
"-payload_type 110 "
"-ssrc {a_ssrc} -f rtp "
"-srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params {a_srtp_key} "
"srtp://{address}:{a_port}?rtcpport={a_port}&"
"localrtcpport={a_port}&pkt_size={a_pkt_size}"
)
SLOW_RESOLUTIONS = [
(320, 180, 15),
(320, 240, 15),
]
RESOLUTIONS = [
(320, 180),
(320, 240),
(480, 270),
(480, 360),
(640, 360),
(640, 480),
(1024, 576),
(1024, 768),
(1280, 720),
(1280, 960),
(1920, 1080),
(1600, 1200),
]
VIDEO_PROFILE_NAMES = ["baseline", "main", "high"]
FFMPEG_WATCH_INTERVAL = timedelta(seconds=5)
FFMPEG_LOGGER = "ffmpeg_logger"
FFMPEG_WATCHER = "ffmpeg_watcher"
FFMPEG_PID = "ffmpeg_pid"
SESSION_ID = "session_id"
CONFIG_DEFAULTS = {
CONF_SUPPORT_AUDIO: DEFAULT_SUPPORT_AUDIO,
CONF_MAX_WIDTH: DEFAULT_MAX_WIDTH,
CONF_MAX_HEIGHT: DEFAULT_MAX_HEIGHT,
CONF_MAX_FPS: DEFAULT_MAX_FPS,
CONF_AUDIO_CODEC: DEFAULT_AUDIO_CODEC,
CONF_AUDIO_MAP: DEFAULT_AUDIO_MAP,
CONF_VIDEO_MAP: DEFAULT_VIDEO_MAP,
CONF_VIDEO_CODEC: DEFAULT_VIDEO_CODEC,
CONF_AUDIO_PACKET_SIZE: DEFAULT_AUDIO_PACKET_SIZE,
CONF_VIDEO_PACKET_SIZE: DEFAULT_VIDEO_PACKET_SIZE,
CONF_STREAM_COUNT: DEFAULT_STREAM_COUNT,
}
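# Editor's note (illustrative, not part of the integration): the Camera
# accessory below advertises only the subset of these candidate resolutions
# that fits the configured maximums. For a hypothetical camera capped at
# 1280x720 and 20 fps the list is built roughly like this:
#
#     max_width, max_height, max_fps = 1280, 720, 20
#     advertised = [
#         (w, h, fps)
#         for w, h, fps in SLOW_RESOLUTIONS
#         if w <= max_width and h <= max_height and fps < max_fps
#     ] + [
#         (w, h, max_fps)
#         for w, h in RESOLUTIONS
#         if w <= max_width and h <= max_height
#     ]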
@TYPES.register("Camera")
class Camera(HomeAccessory, PyhapCamera):
"""Generate a Camera accessory."""
def __init__(self, hass, driver, name, entity_id, aid, config):
"""Initialize a Camera accessory object."""
self._ffmpeg = hass.data[DATA_FFMPEG]
for config_key in CONFIG_DEFAULTS:
if config_key not in config:
config[config_key] = CONFIG_DEFAULTS[config_key]
max_fps = config[CONF_MAX_FPS]
max_width = config[CONF_MAX_WIDTH]
max_height = config[CONF_MAX_HEIGHT]
resolutions = [
(w, h, fps)
for w, h, fps in SLOW_RESOLUTIONS
if w <= max_width and h <= max_height and fps < max_fps
] + [
(w, h, max_fps)
for w, h in RESOLUTIONS
if w <= max_width and h <= max_height
]
video_options = {
"codec": {
"profiles": [
VIDEO_CODEC_PARAM_PROFILE_ID_TYPES["BASELINE"],
VIDEO_CODEC_PARAM_PROFILE_ID_TYPES["MAIN"],
VIDEO_CODEC_PARAM_PROFILE_ID_TYPES["HIGH"],
],
"levels": [
VIDEO_CODEC_PARAM_LEVEL_TYPES["TYPE3_1"],
VIDEO_CODEC_PARAM_LEVEL_TYPES["TYPE3_2"],
VIDEO_CODEC_PARAM_LEVEL_TYPES["TYPE4_0"],
],
},
"resolutions": resolutions,
}
audio_options = {
"codecs": [
{"type": "OPUS", "samplerate": 24},
{"type": "OPUS", "samplerate": 16},
]
}
stream_address = config.get(CONF_STREAM_ADDRESS, get_local_ip())
options = {
"video": video_options,
"audio": audio_options,
"address": stream_address,
"srtp": True,
"stream_count": config[CONF_STREAM_COUNT],
}
super().__init__(
hass,
driver,
name,
entity_id,
aid,
config,
category=CATEGORY_CAMERA,
options=options,
)
self._char_motion_detected = None
self.linked_motion_sensor = self.config.get(CONF_LINKED_MOTION_SENSOR)
if self.linked_motion_sensor:
state = self.hass.states.get(self.linked_motion_sensor)
if state:
serv_motion = self.add_preload_service(SERV_MOTION_SENSOR)
self._char_motion_detected = serv_motion.configure_char(
CHAR_MOTION_DETECTED, value=False
)
self._async_update_motion_state(state)
self._char_doorbell_detected = None
self._char_doorbell_detected_switch = None
self.linked_doorbell_sensor = self.config.get(CONF_LINKED_DOORBELL_SENSOR)
if self.linked_doorbell_sensor:
state = self.hass.states.get(self.linked_doorbell_sensor)
if state:
serv_doorbell = self.add_preload_service(SERV_DOORBELL)
self.set_primary_service(serv_doorbell)
self._char_doorbell_detected = serv_doorbell.configure_char(
CHAR_PROGRAMMABLE_SWITCH_EVENT,
value=0,
)
serv_stateless_switch = self.add_preload_service(
SERV_STATELESS_PROGRAMMABLE_SWITCH
)
self._char_doorbell_detected_switch = (
serv_stateless_switch.configure_char(
CHAR_PROGRAMMABLE_SWITCH_EVENT,
value=0,
valid_values={"SinglePress": DOORBELL_SINGLE_PRESS},
)
)
serv_speaker = self.add_preload_service(SERV_SPEAKER)
serv_speaker.configure_char(CHAR_MUTE, value=0)
self._async_update_doorbell_state(state)
async def run_handler(self):
"""Handle accessory driver started event.
Run inside the Home Assistant event loop.
"""
if self._char_motion_detected:
async_track_state_change_event(
self.hass,
[self.linked_motion_sensor],
self._async_update_motion_state_event,
)
if self._char_doorbell_detected:
async_track_state_change_event(
self.hass,
[self.linked_doorbell_sensor],
self._async_update_doorbell_state_event,
)
await super().run_handler()
@callback
def _async_update_motion_state_event(self, event):
"""Handle state change event listener callback."""
self._async_update_motion_state(event.data.get("new_state"))
@callback
def _async_update_motion_state(self, new_state):
"""Handle link motion sensor state change to update HomeKit value."""
if not new_state:
return
detected = new_state.state == STATE_ON
if self._char_motion_detected.value == detected:
return
self._char_motion_detected.set_value(detected)
_LOGGER.debug(
"%s: Set linked motion %s sensor to %d",
self.entity_id,
self.linked_motion_sensor,
detected,
)
@callback
def _async_update_doorbell_state_event(self, event):
"""Handle state change event listener callback."""
self._async_update_doorbell_state(event.data.get("new_state"))
@callback
def _async_update_doorbell_state(self, new_state):
"""Handle link doorbell sensor state change to update HomeKit value."""
if not new_state:
return
if new_state.state == STATE_ON:
self._char_doorbell_detected.set_value(DOORBELL_SINGLE_PRESS)
self._char_doorbell_detected_switch.set_value(DOORBELL_SINGLE_PRESS)
_LOGGER.debug(
"%s: Set linked doorbell %s sensor to %d",
self.entity_id,
self.linked_doorbell_sensor,
DOORBELL_SINGLE_PRESS,
)
@callback
def async_update_state(self, new_state):
"""Handle state change to update HomeKit value."""
pass # pylint: disable=unnecessary-pass
async def _async_get_stream_source(self):
"""Find the camera stream source url."""
stream_source = self.config.get(CONF_STREAM_SOURCE)
if stream_source:
return stream_source
try:
stream_source = await self.hass.components.camera.async_get_stream_source(
self.entity_id
)
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Failed to get stream source - this could be a transient error or your camera might not be compatible with HomeKit yet"
)
if stream_source:
self.config[CONF_STREAM_SOURCE] = stream_source
return stream_source
async def start_stream(self, session_info, stream_config):
"""Start a new stream with the given configuration."""
_LOGGER.debug(
"[%s] Starting stream with the following parameters: %s",
session_info["id"],
stream_config,
)
input_source = await self._async_get_stream_source()
if not input_source:
_LOGGER.error("Camera has no stream source")
return False
if "-i " not in input_source:
input_source = "-i " + input_source
video_profile = ""
if self.config[CONF_VIDEO_CODEC] != "copy":
video_profile = (
"-profile:v "
+ VIDEO_PROFILE_NAMES[
int.from_bytes(stream_config["v_profile_id"], byteorder="big")
]
+ " "
)
audio_application = ""
if self.config[CONF_AUDIO_CODEC] == "libopus":
audio_application = "-application lowdelay "
output_vars = stream_config.copy()
output_vars.update(
{
"v_profile": video_profile,
"v_bufsize": stream_config["v_max_bitrate"] * 4,
"v_map": self.config[CONF_VIDEO_MAP],
"v_pkt_size": self.config[CONF_VIDEO_PACKET_SIZE],
"v_codec": self.config[CONF_VIDEO_CODEC],
"a_bufsize": stream_config["a_max_bitrate"] * 4,
"a_map": self.config[CONF_AUDIO_MAP],
"a_pkt_size": self.config[CONF_AUDIO_PACKET_SIZE],
"a_encoder": self.config[CONF_AUDIO_CODEC],
"a_application": audio_application,
}
)
output = VIDEO_OUTPUT.format(**output_vars)
if self.config[CONF_SUPPORT_AUDIO]:
output = output + " " + AUDIO_OUTPUT.format(**output_vars)
_LOGGER.debug("FFmpeg output settings: %s", output)
stream = HAFFmpeg(self._ffmpeg.binary)
opened = await stream.open(
cmd=[],
input_source=input_source,
output=output,
extra_cmd="-hide_banner -nostats",
stderr_pipe=True,
stdout_pipe=False,
)
if not opened:
_LOGGER.error("Failed to open ffmpeg stream")
return False
_LOGGER.info(
"[%s] Started stream process - PID %d",
session_info["id"],
stream.process.pid,
)
session_info["stream"] = stream
session_info[FFMPEG_PID] = stream.process.pid
stderr_reader = await stream.get_reader(source=FFMPEG_STDERR)
async def watch_session(_):
await self._async_ffmpeg_watch(session_info["id"])
session_info[FFMPEG_LOGGER] = asyncio.create_task(
self._async_log_stderr_stream(stderr_reader)
)
session_info[FFMPEG_WATCHER] = async_track_time_interval(
self.hass,
watch_session,
FFMPEG_WATCH_INTERVAL,
)
return await self._async_ffmpeg_watch(session_info["id"])
async def _async_log_stderr_stream(self, stderr_reader):
"""Log output from ffmpeg."""
_LOGGER.debug("%s: ffmpeg: started", self.display_name)
while True:
line = await stderr_reader.readline()
if line == b"":
return
_LOGGER.debug("%s: ffmpeg: %s", self.display_name, line.rstrip())
async def _async_ffmpeg_watch(self, session_id):
"""Check to make sure ffmpeg is still running and cleanup if not."""
ffmpeg_pid = self.sessions[session_id][FFMPEG_PID]
if pid_is_alive(ffmpeg_pid):
return True
_LOGGER.warning("Streaming process ended unexpectedly - PID %d", ffmpeg_pid)
self._async_stop_ffmpeg_watch(session_id)
self.set_streaming_available(self.sessions[session_id]["stream_idx"])
return False
@callback
def _async_stop_ffmpeg_watch(self, session_id):
"""Cleanup a streaming session after stopping."""
if FFMPEG_WATCHER not in self.sessions[session_id]:
return
self.sessions[session_id].pop(FFMPEG_WATCHER)()
self.sessions[session_id].pop(FFMPEG_LOGGER).cancel()
async def stop_stream(self, session_info):
"""Stop the stream for the given ``session_id``."""
session_id = session_info["id"]
stream = session_info.get("stream")
if not stream:
_LOGGER.debug("No stream for session ID %s", session_id)
return
self._async_stop_ffmpeg_watch(session_id)
if not pid_is_alive(stream.process.pid):
_LOGGER.info("[%s] Stream already stopped", session_id)
return True
for shutdown_method in ["close", "kill"]:
_LOGGER.info("[%s] %s stream", session_id, shutdown_method)
try:
await getattr(stream, shutdown_method)()
return
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"[%s] Failed to %s stream", session_id, shutdown_method
)
async def reconfigure_stream(self, session_info, stream_config):
"""Reconfigure the stream so that it uses the given ``stream_config``."""
return True
async def async_get_snapshot(self, image_size):
"""Return a jpeg of a snapshot from the camera."""
return scale_jpeg_camera_image(
await self.hass.components.camera.async_get_image(self.entity_id),
image_size["image-width"],
image_size["image-height"],
)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class TrustedIdProvidersOperations(object):
"""TrustedIdProvidersOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2016-11-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-11-01"
self.config = config
def list_by_account(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""Lists the Data Lake Store trusted identity providers within the
specified Data Lake Store account.
:param resource_group_name: The name of the Azure resource group.
:type resource_group_name: str
:param account_name: The name of the Data Lake Store account.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of TrustedIdProvider
:rtype:
~azure.mgmt.datalake.store.models.TrustedIdProviderPaged[~azure.mgmt.datalake.store.models.TrustedIdProvider]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.TrustedIdProviderPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.TrustedIdProviderPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
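# Hedged usage sketch (not generated code; names are illustrative): the paged
# result can be iterated directly, fetching further pages on demand:
#
#     for provider in client.trusted_id_providers.list_by_account(
#             'my-resource-group', 'my-adls-account'):
#         print(provider.name, provider.id_provider)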
def create_or_update(
self, resource_group_name, account_name, trusted_id_provider_name, id_provider, custom_headers=None, raw=False, **operation_config):
"""Creates or updates the specified trusted identity provider. During
update, the trusted identity provider with the specified name will be
replaced with this new provider.
:param resource_group_name: The name of the Azure resource group.
:type resource_group_name: str
:param account_name: The name of the Data Lake Store account.
:type account_name: str
:param trusted_id_provider_name: The name of the trusted identity
provider. This is used for differentiation of providers in the
account.
:type trusted_id_provider_name: str
:param id_provider: The URL of this trusted identity provider.
:type id_provider: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: TrustedIdProvider or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.datalake.store.models.TrustedIdProvider or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.CreateOrUpdateTrustedIdProviderParameters(id_provider=id_provider)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders/{trustedIdProviderName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'trustedIdProviderName': self._serialize.url("trusted_id_provider_name", trusted_id_provider_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'CreateOrUpdateTrustedIdProviderParameters')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TrustedIdProvider', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get(
self, resource_group_name, account_name, trusted_id_provider_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified Data Lake Store trusted identity provider.
:param resource_group_name: The name of the Azure resource group.
:type resource_group_name: str
:param account_name: The name of the Data Lake Store account.
:type account_name: str
:param trusted_id_provider_name: The name of the trusted identity
provider to retrieve.
:type trusted_id_provider_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: TrustedIdProvider or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.datalake.store.models.TrustedIdProvider or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders/{trustedIdProviderName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'trustedIdProviderName': self._serialize.url("trusted_id_provider_name", trusted_id_provider_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TrustedIdProvider', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, resource_group_name, account_name, trusted_id_provider_name, id_provider=None, custom_headers=None, raw=False, **operation_config):
"""Updates the specified trusted identity provider.
:param resource_group_name: The name of the Azure resource group.
:type resource_group_name: str
:param account_name: The name of the Data Lake Store account.
:type account_name: str
:param trusted_id_provider_name: The name of the trusted identity
provider. This is used for differentiation of providers in the
account.
:type trusted_id_provider_name: str
:param id_provider: The URL of this trusted identity provider.
:type id_provider: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: TrustedIdProvider or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.datalake.store.models.TrustedIdProvider or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = None
if id_provider is not None:
parameters = models.UpdateTrustedIdProviderParameters(id_provider=id_provider)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders/{trustedIdProviderName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'trustedIdProviderName': self._serialize.url("trusted_id_provider_name", trusted_id_provider_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
if parameters is not None:
body_content = self._serialize.body(parameters, 'UpdateTrustedIdProviderParameters')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TrustedIdProvider', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, account_name, trusted_id_provider_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified trusted identity provider from the specified Data
Lake Store account.
:param resource_group_name: The name of the Azure resource group.
:type resource_group_name: str
:param account_name: The name of the Data Lake Store account.
:type account_name: str
:param trusted_id_provider_name: The name of the trusted identity
provider to delete.
:type trusted_id_provider_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/trustedIdProviders/{trustedIdProviderName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'trustedIdProviderName': self._serialize.url("trusted_id_provider_name", trusted_id_provider_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
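# Hedged usage sketch, not generated code: assumes an already-constructed
# Data Lake Store management client whose ``trusted_id_providers`` attribute
# is an instance of the operations class above. Resource names and the
# provider URL are illustrative; model attribute access follows the models
# referenced in the docstrings.
def _example_trusted_id_provider_roundtrip(client):
    provider = client.trusted_id_providers.create_or_update(
        'my-resource-group', 'my-adls-account', 'my-provider',
        id_provider='https://sts.windows.net/<tenant-id>/')
    fetched = client.trusted_id_providers.get(
        'my-resource-group', 'my-adls-account', 'my-provider')
    assert fetched.name == provider.name
    client.trusted_id_providers.delete(
        'my-resource-group', 'my-adls-account', 'my-provider')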
|
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientNet with BatchEnsemble."""
import os
import time
from absl import app
from absl import flags
from absl import logging
import robustness_metrics as rm
import tensorflow as tf
import tensorflow_datasets as tfds
import uncertainty_baselines as ub
from tensorboard.plugins.hparams import api as hp
# ~312.78 steps per epoch for 4x4 TPU; per_core_batch_size=128; 350 epochs;
# TODO(trandustin): Tune results.
# General model flags
flags.DEFINE_enum('model_name',
default='efficientnet-b0',
enum_values=['efficientnet-b0', 'efficientnet-b1',
'efficientnet-b2', 'efficientnet-b3'],
help='Efficientnet model name.')
flags.DEFINE_integer('ensemble_size', 4, 'Size of ensemble.')
flags.DEFINE_integer('per_core_batch_size', 128, 'Batch size per TPU core/GPU.')
flags.DEFINE_float('random_sign_init', -0.5,
'Use random sign init for fast weights.')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_float('base_learning_rate', 0.016,
'Base learning rate when train batch size is 256.')
flags.DEFINE_float('one_minus_momentum', 0.1, 'Optimizer momentum.')
flags.DEFINE_float('fast_weight_lr_multiplier', 0.5,
'fast weights lr multiplier.')
flags.DEFINE_float('l2', 5e-6, 'L2 coefficient.')
flags.DEFINE_string('data_dir', None, 'Path to training and testing data.')
flags.DEFINE_string('output_dir', '/tmp/imagenet',
'The directory where the model weights and '
'training/evaluation summaries are stored.')
flags.DEFINE_integer('train_epochs', 350, 'Number of training epochs.')
flags.DEFINE_integer('checkpoint_interval', 15,
'Number of epochs between saving checkpoints. Use -1 to '
'never save checkpoints.')
flags.DEFINE_integer('evaluation_interval', 5, 'How many epochs to run test.')
flags.DEFINE_string('alexnet_errors_path', None,
'Path to AlexNet corruption errors file.')
flags.DEFINE_float('label_smoothing', 0.1, 'label smoothing constant.')
flags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE computation.')
# Accelerator flags.
flags.DEFINE_bool('use_gpu', False, 'Whether to run on GPU or otherwise TPU.')
flags.DEFINE_bool('use_bfloat16', True, 'Whether to use mixed precision.')
flags.DEFINE_integer('num_cores', 32, 'Number of TPU cores or number of GPUs.')
flags.DEFINE_string('tpu', None,
'Name of the TPU. Only used if use_gpu is False.')
FLAGS = flags.FLAGS
# Number of images in ImageNet-1k train dataset.
APPROX_IMAGENET_TRAIN_IMAGES = 1281167
IMAGENET_VALIDATION_IMAGES = 50000
NUM_CLASSES = 1000
def main(argv):
del argv # unused arg
tf.io.gfile.makedirs(FLAGS.output_dir)
logging.info('Saving checkpoints at %s', FLAGS.output_dir)
tf.random.set_seed(FLAGS.seed)
per_core_batch_size = FLAGS.per_core_batch_size // FLAGS.ensemble_size
batch_size = per_core_batch_size * FLAGS.num_cores
steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // batch_size
steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size
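# Worked example with the default flags above (per_core_batch_size=128,
# ensemble_size=4, num_cores=32): per_core_batch_size becomes 128 // 4 = 32,
# the global batch size is 32 * 32 = 1024, giving 1281167 // 1024 = 1251
# train steps and 50000 // 1024 = 48 eval steps per epoch.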
data_dir = FLAGS.data_dir
if FLAGS.use_gpu:
logging.info('Use GPU')
strategy = tf.distribute.MirroredStrategy()
else:
logging.info('Use TPU at %s',
FLAGS.tpu if FLAGS.tpu is not None else 'local')
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
width_coefficient, depth_coefficient, input_image_size, dropout_rate = (
ub.models.efficientnet_utils.efficientnet_params(FLAGS.model_name))
train_builder = ub.datasets.ImageNetDataset(
split=tfds.Split.TRAIN,
use_bfloat16=FLAGS.use_bfloat16,
image_size=input_image_size,
normalize_input=True,
one_hot=True,
data_dir=data_dir)
train_dataset = train_builder.load(batch_size=batch_size, strategy=strategy)
test_builder = ub.datasets.ImageNetDataset(
split=tfds.Split.TEST,
use_bfloat16=FLAGS.use_bfloat16,
image_size=input_image_size,
normalize_input=True,
one_hot=True,
data_dir=data_dir)
clean_test_dataset = test_builder.load(
batch_size=batch_size, strategy=strategy)
test_datasets = {
'clean': clean_test_dataset,
}
train_iterator = iter(train_dataset)
test_iterator = iter(test_datasets['clean'])
if FLAGS.use_bfloat16:
tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')
summary_writer = tf.summary.create_file_writer(
os.path.join(FLAGS.output_dir, 'summaries'))
with strategy.scope():
logging.info('Building %s model', FLAGS.model_name)
model = ub.models.efficientnet_batch_ensemble(
width_coefficient,
depth_coefficient,
dropout_rate,
ensemble_size=FLAGS.ensemble_size,
random_sign_init=FLAGS.random_sign_init)
scaled_lr = FLAGS.base_learning_rate * (batch_size / 256.0)
# Decay epoch is 2.4, warmup epoch is 5 according to the Efficientnet paper.
decay_steps = steps_per_epoch * 2.4
warmup_step = steps_per_epoch * 5
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
scaled_lr, decay_steps, decay_rate=0.97, staircase=True)
learning_rate = ub.schedules.AddWarmupDecaySchedule(
lr_schedule, warmup_step)
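# Worked example with the defaults above: scaled_lr = 0.016 * 1024 / 256 =
# 0.064; decay_steps = 1251 * 2.4 ~ 3002 optimizer steps, so the staircase
# schedule multiplies the rate by 0.97 roughly every 3002 steps after a
# warmup over 1251 * 5 = 6255 steps.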
optimizer = tf.keras.optimizers.RMSprop(
learning_rate,
rho=0.9,
momentum=1.0 - FLAGS.one_minus_momentum,
epsilon=0.001)
metrics = {
'train/negative_log_likelihood': tf.keras.metrics.Mean(),
'train/accuracy': tf.keras.metrics.CategoricalAccuracy(),
'train/ece': rm.metrics.ExpectedCalibrationError(
num_bins=FLAGS.num_bins),
'train/loss': tf.keras.metrics.Mean(),
'test/negative_log_likelihood': tf.keras.metrics.Mean(),
'test/accuracy': tf.keras.metrics.CategoricalAccuracy(),
'test/ece': rm.metrics.ExpectedCalibrationError(
num_bins=FLAGS.num_bins),
}
logging.info('Finished building %s model', FLAGS.model_name)
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
initial_epoch = 0
if latest_checkpoint:
# checkpoint.restore must be within a strategy.scope() so that optimizer
# slot variables are mirrored.
checkpoint.restore(latest_checkpoint)
logging.info('Loaded checkpoint %s', latest_checkpoint)
initial_epoch = optimizer.iterations.numpy() // steps_per_epoch
def train_step(inputs):
"""Build `step_fn` for efficientnet learning."""
images = inputs['features']
labels = inputs['labels']
images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
labels = tf.tile(labels, [FLAGS.ensemble_size, 1])
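# BatchEnsemble shares the input batch across ensemble members: tiling turns
# images of shape [B, H, W, C] into [ensemble_size * B, H, W, C] and labels
# of shape [B, num_classes] into [ensemble_size * B, num_classes], so every
# member sees the same examples in a single forward pass.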
num_replicas = tf.cast(strategy.num_replicas_in_sync, tf.float32)
l2_coeff = tf.cast(FLAGS.l2, tf.float32)
with tf.GradientTape() as tape:
logits = model(images, training=True)
logits = tf.cast(logits, tf.float32)
negative_log_likelihood = tf.reduce_mean(
tf.keras.losses.categorical_crossentropy(
labels,
logits,
from_logits=True,
label_smoothing=FLAGS.label_smoothing))
filtered_variables = []
for var in model.trainable_variables:
# Apply L2 on the slow weights and bias terms. This excludes BN
# parameters and fast-weight approximate posterior/prior parameters,
# but be careful about their naming scheme.
if 'kernel' in var.name or 'bias' in var.name:
filtered_variables.append(tf.reshape(var, (-1,)))
l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
tf.concat(filtered_variables, axis=0))
loss = negative_log_likelihood + l2_coeff * l2_loss
scaled_loss = loss / num_replicas
grads = tape.gradient(scaled_loss, model.trainable_weights)
# Separate learning rate implementation.
if FLAGS.fast_weight_lr_multiplier != 1.0:
grads_and_vars = []
for grad, var in zip(grads, model.trainable_variables):
# Apply a different learning rate to the fast weights. This excludes BN
# and slow weights, but be careful about the naming scheme.
if ('batch_norm' not in var.name and 'kernel' not in var.name):
grads_and_vars.append((grad * FLAGS.fast_weight_lr_multiplier,
var))
else:
grads_and_vars.append((grad, var))
optimizer.apply_gradients(grads_and_vars)
else:
optimizer.apply_gradients(zip(grads, model.trainable_variables))
sparse_labels = tf.cast(
tf.math.argmax(labels, axis=-1, output_type=tf.int32), tf.float32)
probs = tf.nn.softmax(logits)
metrics['train/loss'].update_state(loss)
metrics['train/negative_log_likelihood'].update_state(
negative_log_likelihood)
metrics['train/accuracy'].update_state(labels, logits)
metrics['train/ece'].add_batch(probs, label=sparse_labels)
step_info = {
'loss/negative_log_likelihood': negative_log_likelihood / num_replicas,
'loss/total_loss': scaled_loss,
}
return step_info
def eval_step(inputs):
"""A single step."""
images = inputs['features']
labels = inputs['labels']
images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
logits = model(images, training=False)
logits = tf.cast(logits, tf.float32)
probs = tf.nn.softmax(logits)
per_probs = tf.split(
probs, num_or_size_splits=FLAGS.ensemble_size, axis=0)
probs = tf.reduce_mean(per_probs, axis=0)
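# At eval time the tiled logits are split back into ensemble_size chunks of
# shape [B, num_classes]; averaging their softmax probabilities yields the
# ensemble prediction fed to the NLL/accuracy/ECE metrics below.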
negative_log_likelihood = tf.reduce_mean(
tf.keras.losses.categorical_crossentropy(labels, probs))
sparse_labels = tf.cast(
tf.math.argmax(labels, axis=-1, output_type=tf.int32), tf.float32)
metrics['test/negative_log_likelihood'].update_state(
negative_log_likelihood)
metrics['test/accuracy'].update_state(labels, probs)
metrics['test/ece'].add_batch(probs, label=sparse_labels)
@tf.function
def epoch_fn(should_eval):
"""Build `epoch_fn` for training and potential eval."""
for _ in tf.range(tf.cast(steps_per_epoch, tf.int32)):
info = strategy.run(train_step, args=(next(train_iterator),))
optim_step = optimizer.iterations
if optim_step % tf.cast(100, optim_step.dtype) == 0:
for k, v in info.items():
v_reduce = strategy.reduce(tf.distribute.ReduceOp.SUM, v, None)
tf.summary.scalar(k, v_reduce, optim_step)
tf.summary.scalar('loss/lr', learning_rate(optim_step), optim_step)
summary_writer.flush()
if should_eval:
for _ in tf.range(tf.cast(steps_per_eval, tf.int32)):
strategy.run(eval_step, args=(next(test_iterator),))
# Main training loop.
start_time = time.time()
with summary_writer.as_default():
for epoch in range(initial_epoch, FLAGS.train_epochs):
logging.info('Starting to run epoch: %s', epoch)
should_eval = (epoch % FLAGS.evaluation_interval == 0)
# Pass tf constant to avoid re-tracing.
epoch_fn(tf.constant(should_eval))
current_step = (epoch + 1) * steps_per_epoch
max_steps = steps_per_epoch * FLAGS.train_epochs
time_elapsed = time.time() - start_time
steps_per_sec = float(current_step) / time_elapsed
eta_seconds = (max_steps - current_step) / steps_per_sec
message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
current_step / max_steps,
epoch + 1,
FLAGS.train_epochs,
steps_per_sec,
eta_seconds / 60,
time_elapsed / 60))
logging.info(message)
logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
metrics['train/loss'].result(),
metrics['train/accuracy'].result() * 100)
if should_eval:
logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
metrics['test/negative_log_likelihood'].result(),
metrics['test/accuracy'].result() * 100)
total_metrics = metrics.copy()
total_results = {name: metric.result()
for name, metric in total_metrics.items()}
total_results.update({'lr': learning_rate(optimizer.iterations)})
# Metrics from Robustness Metrics (like ECE) will return a dict with a
# single key/value, instead of a scalar.
total_results = {
k: (list(v.values())[0] if isinstance(v, dict) else v)
for k, v in total_results.items()
}
with summary_writer.as_default():
for name, result in total_results.items():
if should_eval or 'test' not in name:
tf.summary.scalar(name, result, step=epoch + 1)
for metric in metrics.values():
metric.reset_states()
if (FLAGS.checkpoint_interval > 0 and
(epoch + 1) % FLAGS.checkpoint_interval == 0):
checkpoint_name = checkpoint.save(os.path.join(
FLAGS.output_dir, 'checkpoint'))
logging.info('Saved checkpoint to %s', checkpoint_name)
with summary_writer.as_default():
hp.hparams({
'base_learning_rate': FLAGS.base_learning_rate,
'one_minus_momentum': FLAGS.one_minus_momentum,
'l2': FLAGS.l2,
'random_sign_init': FLAGS.random_sign_init,
'fast_weight_lr_multiplier': FLAGS.fast_weight_lr_multiplier,
})
if __name__ == '__main__':
app.run(main)
|
|
#! /usr/bin/env python
#
# Copyright (C) 2015-2016 Rich Lewis <rl403@cam.ac.uk>
# License: 3-clause BSD
"""
# skchem.io.sdf
Defining input and output operations for sdf files.
"""
from functools import wraps
import warnings
from rdkit import Chem
import pandas as pd
from ..core import Mol
from ..utils import Suppressor, squeeze
def _drop_props(row):
for prop in row.structure.props.keys():
row.structure.ClearProp(prop)
def _set_props(row, cols):
for i in cols:
row.structure.SetProp(str(i), str(row[i]))
def _set_name(row):
row.structure.name = str(row.name) # rdkit props can only be strs
def read_sdf(sdf, error_bad_mol=False, warn_bad_mol=True, nmols=None,
skipmols=None, skipfooter=None, read_props=True, mol_props=False,
*args, **kwargs):
"""Read an sdf file into a `pd.DataFrame`.
The function wraps the RDKit `ForwardSDMolSupplier` object.
Args:
sdf (str or file-like):
The location of data to load as a file path, or a file-like object.
error_bad_mol (bool):
Whether an error should be raised if a molecule fails to parse.
Default is False.
warn_bad_mol (bool):
Whether a warning should be output if a molecule fails to parse.
Default is True.
nmols (int):
The number of molecules to read. If `None`, read all molecules.
Default is `None`.
skipmols (int):
The number of molecules to skip at start.
Default is `0`.
skipfooter (int):
The number of molecules to skip from the end.
Default is `0`.
read_props (bool):
Whether to read the properties into the data frame.
Default is `True`.
mol_props (bool):
Whether to keep properties in the molecule dictionary after they
are extracted to the DataFrame.
Default is `False`.
args, kwargs:
Arguments will be passed to RDKit ForwardSDMolSupplier.
Returns:
pandas.DataFrame:
The loaded data frame, with Mols supplied in the `structure` field.
See also:
rdkit.Chem.SDForwardMolSupplier
skchem.read_smiles
"""
# nmols is actually the index to cut off at. If we skip some molecules at
# the start, we need to add that number to the cutoff.
if skipmols:
nmols += skipmols
if isinstance(sdf, str):
sdf = open(sdf, 'rb') # use read bytes for python 3 compatibility
# use the suppression context manager to not pollute our stdout with rdkit
# errors and warnings.
# perhaps this should be captured better by Mol etc.
with Suppressor():
mol_supp = Chem.ForwardSDMolSupplier(sdf, *args, **kwargs)
mols = []
# single loop through sdf
for i, mol in enumerate(mol_supp):
if skipmols and i < skipmols:
continue
if nmols and i >= nmols:
break
if mol is None:
msg = 'Molecule {} could not be decoded.'.format(i + 1)
if error_bad_mol:
raise ValueError(msg)
elif warn_bad_mol:
warnings.warn(msg)
continue
mols.append(Mol(mol))
if skipfooter:
mols = mols[:-skipfooter]
idx = pd.Index((m.name for m in mols), name='batch')
data = pd.DataFrame(mols, columns=['structure'])
if read_props:
props = pd.DataFrame([{k: v for (k, v) in mol.props.items()}
for mol in mols])
data = pd.concat([data, props], axis=1)
# now we have extracted the props, we can delete if required
if not mol_props:
data.apply(_drop_props, axis=1)
data.index = idx
return squeeze(data, axis=1)
def write_sdf(data, sdf, write_cols=True, index_as_name=True, mol_props=False,
*args, **kwargs):
""" Write an sdf file from a dataframe.
Args:
data (pandas.Series or pandas.DataFrame):
Pandas data structure with a `structure` column containing
compounds to serialize.
sdf (str or file-like):
A file path or file-like object specifying where to write the
compound data.
write_cols (bool):
Whether columns should be written as props. Default `True`.
index_as_name (bool):
Whether to use the index as the molecule name (the SDF title line), or
keep the molecule's existing name.
Default is `True`.
mol_props (bool):
Whether to write properties in the Mol dictionary in addition to
fields in the frame.
Warn:
This function will change the names of the compounds if the
`index_as_name` argument is `True`, and will delete all properties in
the molecule dictionary if `mol_props` is `False`.
"""
if isinstance(data, pd.Series):
data = data.to_frame(name='structure')
names = [m.name for m in data.structure]
writer = Chem.SDWriter(sdf, *args, **kwargs)
cols = list(data.columns.drop('structure'))
if not mol_props:
data.apply(_drop_props, axis=1)
if write_cols:
data.apply(_set_props, cols=cols, axis=1)
if index_as_name:
data.apply(_set_name, axis=1)
data.structure.apply(writer.write)
# rdkit writer changes names sometimes
for mol, name in zip(data.structure, names):
mol.name = name
@wraps(write_sdf)
def _to_sdf_series(self, *args, **kwargs):
return write_sdf(self, write_cols=False, *args, **kwargs)
@wraps(write_sdf)
def _to_sdf_df(self, *args, **kwargs):
return write_sdf(self, *args, **kwargs)
pd.Series.to_sdf = _to_sdf_series
pd.DataFrame.to_sdf = _to_sdf_df
@classmethod
@wraps(read_sdf)
def _from_sdf_df(_, *args, **kwargs):
return read_sdf(*args, **kwargs)
pd.DataFrame.from_sdf = _from_sdf_df
@classmethod
@wraps(read_sdf)
def _from_sdf_series(_, *args, **kwargs):
return read_sdf(*args, **kwargs).structure
pd.Series.from_sdf = _from_sdf_series
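# Hedged usage sketch: read an SDF into a DataFrame and write it back out.
# The file names are illustrative; ``to_sdf`` is provided by the monkey
# patching above.
def _example_sdf_roundtrip(in_path='compounds.sdf', out_path='compounds_out.sdf'):
    frame = read_sdf(in_path)  # Mols arrive in the 'structure' field
    frame.to_sdf(out_path, index_as_name=True)
    return frame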
|
|
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import # to enable import io from stdlib
from collections import namedtuple
import logging
import socket
from uuid import UUID
import six
from six.moves import range
import io
from cassandra import ProtocolVersion
from cassandra import type_codes, DriverException
from cassandra import (Unavailable, WriteTimeout, ReadTimeout,
WriteFailure, ReadFailure, FunctionFailure,
AlreadyExists, InvalidRequest, Unauthorized,
UnsupportedOperation, UserFunctionDescriptor,
UserAggregateDescriptor, SchemaTargetType)
from cassandra.marshal import (int32_pack, int32_unpack, uint16_pack, uint16_unpack,
int8_pack, int8_unpack, uint64_pack, header_pack,
v3_header_pack, uint32_pack)
from cassandra.cqltypes import (AsciiType, BytesType, BooleanType,
CounterColumnType, DateType, DecimalType,
DoubleType, FloatType, Int32Type,
InetAddressType, IntegerType, ListType,
LongType, MapType, SetType, TimeUUIDType,
UTF8Type, VarcharType, UUIDType, UserType,
TupleType, lookup_casstype, SimpleDateType,
TimeType, ByteType, ShortType, DurationType)
from cassandra.policies import WriteType
from cassandra.cython_deps import HAVE_CYTHON, HAVE_NUMPY
from cassandra import util
log = logging.getLogger(__name__)
class NotSupportedError(Exception):
pass
class InternalError(Exception):
pass
ColumnMetadata = namedtuple("ColumnMetadata", ['keyspace_name', 'table_name', 'name', 'type'])
HEADER_DIRECTION_TO_CLIENT = 0x80
HEADER_DIRECTION_MASK = 0x80
COMPRESSED_FLAG = 0x01
TRACING_FLAG = 0x02
CUSTOM_PAYLOAD_FLAG = 0x04
WARNING_FLAG = 0x08
USE_BETA_FLAG = 0x10
USE_BETA_MASK = ~USE_BETA_FLAG
_message_types_by_opcode = {}
_UNSET_VALUE = object()
def register_class(cls):
_message_types_by_opcode[cls.opcode] = cls
def get_registered_classes():
return _message_types_by_opcode.copy()
class _RegisterMessageType(type):
def __init__(cls, name, bases, dct):
if not name.startswith('_'):
register_class(cls)
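# Any message class defined with this metaclass whose name does not start
# with '_' registers itself in _message_types_by_opcode keyed by its
# ``opcode`` attribute; decode_message later uses that mapping to turn a
# frame opcode back into the right message class.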
@six.add_metaclass(_RegisterMessageType)
class _MessageType(object):
tracing = False
custom_payload = None
warnings = None
def update_custom_payload(self, other):
if other:
if not self.custom_payload:
self.custom_payload = {}
self.custom_payload.update(other)
if len(self.custom_payload) > 65535:
raise ValueError("Custom payload map exceeds max count allowed by protocol (65535)")
def __repr__(self):
return '<%s(%s)>' % (self.__class__.__name__, ', '.join('%s=%r' % i for i in _get_params(self)))
def _get_params(message_obj):
base_attrs = dir(_MessageType)
return (
(n, a) for n, a in message_obj.__dict__.items()
if n not in base_attrs and not n.startswith('_') and not callable(a)
)
error_classes = {}
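# Maps server error codes to ErrorMessage subclasses. ErrorMessageSubclass
# below populates it as each subclass with a non-None error_code is defined,
# and ErrorMessage.recv_body uses it to dispatch to the matching
# recv_error_info implementation.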
class ErrorMessage(_MessageType, Exception):
opcode = 0x00
name = 'ERROR'
summary = 'Unknown'
def __init__(self, code, message, info):
self.code = code
self.message = message
self.info = info
@classmethod
def recv_body(cls, f, protocol_version, *args):
code = read_int(f)
msg = read_string(f)
subcls = error_classes.get(code, cls)
extra_info = subcls.recv_error_info(f, protocol_version)
return subcls(code=code, message=msg, info=extra_info)
def summary_msg(self):
msg = 'Error from server: code=%04x [%s] message="%s"' \
% (self.code, self.summary, self.message)
if six.PY2 and isinstance(msg, six.text_type):
msg = msg.encode('utf-8')
return msg
def __str__(self):
return '<%s>' % self.summary_msg()
__repr__ = __str__
@staticmethod
def recv_error_info(f, protocol_version):
pass
def to_exception(self):
return self
class ErrorMessageSubclass(_RegisterMessageType):
def __init__(cls, name, bases, dct):
if cls.error_code is not None: # Server has an error code of 0.
error_classes[cls.error_code] = cls
@six.add_metaclass(ErrorMessageSubclass)
class ErrorMessageSub(ErrorMessage):
error_code = None
class RequestExecutionException(ErrorMessageSub):
pass
class RequestValidationException(ErrorMessageSub):
pass
class ServerError(ErrorMessageSub):
summary = 'Server error'
error_code = 0x0000
class ProtocolException(ErrorMessageSub):
summary = 'Protocol error'
error_code = 0x000A
class BadCredentials(ErrorMessageSub):
summary = 'Bad credentials'
error_code = 0x0100
class UnavailableErrorMessage(RequestExecutionException):
summary = 'Unavailable exception'
error_code = 0x1000
@staticmethod
def recv_error_info(f, protocol_version):
return {
'consistency': read_consistency_level(f),
'required_replicas': read_int(f),
'alive_replicas': read_int(f),
}
def to_exception(self):
return Unavailable(self.summary_msg(), **self.info)
class OverloadedErrorMessage(RequestExecutionException):
summary = 'Coordinator node overloaded'
error_code = 0x1001
class IsBootstrappingErrorMessage(RequestExecutionException):
summary = 'Coordinator node is bootstrapping'
error_code = 0x1002
class TruncateError(RequestExecutionException):
summary = 'Error during truncate'
error_code = 0x1003
class WriteTimeoutErrorMessage(RequestExecutionException):
summary = "Coordinator node timed out waiting for replica nodes' responses"
error_code = 0x1100
@staticmethod
def recv_error_info(f, protocol_version):
return {
'consistency': read_consistency_level(f),
'received_responses': read_int(f),
'required_responses': read_int(f),
'write_type': WriteType.name_to_value[read_string(f)],
}
def to_exception(self):
return WriteTimeout(self.summary_msg(), **self.info)
class ReadTimeoutErrorMessage(RequestExecutionException):
summary = "Coordinator node timed out waiting for replica nodes' responses"
error_code = 0x1200
@staticmethod
def recv_error_info(f, protocol_version):
return {
'consistency': read_consistency_level(f),
'received_responses': read_int(f),
'required_responses': read_int(f),
'data_retrieved': bool(read_byte(f)),
}
def to_exception(self):
return ReadTimeout(self.summary_msg(), **self.info)
class ReadFailureMessage(RequestExecutionException):
summary = "Replica(s) failed to execute read"
error_code = 0x1300
@staticmethod
def recv_error_info(f, protocol_version):
consistency = read_consistency_level(f)
received_responses = read_int(f)
required_responses = read_int(f)
if ProtocolVersion.uses_error_code_map(protocol_version):
error_code_map = read_error_code_map(f)
failures = len(error_code_map)
else:
error_code_map = None
failures = read_int(f)
data_retrieved = bool(read_byte(f))
return {
'consistency': consistency,
'received_responses': received_responses,
'required_responses': required_responses,
'failures': failures,
'error_code_map': error_code_map,
'data_retrieved': data_retrieved
}
def to_exception(self):
return ReadFailure(self.summary_msg(), **self.info)
class FunctionFailureMessage(RequestExecutionException):
summary = "User Defined Function failure"
error_code = 0x1400
@staticmethod
def recv_error_info(f, protocol_version):
return {
'keyspace': read_string(f),
'function': read_string(f),
'arg_types': [read_string(f) for _ in range(read_short(f))],
}
def to_exception(self):
return FunctionFailure(self.summary_msg(), **self.info)
class WriteFailureMessage(RequestExecutionException):
summary = "Replica(s) failed to execute write"
error_code = 0x1500
@staticmethod
def recv_error_info(f, protocol_version):
consistency = read_consistency_level(f)
received_responses = read_int(f)
required_responses = read_int(f)
if ProtocolVersion.uses_error_code_map(protocol_version):
error_code_map = read_error_code_map(f)
failures = len(error_code_map)
else:
error_code_map = None
failures = read_int(f)
write_type = WriteType.name_to_value[read_string(f)]
return {
'consistency': consistency,
'received_responses': received_responses,
'required_responses': required_responses,
'failures': failures,
'error_code_map': error_code_map,
'write_type': write_type
}
def to_exception(self):
return WriteFailure(self.summary_msg(), **self.info)
class SyntaxException(RequestValidationException):
summary = 'Syntax error in CQL query'
error_code = 0x2000
class UnauthorizedErrorMessage(RequestValidationException):
summary = 'Unauthorized'
error_code = 0x2100
def to_exception(self):
return Unauthorized(self.summary_msg())
class InvalidRequestException(RequestValidationException):
summary = 'Invalid query'
error_code = 0x2200
def to_exception(self):
return InvalidRequest(self.summary_msg())
class ConfigurationException(RequestValidationException):
summary = 'Query invalid because of configuration issue'
error_code = 0x2300
class PreparedQueryNotFound(RequestValidationException):
summary = 'Matching prepared statement not found on this node'
error_code = 0x2500
@staticmethod
def recv_error_info(f, protocol_version):
# return the query ID
return read_binary_string(f)
class AlreadyExistsException(ConfigurationException):
summary = 'Item already exists'
error_code = 0x2400
@staticmethod
def recv_error_info(f, protocol_version):
return {
'keyspace': read_string(f),
'table': read_string(f),
}
def to_exception(self):
return AlreadyExists(**self.info)
class StartupMessage(_MessageType):
opcode = 0x01
name = 'STARTUP'
KNOWN_OPTION_KEYS = set((
'CQL_VERSION',
'COMPRESSION',
))
def __init__(self, cqlversion, options):
self.cqlversion = cqlversion
self.options = options
def send_body(self, f, protocol_version):
optmap = self.options.copy()
optmap['CQL_VERSION'] = self.cqlversion
write_stringmap(f, optmap)
class ReadyMessage(_MessageType):
opcode = 0x02
name = 'READY'
@classmethod
def recv_body(cls, *args):
return cls()
class AuthenticateMessage(_MessageType):
opcode = 0x03
name = 'AUTHENTICATE'
def __init__(self, authenticator):
self.authenticator = authenticator
@classmethod
def recv_body(cls, f, *args):
authname = read_string(f)
return cls(authenticator=authname)
class CredentialsMessage(_MessageType):
opcode = 0x04
name = 'CREDENTIALS'
def __init__(self, creds):
self.creds = creds
def send_body(self, f, protocol_version):
if protocol_version > 1:
raise UnsupportedOperation(
"Credentials-based authentication is not supported with "
"protocol version 2 or higher. Use the SASL authentication "
"mechanism instead.")
write_short(f, len(self.creds))
for credkey, credval in self.creds.items():
write_string(f, credkey)
write_string(f, credval)
class AuthChallengeMessage(_MessageType):
opcode = 0x0E
name = 'AUTH_CHALLENGE'
def __init__(self, challenge):
self.challenge = challenge
@classmethod
def recv_body(cls, f, *args):
return cls(read_binary_longstring(f))
class AuthResponseMessage(_MessageType):
opcode = 0x0F
name = 'AUTH_RESPONSE'
def __init__(self, response):
self.response = response
def send_body(self, f, protocol_version):
write_longstring(f, self.response)
class AuthSuccessMessage(_MessageType):
opcode = 0x10
name = 'AUTH_SUCCESS'
def __init__(self, token):
self.token = token
@classmethod
def recv_body(cls, f, *args):
return cls(read_longstring(f))
class OptionsMessage(_MessageType):
opcode = 0x05
name = 'OPTIONS'
def send_body(self, f, protocol_version):
pass
class SupportedMessage(_MessageType):
opcode = 0x06
name = 'SUPPORTED'
def __init__(self, cql_versions, options):
self.cql_versions = cql_versions
self.options = options
@classmethod
def recv_body(cls, f, *args):
options = read_stringmultimap(f)
cql_versions = options.pop('CQL_VERSION')
return cls(cql_versions=cql_versions, options=options)
# used for QueryMessage and ExecuteMessage
_VALUES_FLAG = 0x01
_SKIP_METADATA_FLAG = 0x02
_PAGE_SIZE_FLAG = 0x04
_WITH_PAGING_STATE_FLAG = 0x08
_WITH_SERIAL_CONSISTENCY_FLAG = 0x10
_PROTOCOL_TIMESTAMP = 0x20
class QueryMessage(_MessageType):
opcode = 0x07
name = 'QUERY'
def __init__(self, query, consistency_level, serial_consistency_level=None,
fetch_size=None, paging_state=None, timestamp=None):
self.query = query
self.consistency_level = consistency_level
self.serial_consistency_level = serial_consistency_level
self.fetch_size = fetch_size
self.paging_state = paging_state
self.timestamp = timestamp
self._query_params = None # only used internally. May be set to a list of native-encoded values to have them sent with the request.
def send_body(self, f, protocol_version):
write_longstring(f, self.query)
write_consistency_level(f, self.consistency_level)
flags = 0x00
if self._query_params is not None:
flags |= _VALUES_FLAG # also v2+, but we're only setting params internally right now
if self.serial_consistency_level:
if protocol_version >= 2:
flags |= _WITH_SERIAL_CONSISTENCY_FLAG
else:
raise UnsupportedOperation(
"Serial consistency levels require the use of protocol version "
"2 or higher. Consider setting Cluster.protocol_version to 2 "
"to support serial consistency levels.")
if self.fetch_size:
if protocol_version >= 2:
flags |= _PAGE_SIZE_FLAG
else:
raise UnsupportedOperation(
"Automatic query paging may only be used with protocol version "
"2 or higher. Consider setting Cluster.protocol_version to 2.")
if self.paging_state:
if protocol_version >= 2:
flags |= _WITH_PAGING_STATE_FLAG
else:
raise UnsupportedOperation(
"Automatic query paging may only be used with protocol version "
"2 or higher. Consider setting Cluster.protocol_version to 2.")
if self.timestamp is not None:
flags |= _PROTOCOL_TIMESTAMP
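# Flag composition example: a query with a fetch_size and a serial
# consistency level (but no values, paging state or timestamp) sets
# flags = _PAGE_SIZE_FLAG | _WITH_SERIAL_CONSISTENCY_FLAG = 0x04 | 0x10 =
# 0x14, written below as a single byte unless the protocol version uses
# int-sized query flags.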
if ProtocolVersion.uses_int_query_flags(protocol_version):
write_uint(f, flags)
else:
write_byte(f, flags)
if self._query_params is not None:
write_short(f, len(self._query_params))
for param in self._query_params:
write_value(f, param)
if self.fetch_size:
write_int(f, self.fetch_size)
if self.paging_state:
write_longstring(f, self.paging_state)
if self.serial_consistency_level:
write_consistency_level(f, self.serial_consistency_level)
if self.timestamp is not None:
write_long(f, self.timestamp)
CUSTOM_TYPE = object()
RESULT_KIND_VOID = 0x0001
RESULT_KIND_ROWS = 0x0002
RESULT_KIND_SET_KEYSPACE = 0x0003
RESULT_KIND_PREPARED = 0x0004
RESULT_KIND_SCHEMA_CHANGE = 0x0005
class ResultMessage(_MessageType):
opcode = 0x08
name = 'RESULT'
kind = None
results = None
paging_state = None
# Names match type name in module scope. Most are imported from cassandra.cqltypes (except CUSTOM_TYPE)
type_codes = _cqltypes_by_code = dict((v, globals()[k]) for k, v in type_codes.__dict__.items() if not k.startswith('_'))
_FLAGS_GLOBAL_TABLES_SPEC = 0x0001
_HAS_MORE_PAGES_FLAG = 0x0002
_NO_METADATA_FLAG = 0x0004
def __init__(self, kind, results, paging_state=None, col_types=None):
self.kind = kind
self.results = results
self.paging_state = paging_state
self.col_types = col_types
@classmethod
def recv_body(cls, f, protocol_version, user_type_map, result_metadata):
kind = read_int(f)
paging_state = None
col_types = None
if kind == RESULT_KIND_VOID:
results = None
elif kind == RESULT_KIND_ROWS:
paging_state, col_types, results = cls.recv_results_rows(
f, protocol_version, user_type_map, result_metadata)
elif kind == RESULT_KIND_SET_KEYSPACE:
ksname = read_string(f)
results = ksname
elif kind == RESULT_KIND_PREPARED:
results = cls.recv_results_prepared(f, protocol_version, user_type_map)
elif kind == RESULT_KIND_SCHEMA_CHANGE:
results = cls.recv_results_schema_change(f, protocol_version)
else:
raise DriverException("Unknown RESULT kind: %d" % kind)
return cls(kind, results, paging_state, col_types)
@classmethod
def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata):
paging_state, column_metadata = cls.recv_results_metadata(f, user_type_map)
column_metadata = column_metadata or result_metadata
rowcount = read_int(f)
rows = [cls.recv_row(f, len(column_metadata)) for _ in range(rowcount)]
colnames = [c[2] for c in column_metadata]
coltypes = [c[3] for c in column_metadata]
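# Decode all rows in one pass; if anything fails, fall back to decoding
# column by column so the raised DriverException can name the exact column
# and CQL type that could not be deserialized.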
try:
parsed_rows = [
tuple(ctype.from_binary(val, protocol_version)
for ctype, val in zip(coltypes, row))
for row in rows]
except Exception:
for row in rows:
for i in range(len(row)):
try:
coltypes[i].from_binary(row[i], protocol_version)
except Exception as e:
raise DriverException('Failed decoding result column "%s" of type %s: %s' % (colnames[i],
coltypes[i].cql_parameterized_type(),
str(e)))
return paging_state, coltypes, (colnames, parsed_rows)
@classmethod
def recv_results_prepared(cls, f, protocol_version, user_type_map):
query_id = read_binary_string(f)
bind_metadata, pk_indexes, result_metadata = cls.recv_prepared_metadata(f, protocol_version, user_type_map)
return query_id, bind_metadata, pk_indexes, result_metadata
@classmethod
def recv_results_metadata(cls, f, user_type_map):
flags = read_int(f)
colcount = read_int(f)
if flags & cls._HAS_MORE_PAGES_FLAG:
paging_state = read_binary_longstring(f)
else:
paging_state = None
no_meta = bool(flags & cls._NO_METADATA_FLAG)
if no_meta:
return paging_state, []
glob_tblspec = bool(flags & cls._FLAGS_GLOBAL_TABLES_SPEC)
if glob_tblspec:
ksname = read_string(f)
cfname = read_string(f)
column_metadata = []
for _ in range(colcount):
if glob_tblspec:
colksname = ksname
colcfname = cfname
else:
colksname = read_string(f)
colcfname = read_string(f)
colname = read_string(f)
coltype = cls.read_type(f, user_type_map)
column_metadata.append((colksname, colcfname, colname, coltype))
return paging_state, column_metadata
@classmethod
def recv_prepared_metadata(cls, f, protocol_version, user_type_map):
flags = read_int(f)
colcount = read_int(f)
pk_indexes = None
if protocol_version >= 4:
num_pk_indexes = read_int(f)
pk_indexes = [read_short(f) for _ in range(num_pk_indexes)]
glob_tblspec = bool(flags & cls._FLAGS_GLOBAL_TABLES_SPEC)
if glob_tblspec:
ksname = read_string(f)
cfname = read_string(f)
bind_metadata = []
for _ in range(colcount):
if glob_tblspec:
colksname = ksname
colcfname = cfname
else:
colksname = read_string(f)
colcfname = read_string(f)
colname = read_string(f)
coltype = cls.read_type(f, user_type_map)
bind_metadata.append(ColumnMetadata(colksname, colcfname, colname, coltype))
if protocol_version >= 2:
_, result_metadata = cls.recv_results_metadata(f, user_type_map)
return bind_metadata, pk_indexes, result_metadata
else:
return bind_metadata, pk_indexes, None
@classmethod
def recv_results_schema_change(cls, f, protocol_version):
return EventMessage.recv_schema_change(f, protocol_version)
@classmethod
def read_type(cls, f, user_type_map):
optid = read_short(f)
try:
typeclass = cls.type_codes[optid]
except KeyError:
raise NotSupportedError("Unknown data type code 0x%04x. Have to skip"
" entire result set." % (optid,))
if typeclass in (ListType, SetType):
subtype = cls.read_type(f, user_type_map)
typeclass = typeclass.apply_parameters((subtype,))
elif typeclass == MapType:
keysubtype = cls.read_type(f, user_type_map)
valsubtype = cls.read_type(f, user_type_map)
typeclass = typeclass.apply_parameters((keysubtype, valsubtype))
elif typeclass == TupleType:
num_items = read_short(f)
types = tuple(cls.read_type(f, user_type_map) for _ in range(num_items))
typeclass = typeclass.apply_parameters(types)
elif typeclass == UserType:
ks = read_string(f)
udt_name = read_string(f)
num_fields = read_short(f)
names, types = zip(*((read_string(f), cls.read_type(f, user_type_map))
for _ in range(num_fields)))
specialized_type = typeclass.make_udt_class(ks, udt_name, names, types)
specialized_type.mapped_class = user_type_map.get(ks, {}).get(udt_name)
typeclass = specialized_type
elif typeclass == CUSTOM_TYPE:
classname = read_string(f)
typeclass = lookup_casstype(classname)
return typeclass
@staticmethod
def recv_row(f, colcount):
return [read_value(f) for _ in range(colcount)]
class PrepareMessage(_MessageType):
opcode = 0x09
name = 'PREPARE'
def __init__(self, query):
self.query = query
def send_body(self, f, protocol_version):
write_longstring(f, self.query)
if ProtocolVersion.uses_prepare_flags(protocol_version):
# Write the flags byte; with 0 value for now, but this should change in PYTHON-678
write_uint(f, 0)
class ExecuteMessage(_MessageType):
opcode = 0x0A
name = 'EXECUTE'
def __init__(self, query_id, query_params, consistency_level,
serial_consistency_level=None, fetch_size=None,
paging_state=None, timestamp=None, skip_meta=False):
self.query_id = query_id
self.query_params = query_params
self.consistency_level = consistency_level
self.serial_consistency_level = serial_consistency_level
self.fetch_size = fetch_size
self.paging_state = paging_state
self.timestamp = timestamp
self.skip_meta = skip_meta
def send_body(self, f, protocol_version):
write_string(f, self.query_id)
if protocol_version == 1:
if self.serial_consistency_level:
raise UnsupportedOperation(
"Serial consistency levels require the use of protocol version "
"2 or higher. Consider setting Cluster.protocol_version to 2 "
"to support serial consistency levels.")
if self.fetch_size or self.paging_state:
raise UnsupportedOperation(
"Automatic query paging may only be used with protocol version "
"2 or higher. Consider setting Cluster.protocol_version to 2.")
write_short(f, len(self.query_params))
for param in self.query_params:
write_value(f, param)
write_consistency_level(f, self.consistency_level)
else:
write_consistency_level(f, self.consistency_level)
flags = _VALUES_FLAG
if self.serial_consistency_level:
flags |= _WITH_SERIAL_CONSISTENCY_FLAG
if self.fetch_size:
flags |= _PAGE_SIZE_FLAG
if self.paging_state:
flags |= _WITH_PAGING_STATE_FLAG
if self.timestamp is not None:
if protocol_version >= 3:
flags |= _PROTOCOL_TIMESTAMP
else:
raise UnsupportedOperation(
"Protocol-level timestamps may only be used with protocol version "
"3 or higher. Consider setting Cluster.protocol_version to 3.")
if self.skip_meta:
flags |= _SKIP_METADATA_FLAG
if ProtocolVersion.uses_int_query_flags(protocol_version):
write_uint(f, flags)
else:
write_byte(f, flags)
write_short(f, len(self.query_params))
for param in self.query_params:
write_value(f, param)
if self.fetch_size:
write_int(f, self.fetch_size)
if self.paging_state:
write_longstring(f, self.paging_state)
if self.serial_consistency_level:
write_consistency_level(f, self.serial_consistency_level)
if self.timestamp is not None:
write_long(f, self.timestamp)
class BatchMessage(_MessageType):
opcode = 0x0D
name = 'BATCH'
def __init__(self, batch_type, queries, consistency_level,
serial_consistency_level=None, timestamp=None):
self.batch_type = batch_type
self.queries = queries
self.consistency_level = consistency_level
self.serial_consistency_level = serial_consistency_level
self.timestamp = timestamp
def send_body(self, f, protocol_version):
write_byte(f, self.batch_type.value)
write_short(f, len(self.queries))
for prepared, string_or_query_id, params in self.queries:
if not prepared:
write_byte(f, 0)
write_longstring(f, string_or_query_id)
else:
write_byte(f, 1)
write_short(f, len(string_or_query_id))
f.write(string_or_query_id)
write_short(f, len(params))
for param in params:
write_value(f, param)
write_consistency_level(f, self.consistency_level)
if protocol_version >= 3:
flags = 0
if self.serial_consistency_level:
flags |= _WITH_SERIAL_CONSISTENCY_FLAG
if self.timestamp is not None:
flags |= _PROTOCOL_TIMESTAMP
if ProtocolVersion.uses_int_query_flags(protocol_version):
write_int(f, flags)
else:
write_byte(f, flags)
if self.serial_consistency_level:
write_consistency_level(f, self.serial_consistency_level)
if self.timestamp is not None:
write_long(f, self.timestamp)
known_event_types = frozenset((
'TOPOLOGY_CHANGE',
'STATUS_CHANGE',
'SCHEMA_CHANGE'
))
class RegisterMessage(_MessageType):
opcode = 0x0B
name = 'REGISTER'
def __init__(self, event_list):
self.event_list = event_list
def send_body(self, f, protocol_version):
write_stringlist(f, self.event_list)
class EventMessage(_MessageType):
opcode = 0x0C
name = 'EVENT'
def __init__(self, event_type, event_args):
self.event_type = event_type
self.event_args = event_args
@classmethod
def recv_body(cls, f, protocol_version, *args):
event_type = read_string(f).upper()
if event_type in known_event_types:
read_method = getattr(cls, 'recv_' + event_type.lower())
return cls(event_type=event_type, event_args=read_method(f, protocol_version))
raise NotSupportedError('Unknown event type %r' % event_type)
@classmethod
def recv_topology_change(cls, f, protocol_version):
# "NEW_NODE" or "REMOVED_NODE"
change_type = read_string(f)
address = read_inet(f)
return dict(change_type=change_type, address=address)
@classmethod
def recv_status_change(cls, f, protocol_version):
# "UP" or "DOWN"
change_type = read_string(f)
address = read_inet(f)
return dict(change_type=change_type, address=address)
@classmethod
def recv_schema_change(cls, f, protocol_version):
# "CREATED", "DROPPED", or "UPDATED"
change_type = read_string(f)
if protocol_version >= 3:
target = read_string(f)
keyspace = read_string(f)
event = {'target_type': target, 'change_type': change_type, 'keyspace': keyspace}
if target != SchemaTargetType.KEYSPACE:
target_name = read_string(f)
if target == SchemaTargetType.FUNCTION:
event['function'] = UserFunctionDescriptor(target_name, [read_string(f) for _ in range(read_short(f))])
elif target == SchemaTargetType.AGGREGATE:
event['aggregate'] = UserAggregateDescriptor(target_name, [read_string(f) for _ in range(read_short(f))])
else:
event[target.lower()] = target_name
else:
keyspace = read_string(f)
table = read_string(f)
if table:
event = {'target_type': SchemaTargetType.TABLE, 'change_type': change_type, 'keyspace': keyspace, 'table': table}
else:
event = {'target_type': SchemaTargetType.KEYSPACE, 'change_type': change_type, 'keyspace': keyspace}
return event
class _ProtocolHandler(object):
"""
    _ProtocolHandler handles encoding and decoding messages.
This class can be specialized to compose Handlers which implement alternative
result decoding or type deserialization. Class definitions are passed to :class:`cassandra.cluster.Cluster`
on initialization.
Contracted class methods are :meth:`_ProtocolHandler.encode_message` and :meth:`_ProtocolHandler.decode_message`.
"""
message_types_by_opcode = _message_types_by_opcode.copy()
"""
Default mapping of opcode to Message implementation. The default ``decode_message`` implementation uses
this to instantiate a message and populate using ``recv_body``. This mapping can be updated to inject specialized
result decoding implementations.
"""
@classmethod
def encode_message(cls, msg, stream_id, protocol_version, compressor, allow_beta_protocol_version):
"""
        Encodes a message using the specified frame parameters and compressor.
:param msg: the message, typically of cassandra.protocol._MessageType, generated by the driver
:param stream_id: protocol stream id for the frame header
:param protocol_version: version for the frame header, and used encoding contents
        :param compressor: optional compression function to be used on the body
        :param allow_beta_protocol_version: if set, the USE_BETA flag is included in the frame header flags
"""
flags = 0
body = io.BytesIO()
if msg.custom_payload:
if protocol_version < 4:
raise UnsupportedOperation("Custom key/value payloads can only be used with protocol version 4 or higher")
flags |= CUSTOM_PAYLOAD_FLAG
write_bytesmap(body, msg.custom_payload)
msg.send_body(body, protocol_version)
body = body.getvalue()
if compressor and len(body) > 0:
body = compressor(body)
flags |= COMPRESSED_FLAG
if msg.tracing:
flags |= TRACING_FLAG
if allow_beta_protocol_version:
flags |= USE_BETA_FLAG
buff = io.BytesIO()
cls._write_header(buff, protocol_version, flags, stream_id, msg.opcode, len(body))
buff.write(body)
return buff.getvalue()
@staticmethod
def _write_header(f, version, flags, stream_id, opcode, length):
"""
Write a CQL protocol frame header.
"""
pack = v3_header_pack if version >= 3 else header_pack
f.write(pack(version, flags, stream_id, opcode))
write_int(f, length)
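# For reference, a sketch of the frame header layout implied by the pack
# functions used above (v3+ shown; v1/v2 differ only in that the stream id is
# a single byte):
#
#     version (1 byte) | flags (1 byte) | stream id (2 bytes) | opcode (1 byte) | length (4 bytes)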
@classmethod
def decode_message(cls, protocol_version, user_type_map, stream_id, flags, opcode, body,
decompressor, result_metadata):
"""
Decodes a native protocol message body
:param protocol_version: version to use decoding contents
:param user_type_map: map[keyspace name] = map[type name] = custom type to instantiate when deserializing this type
:param stream_id: native protocol stream id from the frame header
:param flags: native protocol flags bitmap from the header
:param opcode: native protocol opcode from the header
:param body: frame body
        :param decompressor: optional decompression function to inflate the body
        :param result_metadata: optional result metadata from a previously prepared statement, used when decoding rows if the response omits its own metadata
:return: a message decoded from the body and frame attributes
"""
if flags & COMPRESSED_FLAG:
if decompressor is None:
raise RuntimeError("No de-compressor available for compressed frame!")
body = decompressor(body)
flags ^= COMPRESSED_FLAG
body = io.BytesIO(body)
if flags & TRACING_FLAG:
trace_id = UUID(bytes=body.read(16))
flags ^= TRACING_FLAG
else:
trace_id = None
if flags & WARNING_FLAG:
warnings = read_stringlist(body)
flags ^= WARNING_FLAG
else:
warnings = None
if flags & CUSTOM_PAYLOAD_FLAG:
custom_payload = read_bytesmap(body)
flags ^= CUSTOM_PAYLOAD_FLAG
else:
custom_payload = None
        flags &= USE_BETA_MASK # will only be set if we asserted it in connection establishment
if flags:
log.warning("Unknown protocol flags set: %02x. May cause problems.", flags)
msg_class = cls.message_types_by_opcode[opcode]
msg = msg_class.recv_body(body, protocol_version, user_type_map, result_metadata)
msg.stream_id = stream_id
msg.trace_id = trace_id
msg.custom_payload = custom_payload
msg.warnings = warnings
if msg.warnings:
for w in msg.warnings:
log.warning("Server warning: %s", w)
return msg
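# Illustrative sketch (not part of the driver): the _ProtocolHandler docstring
# above describes composing specialized handlers. A subclass would typically
# copy message_types_by_opcode and swap in an alternative message class, e.g.:
#
#     class LoggingResultMessage(ResultMessage):
#         @classmethod
#         def recv_body(cls, f, protocol_version, user_type_map, result_metadata):
#             log.debug("decoding a RESULT message")
#             return super(LoggingResultMessage, cls).recv_body(
#                 f, protocol_version, user_type_map, result_metadata)
#
#     class LoggingProtocolHandler(_ProtocolHandler):
#         message_types_by_opcode = _ProtocolHandler.message_types_by_opcode.copy()
#         message_types_by_opcode[ResultMessage.opcode] = LoggingResultMessage
#
# The class names here are hypothetical; the application passes its handler
# class to cassandra.cluster.Cluster (or a Session) as described above.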
def cython_protocol_handler(colparser):
"""
Given a column parser to deserialize ResultMessages, return a suitable
Cython-based protocol handler.
There are three Cython-based protocol handlers:
- obj_parser.ListParser
decodes result messages into a list of tuples
- obj_parser.LazyParser
decodes result messages lazily by returning an iterator
- numpy_parser.NumPyParser
decodes result messages into NumPy arrays
The default is to use obj_parser.ListParser
"""
from cassandra.row_parser import make_recv_results_rows
class FastResultMessage(ResultMessage):
"""
Cython version of Result Message that has a faster implementation of
recv_results_row.
"""
# type_codes = ResultMessage.type_codes.copy()
code_to_type = dict((v, k) for k, v in ResultMessage.type_codes.items())
recv_results_rows = classmethod(make_recv_results_rows(colparser))
class CythonProtocolHandler(_ProtocolHandler):
"""
        Use FastResultMessage to decode query result messages.
"""
my_opcodes = _ProtocolHandler.message_types_by_opcode.copy()
my_opcodes[FastResultMessage.opcode] = FastResultMessage
message_types_by_opcode = my_opcodes
col_parser = colparser
return CythonProtocolHandler
if HAVE_CYTHON:
from cassandra.obj_parser import ListParser, LazyParser
ProtocolHandler = cython_protocol_handler(ListParser())
LazyProtocolHandler = cython_protocol_handler(LazyParser())
else:
# Use Python-based ProtocolHandler
ProtocolHandler = _ProtocolHandler
LazyProtocolHandler = None
if HAVE_CYTHON and HAVE_NUMPY:
from cassandra.numpy_parser import NumpyParser
NumpyProtocolHandler = cython_protocol_handler(NumpyParser())
else:
NumpyProtocolHandler = None
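# Illustrative usage sketch (assumes a connected cassandra.cluster.Session
# exposing a client_protocol_handler attribute; nothing here is executed):
#
#     session.client_protocol_handler = LazyProtocolHandler    # rows decoded lazily
#     session.client_protocol_handler = NumpyProtocolHandler   # rows as NumPy arrays
#
# Both of these are None when the corresponding Cython/NumPy support is not
# available, so callers should check before assigning.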
def read_byte(f):
return int8_unpack(f.read(1))
def write_byte(f, b):
f.write(int8_pack(b))
def read_int(f):
return int32_unpack(f.read(4))
def write_int(f, i):
f.write(int32_pack(i))
def write_uint(f, i):
f.write(uint32_pack(i))
def write_long(f, i):
f.write(uint64_pack(i))
def read_short(f):
return uint16_unpack(f.read(2))
def write_short(f, s):
f.write(uint16_pack(s))
def read_consistency_level(f):
return read_short(f)
def write_consistency_level(f, cl):
write_short(f, cl)
def read_string(f):
size = read_short(f)
contents = f.read(size)
return contents.decode('utf8')
def read_binary_string(f):
size = read_short(f)
contents = f.read(size)
return contents
def write_string(f, s):
if isinstance(s, six.text_type):
s = s.encode('utf8')
write_short(f, len(s))
f.write(s)
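# Quick illustrative round trip for the [short string] primitive above:
#
#     buf = io.BytesIO()
#     write_string(buf, u'cql_version')
#     buf.seek(0)
#     assert read_string(buf) == u'cql_version'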
def read_binary_longstring(f):
size = read_int(f)
contents = f.read(size)
return contents
def read_longstring(f):
return read_binary_longstring(f).decode('utf8')
def write_longstring(f, s):
if isinstance(s, six.text_type):
s = s.encode('utf8')
write_int(f, len(s))
f.write(s)
def read_stringlist(f):
numstrs = read_short(f)
return [read_string(f) for _ in range(numstrs)]
def write_stringlist(f, stringlist):
write_short(f, len(stringlist))
for s in stringlist:
write_string(f, s)
def read_stringmap(f):
numpairs = read_short(f)
strmap = {}
for _ in range(numpairs):
k = read_string(f)
strmap[k] = read_string(f)
return strmap
def write_stringmap(f, strmap):
write_short(f, len(strmap))
for k, v in strmap.items():
write_string(f, k)
write_string(f, v)
def read_bytesmap(f):
numpairs = read_short(f)
bytesmap = {}
for _ in range(numpairs):
k = read_string(f)
bytesmap[k] = read_value(f)
return bytesmap
def write_bytesmap(f, bytesmap):
write_short(f, len(bytesmap))
for k, v in bytesmap.items():
write_string(f, k)
write_value(f, v)
def read_stringmultimap(f):
numkeys = read_short(f)
strmmap = {}
for _ in range(numkeys):
k = read_string(f)
strmmap[k] = read_stringlist(f)
return strmmap
def write_stringmultimap(f, strmmap):
write_short(f, len(strmmap))
for k, v in strmmap.items():
write_string(f, k)
write_stringlist(f, v)
def read_error_code_map(f):
numpairs = read_int(f)
error_code_map = {}
for _ in range(numpairs):
endpoint = read_inet_addr_only(f)
error_code_map[endpoint] = read_short(f)
return error_code_map
def read_value(f):
size = read_int(f)
if size < 0:
return None
return f.read(size)
def write_value(f, v):
if v is None:
write_int(f, -1)
elif v is _UNSET_VALUE:
write_int(f, -2)
else:
write_int(f, len(v))
f.write(v)
def read_inet_addr_only(f):
size = read_byte(f)
addrbytes = f.read(size)
if size == 4:
addrfam = socket.AF_INET
elif size == 16:
addrfam = socket.AF_INET6
else:
raise InternalError("bad inet address: %r" % (addrbytes,))
return util.inet_ntop(addrfam, addrbytes)
def read_inet(f):
addr = read_inet_addr_only(f)
port = read_int(f)
return (addr, port)
def write_inet(f, addrtuple):
addr, port = addrtuple
if ':' in addr:
addrfam = socket.AF_INET6
else:
addrfam = socket.AF_INET
addrbytes = util.inet_pton(addrfam, addr)
write_byte(f, len(addrbytes))
f.write(addrbytes)
write_int(f, port)
|
|
from mudwyrm_users.admin.achaea import ScriptState
from mudwyrm_users.admin.achaea.action import Action, Outcome, EventOutcome
from mudwyrm_users.admin.achaea.trigger import Trigger, Alias, OnEvent
from mudwyrm_users.admin.achaea.common import not_
from mudwyrm_users.admin.achaea.scripts.char import \
balance, status, Balance, set_balance, set_status, \
    claim_balance, lose_balance, restore_balance, skill_available
# the char module is also referenced directly below (e.g. char.secreted_venom),
# so import it under that name as well
import mudwyrm_users.admin.achaea.scripts.char as char
import mudwyrm_users.admin.achaea.scripts.action_aspects as aa
p = None
s = ScriptState()
def init(processor):
assert processor is not None
global p
p = processor
class scales(Action,
aa.Death, aa.Sleep, aa.Stupidity, aa.Stun,
aa.Balance, aa.Equilibrium,
aa.Paralysis, aa.Arms(1), aa.Entanglement, aa.Transfixation, aa.Prone):
@Alias(r'^scales$')
def up_aliases(match):
p.act(scales, True)
    @Alias(r'^scales shed$')
def down_aliases(match):
p.act(scales, False)
def start(action, activate):
if activate:
p.send("scales")
claim_balance('balance')
else:
p.send("scales shed")
def fail(action):
if action.args['activate']:
restore_balance('balance')
@Outcome(r'^You concentrate and slowly your body is covered by protective, serpentine scales\.$')
    def activated(action, match):
assert action.args['activate']
lose_balance('balance')
set_status('scales', True)
@Outcome(r'^You are already covered in protective, serpentine scales\.$')
    def already_active(action, match):
assert action.args['activate']
restore_balance('balance')
set_status('scales', True)
@Outcome(r'^You ripple your muscles and as you watch, your skin turns white and peels off, taking your protective scaling with it\.$',
r'^You have nothing to shed\.$')
    def deactivated(action, match):
assert not action.args['activate']
set_status('scales', False)
class hide(Action,
aa.Death, aa.Sleep, aa.Stupidity, aa.Stun,
aa.Balance, aa.Equilibrium,
aa.Paralysis, aa.Arms(1), aa.Entanglement, aa.Transfixation, aa.Prone):
@Alias(r'^hide$')
def up_aliases(match):
p.act(hide, True)
@Alias(r'^emerge$')
def down_aliases(match):
p.act(hide, False)
def start(action, activate):
p.send("hide" if activate else "emerge")
claim_balance('balance')
def fail(action):
restore_balance('balance')
@Outcome(r'^Too many prying eyes prevent you from finding a suitable hiding place\.$')
def cant_activate(action, match):
assert action.args['activate']
restore_balance('balance')
set_status('hide', False)
@Outcome(r'^You conceal yourself using all the guile you possess\.$')
def activated(action, match):
assert action.args['activate']
lose_balance('balance')
set_status('hide', True)
@Outcome(r'^You are already hidden\.$')
def already_active(action, match):
assert action.args['activate']
restore_balance('balance')
set_status('hide', True)
@Outcome(r'^You emerge from your hiding place\.$')
def deactivated(action, match):
assert not action.args['activate']
lose_balance('balance')
set_status('hide', False)
@Outcome(r'^From what do you wish to emerge\?$')
def already_inactive(action, match):
assert not action.args['activate']
restore_balance('balance')
set_status('hide', False)
class ghost(Action,
aa.Death, aa.Sleep, aa.Stupidity, aa.Stun,
aa.Balance, aa.Equilibrium,
aa.Paralysis, aa.Arms(1), aa.Entanglement, aa.Transfixation, aa.Prone):
@Alias(r'^conjure ghost$')
def aliases(match):
p.act(ghost)
def start(action):
p.send("conjure ghost")
claim_balance('equilibrium')
def fail(action):
restore_balance('equilibrium')
@Outcome(r'^You project a net of light about yourself until your image becomes faded and ghostly\.$')
def activated(action, match):
lose_balance('equilibrium')
set_status('ghost', True)
@Trigger(r'^Your ghostly image slowly intensifies until you appear flesh and blood again\.$')
def ghost_faded(match):
set_status('ghost', False)
class shroud(Action,
aa.Death, aa.Sleep, aa.Stupidity, aa.Stun,
aa.Balance, aa.Equilibrium,
aa.Paralysis, aa.Arms(1), aa.Entanglement, aa.Transfixation, aa.Prone,
aa.Targeted):
@Alias(r'^(?:cloak|conjure cloak)(?: (\w+))?$')
def aliases(match):
target = match.group(1)
p.act(shroud, target)
def start(action, target=None):
if target:
p.send("conjure cloak %s" % target)
else:
p.send("conjure cloak")
claim_balance('equilibrium')
def fail(action):
restore_balance('equilibrium')
@Outcome(r'^You toss a sparkling cloud of dust over yourself and as it settles you shimmer into invisibility\.$')
def activated_on_oneself(action, match):
assert 'target' not in action.args or action.args['target'] is None
lose_balance('equilibrium')
set_status('shroud', True)
@Outcome(r'^You toss a sparkling cloud of dust over (\w+), who vanishes into invisibility as it settles over (?:him|her)\.$')
def activated_on_target(action, match):
assert action.args['target'].lower() == match.group(1).lower()
lose_balance('equilibrium')
@Trigger(r'^Your shroud dissipates and you return to the realm of perception\.$')
def shroud_faded(match):
char.set_status('shroud', False)
class secrete(Action,
aa.Death, aa.Sleep, aa.Stupidity, aa.Stun):
@Alias(r'^secrete (\w+)$')
def aliases(match):
p.act(secrete, match.group(1).lower())
def start(action, venom):
p.send("secrete %s" % venom)
@Outcome(r'^You must somehow purge the venom from your blood before you may secrete another\.$')
def must_purge_first(action, match):
pass
@Outcome(r'^You feel the power of the venom (\w+) flowing through your veins\.$')
def secreted(action, match):
assert match.group(1) == action.args['venom']
char.secreted_venom = action.args['venom']
@Trigger(r'^You gasp as a terrible aching strikes all your limbs\.$')
def venom_backfired(match):
char.secreted_venom = None
class purge(Action,
aa.Death, aa.Sleep, aa.Stupidity, aa.Stun):
@Alias(r'^purge$')
def aliases(match):
p.act(purge)
def start(action):
p.send("purge")
@Outcome(r'^You purge every drop of venom from your bloodstream\.$')
def purged(action, match):
char.secreted_venom = None
class bite(Action,
aa.Death, aa.Sleep, aa.Stupidity, aa.Stun, aa.Grace,
aa.Balance, aa.Equilibrium,
aa.Paralysis, aa.Arms(1), aa.Entanglement, aa.Transfixation, aa.Prone,
aa.Endurance, aa.Targeted,
aa.PrismaticBarrier, aa.ShieldTattoo, aa.Rebounding):
@Alias(r'^bite (\w+)$')
def aliases(match):
p.act(bite, match.group(1))
def start(action, target):
p.send("bite %s" % target)
claim_balance('balance')
def fail(action):
restore_balance('balance')
@Outcome(r'^You have no venom in your bloodstream that would affect ')
def no_venom(action, match):
restore_balance('balance')
char.secreted_venom = None
@Outcome(r'^You sink your fangs into .+, injecting just the proper amount of \w+\.$')
def hit(action, match):
lose_balance('balance')
char.secreted_venom = None
class venom_bite(Action,
aa.Death, aa.Sleep, aa.Stupidity, aa.Stun, aa.Grace,
aa.Balance, aa.Equilibrium,
aa.Paralysis, aa.Arms(1), aa.Entanglement, aa.Transfixation, aa.Prone,
aa.Targeted,
aa.PrismaticBarrier, aa.ShieldTattoo, aa.Rebounding):
@Alias(r'^(?:vbite|b) (\w+)(?: (\w+))?$')
def aliases(match):
        if match.group(2):
venom = match.group(1).lower()
target = match.group(2)
else:
target = match.group(1)
venom = 'sumac'
if char.skill_available('camus', 'venom'):
venom = 'camus'
p.act(venom_bite, venom, target)
def start(action, venom, target):
if char.secreted_venom != venom:
if char.secreted_venom is not None:
p.act(purge)
p.act(secrete, venom)
action.bite_action = p.act(bite, target)
@OnEvent('ActionFinished')
def action_finished(action, finished_action):
if finished_action == action.bite_action:
p.finish_action(action)
class garrote(Action,
aa.Death, aa.Sleep, aa.Stupidity, aa.Stun, aa.Grace,
aa.Balance, aa.Equilibrium,
aa.Paralysis, aa.Arms(1), aa.Entanglement, aa.Transfixation, aa.Prone,
aa.Endurance, aa.Targeted,
aa.PrismaticBarrier, aa.ShieldTattoo, aa.Rebounding):
@Alias(r'^(?:garrote|gar) (\w+)$')
def aliases(match):
p.act(garrote, match.group(1))
def start(action, target):
p.send("garrote %s" % target)
claim_balance('balance')
def fail(action):
restore_balance('balance')
@Outcome(r'^You must be wielding a whip in order to garrote someone\.$')
def no_whip_wielded(action, match):
action.fail()
@Outcome(r'^You attempt to slip behind .+, but s?he outmanoeuvres you\.$')
def miss(action, match):
lose_balance('balance')
@Outcome(r'^You slip behind .+ and garrote (?:him|her) with your whip\.$')
def hit(action, match):
lose_balance('balance')
|
|
#!/usr/bin/env vpython
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for run.py."""
import json
import mock
import re
import unittest
import run
import test_runner_test
class UnitTest(unittest.TestCase):
def test_parse_args_ok(self):
cmd = [
'--app',
'./foo-Runner.app',
'--host-app',
'./bar.app',
'--runtime-cache-prefix',
'some/dir',
'--xcode-path',
'some/Xcode.app',
'--gtest_repeat',
'2',
# Required
'--xcode-build-version',
'123abc',
'--out-dir',
'some/dir',
]
runner = run.Runner()
runner.parse_args(cmd)
self.assertTrue(runner.args.app == './foo-Runner.app')
self.assertTrue(runner.args.runtime_cache_prefix == 'some/dir')
self.assertTrue(runner.args.xcode_path == 'some/Xcode.app')
self.assertTrue(runner.args.gtest_repeat == 2)
def test_parse_args_iossim_platform_version(self):
"""
    iossim, platform and version should all be set together;
    each of these cases omits one of them.
"""
test_cases = [
{
'error':
2,
'cmd': [
'--platform',
'iPhone X',
'--version',
'13.2.2',
# Required
'--xcode-build-version',
'123abc',
'--out-dir',
'some/dir'
],
},
{
'error':
2,
'cmd': [
'--iossim',
'path/to/iossim',
'--version',
'13.2.2',
# Required
'--xcode-build-version',
'123abc',
'--out-dir',
'some/dir'
],
},
{
'error':
2,
'cmd': [
'--iossim',
'path/to/iossim',
'--platform',
'iPhone X',
# Required
'--xcode-build-version',
'123abc',
'--out-dir',
'some/dir'
],
},
]
runner = run.Runner()
for test_case in test_cases:
with self.assertRaises(SystemExit) as ctx:
runner.parse_args(test_case['cmd'])
self.assertTrue(re.match('must specify all or none of *', ctx.message))
self.assertEqual(ctx.exception.code, test_case['error'])
def test_parse_args_xcode_parallelization_requirements(self):
"""
    Setting xcode parallelization requires both platform and version.
"""
test_cases = [
{
'error':
2,
'cmd': [
'--xcode-parallelization',
'--platform',
'iPhone X',
# Required
'--xcode-build-version',
'123abc',
'--out-dir',
'some/dir'
]
},
{
'error':
2,
'cmd': [
'--xcode-parallelization',
'--version',
'13.2.2',
# Required
'--xcode-build-version',
'123abc',
'--out-dir',
'some/dir'
]
}
]
runner = run.Runner()
for test_case in test_cases:
with self.assertRaises(SystemExit) as ctx:
runner.parse_args(test_case['cmd'])
self.assertTrue(
re.match('--xcode-parallelization also requires both *',
ctx.message))
self.assertEqual(ctx.exception.code, test_case['error'])
def test_parse_args_from_json(self):
json_args = {
'test_cases': ['test1'],
'restart': 'true',
'xcode_parallelization': True,
'shards': 2
}
cmd = [
'--shards',
'1',
'--platform',
'iPhone X',
'--version',
'13.2.2',
'--args-json',
json.dumps(json_args),
# Required
'--xcode-build-version',
'123abc',
'--out-dir',
'some/dir'
]
# shards should be 2, since json arg takes precedence over cmd line
runner = run.Runner()
runner.parse_args(cmd)
# Empty array
self.assertEquals(len(runner.args.env_var), 0)
self.assertTrue(runner.args.xcode_parallelization)
self.assertTrue(runner.args.restart)
self.assertEquals(runner.args.shards, 2)
def test_merge_test_cases(self):
"""Tests test cases are merges in --test-cases and --args-json."""
cmd = [
'--app',
'./foo-Runner.app',
'--xcode-path',
'some/Xcode.app',
'--gtest_filter',
'TestClass3.TestCase4:TestClass4.TestCase5',
'--test-cases',
'TestClass1.TestCase2',
'--args-json',
'{"test_cases": ["TestClass2.TestCase3"]}',
# Required
'--xcode-build-version',
'123abc',
'--out-dir',
'some/dir',
]
runner = run.Runner()
runner.parse_args(cmd)
runner.resolve_test_cases()
expected_test_cases = [
'TestClass1.TestCase2', 'TestClass3.TestCase4', 'TestClass4.TestCase5',
'TestClass2.TestCase3'
]
self.assertEqual(runner.args.test_cases, expected_test_cases)
def test_gtest_filter_arg(self):
cmd = [
'--app',
'./foo-Runner.app',
'--xcode-path',
'some/Xcode.app',
'--gtest_filter',
'TestClass1.TestCase2:TestClass2.TestCase3',
# Required
'--xcode-build-version',
'123abc',
'--out-dir',
'some/dir',
]
runner = run.Runner()
runner.parse_args(cmd)
runner.resolve_test_cases()
expected_test_cases = ['TestClass1.TestCase2', 'TestClass2.TestCase3']
self.assertEqual(runner.args.test_cases, expected_test_cases)
@mock.patch('os.getenv', return_value='2')
def test_parser_error_sharding_environment(self, _):
cmd = [
'--app',
'./foo-Runner.app',
'--xcode-path',
'some/Xcode.app',
'--test-cases',
'SomeClass.SomeTestCase',
'--gtest_filter',
'TestClass1.TestCase2:TestClass2.TestCase3',
# Required
'--xcode-build-version',
'123abc',
'--out-dir',
'some/dir',
]
runner = run.Runner()
with self.assertRaises(SystemExit) as ctx:
runner.parse_args(cmd)
self.assertTrue(
re.match(
'Specifying test cases is not supported in multiple swarming '
'shards environment.', ctx.message))
self.assertEqual(ctx.exception.code, 2)
class RunnerInstallXcodeTest(test_runner_test.TestCase):
"""Tests Xcode and runtime installing logic in Runner.run()"""
def setUp(self):
super(RunnerInstallXcodeTest, self).setUp()
self.runner = run.Runner()
self.mock(self.runner, 'parse_args', lambda _: None)
self.mock(self.runner, 'resolve_test_cases', lambda: None)
self.runner.args = mock.MagicMock()
# Make run() choose xcodebuild_runner.SimulatorParallelTestRunner as tr.
self.runner.args.xcode_parallelization = True
# Used in run.Runner.install_xcode().
self.runner.args.mac_toolchain_cmd = 'mac_toolchain'
self.runner.args.xcode_path = 'test/xcode/path'
self.runner.args.xcode_build_version = 'testXcodeVersion'
self.runner.args.runtime_cache_prefix = 'test/runtime-ios-'
self.runner.args.version = '14.4'
self.runner.args.out_dir = 'out/dir'
@mock.patch('test_runner.defaults_delete')
@mock.patch('json.dump')
@mock.patch('xcode_util.select', autospec=True)
@mock.patch('os.path.exists', autospec=True, return_value=True)
@mock.patch('xcodebuild_runner.SimulatorParallelTestRunner')
@mock.patch('xcode_util.construct_runtime_cache_folder', autospec=True)
@mock.patch('xcode_util.install', autospec=True, return_value=True)
@mock.patch('xcode_util.move_runtime', autospec=True)
def test_legacy_xcode(self, mock_move_runtime, mock_install,
mock_construct_runtime_cache_folder, mock_tr, _1, _2,
_3, _4):
mock_construct_runtime_cache_folder.side_effect = lambda a, b: a + b
test_runner = mock_tr.return_value
test_runner.launch.return_value = True
test_runner.logs = {}
with mock.patch('run.open', mock.mock_open()):
self.runner.run(None)
mock_install.assert_called_with(
'mac_toolchain',
'testXcodeVersion',
'test/xcode/path',
runtime_cache_folder='test/runtime-ios-14.4',
ios_version='14.4')
mock_construct_runtime_cache_folder.assert_called_once_with(
'test/runtime-ios-', '14.4')
self.assertFalse(mock_move_runtime.called)
@mock.patch('test_runner.defaults_delete')
@mock.patch('json.dump')
@mock.patch('xcode_util.select', autospec=True)
@mock.patch('os.path.exists', autospec=True, return_value=True)
@mock.patch('xcodebuild_runner.SimulatorParallelTestRunner')
@mock.patch('xcode_util.construct_runtime_cache_folder', autospec=True)
@mock.patch('xcode_util.install', autospec=True, return_value=False)
@mock.patch('xcode_util.move_runtime', autospec=True)
def test_not_legacy_xcode(self, mock_move_runtime, mock_install,
mock_construct_runtime_cache_folder, mock_tr, _1,
_2, _3, _4):
mock_construct_runtime_cache_folder.side_effect = lambda a, b: a + b
test_runner = mock_tr.return_value
test_runner.launch.return_value = True
test_runner.logs = {}
with mock.patch('run.open', mock.mock_open()):
self.runner.run(None)
mock_install.assert_called_with(
'mac_toolchain',
'testXcodeVersion',
'test/xcode/path',
runtime_cache_folder='test/runtime-ios-14.4',
ios_version='14.4')
self.assertEqual(2, mock_construct_runtime_cache_folder.call_count)
mock_construct_runtime_cache_folder.assert_has_calls(calls=[
mock.call('test/runtime-ios-', '14.4'),
mock.call('test/runtime-ios-', '14.4'),
])
mock_move_runtime.assert_called_with('test/runtime-ios-14.4',
'test/xcode/path', False)
@mock.patch('test_runner.defaults_delete')
@mock.patch('json.dump')
@mock.patch('xcode_util.select', autospec=True)
@mock.patch('os.path.exists', autospec=True, return_value=True)
@mock.patch('xcodebuild_runner.SimulatorParallelTestRunner')
@mock.patch('xcode_util.construct_runtime_cache_folder', autospec=True)
@mock.patch('xcode_util.install', autospec=True, return_value=False)
@mock.patch('xcode_util.move_runtime', autospec=True)
def test_device_task(self, mock_move_runtime, mock_install,
mock_construct_runtime_cache_folder, mock_tr, _1, _2, _3,
_4):
"""Check if Xcode is correctly installed for device tasks."""
self.runner.args.version = None
test_runner = mock_tr.return_value
test_runner.launch.return_value = True
test_runner.logs = {}
with mock.patch('run.open', mock.mock_open()):
self.runner.run(None)
mock_install.assert_called_with(
'mac_toolchain',
'testXcodeVersion',
'test/xcode/path',
ios_version=None,
runtime_cache_folder=None)
self.assertFalse(mock_construct_runtime_cache_folder.called)
self.assertFalse(mock_move_runtime.called)
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/mpls/lsps/constrained-path/tunnels/tunnel/p2p-tunnel-attributes/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters for P2P LSPs
"""
__slots__ = ("_path_helper", "_extmethods", "__destination")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__destination = YANGDynClass(
base=[
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
],
is_leaf=True,
yang_name="destination",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ip-address",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"mpls",
"lsps",
"constrained-path",
"tunnels",
"tunnel",
"p2p-tunnel-attributes",
"config",
]
def _get_destination(self):
"""
Getter method for destination, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/config/destination (inet:ip-address)
YANG Description: P2P tunnel destination address
"""
return self.__destination
def _set_destination(self, v, load=False):
"""
Setter method for destination, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/config/destination (inet:ip-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_destination is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_destination() directly.
YANG Description: P2P tunnel destination address
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=[
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
],
is_leaf=True,
yang_name="destination",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ip-address",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """destination must be of a type compatible with inet:ip-address""",
"defined-type": "inet:ip-address",
"generated-type": """YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),], is_leaf=True, yang_name="destination", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-address', is_config=True)""",
}
)
self.__destination = t
if hasattr(self, "_set"):
self._set()
def _unset_destination(self):
self.__destination = YANGDynClass(
base=[
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
],
is_leaf=True,
yang_name="destination",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ip-address",
is_config=True,
)
destination = __builtin__.property(_get_destination, _set_destination)
_pyangbind_elements = OrderedDict([("destination", destination)])
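# Illustrative sketch of using the generated binding above (the address shown
# is an arbitrary example value, not a default):
#
#     cfg = config()
#     cfg.destination = "192.0.2.1"   # must satisfy one of the two patterns
#     print(cfg.destination)
#     print(cfg._path())
#
# A value matching neither the IPv4 nor the IPv6 pattern makes the setter
# raise the ValueError constructed in _set_destination above.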
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/mpls/lsps/constrained-path/tunnels/tunnel/p2p-tunnel-attributes/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters for P2P LSPs
"""
__slots__ = ("_path_helper", "_extmethods", "__destination")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__destination = YANGDynClass(
base=[
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
],
is_leaf=True,
yang_name="destination",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ip-address",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"mpls",
"lsps",
"constrained-path",
"tunnels",
"tunnel",
"p2p-tunnel-attributes",
"config",
]
def _get_destination(self):
"""
Getter method for destination, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/config/destination (inet:ip-address)
YANG Description: P2P tunnel destination address
"""
return self.__destination
def _set_destination(self, v, load=False):
"""
Setter method for destination, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/config/destination (inet:ip-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_destination is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_destination() directly.
YANG Description: P2P tunnel destination address
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=[
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
],
is_leaf=True,
yang_name="destination",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ip-address",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """destination must be of a type compatible with inet:ip-address""",
"defined-type": "inet:ip-address",
"generated-type": """YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),], is_leaf=True, yang_name="destination", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-address', is_config=True)""",
}
)
self.__destination = t
if hasattr(self, "_set"):
self._set()
def _unset_destination(self):
self.__destination = YANGDynClass(
base=[
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
],
is_leaf=True,
yang_name="destination",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ip-address",
is_config=True,
)
destination = __builtin__.property(_get_destination, _set_destination)
_pyangbind_elements = OrderedDict([("destination", destination)])
|
|
#!/usr/bin/env python
#============================================================================
# Copyright (C) Microsoft Corporation, All rights reserved.
#============================================================================
import os
import imp
import re
import codecs
import shutil
import string
import filecmp
protocol = imp.load_source('protocol', '../protocol.py')
nxDSCLog = imp.load_source('nxDSCLog', '../nxDSCLog.py')
LG = nxDSCLog.DSCLog
RESOURCE_DIR = '/opt/microsoft/omsconfig/modules/nxOMSSudoCustomLog/DSCResources/MSFT_nxOMSSudoCustomLogResource/'
RESOURCE_PLUGIN_DIR = RESOURCE_DIR + 'CustomLog/Plugin/'
PLUGIN = RESOURCE_PLUGIN_DIR + 'in_sudo_tail.rb'
SCRIPT = RESOURCE_PLUGIN_DIR + 'tailfilereader.rb'
AGENT_PLUGIN_DIR = '/opt/microsoft/omsagent/plugin/'
AGENT_PLUGIN = AGENT_PLUGIN_DIR + 'in_sudo_tail.rb'
AGENT_SCRIPT = AGENT_PLUGIN_DIR + 'tailfilereader.rb'
conf_path = '/etc/opt/microsoft/omsagent/{0}/conf/omsagent.d/customlog.conf'
oms_restart_cmd = 'sudo /opt/microsoft/omsagent/bin/service_control restart {0}'
'''
[ClassVersion("1.0.0")]
class MSFT_nxOMSSudoCustomLogObject
{
[key] string LogName;
[write] string FilePath[];
};
[ClassVersion("1.0.0")]
class MSFT_nxOMSSudoCustomLogResource : OMI_BaseResource
{
[key] string Name;
[key] string WorkspaceID;
[write] boolean EnableCustomLogConfiguration;
[write,ValueMap{"Present", "Absent"},Values{"Present", "Absent"}] string Ensure;
[write, EmbeddedInstance("MSFT_nxOMSSudoCustomLogObject") : ToSubclass] string CustomLogObjects[];
};
'''
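# Illustrative sketch of how the entry points below are exercised (parameter
# values are hypothetical; in practice the DSC/OMI engine supplies them):
#
#     params = dict(WorkspaceID='00000000-0000-0000-0000-000000000000',
#                   Name='CustomLog', Ensure='Present',
#                   EnableCustomLogConfiguration=True,
#                   CustomLogObjects=[{'LogName': 'mylog_CL',
#                                      'FilePath': ['/tmp/mylog/*.log']}])
#     if Test_Marshall(**params) != [0]:
#         Set_Marshall(**params)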
def init_vars(WorkspaceID, Ensure, CustomLogObjects):
if WorkspaceID is not None:
WorkspaceID = WorkspaceID.encode('ascii', 'ignore')
else:
WorkspaceID = ''
if 'value' in dir(Ensure):
Ensure = Ensure.value
Ensure = Ensure.encode('ascii', 'ignore')
new_customlogs = []
if CustomLogObjects is not None :
for customlog in CustomLogObjects:
if 'value' in dir(customlog['LogName']):
customlog['LogName'] = customlog['LogName'].value.encode('ascii', 'ignore')
new_filepaths = []
if 'value' in dir(customlog['FilePath']):
for filepath in customlog['FilePath'].value:
if filepath is not None and len(filepath) > 0:
new_filepaths.append(filepath.encode('ascii', 'ignore'))
if len(new_filepaths) > 0:
customlog['FilePath'] = new_filepaths
new_customlogs.append(customlog)
CustomLogObjects = new_customlogs
# The minimum agent version that this module can be applied to is >1.3, so
# all paths and commands will be workspace-specific
global conf_path
global oms_restart_cmd
conf_path = conf_path.format(WorkspaceID)
oms_restart_cmd = oms_restart_cmd.format(WorkspaceID)
def Set_Marshall(WorkspaceID, Name, Ensure, EnableCustomLogConfiguration = False, CustomLogObjects = None):
init_vars(WorkspaceID, Ensure, CustomLogObjects)
Set(EnableCustomLogConfiguration, Ensure, CustomLogObjects)
return [0]
def Test_Marshall(WorkspaceID, Name, Ensure, EnableCustomLogConfiguration = False, CustomLogObjects = None):
init_vars(WorkspaceID, Ensure, CustomLogObjects)
return Test(EnableCustomLogConfiguration, Ensure, CustomLogObjects)
def Get_Marshall(WorkspaceID, Name, Ensure, EnableCustomLogConfiguration = False, CustomLogObjects = None):
arg_names = list(locals().keys())
init_vars(WorkspaceID, Ensure, CustomLogObjects)
Ensure, CurrentCustomLogObjects = Get(EnableCustomLogConfiguration, Ensure, CustomLogObjects)
Name = protocol.MI_String(Name)
WorkspaceID = protocol.MI_String(WorkspaceID)
EnableCustomLogConfiguration = protocol.MI_Boolean(EnableCustomLogConfiguration)
Ensure = protocol.MI_String(Ensure)
for customlog in CurrentCustomLogObjects:
customlog['LogName'] = protocol.MI_String(customlog['LogName'])
if customlog['FilePath'] is not None and len(customlog['FilePath']):
customlog['FilePath'] = protocol.MI_StringA(customlog['FilePath'])
CustomLogObjects = protocol.MI_InstanceA(CurrentCustomLogObjects)
retd = {}
ld = locals()
for k in arg_names:
retd[k] = ld[k]
return 0, retd
def Set(EnableCustomLogConfiguration, Ensure, CustomLogObjects):
if Test(EnableCustomLogConfiguration, Ensure, CustomLogObjects) == [0]:
return [0]
if EnableCustomLogConfiguration and CustomLogObjects is not None:
if os.path.isdir(RESOURCE_PLUGIN_DIR):
CopyAllFiles(RESOURCE_PLUGIN_DIR, AGENT_PLUGIN_DIR)
UpdateConf(CustomLogObjects)
return [0]
def Test(EnableCustomLogConfiguration, Ensure, CustomLogObjects):
if EnableCustomLogConfiguration:
CurrentCustomLogObjects = ReadConf()
if CurrentCustomLogObjects is None and CustomLogObjects is None:
return [0]
elif CurrentCustomLogObjects is None or CustomLogObjects is None:
return [-1]
CustomLogObjects.sort()
for customlog in CustomLogObjects:
customlog['FilePath'].sort()
CurrentCustomLogObjects.sort()
for customlog in CurrentCustomLogObjects:
customlog['FilePath'].sort()
if CustomLogObjects != CurrentCustomLogObjects:
return [-1]
if Ensure == "Absent":
return [-1]
return [0]
def Get(EnableCustomLogConfiguration, Ensure, CustomLogObjects):
CurrentCustomLogObjects = ReadConf()
return Ensure, CurrentCustomLogObjects
'''
# this file is configured by the OMS service
<source>
type tail
path /tmp/cl_syslog
pos_file /var/opt/microsoft/omsagent/state/CUSTOM_LOG_BLOB.dummysyslog_CL_817f36cf-991d-4b97-afc0-cb70ec349371.pos
read_from_head true
tag oms.blob.CustomLog.CUSTOM_LOG_BLOB.dummysyslog_CL_817f36cf-991d-4b97-afc0-cb70ec349371.*
format none
</source>
<source>
type tail
path /tmp/customlog/*.log,/tmp/none.log
pos_file /var/opt/microsoft/omsagent/state/CUSTOM_LOG_BLOB.WildCardFilePath_CL_817f36cf-991d-4b97-afc0-cb70ec349371.pos
read_from_head true
tag oms.blob.CustomLog.CUSTOM_LOG_BLOB.WildCardFilePath_CL_817f36cf-991d-4b97-afc0-cb70ec349371.*
format none
</source>
'''
def ReadConf():
if not os.path.isfile(conf_path):
        return []
txt = codecs.open(conf_path, 'r', 'utf8').read().encode('ascii','ignore')
customlog_src_srch_str = r'\n<source>\n type sudo_tail.*?path (.*?)\n.*?pos_file /var/opt/microsoft/omsagent/state/(.*?)\.pos\n.*?run_interval ([0-9]+[a-z]*).*?tag oms\.blob\.CustomLog\.(.*?)\.\*.*?format none.*?</source>\n'
customlog_src_srch = re.compile(customlog_src_srch_str, re.M|re.S)
new_customlogs = []
sources = customlog_src_srch.findall(txt)
for source in sources:
s_filepaths = []
if len(source[0]):
s_filepaths = source[0].split(',')
logname = source[1]
new_customlogs.append({'FilePath':s_filepaths,'LogName':logname})
return new_customlogs
'''
sources = [('/tmp/cl_syslog', 'CUSTOM_LOG_BLOB.dummysyslog_CL_817f36cf-991d-4b97-afc0-cb70ec349371', 'CUSTOM_LOG_BLOB.dummysyslog_CL_817f36cf-991d-4b97-afc0-cb70ec349371'), ('/tmp/customlog/*.log,/tmp/none.log', 'CUSTOM_LOG_BLOB.WildCardFilePath_CL_817f36cf-991d-4b97-afc0-cb70ec349371', 'CUSTOM_LOG_BLOB.WildCardFilePath_CL_817f36cf-991d-4b97-afc0-cb70ec349371')]
new_customlogs = [{'LogName': 'CUSTOM_LOG_BLOB.dummysyslog_CL_817f36cf-991d-4b97-afc0-cb70ec349371', 'FilePath': ['/tmp/cl_syslog']}, {'LogName': 'CUSTOM_LOG_BLOB.WildCardFilePath_CL_817f36cf-991d-4b97-afc0-cb70ec349371', 'FilePath': ['/tmp/customlog/*.log', '/tmp/none.log']}]
'''
def UpdateConf(CustomLogObjects):
header = '# This file is configured by the OMS service\n'
new_source = ''
if CustomLogObjects is not None:
for customlog in CustomLogObjects:
logname = customlog['LogName']
filepaths = ','.join(customlog['FilePath'])
new_source += '\n<source>\n type sudo_tail\n path ' + filepaths + '\n pos_file /var/opt/microsoft/omsagent/state/' + logname + '.pos\n read_from_head false\n run_interval 60\n tag oms.blob.CustomLog.' + logname + '.*\n format none\n</source>\n'
txt = header + new_source
if os.path.isfile(conf_path):
shutil.copy2(conf_path, conf_path + '.bak')
codecs.open(conf_path, 'w', 'utf8').write(txt)
os.system(oms_restart_cmd)
def CopyAllFiles(src, dest):
try:
src_files = os.listdir(src)
for file_name in src_files:
full_file_name = os.path.join(src, file_name)
if (os.path.isfile(full_file_name)):
shutil.copy(full_file_name, dest)
except:
LG().Log('ERROR', 'CopyAllFiles failed for src: ' + src + ' dest: ' + dest)
return False
def CompareFiles(file1, file2):
return filecmp.cmp(file1, file2)
|
|
#minecraft traffic lights - simple
#import the minecraft.py module from the minecraft directory
import minecraft.minecraft as minecraft
#import minecraft block module
import minecraft.block as block
#import time, so delays can be used
import time
#import threading so I can make asynchronous calls!
import threading
#import random so I can randomly create cars
import random
class Road():
def __init__(self, mc, x, y, z, width, lenght):
#create road
mc.setBlocks(x, y-1, z, x+lenght, y-1, z+width-1, block.BEDROCK.id)
#create line down the middle
mc.setBlocks(x, y-1, z+(width/2), x+lenght, y-1, z+(width/2), block.WOOL.id, 0)
#store values
self.x = x
self.y = y
self.z = z
self.width = width
self.lenght = lenght
#create empty junctions & cars list
self.junctions = []
self.cars= []
def run(self):
#start up junctions
for junction in self.junctions:
#tell the junction to run in the background
            junction.daemon = True
junction.start()
def stop(self):
#stop junctions
for junction in self.junctions: junction.stop()
#stop cars
for car in self.cars: car.stop()
#wait for junctions to stop
for junction in self.junctions: junction.join()
#wait for cars to stop
for car in self.cars: car.join()
def createJunction(self, posDownRoad, timeOpen, timeClosed):
#create junction at position down the road
junction = Junction(mc, self.x+posDownRoad, self.y, self.z, self.x+posDownRoad+self.width, self.y, self.z+self.width-1, timeOpen, timeClosed)
#add junction to collection
self.junctions.append(junction)
def startCar(self, direction):
#create car
car = Car(mc, self, direction)
#add car to collection
self.cars.append(car)
#tell car to run in background
        car.daemon = True
car.start()
class Car(threading.Thread):
def __init__(self, mc, road, direction):
#store variables
self.mc = mc
self.road = road
self.direction = direction
#set x,y,z
#set z & x position, left or right side, top or bottom of road depending on direction
if direction == 1:
self.x = road.x
self.z = road.z + 1
if direction == -1:
self.x = road.x + road.lenght
self.z = road.z + road.width - 2
self.y = road.y
#setup threading
threading.Thread.__init__(self)
def run(self):
lenghtOfCar = 4
self.running = True
ableToMove = True
#find the end of the road, depending on which way the car is going
endOfRoad = self.road.x
if self.direction == 1: endOfRoad = endOfRoad + self.road.lenght
#loop until i meet the end of the road
while(self.x != endOfRoad and self.running == True):
#draw the car
if ableToMove: self.drawCar()
#sleep for a bit
time.sleep(0.5)
#move the car
#where will the car be moving too?
frontOfCar = self.x + self.direction
#if im going forwards add 3 to the x, as my car's x is at the back
if self.direction == 1: frontOfCar = frontOfCar + lenghtOfCar
ableToMove = True
#am I going to enter a junction which is closed?
for junction in self.road.junctions:
if self.direction == 1 and junction.x1 == frontOfCar and junction.open == False: ableToMove = False
if self.direction == -1 and junction.x2 == frontOfCar and junction.open == False: ableToMove = False
#am I going to hit another car?
for car in self.road.cars:
if self.direction == 1 and frontOfCar >= car.x and frontOfCar <= (car.x + lenghtOfCar) and self.z == car.z and car.running == True: ableToMove = False
if self.direction == -1 and frontOfCar <= (car.x + lenghtOfCar) and frontOfCar >= car.x and self.z == car.z and car.running == True: ableToMove = False
#clear car and add 1 to the car's position
if ableToMove:
self.clearCar()
self.x = self.x + self.direction
self.running = False
def stop(self):
self.running = False
def drawCar(self):
self.mc.setBlocks(self.x, self.y, self.z, self.x + 3, self.y + 2, self.z, block.IRON_BLOCK.id)
self.mc.setBlock(self.x, self.y, self.z, block.WOOL.id, 15)
self.mc.setBlock(self.x+3, self.y, self.z, block.WOOL.id, 15)
def clearCar(self):
self.mc.setBlocks(self.x, self.y, self.z, self.x + 3, self.y + 2, self.z, block.AIR.id)
class Junction(threading.Thread):
def __init__(self, mc, x1, y1, z1, x2, y2, z2, timeOpen, timeClosed):
#create junction
mc.setBlocks(x1,y1-1,z1,x2,y2-1,z2,block.BEDROCK.id)
#create lines
mc.setBlocks(x1,y1-1,z1,x2,y2-1,z1,block.WOOL.id,0)
mc.setBlocks(x2,y1-1,z1,x2,y2-1,z2,block.WOOL.id,0)
mc.setBlocks(x2,y1-1,z2,x1,y2-1,z2,block.WOOL.id,0)
mc.setBlocks(x1,y1-1,z2,x1,y2-1,z1,block.WOOL.id,0)
#create traffic lights
self.trafficLight1 = TrafficLight(mc, x1, y1, z1-1, -1)
self.trafficLight2 = TrafficLight(mc, x2, y2, z2+1, 1)
#set to open
self.openJunction()
#store times
self.timeOpen = timeOpen
self.timeClosed = timeClosed
#setup threading
threading.Thread.__init__(self)
#store variables
self.x1 = x1
self.y1 = y1
self.z1 = z1
self.x2 = x2
self.y2 = y2
self.z2 = z2
def run(self):
#start the Junction
self.running = True
while(self.running):
self.openJunction()
time.sleep(self.timeOpen)
self.closeJunction()
time.sleep(self.timeClosed)
def stop(self):
#stop the junction
self.running = False
def openJunction(self):
#set lights to go
light1 = self.trafficLight1.go()
light2 = self.trafficLight2.go()
#wait for lights to finish changing
light1.join()
light2.join()
#set open status to True
self.open = True
def closeJunction(self):
#set lights to stop
light1 = self.trafficLight1.stop()
light2 = self.trafficLight2.stop()
#wait for lights to finish changing
light1.join()
light2.join()
#set open status to False
self.open = False
class TrafficLight():
def __init__(self, mc, x, y, z, direction):
#build traffic light
# pole straight up
mc.setBlocks(x,y,z,x,y+5,z,block.IRON_BLOCK.id, 15)
# create 3 lights out of wool
# wool values (black - 15, red - 14, yellow - 4, green - 13)
# set all the lights to off (black)
mc.setBlock(x+direction,y+5,z,block.WOOL.id,15)
mc.setBlock(x+direction,y+4,z,block.WOOL.id,15)
mc.setBlock(x+direction,y+3,z,block.WOOL.id,15)
#set to stop
mc.setBlock(x+direction,y+5,z,block.WOOL.id,14)
#store x,y,z
self.x = x
self.y = y
self.z = z
        #store direction
self.direction = direction
#store mc
self.mc = mc
def go(self):
thread = threading.Thread(target=self.setToGo)
thread.start()
return thread
def setToGo(self):
#set to stop, prepare
self.mc.setBlock(self.x+self.direction,self.y+5,self.z,block.WOOL.id,14)
self.mc.setBlock(self.x+self.direction,self.y+4,self.z,block.WOOL.id,4)
self.mc.setBlock(self.x+self.direction,self.y+3,self.z,block.WOOL.id,15)
time.sleep(0.5)
#set to go
self.mc.setBlock(self.x+self.direction,self.y+5,self.z,block.WOOL.id,15)
self.mc.setBlock(self.x+self.direction,self.y+4,self.z,block.WOOL.id,15)
self.mc.setBlock(self.x+self.direction,self.y+3,self.z,block.WOOL.id,13)
time.sleep(0.5)
def stop(self):
thread = threading.Thread(target=self.setToStop)
thread.start()
return thread
def setToStop(self):
#set to prepare
self.mc.setBlock(self.x+self.direction,self.y+5,self.z,block.WOOL.id,15)
self.mc.setBlock(self.x+self.direction,self.y+4,self.z,block.WOOL.id,4)
self.mc.setBlock(self.x+self.direction,self.y+3,self.z,block.WOOL.id,15)
time.sleep(0.5)
#set to stop
self.mc.setBlock(self.x+self.direction,self.y+5,self.z,block.WOOL.id,14)
self.mc.setBlock(self.x+self.direction,self.y+4,self.z,block.WOOL.id,15)
self.mc.setBlock(self.x+self.direction,self.y+3,self.z,block.WOOL.id,15)
time.sleep(0.5)
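#illustrative sketch: a TrafficLight can also be driven on its own,
#assuming a minecraft connection mc already exists
#
# light = TrafficLight(mc, 0, 0, -1, 1)
# light.stop().join() #turn red and wait for the light sequence to finish
# light.go().join() #turn green and wait for the light sequence to finish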
if __name__ == "__main__":
#MAIN PROGRAM
#Connect to minecraft
mc = minecraft.Minecraft.create()
#clear area
mc.setBlocks(-10,0,-10,60,50,10,block.AIR.id)
#put grass on the floor
mc.setBlocks(-10,-1,-10,60,-1,10,block.GRASS.id)
#create road
road = Road(mc, 0,0,0,9,50)
#create junction, 20 blocks down, open for 10 seconds, closed for 5
road.createJunction(20,10,5)
#start the road
road.run()
#loop until Ctrl C
try:
while(True):
            #create a car in a random direction; unless it's 0 we won't create a car
direction = random.randint(-1, 1)
if direction != 0:
road.startCar(direction)
#sleep for a bit
time.sleep(3)
except KeyboardInterrupt:
print("stopped")
finally:
#stop everything
road.stop()
|
|
from datetime import datetime
import unittest
from pheme.util.config import Config
from pheme.util.pg_access import AlchemyAccess
from pheme.warehouse.tables import create_tables
from pheme.warehouse.tables import HL7_Dx
from pheme.warehouse.tables import HL7_Msh
from pheme.warehouse.tables import HL7_Nte
from pheme.warehouse.tables import HL7_Obr
from pheme.warehouse.tables import HL7_Obx
from pheme.warehouse.tables import HL7_RawMessage
from pheme.warehouse.tables import HL7_Spm
from pheme.warehouse.tables import HL7_Visit
def setup_module():
"""Create a fresh db (once) for all tests in this module"""
c = Config()
if c.get('general', 'in_production'): # pragma: no cover
raise RuntimeError("DO NOT run destructive test on production system")
cfg_value = lambda v: c.get('warehouse', v)
create_tables(cfg_value('create_table_user'),
cfg_value('create_table_password'),
cfg_value('database'),
enable_delete=True)
class testSqlAObjects(unittest.TestCase):
"""We should be able to create and work with objects
that are based on tables in the database
"""
def setUp(self):
c = Config()
cfg_value = lambda v: c.get('warehouse', v)
self.alchemy = AlchemyAccess(database=cfg_value('database'),
host='localhost',
user=cfg_value('database_user'),
password=cfg_value('database_password'))
self.session = self.alchemy.session
def tearDown(self):
# Purge the unittest hl7_msh and all related data
self.session.delete(self.msh)
self.session.commit()
self.alchemy.disconnect()
def testABuildTables(self):
"""We need to build dependent tables in the correct order.
"""
self.tHL7_Msh()
self.tHL7_RawMessage()
self.tHL7_Visit()
self.tHL7_Dx()
self.tHL7_Obr()
self.tHL7_Obx()
def tHL7_RawMessage(self):
"""Create an HL7_RawMessage object that is saved to the database"""
mess = HL7_RawMessage(hl7_raw_message_id=1,
message_control_id=u'control_id',
raw_data=u'some raw data')
#Add the new message to the session
self.session.add(mess)
self.session.commit()
query = self.session.query(HL7_RawMessage).\
filter(HL7_RawMessage.hl7_raw_message_id == 1)
self.assert_(query.count() == 1,
'The message we created was not found')
result = query.first()
#Check that the __repr__ is working as expected
self.assert_(result.__repr__() == '<HL7_RawMessage 1>',
'Message string invalid.\nExpected: '\
'<HL7_RawMessage 1>\nGot: %s' % result)
#Make sure all the fields came out as expected
self.assert_(result.hl7_raw_message_id == 1,
'hl7_raw_message_id invalid.\nExpected: '\
'1\nGot: %s' % result.hl7_raw_message_id)
self.assert_(result.message_control_id == 'control_id',
'message_control_id invalid.\nExpected: '\
'control_id\nGot: %s' % result.message_control_id)
self.assert_(result.raw_data == 'some raw data',
'raw_data invalid.\nExpected: some raw '\
'data\nGot: %s' % result.raw_data)
def tHL7_Msh(self):
"""Create an HL7_Msh object that is saved to the database"""
self.msh = HL7_Msh(hl7_msh_id=1,
message_control_id=u'control_id',
message_type=u'message type',
facility=u'facility',
                           message_datetime=datetime(2007, 1, 1),
batch_filename=u'183749382629734')
#Add the new msh to the session
self.session.add(self.msh)
self.session.commit()
query = self.session.query(HL7_Msh)
self.assert_(query.count() == 1,
'The msh we created was not found')
result = query.first()
#Check that the __repr__ is working as expected
self.assert_(result.__repr__() == '<HL7_Msh 1>',
'Message string invalid.\nExpected: '\
                     '<HL7_Msh 1>\nGot: %s' % result)
#Make sure all the fields came out as expected
self.assert_(result.hl7_msh_id == 1,
'hl7_msh_id invalid.\nExpected: 1\nGot: '\
'%s' % result.hl7_msh_id)
self.assert_(result.message_control_id == 'control_id',
'message_control_id invalid.\nExpected: '\
'control_id\nGot: %s' % result.message_control_id)
self.assert_(result.message_type == 'message type',
'message_type invalid.\nExpected: message '\
'type\nGot: %s' % result.message_type)
self.assert_(result.facility == 'facility',
'facility invalid.\nExpected: '\
'facility\nGot: %s' % result.facility)
self.assert_(result.message_datetime ==
                     datetime(2007, 1, 1, 0, 0),
'message_datetime invalid.\nExpected: '\
'2007-01-01 00:00:00\nGot: %s' % result.message_datetime)
self.assert_(result.batch_filename == '183749382629734',
'batch_filename invalid.\nExpected: '\
'183749382629734\nGot: %s' % result.batch_filename)
def tHL7_Visit(self):
"""Create an HL7_Visit object that is saved to the database"""
visit = HL7_Visit(hl7_visit_id=1,
visit_id=u'45',
patient_id=u'patient id',
zip=u'zip',
                          admit_datetime=datetime(2007, 1, 1),
gender=u'F',
dob=u'2001,01',
chief_complaint=u'Pain',
patient_class=u'1',
hl7_msh_id=1,
disposition='01',
state='WA',
admission_source='Emergency room',
assigned_patient_location='MVMGREF')
#Add the new msh to the session
self.session.add(visit)
self.session.commit()
query = self.session.query(HL7_Visit)
self.assert_(query.count() == 1,
'The visit we created was not found')
result = query.first()
#Check that the __repr__ is working as expected
self.assert_(result.__repr__() == '<HL7_Visit 1>',
'Message string invalid.\nExpected: '\
'<HL7_Visit 1>\nGot: %s' % result)
#Make sure all the fields came out as expected
self.assert_(result.hl7_visit_id == 1,
'hl7_visit_id invalid.\nExpected: '\
'1\nGot: %s' % result.hl7_visit_id)
self.assert_(result.visit_id == '45',
'visit_id invalid.\nExpected: 45\nGot: '\
'%s' % result.visit_id)
self.assert_(result.patient_id == 'patient id',
'patient_id invalid.\nExpected: patient '\
'id\nGot: %s' % result.patient_id)
self.assert_(result.zip == 'zip',
'zip invalid.\nExpected: zip\nGot: %s' % result.zip)
        self.assert_(result.admit_datetime == datetime(2007, 1, 1),
'admit_datetime invalid.\nExpected: '\
'2007-01-01 00:00:00\nGot: %s' % result.admit_datetime)
self.assert_(result.gender == 'F',
'gender invalid.\nExpected: F\nGot: %s' % result.gender)
self.assert_(result.dob == '2001,01',
                     'dob invalid.\nExpected: '\
                     '2001,01\nGot: %s' % result.dob)
self.assert_(result.chief_complaint == 'Pain',
'chief_complaint invalid.\nExpected: '\
'Pain\nGot: %s' % result.chief_complaint)
self.assert_(result.patient_class == '1',
'patient_class invalid.\nExpected: '\
'1\nGot: %s' % result.patient_class)
self.assert_(result.disposition == '01',
'disposition invalid.\nExpected: '\
'01\nGot: %s' % result.disposition)
self.assertEquals(result.state, 'WA')
self.assertEquals(result.admission_source, 'Emergency room')
self.assertEquals(result.assigned_patient_location, 'MVMGREF')
def tHL7_Dx(self):
"""Create an HL7_Dx object that is saved to the database"""
dx = HL7_Dx(hl7_dx_id=1,
dx_code=u'dx code',
dx_description=u'description',
dx_type=u'A',
hl7_msh_id=1)
#Add the new msh to the session
self.session.add(dx)
self.session.commit()
query = self.session.query(HL7_Dx)
self.assert_(query.count() == 1,
'The dx we created was not found')
result = query.first()
#Check that the __repr__ is working as expected
self.assert_(result.__repr__() == '<HL7_Dx 1>',
'Message string invalid.\nExpected: '\
'<HL7_Dx 1>\nGot: %s' % result)
self.assert_(result.hl7_dx_id == 1,
'hl7_dx_id invalid.\nExpected: 1\nGot: '\
'%s' % result.hl7_dx_id)
self.assert_(result.dx_code == 'dx code',
'dx_code invalid.\nExpected: dx code\nGot: '\
'%s' % result.dx_code)
self.assert_(result.dx_description == 'description',
'dx_description invalid.\nExpected: '\
'description\nGot: %s' % result.dx_description)
self.assert_(result.dx_type == 'A',
'dx_type invalid.\nExpected: A\nGot: %s' % result.dx_type)
def tHL7_Obr(self):
"""Create an HL7_Obr object that is saved to the database"""
dt = datetime.now()
obr = HL7_Obr(hl7_obr_id=1,
loinc_code=u'loinc code',
loinc_text=u'loinc text',
alt_text=u'alt text',
hl7_msh_id=1,
status='W',
report_datetime=dt,
specimen_source='NASAL')
#Add the new msh to the session
self.session.add(obr)
self.session.commit()
query = self.session.query(HL7_Obr)
self.assert_(query.count() == 1,
'The obr we created was not found')
result = query.first()
#Check that the __repr__ is working as expected
self.assert_(result.__repr__() == '<HL7_Obr 1>',
'Message string invalid.\nExpected: '\
'<HL7_Obr 1>\nGot: %s' % result)
self.assert_(result.hl7_obr_id == 1,
'hl7_obr_id invalid.\nExpected: 1\nGot: '\
'%s' % result.hl7_obr_id)
self.assert_(result.loinc_code == 'loinc code',
'loinc_code invalid.\nExpected: '\
'loinc code\nGot: %s' % result.loinc_code)
self.assert_(result.loinc_text == 'loinc text',
'loinc_text invalid.\nExpected: '\
'loinc text\nGot: %s' % result.loinc_text)
self.assert_(result.alt_text == 'alt text',
'alt text invalid.\nExpected: alt '\
'text\nGot: %s' % result.alt_text)
self.assertEquals(result.status, 'W')
self.assertEquals(result.report_datetime, dt)
self.assertEquals(result.specimen_source, 'NASAL')
def tHL7_Obx(self):
"""Create an HL7_Obx object that is saved to the database"""
obx = HL7_Obx(hl7_obx_id=1,
hl7_obr_id=1,
value_type='vt',
observation_id=u'observation id',
observation_text=u'observation text',
observation_result=u'observation result',
units=u'units',
result_status=u'result status',
observation_datetime=datetime(2001, 1, 1),
hl7_msh_id=1,
performing_lab_code='SHMC')
#Add the new msh to the session
self.session.add(obx)
self.session.commit()
query = self.session.query(HL7_Obx)
self.assert_(query.count() == 1,
'The obx we created was not found')
result = query.first()
#Check that the __repr__ is working as expected
self.assert_(result.__repr__() == '<HL7_Obx 1>',
'Message string invalid.\nExpected: '\
'<HL7_Obx 1>\nGot: %s' % result)
self.assert_(result.hl7_obx_id == 1,
'hl7_obx_id invalid.\nExpected: '\
'1\nGot: %s' % result.hl7_obx_id)
self.assert_(result.hl7_obr_id == 1,
'hl7_obr_id invalid.\nExpected: '\
'1\nGot: %s' % result.hl7_obr_id)
self.assert_(result.value_type.strip() == 'vt',
'value_type invalid.\nExpected: '\
'vt\nGot: %s' % result.value_type)
self.assert_(result.observation_text == 'observation text',
'observation_text invalid.\nExpected: '\
'observation text\nGot: %s' % result.observation_text)
self.assert_(result.observation_result == 'observation result',
'observation_result invalid.\nExpected: '\
'observation result\nGot: %s' % result.observation_result)
self.assert_(result.units == 'units',
'units invalid.\nExpected: units\nGot: %s'
% result.units)
self.assert_(result.result_status == 'result status',
'result_status invalid.\nExpected: result '\
'status\nGot: %s' % result.result_status)
self.assert_(result.observation_datetime == datetime(2001, 1, 1),
'observation_datetime invalid.\nExpected: '\
'2001-01-01 00:00:00\nGot: %s' %
result.observation_datetime)
self.assertEquals(result.performing_lab_code, 'SHMC')
def testObxRelation(self):
"Use sqlalchemy relations for automated obx/obr relations "
# Need an HL7_Msh for foreign key constraint conformance
self.msh = HL7_Msh(hl7_msh_id=1,
message_control_id=u'control_id',
message_type=u'message type',
facility=u'facility',
                           message_datetime=datetime(2007, 1, 1),
batch_filename=u'183749382629734')
obr = HL7_Obr(loinc_code=u'loinc code',
loinc_text=u'loinc text',
alt_text=u'alt text',
hl7_msh_id=self.msh.hl7_msh_id)
obx = HL7_Obx(value_type='vt',
observation_id=u'observation id',
observation_text=u'observation text',
observation_result=u'observation result',
units=u'units',
result_status=u'result status',
observation_datetime=datetime(2001, 1, 1),
hl7_msh_id=self.msh.hl7_msh_id)
obr.obxes.append(obx)
self.session.add(self.msh)
self.session.commit()
self.session.add(obr)
self.session.commit()
# See if the commit cascaded. If so, the obx will have a
# valid pk and the obr foreign key set.
self.assertEquals(obr.hl7_obr_id, obx.hl7_obr_id)
# Now query for the obr, see if the obx is in tow.
roundTripObr = self.session.query(HL7_Obr).one()
self.assertTrue(roundTripObr.hl7_obr_id > 0)
self.assertEquals(type(roundTripObr.obxes[0]), type(obx))
self.assertEquals(roundTripObr.obxes[0], obx)
def testNte(self):
"""Test HL7_Nte table access """
self.msh = HL7_Msh(hl7_msh_id=1,
message_control_id=u'control_id',
message_type=u'message type',
facility=u'facility',
                           message_datetime=datetime(2007, 1, 1),
batch_filename=u'183749382629734')
self.session.add(self.msh)
self.session.commit()
obr = HL7_Obr(hl7_obr_id=1,
loinc_code=u'loinc code',
loinc_text=u'loinc text',
alt_text=u'alt text',
hl7_msh_id=1,
status='W',
report_datetime=datetime.now(),
specimen_source='NASAL')
self.session.add(obr)
self.session.commit()
obx = HL7_Obx(hl7_obx_id=1,
hl7_obr_id=1,
value_type='vt',
observation_id=u'observation id',
observation_text=u'observation text',
observation_result=u'observation result',
units=u'units',
result_status=u'result status',
observation_datetime=datetime(2001, 1, 1),
hl7_msh_id=1,
performing_lab_code=u'SHMC',
sequence=u'1.1',)
self.session.add(obx)
self.session.commit()
note = HL7_Nte(sequence_number=1,
note='fascinating unittest note',
hl7_obx_id=1)
self.session.add(note)
self.session.commit()
query = self.session.query(HL7_Nte)
self.assertEquals(query.count(), 1)
self.assertEquals(query.one().note,
'fascinating unittest note')
self.assertEquals(query.one().sequence_number, 1)
def testSpecimenSource(self):
"""Test HL7_Spm table access """
self.msh = HL7_Msh(hl7_msh_id=1,
message_control_id=u'control_id',
message_type=u'message type',
facility=u'facility',
                           message_datetime=datetime(2007, 1, 1),
batch_filename=u'183749382629734')
self.session.add(self.msh)
self.session.commit()
obr = HL7_Obr(hl7_obr_id=1,
loinc_code=u'loinc code',
loinc_text=u'loinc text',
alt_text=u'alt text',
hl7_msh_id=1,
status='W',
report_datetime=datetime.now(),
specimen_source='NASAL')
self.session.add(obr)
self.session.commit()
spm = HL7_Spm(id='123', description="your belly",
code='bly', hl7_obr_id=1)
self.session.add(spm)
self.session.commit()
query = self.session.query(HL7_Spm)
self.assertEquals(query.count(), 1)
self.assertEquals(query.one().description, 'your belly')
self.assertEquals(query.one().code, 'bly')
if '__main__' == __name__: # pragma: no cover
unittest.main()
|
|
from ..Qt import QtGui, QtCore, QtWidgets, USE_PYSIDE
if not USE_PYSIDE:
import sip
from .. import multiprocess as mp
from .GraphicsView import GraphicsView
from .. import CONFIG_OPTIONS
import numpy as np
import mmap, tempfile, ctypes, atexit, sys, random
__all__ = ['RemoteGraphicsView']
class RemoteGraphicsView(QtWidgets.QWidget):
"""
Replacement for GraphicsView that does all scene management and rendering on a remote process,
while displaying on the local widget.
GraphicsItems must be created by proxy to the remote process.
"""
def __init__(self, parent=None, *args, **kwds):
"""
        The keyword arguments 'useOpenGL' and 'background', if specified, are passed to the remote
GraphicsView.__init__(). All other keyword arguments are passed to multiprocess.QtProcess.__init__().
"""
self._img = None
self._imgReq = None
self._sizeHint = (640,480) ## no clue why this is needed, but it seems to be the default sizeHint for GraphicsView.
## without it, the widget will not compete for space against another GraphicsView.
QtWidgets.QWidget.__init__(self)
# separate local keyword arguments from remote.
remoteKwds = {}
for kwd in ['useOpenGL', 'background']:
if kwd in kwds:
remoteKwds[kwd] = kwds.pop(kwd)
self._proc = mp.QtProcess(**kwds)
self.pg = self._proc._import('pyqtgraph')
self.pg.setConfigOptions(**CONFIG_OPTIONS)
rpgRemote = self._proc._import('pyqtgraph.widgets.RemoteGraphicsView')
self._view = rpgRemote.Renderer(*args, **remoteKwds)
self._view._setProxyOptions(deferGetattr=True)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
self.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
self.setMouseTracking(True)
self.shm = None
shmFileName = self._view.shmFileName()
if sys.platform.startswith('win'):
self.shmtag = shmFileName
else:
self.shmFile = open(shmFileName, 'r')
self._view.sceneRendered.connect(mp.proxy(self.remoteSceneChanged)) #, callSync='off'))
## Note: we need synchronous signals
## even though there is no return value--
## this informs the renderer that it is
## safe to begin rendering again.
for method in ['scene', 'setCentralItem']:
setattr(self, method, getattr(self._view, method))
def resizeEvent(self, ev):
ret = QtWidgets.QWidget.resizeEvent(self, ev)
self._view.resize(self.size(), _callSync='off')
return ret
def sizeHint(self):
return QtCore.QSize(*self._sizeHint)
def remoteSceneChanged(self, data):
w, h, size, newfile = data
#self._sizeHint = (whint, hhint)
if self.shm is None or self.shm.size != size:
if self.shm is not None:
self.shm.close()
if sys.platform.startswith('win'):
self.shmtag = newfile ## on windows, we create a new tag for every resize
self.shm = mmap.mmap(-1, size, self.shmtag) ## can't use tmpfile on windows because the file can only be opened once.
else:
self.shm = mmap.mmap(self.shmFile.fileno(), size, mmap.MAP_SHARED, mmap.PROT_READ)
self.shm.seek(0)
data = self.shm.read(w*h*4)
self._img = QtGui.QImage(data, w, h, QtGui.QImage.Format_ARGB32)
self._img.data = data # data must be kept alive or PySide 1.2.1 (and probably earlier) will crash.
self.update()
def paintEvent(self, ev):
if self._img is None:
return
p = QtGui.QPainter(self)
p.drawImage(self.rect(), self._img, QtCore.QRect(0, 0, self._img.width(), self._img.height()))
p.end()
def mousePressEvent(self, ev):
self._view.mousePressEvent(int(ev.type()), ev.pos(), ev.globalPos(), int(ev.button()), int(ev.buttons()), int(ev.modifiers()), _callSync='off')
ev.accept()
return QtWidgets.QWidget.mousePressEvent(self, ev)
def mouseReleaseEvent(self, ev):
self._view.mouseReleaseEvent(int(ev.type()), ev.pos(), ev.globalPos(), int(ev.button()), int(ev.buttons()), int(ev.modifiers()), _callSync='off')
ev.accept()
return QtWidgets.QWidget.mouseReleaseEvent(self, ev)
def mouseMoveEvent(self, ev):
self._view.mouseMoveEvent(int(ev.type()), ev.pos(), ev.globalPos(), int(ev.button()), int(ev.buttons()), int(ev.modifiers()), _callSync='off')
ev.accept()
return QtWidgets.QWidget.mouseMoveEvent(self, ev)
def wheelEvent(self, ev):
self._view.wheelEvent(ev.pos(), ev.globalPos(), ev.angleDelta().y(), int(ev.buttons()), int(ev.modifiers()), int(ev.orientation()), _callSync='off')
ev.accept()
return QtWidgets.QWidget.wheelEvent(self, ev)
def keyEvent(self, ev):
        if self._view.keyEvent(int(ev.type()), int(ev.modifiers()), ev.text(), ev.isAutoRepeat(), ev.count()):
ev.accept()
return QtWidgets.QWidget.keyEvent(self, ev)
def enterEvent(self, ev):
self._view.enterEvent(int(ev.type()), _callSync='off')
return QtWidgets.QWidget.enterEvent(self, ev)
def leaveEvent(self, ev):
self._view.leaveEvent(int(ev.type()), _callSync='off')
return QtWidgets.QWidget.leaveEvent(self, ev)
def remoteProcess(self):
"""Return the remote process handle. (see multiprocess.remoteproxy.RemoteEventHandler)"""
return self._proc
def close(self):
"""Close the remote process. After this call, the widget will no longer be updated."""
self._proc.close()
class Renderer(GraphicsView):
## Created by the remote process to handle render requests
sceneRendered = QtCore.Signal(object)
def __init__(self, *args, **kwds):
## Create shared memory for rendered image
#pg.dbg(namespace={'r': self})
if sys.platform.startswith('win'):
self.shmtag = "pyqtgraph_shmem_" + ''.join([chr((random.getrandbits(20)%25) + 97) for i in range(20)])
self.shm = mmap.mmap(-1, mmap.PAGESIZE, self.shmtag) # use anonymous mmap on windows
else:
self.shmFile = tempfile.NamedTemporaryFile(prefix='pyqtgraph_shmem_')
self.shmFile.write(b'\x00' * (mmap.PAGESIZE+1))
fd = self.shmFile.fileno()
self.shm = mmap.mmap(fd, mmap.PAGESIZE, mmap.MAP_SHARED, mmap.PROT_WRITE)
atexit.register(self.close)
GraphicsView.__init__(self, *args, **kwds)
self.scene().changed.connect(self.update)
self.img = None
self.renderTimer = QtCore.QTimer()
self.renderTimer.timeout.connect(self.renderView)
self.renderTimer.start(16)
def close(self):
self.shm.close()
if not sys.platform.startswith('win'):
self.shmFile.close()
def shmFileName(self):
if sys.platform.startswith('win'):
return self.shmtag
else:
return self.shmFile.name
def update(self):
self.img = None
return GraphicsView.update(self)
def resize(self, size):
oldSize = self.size()
GraphicsView.resize(self, size)
self.resizeEvent(QtGui.QResizeEvent(size, oldSize))
self.update()
def renderView(self):
if self.img is None:
## make sure shm is large enough and get its address
if self.width() == 0 or self.height() == 0:
return
size = self.width() * self.height() * 4
if size > self.shm.size():
if sys.platform.startswith('win'):
## windows says "WindowsError: [Error 87] the parameter is incorrect" if we try to resize the mmap
self.shm.close()
## it also says (sometimes) 'access is denied' if we try to reuse the tag.
self.shmtag = "pyqtgraph_shmem_" + ''.join([chr((random.getrandbits(20)%25) + 97) for i in range(20)])
self.shm = mmap.mmap(-1, size, self.shmtag)
else:
self.shm.resize(size)
## render the scene directly to shared memory
if USE_PYSIDE:
ch = ctypes.c_char.from_buffer(self.shm, 0)
#ch = ctypes.c_char_p(address)
self.img = QtGui.QImage(ch, self.width(), self.height(), QtGui.QImage.Format_ARGB32)
else:
address = ctypes.addressof(ctypes.c_char.from_buffer(self.shm, 0))
# different versions of pyqt have different requirements here..
try:
self.img = QtGui.QImage(sip.voidptr(address), self.width(), self.height(), QtGui.QImage.Format_ARGB32)
except TypeError:
try:
self.img = QtGui.QImage(memoryview(buffer(self.shm)), self.width(), self.height(), QtGui.QImage.Format_ARGB32)
except TypeError:
# Works on PyQt 4.9.6
self.img = QtGui.QImage(address, self.width(), self.height(), QtGui.QImage.Format_ARGB32)
self.img.fill(0xffffffff)
p = QtGui.QPainter(self.img)
self.render(p, self.viewRect(), self.rect())
p.end()
self.sceneRendered.emit((self.width(), self.height(), self.shm.size(), self.shmFileName()))
def mousePressEvent(self, typ, pos, gpos, btn, btns, mods):
typ = QtCore.QEvent.Type(typ)
btn = QtCore.Qt.MouseButton(btn)
btns = QtCore.Qt.MouseButtons(btns)
mods = QtCore.Qt.KeyboardModifiers(mods)
return GraphicsView.mousePressEvent(self, QtGui.QMouseEvent(typ, pos, gpos, btn, btns, mods))
def mouseMoveEvent(self, typ, pos, gpos, btn, btns, mods):
typ = QtCore.QEvent.Type(typ)
btn = QtCore.Qt.MouseButton(btn)
btns = QtCore.Qt.MouseButtons(btns)
mods = QtCore.Qt.KeyboardModifiers(mods)
return GraphicsView.mouseMoveEvent(self, QtGui.QMouseEvent(typ, pos, gpos, btn, btns, mods))
def mouseReleaseEvent(self, typ, pos, gpos, btn, btns, mods):
typ = QtCore.QEvent.Type(typ)
btn = QtCore.Qt.MouseButton(btn)
btns = QtCore.Qt.MouseButtons(btns)
mods = QtCore.Qt.KeyboardModifiers(mods)
return GraphicsView.mouseReleaseEvent(self, QtGui.QMouseEvent(typ, pos, gpos, btn, btns, mods))
def wheelEvent(self, pos, gpos, d, btns, mods, ori):
btns = QtCore.Qt.MouseButtons(btns)
mods = QtCore.Qt.KeyboardModifiers(mods)
ori = (None, QtCore.Qt.Horizontal, QtCore.Qt.Vertical)[ori]
return GraphicsView.wheelEvent(self, QtGui.QWheelEvent(pos, gpos, d, btns, mods, ori))
def keyEvent(self, typ, mods, text, autorep, count):
typ = QtCore.QEvent.Type(typ)
mods = QtCore.Qt.KeyboardModifiers(mods)
        ev = QtGui.QKeyEvent(typ, mods, text, autorep, count)
        GraphicsView.keyEvent(self, ev)
        return ev.isAccepted()
def enterEvent(self, typ):
ev = QtCore.QEvent(QtCore.QEvent.Type(typ))
return GraphicsView.enterEvent(self, ev)
def leaveEvent(self, typ):
ev = QtCore.QEvent(QtCore.QEvent.Type(typ))
return GraphicsView.leaveEvent(self, ev)
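if __name__ == '__main__':  # pragma: no cover
    # Minimal usage sketch, not part of the module itself: the data plotted
    # below is made up. It shows the intended pattern described in the class
    # docstring: the PlotItem is created *inside the remote process* through
    # the proxied `view.pg` module, then handed to setCentralItem(), which is
    # forwarded to the remote GraphicsView.
    import pyqtgraph as pg
    app = pg.mkQApp()
    view = RemoteGraphicsView()
    view.show()
    plot = view.pg.PlotItem()            # remote object, accessed by proxy
    view.setCentralItem(plot)
    plot.plot([1, 4, 2, 3, 6, 5], clear=True)
    app.exec_()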
|
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
from . import Command
from ..benchmarks import Benchmarks
from ..console import log
from ..machine import Machine
from ..repo import get_repo
from .. import util
from .setup import Setup
from . import common_args
def draw_graph(lo, mid, hi, total):
nchars = 60
scale = float(nchars) / total
graph = ['-'] * nchars
graph[int(lo * scale)] = '<'
graph[int(hi * scale)] = '>'
graph[int(mid * scale)] = 'O'
return ''.join(graph)
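# For example, draw_graph(2, 5, 8, 10) lays lo/mid/hi out on a 60-character
# ruler (scale = 60 / 10 = 6): '<' lands at index 12, 'O' at 30 and '>' at 48.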
class Find(Command):
@classmethod
def setup_arguments(cls, subparsers):
parser = subparsers.add_parser(
"find", help="Find commits that introduced large regressions",
description="""Adaptively searches a range of commits for
one that produces a large regression. This only works well
when the regression in the range is mostly monotonic.""")
parser.add_argument(
'range', type=str, metavar=('from..to',),
help="""Range of commits to search. For a git
repository, this is passed as the first argument to ``git
log``. See 'specifying ranges' section of the
`gitrevisions` manpage for more info.""")
parser.add_argument(
"bench", type=str, metavar=('benchmark_name',),
help="""Name of benchmark to use in search.""")
parser.add_argument(
"--invert", "-i", action="store_true",
help="""Search for a decrease in the benchmark value,
rather than an increase.""")
common_args.add_show_stderr(parser)
common_args.add_machine(parser)
parser.set_defaults(func=cls.run_from_args)
return parser
@classmethod
def run_from_conf_args(cls, conf, args, **kwargs):
return cls.run(
conf, args.range, args.bench,
invert=args.invert, show_stderr=args.show_stderr,
machine=args.machine, **kwargs
)
@classmethod
def run(cls, conf, range_spec, bench, invert=False, show_stderr=False,
machine=None, _machine_file=None):
# TODO: Allow for choosing an environment
params = {}
machine_params = Machine.load(
machine_name=machine,
_path=_machine_file,
interactive=True)
params.update(machine_params.__dict__)
machine_params.save(conf.results_dir)
repo = get_repo(conf)
repo.pull()
commit_hashes = repo.get_hashes_from_range(range_spec)[::-1]
if len(commit_hashes) == 0:
log.error("No commit hashes selected")
return 1
environments = Setup.run(conf=conf)
if len(environments) == 0:
log.error("No environments selected")
return 1
benchmarks = Benchmarks(conf, regex=bench)
if len(benchmarks) == 0:
log.error("'{0}' benchmark not found".format(bench))
return 1
elif len(benchmarks) > 1:
log.error("'{0}' matches more than one benchmark".format(bench))
return 1
steps = int(math.log(len(commit_hashes)) / math.log(2))
log.info(
"Running approximately {0} benchmarks within {1} commits".format(
steps, len(commit_hashes)))
env = environments[0]
results = [None] * len(commit_hashes)
def do_benchmark(i):
if results[i] is not None:
return results[i]
commit_hash = commit_hashes[i]
log.info(
"For {0} commit hash {1}:".format(
conf.project, commit_hash[:8]))
env.install_project(conf, commit_hash)
x = benchmarks.run_benchmarks(
env, show_stderr=show_stderr)
result = list(x.values())[0]['result']
if isinstance(result, dict):
# parameterized results
result = result['result']
else:
# single value
result = [result]
results[i] = result
return results[i]
def non_null_results(*results):
"""
Whether some value is non-null in all result sets
"""
for values in zip(*results):
if all(x is not None for x in values):
return True
return False
def difference_3way(a, b, c):
"""
Return largest regression (a-b, b-c).
"""
results_ab = [0]
results_bc = [0]
for va, vb, vc in zip(a, b, c):
if va is not None and vb is not None and vc is not None:
denom = abs(va) + abs(vb) + abs(vc)
if denom == 0:
denom = 1.0
results_ab.append((va - vb) / denom)
results_bc.append((vb - vc) / denom)
return max(results_ab), max(results_bc)
def do_search(lo, hi):
if hi - lo <= 1:
return hi
mid = int(math.floor((hi - lo) / 2) + lo)
log.info(
"Testing {0}".format(
draw_graph(lo, mid, hi, len(commit_hashes))))
with log.indent():
lo_result = None
while lo_result is None:
lo_result = do_benchmark(lo)
if not non_null_results(lo_result):
lo_result = None
lo += 1
if lo >= mid:
raise util.UserError("Too many commits failed")
mid_result = None
while mid_result is None:
mid_result = do_benchmark(mid)
if not non_null_results(mid_result, lo_result):
mid_result = None
mid += 1
if mid >= hi:
raise util.UserError("Too many commits failed")
hi_result = None
while hi_result is None:
hi_result = do_benchmark(hi)
if not non_null_results(lo_result, mid_result, hi_result):
hi_result = None
hi -= 1
if hi <= mid:
raise util.UserError("Too many commits failed")
diff_b, diff_a = difference_3way(hi_result, mid_result, lo_result)
if invert:
diff_a *= -1.0
diff_b *= -1.0
if diff_a >= diff_b:
return do_search(lo, mid)
else:
return do_search(mid, hi)
result = do_search(0, len(commit_hashes) - 1)
log.info("Greatest regression found: {0}".format(commit_hashes[result][:8]))
return 0
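# Typical invocation (illustrative only; the benchmark name below is a
# placeholder, and the range follows the `gitrevisions` syntax noted above):
#
#     asv find master~40..master benchmarks.TimeSuite.time_slow_case
#
# Add --invert / -i to search for a decrease in the benchmark value instead.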
|
|
# implements a factory to create codec instances for a given java charset
import codecs
from array import array
from functools import partial
from java.lang import StringBuilder
from java.nio import ByteBuffer, CharBuffer
from java.nio.charset import Charset, IllegalCharsetNameException
from StringIO import StringIO
python_to_java = {
'cp932': 'cp942',
'iso2022_jp': 'ISO-2022-JP',
'iso2022_jp_2': 'ISO-2022-JP-2',
'iso2022_kr': 'ISO-2022-KR',
'shift_jisx0213': 'x-SJIS_0213',
}
def _java_factory(encoding):
encoding = python_to_java.get(encoding, encoding)
supported = False
try:
supported = Charset.isSupported(encoding)
except IllegalCharsetNameException:
pass
if not supported:
return None, set()
charset = Charset.forName(encoding) # FIXME should we return this canonical name? could be best... TBD
entry = codecs.CodecInfo(
name=encoding,
encode=Codec(encoding).encode,
decode=Codec(encoding).decode,
incrementalencoder=partial(IncrementalEncoder, encoding=encoding),
incrementaldecoder=partial(IncrementalDecoder, encoding=encoding),
streamreader=partial(StreamReader, encoding=encoding),
streamwriter=partial(StreamWriter, encoding=encoding)
)
return entry, charset.aliases()
class Codec(object): # (codecs.Codec):
def __init__(self, encoding):
self.encoding = encoding
def decode(self, input, errors='strict', final=True):
error_function = codecs.lookup_error(errors)
input_buffer = ByteBuffer.wrap(array('b', input))
decoder = Charset.forName(self.encoding).newDecoder()
output_buffer = CharBuffer.allocate(min(max(int(len(input) / 2), 256), 1024))
builder = StringBuilder(int(decoder.averageCharsPerByte() * len(input)))
while True:
result = decoder.decode(input_buffer, output_buffer, False)
pos = output_buffer.position()
output_buffer.rewind()
builder.append(output_buffer.subSequence(0, pos))
if result.isUnderflow():
if final:
_process_incomplete_decode(self.encoding, input, error_function, input_buffer, builder)
break
_process_decode_errors(self.encoding, input, result, error_function, input_buffer, builder)
return builder.toString(), input_buffer.position()
def encode(self, input, errors='strict'):
error_function = codecs.lookup_error(errors)
# workaround non-BMP issues - need to get the exact count of chars, not codepoints
input_buffer = CharBuffer.allocate(StringBuilder(input).length())
input_buffer.put(input)
input_buffer.rewind()
encoder = Charset.forName(self.encoding).newEncoder()
output_buffer = ByteBuffer.allocate(min(max(len(input) * 2, 256), 1024))
builder = StringIO()
while True:
result = encoder.encode(input_buffer, output_buffer, True)
pos = output_buffer.position()
output_buffer.rewind()
builder.write(output_buffer.array()[0:pos].tostring())
if result.isUnderflow():
break
_process_encode_errors(self.encoding, input, result, error_function, input_buffer, builder)
return builder.getvalue(), len(input)
class NonfinalCodec(Codec):
def decode(self, input, errors='strict'):
return Codec.decode(self, input, errors, final=False)
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict', encoding=None):
assert encoding
self.encoding = encoding
self.errors = errors
self.encoder = Charset.forName(self.encoding).newEncoder()
self.output_buffer = ByteBuffer.allocate(1024)
def encode(self, input, final=False):
error_function = codecs.lookup_error(self.errors)
# workaround non-BMP issues - need to get the exact count of chars, not codepoints
input_buffer = CharBuffer.allocate(StringBuilder(input).length())
input_buffer.put(input)
input_buffer.rewind()
self.output_buffer.rewind()
builder = StringIO()
while True:
result = self.encoder.encode(input_buffer, self.output_buffer, final)
pos = self.output_buffer.position()
self.output_buffer.rewind()
builder.write(self.output_buffer.array()[0:pos].tostring())
if result.isUnderflow():
break
_process_encode_errors(self.encoding, input, result, error_function, input_buffer, builder)
return builder.getvalue()
class IncrementalDecoder(codecs.IncrementalDecoder):
def __init__(self, errors='strict', encoding=None,):
assert encoding
self.encoding = encoding
self.errors = errors
self.decoder = Charset.forName(self.encoding).newDecoder()
self.output_buffer = CharBuffer.allocate(1024)
self.buffer = ''
def decode(self, input, final=False):
error_function = codecs.lookup_error(self.errors)
input_array = array('b', self.buffer + str(input))
input_buffer = ByteBuffer.wrap(input_array)
builder = StringBuilder(int(self.decoder.averageCharsPerByte() * len(input)))
self.output_buffer.rewind()
while True:
result = self.decoder.decode(input_buffer, self.output_buffer, final)
pos = self.output_buffer.position()
self.output_buffer.rewind()
builder.append(self.output_buffer.subSequence(0, pos))
if result.isUnderflow():
if not final:
# Keep around any remaining input for next call to decode
self.buffer = input_array[input_buffer.position():input_buffer.limit()].tostring()
else:
_process_incomplete_decode(self.encoding, input, error_function, input_buffer, builder)
break
_process_decode_errors(self.encoding, input, result, error_function, input_buffer, builder)
return builder.toString()
def reset(self):
self.buffer = ""
def getstate(self):
return self.buffer or 0
def setstate(self, state):
self.buffer = state or ""
class StreamWriter(NonfinalCodec, codecs.StreamWriter):
def __init__(self, stream, errors='strict', encoding=None, ):
NonfinalCodec.__init__(self, encoding)
codecs.StreamWriter.__init__(self, stream, errors)
class StreamReader(NonfinalCodec, codecs.StreamReader):
def __init__(self, stream, errors='strict', encoding=None, ):
NonfinalCodec.__init__(self, encoding)
codecs.StreamReader.__init__(self, stream, errors)
def _process_decode_errors(encoding, input, result, error_function, input_buffer, builder):
if result.isError():
e = UnicodeDecodeError(
encoding,
input,
input_buffer.position(),
input_buffer.position() + result.length(),
'illegal multibyte sequence')
replacement, pos = error_function(e)
if not isinstance(replacement, unicode):
raise TypeError()
pos = int(pos)
if pos < 0:
pos = input_buffer.limit() + pos
if pos > input_buffer.limit():
raise IndexError()
builder.append(replacement)
input_buffer.position(pos)
def _process_incomplete_decode(encoding, input, error_function, input_buffer, builder):
if input_buffer.position() < input_buffer.limit():
e = UnicodeDecodeError(
encoding,
input,
input_buffer.position(),
input_buffer.limit(),
'illegal multibyte sequence')
replacement, pos = error_function(e)
if not isinstance(replacement, unicode):
raise TypeError()
pos = int(pos)
if pos < 0:
pos = input_buffer.limit() + pos
if pos > input_buffer.limit():
raise IndexError()
builder.append(replacement)
input_buffer.position(pos)
def _get_unicode(input_buffer, result):
return input_buffer.subSequence(0, result.length()).toString()
def _process_encode_errors(encoding, input, result, error_function, input_buffer, builder):
if result.isError():
e = UnicodeEncodeError(
encoding,
input,
input_buffer.position(),
input_buffer.position() + result.length(),
'illegal multibyte sequence')
replacement, pos = error_function(e)
if not isinstance(replacement, unicode):
raise TypeError()
pos = int(pos)
if pos < 0:
pos = input_buffer.limit() + pos
if pos > input_buffer.limit():
raise IndexError()
builder.write(str(replacement))
input_buffer.position(pos)
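if __name__ == '__main__':  # pragma: no cover
    # Minimal round-trip sketch, not part of the module: ask the factory for a
    # Java-backed codec and push an ASCII string through it. 'iso2022_jp' is
    # simply one of the encodings mapped above; any Charset supported by the
    # JVM would do.
    entry, aliases = _java_factory('iso2022_jp')
    if entry is not None:
        encoded, _ = entry.encode(u'hello codec')   # bytes via the Java encoder
        decoded, _ = entry.decode(encoded)          # back to unicode
        assert decoded == u'hello codec'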
|
|
import unittest
import numpy as np
import numpy.testing as npt
from .. import nodes, rv
class TestDiscrete(unittest.TestCase):
def setUp(self):
self.x1 = nodes.VNode("x1", rv.Discrete)
self.x2 = nodes.VNode("x2", rv.Discrete)
self.rv1 = rv.Discrete([0.6, 0.4], self.x1)
self.rv2 = rv.Discrete([0.2, 0.8], self.x2)
self.rv3 = rv.Discrete([[0.1, 0.2],
[0.3, 0.4]], self.x1, self.x2)
def test_equality(self):
self.assertEqual(self.rv1, self.rv1)
self.assertNotEqual(self.rv1, self.rv2)
def test_initialization(self):
with self.assertRaises(rv.ParameterException):
x = nodes.VNode("x", rv.Discrete)
rv.Discrete([[0.1, 0.2]], x)
def test_string(self):
s = str(self.rv1)
self.assertEqual(s, '[ 0.6, 0.4]')
s = str(self.rv3)
self.assertEqual(s, '[[ 0.1, 0.2],\n [ 0.3, 0.4]]')
@unittest.skip("Test case is not implemented.")
def test_addition(self):
# ToDo: ...
pass
@unittest.skip("Test case is not implemented.")
def test_subtraction(self):
# ToDo: ...
pass
def test_multiplication_1D_1(self):
res = np.array([0.36, 0.16])
res /= np.sum(res)
mul = self.rv1 * self.rv1
mul = mul.normalize()
npt.assert_almost_equal(mul.pmf, res)
self.assertEqual(mul.dim, (self.x1,))
def test_multiplication_2D_1(self):
res = np.array([[0.06, 0.12],
[0.12, 0.16]])
res /= np.sum(res)
mul = self.rv1 * self.rv3
mul = mul.normalize()
npt.assert_almost_equal(mul.pmf, res)
self.assertEqual(mul.dim, (self.x1, self.x2))
def test_multiplication_2D_2(self):
res = np.array([[0.06, 0.12],
[0.12, 0.16]])
res /= np.sum(res)
mul = self.rv3 * self.rv1
mul = mul.normalize()
npt.assert_almost_equal(mul.pmf, res)
self.assertEqual(mul.dim, (self.x1, self.x2))
def test_multiplication_2D_3(self):
res = np.array([[0.02, 0.16],
[0.06, 0.32]])
res /= np.sum(res)
mul = self.rv2 * self.rv3
mul = mul.normalize()
npt.assert_almost_equal(mul.pmf, res)
self.assertEqual(mul.dim, (self.x1, self.x2))
def test_multiplication_2D_4(self):
res = np.array([[0.02, 0.16],
[0.06, 0.32]])
res /= np.sum(res)
mul = self.rv3 * self.rv2
mul = mul.normalize()
npt.assert_almost_equal(mul.pmf, res)
self.assertEqual(mul.dim, (self.x1, self.x2))
def test_unit_element_1D(self):
rv0 = rv.Discrete.unity(self.x1)
self.assertEqual(self.rv1 * rv0, self.rv1)
rv0 = rv.Discrete([1, 1], self.x1)
self.assertEqual(self.rv1 * rv0, self.rv1)
def test_unit_element_2D(self):
rv0 = rv.Discrete.unity(self.x1, self.x2)
self.assertEqual(self.rv3 * rv0, self.rv3)
rv0 = rv.Discrete([[1, 1],
[1, 1]], self.x1, self.x2)
self.assertEqual(self.rv3 * rv0, self.rv3)
def test_marginalize(self):
res = np.array([0.4, 0.6])
res /= np.sum(res)
marginalize = self.rv3.marginalize(self.x1)
npt.assert_almost_equal(marginalize.pmf, res)
res = np.array([0.3, 0.7])
res /= np.sum(res)
marginalize = self.rv3.marginalize(self.x2)
npt.assert_almost_equal(marginalize.pmf, res)
def test_maximize(self):
res = np.array([0.3, 0.4])
res /= np.sum(res)
amax = self.rv3.maximize(self.x1)
npt.assert_almost_equal(amax.pmf, res)
res = np.array([0.2, 0.4])
res /= np.sum(res)
amax = self.rv3.maximize(self.x2)
npt.assert_almost_equal(amax.pmf, res)
def test_argmax(self):
self.assertEqual(self.rv1.argmax(), (0,))
self.assertEqual(self.rv3.argmax(), (1, 1))
self.assertEqual(self.rv3.argmax(self.x1), (1,))
@unittest.skip("Test case is not implemented.")
def test_log(self):
pass
class TestGaussian(unittest.TestCase):
def setUp(self):
self.x1 = nodes.VNode("x1", rv.Gaussian)
self.x2 = nodes.VNode("x2", rv.Gaussian)
self.rv1 = rv.Gaussian([[1]], [[2]], self.x1)
self.rv2 = rv.Gaussian([[3]], [[4]], self.x1)
self.rv3 = rv.Gaussian([[1], [2]], [[3, 4], [5, 6]], self.x1, self.x2)
self.rv4 = rv.Gaussian([[1], [4]], [[2, 0], [0, 8]], self.x1, self.x2)
def test_equality(self):
self.assertEqual(self.rv1, self.rv1)
self.assertNotEqual(self.rv1, self.rv2)
def test_initialization(self):
mean = np.array([[1], [2]])
cov = np.array([[3, 4], [5, 6]])
npt.assert_almost_equal(self.rv3.mean, mean)
npt.assert_almost_equal(self.rv3.cov, cov)
tmp = rv.Gaussian.inf_form(np.linalg.inv(cov),
np.dot(np.linalg.inv(cov), mean),
self.x1, self.x2)
self.assertEqual(self.rv3, tmp)
def test_string(self):
s = str(self.rv1)
self.assertEqual(s, '[[ 1.]]\n[[ 2.]]')
s = str(self.rv3)
self.assertEqual(s, '[[ 1.],\n [ 2.]]\n[[ 3., 4.],\n [ 5., 6.]]')
def test_addition_1D_1(self):
add = self.rv1 + self.rv2
res = rv.Gaussian([[4]], [[6]], self.x1)
self.assertEqual(add, res)
add += self.rv1
res = rv.Gaussian([[5]], [[8]], self.x1)
self.assertEqual(add, res)
def test_subtraction_1D_1(self):
sub = self.rv1 - self.rv2
res = rv.Gaussian([[-2]], [[-2]], self.x1)
self.assertEqual(sub, res)
sub -= self.rv1
res = rv.Gaussian([[-3]], [[-4]], self.x1)
self.assertEqual(sub, res)
def test_multiplication_1D_1(self):
mul = self.rv2 * self.rv2
res = rv.Gaussian([[3]], [[2]], self.x1)
self.assertEqual(mul, res)
mul *= self.rv1
res = rv.Gaussian([[2]], [[1]], self.x1)
self.assertEqual(mul, res)
def test_unit_element_1D(self):
rv0 = rv.Gaussian.unity(self.x1)
self.assertEqual(self.rv1 * rv0, self.rv1)
rv0 = rv.Gaussian([[0]], [[np.Inf]], self.x1)
self.assertEqual(self.rv1 * rv0, self.rv1)
def test_unit_element_2D(self):
rv0 = rv.Gaussian.unity(self.x1, self.x2)
self.assertEqual(self.rv3 * rv0, self.rv3)
rv0 = rv.Gaussian([[0], [0]],
[[np.Inf, 0], [0, np.Inf]],
self.x1, self.x2)
self.assertEqual(self.rv3 * rv0, self.rv3)
def test_marginalize(self):
res = self.rv1
marginalize = self.rv4.marginalize(self.x2)
self.assertEqual(marginalize, res)
def test_maximize(self):
res = self.rv1
amax = self.rv1.maximize()
self.assertEqual(amax, res)
res = self.rv4
amax = self.rv4.maximize()
self.assertEqual(amax, res)
def test_argmax(self):
npt.assert_almost_equal(self.rv1.argmax(), self.rv1.mean)
npt.assert_almost_equal(self.rv3.argmax(), self.rv3.mean)
npt.assert_almost_equal(self.rv3.argmax(self.x2),
np.atleast_2d(self.rv3.mean[0]))
@unittest.skip("Test case is not implemented.")
def test_log(self):
pass
if __name__ == "__main__":
unittest.main()
|
|
# -*- coding: utf-8 -*-
"""
carrier.py
"""
from decimal import Decimal
from suds import WebFault
from suds.client import Client
from suds.plugin import MessagePlugin
from trytond.pool import PoolMeta, Pool
from trytond.model import fields, ModelView
from trytond.pyson import Eval
from trytond.wizard import Wizard, StateView, Button
from trytond.transaction import Transaction
from logbook import Logger
log = Logger('shipping_dhl_de')
__all__ = ['Carrier', 'TestConnectionStart', 'TestConnection']
__metaclass__ = PoolMeta
STATES = {
'required': Eval('carrier_cost_method') == 'dhl_de',
'invisible': Eval('carrier_cost_method') != 'dhl_de'
}
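# The PYSON states above make the DHL (DE) fields below required, and visible
# at all, only while the carrier cost method is set to 'dhl_de'.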
class FixPrefix(MessagePlugin):
"""
Suds client plugin to fix prefixes
"""
def marshalled(self, context):
shipment_dd = context.envelope.getChild(
'Body'
).getChild('CreateShipmentDDRequest')
shipment_dd.getChild('Version').setPrefix('ns0')
shipment_details = shipment_dd.getChild('ShipmentOrder') \
.getChild('Shipment').getChild('ShipmentDetails')
shipment_details.getChild('EKP').setPrefix('ns0')
shipment_details.getChild('Attendance').getChild('partnerID') \
.setPrefix('ns0')
class Carrier:
"Carrier"
__name__ = "carrier"
dhl_de_username = fields.Char(
'Username', states=STATES, depends=['carrier_cost_method'],
help="EntwickerID"
)
dhl_de_password = fields.Char(
'Password', states=STATES, depends=['carrier_cost_method'],
help="Application Token (Production)\nPortal Password(Staging)"
)
dhl_de_api_user = fields.Char(
'API User', states=STATES, depends=['carrier_cost_method'],
help="Intraship-User"
)
dhl_de_api_signature = fields.Char(
'API Signature', states=STATES, depends=['carrier_cost_method'],
help="IntrashipPasswort"
)
dhl_de_account_no = fields.Char(
'Account Number', states=STATES, depends=['carrier_cost_method'],
help="DHL Account Number (14 digit)"
)
dhl_de_environment = fields.Selection([
('sandbox', 'Testing & Development (Sandbox)'),
('production', 'Production'),
], 'Environment', states=STATES, depends=['carrier_cost_method'])
def __init__(self, *args, **kwargs):
super(Carrier, self).__init__(*args, **kwargs)
self._dhl_de_version = None
self._dhl_de_client = None
@classmethod
def view_attributes(cls):
return super(Carrier, cls).view_attributes() + [
('//page[@id="dhl_de_config"]', 'states', {
'invisible': ~(Eval('carrier_cost_method') == 'dhl_de')
})]
@classmethod
def __setup__(cls):
super(Carrier, cls).__setup__()
cls._error_messages.update({
'dhl_de_test_conn_error':
"Error while testing credentials from DHL DE: \n\n%s",
'dhl_de_label_error':
"Error while generating label from DHL DE: \n\n%s"
})
selection = ('dhl_de', 'DHL (DE)')
if selection not in cls.carrier_cost_method.selection:
cls.carrier_cost_method.selection.append(selection)
cls._buttons.update({
'test_dhl_de_credentials': {},
})
cls.dhl_de_wsdl_url = "https://cig.dhl.de/cig-wsdls/com/dpdhl/wsdl/geschaeftskundenversand-api/1.0/geschaeftskundenversand-api-1.0.wsdl" # noqa
@staticmethod
def default_dhl_de_environment():
return 'sandbox'
def get_dhl_de_client(self):
"""
Return the DHL DE client with the username and password set
"""
if self._dhl_de_client is None:
location = 'https://cig.dhl.de/services/sandbox/soap'
if self.dhl_de_environment == 'production': # pragma: no cover
location = 'https://cig.dhl.de/services/production/soap'
client = Client(
self.dhl_de_wsdl_url,
username=self.dhl_de_username,
password=self.dhl_de_password,
location=location,
)
self._dhl_de_client = client
return self._dhl_de_client
def get_dhl_de_version(self):
if self._dhl_de_version is None:
client = self.get_dhl_de_client()
self._dhl_de_version = client.service.getVersion()
return self._dhl_de_version
def send_dhl_de_create_shipment_shipment_dd(self, shipment_orders):
"""
Send ShipmentDD Request
"""
version = self.get_dhl_de_version()
client = self.get_dhl_de_client()
client.set_options(soapheaders=[{
'user': self.dhl_de_api_user,
'signature': self.dhl_de_api_signature,
'type': 0,
}], plugins=[FixPrefix()])
try:
response = client.service.createShipmentDD(version, shipment_orders)
except WebFault, exc: # pragma: no cover
log.debug(client.last_sent())
log.debug(client.last_received())
self.raise_user_error(
'dhl_de_label_error', error_args=(exc.message, )
)
return response
@classmethod
@ModelView.button_action('shipping_dhl_de.wizard_test_connection')
def test_dhl_de_credentials(cls, carriers):
"""
Tests the connection. If there is a WebFault, raises an UserError
"""
if len(carriers) != 1: # pragma: no cover
cls.raise_user_error('Only one carrier can be tested at a time.')
client = carriers[0].get_dhl_de_client()
try:
client.service.getVersion()
except WebFault, exc: # pragma: no cover
cls.raise_user_error(
'dhl_de_test_conn_error', error_args=(exc.message, )
)
except Exception, exc: # pragma: no cover
if exc.args and isinstance(exc.args[0], tuple):
status, reason = exc.args[0]
if status == 401:
cls.raise_user_error('Invalid Credentials')
cls.raise_user_error(
'Status: %s\nReason: %s' % exc.args[0]
)
raise
def get_sale_price(self):
"""Estimates the shipment rate for the current shipment
        DHL DE does not provide a shipping cost, so shipping_cost will be 0.
:returns: A tuple of (value, currency_id which in this case is EUR)
"""
Currency = Pool().get('currency.currency')
Company = Pool().get('company.company')
if self.carrier_cost_method != 'dhl_de':
return super(Carrier, self).get_sale_price() # pragma: no cover
currency, = Currency.search([('code', '=', 'EUR')])
company = Transaction().context.get('company')
if company:
currency = Company(company).currency
return Decimal('0'), currency.id
class TestConnectionStart(ModelView):
"Test Connection"
__name__ = 'shipping_dhl_de.wizard_test_connection.start'
class TestConnection(Wizard):
"""
Test Connection Wizard
"""
__name__ = 'shipping_dhl_de.wizard_test_connection'
start = StateView(
'shipping_dhl_de.wizard_test_connection.start',
'shipping_dhl_de.wizard_test_connection_view_form',
[
Button('Ok', 'end', 'tryton-ok'),
]
)
|
|
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define API ManagedZones."""
import six
from google.api_core import page_iterator
from google.cloud._helpers import _rfc3339_to_datetime
from google.cloud.exceptions import NotFound
from google.cloud.dns.changes import Changes
from google.cloud.dns.resource_record_set import ResourceRecordSet
class ManagedZone(object):
"""ManagedZones are containers for DNS resource records.
See
https://cloud.google.com/dns/api/v1/managedZones
:type name: str
:param name: the name of the zone
:type dns_name: str
:param dns_name:
(Optional) the DNS name of the zone. If not passed, then calls to
:meth:`create` will fail.
:type client: :class:`google.cloud.dns.client.Client`
:param client: A client which holds credentials and project configuration
for the zone (which requires a project).
:type description: str
:param description:
(Optional) the description for the zone. If not passed, defaults to
the value of 'dns_name'.
"""
def __init__(self, name, dns_name=None, client=None, description=None):
self.name = name
self.dns_name = dns_name
self._client = client
self._properties = {}
if description is None:
description = dns_name
self.description = description
@classmethod
def from_api_repr(cls, resource, client):
"""Factory: construct a zone given its API representation
:type resource: dict
:param resource: zone resource representation returned from the API
:type client: :class:`google.cloud.dns.client.Client`
:param client: Client which holds credentials and project
configuration for the zone.
:rtype: :class:`google.cloud.dns.zone.ManagedZone`
:returns: Zone parsed from ``resource``.
"""
name = resource.get("name")
dns_name = resource.get("dnsName")
if name is None or dns_name is None:
raise KeyError(
"Resource lacks required identity information:" '["name"]["dnsName"]'
)
zone = cls(name, dns_name, client=client)
zone._set_properties(resource)
return zone
@property
def project(self):
"""Project bound to the zone.
:rtype: str
:returns: the project (derived from the client).
"""
return self._client.project
@property
def path(self):
"""URL path for the zone's APIs.
:rtype: str
        :returns: the path based on project and zone name.
"""
return "/projects/%s/managedZones/%s" % (self.project, self.name)
@property
def created(self):
"""Datetime at which the zone was created.
:rtype: ``datetime.datetime``, or ``NoneType``
:returns: the creation time (None until set from the server).
"""
return self._properties.get("creationTime")
@property
def name_servers(self):
"""Datetime at which the zone was created.
:rtype: list of strings, or ``NoneType``.
:returns: the assigned name servers (None until set from the server).
"""
return self._properties.get("nameServers")
@property
def zone_id(self):
"""ID for the zone resource.
:rtype: str, or ``NoneType``
:returns: the ID (None until set from the server).
"""
return self._properties.get("id")
@property
def description(self):
"""Description of the zone.
:rtype: str, or ``NoneType``
:returns: The description as set by the user, or None (the default).
"""
return self._properties.get("description")
@description.setter
def description(self, value):
"""Update description of the zone.
:type value: str
:param value: (Optional) new description
:raises: ValueError for invalid value types.
"""
if not isinstance(value, six.string_types) and value is not None:
raise ValueError("Pass a string, or None")
self._properties["description"] = value
@property
def name_server_set(self):
"""Named set of DNS name servers that all host the same ManagedZones.
Most users will leave this blank.
See
https://cloud.google.com/dns/api/v1/managedZones#nameServerSet
:rtype: str, or ``NoneType``
:returns: The name as set by the user, or None (the default).
"""
return self._properties.get("nameServerSet")
@name_server_set.setter
def name_server_set(self, value):
"""Update named set of DNS name servers.
:type value: str
:param value: (Optional) new title
:raises: ValueError for invalid value types.
"""
if not isinstance(value, six.string_types) and value is not None:
raise ValueError("Pass a string, or None")
self._properties["nameServerSet"] = value
def resource_record_set(self, name, record_type, ttl, rrdatas):
"""Construct a resource record set bound to this zone.
:type name: str
:param name: Name of the record set.
:type record_type: str
:param record_type: RR type
:type ttl: int
:param ttl: TTL for the RR, in seconds
:type rrdatas: list of string
:param rrdatas: resource data for the RR
:rtype: :class:`google.cloud.dns.resource_record_set.ResourceRecordSet`
:returns: a new ``ResourceRecordSet`` instance
"""
return ResourceRecordSet(name, record_type, ttl, rrdatas, zone=self)
def changes(self):
"""Construct a change set bound to this zone.
:rtype: :class:`google.cloud.dns.changes.Changes`
:returns: a new ``Changes`` instance
"""
return Changes(zone=self)
def _require_client(self, client):
"""Check client or verify over-ride.
:type client: :class:`google.cloud.dns.client.Client`
:param client:
(Optional) the client to use. If not passed, falls back to the
``client`` stored on the current zone.
:rtype: :class:`google.cloud.dns.client.Client`
:returns: The client passed in or the currently bound client.
"""
if client is None:
client = self._client
return client
def _set_properties(self, api_response):
"""Update properties from resource in body of ``api_response``
:type api_response: dict
:param api_response: response returned from an API call
"""
self._properties.clear()
cleaned = api_response.copy()
self.dns_name = cleaned.pop("dnsName", None)
if "creationTime" in cleaned:
cleaned["creationTime"] = _rfc3339_to_datetime(cleaned["creationTime"])
self._properties.update(cleaned)
def _build_resource(self):
"""Generate a resource for ``create`` or ``update``."""
resource = {"name": self.name}
if self.dns_name is not None:
resource["dnsName"] = self.dns_name
if self.description is not None:
resource["description"] = self.description
if self.name_server_set is not None:
resource["nameServerSet"] = self.name_server_set
return resource
def create(self, client=None):
"""API call: create the zone via a PUT request
See
https://cloud.google.com/dns/api/v1/managedZones/create
:type client: :class:`google.cloud.dns.client.Client`
:param client:
(Optional) the client to use. If not passed, falls back to the
``client`` stored on the current zone.
"""
client = self._require_client(client)
path = "/projects/%s/managedZones" % (self.project,)
api_response = client._connection.api_request(
method="POST", path=path, data=self._build_resource()
)
self._set_properties(api_response)
def exists(self, client=None):
"""API call: test for the existence of the zone via a GET request
See
https://cloud.google.com/dns/api/v1/managedZones/get
:type client: :class:`google.cloud.dns.client.Client`
:param client:
(Optional) the client to use. If not passed, falls back to the
``client`` stored on the current zone.
:rtype: bool
:returns: Boolean indicating existence of the managed zone.
"""
client = self._require_client(client)
try:
client._connection.api_request(
method="GET", path=self.path, query_params={"fields": "id"}
)
except NotFound:
return False
else:
return True
def reload(self, client=None):
"""API call: refresh zone properties via a GET request
See
https://cloud.google.com/dns/api/v1/managedZones/get
:type client: :class:`google.cloud.dns.client.Client`
:param client:
(Optional) the client to use. If not passed, falls back to the
``client`` stored on the current zone.
"""
client = self._require_client(client)
api_response = client._connection.api_request(method="GET", path=self.path)
self._set_properties(api_response)
def delete(self, client=None):
"""API call: delete the zone via a DELETE request
See
https://cloud.google.com/dns/api/v1/managedZones/delete
:type client: :class:`google.cloud.dns.client.Client`
:param client:
(Optional) the client to use. If not passed, falls back to the
``client`` stored on the current zone.
"""
client = self._require_client(client)
client._connection.api_request(method="DELETE", path=self.path)
def list_resource_record_sets(self, max_results=None, page_token=None, client=None):
"""List resource record sets for this zone.
See
https://cloud.google.com/dns/api/v1/resourceRecordSets/list
:type max_results: int
        :param max_results: maximum number of record sets to return. If not
                            passed, defaults to a value set by the API.
:type page_token: str
        :param page_token: opaque marker for the next "page" of record sets.
                           If not passed, the API will return the first page
                           of record sets.
:type client: :class:`google.cloud.dns.client.Client`
:param client:
(Optional) the client to use. If not passed, falls back to the
``client`` stored on the current zone.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of :class:`~.resource_record_set.ResourceRecordSet`
belonging to this zone.
"""
client = self._require_client(client)
path = "/projects/%s/managedZones/%s/rrsets" % (self.project, self.name)
iterator = page_iterator.HTTPIterator(
client=client,
api_request=client._connection.api_request,
path=path,
item_to_value=_item_to_resource_record_set,
items_key="rrsets",
page_token=page_token,
max_results=max_results,
)
iterator.zone = self
return iterator
def list_changes(self, max_results=None, page_token=None, client=None):
"""List change sets for this zone.
See
        https://cloud.google.com/dns/api/v1/changes/list
:type max_results: int
        :param max_results: maximum number of changes to return. If not
                            passed, defaults to a value set by the API.
:type page_token: str
        :param page_token: opaque marker for the next "page" of changes. If
                           not passed, the API will return the first page of
                           changes.
:type client: :class:`google.cloud.dns.client.Client`
:param client:
(Optional) the client to use. If not passed, falls back to the
``client`` stored on the current zone.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of :class:`~.changes.Changes`
belonging to this zone.
"""
client = self._require_client(client)
path = "/projects/%s/managedZones/%s/changes" % (self.project, self.name)
iterator = page_iterator.HTTPIterator(
client=client,
api_request=client._connection.api_request,
path=path,
item_to_value=_item_to_changes,
items_key="changes",
page_token=page_token,
max_results=max_results,
)
iterator.zone = self
return iterator
def _item_to_resource_record_set(iterator, resource):
"""Convert a JSON resource record set value to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that has retrieved the item.
:type resource: dict
:param resource: An item to be converted to a resource record set.
:rtype: :class:`~.resource_record_set.ResourceRecordSet`
:returns: The next resource record set in the page.
"""
return ResourceRecordSet.from_api_repr(resource, iterator.zone)
def _item_to_changes(iterator, resource):
"""Convert a JSON "changes" value to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that has retrieved the item.
:type resource: dict
:param resource: An item to be converted to a "changes".
:rtype: :class:`.Changes`
:returns: The next "changes" in the page.
"""
return Changes.from_api_repr(resource, iterator.zone)
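if __name__ == "__main__":  # pragma: no cover
    # Minimal usage sketch, not part of the library: the project id, zone
    # name and record data below are placeholders. It exercises the public
    # methods defined above: create(), resource_record_set() and changes().
    from google.cloud import dns

    client = dns.Client(project="my-project")
    zone = client.zone("example-zone", "example.com.")
    if not zone.exists():
        zone.create()
    record = zone.resource_record_set("www.example.com.", "A", 3600, ["10.0.0.1"])
    changes = zone.changes()
    changes.add_record_set(record)
    changes.create()  # submit the change set to the API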
|
|
import os
import re
import numpy
import datetime
import json
import logging
import netCDF4
import cmor
import cmor_utils
import cmor_source
import cmor_target
import cmor_task
import cdo
from ece2cmor3 import cdoapi
import Ngl
import warnings
# Logger object
log = logging.getLogger(__name__)
# Experiment name
exp_name_ = None
# Table root
table_root_ = None
# Files that are being processed in the current execution loop.
tm5_files_ = []
# Dictionary of horizontal dimensions (lat/lon) with cmor axis id.
dim_ids_ = {}
# Dictionary of depth axes with cmor axis id.
depth_axes_ = {}
# Dictionary of output frequencies with cmor time axis id.
time_axes_ = {}
# Dictionary of extra type axes (e.g. lambda550nm) with cmor axis id.
type_axes_ = {}
ps_tasks = {}
time_axis_ids = {}
type_axis_ids = {}
depth_axis_ids = {}
zfactor_ids = {}
# Reference date, times will be converted to hours since refdate
ref_date_ = None
unit_miss_match =[]
failed = []
areacella_=0
# Default pressure levels if not provided (Pa)
plev19_ = numpy.array([100000., 92500., 85000., 70000., 60000., 50000., 40000., 30000., 25000., 20000., 15000., 10000., 7000., 5000., 3000., 2000., 1000., 500., 100.])
plev39_ = numpy.array([100000., 92500., 85000., 70000., 60000., 50000., 40000.,
30000., 25000., 20000., 17000., 15000., 13000., 11500.,
10000., 9000., 8000., 7000., 5000., 3000., 2000.,
1500., 1000., 700., 500., 300., 200., 150.,
100., 70., 50., 40., 30., 20., 15.,
10., 7., 5., 3.])
extra_axes = {"lambda550nm": {"ncdim": "lambda550nm",
"ncunits": "nm",
"ncvals": [550.0]}}
#
ignore_frequency=['subhrPt','3hrPt']
ps6hrpath_=None
#using_grid_=False
path_=None
# Initializes the processing loop.
def initialize(path,expname,tabledir, prefix,refdate):
"""initialize the cmorization for TM5
Description:
Input variables:
path, String: path to TM5 files
expname, string: name of the experiment
tabledir, string: path to tables
prefix, string: table prefix
Returns:
boolean: success
"""
global log,tm5_files_,exp_name_,table_root_,ref_date_,plev39_,plev19_,areacella_,path_
exp_name_ = expname
path_ = path
table_root_ =os.path.join(tabledir, prefix)
# select all TM5 files with expname from path
tm5_files_ = cmor_utils.find_tm5_output(path,expname)
if len(tm5_files_) == 0:
log.error('no TM5 variables found, exiting!')
exit()
areacella_file = cmor_utils.find_tm5_output(path,expname,'areacella','fx')
if len(areacella_file) == 0:
log.error('Areacella not found!')
exit()
else:
areacella_=netCDF4.Dataset(areacella_file[0],'r').variables['areacella'][:]
cal = None
ref_date_ = refdate
# read pressure level definitions from the CMIP6_coordinate file
# and save globally
coordfile = os.path.join(tabledir, prefix + "_coordinate.json")
if os.path.exists(coordfile):
with open(coordfile) as f:
data = json.loads(f.read())
axis_entries = data.get("axis_entry", {})
axis_entries = {k.lower(): v for k, v in axis_entries.iteritems()}
plev19=numpy.array([numpy.float(value) for value in axis_entries['plev19']['requested']])
plev19_=plev19
plev39=numpy.array([numpy.float(value) for value in axis_entries['plev39']['requested']])
plev39_=plev39
else:
log.warning('Using default pressure level definitions')
cmor.load_table(table_root_ + "_grids.json")
return True
# Resets the module globals.
def finalize():
"""finalize, clear variables
Args:
none
Returns:
none
"""
global tm5_files_,dim_ids_,depth_axes_,time_axes_,plev19_,plev39_
log.info('Unit mismatch variables %s'%(unit_miss_match))
tm5_files_ = []
dim_ids_ = {}
depth_axes_ = {}
time_axes_ = {}
plev39_ = []
plev19_ = []
def set_freqid(freq):
"""set freqid for filenames
Args:
freq (string): table frequency for which the freqid is set
Returns:
freqid (string): Freqid for AERchemMIP data
"""
if freq=='monC':
freqid='AERmon'
elif freq=='1hr':
freqid='AERhr'
elif freq=='day':
freqid='AERday'
elif freq=='6hrPt':
freqid='AER6hr'
elif freq=='mon':
freqid='AERmon'
else:
log.error('unknown frequency %s'%freq)
return None
return freqid
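# Example (illustrative): set_freqid('day') returns 'AERday', the freqid used to
# match TM5 output file names such as "o3_AERday_*.nc"; the exact file-name pattern
# is an assumption based on how find_tm5_output and execute() select files below.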
def check_freqid(task):
""" Check if we freqid will be cmorized and fix teh freqid for special cases
Args:
task (cmor.task): task for which we are checking
Returns:
boolean: True if task will be cmorized
freqid (string): name of frequency in files
"""
global log
freqid=set_freqid(task.target.frequency)
if task.target.frequency=='monC':
if task.target.table=='Amon' and (task.target.variable=='pfull' or task.target.variable=='phalf'):
task.set_failed()
log.info('Variable %s in table %s will be produced by IFS'%(task.target.variable,task.target.table))
return False,None
elif task.target.frequency in ignore_frequency:
log.info('frequency %s ignored, no data produced at this frequency'%task.target.frequency)
#continue
return False,None
elif task.target.table=='AERmonZ':
freqid=freqid+'Z'
elif freqid==None:
log.error('Frequency %s of variable %s is unknown'%(task.target.frequency,task.target.variable))
return False,None
return True,freqid
# Executes the processing loop.
def execute(tasks):
"""execute the cmorization tasks for TM5
Description:
Args:
tasks (list): list of tasks
Returns:
boolean: success
"""
global log,time_axes_,depth_axes_,table_root_,tm5_files_,areacella_,using_grid_,ps_tasks
log.info("Executing %d tm5 tasks..." % len(tasks))
log.info("Cmorizing tm5 tasks...")
#Assign file to each task
for task in tasks:
setattr(task,cmor_task.output_path_key,None)
if task.target.frequency=='fx':
log.info('fx frequency has no variables from TM5')
task.set_failed()
continue
elif task.target.frequency=='monC':
if 'Clim' in task.target.variable:
log.info('Variable %s in table %s is climatological variable and thus not available in TM5.'%(task.target.variable,task.target.table))
task.set_failed()
continue
elif task.target.table=='Amon' and (task.target.variable=='pfull' or task.target.variable=='phalf'):
task.set_failed()
log.info('Variable %s in table %s will be produced by IFS'%(task.target.variable,task.target.table))
continue
elif task.target.frequency in ignore_frequency:
log.info('frequency %s ignored, no data produced at this frequency'%task.target.frequency)
continue
elif 'Clim' in task.target.variable:
log.info("Climatological variables not supported")
task.set_failed()
continue
success,freqid=check_freqid(task)
if not success:
task.set_failed()
log.info('Frequency %s for task %s not available.'%(task.target.frequency,task.target.variable))
continue
for fstr in tm5_files_:
# only select files which start with the variable name followed by '_' (e.g. o3 != o3loss)
# and where the freqid is followed by '_' (e.g. monZ != mon)
# matching variablename + '_' prevents o3 and o3loss from being mixed up...
if os.path.basename(fstr).startswith(task.source.variable()+"_") and freqid+'_' in fstr :
fname=fstr
if getattr(task,cmor_task.output_path_key) == None:
setattr(task,cmor_task.output_path_key,fstr)
else:
log.critical('Second file with same frequency and name. Currently supporting only one year per directory.')
log.critical(fstr)
exit(' Exiting ece2cmor.')
if not os.path.exists(fstr):
log.info('No path found for variable %s from TM5'%(task.target.variable))
task.set_failed()
continue
ps_tasks=get_ps_tasks(tasks)
# group the tasks according to table
taskdict = cmor_utils.group(tasks,lambda t:t.target.table)
for table,tasklist in taskdict.iteritems():
try:
log.info("Loading CMOR table %s to process %d variables..." % (table,len(tasklist)))
tab_id = cmor.load_table("_".join([table_root_, table]) + ".json")
cmor.set_table(tab_id)
except Exception as e:
log.error("ERR -6: CMOR failed to load table %s, skipping variables %s. Reason: %s"
% (table, ','.join([tsk.target.variable for tsk in tasklist]), e.message))
continue
# #postprocess data to zonal mean and plev39, or do it before this point
if table == '3hr' :
for task in tasklist:
task.set_failed()
log.error("Table %s will not be implemented for TM5" %(table))
log.error("ERR -6: Skipping variable %s not implemented" %(task.target.variable))
continue
#postprocess data to zonal mean and plev39, or do it before this point
if table== 'Eday':
log.info("Table Eday not supported for variable %s "%(task.target.variable))
log.info("Creating longitude and latitude axes for table %s..." % table)
dim_ids_['lat']=create_lat()
dim_ids_['lon']=create_lon()
# create or assign time axes to tasks
log.info("Creating time axes for table %s..." % table)
#create_time_axes(tasklist)
time_axes_=create_time_axes(tasklist)#time_axes
taskmask = dict([t,False] for t in tasklist)
for task in tasklist:
#define task properties
#2D grid
if task.target.variable=='ch4Clim' or task.target.variable=='ch4globalClim' or task.target.variable=='o3Clim':
log.error('ERR -8: Task for %s is not produced in any of the simulations with TM5.'%task.target.variable)
task.set_failed()
continue
ncf=getattr(task,cmor_task.output_path_key)
tgtdims = getattr(task.target, cmor_target.dims_key).split()
if "latitude" in tgtdims and "longitude" in tgtdims:
setattr(task, 'lon', dim_ids_['lon'])
setattr(task, 'lat', dim_ids_['lat'])
#ZONAL
if "latitude" in tgtdims and not "longitude" in tgtdims:
setattr(task, "zonal", True)
if "site" in tgtdims:
log.critical('Z-dimension site not implemented ')
task.set_failed()
continue
if task.status==cmor_task.status_failed:
continue
create_depth_axes(task)
if 'lambda550nm' in tgtdims :
success=create_type_axes(task)
if not success:
log.error('Lambda 550nm could not be created, setting task failed')
task.set_failed()
continue
if(taskmask[task] ):
log.warning("Ignoring source variable in nc file %s, since it has already been cmorized." % ncf)
else:
if task.status not in [cmor_task.status_failed]:
log.info("Cmorizing source variable %s to target variable %s from file %s." % (task.source.variable(),task.target.variable,ncf))
execute_netcdf_task(task,tab_id)
if task.status<0:
if task.target.variable=='cdnc':
log.error("ERR -10: Cmorizing failed for %s, but variable is produced by IFS." % (task.target.variable))
elif task.target.variable=='o3Clim':
log.error("ERR -11: Cmorizing failed for %s, check tm5par.json since source will be o3 instead of %s." % (task.target.variable, task.source.variable()))
elif task.target.variable=='phalf':
log.error("ERR -11: Cmorizing failed for %s, but variable is produced by IFS." % (task.target.variable))
elif task.target.variable=='ch4Clim' or task.target.variable=='ch4global' or task.target.variable=='ch4globalClim':
log.error("ERR -12: Cmorizing failed for %s, check tm5par.json since source will be ch4 instead of %s." % (task.target.variable, task.source.variable()))
else:
log.error("ERR -13: Cmorizing failed for %s" % (task.target.variable))
else:
taskmask[task] = True
else:
log.info("Skipping variable %s for unknown reason..." % (task.source.variable()))
for task,executed in taskmask.iteritems():
if(not executed):
log.error("ERR -14: The source variable %s of target %s in table %s failed to cmorize" % (task.source.variable(),task.target.variable,task.target.table))
failed.append([task.target.variable,task.target.table])
if len(unit_miss_match)>0:
log.info('Unit problems: %s'% unit_miss_match)
if len(failed)>0:
for ifail in failed:
log.info('Cmorization failed for : %s'%ifail)
# Performs a single task.
def execute_netcdf_task(task,tableid):
"""excute task for netcdf data
Args:
task (cmor.task): task which will be handled
tableid (cmor.table): table which will have this task
Returns:
boolean: success of writing a variable
"""
global log,dim_ids_,depth_axes_,time_axes_,areacella_
interpolate_to_pressure=False
task.status = cmor_task.status_cmorizing
filepath = getattr(task, cmor_task.output_path_key, None)
if not filepath:
log.error("ERR -15: Could not find file containing data for variable %s in table %s" % (task.target.variable,task.target.table))
task.set_failed()
return
store_var = getattr(task, "store_with", None)
if( task.target.dims >= 3):
if ('lon' in dim_ids_ and 'lat' in dim_ids_):
axes = [dim_ids_['lat'],dim_ids_['lon']]
else:
dim_ids_['lat']=create_lat()
dim_ids_['lon']=create_lon()
axes=[dim_ids_['lat'],dim_ids_['lon']]
if hasattr(task, "z_axis_id"):
axes.append(getattr(task, "z_axis_id"))
checkaxes=getattr(task.target, cmor_target.dims_key).split()
if 'plev19' in checkaxes:
interpolate_to_pressure=True
elif 'plev39' in checkaxes:
interpolate_to_pressure=True
# listed explicitly just to check that all expected axes are present
elif 'alevel' in checkaxes:
interpolate_to_pressure=False
elif 'alevhalf' in checkaxes:
interpolate_to_pressure=False
else:
log.error('ERR -16: unknown dimension in z_axis_id')
else:
log.error('ERR -17: No z_axis_id found.')
elif ( task.target.dims == 2):
if task.target.table=='AERmonZ':
'''
2D Zonal lat+lev
'''
#cmor.load_table(table_root_ + "_coordinate.json")
if 'lat' in dim_ids_:
axes=[dim_ids_['lat']]
else:
dim_ids_['lat']=create_lat()
axes=[dim_ids_['lat']]
# zonal variables...
#needs lat only, no grid....
if hasattr(task, "z_axis_id"):
axes.append(getattr(task, "z_axis_id"))
if 'plev19' in getattr(task.target, cmor_target.dims_key).split():
interpolate_to_pressure=True
elif 'plev39' in getattr(task.target, cmor_target.dims_key).split():
interpolate_to_pressure=True
elif not hasattr(task, "z_axis_id"):
'''
2D variables lon+lat
'''
if not ('lon' in dim_ids_ and 'lat' in dim_ids_):
dim_ids_['lat']=create_lat()
dim_ids_['lon']=create_lon()
axes=[dim_ids_['lat'],dim_ids_['lon']]
else:
axes = [dim_ids_['lat'],dim_ids_['lon']]
else:
log.error('ERR -18: unsupported 2D dimensions %s'%task.target.dims)
exit('Exiting!')
elif task.target.dims==0:
axes=[]
else:
log.error('ERR -19: unsupported dimensions %s for variable %s'%(task.target.dims,task.target.variable))
exit()
time_id = getattr(task, "time_axis", 0)
if time_id != 0:
axes.append(time_id)
for key in type_axes_:
if key[0]==task.target.table and key[1] in getattr(task.target, cmor_target.dims_key):
axes.append(type_axes_[key])
try:
dataset = netCDF4.Dataset(filepath, 'r')
except Exception as e:
log.error("ERR -20: Could not read netcdf file %s while cmorizing variable %s in table %s. Cause: %s" % (
filepath, task.target.variable, task.target.table, e.message))
return
varid = create_cmor_variable(task,dataset,axes)
if varid <0:
return False
## for pressure level variables we need to do interpolation, for which we need
## pyngl module
if interpolate_to_pressure:
psdata=get_ps_var(getattr(getattr(task,'ps_task',None),cmor_task.output_path_key,None))
pressure_levels=getattr(task,'pressure_levels')
ncvar=interpolate_plev(pressure_levels,dataset,psdata,task.source.variable())
ncvar[ncvar>1e20]=numpy.nan
else:
ncvar = dataset.variables[task.source.variable()]
# handle zonal vars
if task.target.table=='AERmonZ':
# assumption: data is shaped [time,lev,lat,lon] with longitude as the last dimension
vals=numpy.copy(ncvar[:])
# zonal mean so mean over longitudes
with warnings.catch_warnings():
warnings.filterwarnings('ignore', '.*Mean of empty slice.*',)
vals=numpy.nanmean(vals,axis=-1)
# change shape, swap lat<->lev
vals=numpy.swapaxes(vals,1,2)
missval = getattr(task.target, cmor_target.missval_key, 1.e+20)
ncvar=vals.copy()
#handle global means
elif task.target.dims==0:
# global means
missval = getattr(task.target, cmor_target.missval_key, 1.e+20)
vals=numpy.copy(ncvar[:])
if task.target.variable=='co2mass':
# calculate area-weighted sum
vals=numpy.sum((vals*areacella_[numpy.newaxis,:,:]),axis=(1,2))
else:
# calculate area-weighted mean
vals=numpy.mean(vals,axis=(1))
vals=numpy.sum((vals*areacella_[numpy.newaxis,:,:]),axis=(1,2))/numpy.sum(areacella_)
ncvar=vals.copy()
#handle normal case
else:# assumption: data is shape [time,lat,lon]; we need to roll the longitude dimension so that
# the data corresponds to the dimension definition of the tables (from [-180,180] to [0,360] deg),
# i.e. roll by half the longitude dimension
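# (illustrative) on the 3-degree TM5 grid with 120 longitudes, nroll is 60: the eastern
# half (0..180 deg) moves to the front and the western half (-180..0 deg, i.e. 180..360 deg)
# to the back, so the columns run 1.5..358.5 deg east, matching create_lon() below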
missval = getattr(task.target, cmor_target.missval_key, 1.e+20)
vals=numpy.copy(ncvar[:])
dims = numpy.shape(vals)
nroll=dims[-1]/2
ncvar = numpy.roll(vals,nroll,len(dims)-1)
vals=numpy.copy(ncvar[:,:,:])
# Default values
factor = 1.0
term=0.0
timdim=0
#check for missing values
if numpy.isnan(ncvar).any():
nanmask=~numpy.isnan(ncvar)
else:
nanmask=None
# 3D variables need the surface pressure for calculating the pressure at model levels
if store_var:
#get the ps-data associated with this data
psdata=get_ps_var(getattr(getattr(task,'ps_task',None),cmor_task.output_path_key,None))
# roll psdata like the original
psdata=numpy.roll(psdata[:],nroll,len(numpy.shape(psdata[:]))-1)
cmor_utils.netcdf2cmor(varid, ncvar, timdim, factor, term, store_var, psdata,
swaplatlon=False, fliplat=True, mask=nanmask,missval=missval)
else:
cmor_utils.netcdf2cmor(varid, ncvar, timdim, factor, term, store_var, None,
swaplatlon=False, fliplat=True, mask=nanmask,missval=missval)
cmor.close(varid)
if store_var:
cmor.close(store_var)
task.status = cmor_task.status_cmorized
# Creates a variable in the cmor package
def create_cmor_variable(task,dataset,axes):
""" Create cmor variable object
Args:
task (cmor.task): task for which we are creating a variable object
dataset (netcdf-dataset): netcdf dataset containing the data for TM5 for this variable
axes (list): list of axes ids for creation of cmor.variable object
Returns:
cmor.variable object: object identifier of created variable
"""
srcvar = task.source.variable()
ncvar = dataset.variables[srcvar]
unit = getattr(ncvar,"units",None)
if unit != getattr(task.target,"units"):
if unit=='mole mole-1':
# files have mole mole-1 but should be mol mol-1
unit = getattr(task.target,"units")
elif srcvar=='toz'or srcvar=='tropoz':
# unit is just different
if unit=='DU':
setattr(task,cmor_task.conversion_key,1e-5)
unit = getattr(task.target,"units")
elif srcvar=='co2mass':
# co2mass gets converted to area sum
unit = getattr(task.target,"units")
else:
unit_miss_match.append(task.target.variable)
log.error("ERR -21: unit miss match, variable %s" % (task.target.variable))
return task.set_failed()
if((not unit) or hasattr(task,cmor_task.conversion_key)): # Explicit unit conversion
unit = getattr(task.target,"units")
if(hasattr(task.target,"positive") and len(task.target.positive) != 0):
return cmor.variable(table_entry = str(task.target.variable),units = str(unit),axis_ids = axes,original_name = str(srcvar),positive = "down")
else:
return cmor.variable(table_entry = str(task.target.variable),units = str(unit),axis_ids = axes,original_name = str(srcvar))
def interpolate_plev(pressure_levels,dataset,psdata,varname):
"""interpolate pressure levels
args:
pressure_levels (numpy array): output pressure levels
dataset(netcdf-dataset): input data for variable
psdata (numpy-array): data for pressure at surface
varname (string): name of variable for reading in the data
Returns:
interpolated_data (numpy-array): intepolated data in give pressure levels
"""
####
# Interpolate data from model levels to pressure levels
# pressure_levels defines the pressure levels
# Based on pyngl example:
# https://www.pyngl.ucar.edu/Examples/Scripts/vinth2p.py
####
# Reference pressure 1e5 Pa in TM5, here in hPa
p0mb=1000
# Vertical coordinate must be from top to bottom: [::-1]
hyam = dataset.variables["hyam"][:]
# Vertical interpolation routine expects the formula a*p0 + b*ps,
# TM5 has a + b*ps, so change a -> a*p0 by dividing a by the TM5 reference pressure p0=1e5 Pa (1000 hPa)
hyam = hyam[::-1]/(100000)
# Vertical coordinate must be from top to bottom: [::-1]
hybm = dataset.variables["hybm"][:]
hybm = hybm[::-1]
# Vertical coordinate must be from top to bottom: [::-1]
data = dataset.variables[varname][:,:,:,:]
data = data[:,::-1,:,:]
interpolation=1 #1 linear, 2 log, 3 loglog
# divide pressure_levels by 100 to get in mb
interpolated_data = Ngl.vinth2p(data,hyam,hybm,pressure_levels/100,psdata[:,:,:],interpolation,p0mb,1,False)
return interpolated_data
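# Illustrative call (assumption: dataset and psdata come from matching TM5 output files,
# as wired up in execute_netcdf_task above):
#   interpolated = interpolate_plev(plev19_, dataset, psdata, 'o3')
# which yields an array shaped [time, len(plev19_), lat, lon].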
# Creates time axes in cmor and attaches the ids as attributes to the tasks
def create_time_axes(tasks):
""" Create time axes for all tasks
Args:
tasks (list): list of tasks for which time axes need to be created
Returns:
time_axes (dictionary): dictionary of time axes, with table+dimension combination as key
"""
global log#,time_axes_
time_axes = {}
for task in tasks:
freq=task.target.frequency
tgtdims = getattr(task.target, cmor_target.dims_key)
if getattr(task, cmor_task.output_path_key)==None:
continue
for time_dim in [d for d in list(set(tgtdims.split())) if d.startswith("time")]:
key=(task.target.table,time_dim)
if key in time_axes:
tid = time_axes[key]
else:
time_operator = getattr(task.target, "time_operator", ["point"])
log.info("Creating time axis using variable %s..." % task.target.variable)
tid = create_time_axis(path=getattr(task, cmor_task.output_path_key),
name=time_dim, has_bounds=(time_operator != ["point"]))
time_axes[key] = tid
setattr(task, "time_axis", tid)
break
return time_axes
# Creates a time axis for the corresponding table (which is supposed to be loaded)
def create_time_axis(path,name,has_bounds):
""" creage time axis for a give frequency
Args:
path (string): full path to netcdf file with this freq
name (string): name of the time dimension
has_bounds (boolean): true if it has bounds
Returns:
cmor.axis-object: time axis object with given freq
"""
global log,ref_date_
vals = None
units = None
ds = None
#
ncfile=path
refdate = ref_date_
try:
ds = netCDF4.Dataset(ncfile)
timvar = ds.variables["time"]
tm5unit = ds.variables["time"].units
vals = timvar[:]
units = getattr(timvar,"units")
if has_bounds:
bnds = getattr(timvar,"bounds")
bndvar = ds.variables[bnds]
except:
ds.close()
tm5refdate=datetime.datetime.strptime(tm5unit,"days since %Y-%m-%d %H:%M:%S")
# delta days for change of reftime
diff_days= (refdate-tm5refdate).total_seconds()/86400
vals=vals-diff_days
if has_bounds:
bndvar2=numpy.zeros_like(bndvar)
bndvar2[:,0]=bndvar[:,0]-int(diff_days)
bndvar2[:,1]=bndvar[:,1]-int(diff_days)
bndvar=bndvar2
# hourly bounds can have overlap by -1e-14, which is caught by cmor library as an error
# so just correct it always
for i in range(numpy.shape(bndvar2)[0]-1):
bndvar2[i+1,0]=bndvar2[i,1]
if(len(vals) == 0 or units == None):
log.error("ERR -22: No time values or units could be read from tm5 output files %s" % str(path))
return -1
units="days since " + str(ref_date_)
####
if has_bounds:
return cmor.axis(table_entry = str(name), units=units, coord_vals = vals,cell_bounds = bndvar[:,:])
else:
return cmor.axis(table_entry = str(name), units=units, coord_vals = vals)
def create_type_axes(task):
""" create type axes(lambda 550nm only for the moment)
Args:
task (cmor.task-object): task for which type axes will be created
Returns:
Boolean: if succesful creation
"""
global type_axes_
table=task.target.table
key = (table,'lambda550nm')
if key not in type_axes_:
type_axes_[key] = {}
filepath= getattr(task,cmor_task.output_path_key)
log.info("Creating extra axes for table %s using file %s..." % (table, filepath))
table_type_axes = type_axes_[key]
tgtdims = set(getattr(task.target, cmor_target.dims_key).split()).intersection(extra_axes.keys())
for dim in tgtdims:
if dim == 'lambda550nm':
ncunits=extra_axes['lambda550nm']['ncunits']
ncvals=extra_axes['lambda550nm']['ncvals']
ax_id = cmor.axis(table_entry="lambda550nm", units=ncunits, coord_vals=ncvals)
setattr(task, "lambda_axis", ax_id)
type_axes_[key]=ax_id
else:
log.info("Unknown dimenstion %s in table %s." %(dim,table))
return False
return True
def create_depth_axes(task):
""" create depth axes
Args:
task (cmor.task-object): task for which the depth axes are created
Returns:
boolean: is creation successful or not
"""
global log,depth_axis_ids,zfactor_ids
tgtdims = getattr(task.target, cmor_target.dims_key)
# zdims all other than xy
# including lambda550nm...
zdims = getattr(task.target, "z_dims", [])
if len(zdims) == 0:
return False
if len(zdims) > 1:
log.error("ERR -23: Skipping variable %s in table %s with dimensions %s with multiple z-directions." % (
task.target.variable, task.target.table, tgtdims))
task.set_failed()
return False
zdim=str(zdims[0])
key = (task.target.table, zdim)
if key not in depth_axis_ids:
log.info("Creating vertical axis %s for table %s..." % (zdim,task.target.table))
if key in depth_axis_ids:
#setattr(task, "z_axis_id", depth_axis_ids[zdim])
if zdim == "alevel":
setattr(task, "z_axis_id", depth_axis_ids[key][0])
setattr(task, "store_with", depth_axis_ids[key][1])
elif zdim == "alevhalf":
setattr(task, "z_axis_id", depth_axis_ids[key][0])
setattr(task, "store_with", depth_axis_ids[key][1])
elif zdim == "plev19":
setattr(task, "z_axis_id", depth_axis_ids[key])
setattr(task, "pressure_levels", plev19_)
elif zdim == "plev39":
setattr(task, "z_axis_id", depth_axis_ids[key])
setattr(task, "pressure_levels", plev39_)
else:
setattr(task, "z_axis_id", depth_axis_ids[key])
return True
elif zdim == 'alevel':
log.info("Creating model full level axis for variable %s..." % task.target.variable)
axisid, psid = create_hybrid_level_axis(task)
depth_axis_ids[key] = (axisid, psid)
if key[0] not in zfactor_ids:
zfactor_ids[key[0]] =psid
setattr(task, "z_axis_id", axisid)
setattr(task, "store_with", psid)
return True
elif zdim == 'alevhalf':
#if zdim not in depth_axis_ids:
log.info("Creating model half level axis for variable %s..." % task.target.variable)
axisid, psid = create_hybrid_level_axis(task,'alevhalf')
depth_axis_ids[key] = (axisid, psid)
if key[0] not in zfactor_ids:
zfactor_ids[key[0]] =psid
setattr(task, "z_axis_id", axisid)
setattr(task, "store_with", psid)
return True
elif zdim=="lambda550nm":
log.info("Creating wavelength axis for variable %s..." % task.target.variable)
axisid=cmor.axis(table_entry = zdim,units ="nm" ,coord_vals = [550.0])
depth_axis_ids[key]=axisid
setattr(task, "z_axis_id", axisid)
return True
elif zdim=="plev19":
axisid=cmor.axis(table_entry = zdim,units ="Pa" ,coord_vals = plev19_)
depth_axis_ids[key]=axisid
setattr(task, "z_axis_id", axisid)
setattr(task, "pressure_levels", plev19_)
return True
elif zdim=="plev39":
axisid=cmor.axis(table_entry = zdim,units ="Pa" ,coord_vals = plev39_)
depth_axis_ids[key]=axisid
setattr(task, "z_axis_id", axisid)
setattr(task, "pressure_levels", plev39_)
return True
elif zdim=="site":
log.critical('Z-dimension %s will not be implemented.'%zdim)
return False
else:
log.critical("Z-dimension %s not found for variable %s..." % (zdim,task.target.variable))
return False
# Creates the hybrid model vertical axis in cmor.
def create_hybrid_level_axis(task,leveltype='alevel'):
"""Create hybrud levels
Args:
task (cmor.task-object): task for which levels are created
leveltype (string): which kind (alevel, alevhalf)
Returns:
axisid (cmor.axis-object): axis id for levels
storewith (cmor.zfactor-object): surface pressure field for saving into same file. needed for calculation of pressure on model levels.
"""
global time_axes_,store_with_ps_,dim_ids_,zfactor_ids
# define grid axes and time axis for hybrid levels
axes=[getattr(task, 'lat'), getattr(task, 'lon'), getattr(task, "time_axis")]
# the hybrid coefficients (hyam/hybm and the interface values) are read from the output file below
pref = 80000 # TODO: Move reference pressure level to model config
path = getattr(task, cmor_task.output_path_key)
ds = None
try:
ds = netCDF4.Dataset(path)
am = ds.variables["hyam"]
aunit = getattr(am, "units")
bm = ds.variables["hybm"]
bunit = getattr(bm, "units")
hcm = am[:] / pref + bm[:]
n = hcm.shape[0]
if "hyai" in ds.variables:
ai = ds.variables["hyai"]
abnds = numpy.empty([n, 2])
abnds[:, 0] = ai[0:n]
abnds[:, 1] = ai[1:n + 1]
bi = ds.variables["hybi"]
bbnds = numpy.empty([n, 2])
bbnds[:, 0] = bi[0:n]
bbnds[:, 1] = bi[1:n + 1]
hcbnds = abnds / pref + bbnds
hci = ai[:] / pref + bi[:]
n = hci.shape[0]
else:
log.critical("Interface values for hybrid levels not present!")
if leveltype=='alevel':
axisid = cmor.axis(table_entry="alternate_hybrid_sigma", coord_vals=hcm, cell_bounds=hcbnds, units="1")
cmor.zfactor(zaxis_id=axisid, zfactor_name="ap", units=str(aunit), axis_ids=[axisid], zfactor_values=am[:],
zfactor_bounds=abnds)
cmor.zfactor(zaxis_id=axisid, zfactor_name="b", units=str(bunit), axis_ids=[axisid], zfactor_values=bm[:],
zfactor_bounds=bbnds)
elif leveltype=='alevhalf':
axisid = cmor.axis(table_entry="alternate_hybrid_sigma_half", coord_vals=hci, units="1")
cmor.zfactor(zaxis_id=axisid, zfactor_name="ap_half", units=str(aunit), axis_ids=[axisid,], zfactor_values=ai[:])
cmor.zfactor(zaxis_id=axisid, zfactor_name="b_half", units=str(bunit), axis_ids=[axisid,], zfactor_values=bi[:])
# Use the same ps zfactor for both types of hybrid levels;
# defining a separate one for each confuses cmor.
# For half levels, reuse the ps already created for the full levels.
if task.target.variable=='ec550aer':
psvarname='ps1'
else:
psvarname='ps'
#if depth_axis_ids[('task.table',leveltype)]!=None:
# setattr(task,'store_with',depth_axis_ids[('task.table',leveltype)])
if task.target.table not in zfactor_ids:
storewith = cmor.zfactor(zaxis_id=axisid, zfactor_name=psvarname,
axis_ids=axes, units="Pa")
setattr(task,'store_with',storewith)
else:
storewith=zfactor_ids[task.target.table]
return axisid, storewith
finally:
if ds is not None:
ds.close()
def create_lat():
"""Create latitude dimension
Args:
none
Returns:
lat_id (cmor.axis): cmor.axis-object
"""
yvals=numpy.linspace(89,-89,90)
ny = len(yvals)
lat_bnd=numpy.linspace(90,-90,91)
lat_id=cmor.axis(table_entry="latitude", units="degrees_north",
coord_vals=yvals, cell_bounds=lat_bnd)
return lat_id
def create_lon():
"""Create longitude dimension
Args:
none
Returns:
lon_id (cmor_axis): cmor.axis-object
"""
xvals=numpy.linspace(1.5,358.5,120)
nx = len(xvals)
lon_bnd=numpy.linspace(0,360,121)
lon_id=cmor.axis(table_entry="longitude", units="degrees_east",
coord_vals=xvals, cell_bounds=lon_bnd)
return lon_id
# Surface pressure variable lookup utility
def get_ps_var(ncpath):
""" read surface pressure variable for 3D output
Args:
ncpath (string): full path to ps_*.nc file
Returns:
netCDF4 variable: [time,lat,lon] array containing surface pressure values
"""
if not ncpath:
log.error("ERR -2: No path defined for surface pressure (ps).")
return None
if not os.path.exists(ncpath):
log.error("ERR -3: Path does not exist for surface pressure (ps).")
return None
ds = None
try:
ds = netCDF4.Dataset(ncpath)
if "ps" in ds.variables:
return ds.variables["ps"]
else:
log.error("ERR -4: Variable ps not present in pressure file.")
return None
except Exception as e:
log.error("ERR -5: Could not read netcdf file %s for surface pressure, reason: %s" % (ncpath, e.message))
return None
# Creates extra tasks for surface pressure
def get_ps_tasks(tasks):
""" find ps (surface preseure) tasks for different tables
Args:
tasks (list): list of tasks
Returns:
result (dictionary): dictionary based on the frequencies of different tasks with corresponding ps-tasks as values.
"""
global exp_name_,path_
tasks_by_freq = cmor_utils.group(tasks, lambda task: task.target.frequency)
result = {}
for freq, task_group in tasks_by_freq.iteritems():
tasks3d = [t for t in task_group if ("alevel" in getattr(t.target, cmor_target.dims_key).split() or "plev19" in getattr(t.target, cmor_target.dims_key).split() or
"alevhalf" in getattr(t.target, cmor_target.dims_key).split() or "plev39" in getattr(t.target, cmor_target.dims_key).split() )]
if not any(tasks3d):
continue
ps_tasks = [t for t in task_group if t.source.variable() == "ps" and
getattr(t, "time_operator", "point") in ["mean", "point"]]
ps_task = ps_tasks[0] if any(ps_tasks) else None
if ps_task:
result[freq]=ps_task
else:
source = cmor_source.tm5_source("ps")
ps_task = cmor_task.cmor_task(source, cmor_target.cmor_target("ps", freq))
setattr(ps_task.target, cmor_target.freq_key, freq)
setattr(ps_task.target, "time_operator", ["point"])
freqid=set_freqid(freq)
filepath=cmor_utils.find_tm5_output(path_,exp_name_,"ps",freqid)
setattr(ps_task, cmor_task.output_path_key, filepath[0])
result[freq]=ps_task
for task3d in tasks3d:
setattr(task3d, "ps_task", ps_task)
return result
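# A minimal driver sketch (assumptions: the paths and experiment name are hypothetical,
# the task list is built elsewhere by ece2cmor3, and the real workflow also configures
# cmor (setup / dataset json) before execute() is called):
#
#   import datetime
#   initialize(path='/path/to/tm5/output', expname='expname',
#              tabledir='/path/to/cmip6-cmor-tables/Tables', prefix='CMIP6',
#              refdate=datetime.datetime(1850, 1, 1))
#   execute(tasks)   # tasks: list of cmor_task objects
#   finalize()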
|
|
from django.conf import settings
from django import forms
from django.forms.widgets import RadioSelect
from crits.campaigns.campaign import Campaign
from crits.core import form_consts
from crits.core.forms import add_bucketlist_to_form, add_ticket_to_form
from crits.core.widgets import CalWidget, ExtendedChoiceField
from crits.core.handlers import get_source_names, get_item_names, get_object_types
from crits.core.user_tools import get_user_organization
from crits.indicators.indicator import IndicatorAction
class IndicatorActionsForm(forms.Form):
"""
Django form for adding actions.
"""
error_css_class = 'error'
required_css_class = 'required'
action_type = forms.ChoiceField(widget=forms.Select, required=True)
begin_date = forms.DateTimeField(
widget=CalWidget(format='%Y-%m-%d %H:%M:%S',
attrs={'class': 'datetimeclass',
'size': '25',
'id': 'id_action_begin_date'}),
input_formats=settings.PY_FORM_DATETIME_FORMATS,
required=False)
end_date = forms.DateTimeField(
widget=CalWidget(format='%Y-%m-%d %H:%M:%S',
attrs={'class': 'datetimeclass',
'size': '25',
'id': 'id_action_end_date'}),
input_formats=settings.PY_FORM_DATETIME_FORMATS,
required=False)
performed_date = forms.DateTimeField(
widget=CalWidget(format='%Y-%m-%d %H:%M:%S',
attrs={'class': 'datetimeclass',
'size': '25',
'id': 'id_action_performed_date'}),
input_formats=settings.PY_FORM_DATETIME_FORMATS,
required=False)
active = forms.ChoiceField(
widget=RadioSelect,
choices=(('on', 'on'),
('off', 'off')))
reason = forms.CharField(
widget=forms.TextInput(attrs={'size': '50'}),
required=False)
date = forms.CharField(
widget=forms.HiddenInput(attrs={'size': '50',
'readonly': 'readonly',
'id': 'id_action_date'}))
def __init__(self, *args, **kwargs):
super(IndicatorActionsForm, self).__init__(*args, **kwargs)
self.fields['action_type'].choices = [
(c.name, c.name) for c in get_item_names(IndicatorAction, True)]
class IndicatorActivityForm(forms.Form):
"""
Django form for adding activity.
"""
error_css_class = 'error'
required_css_class = 'required'
description = forms.CharField(
widget=forms.TextInput(attrs={'size': '50'}),
required=False)
start_date = forms.DateTimeField(
widget=CalWidget(format='%Y-%m-%d %H:%M:%S',
attrs={'class': 'datetimeclass',
'size': '25',
'id': 'id_activity_start_date'}),
input_formats=settings.PY_FORM_DATETIME_FORMATS,
required=False)
end_date = forms.DateTimeField(
widget=CalWidget(format='%Y-%m-%d %H:%M:%S',
attrs={'class': 'datetimeclass',
'size': '25',
'id': 'id_activity_end_date'}),
input_formats=settings.PY_FORM_DATETIME_FORMATS,
required=False)
date = forms.CharField(
widget=forms.HiddenInput(attrs={'size': '50',
'readonly': 'readonly',
'id': 'id_activity_date'}))
class UploadIndicatorCSVForm(forms.Form):
"""
Django form for uploading Indicators via a CSV file.
"""
error_css_class = 'error'
required_css_class = 'required'
filedata = forms.FileField()
source = forms.ChoiceField(
widget=forms.Select(attrs={'class': 'no_clear'}),
label=form_consts.Indicator.SOURCE,
required=True)
method = forms.CharField(
widget=forms.TextInput,
label=form_consts.Indicator.SOURCE_METHOD,
required=False)
reference = forms.CharField(
widget=forms.TextInput(attrs={'size': '90'}),
label=form_consts.Indicator.SOURCE_REFERENCE,
required=False)
def __init__(self, username, *args, **kwargs):
super(UploadIndicatorCSVForm, self).__init__(*args, **kwargs)
self.fields['source'].choices = [
(c.name, c.name) for c in get_source_names(True, True, username)]
self.fields['source'].initial = get_user_organization(username)
class UploadIndicatorTextForm(forms.Form):
"""
Django form for uploading Indicators via a CSV blob.
"""
error_css_class = 'error'
required_css_class = 'required'
source = forms.ChoiceField(
widget=forms.Select(attrs={'class': 'no_clear'}),
label=form_consts.Indicator.SOURCE,
required=True)
method = forms.CharField(
widget=forms.TextInput,
label=form_consts.Indicator.SOURCE_METHOD,
required=False)
reference = forms.CharField(
widget=forms.TextInput(attrs={'size': '90'}),
label=form_consts.Indicator.SOURCE_REFERENCE,
required=False)
data = forms.CharField(
widget=forms.Textarea(attrs={'cols': '80', 'rows': '20'}),
required=True)
def __init__(self, username, *args, **kwargs):
super(UploadIndicatorTextForm, self).__init__(*args, **kwargs)
self.fields['source'].choices = [
(c.name, c.name) for c in get_source_names(True, True, username)]
self.fields['source'].initial = get_user_organization(username)
dt = "Indicator, Type, Campaign, Campaign Confidence, Confidence, Impact, Bucket List, Ticket, Action\n"
self.fields['data'].initial = dt
class UploadIndicatorForm(forms.Form):
"""
Django form for uploading a single Indicator.
"""
error_css_class = 'error'
required_css_class = 'required'
indicator_type = ExtendedChoiceField(required=True)
value = forms.CharField(
widget=forms.TextInput(attrs={'size': '100'}),
required=True)
confidence = forms.ChoiceField(widget=forms.Select, required=True)
impact = forms.ChoiceField(widget=forms.Select, required=True)
campaign = forms.ChoiceField(widget=forms.Select, required=False)
campaign_confidence = forms.ChoiceField(widget=forms.Select, required=False)
source = forms.ChoiceField(
widget=forms.Select(attrs={'class': 'no_clear'}),
label=form_consts.Indicator.SOURCE,
required=True)
method = forms.CharField(
widget=forms.TextInput,
label=form_consts.Indicator.SOURCE_METHOD,
required=False)
reference = forms.CharField(
widget=forms.TextInput(attrs={'size': '90'}),
label=form_consts.Indicator.SOURCE_REFERENCE,
required=False)
def __init__(self, username, choices=None, *args, **kwargs):
super(UploadIndicatorForm, self).__init__(*args, **kwargs)
self.fields['source'].choices = [
(c.name, c.name) for c in get_source_names(True, True, username)]
self.fields['source'].initial = get_user_organization(username)
if not choices:
#only valid types for indicators are those which don't require file upload
choices = [
(c[0], c[0], {'datatype': c[1].keys()[0],
'datatype_value': c[1].values()[0]})
for c in get_object_types(active=True, query={'datatype.file': {'$exists': 0},
'datatype.enum': {'$exists': 0}})]
self.fields['indicator_type'].choices = choices
self.fields['indicator_type'].widget.attrs = {'class': 'object-types'}
self.fields['campaign'].choices = [("", "")]
self.fields['campaign'].choices += [
(c.name, c.name) for c in get_item_names(Campaign, True)]
self.fields['campaign_confidence'].choices = [
("", ""),
("low", "low"),
("medium", "medium"),
("high", "high")]
self.fields['confidence'].choices = [
("unknown", "unknown"),
("benign", "benign"),
("low", "low"),
("medium", "medium"),
("high", "high")]
self.fields['impact'].choices = [
("unknown", "unknown"),
("benign", "benign"),
("low", "low"),
("medium", "medium"),
("high", "high")]
add_bucketlist_to_form(self)
add_ticket_to_form(self)
class NewIndicatorActionForm(forms.Form):
"""
Django form for adding a new Indicator Action.
"""
error_css_class = 'error'
required_css_class = 'required'
action = forms.CharField(widget=forms.TextInput, required=True)
|
|
import base64
import json
from cattle import ApiError, ClientApiError
from common_fixtures import * # NOQA
from datetime import timedelta
import time
def test_container_create_count(client, context):
cs = client.create_container(imageUuid=context.image_uuid,
count=3)
assert len(cs) == 3
for c in cs:
c = client.wait_success(c)
assert c.state == 'running'
def test_container_simple_start(context):
context.create_container()
def test_container_build(super_client, context, client):
container = context.create_container(build={
'dockerfile': 'test/Dockerfile',
'remote': 'http://example.com',
'rm': True,
})
assert container.build.dockerfile == 'test/Dockerfile'
assert container.build.remote == 'http://example.com'
assert container.build.rm
image = super_client.reload(container).image()
assert image.data.fields.build.dockerfile == 'test/Dockerfile'
assert image.data.fields.build.remote == 'http://example.com'
assert image.data.fields.build.tag == context.image_uuid
assert image.data.fields.build.rm
def test_container_create_only(super_client, client, context):
uuid = "sim:{}".format(random_num())
container = super_client.create_container(accountId=context.project.id,
imageUuid=uuid,
name="test",
startOnCreate=False)
assert_fields(container, {
"type": "container",
"allocationState": "inactive",
"state": "creating",
"imageUuid": uuid,
"firstRunning": None,
})
container = super_client.wait_success(container)
assert_fields(container, {
"type": "container",
"allocationState": "inactive",
"state": "stopped",
"imageUuid": uuid,
})
container = super_client.reload(container)
assert container.imageId is not None
assert container.instanceTriggeredStop == 'stop'
image = super_client.wait_success(container.image())
assert_fields(image, {
"state": "active"
})
volumes = container.volumes()
assert len(volumes) == 1
root_volume = super_client.wait_success(volumes[0])
assert_fields(root_volume, {
"allocationState": "inactive",
"attachedState": "active",
"state": "inactive",
"instanceId": container.id,
"deviceNumber": 0,
})
volume_mappings = root_volume.volumeStoragePoolMaps()
assert len(volume_mappings) == 0
nics = container.nics()
assert len(nics) == 1
image = super_client.wait_success(find_one(super_client.list_image,
name=uuid))
assert_fields(image, {
"state": "active",
"name": uuid,
"isPublic": False,
})
image_mappings = image.imageStoragePoolMaps()
assert len(image_mappings) == 0
return client.reload(container)
def _assert_running(container):
assert_fields(container, {
"allocationState": "active",
"state": "running",
"startCount": NOT_NONE,
"hostId": NOT_NONE,
"firstRunning": NOT_NONE
})
root_volume = container.volumes()[0]
assert_fields(root_volume, {
"state": "active"
})
image = root_volume.image()
assert_fields(image, {
"state": "active"
})
volume_mappings = root_volume.volumeStoragePoolMaps()
assert len(volume_mappings) == 1
assert_fields(volume_mappings[0], {
"state": "active"
})
volume_pool = volume_mappings[0].storagePool()
assert_fields(volume_pool, {
"state": "active"
})
# image_mappings = image.imageStoragePoolMaps()
# assert len(image_mappings) == 2
# for image_mapping in image_mappings:
# assert_fields(image_mapping, {
# # TODO: why isn't this active?
# # "state": "active",
# "storagePoolId": volume_pool.id
# })
instance_host_mappings = container.instanceHostMaps()
assert len(instance_host_mappings) == 1
assert_fields(instance_host_mappings[0], {
"state": "active"
})
def test_container_special_labels(client, context):
uuid = "sim:{}".format(random_num())
labels = {
'io.rancher.container.display_name': 'from-label',
'io.rancher.container.network': 'true',
}
container = client.create_container(accountId=context.project.id,
networkMode='none',
imageUuid=uuid,
name="test",
labels=labels,
startOnCreate=False)
container = client.wait_success(container)
assert container.state == 'stopped'
assert container.name == 'from-label'
assert container.networkMode == 'managed'
def test_container_create_then_start(super_client, client, context):
container = client.create_container(startOnCreate=False,
imageUuid=context.image_uuid)
container = client.wait_success(container)
container = container.start()
assert container.state == "starting"
assert 'start' not in container
assert 'stop' in container
assert 'remove' not in container
_assert_running(super_client.wait_success(container))
def test_container_first_running(client, context):
c = client.create_container(imageUuid=context.image_uuid,
startOnCreate=False)
c = client.wait_success(c)
assert c.state == 'stopped'
assert c.firstRunning is None
c = client.wait_success(c.start())
assert c.state == 'running'
assert c.firstRunning is not None
first = c.firstRunning
c = client.wait_success(c.restart())
assert c.state == 'running'
assert c.firstRunning == first
def test_container_no_net(client, context):
with pytest.raises(ClientApiError) as e:
context.create_container(networkMode='foo')
assert e.value.message == 'Failed to find network for networkMode foo'
def test_container_restart(client, super_client, context):
container = context.create_container()
_assert_running(super_client.reload(container))
ip = container.primaryIpAddress
assert ip is not None
container = context.client.wait_success(container)
container = container.restart()
assert container.state == 'restarting'
container = client.wait_success(container)
_assert_running(super_client.reload(container))
assert ip == container.primaryIpAddress
def test_container_stop(client, super_client, context):
container = context.create_container(name="test")
container = client.wait_success(container)
assert_fields(container, {
"state": "running"
})
container = container.stop()
assert_fields(container, {
"state": "stopping"
})
container = client.wait_success(container)
assert_fields(super_client.reload(container), {
"allocationState": "active",
"state": "stopped"
})
container = super_client.reload(container)
root_volume = container.volumes()[0]
assert_fields(root_volume, {
"state": "detached"
})
image = root_volume.image()
assert_fields(image, {
"state": "active"
})
volume_mappings = root_volume.volumeStoragePoolMaps()
assert len(volume_mappings) == 1
assert_fields(volume_mappings[0], {
"state": "inactive"
})
volume_pool = volume_mappings[0].storagePool()
assert_fields(volume_pool, {
"state": "active"
})
image_mappings = image.imageStoragePoolMaps()
assert len(image_mappings) == 1
# for image_mapping in image_mappings:
# assert_fields(image_mapping, {
# # TODO: Why isn't this active
# # "state": "active",
# "storagePoolId": volume_pool.id
# })
instance_host_mappings = container.instanceHostMaps()
assert len(instance_host_mappings) == 1
assert instance_host_mappings[0].state == 'inactive'
def _assert_removed(container):
assert container.state == "removed"
assert_removed_fields(container)
volumes = container.volumes()
assert len(volumes) == 0
return container
def _assert_error(container):
assert container.state == "error"
volumes = container.volumes()
assert len(volumes) == 1
assert volumes[0].state != "removed"
volume_mappings = volumes[0].volumeStoragePoolMaps()
assert len(volume_mappings) == 1
assert volume_mappings[0].state == "inactive"
return container
def test_container_remove(client, super_client, context):
container = context.create_container(name="test")
container = client.wait_success(container)
container = client.wait_success(container.stop())
assert container.state == "stopped"
container = client.delete(container)
assert container.state == "removing"
container = client.wait_success(container)
_assert_removed(super_client.reload(container))
return container
def test_container_delete_while_running(client, super_client, context):
container = context.create_container(name="test")
container = client.wait_success(container)
assert container.state == 'running'
container = client.delete(container)
assert container.state == 'stopping'
container = client.wait_success(container)
_assert_removed(super_client.reload(container))
return container
def test_container_purge(client, super_client, context):
container = test_container_remove(client, super_client, context)
assert container.state == "removed"
# It's easier to call container.purge(), but this was to test other
# things too
remove_time = now() - timedelta(hours=1)
super_client.update(container, {
'removeTime': format_time(remove_time)
})
purge = super_client.list_task(name="purge.resources")[0]
purge.execute()
container = client.reload(container)
for x in range(30):
if container.state == "removed":
time.sleep(0.5)
container = client.reload(container)
else:
break
assert container.state != "removed"
container = client.wait_success(container)
assert container.state == "purged"
instance_host_mappings = super_client.reload(container).instanceHostMaps()
assert len(instance_host_mappings) == 0
volumes = container.volumes()
assert len(volumes) == 0
def test_start_stop(client, context):
container = context.create_container(name="test")
container = client.wait_success(container)
for _ in range(5):
assert container.state == 'running'
container = client.wait_success(container.stop())
assert container.state == 'stopped'
container = client.wait_success(container.start())
assert container.state == 'running'
def test_container_image_required(client):
try:
client.create_container()
assert False
except ApiError as e:
assert e.error.status == 422
assert e.error.code == 'MissingRequired'
assert e.error.fieldName == 'imageUuid'
def test_container_compute_fail(super_client, context):
data = {
'compute.instance.activate::fail': True,
'io.cattle.platform.process.instance.InstanceStart': {
'computeTries': 1
}
}
container = context.super_create_container_no_success(data=data)
assert container.transitioning == 'error'
assert container.transitioningMessage == \
'Failing [compute.instance.activate]'
_assert_error(super_client.reload(container))
def test_container_storage_fail(super_client, context):
data = {
'storage.volume.activate::fail': True,
}
container = context.super_create_container_no_success(data=data)
assert container.transitioning == 'error'
assert container.transitioningMessage == \
'Failing [storage.volume.activate]'
_assert_error(super_client.reload(container))
def test_container_restart_policy(super_client, client):
for c in [super_client, client]:
restart_policy = c.schema.types['restartPolicy']
assert len(restart_policy.resourceFields) == 2
assert 'name' in restart_policy.resourceFields
assert 'maximumRetryCount' in restart_policy.resourceFields
container = c.schema.types['container']
assert 'restartPolicy' == \
container.resourceFields['restartPolicy'].type
def test_container_exec_on_stop(client, context):
c = context.create_container()
assert callable(c.execute)
c = client.wait_success(c.stop())
assert 'execute' not in c
def test_container_exec(context):
c = context.create_container()
assert callable(c.execute)
resp = c.execute(command=['/bin/sh'])
assert resp.url is not None
assert resp.token is not None
jwt = _get_jwt(resp.token)
assert jwt['exec']['AttachStdin']
assert jwt['exec']['AttachStdout']
assert jwt['exec']['Tty']
assert jwt['exec']['Cmd'] == ['/bin/sh']
assert jwt['exec']['Container'] == c.externalId
assert jwt['exp'] is not None
resp = c.execute(command=['/bin/sh2', 'blah'], attachStdin=False,
attachStdout=False, tty=False)
assert resp.url is not None
assert resp.token is not None
jwt = _get_jwt(resp.token)
assert not jwt['exec']['AttachStdin']
assert not jwt['exec']['AttachStdout']
assert not jwt['exec']['Tty']
assert jwt['exec']['Cmd'] == ['/bin/sh2', 'blah']
context.delete(c)
def test_container_logs(context):
c = context.create_container()
assert callable(c.logs)
resp = c.logs(follow=True, lines=300)
assert resp.url is not None
assert resp.token is not None
jwt = _get_jwt(resp.token)
assert jwt['logs']['Container'] == c.externalId
assert jwt['logs']['Lines'] == 300
assert jwt['logs']['Follow'] is True
assert jwt['exp'] is not None
resp = c.logs()
assert resp.url is not None
assert resp.token is not None
jwt = _get_jwt(resp.token)
assert jwt['logs']['Container'] == c.externalId
assert jwt['logs']['Lines'] == 100
assert jwt['logs']['Follow'] is True
assert jwt['exp'] is not None
context.delete(c)
def test_container_labels(client, context):
labels = {'affinity': "container==B", '!affinity': "container==C"}
container = context.create_container(name="test",
labels=labels)
container = client.wait_success(container)
assert container.state == 'running'
assert container.labels == labels
def _get_jwt(token):
text = token.split('.')[1]
missing_padding = 4 - len(text) % 4
if missing_padding:
text += '=' * missing_padding
return json.loads(base64.b64decode(text))
def test_container_request_ip(super_client, client, context):
for i in range(2):
# Doing this twice essentially ensures that the IP gets freed the first
# time
container = client.create_container(imageUuid=context.image_uuid,
startOnCreate=False)
container = super_client.wait_success(container)
assert container.state == 'stopped'
container.data.fields['requestedIpAddress'] = '10.42.33.33'
container = super_client.update(container, data=container.data)
container = super_client.wait_success(container.start())
assert container.primaryIpAddress == '10.42.33.33'
# Try second time and should fail because it is used
container2 = client.create_container(imageUuid=context.image_uuid,
startOnCreate=False)
container2 = super_client.wait_success(container2)
assert container2.state == 'stopped'
container2.data.fields['requestedIpAddress'] = '10.42.33.33'
container2 = super_client.update(container2, data=container2.data)
container2 = super_client.wait_success(container2.start())
assert container2.primaryIpAddress != '10.42.33.33'
# Release 10.42.33.33
container = super_client.wait_success(super_client.delete(container))
container = super_client.wait_success(container.purge())
nics = container.nics()
assert len(nics) == 0
def test_container_network_modes(context, super_client):
c = context.create_container(networkMode=None)
c = super_client.wait_success(c)
assert c.state == 'running'
assert len(c.nics()) == 0
target = context.create_container(networkMode='bridge')
target = super_client.wait_success(target)
assert c.state == 'running'
assert len(target.nics()) == 1
for i in [('host', 'dockerHost'), ('none', 'dockerNone'),
('container', 'dockerContainer'), ('bridge', 'dockerBridge'),
('managed', 'network')]:
args = {
'networkMode': i[0]
}
if i[0] == 'container':
args['networkContainerId'] = target.id
c = context.create_container(**args)
c = super_client.wait_success(c)
assert c.state == 'running'
assert len(c.nics()) == 1
assert c.nics()[0].network().kind == i[1]
def test_container_resource_actions_json_state(context):
c = context.create_container(startOnCreate=True)
c.stop()
c.logs()
c = context.client.wait_success(c)
c.logs()
context.client.delete(c)
c = context.client.wait_success(c)
assert 'logs' not in c
def test_container_network_host_mode_w_dns(context, super_client):
labels = {'io.rancher.container.dns': "true"}
c = context.create_container(networkMode='host', labels=labels)
c = super_client.wait_success(c)
assert c.state == 'running'
assert len(c.nics()) == 1
assert c.nics()[0].network().kind == 'dockerHost'
def test_container_request_ip_from_label(new_context):
client = new_context.client
labels = {
'io.rancher.container.requested_ip': '10.42.42.42'
}
c = new_context.create_container(labels=labels)
assert c.primaryIpAddress == '10.42.42.42'
c = client.wait_success(client.delete(c))
assert c.state == 'removed'
c = new_context.create_container(labels=labels)
assert c.primaryIpAddress == '10.42.42.42'
c = new_context.create_container(labels=labels)
assert c.primaryIpAddress != '10.42.42.42'
|
|
# GENERATED FILE - DO NOT EDIT THIS FILE UNLESS YOU ARE A WIZZARD
#pylint: skip-file
from heat.engine import properties
from heat.engine import constraints
from heat.engine import attributes
from heat.common.i18n import _
from avi.heat.avi_resource import AviResource
from avi.heat.avi_resource import AviNestedResource
from options import *
class VsDebugFilter(object):
# all schemas
name_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
se_uuid_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'name',
'se_uuid',
)
# mapping of properties to their schemas
properties_schema = {
'name': name_schema,
'se_uuid': se_uuid_schema,
}
class AutoScaleMgrDebugFilter(object):
# all schemas
pool_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("uuid of the Pool"),
required=False,
update_allowed=True,
)
intelligent_autoscale_period_schema = properties.Schema(
properties.Schema.NUMBER,
_("period of running intelligent autoscale check"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'pool_uuid',
'intelligent_autoscale_period',
)
# mapping of properties to their schemas
properties_schema = {
'pool_uuid': pool_uuid_schema,
'intelligent_autoscale_period': intelligent_autoscale_period_schema,
}
class CloudConnectorDebugFilter(object):
# all schemas
se_id_schema = properties.Schema(
properties.Schema.STRING,
_("filter debugs for a SE"),
required=False,
update_allowed=True,
)
app_id_schema = properties.Schema(
properties.Schema.STRING,
_("filter debugs for an app"),
required=False,
update_allowed=True,
)
disable_se_reboot_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Disable SE reboot via cloud connector on HB miss"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'se_id',
'app_id',
'disable_se_reboot',
)
# mapping of properties to their schemas
properties_schema = {
'se_id': se_id_schema,
'app_id': app_id_schema,
'disable_se_reboot': disable_se_reboot_schema,
}
class HSMgrDebugFilter(object):
# all schemas
metric_entity_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['APPLICATION_METRICS_ENTITY', 'SE_METRICS_ENTITY', 'VM_METRICS_ENTITY', 'CONTROLLER_METRICS_ENTITY', 'TENANT_METRICS_ENTITY', 'VSERVER_METRICS_ENTITY']),
],
)
entity_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
pool_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
server_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
period_schema = properties.Schema(
properties.Schema.NUMBER,
_(""),
required=False,
update_allowed=True,
)
skip_hs_db_writes_schema = properties.Schema(
properties.Schema.BOOLEAN,
_(""),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'metric_entity',
'entity',
'pool',
'server',
'period',
'skip_hs_db_writes',
)
# mapping of properties to their schemas
properties_schema = {
'metric_entity': metric_entity_schema,
'entity': entity_schema,
'pool': pool_schema,
'server': server_schema,
'period': period_schema,
'skip_hs_db_writes': skip_hs_db_writes_schema,
}
class SeMgrDebugFilter(object):
# all schemas
name_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'name',
)
# mapping of properties to their schemas
properties_schema = {
'name': name_schema,
}
class AlertMgrDebugFilter(object):
# all schemas
alert_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("filter debugs for an alert id"),
required=False,
update_allowed=True,
)
alert_objid_schema = properties.Schema(
properties.Schema.STRING,
_("filter debugs for entity uuid"),
required=False,
update_allowed=True,
)
cfg_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("filter debugs for an alert config"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'alert_uuid',
'alert_objid',
'cfg_uuid',
)
# mapping of properties to their schemas
properties_schema = {
'alert_uuid': alert_uuid_schema,
'alert_objid': alert_objid_schema,
'cfg_uuid': cfg_uuid_schema,
}
class MesosMetricsDebugFilter(object):
# all schemas
metric_entity_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['APPLICATION_METRICS_ENTITY', 'SE_METRICS_ENTITY', 'VM_METRICS_ENTITY', 'CONTROLLER_METRICS_ENTITY', 'TENANT_METRICS_ENTITY', 'VSERVER_METRICS_ENTITY']),
],
)
mesos_master_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
mesos_slave_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
metrics_collection_frq_schema = properties.Schema(
properties.Schema.NUMBER,
_(""),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'metric_entity',
'mesos_master',
'mesos_slave',
'metrics_collection_frq',
)
# mapping of properties to their schemas
properties_schema = {
'metric_entity': metric_entity_schema,
'mesos_master': mesos_master_schema,
'mesos_slave': mesos_slave_schema,
'metrics_collection_frq': metrics_collection_frq_schema,
}
class MetricsMgrDebugFilter(object):
# all schemas
obj_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
entity_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
skip_metrics_db_writes_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
logging_freq_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
log_first_n_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
metric_instance_id_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
skip_cluster_map_check_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
disable_hw_training_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
license_grace_period_schema = properties.Schema(
properties.Schema.STRING,
_("setting to reduce the grace period for license expiry in hours"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'obj',
'entity',
'skip_metrics_db_writes',
'logging_freq',
'log_first_n',
'metric_instance_id',
'skip_cluster_map_check',
'disable_hw_training',
'license_grace_period',
)
# mapping of properties to their schemas
properties_schema = {
'obj': obj_schema,
'entity': entity_schema,
'skip_metrics_db_writes': skip_metrics_db_writes_schema,
'logging_freq': logging_freq_schema,
'log_first_n': log_first_n_schema,
'metric_instance_id': metric_instance_id_schema,
'skip_cluster_map_check': skip_cluster_map_check_schema,
'disable_hw_training': disable_hw_training_schema,
'license_grace_period': license_grace_period_schema,
}
class StateCacheMgrDebugFilter(object):
# all schemas
vs_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("VirtualService UUID"),
required=False,
update_allowed=True,
)
pool_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("Pool UUID"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'vs_uuid',
'pool_uuid',
)
# mapping of properties to their schemas
properties_schema = {
'vs_uuid': vs_uuid_schema,
'pool_uuid': pool_uuid_schema,
}
class DebugFilterUnion(object):
# all schemas
type_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
constraints=[
constraints.AllowedValues(['VI_MGR_DEBUG', 'HS_MGR_DEBUG', 'SE_MGR_DEBUG', 'SE_AGENT_DEBUG', 'RPC_INFRA_DEBUG', 'SE_AGENT_METRICS_DEBUG', 'TASK_QUEUE_DEBUG', 'TRANSACTION_DEBUG', 'METRICS_MANAGER_DEBUG', 'AUTOSCALE_MGR_DEBUG', 'RES_MGR_DEBUG', 'ALERT_MGR_DEBUG', 'REDIS_INFRA_DEBUG', 'APIC_AGENT_DEBUG', 'MESOS_METRICS_DEBUG', 'CLOUD_CONNECTOR_DEBUG', 'METRICS_MGR_DEBUG', 'VIRTUALSERVICE_DEBUG', 'STATECACHE_MGR_DEBUG', 'EVENT_API_DEBUG', 'JOB_MGR_DEBUG']),
],
)
se_mgr_debug_filter_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=SeMgrDebugFilter.properties_schema,
required=False,
update_allowed=True,
)
vs_debug_filter_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=VsDebugFilter.properties_schema,
required=False,
update_allowed=True,
)
metrics_debug_filter_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=MetricsMgrDebugFilter.properties_schema,
required=False,
update_allowed=True,
)
hs_debug_filter_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=HSMgrDebugFilter.properties_schema,
required=False,
update_allowed=True,
)
alert_debug_filter_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=AlertMgrDebugFilter.properties_schema,
required=False,
update_allowed=True,
)
autoscale_mgr_debug_filter_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=AutoScaleMgrDebugFilter.properties_schema,
required=False,
update_allowed=True,
)
cloud_connector_debug_filter_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=CloudConnectorDebugFilter.properties_schema,
required=False,
update_allowed=True,
)
mesos_metrics_debug_filter_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=MesosMetricsDebugFilter.properties_schema,
required=False,
update_allowed=True,
)
state_cache_mgr_debug_filter_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=StateCacheMgrDebugFilter.properties_schema,
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'type',
'se_mgr_debug_filter',
'vs_debug_filter',
'metrics_debug_filter',
'hs_debug_filter',
'alert_debug_filter',
'autoscale_mgr_debug_filter',
'cloud_connector_debug_filter',
'mesos_metrics_debug_filter',
'state_cache_mgr_debug_filter',
)
# mapping of properties to their schemas
properties_schema = {
'type': type_schema,
'se_mgr_debug_filter': se_mgr_debug_filter_schema,
'vs_debug_filter': vs_debug_filter_schema,
'metrics_debug_filter': metrics_debug_filter_schema,
'hs_debug_filter': hs_debug_filter_schema,
'alert_debug_filter': alert_debug_filter_schema,
'autoscale_mgr_debug_filter': autoscale_mgr_debug_filter_schema,
'cloud_connector_debug_filter': cloud_connector_debug_filter_schema,
'mesos_metrics_debug_filter': mesos_metrics_debug_filter_schema,
'state_cache_mgr_debug_filter': state_cache_mgr_debug_filter_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'mesos_metrics_debug_filter': getattr(MesosMetricsDebugFilter, 'field_references', {}),
'cloud_connector_debug_filter': getattr(CloudConnectorDebugFilter, 'field_references', {}),
'metrics_debug_filter': getattr(MetricsMgrDebugFilter, 'field_references', {}),
'alert_debug_filter': getattr(AlertMgrDebugFilter, 'field_references', {}),
'se_mgr_debug_filter': getattr(SeMgrDebugFilter, 'field_references', {}),
'state_cache_mgr_debug_filter': getattr(StateCacheMgrDebugFilter, 'field_references', {}),
'autoscale_mgr_debug_filter': getattr(AutoScaleMgrDebugFilter, 'field_references', {}),
'vs_debug_filter': getattr(VsDebugFilter, 'field_references', {}),
'hs_debug_filter': getattr(HSMgrDebugFilter, 'field_references', {}),
}
class DebugController(AviResource):
resource_name = "debugcontroller"
# all schemas
name_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
)
sub_module_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
constraints=[
constraints.AllowedValues(['VI_MGR_DEBUG', 'HS_MGR_DEBUG', 'SE_MGR_DEBUG', 'SE_AGENT_DEBUG', 'RPC_INFRA_DEBUG', 'SE_AGENT_METRICS_DEBUG', 'TASK_QUEUE_DEBUG', 'TRANSACTION_DEBUG', 'METRICS_MANAGER_DEBUG', 'AUTOSCALE_MGR_DEBUG', 'RES_MGR_DEBUG', 'ALERT_MGR_DEBUG', 'REDIS_INFRA_DEBUG', 'APIC_AGENT_DEBUG', 'MESOS_METRICS_DEBUG', 'CLOUD_CONNECTOR_DEBUG', 'METRICS_MGR_DEBUG', 'VIRTUALSERVICE_DEBUG', 'STATECACHE_MGR_DEBUG', 'EVENT_API_DEBUG', 'JOB_MGR_DEBUG']),
],
)
trace_level_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
constraints=[
constraints.AllowedValues(['TRACE_LEVEL_DEBUG', 'TRACE_LEVEL_ERROR', 'TRACE_LEVEL_DISABLED', 'TRACE_LEVEL_DEBUG_DETAIL']),
],
)
log_level_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
constraints=[
constraints.AllowedValues(['LOG_LEVEL_ERROR', 'LOG_LEVEL_DISABLED', 'LOG_LEVEL_INFO', 'LOG_LEVEL_WARNING']),
],
)
filters_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=DebugFilterUnion.properties_schema,
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'name',
'sub_module',
'trace_level',
'log_level',
'filters',
)
# mapping of properties to their schemas
properties_schema = {
'name': name_schema,
'sub_module': sub_module_schema,
'trace_level': trace_level_schema,
'log_level': log_level_schema,
'filters': filters_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'filters': getattr(DebugFilterUnion, 'field_references', {}),
}
def resource_mapping():
return {
'Avi::LBaaS::DebugController': DebugController,
}
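# Illustrative sketch (added for documentation, not part of the generated file):
# Heat discovers this plugin via resource_mapping(), and a template refers to the
# key below in its `type:` field (e.g. type: Avi::LBaaS::DebugController with
# properties drawn from DebugController.PROPERTIES such as name, sub_module,
# trace_level and log_level).
#
# mapping = resource_mapping()
# assert mapping['Avi::LBaaS::DebugController'] is DebugController
# print(sorted(DebugController.PROPERTIES))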
|
|
from pytz import timezone, utc
from datetime import datetime, timedelta
from datetime import date
from dateutil.relativedelta import relativedelta
import calendar
import pandas as pd
class EasyDate():
######################################################
# setup and manipulate reference date local and utc
######################################################
# initialise to current UTC time
def __init__(self,
date_time='now', # 'now', datetime object or string 'YYYY-MM-DD HH:MM:SS'
local=True, # is the date_time input in your local timezone?
local_timezone='Australia/Brisbane' # pytz timezone
):
self.init_time = datetime.utcnow().replace(tzinfo=utc)
self.time_object_utc = self.init_time
self.time_object_local = self.init_time
self.set_reference_point(date_time=date_time, local=local, local_timezone=local_timezone)
self.range_start = None
self.range_end = None
self.sequence_range_start = None
self.sequence_range_end = None
def __str__(self):
utc_output = self.time_object_utc.strftime('%Y-%m-%d %H:%M:%S')
local_output = self.time_object_local.strftime('%Y-%m-%d %H:%M:%S')
return "EasyDate object: {local_time} local time, {utc_time} utc".format(local_time=local_output, utc_time=utc_output)
# set up objects
def set_reference_point(self,
date_time='now', # 'now', datetime object or string 'YYYY-MM-DD HH:MM:SS'
local=True, # is the date_time input in your local timezone?
local_timezone='Australia/Brisbane' # pytz timezone
):
# if current time is requested, get this as utc
if date_time == 'now':
date_time = datetime.utcnow().replace(tzinfo=utc)
local = False
date_time = self.parse_datetime_input(date_time)
date_time = self.parse_input_timezone(date_time, local=local, local_timezone=local_timezone)
self.time_object_utc = date_time[0]
self.time_object_local = date_time[1]
# move reference backward or forward in time in increments
def move_reference_point(self,
period="day", # unit to move time by: minutes, hours, days, weeks, fortnights, months, quarters or years
n_periods=1, # the number of units to move by
local=True, # move from the local or utc reference?
return_string=False # return string
):
# move time point
if period == "minute":
self.time_object_utc = self.time_object_utc + relativedelta(minutes=n_periods)
elif period == "hour":
self.time_object_utc = self.time_object_utc + relativedelta(hours=n_periods)
elif period == "day":
self.time_object_utc = self.time_object_utc + relativedelta(days=n_periods)
elif period == "week":
self.time_object_utc = self.time_object_utc + relativedelta(weeks=n_periods)
elif period == "fortnight":
self.time_object_utc = self.time_object_utc + relativedelta(weeks=(2 * n_periods))
elif period == "month":
self.time_object_utc = self.time_object_utc + relativedelta(months=n_periods)
elif period == "quarter":
self.time_object_utc = self.time_object_utc + relativedelta(months=(3 * n_periods))  # a quarter is three months
elif period == "year":
self.time_object_utc = self.time_object_utc + relativedelta(years=n_periods)
else:
print "you have entered and invalid time period"
return None
# update local time object
self.time_object_local = self.time_object_utc.astimezone(self.local_timezone)
if return_string:
return self.time_object_utc.strftime('%Y-%m-%d %H:%M:%S')
else:
return None
# get start and end time for a time range up to the reference point
def get_range(self,
# date_time='current', # date_time to get range for, default to self.time_object_local
period='day', # unit to get the range for: day, week, fortnight, month, quarter or year
n_periods=1, # the number of units to cover
return_string=True, # return strings or objects
local=True # for local timezone or utc
):
# get reference date
date_time = self.time_object_local if local is True else self.time_object_utc
# end time of reference point
if period == "month":
range_end_day = calendar.monthrange(date_time.year, date_time.month)[1]
else:
range_end_day = date_time.day
range_end = datetime(date_time.year, date_time.month, range_end_day) \
+ relativedelta(days=1) \
- relativedelta(microseconds=1)
# adjustment to account for months with < 31 days
# if range_end.month == 2:
# adjustment = relativedelta(days=3)
# elif range_end.day == 30:
# adjustment = relativedelta(days=1)
# else:
# adjustment = relativedelta(days=0)
# start time of range
if period == "day":
range_start = range_end - relativedelta(days=n_periods)
elif period == "week":
range_start = range_end - relativedelta(weeks=n_periods)
elif period == "fortnight":
range_start = range_end - relativedelta(weeks=(2 * n_periods))
elif period == "month":
n_periods -= 1
range_start_month = (range_end - relativedelta(months=n_periods)).month
range_start_year = (range_end - relativedelta(months=n_periods)).year
range_start = datetime(range_start_year, range_start_month, 1)
elif period == "quarter":
n_periods -= 1
range_start_month = (range_end - relativedelta(months=(3*n_periods))).month
range_start_year = (range_end - relativedelta(months=(3*n_periods))).year
range_start = datetime(range_start_year, range_start_month, 1)
elif period == "year":
range_start = range_end - relativedelta(years=n_periods)
else:
print "you entered an invalid time period"
return None
# adjust start
range_start = range_start + relativedelta(microseconds=1)
# save internally
self.range_start = range_start
self.range_end = range_end
# return object or string result
if return_string is True:
return (range_start.strftime('%Y-%m-%d %H:%M:%S'), range_end.strftime('%Y-%m-%d %H:%M:%S'))
else:
return (range_start, range_end)
# move reference to next week, month or quarter end
def move_reference_to_next(self,
next_time='friday', # any weekday name or 'month' (quarter, half and year are not yet implemented)
local=True
):
weekdays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
# utc or local
date_time = self.time_object_local if local is True else self.time_object_utc
# get end of current day
day_end = self.get_range(period='day', local=local)[1]
# reset reference point to end of current day (to set end of day time)
self.set_reference_point(date_time=day_end, local=local)
# move reference to targets
if next_time == 'month':
# calculate difference in days to end of month
current_day = date_time.day
month_end_day = calendar.monthrange(date_time.year, date_time.month)[1]
move = month_end_day - current_day
# move reference time to end of month
self.move_reference_point(period='day', n_periods=move, local=local)
elif next_time in weekdays:
# calculate difference in days to the next target weekday
move = self.days_to_day_number(target_day=next_time, current_date_time=date_time)
# move reference time to the next target weekday
self.move_reference_point(period='day', n_periods=move, local=local)
# unfinished - add quarter, half and year
else:
print "You chose an invalid option for the next time to move to"
return None
# output strings for reference date/time
def return_string(self, format='datetime', local=True):
result = self.time_object_local if local is True else self.time_object_utc
return result.strftime('%Y-%m-%d %H:%M:%S') if format == 'datetime' else result.strftime('%Y-%m-%d')
# ######################################################
# # derive date ranges and construct sequential reference tables
# ######################################################
# generates a sequence of date ranges (start and end) for a specified period and returns a pandas DataFrame
def generate_sequence(self,
period='month', # time frequency
n_periods=30, # number of rows returned
local=True, # for local time or utc
week_end='friday', # end day for week-based frequencies
offset=0 # how far to move the reference time from the front of the results time series
):
# store reference time
reference = self.time_object_utc
# apply reference offset
self.move_reference_point(period=period, n_periods=offset, local=local)
# for week-based frequencies move to end day
if period in ['week', 'fortnight']:
self.move_reference_to_next(next_time=week_end, local=True)
# create results table
result = pd.DataFrame(columns=['start', 'end'])
# iterate time and add records to table
for n in xrange(n_periods):
# account for inconsistent number of days in months, not applicable to other periods with equal item counts
if period == 'month':
self.move_reference_to_next(next_time=period, local=True)
# get the start/end of the period
self.get_range(period=period, n_periods=1)
# save sequence range start and end
if n == 0:
self.sequence_range_end = self.range_end.strftime('%Y-%m-%d %H:%M:%S')
if n == (n_periods - 1):
self.sequence_range_start = self.range_start.strftime('%Y-%m-%d %H:%M:%S')
# add rows to dataframe
result.loc[n] = [self.range_start.strftime('%Y-%m-%d %H:%M:%S'), self.range_end.strftime('%Y-%m-%d %H:%M:%S')]
# move reference point
self.move_reference_point(period=period, n_periods=-1, local=local)
# restore reference time
self.set_reference_point(date_time=reference, local=False)
return result
######################################################
# class helper functions
######################################################
# check type of input and return date object
def parse_datetime_input(self, input):
# try to coerce to datetime object or return error
try:
if isinstance(input, datetime):
return input
else:
return datetime(int(input[:4]),
int(input[5:7]),
int(input[8:10]),
int(input[11:13]),
int(input[14:16]),
int(input[17:19]))
except ValueError:
print "input argument to parse filter must be in form 'YYYY-MM-DD HH:MM:SS' or a datetime object"
# returns the utc and local datetime objects for a given datetime input
def parse_input_timezone(self, input, local=True, local_timezone='Australia/Brisbane'):
# set local timezone
self.local_timezone = timezone(local_timezone)
# ! important
# Using the tzinfo argument of the standard datetime constructors does not work with pytz for many
# timezones; use localize() instead (see the commented sketch after this class). http://pytz.sourceforge.net/
# return utc and local datetime objects
if local is False:
return (input.replace(tzinfo=utc), input.replace(tzinfo=utc).astimezone(self.local_timezone))
else:
return (self.local_timezone.localize(input).astimezone(utc), self.local_timezone.localize(input))
# function to calculate days to a target day name (proxy by weekday number) from the current day
def days_to_day_number(self, target_day='friday', current_date_time=datetime.now()):
# The datetime date.weekday method represents Monday through Sunday as 0 through 6
days_by_number = {'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3, 'friday': 4, 'saturday': 5, 'sunday': 6}
target = days_by_number[target_day]
current = current_date_time.weekday()
# find days to next target weekday
if target >= current:
return target - current
else:
return (target + 7) - current
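# Illustrative sketch (not part of the original class) of the pytz caveat noted
# in parse_input_timezone: attach a pytz timezone with localize() rather than
# via the tzinfo argument or replace().
#
# from datetime import datetime
# from pytz import timezone, utc
# brisbane = timezone('Australia/Brisbane')
# naive = datetime(2020, 1, 1, 9, 0, 0)
# good = brisbane.localize(naive) # correct +10:00 offset
# bad = naive.replace(tzinfo=brisbane) # may silently pick an obsolete LMT offset
# good_utc = good.astimezone(utc)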
######################################################
# dev/test
######################################################
# ed = EasyDate()
# print ed
#
# ed.get_range(period='month')
#
# ed.move_reference_to_next(next_time='month')
# print ed
#
# ed.move_reference_point(period='month', n_periods=-1)
# print ed
# ed.get_range(period='month')
#
# ed.time_object_local - relativedelta(months=2)
#
# calendar.monthrange(ed.time_object_local.year, ed.time_object_local.month)[1]
# # time range generator for time periods
# def time_range_generator(time_period='day', n_periods=30):
# ed = EasyDate()
#
# # if month or week move to the next week/month end
# if time_period == 'week':
# ed.move_reference_to_next(next_time='friday')
#
# # generate outputs
# n_iter = 0
# while n_iter < n_periods:
# ed.move_reference_point(period=time_period, n_periods=-1)
# yield ed.get_range(period=time_period)
# n_iter += 1
#
#
# trg = time_range_generator(time_period='month')
# next(trg)
#
# ed = EasyDate()
# ed.move_reference_point(period='week', n_periods=1)
# ed.move_reference_to_next(next_time='friday')
# print ed
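# Hypothetical usage sketch for generate_sequence (not part of the original
# file); it assumes the class above is importable as EasyDate.
#
# ed = EasyDate()
# frame = ed.generate_sequence(period='month', n_periods=6)
# print frame # pandas DataFrame with 'start'/'end' strings, one row per month
# print ed.sequence_range_start, ed.sequence_range_end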
|
|
import itertools
import threading
import unittest2
from nose.tools import *
from gutter.client.arguments import Container as BaseArgument
from gutter.client import arguments
from gutter.client.models import Switch, Manager, Condition
from durabledict import MemoryDict
from durabledict.base import DurableDict
from gutter.client import signals
import mock
from exam.decorators import fixture, before
from exam.cases import Exam
class ManagerMixin(Exam):
@fixture
def manager(self):
return Manager(MemoryDict())
class EncodingDict(DurableDict):
__last_updated = 0
def last_updated(self):
return self.__last_updated
def unbound_method():
pass
class Argument(object):
def bar(self):
pass
class MOLArgument(BaseArgument):
applies = True
foo = arguments.Value(lambda self: 42)
class TestSwitch(ManagerMixin, unittest2.TestCase):
possible_properties = [
('state', (Switch.states.DISABLED, Switch.states.SELECTIVE)),
('compounded', (True, False)),
('concent', (True, False))
]
def test_switch_name_is_immutable(self):
switch = Switch('foo')
with self.assertRaises(AttributeError):
switch.name = 'bar'
def test_switch_has_state_constants(self):
self.assertTrue(Switch.states.DISABLED)
self.assertTrue(Switch.states.SELECTIVE)
self.assertTrue(Switch.states.GLOBAL)
def test_no_switch_state_is_equal_to_another(self):
states = (Switch.states.DISABLED, Switch.states.SELECTIVE,
Switch.states.GLOBAL)
eq_(list(states), list(set(states)))
def test_switch_constructs_with_a_name_attribute(self):
eq_(Switch('foo').name, 'foo')
def test_switch_has_label(self):
ok_(Switch('foo').label is None)
def test_switch_can_be_constructed_with_a_label(self):
eq_(Switch('foo', label='A label').label, 'A label')
def test_switch_has_description(self):
ok_(Switch('foo').description is None)
def test_switch_can_be_constructed_with_a_description(self):
eq_(Switch('foo', description='A description').description, 'A description')
def test_switch_strs_the_name_argument(self):
eq_(Switch(name=12345).name, '12345')
def test_switch_state_defaults_to_disabled(self):
eq_(Switch('foo').state, Switch.states.DISABLED)
def test_switch_state_can_be_changed(self):
switch = Switch('foo')
old_state = switch.state
switch.state = Switch.states.GLOBAL
eq_(switch.state, Switch.states.GLOBAL)
ok_(old_state is not switch.state)
def test_switch_compounded_defaults_to_false(self):
eq_(Switch('foo').compounded, False)
def test_swtich_can_be_constructed_with_a_state(self):
switch = Switch(name='foo', state=Switch.states.GLOBAL)
eq_(switch.state, Switch.states.GLOBAL)
def test_swtich_can_be_constructed_with_a_compounded_val(self):
switch = Switch(name='foo', compounded=True)
eq_(switch.compounded, True)
def test_conditions_defaults_to_an_empty_list(self):
eq_(Switch('foo').conditions, [])
def test_condtions_can_be_added_and_removed(self):
switch = Switch('foo')
condition = lambda: False
ok_(condition not in switch.conditions)
switch.conditions.append(condition)
ok_(condition in switch.conditions)
switch.conditions.remove(condition)
ok_(condition not in switch.conditions)
def test_parent_property_defaults_to_none(self):
eq_(Switch('foo').parent, None)
def test_can_be_constructed_with_parent(self):
eq_(Switch('dog:foo').parent, 'dog')
def test_concent_defaults_to_true(self):
eq_(Switch('foo').concent, True)
def test_can_be_constructed_with_concent(self):
eq_(Switch('foo', concent=False).concent, False)
def test_switch_manager_defaults_to_none(self):
eq_(Switch('foo').manager, None)
def test_switch_can_be_constructed_witn_a_manager(self):
eq_(Switch('foo', manager=self.manager).manager, self.manager)
@mock.patch('gutter.client.signals.switch_checked')
def test_switch_enabed_for_calls_switch_checked_signal(self, signal):
switch = Switch('foo')
switch.enabled_for(True)
signal.call.assert_called_once_with(switch)
@mock.patch('gutter.client.signals.switch_active')
def test_switch_enabed_for_calls_switch_active_signal_when_enabled(self, signal):
switch = Switch('foo', state=Switch.states.GLOBAL)
ok_(switch.enabled_for('causing input'))
signal.call.assert_called_once_with(switch, 'causing input')
@mock.patch('gutter.client.signals.switch_active')
def test_switch_enabed_for_skips_switch_active_signal_when_not_enabled(self, signal):
switch = Switch('foo', state=Switch.states.DISABLED)
eq_(switch.enabled_for('causing input'), False)
eq_(signal.call.called, False)
def test_switches_are_equal_if_they_have_the_same_properties(self):
a = Switch('a') # must init with the same name as name is immutable
b = Switch('a')
for prop, (a_value, b_value) in self.possible_properties:
setattr(a, prop, a_value)
setattr(b, prop, b_value)
self.assertNotEqual(a, b, "expected %s to not be equals" % prop)
setattr(b, prop, a_value)
eq_(a, b, "expected %s to be equal" % prop)
def test_switches_are_still_equal_with_different_managers(self):
a = Switch('a')
b = Switch('a')
eq_(a, b)
a.manager = 'foo'
b.manager = 'bar'
eq_(a, b)
class TestSwitchChanges(ManagerMixin, unittest2.TestCase):
@fixture
def switch(self):
return Switch('foo')
def changes_dict(self, previous, current):
return dict(previous=previous, current=current)
def test_switch_is_not_changed_by_default(self):
ok_(Switch('foo').changed is False)
def test_switch_is_changed_if_property_changes(self):
ok_(self.switch.changed is False)
self.switch.state = 'another name'
ok_(self.switch.changed is True)
def test_switch_reset_causes_switch_to_reset_change_tracking(self):
self.switch.state = 'another name'
ok_(self.switch.changed is True)
self.switch.reset()
ok_(self.switch.changed is False)
def test_switch_changes_returns_changes(self):
eq_(self.switch.changes, {})
self.switch.state = 'new name'
eq_(
self.switch.changes,
dict(state=self.changes_dict(1, 'new name'))
)
self.switch.concent = False
eq_(self.switch.changes,
dict(
state=self.changes_dict(1, 'new name'),
concent=self.changes_dict(True, False)
)
)
class TestCondition(unittest2.TestCase):
def argument_dict(name):
return dict(
module='module%s' % name,
klass='klass%s' % name,
func='func%s' % name
)
possible_properties = [
('argument_dict', (argument_dict('1'), argument_dict('2'))),
('operator', ('o1', 'o2')),
('negative', (False, True))
]
@fixture
def operator(self):
m = mock.Mock(name='operator')
m.applies_to.return_value = True
return m
@fixture
def condition(self):
return Condition(MOLArgument, 'foo', self.operator)
@fixture
def input(self):
return mock.Mock(name='input')
def test_returns_results_from_calling_operator_with_argument_value(self):
self.condition.call(self.input)
self.operator.applies_to.assert_called_once_with(42)
def test_condition_can_be_negated(self):
eq_(self.condition.call(self.input), True)
self.condition.negative = True
eq_(self.condition.call(self.input), False)
def test_can_be_negated_via_init_argument(self):
condition = Condition(MOLArgument, 'foo', self.operator)
eq_(condition.call(self.input), True)
condition = Condition(MOLArgument, 'foo', self.operator, negative=True)
eq_(condition.call(self.input), False)
def test_if_apply_explodes_it_returns_false(self):
self.operator.applies_to.side_effect = Exception
eq_(self.condition.call(self.input), False)
def test_returns_false_if_argument_does_not_apply_to_input(self):
self.condition.argument = mock.Mock()
eq_(self.condition.call(self.input), True)
self.condition.argument.return_value.applies = False
eq_(self.condition.call(self.input), False)
def test_if_input_is_NONE_it_returns_false(self):
eq_(self.condition.call(Manager.NONE_INPUT), False)
@mock.patch('gutter.client.signals.condition_apply_error')
def test_if_apply_explodes_it_signals_condition_apply_error(self, signal):
error = Exception('boom!')
inpt = self.input
self.operator.applies_to.side_effect = error
self.condition.call(inpt)
signal.call.assert_called_once_with(self.condition, inpt, error)
def test_str_returns_argument_and_str_of_operator(self):
def local_str(self):
return 'str of operator'
self.operator.__str__ = local_str
eq_(str(self.condition), "MOLArgument.foo is str of operator")
def test_equals_if_has_the_same_properties(self):
a = Condition(Argument, 'bar', bool)
b = Condition(Argument, 'bar', bool)
for prop, (a_val, b_val) in self.possible_properties:
setattr(a, prop, a_val)
setattr(b, prop, b_val)
self.assertNotEqual(a, b)
setattr(b, prop, a_val)
eq_(a, b)
class SwitchWithConditions(object):
@fixture
def switch(self):
switch = Switch('parent:with conditions', state=Switch.states.SELECTIVE)
switch.conditions.append(self.pessamistic_condition)
switch.conditions.append(self.pessamistic_condition)
return switch
@fixture
def parent_switch(self):
switch = Switch('parent', state=Switch.states.DISABLED)
return switch
@property
def pessamistic_condition(self):
mck = mock.MagicMock()
mck.call.return_value = False
mck.argument.COMPATIBLE_TYPE = str
return mck
class ConcentTest(Exam, SwitchWithConditions, unittest2.TestCase):
@fixture
def manager(self):
return Manager(storage=MemoryDict())
@fixture
def parent(self):
p = mock.Mock()
p.enabled_for.return_value = False
return p
@before
def make_all_conditions_true(self):
self.make_all_conditions(True)
@before
def register_switches(self):
self.manager.register(self.parent_switch)
self.manager.register(self.switch)
def make_all_conditions(self, val):
for cond in self.switch.conditions:
cond.call.return_value = val
def test_with_concent_only_enabled_if_parent_is_too(self):
self.manager.register(self.switch)
parent = self.manager.switch(self.switch.parent)
eq_(parent.enabled_for('input'), False)
eq_(self.manager.active('parent:with conditions', 'input'), False)
parent.state = Switch.states.GLOBAL
eq_(self.manager.active('parent:with conditions', 'input'), True)
def test_without_concent_ignores_parents_enabled_status(self):
self.switch.concent = False
parent = self.manager.switch(self.switch.parent)
eq_(parent.enabled_for('input'), False)
eq_(self.switch.enabled_for('input'), True)
self.make_all_conditions(False)
eq_(self.switch.enabled_for('input'), False)
class DefaultConditionsTest(SwitchWithConditions, unittest2.TestCase):
def test_enabled_for_is_true_if_any_conditions_are_true(self):
ok_(self.switch.enabled_for('input') is False)
self.switch.conditions[0].call.return_value = True
ok_(self.switch.enabled_for('input') is True)
def test_is_true_when_state_is_global(self):
eq_(self.switch.enabled_for('input'), False)
self.switch.state = Switch.states.GLOBAL
eq_(self.switch.enabled_for('input'), True)
def test_is_false_when_state_is_disabled(self):
self.switch.conditions[0].call.return_value = True
eq_(self.switch.enabled_for('input'), True)
self.switch.state = Switch.states.DISABLED
eq_(self.switch.enabled_for('input'), False)
class CompoundedConditionsTest(Exam, SwitchWithConditions, unittest2.TestCase):
@before
def make_switch_compounded(self):
self.switch.compounded = True
def test_enabled_if_all_conditions_are_true(self):
ok_(self.switch.enabled_for('input') is False)
self.switch.conditions[0].call.return_value = True
ok_(self.switch.enabled_for('input') is False)
self.switch.conditions[1].call.return_value = True
ok_(self.switch.enabled_for('input') is True)
class ManagerTest(unittest2.TestCase):
storage_with_existing_switches = {
'default.existing': 'switch',
'default.another': 'valuable switch'
}
expected_switches_from_storage = ['switch', 'valuable switch']
namespace_base = []
@fixture
def mockstorage(self):
return mock.MagicMock(dict)
@fixture
def manager(self):
return Manager(storage=self.mockstorage)
@fixture
def switch(self):
switch = mock.Mock(spec=Switch)
switch.changes = {}
# switch.parent = None
switch.name = 'foo'
switch.manager = None
return switch
def namespaced(self, *names):
parts = itertools.chain(self.manager.namespace, names)
return self.manager.namespace_separator.join(parts)
def test_autocreate_defaults_to_false(self):
eq_(Manager(storage=dict()).autocreate, False)
def test_autocreate_can_be_passed_to_init(self):
eq_(Manager(storage=dict(), autocreate=True).autocreate, True)
def test_namespace_defaults_to_default(self):
eq_(Manager(storage=dict()).namespace, ['default'])
def test_namespace_can_be_set_on_construction(self):
eq_(Manager(storage=dict(), namespace='foo').namespace, ['foo'])
def test_register_adds_switch_to_storge_keyed_by_its_name(self):
self.manager.register(self.switch)
self.mockstorage.__setitem__.assert_called_once_with(
self.namespaced(self.switch.name),
self.switch
)
def test_register_adds_self_as_manager_to_switch(self):
ok_(self.switch.manager is not self.manager)
self.manager.register(self.switch)
ok_(self.switch.manager is self.manager)
def test_uses_switches_from_storage_on_itialization(self):
self.manager.storage = self.storage_with_existing_switches
self.assertItemsEqual(
self.manager.switches,
self.expected_switches_from_storage
)
def test_update_tells_manager_to_register_with_switch_updated_signal(self):
self.manager.register = mock.Mock()
self.manager.update(self.switch)
self.manager.register.assert_called_once_with(self.switch, signal=signals.switch_updated)
@mock.patch('gutter.client.signals.switch_updated')
def test_update_calls_the_switch_updateed_signal(self, signal):
self.manager.update(self.switch)
signal.call.assert_called_once_with(self.switch)
def test_manager_resets_switch_dirty_tracking(self):
self.manager.update(self.switch)
self.switch.reset.assert_called_once_with()
def test_manager_properties_not_shared_between_threads(self):
manager = Manager(storage=self.mockstorage, autocreate=True)
def change_autocreate_to_false():
manager.autocreate = False
threading.Thread(target=change_autocreate_to_false).start()
eq_(manager.autocreate, True)
def test_can_be_constructed_with_inputs(self):
eq_(
Manager(storage=self.mockstorage, inputs=[3]).inputs,
[3]
)
def test_namespaced_returns_new_manager_only_different_by_namespace(self):
parent = self.manager
child = self.manager.namespaced('ns')
grandchild = child.namespaced('other')
self.assertNotEqual(parent.namespace, child.namespace)
self.assertNotEqual(child.namespace, grandchild.namespace)
child_ns_list = list(itertools.chain(self.namespace_base, ['ns']))
grandchild_ns_list = list(
itertools.chain(self.namespace_base, ['ns', 'other'])
)
eq_(child.namespace, child_ns_list)
eq_(grandchild.namespace, grandchild_ns_list)
properties = (
'storage',
'autocreate',
'inputs',
'switch_class'
)
for decendent_manager in (child, grandchild):
for prop in properties:
eq_(getattr(decendent_manager, prop), getattr(parent, prop))
def test_getitem_proxies_to_storage_getitem(self):
eq_(
self.manager['foo'],
self.manager.storage.__getitem__.return_value
)
self.manager.storage.__getitem__.assert_called_once_with(
self.namespaced('foo')
)
class NamespacedManagertest(ManagerTest):
storage_with_existing_switches = {
'a.b.brother': 'brother switch',
'a.b.sister': 'sister switch',
'a.b.c.grandchild': 'grandchild switch',
'a.c.cousin': 'cousin switch',
}
expected_switches_from_storage = [
'brother switch',
'sister switch',
'grandchild switch'
]
namespace_base = ['a', 'b']
@fixture
def manager(self):
return Manager(storage=self.mockstorage, namespace=['a', 'b'])
class ActsLikeManager(object):
def namespaced(self, *names):
parts = itertools.chain(self.manager.namespace, names)
return self.manager.key_separator.join(parts)
@fixture
def manager(self):
return Manager(storage=MemoryDict())
@fixture
def test_switch(self):
return self.new_switch('test')
def new_switch(self, name, parent=None):
return Switch(name=name, parent=parent)
def make_and_register_switch(self, name, parent=None):
switch = self.new_switch(name, parent)
self.manager.register(switch)
return switch
def mock_and_register_switch(self, name):
switch = mock.Mock(name=name)
switch.name = name
# switch.parent = None
switch.get_parent.return_value = None
switch.children = []
self.manager.register(switch)
return switch
def test_switches_list_registed_switches(self):
eq_(self.manager.switches, [])
self.manager.register(self.test_switch)
eq_(self.manager.switches, [self.test_switch])
def test_active_raises_exception_if_no_switch_found_with_name(self):
assert_raises(ValueError, self.manager.active, 'junk')
def test_unregister_removes_a_switch_from_storage_with_name(self):
switch = self.make_and_register_switch('foo')
ok_(switch in self.manager.switches)
self.manager.unregister(switch.name)
ok_(switch not in self.manager.switches)
def test_unregister_can_remove_if_given_switch_instance(self):
switch = self.make_and_register_switch('foo')
ok_(switch in self.manager.switches)
self.manager.unregister(switch)
ok_(switch not in self.manager.switches)
def test_register_does_not_set_parent_by_default(self):
switch = self.make_and_register_switch('foo')
eq_(switch.parent, None)
def test_register_sets_parent_on_switch_if_there_is_one(self):
parent = self.make_and_register_switch('movies')
child = self.make_and_register_switch('movies:jaws')
eq_(child.parent, parent.name)
def test_register_adds_self_to_parents_children(self):
parent = self.make_and_register_switch('movies')
child = self.make_and_register_switch('movies:jaws')
eq_(self.manager.get_children(parent.name), [child.name])
sibling = self.make_and_register_switch('movies:jaws2')
eq_(set(self.manager.get_children(parent.name)), set([child.name, sibling.name]))
def test_register_raises_value_error_for_blank_name(self):
with self.assertRaises(ValueError):
self.make_and_register_switch('')
def test_switch_returns_switch_from_manager_with_name(self):
switch = self.make_and_register_switch('foo')
eq_(switch, self.manager.switch('foo'))
def test_switch_returns_switch_with_manager_assigned(self):
switch = self.new_switch('foo')
self.manager.register(switch)
switch.manager = None
eq_(self.manager, self.manager.switch('foo').manager)
def test_swich_raises_valueerror_if_no_switch_by_name(self):
assert_raises(ValueError, self.manager.switch, 'junk')
def test_unregister_removes_all_child_switches_too(self):
grandparent = self.make_and_register_switch('movies')
parent = self.make_and_register_switch('movies:star_wars')
child1 = self.make_and_register_switch('movies:star_wars:a_new_hope')
child2 = self.make_and_register_switch('movies:star_wars:return_of_the_jedi')
great_uncle = self.make_and_register_switch('books')
ok_(grandparent in self.manager.switches)
ok_(parent in self.manager.switches)
ok_(child1 in self.manager.switches)
ok_(child2 in self.manager.switches)
ok_(great_uncle in self.manager.switches)
self.manager.unregister(grandparent.name)
ok_(grandparent not in self.manager.switches)
ok_(parent not in self.manager.switches)
ok_(child1 not in self.manager.switches)
ok_(child2 not in self.manager.switches)
ok_(great_uncle in self.manager.switches)
@mock.patch('gutter.client.signals.switch_unregistered')
def test_register_signals_switch_registered_with_switch(self, signal):
switch = self.make_and_register_switch('foo')
self.manager.unregister(switch.name)
signal.call.assert_called_once_with(switch)
class EmptyManagerInstanceTest(ActsLikeManager, unittest2.TestCase):
def test_input_accepts_variable_input_args(self):
eq_(self.manager.inputs, [])
self.manager.input('input1', 'input2')
eq_(self.manager.inputs, ['input1', 'input2'])
def test_flush_clears_all_inputs(self):
self.manager.input('input1', 'input2')
ok_(len(self.manager.inputs) == 2)
self.manager.flush()
ok_(len(self.manager.inputs) == 0)
def test_can_pass_extra_inputs_to_check_enabled_for_on(self):
switch = self.mock_and_register_switch('foo')
additional_input = mock.Mock()
self.manager.active('foo', additional_input)
switch.enabled_for_all.assert_called_once_with(additional_input)
def test_checks_against_NONE_input_if_no_inputs(self):
switch = self.mock_and_register_switch('global')
eq_(self.manager.inputs, [])
self.manager.active('global')
switch.enabled_for_all.assert_called_once_with(Manager.NONE_INPUT)
class NamespacedEmptyManagerInstanceTest(EmptyManagerInstanceTest):
@fixture
def manager(self):
return Manager(storage=MemoryDict(), namespace=['a', 'b'])
class ManagerWithInputTest(Exam, ActsLikeManager, unittest2.TestCase):
def build_and_register_switch(self, name, enabled_for=False):
switch = Switch(name)
switch.enabled_for = mock.Mock(return_value=enabled_for)
self.manager.register(switch)
return switch
@before
def add_to_inputs(self):
self.manager.input('input 1', 'input 2')
def test_returns_boolean_if_named_switch_is_enabled_for_any_input(self):
self.build_and_register_switch('disabled', enabled_for=False)
eq_(self.manager.active('disabled'), False)
self.build_and_register_switch('enabled', enabled_for=True)
eq_(self.manager.active('disabled'), False)
def test_raises_exception_if_invalid_switch_name_created(self):
self.assertRaisesRegexp(ValueError, 'switch named', self.manager.active, 'junk')
def test_autocreates_disabled_switch_when_autocreate_is_true(self):
eq_(self.manager.switches, [])
assert_raises(ValueError, self.manager.active, 'junk')
self.manager.autocreate = True
eq_(self.manager.active('junk'), False)
ok_(len(self.manager.switches) == 1)
eq_(self.manager.switches[0].state, Switch.states.DISABLED)
def test_active_extra_inputs_considered_in_check_with_global_inputs(self):
switch = self.build_and_register_switch('foo')
self.manager.active('foo', 'input 3')
calls = [mock.call(c) for c in ('input 1', 'input 2', 'input 3')]
switch.enabled_for.assert_has_calls(calls)
def test_active_with_extra_inputs_only_considers_extra_when_only_kw_arg_is_used(self):
switch = self.build_and_register_switch('foo')
self.manager.active('foo', 'input 3', exclusive=True)
switch.enabled_for.assert_called_once_with('input 3')
class NamespacedManagerWithInputTest(ManagerWithInputTest):
@fixture
def manager(self):
return Manager(storage=MemoryDict(), namespace=['a', 'b'])
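# Minimal usage sketch (added for illustration, not part of the original test
# suite); it only exercises API calls already covered by the tests above.
#
# manager = Manager(storage=MemoryDict())
# switch = Switch('feature:beta', state=Switch.states.GLOBAL)
# manager.register(switch)
# assert manager.active('feature:beta') is True # GLOBAL switches are active for any input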
|
|
from urllib.parse import urlparse
from django.contrib.auth.forms import PasswordChangeForm, SetPasswordForm
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.tokens import default_token_generator
from django.core import mail
from django.core.cache import cache
from django.urls import reverse
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from cellcounter.cc_kapi.factories import UserFactory, KeyboardFactory
from .forms import EmailUserCreationForm, PasswordResetForm
from .utils import read_signup_email
from .views import PasswordResetConfirmView
class TestRegistrationView(TestCase):
def setUp(self):
self.request_factory = RequestFactory()
def test_get(self):
response = self.client.get(reverse('register'))
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.context['form'], EmailUserCreationForm)
def test_valid(self):
data = {'username': '123', 'email': 'joe@example.org', 'password1': 'test', 'password2': 'test', 'tos': True}
response = self.client.post(reverse('register'), data=data, follow=True)
self.assertRedirects(response, reverse('new_count'))
user = User.objects.get(username='123')
messages = list(response.context['messages'])
self.assertEqual("Successfully registered, you are now logged in! <a href='/accounts/%s/'>View your profile</a>"
% user.id,
messages[0].message)
self.assertEqual(user, response.context['user'])
def test_invalid(self):
data = {'username': '123', 'email': 'joe@example.org', 'password1': 'test', 'password2': 'test', 'tos': False}
response = self.client.post(reverse('register'), data=data)
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'tos', 'You must agree our Terms of Service')
self.assertEqual(AnonymousUser(), response.context['user'])
@override_settings(RATELIMIT_ENABLE=True)
def test_ratelimit_registration(self):
cache.clear()
data = {'username': '123', 'email': 'joe@example.org', 'password1': 'test', 'password2': 'test', 'tos': True}
self.client.post(reverse('register'), data)
self.client.logout()
data['username'] = 'Another'
self.client.post(reverse('register'), data, follow=True)
self.client.logout()
data['username'] = 'Another2'
response = self.client.post(reverse('register'), data, follow=True)
messages = list(response.context['messages'])
self.assertEqual(1, len(messages))
self.assertEqual('You have been rate limited', messages[0].message)
@override_settings(RATELIMIT_ENABLE=True)
def test_ratelimit_invalid_form(self):
cache.clear()
data = {'username': '123', 'email': '1234', 'password1': 'test', 'password2': 'test', 'tos': True}
self.client.post(reverse('register'), data)
response = self.client.post(reverse('register'), data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertNotIn('You have been rate limited', response.content.decode("utf-8"))
class TestPasswordChangeView(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.user = UserFactory()
self.valid_data = {'old_password': 'test', 'new_password1': 'new', 'new_password2': 'new'}
self.invalid_data = {'old_password': 'test', 'new_password1': 'test', 'new_password2': '1234'}
def test_logged_out_get_redirect(self):
response = self.client.get(reverse('change-password'))
self.assertRedirects(response,
"%s?next=%s" % (reverse('login'), reverse('change-password')))
def test_logged_out_post_redirect(self):
response = self.client.post(reverse('change-password'), self.valid_data)
self.assertRedirects(response,
"%s?next=%s" % (reverse('login'), reverse('change-password')))
def test_logged_in_to_form(self):
self.client.force_login(self.user)
response = self.client.get(reverse('change-password'))
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.context['form'], PasswordChangeForm)
def test_post_valid(self):
self.client.force_login(self.user)
response = self.client.post(reverse('change-password'), data=self.valid_data, follow=True)
self.assertRedirects(response, reverse('new_count'))
messages = list(response.context['messages'])
self.assertEqual('Password changed successfully', messages[0].message)
def test_post_invalid(self):
self.client.force_login(self.user)
response = self.client.post(reverse('change-password'), data=self.invalid_data)
self.assertFormError(response, 'form', 'new_password2', "The two password fields didn't match.")
class TestUserDetailView(TestCase):
def setUp(self):
self.keyboard = KeyboardFactory()
def test_get_anonymous(self):
user2 = UserFactory()
response = self.client.get(reverse('user-detail', kwargs={'pk': user2.id}))
self.assertRedirects(response,
"%s?next=%s" % (reverse('login'), reverse('user-detail', kwargs={'pk': user2.id})))
def test_get_self(self):
self.client.force_login(self.keyboard.user)
response = self.client.get(reverse('user-detail', kwargs={'pk': self.keyboard.user.id}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user_detail'], self.keyboard.user)
self.assertEqual(len(response.context['keyboards']), 1)
def test_get_someone_else(self):
user2 = UserFactory()
self.client.force_login(self.keyboard.user)
response = self.client.get(reverse('user-detail', kwargs={'pk': user2.id}))
self.assertEqual(response.status_code, 403)
class TestUserDeleteView(TestCase):
def setUp(self):
self.user = UserFactory()
def test_get_delete_anonymous(self):
response = self.client.get(reverse('user-delete', kwargs={'pk': self.user.id}))
self.assertRedirects(response,
"%s?next=%s" % (reverse('login'), reverse('user-delete', kwargs={'pk': self.user.id})))
def test_delete_anonymous(self):
user2 = UserFactory()
response = self.client.delete(reverse('user-delete', kwargs={'pk': user2.id}))
self.assertRedirects(response,
"%s?next=%s" % (reverse('login'), reverse('user-delete', kwargs={'pk': user2.id})))
def test_get_delete_self(self):
self.client.force_login(self.user)
response = self.client.get(reverse('user-delete', kwargs={'pk': self.user.id}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'accounts/user_check_delete.html')
def test_delete_self(self):
self.client.force_login(self.user)
response = self.client.delete(reverse('user-delete', kwargs={'pk': self.user.id}), follow=True)
self.assertRedirects(response, reverse('new_count'))
self.assertEqual('User account deleted', list(response.context['messages'])[0].message)
def test_get_delete_someone_else(self):
user2 = UserFactory()
self.client.force_login(self.user)
response = self.client.get(reverse('user-delete', kwargs={'pk': user2.id}))
self.assertEqual(response.status_code, 403)
def test_delete_someone_else(self):
user2 = UserFactory()
self.client.force_login(self.user)
response = self.client.delete(reverse('user-delete', kwargs={'pk': user2.id}))
self.assertEqual(response.status_code, 403)
class TestUserUpdateView(TestCase):
def setUp(self):
self.valid_data = {'first_name': 'Jack', 'last_name': 'Example', 'email': 'test@example.org'}
self.extra_data = {'first_name': 'Joe', 'last_name': 'Example', 'email': 'test@example.org',
'username': 'invalid'}
self.invalid_data = {'first_name': 'Joe', 'last_name': 'Example', 'email': '1234'}
def test_get_update_when_anonymous(self):
user = UserFactory()
response = self.client.get(reverse('user-update', kwargs={'pk': user.id}))
self.assertRedirects(response,
"%s?next=%s" % (reverse('login'), reverse('user-update', kwargs={'pk': user.id})))
def test_post_update_when_anonymous(self):
user = UserFactory()
response = self.client.post(reverse('user-update', kwargs={'pk': user.id}), data=self.valid_data)
self.assertRedirects(response,
"%s?next=%s" % (reverse('login'), reverse('user-update', kwargs={'pk': user.id})))
def test_update_self_valid(self):
user = UserFactory()
self.client.force_login(user)
response = self.client.post(reverse('user-update', kwargs={'pk': user.id}), data=self.valid_data,
follow=True)
self.assertRedirects(response, reverse('user-detail', kwargs={'pk': user.id}))
self.assertEqual('User details updated', list(response.context['messages'])[0].message)
updated_user = User.objects.get(username=user.username)
self.assertNotEqual(updated_user.first_name, user.first_name)
self.assertNotEqual(updated_user.last_name, user.last_name)
self.assertNotEqual(updated_user.email, user.email)
def test_update_self_extra(self):
user = UserFactory()
self.client.force_login(user)
response = self.client.post(reverse('user-update', kwargs={'pk': user.id}), data=self.extra_data,
follow=True)
self.assertRedirects(response, reverse('user-detail', kwargs={'pk': user.id}))
self.assertEqual('User details updated', list(response.context['messages'])[0].message)
updated_user = User.objects.get(username=user.username)
self.assertNotEqual(updated_user.first_name, user.first_name)
self.assertNotEqual(updated_user.last_name, user.last_name)
self.assertNotEqual(updated_user.email, user.email)
self.assertEqual(updated_user.username, user.username)
def test_update_self_invalid(self):
user = UserFactory()
self.client.force_login(user)
response = self.client.post(reverse('user-update', kwargs={'pk': user.id}), data=self.invalid_data)
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')
def test_update_someone_else(self):
user = UserFactory()
user2 = UserFactory()
self.client.force_login(user)
response = self.client.post(reverse('user-update', kwargs={'pk': user2.id}))
self.assertEqual(response.status_code, 403)
class TestPasswordResetView(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.user = UserFactory()
def test_get_form(self):
response = self.client.get(reverse('password-reset'))
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.context['form'], PasswordResetForm)
self.assertTemplateUsed(response, 'accounts/reset_form.html')
def test_post_valid_email(self):
data = {'email': self.user.email}
response = self.client.post(reverse('password-reset'), data=data, follow=True)
self.assertRedirects(response, reverse('new_count'))
self.assertEqual('Reset email sent', list(response.context['messages'])[0].message)
self.assertEqual(1, len(mail.outbox))
url, path = read_signup_email(mail.outbox[0])
uidb64, token = urlparse(url).path.split('/')[-3:-1]
self.assertEqual(path, reverse('password-reset-confirm', kwargs={'uidb64': uidb64, 'token': token}))
def test_post_invalid_email(self):
data = {'email': 'invalid@example.org'}
response = self.client.post(reverse('password-reset'), data=data, follow=True)
self.assertRedirects(response, reverse('new_count'))
self.assertEqual(0, len(mail.outbox))
@override_settings(RATELIMIT_ENABLE=True)
def test_post_ratelimit(self):
for n in range(0, 5):
self.client.post(reverse('password-reset'), data={'email': self.user.email}, follow=True)
response = self.client.post(reverse('password-reset'), data={'email': self.user.email}, follow=True)
self.assertEqual(list(response.context['messages'])[0].message, 'You have been rate limited')
cache.clear()
class TestPasswordResetConfirmView(TestCase):
def setUp(self):
self.user = UserFactory()
self.valid_uidb64 = urlsafe_base64_encode(force_bytes(self.user.pk))
self.valid_data = {'new_password1': 'newpwd', 'new_password2': 'newpwd'}
self.invalid_data = {'new_password1': 'newpwd', 'new_password2': '1234'}
def _generate_token(self, user):
return default_token_generator.make_token(user)
def test_valid_user_valid(self):
"""valid_user() with valid uidb64"""
self.assertEqual(PasswordResetConfirmView().valid_user(self.valid_uidb64), self.user)
def test_valid_user_invalid(self):
"""valid_user() with invalid uidb64"""
uidb64 = urlsafe_base64_encode(force_bytes(2))
self.assertIsNone(PasswordResetConfirmView().valid_user(uidb64))
def test_valid_token_valid(self):
"""valid_token() with valid user and token"""
self.assertTrue(PasswordResetConfirmView().valid_token(self.user, self._generate_token(self.user)))
def test_valid_token_invalid_token(self):
"""valid_token() with valid user and invalid token"""
token = "AAA-AAAAAAAAAAAAAAAAAAAA"
self.assertFalse(PasswordResetConfirmView().valid_token(self.user, token))
def test_valid_token_invalid_both(self):
"""valid_token() with invalid user and invalid token"""
token = "AAA-AAAAAAAAAAAAAAAAAAAA"
self.assertFalse(PasswordResetConfirmView().valid_token(None, token))
def test_get_invalid_token(self):
token = "AAA-AAAAAAAAAAAAAAAAAAAA"
response = self.client.get(reverse('password-reset-confirm',
kwargs={'uidb64': self.valid_uidb64,
'token': token}))
self.assertEqual(response.status_code, 200)
self.assertFalse(response.context['validlink'])
self.assertIn("The password reset link was invalid, possibly because it has already been used."
" Please request a new password reset.", response.content.decode("utf-8"))
def test_get_invalid_user(self):
response = self.client.get(reverse('password-reset-confirm',
kwargs={'uidb64': urlsafe_base64_encode(force_bytes(2)),
'token': self._generate_token(self.user)}))
self.assertEqual(response.status_code, 200)
self.assertFalse(response.context['validlink'])
self.assertIn("The password reset link was invalid, possibly because it has already been used."
" Please request a new password reset.", response.content.decode("utf-8"))
def test_post_invalid_token(self):
token = "AAA-AAAAAAAAAAAAAAAAAAAA"
response = self.client.post(reverse('password-reset-confirm',
kwargs={'uidb64': self.valid_uidb64,
'token': token}),
data=self.valid_data)
self.assertEqual(response.status_code, 200)
self.assertFalse(response.context['validlink'])
self.assertIn("The password reset link was invalid, possibly because it has already been used."
" Please request a new password reset.", response.content.decode("utf-8"))
def test_get_valid(self):
token = self._generate_token(self.user)
response = self.client.get(reverse('password-reset-confirm',
kwargs={'uidb64': self.valid_uidb64,
'token': token}))
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.context['form'], SetPasswordForm)
def test_post_valid(self):
token = self._generate_token(self.user)
response = self.client.post(reverse('password-reset-confirm',
kwargs={'uidb64': self.valid_uidb64,
'token': token}),
data=self.valid_data, follow=True)
self.assertRedirects(response, reverse('new_count'))
self.assertEqual('Password reset successfully', list(response.context['messages'])[0].message)
def test_post_invalid(self):
token = self._generate_token(self.user)
response = self.client.post(reverse('password-reset-confirm',
kwargs={'uidb64': self.valid_uidb64,
'token': token}),
data=self.invalid_data)
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'new_password2', "The two password fields didn't match.")
|
|
import logging
import pandas as pd
import numpy as np
import pyprind
import six
from joblib import Parallel, delayed
import py_entitymatching.catalog.catalog_manager as cm
from py_entitymatching.blocker.blocker import Blocker
from py_entitymatching.utils.catalog_helper import log_info, get_name_for_key, add_key_column
from py_entitymatching.utils.generic_helper import rem_nan
from py_entitymatching.utils.validation_helper import validate_object_type
logger = logging.getLogger(__name__)
class AttrEquivalenceBlocker(Blocker):
"""
Blocks based on the equivalence of attribute values.
"""
def block_tables(self, ltable, rtable, l_block_attr, r_block_attr,
l_output_attrs=None, r_output_attrs=None,
l_output_prefix='ltable_', r_output_prefix='rtable_',
allow_missing=False, verbose=False, n_jobs=1):
"""Blocks two tables based on attribute equivalence.
Conceptually, this will check `l_block_attr=r_block_attr` for each tuple
pair from the Cartesian product of tables `ltable` and `rtable`. It outputs a
Pandas dataframe object with tuple pairs that satisfy the equality condition.
        The output dataframe will include the attribute '_id', the key attribute from
        ltable, the key attribute from rtable, followed by the attributes listed in
        `l_output_attrs` and `r_output_attrs` if they are specified. Each of these
        output and key attributes is prefixed with the given `l_output_prefix` and
        `r_output_prefix`. If `allow_missing` is set to `True`, then all tuple pairs
        with a missing value in the blocking attribute of at least one of the tuples
        will also be included in the output dataframe.
Further, this will update the following metadata in the catalog for the output table:
(1) key, (2) ltable, (3) rtable, (4) fk_ltable, and (5) fk_rtable.
Args:
ltable (DataFrame): The left input table.
rtable (DataFrame): The right input table.
l_block_attr (string): The blocking attribute in left table.
r_block_attr (string): The blocking attribute in right table.
l_output_attrs (list): A list of attribute names from the left
table to be included in the
output candidate set (defaults to None).
r_output_attrs (list): A list of attribute names from the right
table to be included in the
output candidate set (defaults to None).
l_output_prefix (string): The prefix to be used for the attribute names
coming from the left table in the output
candidate set (defaults to 'ltable\_').
r_output_prefix (string): The prefix to be used for the attribute names
coming from the right table in the output
candidate set (defaults to 'rtable\_').
allow_missing (boolean): A flag to indicate whether tuple pairs
with missing value in at least one of the
blocking attributes should be included in
the output candidate set (defaults to
False). If this flag is set to True, a
tuple in ltable with missing value in the
blocking attribute will be matched with
every tuple in rtable and vice versa.
verbose (boolean): A flag to indicate whether the debug information
should be logged (defaults to False).
n_jobs (int): The number of parallel jobs to be used for computation
(defaults to 1). If -1 all CPUs are used. If 0 or 1,
no parallel computation is used at all, which is useful for
debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used (where n_cpus is the total number of CPUs in the
machine). Thus, for n_jobs = -2, all CPUs but one are used.
If (n_cpus + 1 + n_jobs) is less than 1, then no parallel
computation is used (i.e., equivalent to the default).
Returns:
A candidate set of tuple pairs that survived blocking (DataFrame).
Raises:
AssertionError: If `ltable` is not of type pandas
DataFrame.
AssertionError: If `rtable` is not of type pandas
DataFrame.
AssertionError: If `l_block_attr` is not of type string.
AssertionError: If `r_block_attr` is not of type string.
            AssertionError: If `l_output_attrs` is not of type
                list.
            AssertionError: If `r_output_attrs` is not of type
                list.
            AssertionError: If the values in `l_output_attrs` are not of type
                string.
            AssertionError: If the values in `r_output_attrs` are not of type
                string.
AssertionError: If `l_output_prefix` is not of type
string.
AssertionError: If `r_output_prefix` is not of type
string.
AssertionError: If `verbose` is not of type
boolean.
AssertionError: If `allow_missing` is not of type boolean.
AssertionError: If `n_jobs` is not of type
int.
AssertionError: If `l_block_attr` is not in the ltable columns.
AssertionError: If `r_block_attr` is not in the rtable columns.
            AssertionError: If `l_output_attrs` are not in the ltable columns.
            AssertionError: If `r_output_attrs` are not in the rtable columns.
Examples:
>>> import py_entitymatching as em
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='ID')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='ID')
>>> ab = em.AttrEquivalenceBlocker()
>>> C1 = ab.block_tables(A, B, 'zipcode', 'zipcode', l_output_attrs=['name'], r_output_attrs=['name'])
# Include all possible tuple pairs with missing values
>>> C2 = ab.block_tables(A, B, 'zipcode', 'zipcode', l_output_attrs=['name'], r_output_attrs=['name'], allow_missing=True)
"""
# validate data types of input parameters
self.validate_types_params_tables(ltable, rtable,
l_output_attrs, r_output_attrs,
l_output_prefix,
r_output_prefix, verbose, n_jobs)
# validate data types of input blocking attributes
self.validate_types_block_attrs(l_block_attr, r_block_attr)
# validate data type of allow_missing
self.validate_allow_missing(allow_missing)
# validate input parameters
self.validate_block_attrs(ltable, rtable, l_block_attr, r_block_attr)
self.validate_output_attrs(ltable, rtable, l_output_attrs,
r_output_attrs)
# get and validate required metadata
log_info(logger, 'Required metadata: ltable key, rtable key', verbose)
# # get metadata
l_key, r_key = cm.get_keys_for_ltable_rtable(ltable, rtable, logger,
verbose)
# # validate metadata
cm._validate_metadata_for_table(ltable, l_key, 'ltable', logger,
verbose)
cm._validate_metadata_for_table(rtable, r_key, 'rtable', logger,
verbose)
# do blocking
# # do projection of required attributes from the tables
l_proj_attrs = self.get_attrs_to_project(l_key, l_block_attr,
l_output_attrs)
ltable_proj = ltable[l_proj_attrs]
r_proj_attrs = self.get_attrs_to_project(r_key, r_block_attr,
r_output_attrs)
rtable_proj = rtable[r_proj_attrs]
# # remove records with nans in the blocking attribute
l_df = rem_nan(ltable_proj, l_block_attr)
r_df = rem_nan(rtable_proj, r_block_attr)
        # # determine the number of processes to launch in parallel
n_procs = self.get_num_procs(n_jobs, len(l_df) * len(r_df))
if n_procs <= 1:
# single process
candset = _block_tables_split(l_df, r_df, l_key, r_key,
l_block_attr, r_block_attr,
l_output_attrs, r_output_attrs,
l_output_prefix, r_output_prefix,
allow_missing)
else:
# multiprocessing
m, n = self.get_split_params(n_procs, len(l_df), len(r_df))
l_splits = np.array_split(l_df, m)
r_splits = np.array_split(r_df, n)
c_splits = Parallel(n_jobs=m * n)(
delayed(_block_tables_split)(l, r, l_key, r_key,
l_block_attr, r_block_attr,
l_output_attrs, r_output_attrs,
l_output_prefix, r_output_prefix,
allow_missing)
for l in l_splits for r in r_splits)
candset = pd.concat(c_splits, ignore_index=True)
# if allow_missing flag is True, then compute
# all pairs with missing value in left table, and
# all pairs with missing value in right table
if allow_missing:
missing_pairs = self.get_pairs_with_missing_value(ltable_proj,
rtable_proj,
l_key, r_key,
l_block_attr,
r_block_attr,
l_output_attrs,
r_output_attrs,
l_output_prefix,
r_output_prefix)
candset = pd.concat([candset, missing_pairs], ignore_index=True)
# update catalog
key = get_name_for_key(candset.columns)
candset = add_key_column(candset, key)
cm.set_candset_properties(candset, key, l_output_prefix + l_key,
r_output_prefix + r_key, ltable, rtable)
# return candidate set
return candset
def block_candset(self, candset, l_block_attr, r_block_attr,
allow_missing=False, verbose=False, show_progress=True,
n_jobs=1):
"""Blocks an input candidate set of tuple pairs based on attribute equivalence.
Finds tuple pairs from an input candidate set of tuple pairs
such that the value of attribute l_block_attr of the left tuple in a
tuple pair exactly matches the value of attribute r_block_attr of the
right tuple in the tuple pair.
Args:
candset (DataFrame): The input candidate set of tuple pairs.
l_block_attr (string): The blocking attribute in left table.
r_block_attr (string): The blocking attribute in right table.
allow_missing (boolean): A flag to indicate whether tuple pairs
with missing value in at least one of the
blocking attributes should be included in
the output candidate set (defaults to
False). If this flag is set to True, a
tuple pair with missing value in either
blocking attribute will be retained in the
output candidate set.
verbose (boolean): A flag to indicate whether the debug
information should be logged (defaults to False).
show_progress (boolean): A flag to indicate whether progress should
be displayed to the user (defaults to True).
n_jobs (int): The number of parallel jobs to be used for computation
(defaults to 1). If -1 all CPUs are used. If 0 or 1,
no parallel computation is used at all, which is useful for
debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used (where n_cpus is the total number of CPUs in the
machine). Thus, for n_jobs = -2, all CPUs but one are used.
If (n_cpus + 1 + n_jobs) is less than 1, then no parallel
computation is used (i.e., equivalent to the default).
Returns:
A candidate set of tuple pairs that survived blocking (DataFrame).
Raises:
AssertionError: If `candset` is not of type pandas
DataFrame.
AssertionError: If `l_block_attr` is not of type string.
AssertionError: If `r_block_attr` is not of type string.
AssertionError: If `verbose` is not of type
boolean.
AssertionError: If `n_jobs` is not of type
int.
AssertionError: If `l_block_attr` is not in the ltable columns.
AssertionError: If `r_block_attr` is not in the rtable columns.
Examples:
>>> import py_entitymatching as em
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='ID')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='ID')
>>> ab = em.AttrEquivalenceBlocker()
>>> C = ab.block_tables(A, B, 'zipcode', 'zipcode', l_output_attrs=['name'], r_output_attrs=['name'])
            >>> D1 = ab.block_candset(C, 'age', 'age')
# Include all possible tuple pairs with missing values
>>> D2 = ab.block_candset(C, 'age', 'age', allow_missing=True)
# Execute blocking using multiple cores
>>> D3 = ab.block_candset(C, 'age', 'age', n_jobs=-1)
"""
# validate data types of input parameters
self.validate_types_params_candset(candset, verbose, show_progress,
n_jobs)
# validate data types of input blocking attributes
self.validate_types_block_attrs(l_block_attr, r_block_attr)
# get and validate metadata
log_info(logger, 'Required metadata: cand.set key, fk ltable, '
'fk rtable, ltable, rtable, ltable key, rtable key',
verbose)
# # get metadata
key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key = cm.get_metadata_for_candset(
candset, logger, verbose)
# # validate metadata
cm._validate_metadata_for_candset(candset, key, fk_ltable, fk_rtable,
ltable, rtable, l_key, r_key,
logger, verbose)
# validate input parameters
self.validate_block_attrs(ltable, rtable, l_block_attr, r_block_attr)
# do blocking
# # do projection before merge
l_df = ltable[[l_key, l_block_attr]]
r_df = rtable[[r_key, r_block_attr]]
# # set index for convenience
l_df = l_df.set_index(l_key, drop=False)
r_df = r_df.set_index(r_key, drop=False)
        # # determine the number of processes to launch in parallel
n_procs = self.get_num_procs(n_jobs, len(candset))
valid = []
if n_procs <= 1:
# single process
valid = _block_candset_split(candset, l_df, r_df, l_key, r_key,
l_block_attr, r_block_attr, fk_ltable,
fk_rtable, allow_missing, show_progress)
else:
c_splits = np.array_split(candset, n_procs)
valid_splits = Parallel(n_jobs=n_procs)(
delayed(_block_candset_split)(c_splits[i],
l_df, r_df,
l_key, r_key,
l_block_attr, r_block_attr,
fk_ltable, fk_rtable, allow_missing,
show_progress and i == len(
c_splits) - 1)
for i in range(len(c_splits)))
valid = sum(valid_splits, [])
# construct output table
if len(candset) > 0:
out_table = candset[valid]
else:
out_table = pd.DataFrame(columns=candset.columns)
# update the catalog
cm.set_candset_properties(out_table, key, fk_ltable, fk_rtable,
ltable, rtable)
# return the output table
return out_table
def block_tuples(self, ltuple, rtuple, l_block_attr, r_block_attr,
allow_missing=False):
"""Blocks a tuple pair based on attribute equivalence.
Args:
ltuple (Series): The input left tuple.
rtuple (Series): The input right tuple.
l_block_attr (string): The blocking attribute in left tuple.
r_block_attr (string): The blocking attribute in right tuple.
allow_missing (boolean): A flag to indicate whether a tuple pair
with missing value in at least one of the
blocking attributes should be blocked
(defaults to False). If this flag is set
to True, the pair will be kept if either
ltuple has missing value in l_block_attr
or rtuple has missing value in r_block_attr
or both.
Returns:
A status indicating if the tuple pair is blocked, i.e., the values
of l_block_attr in ltuple and r_block_attr in rtuple are different
(boolean).
Examples:
>>> import py_entitymatching as em
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='ID')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='ID')
>>> ab = em.AttrEquivalenceBlocker()
>>> status = ab.block_tuples(A.loc[0], B.loc[0], 'zipcode', 'zipcode')
"""
l_val, r_val = ltuple[l_block_attr], rtuple[r_block_attr]
if allow_missing:
if pd.isnull(l_val) or pd.isnull(r_val) or l_val == r_val:
return False
else:
return True
else:
if pd.notnull(l_val) and pd.notnull(r_val) and l_val == r_val:
return False
else:
return True
# ------------------------------------------------------------
# utility functions specific to attribute equivalence blocking
# validate the data types of the blocking attributes
def validate_types_block_attrs(self, l_block_attr, r_block_attr):
validate_object_type(l_block_attr, six.string_types, error_prefix='Blocking attribute name of left table')
validate_object_type(r_block_attr, six.string_types, error_prefix='Blocking attribute name of right table')
# validate the blocking attributes
def validate_block_attrs(self, ltable, rtable, l_block_attr, r_block_attr):
if l_block_attr not in ltable.columns:
raise AssertionError(
'Left block attribute is not in the left table')
if r_block_attr not in rtable.columns:
raise AssertionError(
'Right block attribute is not in the right table')
def get_pairs_with_missing_value(self, l_df, r_df, l_key, r_key,
l_block_attr, r_block_attr,
l_output_attrs, r_output_attrs,
l_output_prefix, r_output_prefix):
        l_df.is_copy, r_df.is_copy = False, False  # to avoid the pandas SettingWithCopyWarning
l_df['ones'] = np.ones(len(l_df))
r_df['ones'] = np.ones(len(r_df))
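        # note: the merge on the constant 'ones' column below acts as a cross
        # join, e.g. a 2-row frame merged with a 3-row frame on 'ones' yields
        # all 2 * 3 = 6 pairs, so every tuple with a missing blocking value
        # gets paired with every tuple from the other table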
# find ltable records with missing value in l_block_attr
l_df_missing = l_df[pd.isnull(l_df[l_block_attr])]
# find ltable records with no missing value in l_block_attr
l_df_no_missing = l_df[pd.notnull(l_df[l_block_attr])]
# find rtable records with missing value in r_block_attr
r_df_missing = r_df[pd.isnull(r_df[r_block_attr])]
missing_pairs_1 = pd.merge(l_df_missing, r_df, left_on='ones',
right_on='ones',
suffixes=('_ltable', '_rtable'))
missing_pairs_2 = pd.merge(l_df_no_missing, r_df_missing,
left_on='ones',
right_on='ones',
suffixes=('_ltable', '_rtable'))
missing_pairs = pd.concat([missing_pairs_1, missing_pairs_2],
ignore_index=True)
retain_cols, final_cols = _output_columns(l_key, r_key,
list(missing_pairs.columns),
l_output_attrs,
r_output_attrs,
l_output_prefix,
r_output_prefix)
missing_pairs = missing_pairs[retain_cols]
missing_pairs.columns = final_cols
return missing_pairs
def _block_tables_split(l_df, r_df, l_key, r_key, l_block_attr, r_block_attr,
l_output_attrs, r_output_attrs, l_output_prefix,
r_output_prefix, allow_missing):
# perform an inner join of the two data frames with no missing values
candset = pd.merge(l_df, r_df, left_on=l_block_attr,
right_on=r_block_attr, suffixes=('_ltable', '_rtable'))
retain_cols, final_cols = _output_columns(l_key, r_key,
list(candset.columns),
l_output_attrs, r_output_attrs,
l_output_prefix, r_output_prefix)
candset = candset[retain_cols]
candset.columns = final_cols
return candset
def _block_candset_split(c_df, l_df, r_df, l_key, r_key,
l_block_attr, r_block_attr, fk_ltable, fk_rtable,
allow_missing, show_progress):
# initialize progress bar
if show_progress:
prog_bar = pyprind.ProgBar(len(c_df))
# initialize list to keep track of valid ids
valid = []
# get the indexes for the key attributes in the candset
col_names = list(c_df.columns)
lkey_idx = col_names.index(fk_ltable)
rkey_idx = col_names.index(fk_rtable)
# create a look up table for the blocking attribute values
l_dict = {}
r_dict = {}
# iterate the rows in candset
for row in c_df.itertuples(index=False):
# # update the progress bar
if show_progress:
prog_bar.update()
# # get the value of block attributes
row_lkey = row[lkey_idx]
if row_lkey not in l_dict:
l_dict[row_lkey] = l_df.loc[row_lkey, l_block_attr]
l_val = l_dict[row_lkey]
row_rkey = row[rkey_idx]
if row_rkey not in r_dict:
r_dict[row_rkey] = r_df.loc[row_rkey, r_block_attr]
r_val = r_dict[row_rkey]
if allow_missing:
if pd.isnull(l_val) or pd.isnull(r_val) or l_val == r_val:
valid.append(True)
else:
valid.append(False)
else:
if pd.notnull(l_val) and pd.notnull(r_val) and l_val == r_val:
valid.append(True)
else:
valid.append(False)
return valid
def _output_columns(l_key, r_key, col_names, l_output_attrs, r_output_attrs,
l_output_prefix, r_output_prefix):
# retain id columns from merge
ret_cols = [_retain_names(l_key, col_names, '_ltable')]
ret_cols.append(_retain_names(r_key, col_names, '_rtable'))
# final columns in the output
fin_cols = [_final_names(l_key, l_output_prefix)]
fin_cols.append(_final_names(r_key, r_output_prefix))
# retain output attrs from merge
if l_output_attrs:
for at in l_output_attrs:
if at != l_key:
ret_cols.append(_retain_names(at, col_names, '_ltable'))
fin_cols.append(_final_names(at, l_output_prefix))
if r_output_attrs:
for at in r_output_attrs:
if at != r_key:
ret_cols.append(_retain_names(at, col_names, '_rtable'))
fin_cols.append(_final_names(at, r_output_prefix))
return ret_cols, fin_cols
def _retain_names(x, col_names, suffix):
if x in col_names:
return x
else:
return str(x) + suffix
def _final_names(col, prefix):
return prefix + str(col)
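
# Illustrative sketch (not part of the py_entitymatching API): the heart of
# attribute-equivalence blocking in _block_tables_split above is a plain pandas
# inner merge on the blocking attributes. The tiny tables and column names used
# here are hypothetical and only show the shape of the resulting candidate set.
def _demo_attr_equivalence_merge():
    left = pd.DataFrame({'ID': [1, 2, 3],
                         'zipcode': ['53703', '94016', '53703'],
                         'name': ['Ann', 'Bob', 'Cal']})
    right = pd.DataFrame({'ID': [7, 8],
                          'zipcode': ['53703', '10001'],
                          'name': ['Al', 'Cy']})
    # the inner join keeps only the pairs whose blocking values are equal,
    # i.e. left IDs 1 and 3 each paired with right ID 7 for the data above
    candset = pd.merge(left, right, left_on='zipcode', right_on='zipcode',
                       suffixes=('_ltable', '_rtable'))
    return candset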
|
|
#!/usr/bin/env python
# $Id: NameMapper.py,v 1.2 2006-12-19 13:32:47 skyostil Exp $
"""This module supports Cheetah's optional NameMapper syntax.
Overview
================================================================================
NameMapper provides a simple syntax for accessing Python data structures,
functions, and methods from Cheetah. It's called NameMapper because it 'maps'
simple 'names' in Cheetah templates to possibly more complex syntax in Python.
Its purpose is to make working with Cheetah easy for non-programmers.
Specifically, non-programmers using Cheetah should NOT need to be taught (a)
what the difference is between an object and a dictionary, (b) what functions
and methods are, and (c) what 'self' is. A further aim (d) is to buffer the
code in Cheetah templates from changes in the implementation of the Python data
structures behind them.
Consider this scenario:
You are building a customer information system. The designers working with you
want to use information from your system on the client's website --AND-- they
want to understand the display code so that they can maintain it themselves.
You write a UI class with a 'customers' method that returns a dictionary of all
the customer objects. Each customer object has an 'address' method that returns
a dictionary with information about the customer's address. The designers
want to be able to access that information.
Using PSP, the display code for the website would look something like the
following, assuming your servlet subclasses the class you created for managing
customer information:
<%= self.customer()[ID].address()['city'] %> (42 chars)
Using Cheetah's NameMapper syntax it could be any of the following:
$self.customers()[$ID].address()['city'] (39 chars)
--OR--
$customers()[$ID].address()['city']
--OR--
$customers()[$ID].address().city
--OR--
$customers()[$ID].address.city
--OR--
$customers[$ID].address.city (27 chars)
Which of these would you prefer to explain to the designers, who have no
programming experience? The last form is 15 characters shorter than the PSP
and, conceptually, is far more accessible. With PHP or ASP, the code would be
even messier than the PSP.
This is a rather extreme example and, of course, you could also just implement
'$getCustomer($ID).city' and obey the Law of Demeter (search Google for more on that).
But good object-oriented design isn't the point here.
Details
================================================================================
The parenthesized letters below correspond to the aims in the second paragraph.
DICTIONARY ACCESS (a)
---------------------
NameMapper allows access to items in a dictionary using the same dotted notation
used to access object attributes in Python. This aspect of NameMapper is known
as 'Unified Dotted Notation'.
For example, with Cheetah it is possible to write:
$customers()['kerr'].address() --OR-- $customers().kerr.address()
where the second form is in NameMapper syntax.
This only works with dictionary keys that are also valid python identifiers:
regex = '[a-zA-Z_][a-zA-Z_0-9]*'
AUTOCALLING (b,d)
-----------------
NameMapper automatically detects functions and methods in Cheetah $vars and calls
them if the parentheses have been left off.
For example if 'a' is an object, 'b' is a method
$a.b
is equivalent to
$a.b()
If b returns a dictionary, then following variations are possible
$a.b.c --OR-- $a.b().c --OR-- $a.b()['c']
where 'c' is a key in the dictionary that a.b() returns.
Further notes:
* NameMapper autocalls the function or method without any arguments. Thus
autocalling can only be used with functions or methods that either have no
arguments or have default values for all arguments.
* NameMapper only autocalls functions and methods. Classes and callable object instances
will not be autocalled.
* Autocalling can be disabled using Cheetah's 'useAutocalling' setting.
LEAVING OUT 'self' (c,d)
------------------------
NameMapper makes it possible to access the attributes of a servlet in Cheetah
without needing to include 'self' in the variable names. See the NAMESPACE
CASCADING section below for details.
NAMESPACE CASCADING (d)
-----------------------
...
Implementation details
================================================================================
* NameMapper's search order is dictionary keys then object attributes
* NameMapper.NotFound is raised if a value can't be found for a name.
Performance and the C version
================================================================================
Cheetah comes with both a C version and a Python version of NameMapper. The C
version is significantly faster and the exception tracebacks are much easier to
read. It's still slower than standard Python syntax, but you won't notice the
difference in realistic usage scenarios.
Cheetah uses the optimized C version (_namemapper.c) if it has
been compiled or falls back to the Python version if not.
Meta-Data
================================================================================
Authors: Tavis Rudd <tavis@damnsimple.com>,
Chuck Esterbrook <echuck@mindspring.com>
Version: $Revision: 1.2 $
Start Date: 2001/04/03
Last Revision Date: $Date: 2006-12-19 13:32:47 $
"""
from __future__ import generators
__author__ = "Tavis Rudd <tavis@damnsimple.com>," +\
"\nChuck Esterbrook <echuck@mindspring.com>"
__revision__ = "$Revision: 1.2 $"[11:-2]
import types
from types import StringType, InstanceType, ClassType, TypeType
from pprint import pformat
import inspect
_INCLUDE_NAMESPACE_REPR_IN_NOTFOUND_EXCEPTIONS = False
_ALLOW_WRAPPING_OF_NOTFOUND_EXCEPTIONS = True
__all__ = ['NotFound',
'hasKey',
'valueForKey',
'valueForName',
'valueFromSearchList',
'valueFromFrameOrSearchList',
'valueFromFrame',
]
## N.B. An attempt is made at the end of this module to import C versions of
## these functions. If _namemapper.c has been compiled successfully and the
## import goes smoothly, the Python versions defined here will be replaced with
## the C versions.
class NotFound(LookupError):
pass
def _raiseNotFoundException(key, namespace):
excString = "cannot find '%s'"%key
if _INCLUDE_NAMESPACE_REPR_IN_NOTFOUND_EXCEPTIONS:
excString += ' in the namespace %s'%pformat(namespace)
raise NotFound(excString)
def _wrapNotFoundException(exc, fullName, namespace):
if not _ALLOW_WRAPPING_OF_NOTFOUND_EXCEPTIONS:
raise
else:
excStr = exc.args[0]
if excStr.find('while searching')==-1: # only wrap once!
excStr +=" while searching for '%s'"%fullName
if _INCLUDE_NAMESPACE_REPR_IN_NOTFOUND_EXCEPTIONS:
                excStr += ' in the namespace %s'%pformat(namespace)
exc.args = (excStr,)
raise
def hasKey(obj, key):
"""Determine if 'obj' has 'key' """
if hasattr(obj,'has_key') and obj.has_key(key):
return True
elif hasattr(obj, key):
return True
else:
return False
def valueForKey(obj, key):
if hasattr(obj, 'has_key') and obj.has_key(key):
return obj[key]
elif hasattr(obj, key):
return getattr(obj, key)
else:
_raiseNotFoundException(key, obj)
def _valueForName(obj, name, executeCallables=False):
nameChunks=name.split('.')
for i in range(len(nameChunks)):
key = nameChunks[i]
# 19.12.2006 skyostil: Check the object's attributes before the contents, since otherwise
# for e.g. dictionaries the built-in values() method will be overwritten by a dictionary key called 'values'.
if hasattr(obj, key):
nextObj = getattr(obj, key)
elif hasattr(obj, 'has_key') and obj.has_key(key):
nextObj = obj[key]
else:
_raiseNotFoundException(key, obj)
if (executeCallables and callable(nextObj)
and (type(nextObj) not in (InstanceType, ClassType))):
obj = nextObj()
else:
obj = nextObj
return obj
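# Illustrative note (not part of the original module): because _valueForName
# checks object attributes before dictionary contents, a call such as
#     valueForName({'values': 42}, 'values', executeCallables=True)
# resolves to the dict's built-in values() method and, under Python 2, yields
# [42] rather than returning the stored 42 through key lookup.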
def valueForName(obj, name, executeCallables=False):
try:
return _valueForName(obj, name, executeCallables)
except NotFound, e:
_wrapNotFoundException(e, fullName=name, namespace=obj)
def valueFromSearchList(searchList, name, executeCallables=False):
key = name.split('.')[0]
for namespace in searchList:
if hasKey(namespace, key):
return _valueForName(namespace, name,
executeCallables=executeCallables)
_raiseNotFoundException(key, searchList)
def _namespaces(callerFrame, searchList=None):
yield callerFrame.f_locals
if searchList:
for namespace in searchList:
yield namespace
yield callerFrame.f_globals
yield __builtins__
def valueFromFrameOrSearchList(searchList, name, executeCallables=False,
frame=None):
def __valueForName():
try:
return _valueForName(namespace, name, executeCallables=executeCallables)
except NotFound, e:
_wrapNotFoundException(e, fullName=name, namespace=searchList)
try:
if not frame:
frame = inspect.stack()[1][0]
key = name.split('.')[0]
for namespace in _namespaces(frame, searchList):
if hasKey(namespace, key): return __valueForName()
_raiseNotFoundException(key, searchList)
finally:
del frame
def valueFromFrame(name, executeCallables=False, frame=None):
# @@TR consider implementing the C version the same way
    # at the moment it provides a separate but mirror implementation
# to valueFromFrameOrSearchList
try:
if not frame:
frame = inspect.stack()[1][0]
return valueFromFrameOrSearchList(searchList=None,
name=name,
executeCallables=executeCallables,
frame=frame)
finally:
del frame
def hasName(obj, name):
#Not in the C version
"""Determine if 'obj' has the 'name' """
key = name.split('.')[0]
if not hasKey(obj, key):
return False
try:
valueForName(obj, name)
return True
except NotFound:
return False
try:
from _namemapper import NotFound, valueForKey, valueForName, \
valueFromSearchList, valueFromFrameOrSearchList, valueFromFrame
# it is possible with Jython or Windows, for example, that _namemapper.c hasn't been compiled
C_VERSION = True
except:
C_VERSION = False
##################################################
## CLASSES
class Mixin:
"""@@ document me"""
def valueForName(self, name):
return valueForName(self, name)
def valueForKey(self, key):
return valueForKey(self, key)
##################################################
## if run from the command line ##
def example():
class A(Mixin):
classVar = 'classVar val'
def method(self,arg='method 1 default arg'):
return arg
def method2(self, arg='meth 2 default arg'):
return {'item1':arg}
def method3(self, arg='meth 3 default'):
return arg
class B(A):
classBvar = 'classBvar val'
a = A()
a.one = 'valueForOne'
def function(whichOne='default'):
values = {
'default': 'default output',
'one': 'output option one',
'two': 'output option two'
}
return values[whichOne]
a.dic = {
'func': function,
'method': a.method3,
'item': 'itemval',
'subDict': {'nestedMethod':a.method3}
}
b = 'this is local b'
print valueForKey(a.dic,'subDict')
print valueForName(a, 'dic.item')
print valueForName(vars(), 'b')
print valueForName(__builtins__, 'dir')()
print valueForName(vars(), 'a.classVar')
print valueForName(vars(), 'a.dic.func', executeCallables=True)
print valueForName(vars(), 'a.method2.item1', executeCallables=True)
if __name__ == '__main__':
example()
|
|
""" Base Module for datasets to specify the interface for these """
import yaml
import os
import logging
import logging.handlers
import warnings
import socket
import cPickle
from pySPACE.run.scripts import md_creator
# import bz2
from collections import defaultdict
class UnknownDatasetTypeException(Exception):
""" Wrapper around error, when dataset type is not available """
pass
class BaseDataset(object):
""" Base class for datasets
This class (BaseDataset) acts as base class for all dataset classes
and specifies the interface for these. Furthermore it provides a factory
method *load* for all types of datasets. It expects a path to the datasets
storage directory.
The following methods must be implemented:
:__init__: The constructor must take an argument *dataset_md* that
is a dictionary containing meta data for the dataset
to be loaded.
:store: A method that stores a dataset in a certain directory.
*store* and *__init__* should be written so that *__init__* can
correctly recreate every dataset stored with *store*
:add_sample: (*optional*) Adds a new sample to the dataset.
BaseDataset provides a default implementation.
Datasets store the data in the attribute *self.data*.
This data is stored as a dictionary that maps (run, split, train/test)
tuples to the actual data obtained in this split in this run for
training/testing.
"""
def __init__(self, dataset_md=None):
# The data structure containing the actual data
# The data is stored as a dictionary that maps
# (run, split, train/test) tuples to the actual
# data obtained in this split in this run for
# training/testing.
self.data = defaultdict(list)
# A dictionary containing some meta data for the respective
# dataset type.
self.meta_data = {"train_test": False, # defaults
"splits": 1,
"runs": 1}
if not dataset_md is None:
self.update_meta_data(dataset_md)
@classmethod
def load(cls, dataset_dir):
""" Loads the dataset stored in directory *rel_dataset_dir*
This method loads the dataset stored in the directory
*rel_dataset_dir* . Depending on the type stored in the datasets
meta-data file, the method creates an instance of a specific
dataset class.
The method expects the following parameters:
* *dataset_dir* : The (absolute) directory in which the dataset \
that will be loaded is located
"""
# Loading the dataset meta data
meta_data = cls.load_meta_data(dataset_dir)
# Set the directory where this dataset is located
meta_data["dataset_directory"] = dataset_dir
# Mapping for Backward Compatibility
if meta_data["type"].lower() == "raw_eeg":
meta_data["type"] = "STREAM"
meta_data["storage_format"] = "bp_eeg"
# construct dataset module and class name dependent on the type
# for backward compatibility type is casted to lower-case
data_mod_name = meta_data["type"].lower()
data_class_name = ''.join([x.title()
for x in meta_data["type"].split('_')])
data_class_name += "Dataset"
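        # e.g. (illustrative) a meta-data type of "feature_vector" resolves to
        # the module pySPACE.resources.dataset_defs.feature_vector and the
        # class name FeatureVectorDataset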
# dynamic class import: from data_mod_name import col_class_name
try:
dataset_module = __import__(
'pySPACE.resources.dataset_defs.%s' % data_mod_name,
fromlist=[data_class_name])
except ImportError:
msg = "Dataset type %s in %s is unknown" % \
(meta_data["type"], meta_data["dataset_directory"])
raise UnknownDatasetTypeException(msg)
dataset_class = getattr(dataset_module, data_class_name)
# delegate to subclass
return dataset_class(dataset_md=meta_data,
dataset_dir=dataset_dir)
@staticmethod
def load_meta_data(dataset_dir, file_name="metadata.yaml"):
""" Load the meta data of the dataset """
try:
file_path = os.sep.join([dataset_dir, file_name])
meta_file = open(file_path, 'r')
except IOError:
pass
else:
meta_data = yaml.load(meta_file)
if "ignored_columns" in meta_data:
meta_data["ignored_columns"] = \
md_creator.parse_list(meta_data["ignored_columns"])
if meta_data.has_key("ignored_rows"):
meta_data["ignored_rows"] = \
md_creator.parse_list(meta_data["ignored_rows"])
meta_file.close()
if "input_collection_name" in meta_data:
warnings.warn(
"'input_collection_name' needs to be renamed to 'input_dataset_name'!")
meta_data["input_dataset_name"] = meta_data.pop("input_collection_name")
return meta_data
# Error handling and backward compatibility
try:
file_path = os.sep.join([dataset_dir, "collection.yaml"])
meta_file = open(file_path, 'r')
meta_data = yaml.load(meta_file)
if meta_data.has_key("ignored_columns"):
meta_data["ignored_columns"] = \
md_creator.parse_list(meta_data["ignored_columns"])
if meta_data.has_key("ignored_rows"):
meta_data["ignored_rows"] = \
md_creator.parse_list(meta_data["ignored_rows"])
meta_file.close()
warnings.warn(
"'collection.yaml' needs to be renamed to 'metadata.yaml'!")
if "input_collection_name" in meta_data:
warnings.warn(
"'input_collection_name' needs to be renamed to 'input_dataset_name'!")
meta_data["input_dataset_name"] = meta_data.pop("input_collection_name")
return meta_data
except IOError, e:
warnings.warn("IOError occurred: %s." % e)
# check if we have a feature vector dataset with missing metadata.yaml
csv_file = None
for dirpath, dirnames, files in os.walk(dataset_dir):
for file in files:
if file.endswith(".csv") or file.endswith(".arff"):
csv_file = file
break
if csv_file:
break
if csv_file:
warnings.warn(
"If you want to use csv-files, you have to " +
"generate a %s! The pySPACE documentation " % file_name +
"tells you what you have to specify. You can also use " +
":script:`pySPACE.run.scripts.md_creator.py`. " +
"We will try this in the following...")
print("Found '%s' at '%s'!" % (csv_file, dirpath))
if not dirpath == dataset_dir:
print("Maybe you specified the wrong input_path?")
md_file = dirpath + os.sep + file_name
if not os.path.isfile(md_file):
md_creator.main(md_file)
collection_meta_file = open(md_file)
meta_data = yaml.load(collection_meta_file)
collection_meta_file.close()
return meta_data
raise Exception("No pySPACE dataset '%s' found. " % dataset_dir +
"You have to specify a %s in each " % file_name +
"dataset directory. Have a look at the pySPACE "
"documentation. Continuing...")
@staticmethod
def store_meta_data(dataset_dir, meta_data, file_name="metadata.yaml"):
""" Stores the meta data of a dataset """
# Loading the dataset meta file
try:
with open(os.sep.join([dataset_dir, file_name]), 'w') as collection_meta_file:
yaml.dump(meta_data, collection_meta_file)
except IOError:
raise Exception("No pySPACE dataset %s found. Continuing..." % dataset_dir)
def add_sample(self, sample, label, train, split=0, run=0):
""" Add a sample to this dataset
Adds the sample *sample* along with its class label *label*
to this dataset.
The method expects the following parameters:
* *sample* : The respective data sample
* *label* : The label of the data sample
* *train* : If *train*, this sample has already been used for training
* *split* : The number of the split this sample belongs to. \
Defaults to 0.
        * *run* : The run number this sample belongs to. Defaults to 0.
"""
if train == "test":
train = False
if train:
self.meta_data["train_test"] = True
if split + 1 > self.meta_data["splits"]:
self.meta_data["splits"] = split + 1
key = (run, split, "train" if train else "test")
if isinstance(self.data[key], basestring):
self.data[key] = []
self.data[key].append((sample, label))
def update_meta_data(self, meta_data):
""" Updates the internal meta_data dictionary with *meta_data* """
self.meta_data.update(meta_data)
def get_run_numbers(self):
""" Return the number of the runs contained in this dataset """
runs = set(run for run, split, train_test in self.data.keys())
return list(runs)
def get_split_numbers(self, current_run=0):
""" Return the number of the splits
Returns the number of splits contained in this dataset
        for the given run number *current_run* """
splits = set(split for run, split, train_test in self.data.keys()
if run == current_run)
return list(splits)
def dump(self, result_path, name):
""" Dumps this dataset into a file.
Dumps (i.e. pickle) this dataset object into a bz2 compressed file.
In contrast to *store* this method stores the whole dataset
in a file. No meta data are stored in a YAML file etc.
The method expects the following parameters:
* *result_path* The path to the directory in which the pickle \
file will be written.
* *name* The name of the pickle file
"""
result_file = open(os.path.join(result_path,
name + ".pickle"), "wb")
# result_file.write(bz2.compress(cPickle.dumps(self, protocol=2)))
result_file.write(cPickle.dumps(self, protocol=2))
result_file.close()
def store(self, result_dir, s_format=None):
""" Stores this dataset in the directory *result_dir*.
In contrast to *dump* this method stores the dataset
not in a single file but as a whole directory structure with meta
information etc. The data sets are stored separately for each run,
split, train/test combination.
The method expects the following parameters:
* *result_dir* The directory in which the dataset will be stored
* *s_format* The format in which the actual data sets should be stored.
.. note:: Needs to be overwritten by the subclasses!
"""
raise NotImplementedError()
def _log(self, message, level=logging.INFO):
""" Logs the given message with the given logging level """
root_logger = logging.getLogger("%s.%s.%s" % (socket.gethostname(),
os.getpid(),
self))
if len(root_logger.handlers) == 0:
root_logger.addHandler(logging.handlers.SocketHandler('localhost',
logging.handlers.DEFAULT_TCP_LOGGING_PORT))
root_logger.log(level, message)
def __del__(self):
""" Remove logging handler """
root_logger = logging.getLogger("%s.%s.%s" % (socket.gethostname(),
os.getpid(),
self))
for handler in root_logger.handlers:
handler.close()
root_logger.removeHandler(handler)
del(root_logger)
def __repr__(self):
""" Return a string representation of this class"""
return self.__class__.__name__
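
# Minimal usage sketch (illustrative; the sample values and labels below are
# hypothetical). It shows how add_sample fills the (run, split, train/test)
# keyed data dictionary described in the class docstring above.
def _demo_base_dataset():
    dataset = BaseDataset()
    dataset.add_sample(sample=[0.1, 0.2], label="Target", train=True)
    dataset.add_sample(sample=[0.3, 0.4], label="Standard", train=False)
    # training samples recorded for run 0, split 0
    train_data = dataset.data[(0, 0, "train")]
    return train_data, dataset.get_run_numbers(), dataset.get_split_numbers()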
|
|
"""
CMCCAT2000 Chromatic Adaptation Model
=====================================
Defines the *CMCCAT2000* chromatic adaptation model objects:
- :class:`colour.adaptation.InductionFactors_CMCCAT2000`
- :class:`colour.VIEWING_CONDITIONS_CMCCAT2000`
- :func:`colour.adaptation.chromatic_adaptation_forward_CMCCAT2000`
- :func:`colour.adaptation.chromatic_adaptation_inverse_CMCCAT2000`
- :func:`colour.adaptation.chromatic_adaptation_CMCCAT2000`
References
----------
- :cite:`Li2002a` : Li, C., Luo, M. R., Rigg, B., & Hunt, R. W. G. (2002).
CMC 2000 chromatic adaptation transform: CMCCAT2000. Color Research &
Application, 27(1), 49-58. doi:10.1002/col.10005
- :cite:`Westland2012k` : Westland, S., Ripamonti, C., & Cheung, V. (2012).
CMCCAT2000. In Computational Colour Science Using MATLAB (2nd ed., pp.
83-86). ISBN:978-0-470-66569-5
"""
from __future__ import annotations
import numpy as np
from typing import NamedTuple
from colour.adaptation import CAT_CMCCAT2000
from colour.algebra import vector_dot
from colour.hints import (
ArrayLike,
Floating,
FloatingOrArrayLike,
Literal,
NDArray,
Union,
)
from colour.utilities import (
CaseInsensitiveMapping,
as_float_array,
from_range_100,
to_domain_100,
validate_method,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"CAT_INVERSE_CMCCAT2000",
"InductionFactors_CMCCAT2000",
"VIEWING_CONDITIONS_CMCCAT2000",
"chromatic_adaptation_forward_CMCCAT2000",
"chromatic_adaptation_inverse_CMCCAT2000",
"chromatic_adaptation_CMCCAT2000",
]
CAT_INVERSE_CMCCAT2000: NDArray = np.linalg.inv(CAT_CMCCAT2000)
"""
Inverse *CMCCAT2000* chromatic adaptation transform.
CAT_INVERSE_CMCCAT2000
"""
class InductionFactors_CMCCAT2000(NamedTuple):
"""
*CMCCAT2000* chromatic adaptation model induction factors.
Parameters
----------
F
:math:`F` surround condition.
References
----------
:cite:`Li2002a`, :cite:`Westland2012k`
"""
F: Floating
VIEWING_CONDITIONS_CMCCAT2000: CaseInsensitiveMapping = CaseInsensitiveMapping(
{
"Average": InductionFactors_CMCCAT2000(1),
"Dim": InductionFactors_CMCCAT2000(0.8),
"Dark": InductionFactors_CMCCAT2000(0.8),
}
)
VIEWING_CONDITIONS_CMCCAT2000.__doc__ = """
Reference *CMCCAT2000* chromatic adaptation model viewing conditions.
References
----------
:cite:`Li2002a`, :cite:`Westland2012k`
"""
def chromatic_adaptation_forward_CMCCAT2000(
XYZ: ArrayLike,
XYZ_w: ArrayLike,
XYZ_wr: ArrayLike,
L_A1: FloatingOrArrayLike,
L_A2: FloatingOrArrayLike,
surround: InductionFactors_CMCCAT2000 = VIEWING_CONDITIONS_CMCCAT2000[
"Average"
],
) -> NDArray:
"""
Adapt given stimulus *CIE XYZ* tristimulus values from test viewing
conditions to reference viewing conditions using *CMCCAT2000* forward
chromatic adaptation model.
Parameters
----------
XYZ
*CIE XYZ* tristimulus values of the stimulus to adapt.
XYZ_w
Test viewing condition *CIE XYZ* tristimulus values of the whitepoint.
XYZ_wr
Reference viewing condition *CIE XYZ* tristimulus values of the
whitepoint.
L_A1
Luminance of test adapting field :math:`L_{A1}` in :math:`cd/m^2`.
L_A2
Luminance of reference adapting field :math:`L_{A2}` in :math:`cd/m^2`.
surround
Surround viewing conditions induction factors.
Returns
-------
:class:`numpy.ndarray`
*CIE XYZ_c* tristimulus values of the stimulus corresponding colour.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``XYZ`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
| ``XYZ_w`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
| ``XYZ_wr`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``XYZ_c`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`Li2002a`, :cite:`Westland2012k`
Examples
--------
>>> XYZ = np.array([22.48, 22.74, 8.54])
>>> XYZ_w = np.array([111.15, 100.00, 35.20])
>>> XYZ_wr = np.array([94.81, 100.00, 107.30])
>>> L_A1 = 200
>>> L_A2 = 200
>>> chromatic_adaptation_forward_CMCCAT2000(XYZ, XYZ_w, XYZ_wr, L_A1, L_A2)
... # doctest: +ELLIPSIS
array([ 19.5269832..., 23.0683396..., 24.9717522...])
"""
XYZ = to_domain_100(XYZ)
XYZ_w = to_domain_100(XYZ_w)
XYZ_wr = to_domain_100(XYZ_wr)
L_A1 = as_float_array(L_A1)
L_A2 = as_float_array(L_A2)
RGB = vector_dot(CAT_CMCCAT2000, XYZ)
RGB_w = vector_dot(CAT_CMCCAT2000, XYZ_w)
RGB_wr = vector_dot(CAT_CMCCAT2000, XYZ_wr)
D = surround.F * (
0.08 * np.log10(0.5 * (L_A1 + L_A2))
+ 0.76
- 0.45 * (L_A1 - L_A2) / (L_A1 + L_A2)
)
D = np.clip(D, 0, 1)
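    # D is the degree of adaptation; for example, with L_A1 == L_A2 == 200
    # cd/m^2 and the "Average" surround (F = 1), D = 0.08 * log10(200) + 0.76,
    # i.e. approximately 0.944, which already lies within the clipped [0, 1]
    # range.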
a = D * XYZ_w[..., 1] / XYZ_wr[..., 1]
RGB_c = RGB * (
a[..., np.newaxis] * (RGB_wr / RGB_w) + 1 - D[..., np.newaxis]
)
XYZ_c = vector_dot(CAT_INVERSE_CMCCAT2000, RGB_c)
return from_range_100(XYZ_c)
def chromatic_adaptation_inverse_CMCCAT2000(
XYZ_c: ArrayLike,
XYZ_w: ArrayLike,
XYZ_wr: ArrayLike,
L_A1: FloatingOrArrayLike,
L_A2: FloatingOrArrayLike,
surround: InductionFactors_CMCCAT2000 = VIEWING_CONDITIONS_CMCCAT2000[
"Average"
],
) -> NDArray:
"""
Adapt given stimulus corresponding colour *CIE XYZ* tristimulus values
from reference viewing conditions to test viewing conditions using
*CMCCAT2000* inverse chromatic adaptation model.
Parameters
----------
XYZ_c
*CIE XYZ* tristimulus values of the stimulus to adapt.
XYZ_w
Test viewing condition *CIE XYZ* tristimulus values of the whitepoint.
XYZ_wr
Reference viewing condition *CIE XYZ* tristimulus values of the
whitepoint.
L_A1
Luminance of test adapting field :math:`L_{A1}` in :math:`cd/m^2`.
L_A2
Luminance of reference adapting field :math:`L_{A2}` in :math:`cd/m^2`.
surround
Surround viewing conditions induction factors.
Returns
-------
:class:`numpy.ndarray`
*CIE XYZ_c* tristimulus values of the adapted stimulus.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``XYZ_c`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
| ``XYZ_w`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
| ``XYZ_wr`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``XYZ`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`Li2002a`, :cite:`Westland2012k`
Examples
--------
>>> XYZ_c = np.array([19.53, 23.07, 24.97])
>>> XYZ_w = np.array([111.15, 100.00, 35.20])
>>> XYZ_wr = np.array([94.81, 100.00, 107.30])
>>> L_A1 = 200
>>> L_A2 = 200
>>> chromatic_adaptation_inverse_CMCCAT2000(XYZ_c, XYZ_w, XYZ_wr, L_A1,
... L_A2)
... # doctest: +ELLIPSIS
array([ 22.4839876..., 22.7419485..., 8.5393392...])
"""
XYZ_c = to_domain_100(XYZ_c)
XYZ_w = to_domain_100(XYZ_w)
XYZ_wr = to_domain_100(XYZ_wr)
L_A1 = as_float_array(L_A1)
L_A2 = as_float_array(L_A2)
RGB_c = vector_dot(CAT_CMCCAT2000, XYZ_c)
RGB_w = vector_dot(CAT_CMCCAT2000, XYZ_w)
RGB_wr = vector_dot(CAT_CMCCAT2000, XYZ_wr)
D = surround.F * (
0.08 * np.log10(0.5 * (L_A1 + L_A2))
+ 0.76
- 0.45 * (L_A1 - L_A2) / (L_A1 + L_A2)
)
D = np.clip(D, 0, 1)
a = D * XYZ_w[..., 1] / XYZ_wr[..., 1]
RGB = RGB_c / (
a[..., np.newaxis] * (RGB_wr / RGB_w) + 1 - D[..., np.newaxis]
)
XYZ = vector_dot(CAT_INVERSE_CMCCAT2000, RGB)
return from_range_100(XYZ)
def chromatic_adaptation_CMCCAT2000(
XYZ: ArrayLike,
XYZ_w: ArrayLike,
XYZ_wr: ArrayLike,
L_A1: FloatingOrArrayLike,
L_A2: FloatingOrArrayLike,
surround: InductionFactors_CMCCAT2000 = VIEWING_CONDITIONS_CMCCAT2000[
"Average"
],
direction: Union[Literal["Forward", "Inverse"], str] = "Forward",
) -> NDArray:
"""
Adapt given stimulus *CIE XYZ* tristimulus values using given viewing
conditions.
This definition is a convenient wrapper around
:func:`colour.adaptation.chromatic_adaptation_forward_CMCCAT2000` and
:func:`colour.adaptation.chromatic_adaptation_inverse_CMCCAT2000`.
Parameters
----------
XYZ
*CIE XYZ* tristimulus values of the stimulus to adapt.
XYZ_w
Source viewing condition *CIE XYZ* tristimulus values of the
whitepoint.
XYZ_wr
Target viewing condition *CIE XYZ* tristimulus values of the
whitepoint.
L_A1
Luminance of test adapting field :math:`L_{A1}` in :math:`cd/m^2`.
L_A2
Luminance of reference adapting field :math:`L_{A2}` in :math:`cd/m^2`.
surround
Surround viewing conditions induction factors.
direction
Chromatic adaptation direction.
Returns
-------
:class:`numpy.ndarray`
Adapted stimulus *CIE XYZ* tristimulus values.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``XYZ`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
| ``XYZ_w`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
| ``XYZ_wr`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``XYZ`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`Li2002a`, :cite:`Westland2012k`
Examples
--------
>>> XYZ = np.array([22.48, 22.74, 8.54])
>>> XYZ_w = np.array([111.15, 100.00, 35.20])
>>> XYZ_wr = np.array([94.81, 100.00, 107.30])
>>> L_A1 = 200
>>> L_A2 = 200
>>> chromatic_adaptation_CMCCAT2000(
... XYZ, XYZ_w, XYZ_wr, L_A1, L_A2, direction='Forward')
... # doctest: +ELLIPSIS
array([ 19.5269832..., 23.0683396..., 24.9717522...])
Using the *CMCCAT2000* inverse model:
>>> XYZ = np.array([19.52698326, 23.06833960, 24.97175229])
>>> XYZ_w = np.array([111.15, 100.00, 35.20])
>>> XYZ_wr = np.array([94.81, 100.00, 107.30])
>>> L_A1 = 200
>>> L_A2 = 200
>>> chromatic_adaptation_CMCCAT2000(
... XYZ, XYZ_w, XYZ_wr, L_A1, L_A2, direction='Inverse')
... # doctest: +ELLIPSIS
array([ 22.48, 22.74, 8.54])
"""
direction = validate_method(
direction,
["Forward", "Inverse"],
'"{0}" direction is invalid, it must be one of {1}!',
)
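    # validate_method returns the validated direction in lowercase, hence the
    # lowercase comparison below.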
if direction == "forward":
return chromatic_adaptation_forward_CMCCAT2000(
XYZ, XYZ_w, XYZ_wr, L_A1, L_A2, surround
)
else:
return chromatic_adaptation_inverse_CMCCAT2000(
XYZ, XYZ_w, XYZ_wr, L_A1, L_A2, surround
)
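
# Round-trip sketch (illustrative; it reuses the whitepoints and adapting
# luminances from the doctests above): adapting forward and then inverse under
# the same viewing conditions should recover the original stimulus values.
def _demo_cmccat2000_round_trip() -> NDArray:
    XYZ = np.array([22.48, 22.74, 8.54])
    XYZ_w = np.array([111.15, 100.00, 35.20])
    XYZ_wr = np.array([94.81, 100.00, 107.30])
    XYZ_c = chromatic_adaptation_CMCCAT2000(
        XYZ, XYZ_w, XYZ_wr, 200, 200, direction="Forward"
    )
    # the inverse direction maps the corresponding colour back to the input XYZ
    return chromatic_adaptation_CMCCAT2000(
        XYZ_c, XYZ_w, XYZ_wr, 200, 200, direction="Inverse"
    )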
|
|
from . import version
from .constants import *
from shared.messages import *
from shared.protocol import JSONReceiver
from collections import Counter
import random
class ClientProtocol(JSONReceiver):
def __init__(self, factory):
JSONReceiver.__init__(self, factory)
self.addCallback(MODE_CLIENT_AUTHENTIFICATION, MSG_CLIENT_ACCEPTED, self.clientAccepted)
self.addCallback(MODE_CLIENT_AUTHENTIFICATION, MSG_CLIENT_REFUSED, self.clientRefused)
self.addCallback(MODE_CLIENT_AUTHENTIFICATION, MSG_SERVER_AUTHENTIFICATION, self.serverAuthentification)
self.addCallback(MODE_USER_AUTHENTIFICATION, MSG_USER_LOGIN, self.userLogin)
self.addCallback(MODE_USER_AUTHENTIFICATION, MSG_USER_REGISTRATION, self.userRegistration)
self.addCallback(MODE_INITIAL_SYNC, MSG_CURRENT_GAMES, self.currentGames)
self.addCallback(MODE_INITIAL_SYNC, MSG_CURRENT_USERS, self.currentUsers)
self.addCallback(MODE_INITIAL_SYNC, MSG_DATABASE_QUERY, self.databaseQuery)
self.addCallback(MODE_INITIAL_SYNC, MSG_DATABASE_PUSH, self.databasePush)
self.addCallback(MODE_INITIAL_SYNC, MSG_SYNC_FINISHED, self.syncFinished)
self.addCallback(MODE_FREE_TO_JOIN, MSG_CREATE_GAME, self.createGame)
self.addCallback(MODE_FREE_TO_JOIN, MSG_JOIN_GAME, self.joinGame)
self.addCallback(MODE_FREE_TO_JOIN, MSG_LOGGED_IN, self.loggedIn)
self.addCallback(MODE_FREE_TO_JOIN, MSG_LOGGED_OFF, self.loggedOff)
self.addCallback(MODE_FREE_TO_JOIN, MSG_LEAVE_GAME, self.leaveGame)
self.addCallback(MODE_FREE_TO_JOIN, MSG_DELETE_GAME, self.deleteGame)
self.addCallback(MODE_FREE_TO_JOIN, MSG_SUSPEND_GAME, self.suspendGame)
self.addCallback(MODE_IN_GAME, MSG_JOIN_GAME, self.joinGame)
self.addCallback(MODE_IN_GAME, MSG_START_GAME, self.startGame)
self.addCallback(MODE_IN_GAME, MSG_STARTED_GAME, self.startedGame)
self.addCallback(MODE_IN_GAME, MSG_CREATE_GAME, self.createGame)
self.addCallback(MODE_IN_GAME, MSG_DRAW_CARDS, self.drawCards)
self.addCallback(MODE_IN_GAME, MSG_CZAR_CHANGE, self.czarChange)
self.addCallback(MODE_IN_GAME, MSG_LOGGED_IN, self.loggedIn)
self.addCallback(MODE_IN_GAME, MSG_LOGGED_OFF, self.loggedOff)
self.addCallback(MODE_IN_GAME, MSG_LEAVE_GAME, self.leaveGame)
self.addCallback(MODE_IN_GAME, MSG_SUSPEND_GAME, self.suspendGame)
self.addCallback(MODE_IN_GAME, MSG_DELETE_GAME, self.deleteGame)
self.addCallback(MODE_IN_GAME, MSG_CHOOSE_CARDS, self.chooseCards)
self.addCallback(MODE_IN_GAME, MSG_CHOICES, self.choices)
self.addCallback(MODE_IN_GAME, MSG_CZAR_DECISION, self.czarDecision)
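        # the (mode, message) pairs registered above act as a dispatch table;
        # JSONReceiver presumably routes each incoming message to the handler
        # registered for the protocol's current mode (an assumption based on
        # the addCallback/setMode usage in this file)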
self.setMode(MODE_CLIENT_AUTHENTIFICATION)
self.database_hash = None
self.identification = 'server'
self.server_version = {'MAJOR': 0, 'MINOR': 0, 'REVISION': 0}
self.factory.client = self
self.user_id = 0
self.game_id = 0
self.manual_close = False
def connectionMade(self):
self.sendMessage(MSG_CLIENT_AUTHENTIFICATION, major=version.MAJOR, minor=version.MINOR, revision=version.REVISION)
def serverAuthentification(self, major, minor, revision):
self.server_version = {'MAJOR': major, 'MINOR': minor, 'REVISION': revision}
def clientRefused(self, reason):
self.factory.display.view.clientRefusedMessage(reason)
def clientAccepted(self):
username, password = self.factory.display.getLoginCredentials()
self.sendMessage(MSG_USER_AUTHENTIFICATION, username=username, password=password)
self.setMode(MODE_USER_AUTHENTIFICATION)
self.factory.display.view.loginMessage()
def userLogin(self, success, message, user_id = 0):
if success:
self.user_id = user_id
self.factory.display.view.syncMessage()
self.setMode(MODE_INITIAL_SYNC)
self.sendMessage(MSG_DATABASE_QUERY)
else:
self.factory.display.view.errorMessage(message)
def userRegistration(self, success, message):
if not success:
self.factory.display.view.errorMessage(message)
def databaseQuery(self, hash):
self.factory.card_database.loadPath(self.factory.display.server_name, hash)
if not self.factory.card_database.loaded:
self.sendMessage(MSG_DATABASE_PULL)
self.database_hash = hash
else:
self.factory.card_database.loadCards()
self.sendMessage(MSG_DATABASE_KNOWN)
def databasePush(self, size):
self.receiveRawData(size, self.databaseKnown)
def databaseKnown(self):
self.factory.card_database.loadData(self.raw_data, self.factory.display.server_name, self.database_hash)
self.factory.card_database.loadCards()
self.sendMessage(MSG_DATABASE_KNOWN)
def syncFinished(self):
self.setMode(MODE_FREE_TO_JOIN)
self.factory.display.setView('OverviewView')
self.factory.display.login_sound.stop()
self.factory.display.login_sound.play()
def createGame(self, success=True, game_id = '', message = '', name = '', creator = False, users = 0, rounds = 0, protected = False):
if success:
self.factory.addGame(game_id, name, creator, rounds = rounds, protected = protected)
if self.getMode() == MODE_FREE_TO_JOIN:
self.factory.display.callFunction('self.view.addGame', game_id)
self.factory.display.game_created_sound.stop()
self.factory.display.game_created_sound.play()
else:
self.factory.display.callFunction('self.view.errorMessage', message = message)
def joinGame(self, success=True, message = '', game_id = 0, user_id = 0, users = []):
if success:
self.factory.incrementUsers(game_id)
if self.getMode() == MODE_FREE_TO_JOIN:
if user_id == self.user_id:
self.game_id = game_id
self.setMode(MODE_IN_GAME)
self.factory.display.setView('GameView')
self.factory.display.callFunction('self.view.player_indicators.addPlayer', self.user_id)
for user in users:
self.factory.display.callFunction('self.view.player_indicators.addPlayer', user)
self.factory.display.game_join_sound.stop()
self.factory.display.game_join_sound.play()
else:
self.factory.display.callFunction('self.view.updateGame', game_id)
elif self.getMode() == MODE_IN_GAME and game_id == self.game_id:
self.factory.display.callFunction('self.view.writeLog', self.factory.display.translator.translate('{player} joined the game').format(player = self.factory.findUsername(user_id)))
self.factory.display.callFunction('self.view.player_indicators.addPlayer', user_id)
self.factory.display.game_join_sound.stop()
self.factory.display.game_join_sound.play()
else:
self.factory.display.callFunction('self.view.errorMessage', message = message)
def startGame(self, success, message=''):
if not success:
self.factory.display.callFunction('self.view.writeLogError', message)
def loggedIn(self, user_id, user_name):
self.factory.addUser(user_id, user_name)
def loggedOff(self, user_id):
self.factory.removeUser(user_id)
def startedGame(self, user_id, points):
if user_id == self.user_id:
self.factory.display.callFunction('self.view.writeLog', self.factory.display.translator.translate("You started the game."))
else:
self.factory.display.callFunction('self.view.writeLog', self.factory.display.translator.translate('{player} started the game').format(player = self.factory.findUsername(user_id)))
self.factory.updateGamePoints(self.game_id, points)
self.factory.display.game_start_sound.stop()
self.factory.display.game_start_sound.play()
def drawCards(self, cards):
cards = [self.factory.card_database.getCard(c) for c in cards]
self.factory.display.callFunction('self.view.setCards', *cards)
self.factory.display.game_draw_sounds[random.randint(0, len(self.factory.display.game_draw_sounds)-1)].play()
def czarChange(self, user_id, card):
if user_id == self.user_id:
self.factory.display.callFunction('self.view.writeLog', self.factory.display.translator.translate("You were chosen the new czar and therefore flip a black card open. You won't be able to play any white card until the next player will be chosen to be the czar."))
self.factory.display.callFunction('self.view.setMode', GAME_MODE_CZAR_WAITING)
else:
self.factory.display.callFunction('self.view.writeLog', self.factory.display.translator.translate("{player} was chosen the new czar and therefore flips a new black card open.").format(player = self.factory.findUsername(user_id)))
self.factory.display.callFunction('self.view.setMode', GAME_MODE_PLAYER)
card = self.factory.card_database.getCard(card)
self.factory.display.callFunction('self.view.setBlackCard', card)
self.factory.display.callFunction('self.view.player_indicators.setCzar', user_id)
def currentUsers(self, users):
for u in users:
self.factory.addUser(**u)
def currentGames(self, games):
for g in games:
self.factory.addGame(**g)
def leaveGame(self, game_id, user_id):
self.factory.decrementUsers(game_id)
if self.getMode() == MODE_IN_GAME and game_id == self.game_id:
self.factory.resetGamePoints(self.game_id)
if user_id != self.user_id:
self.factory.display.callFunction('self.view.writeLog', self.factory.display.translator.translate('{player} left the game.').format(player = self.factory.findUsername(user_id)))
self.factory.display.callFunction('self.view.player_indicators.delPlayer', user_id)
self.factory.display.callFunction('self.view.setMode', GAME_MODE_PAUSED)
else:
self.factory.display.setView('OverviewView')
self.setMode(MODE_FREE_TO_JOIN)
self.game_id = 0
self.factory.display.game_leave_sound.stop()
self.factory.display.game_leave_sound.play()
elif self.getMode() == MODE_FREE_TO_JOIN:
self.factory.display.callFunction('self.view.updateGame', game_id)
def suspendGame(self, user_id, game_id):
self.factory.decrementUsers(game_id)
if self.getMode() == MODE_IN_GAME and game_id == self.game_id:
self.factory.resetGamePoints(self.game_id)
if user_id != self.user_id:
self.factory.display.callFunction('self.view.writeLog', self.factory.display.translator.translate('{player} suspended the game.').format(player = self.factory.findUsername(user_id)))
self.factory.display.callFunction('self.view.setMode', GAME_MODE_PAUSED)
self.factory.display.callFunction('self.view.player_indicators.delPlayer', user_id)
else:
self.factory.display.setView('OverviewView')
self.setMode(MODE_FREE_TO_JOIN)
self.game_id = 0
elif self.getMode() == MODE_FREE_TO_JOIN:
self.factory.display.callFunction('self.view.updateGame', game_id)
def deleteGame(self, success = True, game_id = 0, message = ''):
if success:
self.factory.removeGame(game_id)
if self.getMode() == MODE_FREE_TO_JOIN:
self.factory.display.callFunction('self.view.deleteGame', game_id)
self.factory.display.game_deleted_sound.stop()
self.factory.display.game_deleted_sound.play()
self.factory.display.view.default_mode = True
elif not success and self.getMode() == MODE_FREE_TO_JOIN:
self.factory.display.view.errorMessage(message)
def sendStartGame(self):
self.sendMessage(MSG_START_GAME)
def chooseCards(self, success = True, message = '', user_id = 0):
if not success:
self.factory.display.callFunction('self.view.writeLogError', message)
return
self.factory.display.callFunction('self.view.player_indicators.setChosen', user_id)
self.factory.display.game_choose_sound.stop()
self.factory.display.game_choose_sound.play()
if user_id == self.user_id:
text = self.factory.display.translator.translate("You put your chosen cards onto the table.")
else:
text = self.factory.display.translator.translate("{player} put his chosen cards onto the table.").format(player = self.factory.findUsername(user_id))
self.factory.display.view.speak(text)
def choices(self, choices):
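# Each submitted choice is a list of card ids; resolve them to card objects via the local card database.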
choices = [[self.factory.card_database.getCard(c) for c in o] for o in choices]
if self.factory.display.view.mode == GAME_MODE_CZAR_WAITING:
self.factory.display.callFunction('self.view.writeLog', self.factory.display.translator.translate('All players confirmed their choices. You now have to select the choice which you think is the best.'))
self.factory.display.callFunction('self.view.setMode', GAME_MODE_CZAR_DECIDING)
else:
self.factory.display.callFunction('self.view.writeLog', self.factory.display.translator.translate('All players confirmed their choices and the czar now has to select the best one out of them.'))
self.factory.display.callFunction('self.view.setChoices', choices)
def czarDecision(self, success = True, winner = 0, message = '', end = False, rounds = 0):
if not success:
self.factory.display.callFunction('self.view.writeLogError', message)
return
if winner == self.user_id:
text = self.factory.display.translator.translate("You win this round and therefore gain a point.")
else:
text = self.factory.display.translator.translate("{player} wins this round and therefore gains a point.").format(player = self.factory.findUsername(winner))
self.factory.display.callFunction('self.view.writeLog', text)
self.factory.updateGamePoints(self.game_id, [[winner, 1]])
self.factory.decrementRounds(self.game_id)
if winner == self.user_id:
self.factory.display.game_score_sound.stop()
self.factory.display.game_score_sound.play()
else:
self.factory.display.game_score_other_sound.stop()
self.factory.display.game_score_other_sound.play()
if end == True:
self.factory.setRounds(self.game_id, rounds)
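# Helper: formats a list of winner names as "A, B and C"; a single name is returned unchanged.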
fmt_winners = lambda w: ', '.join(w[:-1])+' '+self.factory.display.translator.translate('and')+' '+w[-1] if len(w)>1 else w[0]
self.factory.display.callFunction('self.view.writeLog', self.factory.display.translator.translate("This ends the game."))
winners = self.factory.getWinners(self.game_id)
resolved_winners_without_me = [self.factory.findUsername(w) for w in winners.keys() if w != self.user_id]
if self.user_id in winners:
if len(winners) == 1: # you are the only winner
self.factory.display.callFunction('self.view.writeLog', self.factory.display.translator.translate('You win this game with {points} points. Congratulations!').format(points = winners[self.user_id]))
self.factory.display.game_win_sound.stop()
self.factory.display.game_win_sound.play()
else: # tie
self.factory.display.callFunction('self.view.writeLog', self.factory.display.translator.translate('You are one of the proud winners of this game and ended up gaining {points} points. Congratulations! But {players} gained the same amount of points.').format(points = winners[self.user_id], players = fmt_winners(resolved_winners_without_me)))
self.factory.display.game_tie_sound.stop()
self.factory.display.game_tie_sound.play()
else: # you didn't win anything
self.factory.display.callFunction('self.view.writeLog', self.factory.display.translator.translate("Sad, but true. You didn't win anything. {players} scored {points} points and made the game.").format(points = winners.values()[0], players = fmt_winners(resolved_winners_without_me)))
self.factory.display.game_lose_sound.stop()
self.factory.display.game_lose_sound.play()
game = self.factory.findGame(self.game_id)
score_table = Counter(game['points'])
text = ''
for score in score_table.most_common(10):
text += self.factory.display.translator.translate('{player}: {points} points').format(player = self.factory.display.translator.translate('You') if score[0] == self.user_id else self.factory.findUsername(score[0]), points = score[1])+'\n'
self.factory.display.callFunction('self.view.writeLog', text[:-1])
self.factory.display.callFunction('self.view.setMode', GAME_MODE_PAUSED)
self.factory.resetGamePoints(self.game_id)
def sendChooseCards(self, cards):
self.sendMessage(MSG_CHOOSE_CARDS, cards = [c.id for c in cards])
def sendCzarDecision(self, cards):
self.sendMessage(MSG_CZAR_DECISION, cards = [c.id for c in cards])
def sendCreateGame(self, name, password, rounds):
cmd = {
'game_name': name
}
if password is not None:
cmd['game_password'] = password
if rounds is not None:
cmd['rounds'] = rounds
self.sendMessage(MSG_CREATE_GAME, **cmd)
def sendJoinGame(self, id, password):
cmd = {
'game_id': id
}
if password is not None:
cmd['game_password'] = password
self.sendMessage(MSG_JOIN_GAME, **cmd)
def sendSuspendGame(self):
self.sendMessage(MSG_SUSPEND_GAME)
def sendLeaveGame(self):
self.sendMessage(MSG_LEAVE_GAME)
def sendDeleteGame(self, id):
self.sendMessage(MSG_DELETE_GAME, game_id = id)
def connectionLost(self, reason):
if not self.manual_close and self.factory.display.running and self.getMode() not in [MODE_CLIENT_AUTHENTIFICATION, MODE_USER_AUTHENTIFICATION, MODE_INITIAL_SYNC]:
self.factory.display.setView('LoginView')
self.factory.display.callFunction('self.view.errorMessage', self.factory.display.translator.translate('Lost connection to server')+': '+reason.getErrorMessage())
def loseConnection(self):
self.manual_close = True
self.transport.loseConnection()
|
|
""" Module that uses CMAC 2.0 to remove and correct second trip returns,
correct velocity and more. A new radar object is then created with all CMAC
2.0 products. """
import copy
import json
import sys
import netCDF4
import numpy as np
import pyart
from .cmac_processing import (
do_my_fuzz, get_melt, get_texture, fix_phase_fields, gen_clutter_field_from_refl, beam_block)
from .config import get_cmac_values, get_field_names, get_metadata
def cmac(radar, sonde, config, geotiff=None, flip_velocity=False,
meta_append=None, verbose=True):
"""
Corrected Moments in Antenna Coordinates
Parameters
----------
radar : Radar
Radar object to use in the CMAC calculation.
sonde : Object
Object containing all the sonde data.
config : str
Configuration string used to select the CMAC 2.0 dictionaries
(values, field names and metadata) specific to a radar.
Other Parameters
----------------
geotiff : str
Filepath for a geotiff. If provided, a beam blockage gate id will be
generated.
meta_append : dict, str or None
Key/value pairs to append to the global attributes. If None,
default metadata will be created. The metadata can also be
provided as a dictionary or as the path to a json file.
verbose : bool
If True, this will display more statistics.
Returns
-------
radar : Radar
Radar object with new CMAC added fields.
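Examples
--------
A minimal usage sketch; the file names and the configuration key below
are placeholders, not values shipped with this module:
>>> import netCDF4
>>> import pyart
>>> radar = pyart.io.read('radar_file.nc')
>>> sonde = netCDF4.Dataset('sonde_file.nc')
>>> cmac_radar = cmac(radar, sonde, 'my_radar_config', verbose=True)
>>> sonde.close()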
"""
# Retrieve values from the configuration file.
cmac_config = get_cmac_values(config)
field_config = get_field_names(config)
meta_config = get_metadata(config)
# Overwrite site altitude.
if 'site_alt' in cmac_config.keys():
radar.altitude['data'][0] = cmac_config['site_alt']
# Obtaining variables needed for fuzzy logic.
radar_start_date = netCDF4.num2date(
radar.time['data'][0], radar.time['units'],
only_use_cftime_datetimes=False, only_use_python_datetimes=True)
print('##', str(radar_start_date))
temp_field = field_config['temperature']
alt_field = field_config['altitude']
vel_field = field_config['velocity']
if 'gen_clutter_from_refl' not in cmac_config.keys():
cmac_config['gen_clutter_from_refl'] = False
if cmac_config['gen_clutter_from_refl']:
new_clutter_field = gen_clutter_field_from_refl(radar, field_config['input_clutter_corrected_reflectivity'],
field_config['reflectivity'],
diff_dbz=cmac_config['gen_clutter_from_refl_diff'],
max_h=cmac_config['gen_clutter_from_refl_alt'])
radar.add_field(field_config['clutter'], new_clutter_field, replace_existing=True)
radar.fields[field_config['clutter']]['units'] = '1'
# ZDR offsets
if 'zdr_offset' in cmac_config.keys():
if 'offset_zdrs' in cmac_config.keys():
for fld in cmac_config['offset_zdrs']:
radar.fields[fld]['data'] += cmac_config['zdr_offset']
else:
radar.fields[field_config['input_zdr']]['data'] += cmac_config['zdr_offset']
# flipping phidp
if 'flip_phidp' not in cmac_config.keys():
cmac_config['flip_phidp'] = False
if cmac_config['flip_phidp']:
if 'phidp_flipped' in cmac_config.keys(): # user specifies fields to flip
for fld in cmac_config['phidp_flipped']:
radar.fields[fld]['data'] = radar.fields[fld]['data'] * -1.0
else: # just flip defined phidp field
radar.fields[field_config['input_phidp_field']]['data'] = radar.fields[field_config['input_phidp_field']]['data']*-1.0
if flip_velocity:
radar.fields[vel_field]['data'] = radar.fields[
vel_field]['data'] * -1.0
z_dict, temp_dict = pyart.retrieve.map_profile_to_gates(
sonde.variables[temp_field][:], sonde.variables[alt_field][:], radar)
if 'clutter_mask_z_for_texture' not in cmac_config.keys():
cmac_config['clutter_mask_z_for_texture'] = False
if cmac_config['clutter_mask_z_for_texture']:
masked_vr = copy.deepcopy(radar.fields[vel_field])
if 'ground_clutter' in radar.fields.keys():
masked_vr['data'] = np.ma.masked_where(radar.fields['ground_clutter']['data'] == 1, masked_vr['data'])
masked_vr['data'][radar.fields['ground_clutter']['data'] == 1] = np.nan
radar.add_field('clutter_masked_velocity', masked_vr, replace_existing=True)
texture = get_texture(radar, 'clutter_masked_velocity')
texture['data'][np.isnan(texture['data'])] = 0.0
else:
texture = get_texture(radar, vel_field)
snr = pyart.retrieve.calculate_snr_from_reflectivity(radar)
if not verbose:
print('## Adding radar fields...')
if verbose:
print('##')
print('## These radar fields are being added:')
radar.add_field('sounding_temperature', temp_dict, replace_existing=True)
radar.fields['sounding_temperature']['units'] = 'deg_C'
radar.add_field('height', z_dict, replace_existing=True)
radar.add_field('signal_to_noise_ratio', snr, replace_existing=True)
radar.add_field('velocity_texture', texture, replace_existing=True)
if verbose:
print('## sounding_temperature')
print('## height')
print('## signal_to_noise_ratio')
print('## velocity_texture')
# Performing fuzzy logic to obtain the gate ids.
rhv_field = field_config['cross_correlation_ratio']
ncp_field = field_config['normalized_coherent_power']
if 'mbfs' not in cmac_config:
cmac_config['mbfs'] = None
if 'hard_const' not in cmac_config:
cmac_config['hard_const'] = None
# Specifically for dealing with the ingested C-SAPR2 data
my_fuzz, _ = do_my_fuzz(radar, rhv_field, ncp_field, verbose=verbose,
custom_mbfs=cmac_config['mbfs'],
custom_hard_constraints=cmac_config['hard_const'])
radar.add_field('gate_id', my_fuzz,
replace_existing=True)
if 'ground_clutter' in radar.fields.keys():
# Adding fifth gate id, clutter.
clutter_data = radar.fields['ground_clutter']['data']
gate_data = radar.fields['gate_id']['data'].copy()
radar.fields['gate_id']['data'][clutter_data == 1] = 5
notes = radar.fields['gate_id']['notes']
radar.fields['gate_id']['notes'] = notes + ',5:clutter'
radar.fields['gate_id']['valid_max'] = 5
if 'classification_mask' in radar.fields.keys():
clutter_data = radar.fields['classification_mask']['data']
gate_data = radar.fields['gate_id']['data'].copy()
radar.fields['gate_id']['data'][clutter_data == 8] = 5
radar.fields['gate_id']['data'][clutter_data == 16] = 5
radar.fields['gate_id']['data'][clutter_data == 4] = 5
radar.fields['gate_id']['data'][clutter_data == 1] = 0
radar.fields['gate_id']['data'][clutter_data == 2] = 0
radar.fields['gate_id']['data'][gate_data == 0] = 0
notes = radar.fields['gate_id']['notes']
radar.fields['gate_id']['notes'] = notes + ',5:clutter'
radar.fields['gate_id']['valid_max'] = 5
if geotiff is not None:
pbb_all, cbb_all = beam_block(
radar, geotiff, cmac_config['radar_height_offset'],
cmac_config['beam_width'])
radar.fields['gate_id']['data'][cbb_all > 0.30] = 6
notes = radar.fields['gate_id']['notes']
radar.fields['gate_id']['notes'] = notes + ',6:terrain_blockage'
radar.fields['gate_id']['valid_max'] = 6
pbb_dict = pbb_to_dict(pbb_all)
cbb_dict = cbb_to_dict(cbb_all)
radar.add_field('partial_beam_blockage', pbb_dict)
radar.add_field('cumulative_beam_blockage', cbb_dict)
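# Build a lookup of category name -> gate_id value by parsing the gate_id 'notes' string (entries like '5:clutter').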
cat_dict = {}
for pair_str in radar.fields['gate_id']['notes'].split(','):
cat_dict.update(
{pair_str.split(':')[1]:int(pair_str.split(':')[0])})
if verbose:
print('## gate_id')
# Corrected velocity using pyart's region dealiaser.
cmac_gates = pyart.correct.GateFilter(radar)
cmac_gates.exclude_all()
cmac_gates.include_equal('gate_id', cat_dict['rain'])
cmac_gates.include_equal('gate_id', cat_dict['melting'])
cmac_gates.include_equal('gate_id', cat_dict['snow'])
# Create a simulated velocity field from the sonde object.
u_field = field_config['u_wind']
v_field = field_config['v_wind']
u_wind = sonde.variables[u_field][:]
v_wind = sonde.variables[v_field][:]
alt_field = field_config['altitude']
sonde_alt = sonde.variables[alt_field][:]
profile = pyart.core.HorizontalWindProfile.from_u_and_v(
sonde_alt, u_wind, v_wind)
sim_vel = pyart.util.simulated_vel_from_profile(radar, profile)
radar.add_field('simulated_velocity', sim_vel, replace_existing=True)
# Create the corrected velocity field from the region dealias algorithm.
corr_vel = pyart.correct.dealias_region_based(
radar, vel_field=vel_field, ref_vel_field='simulated_velocity',
keep_original=False, gatefilter=cmac_gates, centered=True)
radar.add_field('corrected_velocity', corr_vel, replace_existing=True)
if verbose:
print('## corrected_velocity')
print('## simulated_velocity')
fzl = get_melt(radar)
# Is the freezing level realistic? If not, assume
ref_offset = cmac_config['ref_offset']
self_const = cmac_config['self_const']
# Calculating differential phase fields.
radar.fields['differential_phase']['data'][
radar.fields['differential_phase']['data']<0] += 360.0
phidp, kdp = pyart.correct.phase_proc_lp_gf(
radar, gatefilter=cmac_gates, offset=ref_offset, debug=True,
nowrap=50, fzl=fzl, self_const=self_const)
phidp_filt, kdp_filt = fix_phase_fields(
copy.deepcopy(kdp), copy.deepcopy(phidp), radar.range['data'],
cmac_gates)
radar.add_field('corrected_differential_phase', phidp,
replace_existing=True)
radar.add_field('filtered_corrected_differential_phase', phidp_filt,
replace_existing=True)
radar.add_field('corrected_specific_diff_phase', kdp,
replace_existing=True)
radar.add_field('filtered_corrected_specific_diff_phase', kdp_filt,
replace_existing=True)
if verbose:
print('## corrected_specific_diff_phase')
print('## filtered_corrected_specific_diff_phase')
print('## corrected_differential_phase')
print('## filtered_corrected_differential_phase')
# Calculating attenuation by using pyart.
refl_field = field_config['reflectivity']
attenuation_a_coef = cmac_config['attenuation_a_coef']
c_coef = cmac_config['c_coef']
d_coef = cmac_config['d_coef']
beta_coef = cmac_config['beta_coef']
rr_a = cmac_config['rain_rate_a_coef']
rr_b = cmac_config['rain_rate_b_coef']
zdr_field = field_config['differential_reflectivity']
radar.fields['corrected_differential_reflectivity'] = copy.deepcopy(
radar.fields[zdr_field])
radar.fields['corrected_reflectivity'] = copy.deepcopy(
radar.fields[refl_field])
radar.fields['corrected_reflectivity']['data'] = np.ma.masked_where(
cmac_gates.gate_excluded,
radar.fields['corrected_reflectivity']['data'])
# Get specific differential attenuation.
# Need height over 0C isobar.
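# Estimate the 0 deg C isotherm height as the mean gate height where the sounding temperature is within 0.1 deg C of zero.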
iso0 = np.ma.mean(radar.fields['height']['data'][
np.where(np.abs(radar.fields['sounding_temperature']['data']) < 0.1)])
radar.fields['height_over_iso0'] = copy.deepcopy(radar.fields['height'])
radar.fields['height_over_iso0']['data'] -= iso0
phidp_field = field_config['phidp_field']
(spec_at, pia_dict, cor_z, spec_diff_at,
pida_dict, cor_zdr) = pyart.correct.calculate_attenuation_zphi(
radar, temp_field='sounding_temperature',
iso0_field='height_over_iso0',
zdr_field=field_config['zdr_field'],
pia_field=field_config['pia_field'],
phidp_field=field_config['phidp_field'],
refl_field=field_config['refl_field'], c=c_coef, d=d_coef,
a_coef=attenuation_a_coef, beta=beta_coef,
gatefilter=cmac_gates)
# cor_zdr['data'] += cmac_config['zdr_offset'] Now taken care of at start
radar.add_field('specific_attenuation', spec_at, replace_existing=True)
radar.add_field('path_integrated_attenuation', pia_dict,
replace_existing=True)
radar.add_field('corrected_reflectivity', cor_z, replace_existing=True)
radar.add_field('specific_differential_attenuation', spec_diff_at,
replace_existing=True)
radar.add_field('path_integrated_differential_attenuation', pida_dict,
replace_existing=True)
radar.add_field('corrected_differential_reflectivity', cor_zdr,
replace_existing=True)
radar.fields['corrected_velocity']['units'] = 'm/s'
radar.fields['simulated_velocity']['units'] = 'm/s'
radar.fields['velocity_texture']['units'] = 'm/s'
cat_dict = {}
for pair_str in radar.fields['gate_id']['notes'].split(','):
if verbose:
print(pair_str)
cat_dict.update({pair_str.split(':')[1]: int(pair_str.split(':')[0])})
rain_gates = pyart.correct.GateFilter(radar)
rain_gates.exclude_all()
rain_gates.include_equal('gate_id', cat_dict['rain'])
# Calculating rain rate.
R = rr_a * (radar.fields['specific_attenuation']['data']) ** rr_b
rainrate = copy.deepcopy(radar.fields['specific_attenuation'])
rainrate['data'] = R
rainrate['valid_min'] = 0.0
rainrate['valid_max'] = 400.0
rainrate['standard_name'] = 'rainfall_rate'
rainrate['long_name'] = 'rainfall_rate'
rainrate['least_significant_digit'] = 1
rainrate['units'] = 'mm/hr'
radar.fields.update({'rain_rate_A': rainrate})
# This needs to be updated to a gatefilter.
mask = radar.fields['reflectivity']['data'].mask
radar.fields['rain_rate_A'].update({
'comment': 'Rain rate calculated from specific_attenuation,'
+ ' R=51.3*specific_attenuation**0.81, note R=0.0 where'
+ ' norm coherent power < 0.4 or rhohv < 0.8'})
if verbose:
print('## Rainfall rate as a function of A ##')
print('##')
print('## All CMAC fields have been added to the radar object.')
print('##')
# Adding the metadata to the cmac radar object.
print('## Appending metadata')
command_line = ''
for item in sys.argv:
command_line = command_line + ' ' + item
if meta_append is None:
meta = {
'site_id': None,
'data_level': 'sgp',
'comment': 'This is highly experimental and initial data. '
+ 'There are many known and unknown issues. Please do '
+ 'not use before contacting the Translator responsible '
+ 'scollis@anl.gov',
'attributions': 'This data is collected by the ARM Climate Research '
+ 'facility. Radar system is operated by the radar '
+ 'engineering team radar@arm.gov and the data is '
+ 'processed by the precipitation radar products '
+ 'team. LP code courtesy of Scott Giangrande, BNL.',
'version': '2.0 lite',
'vap_name': 'cmac',
'known_issues': 'False phidp jumps in insect regions. Still uses '
+ 'old Giangrande code.',
'developers': 'Robert Jackson, ANL. Zachary Sherman, ANL.',
'translator': 'Scott Collis, ANL.',
'mentors': 'Bradley Isom, PNNL., Iosif Lindenmaier, PNNL.',
'Conventions': 'CF/Radial instrument_parameters ARM-1.3'}
else:
if meta_append.lower().endswith('.json'):
with open(meta_append, 'r') as infile:
meta = json.load(infile)
elif meta_append == 'config':
meta = meta_config
else:
raise RuntimeError('Must provide the file name of the json file',
'or say config to use the meta data from',
'config.py')
radar.metadata.clear()
radar.metadata.update(meta)
radar.metadata['command_line'] = command_line
return radar
def area_coverage(radar, precip_threshold=10.0, convection_threshold=40.0):
""" Returns percent coverage of precipitation and convection. """
temp_radar = radar.extract_sweeps([0])
ref = temp_radar.fields['corrected_reflectivity']['data']
total_len = len(ref.flatten())
ref_10_len = len(np.argwhere(ref >= precip_threshold))
ref_40_len = len(np.argwhere(ref >= convection_threshold))
ref_10_per = (ref_10_len/total_len)*100
ref_40_per = (ref_40_len/total_len)*100
del temp_radar
return ref_10_per, ref_40_per
def pbb_to_dict(pbb_all):
""" Function that takes the pbb_all array and turns
it into a dictionary to be used and added to the
pyart radar object. """
pbb_dict = {}
pbb_dict['coordinates'] = 'elevation azimuth range'
pbb_dict['units'] = '1'
pbb_dict['data'] = pbb_all
pbb_dict['standard_name'] = 'partial_beam_block'
pbb_dict['long_name'] = 'Partial Beam Block Fraction'
pbb_dict['comment'] = 'Partial beam block fraction due to terrain.'
return pbb_dict
def cbb_to_dict(cbb_all):
""" Function that takes the cbb_all array and turns
it into a dictionary to be used and added to the
pyart radar object. """
cbb_dict = {}
cbb_dict['coordinates'] = 'elevation azimuth range'
cbb_dict['units'] = '1'
cbb_dict['data'] = cbb_all
cbb_dict['standard_name'] = 'cumulative_beam_block'
cbb_dict['long_name'] = 'Cumulative Beam Block Fraction'
cbb_dict['comment'] = 'Cumulative beam block fraction due to terrain.'
return cbb_dict
|
|
"""Runs a test suite against Sublime Text.
Usage:
1. cd path/to/PACKAGE
2. python path/to/run_tests.py PACKAGE
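Optional flags (parsed at the bottom of this file) may be passed before
PACKAGE, e.g. --coverage, --syntax-test, --syntax-compatibility,
--color-scheme-test.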
"""
from __future__ import print_function
import json
import optparse
import os
import re
import shutil
import subprocess
import sys
import time
# todo: allow different sublime versions
PACKAGES_DIR_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..'))
UT_OUTPUT_DIR_PATH = os.path.realpath(os.path.join(PACKAGES_DIR_PATH, 'User', 'UnitTesting'))
SCHEDULE_FILE_PATH = os.path.realpath(os.path.join(UT_OUTPUT_DIR_PATH, 'schedule.json'))
UT_DIR_PATH = os.path.realpath(os.path.join(PACKAGES_DIR_PATH, 'UnitTesting'))
UT_SBIN_PATH = os.path.realpath(os.path.join(PACKAGES_DIR_PATH, 'UnitTesting', 'sbin'))
SCHEDULE_RUNNER_SOURCE = os.path.join(UT_SBIN_PATH, "run_scheduler.py")
SCHEDULE_RUNNER_TARGET = os.path.join(UT_DIR_PATH, "zzz_run_scheduler.py")
RX_RESULT = re.compile(r'^(?P<result>OK|FAILED|ERROR)', re.MULTILINE)
RX_DONE = re.compile(r'^UnitTesting: Done\.$', re.MULTILINE)
_is_windows = sys.platform == 'win32'
def create_dir_if_not_exists(path):
if not os.path.isdir(path):
os.makedirs(path)
def delete_file_if_exists(path):
if os.path.exists(path):
os.unlink(path)
def copy_file_if_not_exists(source, target):
if not os.path.exists(target):
shutil.copyfile(source, target)
def create_schedule(package, output_file, default_schedule):
schedule = []
try:
with open(SCHEDULE_FILE_PATH, 'r') as f:
schedule = json.load(f)
except Exception:
pass
if not any(s['package'] == package for s in schedule):
print('Schedule:')
for k, v in default_schedule.items():
print(' %s: %s' % (k, v))
schedule.append(default_schedule)
with open(SCHEDULE_FILE_PATH, 'w') as f:
f.write(json.dumps(schedule, ensure_ascii=False, indent=True))
def wait_for_output(path, schedule, timeout=30):
start_time = time.time()
needs_newline = False
def check_has_timed_out():
return time.time() - start_time > timeout
def check_is_output_available():
try:
return os.stat(path).st_size != 0
except Exception:
pass
while not check_is_output_available():
print(".", end="")
needs_newline = True
if check_has_timed_out():
print()
delete_file_if_exists(schedule)
raise ValueError('timeout')
time.sleep(1)
else:
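# This "else" belongs to the while loop: it runs only when the loop exits normally, i.e. output appeared before the timeout raise above.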
if needs_newline:
print()
def start_sublime_text():
cmd = ["subl"]
if not _is_windows:
# In some Linux/macOS CI environments, starting ST simply with `subl`
# causes the CI process to time out. Using `subl &` seems to solve
# this.
cmd.append("&")
subprocess.Popen([' '.join(cmd)], shell=True)
def read_output(path):
# todo: use notification instead of polling
success = None
def check_is_success(result):
try:
return RX_RESULT.search(result).group('result') == 'OK'
except AttributeError:
return success
def check_is_done(result):
return RX_DONE.search(result) is not None
with open(path, 'r') as f:
while True:
offset = f.tell()
result = f.read()
print(result, end="")
# Keep checking while we don't have a definite result.
success = check_is_success(result)
if check_is_done(result):
assert success is not None, 'final test result must not be None'
break
elif not result:
f.seek(offset)
time.sleep(0.2)
return success
def restore_coverage_file(path, package):
# restore .coverage if it exists, needed for coveralls
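# Paths in the coverage file point at Packages/<package>; rewrite them to the current working directory so coverage tooling can find the sources in the repo checkout.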
if os.path.exists(path):
with open(path, 'r') as f:
txt = f.read()
txt = txt.replace(os.path.realpath(os.path.join(PACKAGES_DIR_PATH, package)), os.getcwd())
with open(os.path.join(os.getcwd(), ".coverage"), "w") as f:
f.write(txt)
def main(default_schedule_info):
package_under_test = default_schedule_info['package']
output_dir = os.path.join(UT_OUTPUT_DIR_PATH, package_under_test)
output_file = os.path.join(output_dir, "result")
coverage_file = os.path.join(output_dir, "coverage")
default_schedule_info['output'] = output_file
create_dir_if_not_exists(output_dir)
delete_file_if_exists(output_file)
delete_file_if_exists(coverage_file)
create_schedule(package_under_test, output_file, default_schedule_info)
delete_file_if_exists(SCHEDULE_RUNNER_TARGET)
copy_file_if_not_exists(SCHEDULE_RUNNER_SOURCE, SCHEDULE_RUNNER_TARGET)
start_sublime_text()
try:
print("Wait for tests output...", end="")
wait_for_output(output_file, SCHEDULE_RUNNER_TARGET)
print("Start to read output...")
if not read_output(output_file):
sys.exit(1)
except ValueError:
print("Timeout: Could not obtain tests output.")
print("Maybe Sublime Text is not responding or the tests output"
"is being written to the wrong file.")
sys.exit(1)
finally:
restore_coverage_file(coverage_file, package_under_test)
delete_file_if_exists(SCHEDULE_RUNNER_TARGET)
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option('--syntax-test', action='store_true')
parser.add_option('--syntax-compatibility', action='store_true')
parser.add_option('--color-scheme-test', action='store_true')
parser.add_option('--coverage', action='store_true')
options, remainder = parser.parse_args()
syntax_test = options.syntax_test
syntax_compatibility = options.syntax_compatibility
color_scheme_test = options.color_scheme_test
coverage = options.coverage
package_under_test = remainder[0] if len(remainder) > 0 else "UnitTesting"
default_schedule_info = {
'package': package_under_test,
'syntax_test': syntax_test,
'syntax_compatibility': syntax_compatibility,
'color_scheme_test': color_scheme_test,
'coverage': coverage,
}
main(default_schedule_info)
|
|
#!/usr/bin/env python2
"""Display multi-page text with a quiz at the end."""
# TappingWithTrTiming_Movie_d3.py
# Created 11/09/15 by DJ based on DistractionTask_practice_d3.py
# Updated 12/4/15 by DJ - made movie version
# Updated 12/7/15 by DJ - updated prompts, general cleanup
# Updated 1/12/16 by DJ - moved from movie to frame-by-frame display, single repeated condition
from psychopy import core, gui, data, event, sound, logging
# from psychopy import visual # visual causes a bug in the GUIs, so it is imported after all GUIs have run.
from psychopy.tools.filetools import fromFile, toFile # saving and loading parameter files
import time as ts, numpy as np # for timing and array operations
import AppKit, os, glob # for monitor size detection, files
import BasicPromptTools # for loading/presenting prompts and questions
import random # for randomization of trials
# ====================== #
# ===== PARAMETERS ===== #
# ====================== #
# Save the parameters declared below?
saveParams = True
newParamsFilename = 'TappingParams.pickle'
# Declare primary task parameters.
params = {
# Declare stimulus and response parameters
'nBlocks': 5, # number of blocks in this session
'condition': 'TapRight',
'movieFolder': 'Images/', # relative path to tapping videos
'blockDur_TRs': 3, # duration of each tapping block (in TRs)
'restDur_TRs': 3, # duration of each rest block (in TRs)
'tStartup_TRs': 0, # pause time before starting first stimulus (in TRs)
'triggerKey': 't', # key from scanner that says scan is starting
# declare prompt and question files
'skipPrompts': False, # go right to the scanner-wait page
'promptDir': 'Text/', # directory containing prompts and questions files
# declare display parameters
'fullScreen': True, # run in full screen mode?
'screenToShow': 1, # display on primary screen (0) or secondary (1)?
'fixCrossSize': 100, # size of cross, in pixels
'movieSize': (400,250), # size of image in pixels
'fixCrossPos': [0,0], # (x,y) pos of fixation cross displayed before each stimulus (for gaze drift correction)
'screenColor':(128,128,128), # in rgb255 space: (r,g,b) all between 0 and 255
'textHeight': 40 #(in pixels)
}
stimList = {
'conditionList':['TapRight','TapFast','TapLeft','AlmostRight','ImagineRight'],
'moviePromptList': ['Tap (Right Hand) \nAlong with the video.','Tap (Right Hand) \nAlong with the video.','Tap (Left Hand) \nAlong with the video.','Move (Right Hand) but do NOT touch fingers \nAlong with the video.','Imagine Tapping (Right Hand).'],
'moviePrefixList': ['right','right','left','right_almost','right_imagine'], # filenames of movies
'movieFrameRateList': [10, 40, 10, 10, 1], # frame rate of each movie (frames per second)
'movieNFrameList':[10, 10, 10, 10, 1], # nFrames in each movie (numbered from 0 to nFrames-1)
'promptFileList': ['TappingPrompts_Movie.txt','TappingPrompts_Movie.txt','TappingPrompts_Movie.txt','AlmostPrompts_Movie.txt','ImaginePrompts_Movie.txt'] # Name of text file containing prompts
}
# save parameters
if saveParams:
dlgResult = gui.fileSaveDlg(prompt='Save Params...',initFilePath = os.getcwd() + '/Params', initFileName = newParamsFilename,
allowed="PICKLE files (.pickle)|.pickle|All files (.*)|")
newParamsFilename = dlgResult
if newParamsFilename is None: # keep going, but don't save
saveParams = False
else:
toFile(newParamsFilename, params) # save it!
# ========================== #
# ===== SET UP LOGGING ===== #
# ========================== #
scriptName = os.path.basename(__file__)
try: # try to get a previous parameters file
lastInfo = fromFile('%s-lastExpInfo.pickle'%scriptName)
expInfo = {
'subject': lastInfo['subject'],
'session': lastInfo['session']+1,
'condition': stimList['conditionList'],
'skipPrompts':lastInfo['skipPrompts'],
'paramsFile':[lastInfo['paramsFile'],'Load...']}
except: # if not there then use a default set
expInfo = {
'subject':'1',
'session': 1,
'condition': stimList['conditionList'],
'skipPrompts':False,
'paramsFile':['DEFAULT','Load...']}
# overwrite params struct if you just saved a new parameter set
if saveParams:
expInfo['paramsFile'] = [newParamsFilename,'Load...']
# present a dialogue to review/change the selected params
dlg = gui.DlgFromDict(expInfo, title=scriptName, order=['subject','session','condition','skipPrompts','paramsFile'])
if not dlg.OK:
core.quit() # the user hit cancel, so exit
# find parameter file
if expInfo['paramsFile'] == 'Load...':
dlgResult = gui.fileOpenDlg(prompt='Select parameters file',tryFilePath=os.getcwd(),
allowed="PICKLE files (.pickle)|.pickle|All files (.*)|")
expInfo['paramsFile'] = dlgResult[0]
# load parameter file
if expInfo['paramsFile'] not in ['DEFAULT', None]: # otherwise, just use defaults.
# load params file
params = fromFile(expInfo['paramsFile'])
# transfer skipPrompts from expInfo (gui input) to params (logged parameters)
params['skipPrompts'] = expInfo['skipPrompts']
params['condition'] = expInfo['condition']
iCondition = stimList['conditionList'].index(params['condition'])
print('iCondition = %d'%iCondition)
# print params to Output
print 'params = {'
for key in sorted(params.keys()):
print " '%s': %s"%(key,params[key]) # print each value as-is (no quotes)
print '}'
# save experimental info
toFile('%s-lastExpInfo.pickle'%scriptName, expInfo)#save params to file for next time
#make a log file to save parameter/event data
dateStr = ts.strftime("%b_%d_%H%M", ts.localtime()) # add the current time
filename = '%s-%s-%d-%s'%(scriptName,expInfo['subject'], expInfo['session'], dateStr) # log filename
logging.LogFile((filename+'.log'), level=logging.INFO)#, mode='w') # w=overwrite
logging.log(level=logging.INFO, msg='---START PARAMETERS---')
logging.log(level=logging.INFO, msg='filename: %s'%filename)
logging.log(level=logging.INFO, msg='subject: %s'%expInfo['subject'])
logging.log(level=logging.INFO, msg='session: %s'%expInfo['session'])
logging.log(level=logging.INFO, msg='date: %s'%dateStr)
# log everything in the params struct
for key in sorted(params.keys()): # in alphabetical order
logging.log(level=logging.INFO, msg='%s: %s'%(key,params[key])) # log each parameter
logging.log(level=logging.INFO, msg='---END PARAMETERS---')
# ========================== #
# ===== GET SCREEN RES ===== #
# ========================== #
# kluge for secondary monitor
if params['fullScreen']:
screens = AppKit.NSScreen.screens()
screenRes = (int(screens[params['screenToShow']].frame().size.width), int(screens[params['screenToShow']].frame().size.height))
# screenRes = [1920, 1200]
if params['screenToShow']>0:
params['fullScreen'] = False
else:
screenRes = [800,600]
print "screenRes = [%d,%d]"%screenRes
# ========================== #
# ===== SET UP STIMULI ===== #
# ========================== #
from psychopy import visual
# Initialize deadline for displaying next frame
tNextFlip = [0.0] # put in a list to make it mutable (weird quirk of python variables)
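# (A one-element list lets AddToFlipTime/SetFlipTimeToNow below rebind the value from inside their closures without a 'global' statement.)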
#create clocks and window
globalClock = core.Clock()#to keep track of time
trialClock = core.Clock()#to keep track of time
win = visual.Window(screenRes, fullscr=params['fullScreen'], allowGUI=False, monitor='testMonitor', screen=params['screenToShow'], units='deg', name='win',color=params['screenColor'],colorSpace='rgb255')
# create fixation cross
fCS = params['fixCrossSize'] # size (for brevity)
fCP = params['fixCrossPos'] # position (for brevity)
fixation = visual.ShapeStim(win,lineColor='#000000',lineWidth=3.0,vertices=((fCP[0]-fCS/2,fCP[1]),(fCP[0]+fCS/2,fCP[1]),(fCP[0],fCP[1]),(fCP[0],fCP[1]+fCS/2),(fCP[0],fCP[1]-fCS/2)),units='pix',closeShape=False,name='fixCross');
# create text stimuli
message1 = visual.TextStim(win, pos=[0,+.5], wrapWidth=1.5, color='#000000', alignHoriz='center', name='topMsg', text="aaa",units='norm')
message2 = visual.TextStim(win, pos=[0,-.5], wrapWidth=1.5, color='#000000', alignHoriz='center', name='bottomMsg', text="bbb",units='norm')
# Load new image stimuli
tapImages = []
for i in range(0,stimList['movieNFrameList'][iCondition]):
tapImages.append(visual.ImageStim(win, pos=[0,0], name='Movie Frame %d'%i,image='%s%s_%d.png'%(params['movieFolder'],stimList['moviePrefixList'][iCondition],i), units='pix', size=params['movieSize']))
# Create bottom text stim
tapText = visual.TextStim(win, stimList['moviePromptList'][iCondition], wrapWidth=params['movieSize'][0], color='#000000', pos=(0, params['movieSize'][1]/2+params['textHeight']*2), height = params['textHeight'], units = 'pix')
# read prompts from text files
[topPrompts,bottomPrompts] = BasicPromptTools.ParsePromptFile(params['promptDir']+stimList['promptFileList'][iCondition])
print('%d prompts loaded from %s'%(len(topPrompts),stimList['promptFileList'][iCondition]))
# ============================ #
# ======= SUBFUNCTIONS ======= #
# ============================ #
# increment time of next window flip
def AddToFlipTime(tIncrement=1.0):
tNextFlip[0] += tIncrement
# flip window as soon as possible
def SetFlipTimeToNow():
tNextFlip[0] = globalClock.getTime()
def CheckForTriggers():
# get new keys
newKeys = event.getKeys(keyList=[params['triggerKey'], 'q','escape'],timeStamped=globalClock)
# check each keypress for escape or trigger keys
nTriggers = 0
if len(newKeys)>0:
for thisKey in newKeys:
if thisKey[0] in ['q','escape']: # escape keys
CoolDown() # exit gracefully
elif thisKey[0] == params['triggerKey']:
nTriggers = nTriggers + 1
return nTriggers
def PlayTappingMovie(tapImages, tapText, dt, blockDur_TRs):
# Wait for escape key press or 'blockDur_TRs' triggers
nTriggers = 0
SetFlipTimeToNow()
tBlockStart = globalClock.getTime() # record time when window flipped
iFrame = 0
SetFlipTimeToNow()
while (nTriggers < blockDur_TRs): # until it's time for the next frame # while mov.status != visual.FINISHED:
# ---tapping movie
# check for loop or movie end
if iFrame >= len(tapImages):
iFrame=0 # rewind to beginning
# Only flip when a new frame should be displayed.
if globalClock.getTime()>=tNextFlip[0]:
# draw movie frame, draw text stim, and flip
tapImages[iFrame].draw()
tapText.draw()
win.logOnFlip(level=logging.EXP, msg='Display Frame %d'%iFrame)
win.flip()
# increment iFrame
iFrame = iFrame+1
# Add to flip time
AddToFlipTime(dt)
else:
# Give the OS a break if a flip is not needed
ts.sleep(0.001)
# Check for triggers and increment trigger count
nNew = CheckForTriggers()
nTriggers = nTriggers + nNew
# check for final trigger
if nTriggers >= blockDur_TRs:
break
# allow screen update
SetFlipTimeToNow()
# Get block time
tBlock = globalClock.getTime()-tBlockStart
print('Block time: %.3f seconds'%(tBlock))
return (tBlock)
# Pause until a given number of TRs is received.
def WaitForTrs(tWait_TRs):
# do IBI
nTriggers = 0
while (nTriggers < tWait_TRs):
# Check for triggers and increment trigger count
nNew = CheckForTriggers()
nTriggers = nTriggers + nNew
if nTriggers >= tWait_TRs:
break
# Handle end of a session
def CoolDown():
# display cool-down message
message1.setText("That's the end! ")
message2.setText("Press 'q' or 'escape' to end the session.")
win.logOnFlip(level=logging.EXP, msg='Display TheEnd')
win.clearBuffer() # clear the screen
message1.draw()
message2.draw()
win.flip()
thisKey = event.waitKeys(keyList=['q','escape'])
# exit
core.quit()
# =========================== #
# ======= RUN PROMPTS ======= #
# =========================== #
# display prompts
if not params['skipPrompts']:
BasicPromptTools.RunPrompts(topPrompts,bottomPrompts,win,message1,message2)
# wait for scanner
message1.setText("Please don't move...")
message2.setText("") #("(Press '%c' to override.)"%params['triggerKey'].upper())
message1.draw()
message2.draw()
win.logOnFlip(level=logging.EXP, msg='PleaseDontMove') #'Display WaitingForScanner')
win.flip()
event.waitKeys(keyList=params['triggerKey'])
tStartSession = globalClock.getTime()
AddToFlipTime(tStartSession)
# wait before first stimulus
fixation.draw()
win.logOnFlip(level=logging.EXP, msg='Display Fixation')
win.flip()
WaitForTrs(params['tStartup_TRs']) # wait for the given # of TRs
# =========================== #
# ===== MAIN EXPERIMENT ===== #
# =========================== #
# log experiment start and set up
logging.log(level=logging.EXP, msg='---START EXPERIMENT---')
# run main experiment loop
for iBlock in range(0,params['nBlocks']):
# Run rest period
print('Resting Block %d: duration=%.2f'%(iBlock, params['restDur_TRs']) )
# Display fixation cross
win.clearBuffer() # clear the screen
fixation.draw() # draw the cross
win.logOnFlip(level=logging.EXP, msg='Display Fixation')
win.flip() # flip the window ASAP
# do rest period
WaitForTrs(params['restDur_TRs'])
# display info to experimenter
print('Tapping Block %d: movie=%s, framerate=%.2f'%(iBlock, stimList['moviePrefixList'][iCondition], stimList['movieFrameRateList'][iCondition]) )
# display tapping movie
tBlock = PlayTappingMovie(tapImages=tapImages, tapText=tapText, dt=1.0/stimList['movieFrameRateList'][iCondition], blockDur_TRs=params['blockDur_TRs'])
# Log end of experiment
logging.log(level=logging.EXP, msg='--- END EXPERIMENT ---')
# exit experiment
CoolDown()
|
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine import recipe_api
from . import default
import subprocess # TODO(borenet): No! Remove this.
"""Android flavor, used for running code on Android."""
class AndroidFlavor(default.DefaultFlavor):
def __init__(self, m, app_name):
super(AndroidFlavor, self).__init__(m, app_name)
self._ever_ran_adb = False
self.ADB_BINARY = '/usr/bin/adb.1.0.35'
self.ADB_PUB_KEY = '/home/chrome-bot/.android/adbkey'
if 'skia' not in self.m.vars.swarming_bot_id:
self.ADB_BINARY = '/opt/infra-android/tools/adb'
self.ADB_PUB_KEY = ('/home/chrome-bot/.android/'
'chrome_infrastructure_adbkey')
# Data should go in android_data_dir, which may be preserved across runs.
android_data_dir = '/sdcard/revenge_of_the_skiabot/'
self.device_dirs = default.DeviceDirs(
bin_dir = '/data/local/tmp/',
dm_dir = android_data_dir + 'dm_out',
perf_data_dir = android_data_dir + 'perf',
resource_dir = android_data_dir + 'resources',
images_dir = android_data_dir + 'images',
lotties_dir = android_data_dir + 'lotties',
skp_dir = android_data_dir + 'skps',
svg_dir = android_data_dir + 'svgs',
mskp_dir = android_data_dir + 'mskp',
tmp_dir = android_data_dir,
texttraces_dir = android_data_dir + 'text_blob_traces')
# A list of devices we can't root. If rooting fails and a device is not
# on the list, we fail the task to avoid perf inconsistencies.
self.rootable_blacklist = ['GalaxyS6', 'GalaxyS7_G930FD', 'GalaxyS9',
'GalaxyS20', 'MotoG4', 'NVIDIA_Shield',
'P30', 'TecnoSpark3Pro']
# Maps device type -> CPU ids that should be scaled for nanobench.
# Many devices have two (or more) different CPUs (e.g. big.LITTLE
# on Nexus5x). The CPUs listed are the biggest cpus on the device.
# The CPUs are grouped together, so we only need to scale one of them
# (the one listed) in order to scale them all.
# E.g. Nexus5x has cpu0-3 as one chip and cpu4-5 as the other. Thus,
# if one wants to run a single-threaded application (e.g. nanobench), one
# can disable cpu0-3 and scale cpu 4 to have only cpu4 and 5 at the same
# frequency. See also disable_for_nanobench.
self.cpus_to_scale = {
'Nexus5x': [4],
'Pixel': [2],
'Pixel2XL': [4]
}
# Maps device type -> CPU ids that should be turned off when running
# single-threaded applications like nanobench. The devices listed have
# multiple, different CPUs. We notice a lot of noise that seems to be
# caused by nanobench running on the slow CPU, then the big CPU. By
# disabling this, we see less of that noise by forcing the same CPU
# to be used for the performance testing every time.
self.disable_for_nanobench = {
'Nexus5x': range(0, 4),
'Pixel': range(0, 2),
'Pixel2XL': range(0, 4)
}
self.gpu_scaling = {
"Nexus5": 450000000,
"Nexus5x": 600000000,
}
def _adb(self, title, *cmd, **kwargs):
# The only non-infra adb steps (dm / nanobench) happen to not use _adb().
if 'infra_step' not in kwargs:
kwargs['infra_step'] = True
self._ever_ran_adb = True
# ADB seems to be occasionally flaky on every device, so always retry.
attempts = 3
def wait_for_device(attempt):
self.m.run(self.m.step,
'kill adb server after failure of \'%s\' (attempt %d)' % (
title, attempt),
cmd=[self.ADB_BINARY, 'kill-server'],
infra_step=True, timeout=30, abort_on_failure=False,
fail_build_on_failure=False)
self.m.run(self.m.step,
'wait for device after failure of \'%s\' (attempt %d)' % (
title, attempt),
cmd=[self.ADB_BINARY, 'wait-for-device'], infra_step=True,
timeout=180, abort_on_failure=False,
fail_build_on_failure=False)
with self.m.context(cwd=self.m.path['start_dir'].join('skia')):
with self.m.env({'ADB_VENDOR_KEYS': self.ADB_PUB_KEY}):
return self.m.run.with_retry(self.m.step, title, attempts,
cmd=[self.ADB_BINARY]+list(cmd),
between_attempts_fn=wait_for_device,
**kwargs)
def _scale_for_dm(self):
device = self.m.vars.builder_cfg.get('model')
if (device in self.rootable_blacklist or
self.m.vars.internal_hardware_label):
return
# This is paranoia... any CPUs we disabled while running nanobench
# ought to be back online now that we've restarted the device.
for i in self.disable_for_nanobench.get(device, []):
self._set_cpu_online(i, 1) # enable
scale_up = self.cpus_to_scale.get(device, [0])
# For big.LITTLE devices, make sure we scale the LITTLE cores up;
# there is a chance they are still in powersave mode from when
# swarming slows things down for cooling down and charging.
if 0 not in scale_up:
scale_up.append(0)
for i in scale_up:
# AndroidOne doesn't support ondemand governor. hotplug is similar.
if device == 'AndroidOne':
self._set_governor(i, 'hotplug')
elif device in ['Pixel3a', 'Pixel4']:
# Pixel3a/4 offer the userspace, powersave, performance and schedutil governors;
# performance seems like a reasonable choice.
self._set_governor(i, 'performance')
else:
self._set_governor(i, 'ondemand')
def _scale_for_nanobench(self):
device = self.m.vars.builder_cfg.get('model')
if (device in self.rootable_blacklist or
self.m.vars.internal_hardware_label):
return
for i in self.cpus_to_scale.get(device, [0]):
self._set_governor(i, 'userspace')
self._scale_cpu(i, 0.6)
for i in self.disable_for_nanobench.get(device, []):
self._set_cpu_online(i, 0) # disable
if device in self.gpu_scaling:
#https://developer.qualcomm.com/qfile/28823/lm80-p0436-11_adb_commands.pdf
# Section 3.2.1 Commands to put the GPU in performance mode
# Nexus 5 is 320000000 by default
# Nexus 5x is 180000000 by default
gpu_freq = self.gpu_scaling[device]
self.m.run.with_retry(self.m.python.inline,
"Lock GPU to %d (and other perf tweaks)" % gpu_freq,
3, # attempts
program="""
import os
import subprocess
import sys
import time
ADB = sys.argv[1]
freq = sys.argv[2]
idle_timer = "10000"
log = subprocess.check_output([ADB, 'root'])
# check for message like 'adbd cannot run as root in production builds'
print log
if 'cannot' in log:
raise Exception('adb root failed')
subprocess.check_output([ADB, 'shell', 'stop', 'thermald'])
subprocess.check_output([ADB, 'shell', 'echo "%s" > '
'/sys/class/kgsl/kgsl-3d0/gpuclk' % freq])
actual_freq = subprocess.check_output([ADB, 'shell', 'cat '
'/sys/class/kgsl/kgsl-3d0/gpuclk']).strip()
if actual_freq != freq:
raise Exception('Frequency (actual, expected) (%s, %s)'
% (actual_freq, freq))
subprocess.check_output([ADB, 'shell', 'echo "%s" > '
'/sys/class/kgsl/kgsl-3d0/idle_timer' % idle_timer])
actual_timer = subprocess.check_output([ADB, 'shell', 'cat '
'/sys/class/kgsl/kgsl-3d0/idle_timer']).strip()
if actual_timer != idle_timer:
raise Exception('idle_timer (actual, expected) (%s, %s)'
% (actual_timer, idle_timer))
for s in ['force_bus_on', 'force_rail_on', 'force_clk_on']:
subprocess.check_output([ADB, 'shell', 'echo "1" > '
'/sys/class/kgsl/kgsl-3d0/%s' % s])
actual_set = subprocess.check_output([ADB, 'shell', 'cat '
'/sys/class/kgsl/kgsl-3d0/%s' % s]).strip()
if actual_set != "1":
raise Exception('%s (actual, expected) (%s, 1)'
% (s, actual_set))
""",
args = [self.ADB_BINARY, gpu_freq],
infra_step=True,
timeout=30)
def _set_governor(self, cpu, gov):
self._ever_ran_adb = True
self.m.run.with_retry(self.m.python.inline,
"Set CPU %d's governor to %s" % (cpu, gov),
3, # attempts
program="""
import os
import subprocess
import sys
import time
ADB = sys.argv[1]
cpu = int(sys.argv[2])
gov = sys.argv[3]
log = subprocess.check_output([ADB, 'root'])
# check for message like 'adbd cannot run as root in production builds'
print log
if 'cannot' in log:
raise Exception('adb root failed')
subprocess.check_output([ADB, 'shell', 'echo "%s" > '
'/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor' % (gov, cpu)])
actual_gov = subprocess.check_output([ADB, 'shell', 'cat '
'/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor' % cpu]).strip()
if actual_gov != gov:
raise Exception('(actual, expected) (%s, %s)'
% (actual_gov, gov))
""",
args = [self.ADB_BINARY, cpu, gov],
infra_step=True,
timeout=30)
def _set_cpu_online(self, cpu, value):
"""Set /sys/devices/system/cpu/cpu{N}/online to value (0 or 1)."""
self._ever_ran_adb = True
msg = 'Disabling'
if value:
msg = 'Enabling'
self.m.run.with_retry(self.m.python.inline,
'%s CPU %d' % (msg, cpu),
3, # attempts
program="""
import os
import subprocess
import sys
import time
ADB = sys.argv[1]
cpu = int(sys.argv[2])
value = int(sys.argv[3])
log = subprocess.check_output([ADB, 'root'])
# check for message like 'adbd cannot run as root in production builds'
print log
if 'cannot' in log:
raise Exception('adb root failed')
# If we try to echo 1 to an already online cpu, adb returns exit code 1.
# So, check the value before trying to write it.
prior_status = subprocess.check_output([ADB, 'shell', 'cat '
'/sys/devices/system/cpu/cpu%d/online' % cpu]).strip()
if prior_status == str(value):
print 'CPU %d online already %d' % (cpu, value)
sys.exit()
subprocess.check_output([ADB, 'shell', 'echo %s > '
'/sys/devices/system/cpu/cpu%d/online' % (value, cpu)])
actual_status = subprocess.check_output([ADB, 'shell', 'cat '
'/sys/devices/system/cpu/cpu%d/online' % cpu]).strip()
if actual_status != str(value):
raise Exception('(actual, expected) (%s, %d)'
% (actual_status, value))
""",
args = [self.ADB_BINARY, cpu, value],
infra_step=True,
timeout=30)
def _scale_cpu(self, cpu, target_percent):
self._ever_ran_adb = True
self.m.run.with_retry(self.m.python.inline,
'Scale CPU %d to %f' % (cpu, target_percent),
3, # attempts
program="""
import os
import subprocess
import sys
import time
ADB = sys.argv[1]
target_percent = float(sys.argv[2])
cpu = int(sys.argv[3])
log = subprocess.check_output([ADB, 'root'])
# check for message like 'adbd cannot run as root in production builds'
print log
if 'cannot' in log:
raise Exception('adb root failed')
root = '/sys/devices/system/cpu/cpu%d/cpufreq' %cpu
# All devices we test on give a list of their available frequencies.
available_freqs = subprocess.check_output([ADB, 'shell',
'cat %s/scaling_available_frequencies' % root])
# Check for message like '/system/bin/sh: file not found'
if available_freqs and '/system/bin/sh' not in available_freqs:
available_freqs = sorted(
int(i) for i in available_freqs.strip().split())
else:
raise Exception('Could not get list of available frequencies: %s' %
available_freqs)
maxfreq = available_freqs[-1]
target = int(round(maxfreq * target_percent))
freq = maxfreq
for f in reversed(available_freqs):
if f <= target:
freq = f
break
print 'Setting frequency to %d' % freq
# If scaling_max_freq is lower than our attempted setting, it won't take.
# We must set min first, because if we try to set max to be less than min
# (which sometimes happens after certain devices reboot) it returns a
# perplexing permissions error.
subprocess.check_output([ADB, 'shell', 'echo 0 > '
'%s/scaling_min_freq' % root])
subprocess.check_output([ADB, 'shell', 'echo %d > '
'%s/scaling_max_freq' % (freq, root)])
subprocess.check_output([ADB, 'shell', 'echo %d > '
'%s/scaling_setspeed' % (freq, root)])
time.sleep(5)
actual_freq = subprocess.check_output([ADB, 'shell', 'cat '
'%s/scaling_cur_freq' % root]).strip()
if actual_freq != str(freq):
raise Exception('(actual, expected) (%s, %d)'
% (actual_freq, freq))
""",
args = [self.ADB_BINARY, str(target_percent), cpu],
infra_step=True,
timeout=30)
def _asan_setup_path(self):
return self.m.vars.slave_dir.join(
'android_ndk_linux', 'toolchains', 'llvm', 'prebuilt', 'linux-x86_64',
'lib64', 'clang', '8.0.7', 'bin', 'asan_device_setup')
def install(self):
self._adb('mkdir ' + self.device_dirs.resource_dir,
'shell', 'mkdir', '-p', self.device_dirs.resource_dir)
if self.m.vars.builder_cfg.get('model') == 'GalaxyS20':
# See skia:10184, should be moot once upgraded to Android 11?
self._adb('cp libGLES_mali.so to ' + self.device_dirs.bin_dir,
'shell', 'cp',
'/vendor/lib64/egl/libGLES_mali.so',
self.device_dirs.bin_dir + 'libvulkan.so')
if 'ASAN' in self.m.vars.extra_tokens:
self._ever_ran_adb = True
self.m.run(self.m.python.inline, 'Setting up device to run ASAN',
program="""
import os
import subprocess
import sys
import time
ADB = sys.argv[1]
ASAN_SETUP = sys.argv[2]
def wait_for_device():
while True:
time.sleep(5)
print 'Waiting for device'
subprocess.check_output([ADB, 'wait-for-device'])
bit1 = subprocess.check_output([ADB, 'shell', 'getprop',
'dev.bootcomplete'])
bit2 = subprocess.check_output([ADB, 'shell', 'getprop',
'sys.boot_completed'])
if '1' in bit1 and '1' in bit2:
print 'Device detected'
break
log = subprocess.check_output([ADB, 'root'])
# check for message like 'adbd cannot run as root in production builds'
print log
if 'cannot' in log:
raise Exception('adb root failed')
output = subprocess.check_output([ADB, 'disable-verity'])
print output
if 'already disabled' not in output:
print 'Rebooting device'
subprocess.check_output([ADB, 'reboot'])
wait_for_device()
def installASAN(revert=False):
# The ASAN setup script is idempotent: either it installs ASAN or
# reports that it is already installed. Returns True on success, False otherwise.
out = subprocess.check_output([ADB, 'wait-for-device'])
print out
cmd = [ASAN_SETUP]
if revert:
cmd = [ASAN_SETUP, '--revert']
process = subprocess.Popen(cmd, env={'ADB': ADB},
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# this also blocks until command finishes
(stdout, stderr) = process.communicate()
print stdout
print 'Stderr: %s' % stderr
return process.returncode == 0
if not installASAN():
print 'Trying to revert the ASAN install and then re-install'
# ASAN script sometimes has issues if it was interrupted or partially applied
# Try reverting it, then re-enabling it
if not installASAN(revert=True):
raise Exception('reverting ASAN install failed')
# Sleep because device does not reboot instantly
time.sleep(10)
if not installASAN():
raise Exception('Tried twice to setup ASAN and failed.')
# Sleep because device does not reboot instantly
time.sleep(10)
wait_for_device()
# Sleep again to hopefully avoid error "secure_mkdirs failed: No such file or
# directory" when pushing resources to the device.
time.sleep(60)
""",
args = [self.ADB_BINARY, self._asan_setup_path()],
infra_step=True,
timeout=300,
abort_on_failure=True)
if self.app_name:
if (self.app_name == 'nanobench'):
self._scale_for_nanobench()
else:
self._scale_for_dm()
app_path = self.host_dirs.bin_dir.join(self.app_name)
self._adb('push %s' % self.app_name,
'push', app_path, self.device_dirs.bin_dir)
def cleanup_steps(self):
if 'ASAN' in self.m.vars.extra_tokens:
self._ever_ran_adb = True
# Remove ASAN.
self.m.run(self.m.step,
'wait for device before uninstalling ASAN',
cmd=[self.ADB_BINARY, 'wait-for-device'], infra_step=True,
timeout=180, abort_on_failure=False,
fail_build_on_failure=False)
self.m.run(self.m.step, 'uninstall ASAN',
cmd=[self._asan_setup_path(), '--revert'],
infra_step=True, timeout=300,
abort_on_failure=False, fail_build_on_failure=False)
if self._ever_ran_adb:
self.m.run(self.m.python.inline, 'dump log', program="""
import os
import subprocess
import sys
out = sys.argv[1]
log = subprocess.check_output(['%s', 'logcat', '-d'])
for line in log.split('\\n'):
tokens = line.split()
if len(tokens) == 11 and tokens[-7] == 'F' and tokens[-3] == 'pc':
addr, path = tokens[-2:]
local = os.path.join(out, os.path.basename(path))
if os.path.exists(local):
try:
sym = subprocess.check_output(['addr2line', '-Cfpe', local, addr])
line = line.replace(addr, addr + ' ' + sym.strip())
except subprocess.CalledProcessError:
pass
print line
""" % self.ADB_BINARY,
args=[self.host_dirs.bin_dir],
infra_step=True,
timeout=300,
abort_on_failure=False)
# Only quarantine the bot if the first failed step is an infra step. If,
# instead, we did this for any infra failure, we would quarantine too often.
# For example, if a Nexus 10 died during dm, the following pull step would
# also fail with "device not found", causing us to quarantine the bot even
# though the device was probably not in a broken state; it was just
# rebooting.
if (self.m.run.failed_steps and
isinstance(self.m.run.failed_steps[0], recipe_api.InfraFailure)):
bot_id = self.m.vars.swarming_bot_id
self.m.file.write_text('Quarantining Bot',
'/home/chrome-bot/%s.force_quarantine' % bot_id,
' ')
if self._ever_ran_adb:
self._adb('kill adb server', 'kill-server')
def step(self, name, cmd):
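# Wrap the command in a small shell script so that it runs with
# LD_LIBRARY_PATH pointing at the device bin dir and its exit code is
# written to an 'rc' file, which is read back over adb after the run.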
sh = '%s.sh' % cmd[0]
self.m.run.writefile(self.m.vars.tmp_dir.join(sh),
'set -x; LD_LIBRARY_PATH=%s %s%s; echo $? >%src' % (
self.device_dirs.bin_dir,
self.device_dirs.bin_dir, subprocess.list2cmdline(map(str, cmd)),
self.device_dirs.bin_dir))
self._adb('push %s' % sh,
'push', self.m.vars.tmp_dir.join(sh), self.device_dirs.bin_dir)
self._adb('clear log', 'logcat', '-c')
self.m.python.inline('%s' % cmd[0], """
import subprocess
import sys
bin_dir = sys.argv[1]
sh = sys.argv[2]
subprocess.check_call(['%s', 'shell', 'sh', bin_dir + sh])
try:
sys.exit(int(subprocess.check_output(['%s', 'shell', 'cat',
bin_dir + 'rc'])))
except ValueError:
print "Couldn't read the return code. Probably killed for OOM."
sys.exit(1)
""" % (self.ADB_BINARY, self.ADB_BINARY),
args=[self.device_dirs.bin_dir, sh])
def copy_file_to_device(self, host, device):
self._adb('push %s %s' % (host, device), 'push', host, device)
def copy_directory_contents_to_device(self, host, device):
# Copy the tree, avoiding hidden directories and resolving symlinks.
sep = self.m.path.sep
host_str = str(host).rstrip(sep) + sep
device = device.rstrip('/')
with self.m.step.nest('push %s* %s' % (host_str, device)):
contents = self.m.file.listdir('list %s' % host, host, recursive=True,
test_data=['file1',
'subdir' + sep + 'file2',
'.file3',
'.ignore' + sep + 'file4'])
for path in contents:
path_str = str(path)
assert path_str.startswith(host_str), (
'expected %s to have %s as a prefix' % (path_str, host_str))
relpath = path_str[len(host_str):]
# NOTE(dogben): Previous logic used os.walk and skipped directories
# starting with '.', but not files starting with '.'. It's not clear
# what the reason was (maybe skipping .git?), but I'm keeping that
# behavior here.
if self.m.path.dirname(relpath).startswith('.'):
continue
device_path = device + '/' + relpath # Android paths use /
self._adb('push %s' % path, 'push',
self.m.path.realpath(path), device_path)
def copy_directory_contents_to_host(self, device, host):
# TODO(borenet): When all of our devices are on Android 6.0 and up, we can
# switch to using tar to zip up the results before pulling.
with self.m.step.nest('adb pull'):
tmp = self.m.path.mkdtemp('adb_pull')
self._adb('pull %s' % device, 'pull', device, tmp)
paths = self.m.file.glob_paths(
'list pulled files',
tmp,
self.m.path.basename(device) + self.m.path.sep + '*',
test_data=['%d.png' % i for i in (1, 2)])
for p in paths:
self.m.file.copy('copy %s' % self.m.path.basename(p), p, host)
def read_file_on_device(self, path, **kwargs):
rv = self._adb('read %s' % path,
'shell', 'cat', path, stdout=self.m.raw_io.output(),
**kwargs)
return rv.stdout.rstrip() if rv and rv.stdout else None
def remove_file_on_device(self, path):
self._adb('rm %s' % path, 'shell', 'rm', '-f', path)
def create_clean_device_dir(self, path):
self._adb('rm %s' % path, 'shell', 'rm', '-rf', path)
self._adb('mkdir %s' % path, 'shell', 'mkdir', '-p', path)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nested structure coding."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import typing
import warnings
from google.protobuf import text_format
from tensorflow.core.protobuf import struct_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import extension_type
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow.python.saved_model import nested_structure_coder
class NestedStructureTest(test.TestCase):
def setUp(self):
super(NestedStructureTest, self).setUp()
self._coder = nested_structure_coder.StructureCoder()
def testEncodeDecodeList(self):
structure = [1.5, 2.5, 3.0]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected.list_value.values.add().float64_value = 1.5
expected.list_value.values.add().float64_value = 2.5
expected.list_value.values.add().float64_value = 3.0
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEncodeDecodeTuple(self):
structure = ("hello", [3, (2, 1)])
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected.tuple_value.values.add().string_value = "hello"
list_value = expected.tuple_value.values.add().list_value
list_value.values.add().int64_value = 3
tuple_value = list_value.values.add().tuple_value
tuple_value.values.add().int64_value = 2
tuple_value.values.add().int64_value = 1
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEncodeDecodeDict(self):
structure = dict(a=3, b=[7, 2.5])
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected.dict_value.fields["a"].int64_value = 3
list_value = expected.dict_value.fields["b"].list_value
list_value.values.add().int64_value = 7
list_value.values.add().float64_value = 2.5
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertIsInstance(decoded["a"], int)
self.assertEqual(structure, decoded)
def testEncodeDecodeTensorShape(self):
structure = [tensor_shape.TensorShape([1, 2, 3]), "hello"]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected_list = expected.list_value
expected_tensor_shape = expected_list.values.add().tensor_shape_value
expected_tensor_shape.dim.add().size = 1
expected_tensor_shape.dim.add().size = 2
expected_tensor_shape.dim.add().size = 3
expected_list.values.add().string_value = "hello"
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEncodeDecodeNamedTuple(self):
named_tuple_type = collections.namedtuple("NamedTuple", ["x", "y"])
named_tuple = named_tuple_type(x=[1, 2], y="hello")
self.assertTrue(self._coder.can_encode(named_tuple))
encoded = self._coder.encode_structure(named_tuple)
expected = struct_pb2.StructuredValue()
expected_named_tuple = expected.named_tuple_value
expected_named_tuple.name = "NamedTuple"
key_value_pair = expected_named_tuple.values.add()
key_value_pair.key = "x"
list_value = key_value_pair.value.list_value
list_value.values.add().int64_value = 1
list_value.values.add().int64_value = 2
key_value_pair = expected_named_tuple.values.add()
key_value_pair.key = "y"
key_value_pair.value.string_value = "hello"
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(named_tuple._asdict(), decoded._asdict())
self.assertEqual(named_tuple.__class__.__name__, decoded.__class__.__name__)
def testNone(self):
structure = [1.0, None]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected.list_value.values.add().float64_value = 1.0
expected.list_value.values.add().none_value.CopyFrom(struct_pb2.NoneValue())
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testBool(self):
structure = [False]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected.list_value.values.add().bool_value = False
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEmptyStructures(self):
structure = [list(), dict(), tuple()]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected.list_value.values.add().list_value.CopyFrom(struct_pb2.ListValue())
expected.list_value.values.add().dict_value.CopyFrom(struct_pb2.DictValue())
expected.list_value.values.add().tuple_value.CopyFrom(
struct_pb2.TupleValue())
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testDtype(self):
structure = [dtypes.int64]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
list_value = expected.list_value.values.add()
list_value.tensor_dtype_value = dtypes.int64.as_datatype_enum
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEncodeDecodeTensorSpec(self):
structure = [tensor_spec.TensorSpec([1, 2, 3], dtypes.int64, "hello")]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected_list = expected.list_value
expected_tensor_spec = expected_list.values.add().tensor_spec_value
expected_tensor_spec.shape.dim.add().size = 1
expected_tensor_spec.shape.dim.add().size = 2
expected_tensor_spec.shape.dim.add().size = 3
expected_tensor_spec.name = "hello"
expected_tensor_spec.dtype = dtypes.int64.as_datatype_enum
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEncodeDecodeTensorSpecWithNoName(self):
structure = [tensor_spec.TensorSpec([1, 2, 3], dtypes.int64)]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected_list = expected.list_value
expected_tensor_spec = expected_list.values.add().tensor_spec_value
expected_tensor_spec.shape.dim.add().size = 1
expected_tensor_spec.shape.dim.add().size = 2
expected_tensor_spec.shape.dim.add().size = 3
expected_tensor_spec.name = ""
expected_tensor_spec.dtype = dtypes.int64.as_datatype_enum
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEncodeDecodeRaggedTensorSpec(self):
structure = [ragged_tensor.RaggedTensorSpec(
[1, 2, 3], dtypes.int64, 2, dtypes.int32)]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected_pbtxt = r"""
list_value {
values {
type_spec_value {
type_spec_class: RAGGED_TENSOR_SPEC
type_spec_class_name: 'RaggedTensorSpec'
type_state {
tuple_value {
# spec._shape
values {
tensor_shape_value {
dim { size: 1 }
dim { size: 2 }
dim { size: 3 }
}
}
# spec._dtype
values { tensor_dtype_value: DT_INT64 }
# spec._ragged_rank
values { int64_value: 2 }
# spec._row_splits_dtype
values { tensor_dtype_value: DT_INT32 }
}
}
}
}
}
"""
expected = struct_pb2.StructuredValue()
text_format.Parse(expected_pbtxt, expected)
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEncodeDecodeSparseTensorSpec(self):
structure = [sparse_tensor.SparseTensorSpec([10, 20], dtypes.float32)]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected_pbtxt = r"""
list_value {
values {
type_spec_value {
type_spec_class: SPARSE_TENSOR_SPEC
type_spec_class_name: 'SparseTensorSpec'
type_state {
tuple_value {
# spec._shape
values {
tensor_shape_value {
dim { size: 10 }
dim { size: 20 }
}
}
# spec._dtype
values { tensor_dtype_value: DT_FLOAT }
}
}
}
}
}
"""
expected = struct_pb2.StructuredValue()
text_format.Parse(expected_pbtxt, expected)
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEncodeDecodeExtensionTypeSpec(self):
class Zoo(extension_type.ExtensionType):
__name__ = "tf.nested_structure_coder_test.Zoo"
zookeepers: typing.Tuple[str, ...]
animals: typing.Mapping[str, ops.Tensor]
structure = [Zoo.Spec(
zookeepers=["Zoey", "Zack"],
animals={"tiger": tensor_spec.TensorSpec([16])})]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected_pbtxt = r"""
list_value {
values {
type_spec_value {
type_spec_class: EXTENSION_TYPE_SPEC
type_spec_class_name: "tf.nested_structure_coder_test.Zoo.Spec"
type_state {
tuple_value {
values {
tuple_value {
values { string_value: "zookeepers" }
values { tuple_value {
values { string_value: "Zoey" }
values { string_value: "Zack" } } } } }
values {
tuple_value {
values { string_value: "animals" }
values { dict_value {
fields {
key: "tiger"
value { tensor_spec_value {
shape { dim { size: 16 } }
dtype: DT_FLOAT } } } } } } } } } } } }
"""
expected = struct_pb2.StructuredValue()
text_format.Parse(expected_pbtxt, expected)
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testDecodeUnknownTensorSpec(self):
encoded = struct_pb2.StructuredValue()
encoded.type_spec_value.type_spec_class = 0
encoded.type_spec_value.type_spec_class_name = "FutureTensorSpec"
with self.assertRaisesRegex(ValueError,
"The type 'FutureTensorSpec' is not supported"):
self._coder.decode_proto(encoded)
def testEncodeDecodeBoundedTensorSpec(self):
structure = [
tensor_spec.BoundedTensorSpec([1, 2, 3], dtypes.int64, 0, 10,
"hello-0-10")
]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected_list = expected.list_value
expected_tensor_spec = expected_list.values.add().bounded_tensor_spec_value
expected_tensor_spec.shape.dim.add().size = 1
expected_tensor_spec.shape.dim.add().size = 2
expected_tensor_spec.shape.dim.add().size = 3
expected_tensor_spec.name = "hello-0-10"
expected_tensor_spec.dtype = dtypes.int64.as_datatype_enum
expected_tensor_spec.minimum.CopyFrom(
tensor_util.make_tensor_proto([0], dtype=dtypes.int64, shape=[]))
expected_tensor_spec.maximum.CopyFrom(
tensor_util.make_tensor_proto([10], dtype=dtypes.int64, shape=[]))
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEncodeDecodeBoundedTensorSpecNoName(self):
structure = [
tensor_spec.BoundedTensorSpec((28, 28, 3), dtypes.float64, -2,
(1, 1, 20))
]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected_list = expected.list_value
expected_tensor_spec = expected_list.values.add().bounded_tensor_spec_value
expected_tensor_spec.shape.dim.add().size = 28
expected_tensor_spec.shape.dim.add().size = 28
expected_tensor_spec.shape.dim.add().size = 3
expected_tensor_spec.name = ""
expected_tensor_spec.dtype = dtypes.float64.as_datatype_enum
expected_tensor_spec.minimum.CopyFrom(
tensor_util.make_tensor_proto([-2], dtype=dtypes.float64, shape=[]))
expected_tensor_spec.maximum.CopyFrom(
tensor_util.make_tensor_proto([1, 1, 20],
dtype=dtypes.float64,
shape=[3]))
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEncodeDataSetSpec(self):
structure = [dataset_ops.DatasetSpec(
{"rt": ragged_tensor.RaggedTensorSpec([10, None], dtypes.int32),
"st": sparse_tensor.SparseTensorSpec([10, 20], dtypes.float32),
"t": tensor_spec.TensorSpec([10, 8], dtypes.string)})]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testNotEncodable(self):
class NotEncodable(object):
pass
self.assertFalse(self._coder.can_encode([NotEncodable()]))
def testRegisteredTypeSpec(self):
expected_warning = ("Encoding a StructuredValue with type "
"NestedStructureTest.RegisteredTypeSpec; loading "
"this StructuredValue will require that this type "
"be imported and registered")
structure = {"x": RegisteredTypeSpec()}
self.assertTrue(self._coder.can_encode(structure))
with warnings.catch_warnings(record=True) as w:
encoded = self._coder.encode_structure(structure)
self.assertLen(w, 1)
self.assertIn(expected_warning, str(w[0].message))
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testUnregisteredTypeSpec(self):
structure = {"x": UnregisteredTypeSpec()}
self.assertFalse(self._coder.can_encode(structure))
with self.assertRaises(nested_structure_coder.NotEncodableError):
self._coder.encode_structure(structure)
# Trivial TypeSpec class for testing.
class UnregisteredTypeSpec(type_spec.TypeSpec):
value_type = property(lambda self: None)
_component_specs = property(lambda self: ())
_to_components = lambda self, v: ()
_from_components = classmethod(lambda cls, c: cls())
_serialize = lambda self: ()
# Trivial TypeSpec class for testing.
@type_spec.register("NestedStructureTest.RegisteredTypeSpec")
class RegisteredTypeSpec(type_spec.TypeSpec):
value_type = property(lambda self: None)
_component_specs = property(lambda self: ())
_to_components = lambda self, v: ()
_from_components = classmethod(lambda cls, c: cls())
_serialize = lambda self: ()
if __name__ == "__main__":
test.main()
|
|
from __future__ import print_function
from __future__ import division
import numpy as np
import keras
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, Conv1D, BatchNormalization
from keras.optimizers import RMSprop
def mlpTrain(quizzes, solutions):
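"""Train a fully connected network that regresses the 81-cell solution
directly from the flattened 81-cell puzzle, then report loss and accuracy
(evaluated here on the full quizzes array, including the training split)."""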
mlp = Sequential()
mlp.add(Dense(256, activation = 'relu', input_shape = (81,)))
mlp.add(BatchNormalization())
mlp.add(Dense(256, activation = 'relu'))
mlp.add(BatchNormalization())
mlp.add(Dense(256, activation = 'relu'))
mlp.add(BatchNormalization())
mlp.add(Dense(256, activation = 'relu'))
mlp.add(BatchNormalization())
mlp.add(Dense(256, activation = 'relu'))
mlp.add(BatchNormalization())
mlp.add(Dense(81, activation = 'relu'))
mlp.summary()
mlp.compile(loss='mean_squared_error', optimizer=RMSprop(), metrics=['accuracy'])
history = mlp.fit(quizzes[0:500000], solutions[0:500000], batch_size = 100, epochs = 3, verbose = 1, validation_data = (quizzes[500000:600000], solutions[500000:600000]))
score = mlp.evaluate(quizzes, solutions, verbose = 1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
def cnnTrain(X_train, solutions):
cnn = Sequential()
cnn.add(Conv1D(64, kernel_size=3, activation='relu', input_shape=(81,1)))
cnn.add(BatchNormalization())
cnn.add(Conv1D(64, kernel_size=3, activation='relu'))
cnn.add(BatchNormalization())
cnn.add(Conv1D(64, kernel_size=3, activation='relu'))
cnn.add(BatchNormalization())
cnn.add(Conv1D(32, kernel_size=3, activation='relu'))
cnn.add(BatchNormalization())
cnn.add(Conv1D(32, kernel_size=3, activation='relu'))
cnn.add(Flatten())
cnn.add(BatchNormalization())
cnn.add(Dense(81, activation = 'linear'))
#cnn.add(MaxPooling2D(pool_size=(2, 2)))
#cnn.add(Flatten())
#cnn.add(Dense(num_classes, activation='softmax'))
cnn.compile(loss='mean_squared_error',
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
history2 = cnn.fit(X_train,solutions,
batch_size=100,
epochs=3,
verbose=1,
validation_data=(X_train, solutions))
score = cnn.evaluate(X_train, solutions, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
def trainCnn2(X_train, solutions):
cnn2 = Sequential()
cnn2.add(Conv1D(64, kernel_size=3, activation='relu', input_shape=(81,1)))
cnn2.add(BatchNormalization())
cnn2.add(Conv1D(64, kernel_size=3, activation='relu'))
cnn2.add(BatchNormalization())
cnn2.add(Conv1D(64, kernel_size=3, activation='relu'))
cnn2.add(BatchNormalization())
cnn2.add(Conv1D(64, kernel_size=3, activation='relu'))
cnn2.add(BatchNormalization())
cnn2.add(Conv1D(64, kernel_size=3, activation='relu'))
cnn2.add(BatchNormalization())
cnn2.add(Conv1D(64, kernel_size=3, activation='relu'))
cnn2.add(BatchNormalization())
cnn2.add(Conv1D(64, kernel_size=3, activation='relu'))
cnn2.add(BatchNormalization())
cnn2.add(Conv1D(64, kernel_size=3, activation='relu'))
cnn2.add(BatchNormalization())
cnn2.add(Conv1D(64, kernel_size=3, activation='relu'))
cnn2.add(Flatten())
cnn2.add(BatchNormalization())
cnn2.add(Dense(81, activation = 'linear'))
cnn2.compile(loss='mean_squared_error',
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
history2 = cnn2.fit(X_train[0:500000],solutions[0:500000],
batch_size=100,
epochs=1,
verbose=1,
validation_data=(X_train[500000:600000], solutions[500000:600000]))
score2 = cnn2.evaluate(X_train[500000:600000], solutions[500000:600000], verbose=1)
print('Test loss:', score2[0])
print('Test accuracy:', score2[1])
cnn2.save("deepSudokuCNN.h5")
# In[61]:
# print(quizzes[-1].shape)
# print(testPuzzle.shape)
# # In[75]:
# #testPuzzle = testPuzzle.reshape((1,) + testPuzzle.shape)
# print(testPuzzle.shape)
# print(mlp.predict(testPuzzle))
# prediction = mlp.predict(testPuzzle)
# #change the type to int so that we can evaluate the prediction
# rounded = np.around(prediction)
# cast = prediction.astype(int)
# cast
# In[18]:
def solve2(nn, testBoard, solution, netType):
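"""Reshape testBoard for the given network type, predict the full solution
in a single pass, and print the fraction of the 81 cells that match the
provided solution."""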
# netType legend: 1: mlp, 2: 1d cnn, 3: 2d cnn, 4: 2d cnn w/ max pool
tensor = None
#depending on the type of net you want to predict with set the tensor dimensions
if netType == 2:
tensor = testBoard.reshape(1, 81, 1)
elif netType == 1:
#print("Reshaping the tensor for mlp")
tensor = testBoard.reshape(1,81)
#print(tensor.shape)
elif netType == 3 or netType == 4:
#this is the 2d cnn
tensor = testBoard.reshape(1, 9, 9, 1)
prediction = nn.predict(tensor)
rounded = np.around(prediction)
cast = prediction.astype(int)
correct = 0
for current in range(81):
#compare the values of the cast and the solution
if cast[0][current] == solution[current]:
correct += 1
accuracy = correct / 81
#print(cast)
names = {1:"MLP", 2:"1D CNN", 3:"2D CNN", 4:"2D CNN w/Max Pool"}
print("The accuracy of the "+ names[netType] +" was: " + str(accuracy))
# In[19]:
#print(quizzes[-1])
#print(quizzes[-1])
# In[20]:
#keep going until there are no more zeros in the input:
#use the nn to predict the solution, fill in one blank cell,
#then re-predict using the updated input
def iterative(nn, testBoard, solution, netType):
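"""Solve the puzzle one cell at a time: predict the full board with the
network, copy the prediction for the first remaining blank cell into
testBoard, re-predict with the updated board, and repeat until no blanks
remain. Finally print per-cell accuracy against the solution."""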
zeros = np.where(testBoard == 0)[0]
while len(zeros) != 0:
if netType == 2:
tensor = testBoard.reshape(1, 81, 1)
elif netType == 1:
#print("Reshaping the tensor for mlp")
tensor = testBoard.reshape(1,81)
#print(tensor.shape)
elif netType == 3 or netType == 4:
#reshape the testBoard for 2d CNNs
tensor = testBoard.reshape(1, 9, 9, 1)
prediction = nn.predict(tensor)
rounded = np.around(prediction)
cast = prediction.astype(int)
#update the testboard
#print(test)
#print(zeros[0])
#print(cast[0][zeros[0]])
index = zeros[0]
testBoard[index] = cast[0][index]
#remove the first element from zeros
zeros = np.delete(zeros, [0])
correct = 0
cast = np.copy(testBoard)
for current in range(81):
#compare the values of the cast and the solution
if cast[current] == solution[current]:
correct += 1
accuracy = correct / 81
#print(cast)
names = {1:"MLP", 2:"1D CNN", 3:"2D CNN", 4:"2D CNN w/Max Pool"}
print("The accuracy of the "+ names[netType] +" while iteratively solving was: " + str(accuracy))
#need 729 outputs: 81 cells with 9 possible values each
def mlp2Train(quizzes, y_test, y_train):
mlp2 = Sequential()
mlp2.add(Dense(128, activation = 'relu', input_shape = (81,)))
mlp2.add(BatchNormalization())
mlp2.add(Dense(128, activation = 'relu'))
mlp2.add(BatchNormalization())
mlp2.add(Dense(128, activation = 'relu'))
mlp2.add(BatchNormalization())
mlp2.add(Dense(128, activation = 'relu'))
mlp2.add(BatchNormalization())
mlp2.add(Dense(128, activation = 'relu'))
mlp2.add(BatchNormalization())
mlp2.add(Dense(810, activation = 'softmax'))
mlp2.summary()
mlp2.compile(loss='categorical_crossentropy', optimizer=RMSprop(),
metrics=['accuracy'])
history = mlp2.fit(quizzes[0:500000], y_train, batch_size = 100, epochs = 3,
verbose = 1, validation_data = (quizzes[500000:600000], y_test))
score = mlp2.evaluate(quizzes[500000:600000], y_test, verbose = 1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# # Let's Try a CNN with 2D Convolutions
# In[29]:
#let's reshape the data so we can do 2d convolutions
def cnn2dTrain(X_train2d, solutions):
cnn2d = Sequential()
cnn2d.add(Conv2D(64, kernel_size=(3,3), activation='relu', input_shape= X_train2d.shape[1:]))
cnn2d.add(BatchNormalization())
cnn2d.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
cnn2d.add(BatchNormalization())
cnn2d.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
cnn2d.add(BatchNormalization())
#cnn2d.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
#cnn2d.add(BatchNormalization())
cnn2d.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
cnn2d.add(BatchNormalization())
#cnn2d.add(MaxPooling2D(pool_size= (2,2)))
cnn2d.add(Flatten())
cnn2d.add(Dropout(0.5))
#cnn2d.add(BatchNormalization())
cnn2d.add(Dense(81, activation = 'linear'))
cnn2d.summary()
cnn2d.compile(loss='mean_squared_error',
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
history3 = cnn2d.fit(X_train2d[0:500000],solutions[0:500000],
batch_size= 100,
epochs=5,
verbose=1,
validation_data=(X_train2d[500000:600000], solutions[500000:600000]))
score3 = cnn2d.evaluate(X_train2d[500000:600000], solutions[500000:600000], verbose=1)
print('Test loss:', score3[0])
print('Test accuracy:', score3[1])
# In[87]:
#cnn2d.save('cnnMaxPool.h5')
# In[89]:
#cnn2d.save('cnn2d.h5')
# In[22]:
def check(y,x,matrix):
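"""Return True if the value at (y, x) does not conflict with any other
filled value in its row, column, or 3x3 box; return False otherwise."""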
boxX = 0
boxY = 0
if (y > 2):
boxY = 3
if (y > 5):
boxY = 6
if (x > 2):
boxX = 3
if (x > 5):
boxX = 6
list = []
for i in range(0, 9):
if (i != y and matrix[i][x] != 0 and matrix[i][x] not in list):
list.append(matrix[i][x])
for j in range(0, 9):
if (j != x and matrix[y][j] != 0 and matrix[y][j] not in list):
list.append(matrix[y][j])
for i in range(boxY, boxY + 3):
for j in range(boxX, boxX + 3):
if (i != y and j != x and matrix[i][j] not in list and matrix[i][j] != 0):
list.append(matrix[i][j])
if(matrix[y][x] in list):
return False
return True
# In[ ]:
def solve(matrix):
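"""Classic backtracking solver: walk the blank cells, try digits 1-9 in
each, and backtrack whenever check() rejects a placement. Returns the
completed matrix once every cell is filled; if it backtracks past the
start (no solution), it returns a 9x9 zero matrix."""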
contin = True
currX = 0
currY = 0
##matrix denoting blanks
filled = np.zeros(shape=(9,9))
for x in range(0,9):
for y in range(0,9):
if(matrix[y][x]==0):
filled[y][x]=0
else:
filled[y][x]=1
while(filled[currY][currX]!=0):
currX += 1
if (currX == 9):
currX = 0
currY += 1
#print("Strart: "+str(currY)+ str(currX))
while(contin):
if(currY == 9 and currX==0):
return matrix
if (currY < 0 or currX < 0):
return np.zeros(shape=(9, 9))
#print(currX, currY)
if(matrix[currY][currX]==0):
z=1
while(z < 10):
#print(matrix)
#print(currX,currY)
##check for nonfilled
if(currY == 9 and currX==0):
return matrix
##check for no solution
if(currY <0 or currX < 0):
return np.zeros(shape=(9,9))
if(filled[currY][currX]==0):
matrix[currY][currX] = z
##continue
if(check(currY, currX, matrix)):
currX += 1
if (currX == 9):
currX = 0
currY += 1
if(currY == 9):
contin= False
z=0
##backtrack if no valids found
if(z==9):
##go back 1
matrix[currY][currX]=0 ##reset
currX -= 1
if (currX == -1):
currX = 8
currY -= 1
z = matrix[currY][currX]
##if its filled
if(filled[currY][currX]!=0):
while(filled[currY][currX]!=0 or (filled[currY][currX]==0 and matrix[currY][currX]==9)):
##if you get to one you need to reset
if (filled[currY][currX] == 0 and matrix[currY][currX] == 9):
matrix[currY][currX] = 0 ## reset
##go back one
currX -= 1
if (currX == -1):
currX = 8
currY -= 1
##go back 1 if filled
if(filled[currY][currX]==1):
#print(currX,currY)
currX-=1
if(currX == -1):
currX = 8
currY-=1
z = matrix[currY][currX]
##not filled
else:
##not filled and not 9
z = matrix[currY][currX]
##not filled and is 9
while(matrix[currY][currX] == 9):
##if not filled and 9
if (filled[currY][currX] == 0 and z == 9):
matrix[currY][currX] = 0
currX -= 1
if (currX == -1):
currX = 8
currY -= 1
##if filled backtrack to a nonfilled
if (filled[currY][currX] != 0):
while(filled[currY][currX]!=0):
currX -= 1
if (currX == -1):
currX = 8
currY -= 1
if (currY == 9 and currX == 0):
return matrix
z = matrix[currY][currX]
##increment
if(z!=9):
z+=1
else:
#print("else")
currX += 1
if (currX == 9):
currX = 0
currY += 1
z=1
else:
if(matrix[currY][currX]!=0):
currX -= 1
if (currX == -1):
currX = 8
currY -= 1
if(currY ==-1):
contin = False
else:
currX += 1
if (currX == 9):
currX = 0
currY += 1
def main():
#I did not write this parsing code; it was provided on the Kaggle page:
#https://www.kaggle.com/bryanpark/sudoku?sortBy=null&group=datasets
quizzes = np.zeros((1000000, 81), np.int32)
solutions = np.zeros((1000000, 81), np.int32)
for i, line in enumerate(open('sudoku.csv', 'r').read().splitlines()[1:]):
quiz, solution = line.split(",")
for j, q_s in enumerate(zip(quiz, solution)):
q, s = q_s
quizzes[i, j] = q
solutions[i, j] = s
X_train = quizzes.reshape(quizzes.shape[0], 81, 1)
nn = load_model('deepSudokuCNN.h5')
mlp = load_model('sudokuMLP.h5')
cnnMax = load_model('cnnMaxPool.h5')
cnn2d = load_model('cnn2d.h5')
#reshape the data to allow for 2d convolutions
quizzes2d = np.copy(quizzes)
solutions2d = np.copy(solutions)
quizzes2d = quizzes2d.reshape((-1, 9, 9))
solutions2d = solutions2d.reshape((-1,9,9))
X_train2d = quizzes2d.reshape(quizzes2d.shape[0], 9, 9, 1)
X_train2d.shape
#let's test the accuracy on 30 different puzzles
for i in range(1,31):
print("Accuracy on test puzzle " + str(i))
solve2(nn, quizzes[-i], solutions[-i], 2)
solve2(mlp, quizzes[-i], solutions[-i], 1)
solve2(cnnMax, quizzes[-i], solutions[-i], 3)
solve2(cnn2d, quizzes[-i], solutions[-i], 4)
test2d = np.copy(quizzes[-i].reshape(9, 9))
print(solve(test2d))
iterative(nn, np.copy(quizzes[-i]), solutions[-i], netType = 2)
iterative(mlp, np.copy(quizzes[-i]), solutions[-i], netType = 1)
iterative(cnnMax, np.copy(quizzes[-i]), solutions[-i], netType = 3)
iterative(cnn2d, np.copy(quizzes[-i]), solutions[-i], netType = 4)
if __name__ == "__main__": main()
|
|
#!/usr/bin/env python
#
# Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testlib
import logging
from contextlib import contextmanager
import splunklib.client as client
collections = [
'apps',
'event_types',
'indexes',
'inputs',
'jobs',
'loggers',
'messages',
'roles',
'users'
]
expected_access_keys = set(['sharing', 'app', 'owner'])
expected_fields_keys = set(['required', 'optional', 'wildcard'])
class CollectionTestCase(testlib.SDKTestCase):
def setUp(self):
super(CollectionTestCase, self).setUp()
if self.service.splunk_version[0] < 5:
logging.info("Skipping modular_input_kinds; not supported by Splunk %s" % \
'.'.join(str(x) for x in self.service.splunk_version))
elif 'modular_input_kinds' not in collections:
collections.append('modular_input_kinds') # Not supported before Splunk 5.0
for saved_search in self.service.saved_searches:
if saved_search.name.startswith('delete-me'):
try:
for job in saved_search.history():
job.cancel()
self.service.saved_searches.delete(saved_search.name)
except KeyError:
pass
def test_metadata(self):
self.assertRaises(client.NotSupportedError, self.service.jobs.itemmeta)
self.assertRaises(client.NotSupportedError, self.service.loggers.itemmeta)
self.assertRaises(TypeError, self.service.inputs.itemmeta)
for c in collections:
if c in ['jobs', 'loggers', 'inputs', 'modular_input_kinds']:
continue
coll = getattr(self.service, c)
metadata = coll.itemmeta()
found_access_keys = set(metadata.access.keys())
found_fields_keys = set(metadata.fields.keys())
self.assertTrue(found_access_keys >= expected_access_keys,
msg='metadata.access is missing keys on ' + \
'%s (found: %s, expected: %s)' % \
(coll, found_access_keys,
expected_access_keys))
self.assertTrue(found_fields_keys >= expected_fields_keys,
msg='metadata.fields is missing keys on ' + \
'%s (found: %s, expected: %s)' % \
(coll, found_fields_keys,
expected_fields_keys))
def test_list(self):
for coll_name in collections:
coll = getattr(self.service, coll_name)
expected = [ent.name for ent in coll.list(count=10, sort_mode="auto")]
if len(expected) == 0:
logging.debug("No entities in collection %s; skipping test.", coll_name)
found = [ent.name for ent in coll.list()][:10]
self.assertEqual(expected, found,
msg='on %s (expected: %s, found: %s)' % \
(coll_name, expected, found))
def test_list_with_count(self):
N = 5
for coll_name in collections:
coll = getattr(self.service, coll_name)
expected = [ent.name for ent in coll.list(count=N+5)][:N]
N = len(expected) # in case there are <N elements
found = [ent.name for ent in coll.list(count=N)]
self.assertEqual(expected, found,
msg='on %s (expected %s, found %s)' % \
(coll_name, expected, found))
def test_list_with_offset(self):
import random
for offset in [random.randint(3,50) for x in range(5)]:
for coll_name in collections:
coll = getattr(self.service, coll_name)
expected = [ent.name for ent in coll.list(count=offset+10)][offset:]
found = [ent.name for ent in coll.list(offset=offset, count=10)]
self.assertEqual(expected, found,
msg='on %s (expected %s, found %s)' % \
(coll_name, expected, found))
def test_list_with_search(self):
for coll_name in collections:
coll = getattr(self.service, coll_name)
expected = [ent.name for ent in coll.list()]
if len(expected) == 0:
logging.debug("No entities in collection %s; skipping test.", coll_name)
# TODO: DVPL-5868 - This should use a real search instead of *. Otherwise the test passes trivially.
found = [ent.name for ent in coll.list(search="*")]
self.assertEqual(expected, found,
msg='on %s (expected: %s, found: %s)' % \
(coll_name, expected, found))
def test_list_with_sort_dir(self):
for coll_name in collections:
coll = getattr(self.service, coll_name)
expected_kwargs = {'sort_dir': 'desc'}
found_kwargs = {'sort_dir': 'asc'}
if coll_name == 'jobs':
expected_kwargs['sort_key'] = 'sid'
found_kwargs['sort_key'] = 'sid'
expected = list(reversed([ent.name for ent in coll.list(**expected_kwargs)]))
if len(expected) == 0:
logging.debug("No entities in collection %s; skipping test.", coll_name)
found = [ent.name for ent in coll.list(**found_kwargs)]
self.assertEqual(sorted(expected), sorted(found),
msg='on %s (expected: %s, found: %s)' %
(coll_name, expected, found))
def test_list_with_sort_mode_auto(self):
# The jobs collection requires special handling. The sort_dir kwarg is
# needed because the default sorting direction for jobs is "desc", not
# "asc". The sort_key kwarg is required because there is no default
# sort_key for jobs in Splunk 6.
for coll_name in collections:
coll = getattr(self.service, coll_name)
if coll_name == 'jobs':
expected = [ent.name for ent in coll.list(
sort_mode="auto", sort_dir="asc", sort_key="sid")]
else:
expected = [ent.name for ent in coll.list(sort_mode="auto")]
if len(expected) == 0:
logging.debug("No entities in collection %s; skipping test.", coll_name)
if coll_name == 'jobs':
found = [ent.name for ent in coll.list(
sort_dir="asc", sort_key="sid")]
else:
found = [ent.name for ent in coll.list()]
self.assertEqual(expected, found, msg='on %s (expected: %s, found: %s)' % (coll_name, expected, found))
def test_list_with_sort_mode_alpha_case(self):
for coll_name in collections:
coll = getattr(self.service, coll_name)
# sort_dir is needed because the default sorting direction
# for jobs is "desc", not "asc", so we have to set it explicitly or our tests break.
kwargs = {'sort_mode': 'alpha_case', 'sort_dir': 'asc', 'count': 30}
if coll_name == 'jobs':
kwargs['sort_key'] = 'sid'
found = [ent.name for ent in coll.list(**kwargs)]
if len(found) == 0:
logging.debug("No entities in collection %s; skipping test.", coll_name)
expected = sorted(found)
self.assertEqual(expected, found,
msg='on %s (expected: %s, found: %s)' % \
(coll_name, expected, found))
def test_list_with_sort_mode_alpha(self):
for coll_name in collections:
coll = getattr(self.service, coll_name)
# sort_dir is needed because the default sorting direction
# for jobs is "desc", not "asc", so we have to set it explicitly
# or our tests break. We also need to specify "sid" as sort_key
# for jobs, or things are sorted by submission time and ties go
# the same way in either sort direction.
kwargs = {'sort_mode': 'alpha', 'sort_dir': 'asc', 'count': 30}
if coll_name == 'jobs':
kwargs['sort_key'] = 'sid'
found = [ent.name for ent in coll.list(**kwargs)]
if len(found) == 0:
logging.debug("No entities in collection %s; skipping test.", coll_name)
expected = sorted(found, key=str.lower)
self.assertEqual(expected, found,
msg='on %s (expected: %s, found: %s)' % \
(coll_name, expected, found))
def test_iteration(self):
for coll_name in collections:
coll = getattr(self.service, coll_name)
expected = [ent.name for ent in coll.list(count=10)]
if len(expected) == 0:
logging.debug("No entities in collection %s; skipping test.", coll_name)
total = len(expected)
found = []
for ent in coll.iter(pagesize=max(int(total/5.0), 1), count=10):
found.append(ent.name)
self.assertEqual(expected, found,
msg='on %s (expected: %s, found: %s)' % \
(coll_name, expected, found))
def test_paging(self):
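# Page through each collection manually with offset/count and check that
# the concatenated pages match a single list(count=30) call.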
for coll_name in collections:
coll = getattr(self.service, coll_name)
expected = [ent.name for ent in coll.list(count=30)]
if len(expected) == 0:
logging.debug("No entities in collection %s; skipping test.", coll_name)
total = len(expected)
page_size = max(int(total/5.0), 1)
found = []
offset = 0
while offset < total:
page = coll.list(offset=offset, count=page_size)
count = len(page)
offset += count
self.assertTrue(count == page_size or offset == total,
msg='on %s' % coll_name)
found.extend([ent.name for ent in page])
logging.debug("Iterate: offset=%d/%d", offset, total)
self.assertEqual(expected, found,
msg='on %s (expected: %s, found: %s)' % \
(coll_name, expected, found))
def test_getitem_with_nonsense(self):
for coll_name in collections:
coll = getattr(self.service, coll_name)
name = testlib.tmpname()
self.assertTrue(name not in coll)
self.assertRaises(KeyError, coll.__getitem__, name)
def test_getitem_with_namespace_sample_in_changelog(self):
from splunklib.binding import namespace
ns = client.namespace(owner='nobody', app='search')
result = self.service.saved_searches['Top five sourcetypes', ns]
def test_collection_search_get(self):
for search in self.service.saved_searches:
self.assertEqual(self.service.saved_searches[search.name].path, search.path)
self.assertEqual(200, self.service.saved_searches.get(search.name).status)
def test_collection_inputs_getitem(self):
valid_kinds = self.service.inputs._get_kind_list()
valid_kinds.remove("script")
for inp in self.service.inputs.list(*valid_kinds):
self.assertTrue(self.service.inputs[inp.name])
if __name__ == "__main__":
try:
import unittest2 as unittest
except ImportError:
import unittest
unittest.main()
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import fixtures
import mox
from oslo_config import cfg
from oslo_log import log as logging
from oslotest import mockpatch
import testscenarios
import testtools
from heat.common import context
from heat.common import messaging
from heat.common import policy
from heat.engine.clients.os import barbican
from heat.engine.clients.os import cinder
from heat.engine.clients.os import glance
from heat.engine.clients.os import keystone
from heat.engine.clients.os.keystone import keystone_constraints as ks_constr
from heat.engine.clients.os.neutron import neutron_constraints as neutron
from heat.engine.clients.os import nova
from heat.engine.clients.os import sahara
from heat.engine.clients.os import trove
from heat.engine import environment
from heat.engine import resource
from heat.engine import resources
from heat.engine import scheduler
from heat.tests import fakes
from heat.tests import generic_resource as generic_rsrc
from heat.tests import utils
TEST_DEFAULT_LOGLEVELS = {'migrate': logging.WARN,
'sqlalchemy': logging.WARN,
'heat.engine.environment': logging.ERROR}
_LOG_FORMAT = "%(levelname)8s [%(name)s] %(message)s"
_TRUE_VALUES = ('True', 'true', '1', 'yes')
class FakeLogMixin(object):
def setup_logging(self, quieten=True):
# Assign default logs to self.LOG so we can still
# assert on heat logs.
default_level = logging.INFO
if os.environ.get('OS_DEBUG') in _TRUE_VALUES:
default_level = logging.DEBUG
self.LOG = self.useFixture(
fixtures.FakeLogger(level=default_level, format=_LOG_FORMAT))
base_list = set([nlog.split('.')[0]
for nlog in logging.logging.Logger.manager.loggerDict]
)
for base in base_list:
if base in TEST_DEFAULT_LOGLEVELS:
self.useFixture(fixtures.FakeLogger(
level=TEST_DEFAULT_LOGLEVELS[base],
name=base, format=_LOG_FORMAT))
elif base != 'heat':
self.useFixture(fixtures.FakeLogger(
name=base, format=_LOG_FORMAT))
if quieten:
for ll in TEST_DEFAULT_LOGLEVELS:
if ll.startswith('heat.'):
self.useFixture(fixtures.FakeLogger(
level=TEST_DEFAULT_LOGLEVELS[ll],
name=ll, format=_LOG_FORMAT))
class HeatTestCase(testscenarios.WithScenarios,
testtools.TestCase, FakeLogMixin):
def setUp(self, mock_keystone=True, mock_resource_policy=True,
quieten_logging=True):
super(HeatTestCase, self).setUp()
self.m = mox.Mox()
self.addCleanup(self.m.UnsetStubs)
self.setup_logging(quieten=quieten_logging)
self.warnings = self.useFixture(fixtures.WarningsCapture())
scheduler.ENABLE_SLEEP = False
self.useFixture(fixtures.MonkeyPatch(
'heat.common.exception._FATAL_EXCEPTION_FORMAT_ERRORS',
True))
def enable_sleep():
scheduler.ENABLE_SLEEP = True
self.addCleanup(enable_sleep)
mod_dir = os.path.dirname(sys.modules[__name__].__file__)
project_dir = os.path.abspath(os.path.join(mod_dir, '../../'))
env_dir = os.path.join(project_dir, 'etc', 'heat',
'environment.d')
template_dir = os.path.join(project_dir, 'etc', 'heat',
'templates')
cfg.CONF.set_default('environment_dir', env_dir)
cfg.CONF.set_override('error_wait_time', None, enforce_type=True)
cfg.CONF.set_default('template_dir', template_dir)
self.addCleanup(cfg.CONF.reset)
messaging.setup("fake://", optional=True)
self.addCleanup(messaging.cleanup)
tri_names = ['AWS::RDS::DBInstance', 'AWS::CloudWatch::Alarm']
tris = []
for name in tri_names:
tris.append(resources.global_env().get_resource_info(
name,
registry_type=environment.TemplateResourceInfo))
for tri in tris:
if tri is not None:
cur_path = tri.template_name
templ_path = os.path.join(project_dir, 'etc',
'heat', 'templates')
if templ_path not in cur_path:
tri.template_name = cur_path.replace(
'/etc/heat/templates',
templ_path)
if mock_keystone:
self.stub_keystoneclient()
if mock_resource_policy:
self.mock_resource_policy = self.patchobject(
policy.ResourceEnforcer, 'enforce')
utils.setup_dummy_db()
self.register_test_resources()
self.addCleanup(utils.reset_dummy_db)
def register_test_resources(self):
resource._register_class('GenericResourceType',
generic_rsrc.GenericResource)
resource._register_class('MultiStepResourceType',
generic_rsrc.MultiStepResource)
resource._register_class('ResWithShowAttrType',
generic_rsrc.ResWithShowAttr)
resource._register_class('SignalResourceType',
generic_rsrc.SignalResource)
resource._register_class('ResourceWithPropsType',
generic_rsrc.ResourceWithProps)
resource._register_class('ResourceWithPropsRefPropOnDelete',
generic_rsrc.ResourceWithPropsRefPropOnDelete)
resource._register_class(
'ResourceWithPropsRefPropOnValidate',
generic_rsrc.ResourceWithPropsRefPropOnValidate)
resource._register_class('StackUserResourceType',
generic_rsrc.StackUserResource)
resource._register_class('ResourceWithResourceIDType',
generic_rsrc.ResourceWithResourceID)
resource._register_class('ResourceWithAttributeType',
generic_rsrc.ResourceWithAttributeType)
resource._register_class('ResourceWithRequiredProps',
generic_rsrc.ResourceWithRequiredProps)
resource._register_class(
'ResourceWithMultipleRequiredProps',
generic_rsrc.ResourceWithMultipleRequiredProps)
resource._register_class(
'ResourceWithRequiredPropsAndEmptyAttrs',
generic_rsrc.ResourceWithRequiredPropsAndEmptyAttrs)
resource._register_class('ResourceWithPropsAndAttrs',
generic_rsrc.ResourceWithPropsAndAttrs)
resource._register_class('ResWithStringPropAndAttr',
generic_rsrc.ResWithStringPropAndAttr)
resource._register_class('ResWithComplexPropsAndAttrs',
generic_rsrc.ResWithComplexPropsAndAttrs)
resource._register_class('ResourceWithCustomConstraint',
generic_rsrc.ResourceWithCustomConstraint)
resource._register_class('ResourceWithComplexAttributesType',
generic_rsrc.ResourceWithComplexAttributes)
resource._register_class('ResourceWithDefaultClientName',
generic_rsrc.ResourceWithDefaultClientName)
resource._register_class('OverwrittenFnGetAttType',
generic_rsrc.ResourceWithFnGetAttType)
resource._register_class('OverwrittenFnGetRefIdType',
generic_rsrc.ResourceWithFnGetRefIdType)
resource._register_class('ResourceWithListProp',
generic_rsrc.ResourceWithListProp)
resource._register_class('StackResourceType',
generic_rsrc.StackResourceType)
resource._register_class('ResourceWithRestoreType',
generic_rsrc.ResourceWithRestoreType)
resource._register_class('DynamicSchemaResource',
generic_rsrc.DynamicSchemaResource)
resource._register_class('ResourceTypeUnSupportedLiberty',
generic_rsrc.ResourceTypeUnSupportedLiberty)
resource._register_class('ResourceTypeSupportedKilo',
generic_rsrc.ResourceTypeSupportedKilo)
resource._register_class('ResourceTypeHidden',
generic_rsrc.ResourceTypeHidden)
resource._register_class(
'ResourceWithHiddenPropertyAndAttribute',
generic_rsrc.ResourceWithHiddenPropertyAndAttribute)
def patchobject(self, obj, attr, **kwargs):
mockfixture = self.useFixture(mockpatch.PatchObject(obj, attr,
**kwargs))
return mockfixture.mock
# NOTE(pshchelo): this overrides the testtools.TestCase.patch method
# that does simple monkey-patching in favor of mock's patching
def patch(self, target, **kwargs):
mockfixture = self.useFixture(mockpatch.Patch(target, **kwargs))
return mockfixture.mock
def stub_auth(self, ctx=None, **kwargs):
auth = self.patchobject(ctx or context.RequestContext,
"_create_auth_plugin")
fake_auth = fakes.FakeAuth(**kwargs)
auth.return_value = fake_auth
return auth
def stub_keystoneclient(self, fake_client=None, **kwargs):
client = self.patchobject(keystone.KeystoneClientPlugin, "_create")
fkc = fake_client or fakes.FakeKeystoneClient(**kwargs)
client.return_value = fkc
return fkc
def stub_KeypairConstraint_validate(self):
validate = self.patchobject(nova.KeypairConstraint, 'validate')
validate.return_value = True
def stub_ImageConstraint_validate(self, num=None):
validate = self.patchobject(glance.ImageConstraint, 'validate')
if num is None:
validate.return_value = True
else:
validate.side_effect = [True for x in range(num)]
def stub_FlavorConstraint_validate(self):
validate = self.patchobject(nova.FlavorConstraint, 'validate')
validate.return_value = True
def stub_VolumeConstraint_validate(self):
validate = self.patchobject(cinder.VolumeConstraint, 'validate')
validate.return_value = True
def stub_SnapshotConstraint_validate(self):
validate = self.patchobject(
cinder.VolumeSnapshotConstraint, 'validate')
validate.return_value = True
def stub_VolumeTypeConstraint_validate(self):
validate = self.patchobject(cinder.VolumeTypeConstraint, 'validate')
validate.return_value = True
def stub_VolumeBackupConstraint_validate(self):
validate = self.patchobject(cinder.VolumeBackupConstraint, 'validate')
validate.return_value = True
def stub_ServerConstraint_validate(self):
validate = self.patchobject(nova.ServerConstraint, 'validate')
validate.return_value = True
def stub_NetworkConstraint_validate(self):
validate = self.patchobject(neutron.NetworkConstraint, 'validate')
validate.return_value = True
def stub_PortConstraint_validate(self):
validate = self.patchobject(neutron.PortConstraint, 'validate')
validate.return_value = True
def stub_TroveFlavorConstraint_validate(self):
validate = self.patchobject(trove.FlavorConstraint, 'validate')
validate.return_value = True
def stub_SubnetConstraint_validate(self):
validate = self.patchobject(neutron.SubnetConstraint, 'validate')
validate.return_value = True
def stub_AddressScopeConstraint_validate(self):
validate = self.patchobject(neutron.AddressScopeConstraint, 'validate')
validate.return_value = True
def stub_SubnetPoolConstraint_validate(self):
validate = self.patchobject(neutron.SubnetPoolConstraint, 'validate')
validate.return_value = True
def stub_RouterConstraint_validate(self):
validate = self.patchobject(neutron.RouterConstraint, 'validate')
validate.return_value = True
def stub_QoSPolicyConstraint_validate(self):
validate = self.patchobject(neutron.QoSPolicyConstraint, 'validate')
validate.return_value = True
def stub_NovaNetworkConstraint(self):
validate = self.patchobject(nova.NetworkConstraint, 'validate')
validate.return_value = True
def stub_KeystoneProjectConstraint(self):
validate = self.patchobject(ks_constr.KeystoneProjectConstraint,
'validate')
validate.return_value = True
def stub_SaharaPluginConstraint(self):
validate = self.patchobject(sahara.PluginConstraint, 'validate')
validate.return_value = True
def stub_ProviderConstraint_validate(self):
validate = self.patchobject(neutron.ProviderConstraint, 'validate')
validate.return_value = True
def stub_SecretConstraint_validate(self):
validate = self.patchobject(barbican.SecretConstraint, 'validate')
validate.return_value = True
|
|
########
##
## Wrapper to hold results of a tournament
##
########
class TournamentResults(object):
"""
Calculates and wraps results of tournaments
"""
def __init__(self, botList, interactions, payoffs):
"""
Calculate the scores of the interactions and the total scores for the
bots using the specified payoffs.
ARGS:
- botList: a list of BotPlayer objects where the index of botX in the
list is the tournament id used in the argument interactions. basically,
botList maps tournament id to bot
- interactions: a dictionary with
keys => (tournament_id1, tournament_id2)
values => [meeting1, meeting2, ...]
where meetingX is a list of tuples (bot1_move, bot2_move).
For example:
interactions = {
(0, 1): [
[('C', 'D'), ('D', 'D')],
[('C', 'C'), ('C', 'D'), ('D', 'D')]
],
...
}
- payoffs: defines the scores for each Prisoner's Dilemma situation,
which TournamentResults needs to correctly score each interaction
"""
self.botList = botList
self.interactions = interactions
self.payoffs = payoffs
self.numBots = len(self.botList)
# to allow lookup for bot name and description by tournament id
self.bot_info_by_id = {}
for bot in botList:
self.bot_info_by_id[bot.tournament_id] =\
{'name': bot.name, 'description': bot.description, 'total': 0}
self.interaction_lengths = []
some_pair = next(iter(self.interactions))
for interaction in self.interactions[some_pair]:
self.interaction_lengths.append(len(interaction))
self.total_interactions = float(
self.numBots*sum(self.interaction_lengths)
)
# to be filled with scores for each bot in each interaction
self.interaction_scores = {}
# calculate and store interaction and total scores
self.calculate_scores()
def __str__(self):
# sort the bots for printing output
def get_score(bot):
return self.bot_info_by_id[bot.tournament_id]['total']
sorted_bots = sorted(self.botList, key=get_score, reverse=True)
headers = [
"Tournament ID",
"Bot Name",
"Total Score",
"Avg Score Per Turn"
]
num_cols = len(headers)
# find a good column width to use for formatting the output
long_header = max([len(h) for h in headers])
long_name = max([len(bot.name) for bot in self.botList])+1
col = max([long_header, long_name])
col_str = str(col)
format_str = (("{: <"+col_str+"} ")*num_cols)[:-1]
hr = "-"*(num_cols*col)
# construct output string
output = "\n***\n"
output += "Interaction Lengths: "+str(self.interaction_lengths)
output += "\n***\n"
headers_str = format_str.format(*headers)
output += "\n"+hr+"\n"+headers_str+"\n"+hr+"\n"
for bot in sorted_bots:
t_id = bot.tournament_id
name = self.get_name_by_id(t_id)
score = self.get_score_by_id(t_id)
avg = self.get_avg_score_by_id(t_id)
row = format_str.format(str(t_id), name, str(score), avg)
output += row+"\n"
return output
## TODO: make pretty printing for interactions
#####
# Initialization calculation methods
#####
def score_turn(self, actions):
"""
Get the scores for each bot for a single turn
ARGS:
- actions: tuple (bot1_move, bot2_move)
RETURNS:
- scores: the score for each bot (bot1_score, bot2_score)
"""
        if actions[0] == 'C':
            if actions[1] == 'C':
                scores = (self.payoffs['R'], self.payoffs['R'])
            elif actions[1] == 'D':
                scores = (self.payoffs['S'], self.payoffs['T'])
            else:
                raise ValueError(
                    "actions[1] is not a valid move, must be 'C' or 'D'")
        elif actions[0] == 'D':
            if actions[1] == 'C':
                scores = (self.payoffs['T'], self.payoffs['S'])
            elif actions[1] == 'D':
                scores = (self.payoffs['P'], self.payoffs['P'])
            else:
                raise ValueError(
                    "actions[1] is not a valid move, must be 'C' or 'D'")
        else:
            raise ValueError(
                "actions[0] is not a valid move, must be 'C' or 'D'")
        return scores
def calculate_scores(self):
"""
        Score every meeting for each bot pair and store the per-meeting
        scores in self.interaction_scores. Tally the total score for each
        bot and store it in self.bot_info_by_id[bot_id]['total'].
"""
for bot_pair in self.interactions:
self.interaction_scores[bot_pair] = []
for meeting in self.interactions[bot_pair]:
meeting_scores = [0, 0]
for turn in meeting:
turn_scores = self.score_turn(turn)
# accumulate scores for meeting
meeting_scores[0] += turn_scores[0]
meeting_scores[1] += turn_scores[1]
meeting_scores = tuple(meeting_scores)
# add scores for meeting to list of meeting scores for this pair
self.interaction_scores[bot_pair].append(meeting_scores)
# also add to total for each bot, but only once if this is a bot
# paired with its clone
if bot_pair[0] == bot_pair[1]:
self.bot_info_by_id[bot_pair[0]]['total']\
+= meeting_scores[0]
else:
for idx, bot_id in enumerate(bot_pair):
self.bot_info_by_id[bot_id]['total']\
+= meeting_scores[idx]
#####
# Getter methods
#####
def get_name_by_id(self, t_id):
return self.bot_info_by_id[t_id]['name']
def get_description_by_id(self, t_id):
return self.bot_info_by_id[t_id]['description']
def get_score_by_id(self, t_id):
return self.bot_info_by_id[t_id]['total']
def get_avg_score_by_id(self, t_id):
return self.get_score_by_id(t_id)/self.total_interactions
def get_winning_id(self):
id_list = [bot.tournament_id for bot in self.botList]
return max(id_list, key=self.get_score_by_id)
def get_winning_name(self):
return self.get_name_by_id(self.get_winning_id())
def get_interaction_score(self, id_1, id_2, meeting):
return self.interaction_scores[(id_1, id_2)][meeting]
def get_interaction_scores(self, id_1, id_2):
return self.interaction_scores[(id_1, id_2)]
def get_interaction(self, id_1, id_2, meeting):
return self.interactions[(id_1, id_2)][meeting]
def get_interactions(self, id_1, id_2):
return self.interactions[(id_1, id_2)]
def get_bot_list(self):
return self.botList
def get_sorted_bot_list(self):
def get_score(bot):
return self.get_score_by_id(bot.tournament_id)
return sorted(self.botList, key=get_score, reverse=True)
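

# --- illustrative usage sketch (not part of the original module) -------------
# TournamentResults only reads three attributes from each entry of botList
# (tournament_id, name and description), so the tiny stand-in class below is
# enough to demonstrate it; the real BotPlayer class is assumed to be defined
# elsewhere in this project.  Call _demo() (e.g. from the __main__ block
# below) to run the example.
class _DemoBot(object):
    def __init__(self, tournament_id, name, description=""):
        self.tournament_id = tournament_id
        self.name = name
        self.description = description


def _demo():
    # standard Prisoner's Dilemma payoff values (T > R > P > S)
    payoffs = {'T': 5, 'R': 3, 'P': 1, 'S': 0}
    bots = [_DemoBot(0, "AlwaysCooperate"), _DemoBot(1, "AlwaysDefect")]
    # one two-turn meeting per pairing, including each bot against its clone
    interactions = {
        (0, 0): [[('C', 'C'), ('C', 'C')]],
        (0, 1): [[('C', 'D'), ('C', 'D')]],
        (1, 1): [[('D', 'D'), ('D', 'D')]],
    }
    results = TournamentResults(bots, interactions, payoffs)
    print(results)
    print("winner: " + results.get_winning_name())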
if __name__ == "__main__":
pass
|