gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
import healpy as hp
import numpy as np
from .. import timing as timing
from ..map import DistRings, PySMSky, LibSharpSmooth, DistPixels
from ..tod import OpSimScan
from ..mpi import MPI
from ..op import Operator
def extract_local_dets(data):
    """Return the set of detectors local to this process.

    A detector may appear in only some of the observations, so every
    observation's TOD object is inspected and the union of all
    ``local_dets`` is returned as a set.
    """
    autotimer = timing.auto_timer()
    return {det for obs in data.obs for det in obs['tod'].local_dets}
def assemble_map_on_rank0(comm, local_map, pixel_indices, n_components, npix):
    """Gather a pixel-distributed map onto rank 0 via an MPI sum-reduce.

    Each process scatters its local pixels into a full-size zero buffer;
    summing the buffers over the communicator reconstructs the complete
    ``(n_components, npix)`` map on rank 0.  All other ranks return None.
    """
    autotimer = timing.auto_timer()
    if comm.rank == 0:
        full_maps_rank0 = np.zeros((n_components, npix), dtype=np.float64)
    else:
        full_maps_rank0 = None
    send_buffer = np.zeros((n_components, npix), dtype=np.float64)
    send_buffer[:, pixel_indices] = local_map
    comm.Reduce(send_buffer, full_maps_rank0, root=0, op=MPI.SUM)
    return full_maps_rank0
def extract_detector_parameters(det, focalplanes):
    """Return ``(bandcenter_ghz, bandwidth_ghz, fwhm_deg)`` for a detector.

    Searches each focalplane dictionary in ``focalplanes`` for ``det``.
    The focalplane "fwhm" value is presumably stored in arcminutes and is
    divided by 60 to yield degrees -- TODO confirm units with the
    focalplane producers.  If no "fwhm" key is present, -1 is returned as
    a sentinel so callers can detect that beam smoothing is unavailable.

    Raises:
        RuntimeError: if the detector is not found in any focalplane.
    """
    autotimer = timing.auto_timer()
    for fp in focalplanes:
        if det in fp:
            bandcenter = fp[det]["bandcenter_ghz"]
            bandwidth = fp[det]["bandwidth_ghz"]
            # -1 flags "no beam information available" (checked by
            # OpSimPySM.exec before smoothing).
            fwhm_deg = fp[det]["fwhm"] / 60 if "fwhm" in fp[det] else -1
            return bandcenter, bandwidth, fwhm_deg
    # Bug fix: the original raise never interpolated the detector name,
    # so the error message contained a literal "{}" placeholder.
    raise RuntimeError(
        "Cannot find detector {} in any focalplane".format(det))
class OpSimPySM(Operator):
    """Operator that simulates sky signal with PySM and scans it to timelines.

    For each locally held detector this operator:

    1. evaluates the PySM sky model integrated over a flat, tophat
       bandpass built from the detector's band center/width,
    2. optionally smooths the map with a Gaussian beam via libsharp,
    3. assembles the full map on rank 0, rotates/reorders it if requested,
    4. broadcasts it into the TOAST distributed pixel structure, and
    5. scans it into the detector timelines with ``OpSimScan``.

    Args:
        comm (mpi4py communicator): communicator used for all collective
            operations; must not be None (``exec`` calls ``comm.Barrier``
            and ``comm.rank`` unconditionally).
        out (str): accumulate data to the cache with name <out>_<detector>.
            If the named cache objects do not exist, then they are created.
        pysm_model (str): comma separated list of PySM component model
            tags (e.g. "s1,d8,a2"); each tag's first letter selects the
            component among synchrotron, dust, freefree, cmb, ame.
        pysm_precomputed_cmb_K_CMB: precomputed CMB map forwarded to
            ``PySMSky`` -- assumed format is whatever PySMSky expects;
            TODO confirm.
        focalplanes (list of dict): focalplane dictionaries keyed by
            detector name, each providing "bandcenter_ghz",
            "bandwidth_ghz" and optionally "fwhm" (presumably arcmin).
        nside (int): HEALPix NSIDE of the simulated sky.
        subnpix (int): submap size passed to ``DistPixels``.
        localsm: locally held submaps passed to ``DistPixels`` as
            ``local`` -- see DistPixels for the expected layout.
        apply_beam (bool): if True, smooth with a Gaussian beam of the
            detector FWHM; requires "fwhm" in the focalplane.
        nest (bool): if True, reorder the (RING-native) PySM map to NEST.
        units (str): output units passed to ``PySMSky``.
        debug (bool): verbose progress reports.
        coord (str): output coordinate frame; PySM is Galactic ("G"),
            any other value triggers an alm-space rotation on rank 0.
    """
    def __init__(self, comm=None,
                 out='signal', pysm_model='', pysm_precomputed_cmb_K_CMB=None,
                 focalplanes=None, nside=None,
                 subnpix=None, localsm=None, apply_beam=False, nest=True,
                 units='K_CMB', debug=False, coord="G"):
        autotimer = timing.auto_timer(type(self).__name__)
        # We call the parent class constructor, which currently does nothing
        super().__init__()
        self._out = out
        self._nest = nest
        self.comm = comm
        self._debug = debug
        self.pysm_precomputed_cmb_K_CMB = pysm_precomputed_cmb_K_CMB
        # Ring-based distribution used by PySM/libsharp (IQU => nnz=3).
        self.dist_rings = DistRings(comm,
                                    nside=nside,
                                    nnz=3)
        self.coord = coord
        pysm_sky_components = [
            'synchrotron',
            'dust',
            'freefree',
            'cmb',
            'ame',
        ]
        # Map each model tag (e.g. "s1") to its full component name by
        # matching the tag's first letter against the component list.
        pysm_sky_config = dict()
        for component_model in pysm_model.split(','):
            full_component_name = [
                each for each in pysm_sky_components
                if each.startswith(component_model[0])][0]
            pysm_sky_config[full_component_name] = component_model
        self.pysm_sky = PySMSky(comm=self.comm,
                                local_pixels=self.dist_rings.local_pixels,
                                nside=nside, pysm_sky_config=pysm_sky_config,
                                pysm_precomputed_cmb_K_CMB=self.pysm_precomputed_cmb_K_CMB,
                                units=units)
        self.nside = nside
        self.focalplanes = focalplanes
        self.npix = hp.nside2npix(nside)
        # Submap-distributed pixel structure used for the final broadcast
        # and for scanning into timelines.
        self.distmap = DistPixels(
            comm=comm, size=self.npix, nnz=3,
            dtype=np.float32, submap=subnpix, local=localsm)
        self.apply_beam = apply_beam
    def __del__(self):
        # Ensure that the PySMSky member is destroyed first because
        # it contains a reference to self.dist_rings.local_pixels
        del self.pysm_sky
        del self.dist_rings
        del self.distmap
    def exec(self, data):
        """Simulate and scan the sky signal for every local detector.

        NOTE(review): the whole pipeline below runs once per detector and
        relies on every rank reaching each ``Barrier``/``Reduce`` --
        collective calls must stay in this exact order.
        """
        autotimer = timing.auto_timer(type(self).__name__)
        local_dets = extract_local_dets(data)
        # Build a flat (tophat) bandpass per detector from its band
        # center and width.
        bandpasses = {}
        fwhm_deg = {}
        N_POINTS_BANDPASS = 10  # possibly take as parameter
        for det in local_dets:
            bandcenter, bandwidth, fwhm_deg[det] = \
                extract_detector_parameters(det, self.focalplanes)
            bandpasses[det] = \
                (np.linspace(bandcenter - bandwidth / 2,
                             bandcenter + bandwidth / 2,
                             N_POINTS_BANDPASS),
                 np.ones(N_POINTS_BANDPASS))
        lmax = 3 * self.nside - 1
        if self.comm.rank == 0 and self._debug:
            print('Collecting, Broadcasting map', flush=True)
        start = MPI.Wtime()
        local_maps = dict()  # FIXME use Cache instead
        for det in local_dets:
            self.comm.Barrier()
            if self.comm.rank == 0 and self._debug:
                print('Running PySM on {}'.format(det), flush=True)
            # Evaluate the sky through this detector's bandpass; the
            # result lands in local_maps["sky"].
            self.pysm_sky.exec(local_maps, out="sky",
                               bandpasses={"": bandpasses[det]})
            if self.apply_beam:
                if fwhm_deg[det] == -1:
                    raise RuntimeError(
                        "OpSimPySM: apply beam is True but focalplane doesn't "
                        "have fwhm")
                # LibSharp also supports transforming multiple channels
                # together each with own beam
                self.comm.Barrier()
                if self.comm.rank == 0 and self._debug:
                    print('Initializing LibSharpSmooth on {}'.format(det),
                          flush=True)
                smooth = LibSharpSmooth(
                    self.comm, signal_map="sky", out="sky",
                    lmax=lmax, grid=self.dist_rings.libsharp_grid,
                    fwhm_deg=fwhm_deg[det], beam=None)
                self.comm.Barrier()
                if self.comm.rank == 0 and self._debug:
                    print('Executing LibSharpSmooth on {}'.format(det),
                          flush=True)
                smooth.exec(local_maps)
                self.comm.Barrier()
                if self.comm.rank == 0 and self._debug:
                    print('LibSharpSmooth completed on {}'.format(det),
                          flush=True)
            n_components = 3
            self.comm.Barrier()
            if self.comm.rank == 0 and self._debug:
                print('Assemble PySM map on rank0, shape of local map is {}'
                      ''.format(local_maps["sky"].shape), flush=True)
            full_map_rank0 = assemble_map_on_rank0(
                self.comm, local_maps["sky"], self.dist_rings.local_pixels,
                n_components, self.npix)
            self.comm.Barrier()
            if self.comm.rank == 0 and self._debug:
                print('Communication completed', flush=True)
            if self.comm.rank == 0 and self.coord != "G":
                # PySM is always in Galactic, make rotation to Ecliptic or Equatorial
                rot = hp.Rotator(coord = ["G", self.coord])
                # this requires healpy 1.12.8
                try:
                    full_map_rank0 = rot.rotate_map_alms(full_map_rank0, use_pixel_weights=True)
                except AttributeError:
                    print('PySM coordinate conversion from G to another reference frame requires'
                          'healpy.Rotator.rotate_map_alms available since healpy 1.12.8')
                    raise
            if self.comm.rank == 0 and self._nest:
                # PySM is RING, toast is NEST
                full_map_rank0 = hp.reorder(full_map_rank0, r2n=True)
            # full_map_rank0 dict contains on rank 0 the smoothed PySM map
            self.comm.Barrier()
            if self.comm.rank == 0 and self._debug:
                print('PySM map min and max pixel value', hp.ma(full_map_rank0).min(), hp.ma(full_map_rank0).max(), flush=True)
                print('Broadcasting the map to other processes', flush=True)
            # Collective: every rank participates in the broadcast from
            # rank 0 into the distributed pixel structure.
            self.distmap.broadcast_healpix_map(full_map_rank0)
            self.comm.Barrier()
            if self.comm.rank == 0 and self._debug:
                print('Running OpSimScan', flush=True)
            scansim = OpSimScan(distmap=self.distmap, out=self._out, dets=[det])
            scansim.exec(data)
            if self.comm.rank == 0 and self._debug:
                tod = data.obs[0]["tod"]
                sig = tod.cache.reference(self._out + "_" + det)
                print('Rank 0 timeline min max after smoothing', sig.min(), sig.max(), flush=True)
        stop = MPI.Wtime()
        if self.comm.rank == 0:
            print('PySM Operator completed: {:.2f} seconds'
                  ''.format(stop - start), flush=True)
| |
import collections
import heapq
import traceback
import weakref
import numpy
import six
import chainer
from chainer import _backprop_utils
from chainer.backends import cuda
from chainer import configuration
from chainer import function_hook
from chainer.utils import type_check
from chainer import variable
class FunctionNode(object):
    """Function node of the computational graph.

    FunctionNode is a class representing a node in a computational graph. The
    node corresponds to an application of a differentiable function to input
    variables.

    When a differentiable function is applied to :class:`~chainer.Variable`
    objects,
    it creates an instance of FunctionNode implementation and calls its
    :meth:`apply` method. The :meth:`apply` method basically does the following
    three things.

    1. Adding an edge from the function node to the variable node corresponding
       to each input. The node of each input is extracted by
       :attr:`Variable.node <chainer.Variable.node>`.
    2. Computing the output arrays of the function.
    3. Creating a :class:`~chainer.Variable` object for each output array and
       adding an edge from the node of the variable to the function node.

    The output variables are then returned.

    .. admonition:: Example

       Let ``x`` be an instance of :class:`~chainer.Variable` and ``f`` be an
       instance of :class:`FunctionNode` taking only one argument.
       Then the following code

       >>> import numpy, chainer, chainer.functions as F
       >>> x = chainer.Variable(numpy.zeros(10))
       >>> f = F.Identity()
       >>> y = f.apply((x,))[0]

       computes a new variable ``y`` and creates backward references. The
       backward references are actually set as per the following diagram::

           x.node <--- f <--- y.node

       If an application of another function ``g`` occurs as

       >>> g = F.Identity()
       >>> z = g.apply((x,))[0]

       then the graph grows with a branch::

                    |--- f <--- y.node
           x.node <-+
                    |--- g <--- z.node

       Note that the branching is correctly managed on backward computation,
       i.e. the gradients from ``f`` and ``g`` are accumulated to the gradient
       of ``x``.

    Every function-node implementation should provide :meth:`forward` and
    :meth:`backward`. Instead of overriding :meth:`forward`, one can also
    implement :meth:`forward_cpu` and :meth:`forward_gpu` when the
    implementations for CPU and GPU arrays are totally different.

    Note that the input and output variables are inaccessible from
    :meth:`backward` by default. If it needs accesses to these variables, the
    :meth:`forward` method (or its CPU/GPU variants) has to call
    :meth:`retain_inputs` and :meth:`retain_outputs` appropriately. The
    retained input/output variables can be accessed from :meth:`backward` by
    calling :meth:`get_retained_inputs` and :meth:`get_retained_outputs`.

    .. note::

       There are two types of differentiable functions in Chainer (since v3).
       The first type is of a function using a subclass of
       :class:`~chainer.Function`,
       which is called *old-style differentiable function*. The second type is
       of a function using a subclass of :class:`FunctionNode`, which is called
       **new-style differentiable function**. There are several advantages on
       using the new-style differentiable function.

       - The new-style differentiable function supports *differentiable
         backpropagation*. The backpropagated gradients computed through the
         new-style differentiable functions themselves support further
         backpropagations so that the automatic higher-order differentiation is
         available.
       - The backpropagation of the new-style differentiable function can be
         more computationally efficient because the interface allows an
         implementation to omit the computation of unneeded input gradients.

       Note that the new-style differentiable function is the standard way of
       defining a function node of the computational graph in Chainer; old-
       style differentiable functions are implemented as wrappers of the new-
       style differentiable functions.

    Attributes:
        ~FunctionNode.inputs: A tuple of the input
            :class:`~chainer.variable.VariableNode` objects.
        ~FunctionNode.outputs: A tuple of weak references to the output
            :class:`~chainer.variable.VariableNode` objects.
        ~FunctionNode.rank (int): An ordinal following the topological order
            of the computational graph.
        ~FunctionNode.stack: Stack trace retrieved at the forward computation.
            The stack trace is available only in the debug mode.

    .. versionadded:: 3.0.0

    """
    inputs = None
    outputs = None
    rank = 0
    stack = None
    # Indexes recorded by retain_inputs()/retain_outputs() during forward();
    # None means "nothing retained".
    _input_indexes_to_retain = None
    _output_indexes_to_retain = None
    # Actual arrays of the retained outputs, kept so get_retained_outputs()
    # can resurrect garbage-collected output nodes.
    _retained_output_data = None
    # Lazily-created OrderedDict of per-function hooks (see
    # local_function_hooks property).
    _local_function_hooks = None
    lazy_grad_sum = False
    @property
    def local_function_hooks(self):
        """Ordered dictionary of registered function hooks.
        Contrary to ``chainer.thread_local.function_hooks``,
        which registers its elements to all functions,
        Function hooks in this property is specific to this function.
        """
        # Created lazily so that hook-free nodes pay no allocation cost.
        if self._local_function_hooks is None:
            self._local_function_hooks = collections.OrderedDict()
        return self._local_function_hooks
    @property
    def _n_local_function_hooks(self):
        # Count without triggering the lazy creation above.
        return (0 if self._local_function_hooks is None
                else len(self._local_function_hooks))
    @property
    def label(self):
        """Short text that represents the function.
        The default implementation returns its type name.
        Each function should override it to give more information.
        """
        return self.__class__.__name__
    @property
    def output_data(self):
        """A tuple of the retained output arrays.
        This property is mainly used by :class:`Function`. Users basically do
        not have to use this property; use :meth:`get_retained_outputs`
        instead.
        """
        if self._retained_output_data is None:
            raise RuntimeError('retained output data is gone')
        # Scatter the retained arrays back to their original positions;
        # non-retained outputs stay None.
        out_data = [None] * len(self.outputs)
        for index, data in six.moves.zip(self._output_indexes_to_retain,
                                         self._retained_output_data):
            out_data[index] = data
        return tuple(out_data)
    @property
    def _impl_name(self):
        return self.__class__.__name__
    def __call__(self, *args, **kwargs):
        # FunctionNode is applied via apply(); calling it directly is a
        # user error, reported with a message tailored to built-ins.
        if self.__class__.__module__.startswith('chainer.'):
            msg = '''\
Chainer's built-in function class object ({}) which is derived from \
chainer.FunctionNode has been called as if it were a callable. \
Use FunctionNode.apply() method instead.
Furthermore, it's not recommended to use built-in function classes directly; \
use corresponding function aliases (those with snake_case name, such as \
F.convolution_nd) instead.\
'''.format(self.__class__.__name__)
        else:
            msg = '''\
A function class object ({}) which is derived from \
chainer.FunctionNode has been called as if it were a callable. \
Use apply() method instead.\
'''.format(self.__class__.__name__)
        raise RuntimeError(msg)
    def apply(self, inputs):
        """Computes output variables and grows the computational graph.
        Basic behavior is expressed in the documentation of
        :class:`FunctionNode`.
        .. note::
           If the :data:`~Variable.data` attribute of input variables exist on
           a GPU device, that device is made current before calling
           :meth:`forward`, so implementors do not need to take care of device
           selection in most cases.
        Args:
            inputs: Tuple of input variables. Each element can be either
                :class:`~chainer.Variable`, :class:`numpy.ndarray`,
                or :class:`cupy.ndarray`. If the element is an ndarray, it is
                automatically wrapped with :class:`~chainer.Variable`.
        Returns:
            A tuple of output :class:`~chainer.Variable` objects.
        """
        input_vars = [chainer.as_variable(x) for x in inputs]
        in_data = tuple([x.data for x in input_vars])
        # Outputs require grad if ANY input does.
        requires_grad = any([x.requires_grad for x in input_vars])
        # Check for input array types
        if not chainer.is_arrays_compatible(in_data):
            raise TypeError(
                'incompatible array types are mixed in the forward input '
                '({}).\n'
                'Actual: {}'.format(
                    self.label,
                    ', '.join(str(type(x)) for x in in_data)))
        is_debug = chainer.is_debug()
        if is_debug:
            # Keep stack trace for debug
            self.stack = traceback.extract_stack()
        if configuration.config.type_check:
            self._check_data_type_forward(in_data)
        hooks = chainer.get_function_hooks()
        if self._n_local_function_hooks > 0:
            # Merge global and per-function hooks; copy so the global
            # dict is not mutated.
            hooks = collections.OrderedDict(hooks)
            hooks.update(self.local_function_hooks)
        hooks = hooks.values()  # avoid six for performance
        for hook in hooks:
            hook.forward_preprocess(self, in_data)
        # Forward propagation
        with cuda.get_device_from_array(*in_data):
            # Reset retention state; forward() may set it again via
            # retain_inputs()/retain_outputs().
            self._input_indexes_to_retain = None
            self._output_indexes_to_retain = None
            outputs = self.forward(in_data)
        # Check for output array types
        if not isinstance(outputs, tuple):
            raise TypeError(
                'forward output must be a tuple ({})\n'
                'Actual: {}'.format(self.label, type(outputs)))
        if not chainer.is_arrays_compatible(outputs):
            raise TypeError(
                'incompatible array types are mixed in the forward output '
                '({}).\n'
                'Actual: {}'.format(
                    self.label,
                    ', '.join(str(type(x)) for x in outputs)))
        for hook in hooks:
            hook.forward_postprocess(self, in_data)
        # NaN check of output values
        if is_debug:
            if any(out.dtype.kind == 'f' and
                   cuda.get_array_module(out).isnan(out).any()
                   for out in outputs):
                msg = ('NaN is detected on forward computation of '
                       '{}'.format(self.label))
                raise RuntimeError(msg)
        ret = tuple([variable.Variable(y, requires_grad=requires_grad)
                     for y in outputs])
        if configuration.config.enable_backprop:
            # Topological ordering
            self.rank = max([x.rank for x in input_vars]) if input_vars else 0
            # Add backward edges
            for y in ret:
                y.creator_node = self
            self.inputs = tuple([x.node for x in input_vars])
            # Add forward edges (must be weak references)
            self.outputs = tuple([weakref.ref(y.node) for y in ret])
            if self._input_indexes_to_retain is not None:
                for index in self._input_indexes_to_retain:
                    input_vars[index].retain_data()
            if self._output_indexes_to_retain is not None:
                retained_data = []
                for index in self._output_indexes_to_retain:
                    ret[index].retain_data()
                    retained_data.append(outputs[index])
                self._retained_output_data = tuple(retained_data)
            self.lazy_grad_sum = configuration.config.lazy_grad_sum
        return ret
    def _check_data_type_forward(self, in_data):
        # Fast path: try the cheap "light" type check first and only fall
        # back to the full (error-reporting) check when it fails.
        in_type = type_check.get_light_types(in_data)
        try:
            with type_check.light_mode:
                self.check_type_forward(in_type)
            return
        except type_check.InvalidType:
            # Ignore errors on first run
            pass
        in_type = type_check.get_types(in_data, 'in_types', False)
        with type_check.get_function_check_context(self):
            self.check_type_forward(in_type)
    def check_type_forward(self, in_types):
        """Checks types of input data before forward propagation.
        This method is called before :meth:`forward` and validates the types of
        input variables using
        :ref:`the type checking utilities <type-check-utils>`.
        Args:
            in_types (~chainer.utils.type_check.TypeInfoTuple): The type
                information of input variables for :meth:`forward`.
        """
        pass
    def forward(self, inputs):
        """Computes the output arrays from the input arrays.
        It delegates the procedure to :meth:`forward_cpu` or
        :meth:`forward_gpu` by default. Which of them this method selects is
        determined by the type of input arrays. Implementations of
        :class:`FunctionNode` must implement either CPU/GPU methods or this
        method.
        Args:
            inputs: Tuple of input array(s).
        Returns:
            Tuple of output array(s).
        .. warning::
            Implementations of :class:`FunctionNode` must take care that the
            return value must be a tuple even if it returns only one array.
        """
        assert len(inputs) > 0
        # Dispatch on the type of the first input only; mixed CPU/GPU
        # inputs are rejected earlier by is_arrays_compatible.
        if isinstance(inputs[0], cuda.ndarray):
            return self.forward_gpu(inputs)
        return self.forward_cpu(inputs)
    def forward_cpu(self, inputs):
        """Computes the output arrays from the input NumPy arrays.
        Args:
            inputs: Tuple of input :class:`numpy.ndarray` objects.
        Returns:
            Tuple of output arrays. Each element can be NumPy or CuPy arrays.
        .. warning::
            Implementation of :class:`FunctionNode` must take care that the
            return value must be a tuple even if it returns only one array.
        """
        raise NotImplementedError
    def forward_gpu(self, inputs):
        """Computes the output arrays from the input CuPy arrays.
        Args:
            inputs: Tuple of input :class:`cupy.ndarray` objects.
        Returns:
            Tuple of output arrays. Each element can be NumPy or CuPy arrays.
        .. warning::
            Implementation of :class:`FunctionNode` must take care that the
            return value must be a tuple even if it returns only one array.
        """
        raise NotImplementedError
    def retain_inputs(self, indexes):
        """Lets specified input variable nodes keep data arrays.
        By calling this method from :meth:`forward`, the function node can
        specify which inputs are required for backprop. The input variables
        with retained arrays can then be obtained by calling
        :meth:`get_retained_inputs` from inside :meth:`backward`.
        Unlike :class:`~chainer.Function`, the function node **DOES NOT** keep
        input
        arrays by default. If you want to keep some or all input arrays, do not
        forget to call this method.
        Note that **this method must not be called from the outside of**
        :meth:`forward`.
        Args:
            indexes (iterable of int): Indexes of input variables that the
                function will require for backprop.
        """
        self._input_indexes_to_retain = indexes
    def retain_outputs(self, indexes):
        """Lets specified output variable nodes keep data arrays.
        By calling this method from :meth:`forward`, the function node can
        specify which outputs are required for backprop. If this method is not
        called, no output variables will be marked to keep their data array at
        the point of returning from :meth:`apply`. The output variables with
        retained arrays can then be obtained by calling
        :meth:`get_retained_outputs` from inside :meth:`backward`.
        .. note::
           It is recommended to use this method if the function requires some
           or all output arrays in backprop. The function can also use output
           arrays just by keeping references to them directly, although it
           might affect the performance of later function applications on the
           output variables.
        Note that **this method must not be called from the outside of**
        :meth:`forward`.
        Args:
            indexes (iterable of int): Indexes of output variables that the
                function will require for backprop.
        """
        self._output_indexes_to_retain = indexes
    def backward(self, target_input_indexes, grad_outputs):
        """Computes gradients w.r.t.\\ specified inputs given output gradients.
        This method is used to compute one step of the backpropagation
        corresponding to the forward computation of this function node.
        Given the gradients w.r.t. output variables, this method computes the
        gradients w.r.t. specified input variables. Note that this method does
        not need to compute any input gradients not specified by
        ``target_input_indices``.
        Unlike :meth:`Function.backward() <chainer.Function.backward>`,
        gradients are given as :class:`~chainer.Variable` objects and this
        method itself has to return input gradients as
        :class:`~chainer.Variable` objects. It enables the function node to
        return the input gradients with the full computational history, in
        which case it supports *differentiable backpropagation* or
        *higher-order differentiation*.
        The default implementation returns ``None`` s, which means the
        function is not differentiable.
        Args:
            target_input_indexes (tuple of int): Indices of the input variables
                w.r.t. which the gradients are required. It is guaranteed that
                this tuple contains at least one element.
            grad_outputs (tuple of :class:`~chainer.Variable`\\ s): Gradients
                w.r.t. the output variables.
                If the gradient w.r.t. an output variable is not
                given, the corresponding element is ``None``.
        Returns:
            Tuple of variables that represent the gradients w.r.t. specified
            input variables. The length of the tuple can be same as either
            ``len(target_input_indexes)`` or the number of inputs. In the
            latter case, the elements not specified by ``target_input_indexes``
            will be discarded.
        .. seealso::
            :meth:`backward_accumulate` provides an alternative interface that
            allows you to implement the backward computation fused with the
            gradient accumulation.
        """
        return (None,) * len(target_input_indexes)
    def backward_accumulate(self, target_input_indexes, grad_outputs,
                            grad_inputs):
        """Computes gradients w.r.t.\\ specified inputs and accumulates them.
        This method provides a way to fuse the backward computation and the
        gradient accumulations in the case that the multiple functions are
        applied to the same variable.
        Users have to override either of this method or :meth:`backward`.
        It is often simpler to implement :meth:`backward` and is recommended
        if you do not need to provide efficient gradient accumulation.
        Args:
            target_input_indexes (tuple of int): Indices of the input variables
                w.r.t. which the gradients are required. It is guaranteed that
                this tuple contains at least one element.
            grad_outputs (tuple of Variable): Gradients w.r.t. the output
                variables. If the gradient w.r.t. an output variable is not
                given, the corresponding element is ``None``.
            grad_inputs (tuple of Variable): Gradients w.r.t. the input
                variables specified by ``target_input_indexes``. These values
                are computed by other computation paths. If there is no
                gradient value existing for the variable, the corresponding
                element is ``None``. See also the note below.
        Returns:
            Tuple of variables that represent the gradients w.r.t. specified
            input variables. Unlike :meth:`backward`, the length of the tuple
            **must** be same as that of ``target_input_indices``.
        .. note::
           When the same variable is passed to the multiple input arguments of
           a function, only the first position of ``grad_inputs`` corresponding
           to these input arguments may contain the gradient variable
           corresponding to that input variable, and other entries are set to
           ``None``. This is an implementation-detail convention to avoid the
           complication of correctly accumulating gradients in such a case.
           This behavior might be changed in a future version.
        """
        assert isinstance(target_input_indexes, tuple)
        assert isinstance(grad_outputs, tuple)
        assert isinstance(grad_inputs, tuple)
        # The default implementation uses backward(). You can override this
        # method without using backward().
        gxs = self.backward(target_input_indexes, grad_outputs)
        len_gxs = len(gxs)
        if len_gxs == len(self.inputs):
            # backward() returned one gradient per input; keep only the
            # requested ones.
            gxs = tuple([gxs[i] for i in target_input_indexes])
        elif len_gxs != len(target_input_indexes):
            raise ValueError(
                'number of gradients returned by %s (%s) is incorrect.'
                % (self._impl_name, self.label))
        if self.lazy_grad_sum:
            # Defer the actual additions: concat_variable collects the
            # terms, and the sum is materialized only at graph leaves.
            gxs_output = ()
            for i, (gx, g_input) in enumerate(six.moves.zip(gxs, grad_inputs)):
                sum_gx = _backprop_utils.concat_variable(gx, g_input)
                j = target_input_indexes[i]
                if self.inputs[j].creator is None and \
                        isinstance(sum_gx, tuple):
                    sum_gx = chainer.functions.add(*sum_gx)
                gxs_output += sum_gx,
            return gxs_output
        else:
            # Eager accumulation; None on either side means "no term".
            return tuple([gx if g_input is None else
                          g_input if gx is None else
                          gx + g_input
                          for gx, g_input in six.moves.zip(gxs, grad_inputs)])
    def get_retained_inputs(self):
        """Returns a tuple of retained input variables.
        This method is used to retrieve the input variables retained in
        :meth:`forward`.
        Returns:
            A tuple of retained input variables.
        """
        inputs = self.inputs
        return tuple([inputs[index].get_variable()
                      for index in self._input_indexes_to_retain])
    def get_retained_outputs(self):
        """Returns a tuple of retained output variables.
        This method is used to retrieve the output variables retained in
        :meth:`forward`.
        Returns:
            A tuple of retained output variables.
        .. note::
           This method does a tricky thing to support the case of an output
           node garbage-collected before this method is called; in this case,
           this method creates a fresh variable node that acts as an output
           node of the function node.
        """
        ret = []
        outputs = self.outputs
        new_outputs = list(outputs)
        outputs_modified = False
        for index, data in six.moves.zip(self._output_indexes_to_retain,
                                         self._retained_output_data):
            # outputs holds weakrefs; calling one yields the node or None.
            output = outputs[index]()
            if output is None:
                # The output node is garbage collected, so create a fresh
                # Variable object.
                output_var = variable.Variable(data)
                output_var.creator_node = self
                new_outputs[index] = weakref.ref(output_var)
                outputs_modified = True
            else:
                output_var = output.get_variable()
            ret.append(output_var)
        if outputs_modified:
            self.outputs = tuple(new_outputs)
        return tuple(ret)
    def unchain(self):
        """Purges in/out nodes and this function node itself from the graph."""
        for y in self.outputs:
            y_ref = y()
            if y_ref is not None:
                y_ref.unchain()
        self.inputs = None
    def add_hook(self, hook, name=None):
        """Registers a function hook.
        Args:
            hook (~chainer.FunctionHook): Function hook to be
                registered.
            name (str): Name of the function hook. The name must be unique
                among function hooks registered to this function. If ``None``,
                the default name of the function hook is used.
        """
        if not isinstance(hook, function_hook.FunctionHook):
            raise TypeError('Hook must be of type FunctionHook')
        if name is None:
            name = hook.name
        hooks = self.local_function_hooks
        if name in hooks:
            raise KeyError('Hook %s already exists' % name)
        hooks[name] = hook
        hook.added(function=self)
    def delete_hook(self, name):
        """Unregisters the function hook.
        Args:
            name (str): The name of the function hook to be unregistered.
        """
        if name in self.local_function_hooks:
            # Notify the hook before removal so it can clean up.
            self.local_function_hooks[name].deleted(function=self)
            del self.local_function_hooks[name]
        else:
            raise KeyError('Hook %s does not exist' % name)
def grad(outputs, inputs, grad_outputs=None, grad_inputs=None, set_grad=False,
retain_grad=False, enable_double_backprop=False, loss_scale=None):
"""Computes the gradient of output variables w.r.t.\\ the input variables.
This function implements the backpropagation algorithm. While
:meth:`Variable.backward` also implements backprop, this function selects
the smallest paths in the computational graph needed to compute the
gradients w.r.t. inputs. The error is backpropagated only through these
selected paths, which may reduce the overall computational cost.
This function also differs from :meth:`Variable.backward` in the way to
return the gradients; it directly returns the gradient variables as a list
instead of setting gradients to the :attr:`Variable.grad_var` attribute of
the original variable. It means users do not need to clear the gradient
w.r.t. each variable before computing the gradient using this function.
If ``set_grad`` option is set to ``True``, the computed gradient is also
stored in the :attr:`Variable.grad_var` attribute of each variable, in
which case any original value of :attr:`Variable.grad_var` will be updated
even if it had already been set.
Args:
outputs (tuple or list of :class:`~chainer.Variable`):
A sequence of output variables from which backprop starts.
inputs (tuple or list of :class:`~chainer.Variable`):
A sequence of input variables each of which this function computes
the gradient w.r.t.
grad_outputs (tuple or list of :class:`~chainer.Variable` or None):
A sequence of variables that gives the initial value of each output
gradient.
If an element is set to ``None``, an array filled with 1 is used.
If this argument itself is ``None``, it is treated as a sequence of
``None``\\ s.
grad_inputs (tuple or list of :class:`~chainer.Variable` or None):
A sequence of variables that gives the initial value of each input
gradient. The gradients computed by the backprop
algorithm are accumulated to them (not in-place). If an element
is set to ``None``, the gradient is not accumulated to this value.
If this argument itself is ``None``, it is treated as a sequence of
``None``\\ s.
set_grad (bool): If it is ``True``, the :attr:`Variable.grad_var`
attribute of each input variable is set to the corresponding
computed gradient variable.
retain_grad (bool): If it is ``True``, the gradients w.r.t. all the
intermediate variables are stored in the :attr:`Variable.grad_var`
attribute. In this case, the ``set_grad`` option is ignored.
enable_double_backprop (bool): If it is ``True``, the computed
gradients can be further backpropagated. Enabling it may increase
the memory consumption (and possibly the computational time) to
remember the intermediate gradient values for the second
backpropagation.
loss_scale (float): Loss scaling factor. Loss scaling is a usefull
technique to mitigate vanishing gradient issue that tends to happen
when low precision data type like float16 is used during training.
If you set loss scaling factor, gradients of loss values are to be
multiplied by the factor before backprop starts. The factor is
propagated to whole gradients in a computational graph along the
backprop. The gradients of parameters are divided by the factor
just before the parameters are to be updated.
Returns:
A list of gradient variables w.r.t. the inputs.
"""
if not isinstance(outputs, (tuple, list)):
raise TypeError(
'outputs must be a tuple or a list, not {}.'.format(type(outputs)))
if not isinstance(inputs, (tuple, list)):
raise TypeError(
'inputs must be a tuple or a list, not {}.'.format(type(inputs)))
if not (grad_outputs is None or isinstance(grad_outputs, (tuple, list))):
raise TypeError(
'grad_outputs must be a tuple or a list or None, not {}.'.format(
type(grad_outputs)))
if not (grad_inputs is None or isinstance(grad_inputs, (tuple, list))):
raise TypeError(
'grad_inputs must be a tuple or a list or None, not {}.'.format(
type(grad_inputs)))
for v in outputs:
# Raise error here if v is created by Function.backward.
# In such case, we don't know exact inputs of the creator.
v.node._check_old_style_gradient()
# The implementation consists of three steps.
# 1. Backward enumeration: all the nodes reachable backward from the output
# nodes are enumerated. The forward direction links are collected in
# this step. Note that the variable nodes whose requires_grad is false
# are ignored and their creators are not searched.
candidate_funcs = [v.creator_node for v in outputs
if v.creator_node is not None]
visited_funcs = set()
forward_graph = collections.defaultdict(list)
while candidate_funcs:
func = candidate_funcs.pop()
if func in visited_funcs:
continue
visited_funcs.add(func)
for x in func.inputs:
# Raise error here if x is created by Function.backward.
# In such case, we don't know exact inputs of the creator.
x._check_old_style_gradient()
if not x.requires_grad:
continue
forward_graph[x].append(func)
creator = x.creator_node
if creator is not None and creator not in visited_funcs:
candidate_funcs.append(creator)
# 2. Forward enumeration: all the nodes in the subgraph reachable from the
# input nodes are enumerated. The extracted (sub-)subgraph is the union
# of all paths that backpropagation will visit.
candidate_vars = [x.node for x in inputs]
visited_funcs = set()
grad_required = set()
while candidate_vars:
x = candidate_vars.pop()
grad_required.add(x)
for func in forward_graph[x]:
if func in visited_funcs:
continue
visited_funcs.add(func)
for y_ref in func.outputs:
y = y_ref()
if y is not None and y in forward_graph:
candidate_vars.append(y)
# 3. Backpropagation: the backpropagation is executed along the
# (sub-)subgraph. It uses the topological order of the subgraph which is
# induced by the reversed order of function applications ("rank").
grads = {} # mapping from variable nodes to their gradients
# Initialize the gradient mapping.
if grad_outputs is None:
grad_outputs = (None,) * len(outputs)
for y, gy in zip(outputs, grad_outputs):
if gy is None:
with cuda.get_device_from_array(y.data) as device:
if device is cuda.DummyDevice:
gy_data = numpy.ones_like(y.data)
else:
gy_data = cuda.cupy.ones_like(y.data)
gy = variable.Variable(gy_data, requires_grad=False)
if loss_scale is not None:
gy.data *= loss_scale
grads[y.node] = gy
if grad_inputs is not None:
for x, gx in zip(inputs, grad_inputs):
if gx is not None:
grads[x.node] = gx
# Backprop implementation. It edits grads which will only contain the
# gradients w.r.t. the inputs.
with chainer.using_config('enable_backprop', enable_double_backprop):
_backprop(outputs, inputs, grad_required, retain_grad, grads,
loss_scale)
# Extract the gradients w.r.t. the inputs and return them.
ret = [grads.get(x.node, None) for x in inputs]
if set_grad:
for x, gx in zip(inputs, ret):
x.grad_var = gx
return ret
def _backprop(outputs, inputs, grad_required, retain_grad, grads, loss_scale):
    """Run backpropagation over the subgraph between ``outputs`` and ``inputs``.

    ``grads`` maps variable nodes to their current gradients and is edited in
    place; on return it holds the gradients w.r.t. the nodes in ``inputs``.
    ``grad_required`` is the set of variable nodes lying on some path from an
    input to an output; all other nodes are skipped. Functions are processed
    in decreasing topological ``rank`` order via the heap helpers below.
    """
    candidate_funcs, push_candidate, pop_candidate = _get_ordered_func_heap()

    # Seed the work heap with the creators of the output variables.
    for y in outputs:
        creator = y.creator_node
        if creator is not None:
            push_candidate(creator)

    input_nodes = set(x.node for x in inputs)

    while candidate_funcs:
        func = pop_candidate()

        # Collect the gradients w.r.t. the outputs
        gys = []
        for y_ref in func.outputs:
            y = y_ref()
            if y is None:
                # output is not a part of the selected subgraph and has already
                # been released.
                gys.append(None)
                continue
            gys.append(grads.get(y, None))
        gys = tuple(gys)

        # Collect the gradients w.r.t. the inputs
        #
        # Note (Tokui): when the same variable is passed multiple times as
        # inputs in the same function (e.g. an expression like f(x, x)), the
        # current implementation passes None as the current gradient w.r.t.
        # such an input except for the first one (i.e., it builds gxs like
        # (gx, None) where gx is the current gradient w.r.t. x).
        gxs = []
        input_indexes = []
        selected_inputs = set()
        for i, x in enumerate(func.inputs):
            if x not in grad_required:
                continue
            input_indexes.append(i)
            if x in selected_inputs:
                gxs.append(None)
            else:
                gxs.append(grads.get(x, None))
                selected_inputs.add(x)
        gxs = tuple(gxs)
        input_indexes = tuple(input_indexes)
        if not input_indexes:
            continue

        # Do backward

        # Accumulate lazily-concatenated output gradients before use.
        gys = tuple([gy if not isinstance(gy, tuple) else
                     chainer.functions.add(*gy)
                     for gy in gys])

        # Call pre-backward hooks
        hooks = chainer.get_function_hooks()
        if func._n_local_function_hooks != 0:
            hooks = collections.OrderedDict(hooks)
            hooks.update(func.local_function_hooks)
        hooks = hooks.values()  # avoid six for performance

        in_data = tuple([x.data for x in func.inputs])
        out_grad_data = tuple(
            [None if g is None else g.data for g in gys])
        cuda.get_device_from_array(*in_data).use()
        for hook in hooks:
            hook.backward_preprocess(func, in_data, out_grad_data)

        new_gxs = func.backward_accumulate(input_indexes, gys, gxs)

        # Call post-backward hooks
        for hook in hooks:
            hook.backward_postprocess(func, in_data, out_grad_data)

        # Delete output gradients that are not required to return
        for y_ref in func.outputs:
            y = y_ref()
            if y is not None and y in grads and y not in input_nodes:
                del grads[y]

        # Update grads
        selected_inputs = set()
        for i, g in zip(input_indexes, new_gxs):
            if g is None:
                continue

            node = func.inputs[i]
            if node in selected_inputs:
                # Accumulate the duplicated gradients here
                cur_gx = grads.get(node, None)
                if cur_gx is not None:
                    if func.lazy_grad_sum:
                        # BUG FIX: this condition previously tested
                        # ``x.creator`` — but ``x`` was the stale loop
                        # variable left over from the input-collection loop
                        # above, not the node whose gradient is being
                        # accumulated. Test the accumulated node instead.
                        if node.creator_node is None:
                            g = _backprop_utils.add(g, cur_gx)
                        else:
                            g = _backprop_utils.concat_variable(g, cur_gx)
                        # cur_gx can't be tuple, the lazy_grad_sum can't
                        # be enabled in its sibling node.
                    else:
                        g = g + cur_gx
            else:
                selected_inputs.add(node)

            grads[node] = g

            if retain_grad:
                v = node.get_variable_or_none()
                if v is not None:
                    v.grad_var = g
                    v._loss_scale = loss_scale

            creator = node.creator_node
            if creator is not None:
                push_candidate(creator)
def _get_ordered_func_heap():
heap = []
visited_funcs = set()
def push_heap(func):
if func not in visited_funcs:
# Negate since heapq is min-heap
# The second element is used to make each item unique
ordered_func = -func.rank, len(visited_funcs), func
visited_funcs.add(func)
heapq.heappush(heap, ordered_func)
def pop_heap():
_, _, func = heapq.heappop(heap)
return func
return heap, push_heap, pop_heap
| |
"""
Handled exceptions raised by REST framework.
In addition Django's built in 403 and 404 exceptions are handled.
(`django.http.Http404` and `django.core.exceptions.PermissionDenied`)
"""
from __future__ import unicode_literals
import math
from django.http import JsonResponse
from django.utils import six
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext
from rest_framework import status
from rest_framework.compat import unicode_to_repr
from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList
def _get_error_details(data, default_code=None):
    """
    Descend into a nested data structure, forcing any
    lazy translation strings or strings into `ErrorDetail`.
    """
    if isinstance(data, list):
        items = [_get_error_details(item, default_code) for item in data]
        # Preserve the serializer-aware list type when given one.
        if isinstance(data, ReturnList):
            return ReturnList(items, serializer=data.serializer)
        return items

    if isinstance(data, dict):
        mapping = {
            key: _get_error_details(value, default_code)
            for key, value in data.items()
        }
        # Preserve the serializer-aware dict type when given one.
        if isinstance(data, ReturnDict):
            return ReturnDict(mapping, serializer=data.serializer)
        return mapping

    # Leaf value: coerce to text and wrap, keeping any explicit error code.
    return ErrorDetail(force_text(data), getattr(data, 'code', default_code))
def _get_codes(detail):
if isinstance(detail, list):
return [_get_codes(item) for item in detail]
elif isinstance(detail, dict):
return {key: _get_codes(value) for key, value in detail.items()}
return detail.code
def _get_full_details(detail):
if isinstance(detail, list):
return [_get_full_details(item) for item in detail]
elif isinstance(detail, dict):
return {key: _get_full_details(value) for key, value in detail.items()}
return {
'message': detail,
'code': detail.code
}
class ErrorDetail(six.text_type):
    """
    A string-like object that can additionally have a code.
    """
    code = None

    def __new__(cls, string, code=None):
        self = super(ErrorDetail, cls).__new__(cls, string)
        self.code = code
        return self

    def __eq__(self, other):
        r = super(ErrorDetail, self).__eq__(other)
        # str.__eq__ returns NotImplemented for non-string operands.
        # Propagate it so Python can try the reflected comparison, instead
        # of treating the (truthy) NotImplemented as a successful match.
        if r is NotImplemented:
            return NotImplemented
        try:
            return r and self.code == other.code
        except AttributeError:
            # `other` is a plain string with no code; compare text only.
            return r

    def __ne__(self, other):
        r = self.__eq__(other)
        if r is NotImplemented:
            return NotImplemented
        return not r

    def __repr__(self):
        return unicode_to_repr('ErrorDetail(string=%r, code=%r)' % (
            six.text_type(self),
            self.code,
        ))

    def __hash__(self):
        # Hash on the text only, consistent with equality against plain strings.
        return hash(str(self))
class APIException(Exception):
    """
    Base class for REST framework exceptions.
    Subclasses should provide `.status_code` and `.default_detail` properties.
    """
    status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
    default_detail = _('A server error occurred.')
    default_code = 'error'

    def __init__(self, detail=None, code=None):
        # Fall back to the class-level defaults when not supplied.
        detail = self.default_detail if detail is None else detail
        code = self.default_code if code is None else code
        self.detail = _get_error_details(detail, code)

    def __str__(self):
        return six.text_type(self.detail)

    def get_codes(self):
        """
        Return only the code part of the error details.
        Eg. {"name": ["required"]}
        """
        return _get_codes(self.detail)

    def get_full_details(self):
        """
        Return both the message & code parts of the error details.
        Eg. {"name": [{"message": "This field is required.", "code": "required"}]}
        """
        return _get_full_details(self.detail)
# The recommended style for using `ValidationError` is to keep it namespaced
# under `serializers`, in order to minimize potential confusion with Django's
# built in `ValidationError`. For example:
#
# from rest_framework import serializers
# raise serializers.ValidationError('Value was invalid')
class ValidationError(APIException):
    """Raised for invalid input; details are always coerced to a container."""
    status_code = status.HTTP_400_BAD_REQUEST
    default_detail = _('Invalid input.')
    default_code = 'invalid'

    def __init__(self, detail=None, code=None):
        detail = self.default_detail if detail is None else detail
        code = self.default_code if code is None else code

        # For validation failures, we may collect many errors together,
        # so the details should always be coerced to a list if not already.
        if not isinstance(detail, (dict, list)):
            detail = [detail]

        self.detail = _get_error_details(detail, code)
class ParseError(APIException):
    """APIException for a request body that cannot be parsed (HTTP 400)."""
    default_code = 'parse_error'
    default_detail = _('Malformed request.')
    status_code = status.HTTP_400_BAD_REQUEST
class AuthenticationFailed(APIException):
    """APIException for credentials that were provided but rejected (HTTP 401)."""
    default_code = 'authentication_failed'
    default_detail = _('Incorrect authentication credentials.')
    status_code = status.HTTP_401_UNAUTHORIZED
class NotAuthenticated(APIException):
    """APIException for requests that supplied no credentials at all (HTTP 401)."""
    default_code = 'not_authenticated'
    default_detail = _('Authentication credentials were not provided.')
    status_code = status.HTTP_401_UNAUTHORIZED
class PermissionDenied(APIException):
    """APIException for authenticated users lacking permission (HTTP 403)."""
    default_code = 'permission_denied'
    default_detail = _('You do not have permission to perform this action.')
    status_code = status.HTTP_403_FORBIDDEN
class NotFound(APIException):
    """APIException for missing resources (HTTP 404)."""
    default_code = 'not_found'
    default_detail = _('Not found.')
    status_code = status.HTTP_404_NOT_FOUND
class MethodNotAllowed(APIException):
    """APIException for HTTP 405; the rejected method name is interpolated
    into the default detail message."""
    status_code = status.HTTP_405_METHOD_NOT_ALLOWED
    default_detail = _('Method "{method}" not allowed.')
    default_code = 'method_not_allowed'

    def __init__(self, method, detail=None, code=None):
        if detail is None:
            # Format the offending HTTP method into the default message.
            detail = force_text(self.default_detail).format(method=method)
        super(MethodNotAllowed, self).__init__(detail=detail, code=code)
class NotAcceptable(APIException):
    """APIException for content-negotiation failures (HTTP 406)."""
    status_code = status.HTTP_406_NOT_ACCEPTABLE
    default_detail = _('Could not satisfy the request Accept header.')
    default_code = 'not_acceptable'

    def __init__(self, detail=None, code=None, available_renderers=None):
        # Keep the renderers that *were* available so handlers can report them.
        self.available_renderers = available_renderers
        super(NotAcceptable, self).__init__(detail=detail, code=code)
class UnsupportedMediaType(APIException):
    """APIException for HTTP 415; the offending media type is interpolated
    into the default detail message."""
    status_code = status.HTTP_415_UNSUPPORTED_MEDIA_TYPE
    default_detail = _('Unsupported media type "{media_type}" in request.')
    default_code = 'unsupported_media_type'

    def __init__(self, media_type, detail=None, code=None):
        if detail is None:
            # Format the rejected media type into the default message.
            detail = force_text(self.default_detail).format(media_type=media_type)
        super(UnsupportedMediaType, self).__init__(detail=detail, code=code)
class Throttled(APIException):
    """APIException for HTTP 429; optionally appends a pluralized
    retry-after hint (in whole seconds) to the detail message."""
    status_code = status.HTTP_429_TOO_MANY_REQUESTS
    default_detail = _('Request was throttled.')
    extra_detail_singular = 'Expected available in {wait} second.'
    extra_detail_plural = 'Expected available in {wait} seconds.'
    default_code = 'throttled'

    def __init__(self, wait=None, detail=None, code=None):
        if detail is None:
            detail = force_text(self.default_detail)
        if wait is not None:
            # Round up so we never advertise a shorter wait than enforced.
            wait = math.ceil(wait)
            hint = ungettext(self.extra_detail_singular.format(wait=wait),
                             self.extra_detail_plural.format(wait=wait),
                             wait)
            detail = ' '.join((detail, force_text(hint)))
        self.wait = wait
        super(Throttled, self).__init__(detail=detail, code=code)
def server_error(request, *args, **kwargs):
    """
    Generic 500 error handler.
    """
    return JsonResponse({'error': 'Server Error (500)'},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def bad_request(request, exception, *args, **kwargs):
    """
    Generic 400 error handler.
    """
    return JsonResponse({'error': 'Bad Request (400)'},
                        status=status.HTTP_400_BAD_REQUEST)
| |
__author__ = 'thorwhalen'
import ut.parse.google as google
import pandas as pd
import re
from bs4.element import Tag
import ut.semantics.text_processors as semantics_text_processors
import ut.util.ulist as util_ulist
import ut.daf.manip as daf_manip
import ut.coll.order_conserving as colloc
#### Utils
LOCATION_LOCAL = 'LOCAL'
LOCATION_S3 = 'S3'
split_exp_01 = re.compile("[^&\w]*")
tokenizer_re = re.compile('[&\w]+')
def mk_terms_df(df, text_cols, id_cols=None, tokenizer_re=tokenizer_re):
    """Explode the text columns of ``df`` into one row per extracted term.

    :param df: source DataFrame
    :param text_cols: column name or list of column names containing text to tokenize
    :param id_cols: identifier columns carried along with each term; defaults to
        every column of ``df`` not listed in ``text_cols``
    :param tokenizer_re: regex whose ``findall`` matches are taken as terms
    :return: DataFrame with the ``id_cols`` plus a ``term`` column, one row per
        (id, term) occurrence, concatenated over all ``text_cols``
    """
    text_cols = util_ulist.ascertain_list(text_cols)
    if id_cols is None:
        id_cols = colloc.setdiff(df.columns, text_cols)
    else:
        id_cols = util_ulist.ascertain_list(id_cols)
        id_cols_missing = colloc.setdiff(id_cols, df.columns)
        if id_cols_missing: # if any columns are missing, try to get them from named index
            df = df.reset_index(id_cols_missing)
    dd = pd.DataFrame()
    for c in text_cols:
        # NOTE(review): d is a selection of df, and the next line assigns into
        # it — pandas may emit SettingWithCopyWarning here; a .copy() would
        # make the intent explicit. Behavior left unchanged.
        d = df[id_cols]
        # one list of term matches per row of the current text column
        d['term'] = [re.findall(tokenizer_re, x) for x in df[c]]
        # rollout_cols presumably expands each term list into one row per term
        # — TODO confirm against ut.daf.manip.rollout_cols
        d = daf_manip.rollout_cols(d, cols_to_rollout='term')
        dd = pd.concat([dd, d])
    return dd
def space_seperated_token_string_to_term_count(s):
    """Split ``s`` on single spaces and count the resulting tokens."""
    tokens = s.split(' ')
    return list_to_term_count(tokens)
def list_to_term_count(term_list):
    """
    takes a token list and
    returns a DataFrame indexed by term whose single 'count' column holds
    term counts (the number of times the term appeared in the google result)
    """
    # value_counts() does the counting in one pass. The previous
    # groupby('term').count() approach broke on newer pandas: count() over a
    # frame whose only column is the groupby key yields zero columns, so the
    # subsequent `df.columns = ['count']` raised, and a special case was
    # needed for single-term input. This version also handles an empty list.
    counts = pd.Series(term_list, dtype=object).value_counts().sort_index()
    df = counts.to_frame('count')
    df.index.name = 'term'
    return df
# def tokenize_text(gresult_text):
# return re.split(split_exp,gresult_text)
def get_text_from_source_gresult(gresults):
    """Extract one text blob from a google-results source.

    Accepts html/soup/filename (anything non-dict), a tag dict, or an already
    parsed info dict. Prefers the organic results' titles and snippets; falls
    back to known result-container tags, then to the whole soup.
    """
    if not isinstance(gresults, dict):  # if not a dict assume it's a soup, html, or filename thereof
        gresults = google.parse_tag_dict(google.mk_gresult_tag_dict(gresults))
    elif is_tag_dict(gresults):  # if gresults is a tag_dict, make it an info dict
        gresults = google.parse_tag_dict(gresults)
    # BUG FIX: text_concatinated was previously unbound when neither branch
    # below assigned it (no organic results and no known container tag),
    # raising UnboundLocalError at the final `if not text_concatinated` check.
    text_concatinated = ''
    if 'organic_results_list' in gresults:
        title_text_concatinated = ' '.join(
            [x['title_text'] for x in gresults['organic_results_list'] if 'title_text' in x])
        snippet_text_concatinated = ' '.join(
            [x['st_text'] for x in gresults['organic_results_list'] if 'st_text' in x])
        text_concatinated = title_text_concatinated + ' ' + snippet_text_concatinated
    else:
        search_for_tag = ['_ires', '_search', '_res', '_center_col']
        for t in search_for_tag:
            if t in gresults:
                text_concatinated = soup_to_text(gresults[t])
                break
    if not text_concatinated:  # if you still don't have anything
        text_concatinated = soup_to_text(gresults)  # ... just get the text from the whole soup
    return text_concatinated
def is_tag_dict(x):
    """Return True if ``x`` looks like a tag dict: a mapping whose first
    value is a non-empty sequence of bs4 ``Tag`` objects."""
    try:
        return isinstance(x[list(x.keys())[0]][0], Tag)
    except Exception:
        # Anything without keys()/indexing, or an empty container, is not a
        # tag dict. Narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        return False
def soup_to_text(element):
    """Collect every visible text node under ``element`` into a list."""
    return [t for t in element.findAll(text=True) if visible(t)]
def visible(element):
    """Return False for text nodes that a browser would not render."""
    hidden_parents = ('style', 'script', '[document]', 'head', 'title')
    if element.parent.name in hidden_parents:
        return False
    # HTML comments survive as text nodes; filter them out too.
    if re.match('<!--.*-->', str(element), re.UNICODE):
        return False
    return True
class TokenizerFactor(object):
    # NOTE(review): this class appears dead/broken — `simple` compiles a
    # regex into a local variable and implicitly returns None, and
    # `tokenizer` reads self.split_exp, which nothing here ever assigns.
    # `TokenizerFactory` below (note Factor vs Factory) looks like the
    # working replacement; confirm before deleting.
    @classmethod
    def simple(cls):
        split_exp = re.compile("[^&\w]*")
    def tokenizer(self, text):
        return re.split(self.split_exp, text)
class TokenizerFactory(object):
@classmethod
def get_simple_aw_tokenizer(cls):
return cls.NegAlphabetTokenizer(split_exp=re.compile("[^&\w]*"))
class NegAlphabetTokenizer(object):
def __init__(self,split_exp):
self.split_exp = split_exp
def tokenize(self,text):
return re.split(self.split_exp, text)
class TermStatsMaker(object):
    """Configurable pipeline: source -> text -> preprocessed text -> tokens -> term stats.

    Each stage is a pluggable callable supplied at construction time; the
    defaults wire together the helpers defined in this module (google-result
    text extraction, lower/ascii preprocessing, the simple '&'/word tokenizer,
    and token counting).
    """
    def __init__(self,
                 get_text_from_source=get_text_from_source_gresult,
                 preprocess_text=semantics_text_processors.preprocess_text_lower_ascii,
                 tokenizer=TokenizerFactory.get_simple_aw_tokenizer().tokenize,
                 mk_term_stats=list_to_term_count
                 ):
        # Store the four pipeline stages; term_stats() applies them in order.
        self.get_text_from_source = get_text_from_source
        self.preprocess_text = preprocess_text
        self.tokenizer = tokenizer
        self.mk_term_stats = mk_term_stats
    def term_stats(self, text_source):
        """Run the full pipeline on ``text_source`` and return the term stats."""
        #return self.mk_term_stats(self.tokenizer(self.preprocess_text(self.get_text_from_source(text_source))))
        text = self.get_text_from_source(text_source)
        precessed_text = self.preprocess_text(text)
        token_list = self.tokenizer(precessed_text)
        term_stats = self.mk_term_stats(token_list)
        return term_stats
    # # consider using composition as such:
    # return self.mk_term_stats(
    #     self.tokenize_text(
    #         self.preprocess_text(
    #             self.get_text_from_source(text_source)
    #         )
    #     )
    # )
    # @classmethod
    # def mk_term_stats_maker(cls):
    #     return TermStatsMaker(
    #         get_text_from_source=get_text_from_source_gresult,
    #         preprocess_text=semantics_text_processors.preprocess_text_lower_ascii,
    #         tokenizer=TokenizerFactory.get_simple_aw_tokenizer().tokenize,
    #         mk_term_stats=list_to_term_count
    #     )
    @classmethod
    def mk_term_stats_maker(cls):
        """Return a TermStatsMaker built with the module's default stages."""
        return TermStatsMaker(
            get_text_from_source=get_text_from_source_gresult,
            preprocess_text=semantics_text_processors.preprocess_text_lower_ascii,
            tokenizer=TokenizerFactory.get_simple_aw_tokenizer().tokenize,
            mk_term_stats=list_to_term_count
        )
    @classmethod
    def mk_term_stats_maker_for_hotels(cls, term_map=None, location=LOCATION_LOCAL):
        # NOTE(review): the hotel-specific implementation below was disabled
        # (it depended on msvenere factories); calling this currently only
        # prints a reminder and returns None.
        print("oh no! you commented this out!!")
        #if term_map is None:
        #    import msvenere.factories as venere_factories
        #    if location==LOCATION_LOCAL:
        #        ds = venere_factories.data_source_for_local_term_stats_maker()
        #    elif location==LOCATION_S3:
        #        ds = venere_factories.data_source_for_s3_term_stats_maker()
        #    term_map = ds.d.term_map
        #return TermStatsMaker(
        #    get_text_from_source=get_text_from_source_gresult,
        #    preprocess_text=venere_aw.erenev_kw_str_term_replacer(),
        #    # preprocess_text=semantics_text_processors.lower_ascii_term_replacer(map_spec=term_map),
        #    tokenizer=TokenizerFactory.get_simple_aw_tokenizer().tokenize,
        #    mk_term_stats=list_to_term_count
        #)
# def mk_term_count_from_google_results(gresults):
# """
# takes a google result (in the form of html, filename thereof, soup, or info_dict, and
# returns a Series whose indices are terms and values are term counts
# (the number of times the term appeared in the google result)
# """
# # get preprocessed text from gresults
# gresults = preprocess_text_lower_ascii(get_text_from_source_gresult(gresults))
# # tokenize this text
# toks = tokenize_text(gresults)
# # make a dataframe of term counts TODO: Explore faster ways to do this
# df = pd.DataFrame(toks,columns=['token'])
# df = df.groupby('token').count()
# df.columns = ['count']
# df = df.sort(columns=['count'],ascending=False) # TODO: Take out sorting at some point since it's unecessary (just for diagnosis purposes)
# return df
| |
from __future__ import print_function
from aggregation_api import AggregationAPI
import pandas
import classification
import yaml
import parser
import json
import math
import csv_output
def extract_subject_id(subject_data):
    """
    extract the subject id for each subject
    :param subject_data: JSON string whose single top-level key is the subject id
    :return: the subject id as an int
    """
    json_subject = json.loads(subject_data)
    # next(iter(...)) works on both Python 2 and 3; the previous
    # `json_subject.keys()[0]` raises TypeError on Python 3, where keys()
    # returns a non-indexable view.
    return int(next(iter(json_subject)))
def load_json(json_string):
    """Parse a JSON document from ``json_string`` (thin json.loads wrapper)."""
    return json.loads(json_string)
class LocalAggregationAPI(AggregationAPI):
    """
    AggregationAPI variant that reads classifications from a local CSV export
    (instead of the production/staging databases) and keeps aggregation
    results in an in-memory dict keyed by workflow id, then subject id.
    """
    def __init__(self,project_id,csv_classification_file):
        AggregationAPI.__init__(self,project_id,"development")
        # read in the csv file as a dataframe (pandas)
        self.classifications_dataframe = pandas.read_csv(csv_classification_file)
        # extract the subject id for each subject - based on the subject data field
        self.classifications_dataframe["subject_id"] = self.classifications_dataframe["subject_data"].map(lambda x: extract_subject_id(x))
        # workflow_id -> {subject_id -> aggregation result}
        self.aggregation_results = {}

    def __setup__(self):
        """
        set up all the connections to panoptes and different databases
        :return:
        """
        print("setting up")
        # just for when we are treating an ouroboros project like a panoptes one
        # in which the subject ids will be zooniverse_ids, which are strings
        self.subject_id_type = "int"
        # todo - some time in the far future - complete support for expert annotations
        self.experts = []
        # load in the project id (a number)
        # if one isn't provided, tried reading it from the yaml config file
        # todo - if we are not using a secure connection, this forces the project_id
        # todo - to be provided as a param (since we don't read in the yaml file)
        # todo - probably want to change that at some point as there is value in
        # todo - non logged in users having a yaml file (where they can give csv classification files etc.)
        #########
        # everything that follows assumes you have a secure connection to Panoptes
        # plus the DBs (either production or staging)
        param_file = open("/app/config/aggregation.yml","rb")
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary Python objects from the config; prefer yaml.safe_load
        # unless the config deliberately uses python-specific tags.
        param_details = yaml.load(param_file)
        environment_details = param_details[self.environment]
        # do we have a specific date as the minimum date for this project?
        if (self.project_id in param_details) and ("default_date" in param_details[self.project_id]):
            self.previous_runtime = parser.parse(param_details[self.project_id]["default_date"])
        print("trying secure Panoptes connection")
        self.__panoptes_connect__(environment_details)
        self.__get_project_details__()
        # todo - refactor all this?
        # there may be more than one workflow associated with a project - read them all in
        # and set up the associated tasks
        self.workflows,self.versions,self.instructions,self.updated_at_timestamps = self.__get_workflow_details__()
        print("workflows are " + str(self.workflows))
        self.retirement_thresholds = self.__get_retirement_threshold__()
        self.workflow_names = self.__get_workflow_names__()
        # set up the clustering algorithms
        self.__setup_clustering_algs__()
        # load the default classification algorithm
        self.__set_classification_alg__(classification.VoteCount)
        # a bit of a sanity check in case I forget to change back up before uploading
        # production and staging should ALWAYS pay attention to the version and only
        # aggregate retired subjects
        if self.environment in ["production","staging"]:
            self.only_retired_subjects = True
        # bit of a stop gap measure - stores how many people have classified a given subject
        self.classifications_per_subject = {}
        # do we want to aggregate over only retired subjects?
        # do we want to aggregate over only subjects that have been retired/classified since
        # the last time we ran the code?
        self.only_recent_subjects = False

    def __migrate__(self,workflow_id,version,subject_set=None):
        """
        since we don't actually have to migrate classifications between databases, just return the set of all
        subjects which have had classifications
        :param workflow_id:
        :param version:
        :param subject_set:
        :return:
        """
        data_frame = self.classifications_dataframe
        return set(data_frame["subject_id"])

    def __yield_annotations__(self,workflow_id,subject_set):
        """
        get all of the annotations for this particular workflow id and each subject in this subject set
        :param workflow_id:
        :param subject_set:
        :return:
        """
        # what is the current workflow version for this particular workflow id
        # (invariant across subjects, so computed once outside the loop)
        version = int(math.floor(float(self.versions[workflow_id])))
        all_classifications = self.classifications_dataframe
        for subject_id in subject_set:
            # select only those annotations for the current subject id.
            # BUG FIX: filter the full dataframe each iteration - the original
            # code reassigned the same variable, so every subject after the
            # first was filtered against an already-reduced frame and matched
            # nothing.
            data_frame = all_classifications[
                (all_classifications.subject_id == int(subject_id)) &
                (all_classifications.workflow_id == workflow_id)]
            data_frame = data_frame[(data_frame.workflow_version >= version)]
            users_per_subjects = data_frame["user_id"]
            annotations_per_subjects = data_frame["annotations"]
            # todo - load image dimensions
            yield int(subject_id),users_per_subjects,annotations_per_subjects,(None,None)
        # BUG FIX (PEP 479): raising StopIteration inside a generator becomes
        # a RuntimeError on Python 3.7+; simply returning ends the generator.
        return

    def __get_previously_aggregated__(self,workflow_id):
        """
        only useful for doing upserts - ie in production mode
        :param workflow_id:
        :return:
        """
        return None

    def __upsert_results__(self,workflow_id,aggregations,previously_aggregated):
        """
        store the results in a dictionary - the term upsert only makes sense if we are storing to a db
        :param workflow_id:
        :param aggregations:
        :param previously_aggregated:
        :return:
        """
        # convert to int to be safe
        workflow_id = int(workflow_id)
        print("upserting " + str(workflow_id))
        if workflow_id not in self.aggregation_results:
            self.aggregation_results[workflow_id] = dict()
        for subject_id,agg in aggregations.items():
            self.aggregation_results[workflow_id][subject_id] = agg

    def __count_subjects_classified__(self,workflow_id):
        """
        return a count of all the subjects classified
        :param workflow_id:
        :return:
        """
        workflow_id = int(workflow_id)
        # if we haven't saved any aggregations - the total is 0
        if workflow_id not in self.aggregation_results:
            return 0
        else:
            return len(self.aggregation_results[int(workflow_id)])

    def __yield_aggregations__(self,workflow_id,subject_set=None):
        """
        return all of the aggregations for the given workflow_id/subject_set
        :param workflow_id:
        :param subject_set:
        :return:
        """
        workflow_id = int(workflow_id)
        # stop if we don't have any aggregations. BUG FIX (PEP 479): the
        # original raised StopIteration here and after the loop, which
        # surfaces as RuntimeError on Python 3.7+.
        if workflow_id not in self.aggregation_results:
            return
        for subject_id,aggregation in self.aggregation_results[workflow_id].items():
            # if we have provided a filter for the subject ids and the current id is not in the filter
            # skip this aggregation
            if (subject_set is not None) and (subject_id not in subject_set):
                continue
            yield subject_id,aggregation
if __name__ == "__main__":
    # Driver/example run: aggregate a local CSV classification export for
    # project 63 and write the results out as CSV. The input path is
    # developer-specific and hard-coded.
    project = LocalAggregationAPI(63,"/home/ggdhines/Downloads/copy-of-kitteh-zoo-subjects-classifications.csv")
    project.__setup__()
    project.__aggregate__()
    # CsvOut is a context manager; __write_out__ emits the aggregation files.
    with csv_output.CsvOut(project) as c:
        c.__write_out__()
| |
''' Various kinds of data table (data grid) widgets.
'''
from __future__ import absolute_import
from ...core.enums import DateFormat, FontStyle, NumeralLanguage, TextAlign, RoundingFunction
from ...core.has_props import abstract
from ...core.properties import Bool, Color, Either, Enum, Float, Instance, Int, List, Override, String
from ...model import Model
from ..sources import DataSource, CDSView
from .widget import Widget
@abstract
class CellFormatter(Model):
    ''' Abstract base class for data table's cell formatters.

    Not usable directly; see concrete subclasses such as ``StringFormatter``
    and ``NumberFormatter`` below.

    '''
@abstract
class CellEditor(Model):
    ''' Abstract base class for data table's cell editors.

    Not usable directly; concrete editor models derive from this.

    '''
class StringFormatter(CellFormatter):
    ''' Basic string cell formatter.

    Renders cell values as text, with optional font style, alignment and
    color applied.

    '''

    font_style = Enum(FontStyle, default="normal", help="""
    An optional text font style, e.g. bold, italic.
    """)

    text_align = Enum(TextAlign, default="left", help="""
    An optional text align, i.e. left, center or right.
    """)

    text_color = Color(help="""
    An optional text color. See :class:`bokeh.core.properties.Color` for
    details.
    """)
class NumberFormatter(StringFormatter):
    ''' Number cell formatter.

    Formats numeric cell values according to a numbro-style format string
    (see the tables in ``format`` below), in addition to the font and
    alignment options inherited from ``StringFormatter``.

    '''

    format = String("0,0", help="""
    The number format, as defined in the following tables:
    **NUMBERS**:
    ============ ============== ===============
    Number       Format         String
    ============ ============== ===============
    10000        '0,0.0000'     10,000.0000
    10000.23     '0,0'          10,000
    10000.23     '+0,0'         +10,000
    -10000       '0,0.0'        -10,000.0
    10000.1234   '0.000'        10000.123
    10000.1234   '0[.]00000'    10000.12340
    -10000       '(0,0.0000)'   (10,000.0000)
    -0.23        '.00'          -.23
    -0.23        '(.00)'        (.23)
    0.23         '0.00000'      0.23000
    0.23         '0.0[0000]'    0.23
    1230974      '0.0a'         1.2m
    1460         '0 a'          1 k
    -104000      '0a'           -104k
    1            '0o'           1st
    52           '0o'           52nd
    23           '0o'           23rd
    100          '0o'           100th
    ============ ============== ===============
    **CURRENCY**:
    =========== =============== =============
    Number      Format          String
    =========== =============== =============
    1000.234    '$0,0.00'       $1,000.23
    1000.2      '0,0[.]00 $'    1,000.20 $
    1001        '$ 0,0[.]00'    $ 1,001
    -1000.234   '($0,0)'        ($1,000)
    -1000.234   '$0.00'         -$1000.23
    1230974     '($ 0.00 a)'    $ 1.23 m
    =========== =============== =============
    **BYTES**:
    =============== =========== ============
    Number          Format      String
    =============== =========== ============
    100             '0b'        100B
    2048            '0 b'       2 KB
    7884486213      '0.0b'      7.3GB
    3467479682787   '0.000 b'   3.154 TB
    =============== =========== ============
    **PERCENTAGES**:
    ============= ============= ===========
    Number        Format        String
    ============= ============= ===========
    1             '0%'          100%
    0.974878234   '0.000%'      97.488%
    -0.43         '0 %'         -43 %
    0.43          '(0.000 %)'   43.000 %
    ============= ============= ===========
    **TIME**:
    ============ ============== ============
    Number       Format         String
    ============ ============== ============
    25           '00:00:00'     0:00:25
    238          '00:00:00'     0:03:58
    63846        '00:00:00'     17:44:06
    ============ ============== ============
    For the complete specification, see http://numbrojs.com/format.html
    """)

    language = Enum(NumeralLanguage, default="en", help="""
    The language to use for formatting language-specific features (e.g. thousands separator).
    """)

    rounding = Enum(RoundingFunction, help="""
    Rounding functions (round, floor, ceil) and their synonyms (nearest, rounddown, roundup).
    """)
class BooleanFormatter(CellFormatter):
    ''' Boolean (check mark) cell formatter.

    Renders truthy cell values as the configured check-mark icon.

    '''

    icon = Enum('check', 'check-circle', 'check-circle-o', 'check-square', 'check-square-o', help="""
    The icon visualizing the check mark.
    """)
class DateFormatter(CellFormatter):
    ''' Date cell formatter.

    Formats cell values as dates, using either one of the predefined
    format names or an arbitrary ``strftime`` format string (rendering
    happens client-side in BokehJS via the ``timezone`` library).
    '''

    # NOTE(review): the default 'ISO-8601' corresponds to "%Y-%m-%d" (see table below).
    format = Either(Enum(DateFormat), String, default='ISO-8601', help="""
    The date format can be any standard `strftime`_ format string, as well
    as any of the following predefined format names:

    ================================================ ================== ===================
    Format name(s)                                   Format string      Example Output
    ================================================ ================== ===================
    ``ATOM`` / ``W3C`` / ``RFC-3339`` / ``ISO-8601`` ``"%Y-%m-%d"``     2014-03-01
    ``COOKIE``                                       ``"%a, %d %b %Y"`` Sat, 01 Mar 2014
    ``RFC-850``                                      ``"%A, %d-%b-%y"`` Saturday, 01-Mar-14
    ``RFC-1123`` / ``RFC-2822``                      ``"%a, %e %b %Y"`` Sat, 1 Mar 2014
    ``RSS`` / ``RFC-822`` / ``RFC-1036``             ``"%a, %e %b %y"`` Sat, 1 Mar 14
    ``TIMESTAMP``                                    (ms since epoch)   1393632000000
    ================================================ ================== ===================

    Note that in the table some of the format names are synonymous, with
    identical format names separated by slashes.

    This list of supported `strftime`_ format codes is reproduced below.

    %a
        The abbreviated name of the day of the week according to the
        current locale.

    %A
        The full name of the day of the week according to the current
        locale.

    %b
        The abbreviated month name according to the current locale.

    %B
        The full month name according to the current locale.

    %c
        The preferred date and time representation for the current
        locale.

    %C
        The century number (year/100) as a 2-digit integer.

    %d
        The day of the month as a decimal number (range 01 to 31).

    %D
        Equivalent to %m/%d/%y. (Americans should note that in many
        other countries %d/%m/%y is rather common. This means that in
        international context this format is ambiguous and should not
        be used.)

    %e
        Like %d, the day of the month as a decimal number, but a
        leading zero is replaced by a space.

    %f
        Microsecond as a decimal number, zero-padded on the left (range
        000000-999999). This is an extension to the set of directives
        available to `timezone`_.

    %F
        Equivalent to %Y-%m-%d (the ISO 8601 date format).

    %G
        The ISO 8601 week-based year with century as a decimal number.
        The 4-digit year corresponding to the ISO week number (see %V).
        This has the same format and value as %Y, except that if the
        ISO week number belongs to the previous or next year, that year
        is used instead.

    %g
        Like %G, but without century, that is, with a 2-digit year (00-99).

    %h
        Equivalent to %b.

    %H
        The hour as a decimal number using a 24-hour clock (range 00
        to 23).

    %I
        The hour as a decimal number using a 12-hour clock (range 01
        to 12).

    %j
        The day of the year as a decimal number (range 001 to 366).

    %k
        The hour (24-hour clock) as a decimal number (range 0 to 23).
        Single digits are preceded by a blank. (See also %H.)

    %l
        The hour (12-hour clock) as a decimal number (range 1 to 12).
        Single digits are preceded by a blank. (See also %I.) (TZ)

    %m
        The month as a decimal number (range 01 to 12).

    %M
        The minute as a decimal number (range 00 to 59).

    %n
        A newline character. Bokeh text does not currently support
        newline characters.

    %N
        Nanosecond as a decimal number, zero-padded on the left (range
        000000000-999999999). Supports a padding width specifier, i.e.
        %3N displays 3 leftmost digits. However, this is only accurate
        to the millisecond level of precision due to limitations of
        `timezone`_.

    %p
        Either "AM" or "PM" according to the given time value, or the
        corresponding strings for the current locale. Noon is treated
        as "PM" and midnight as "AM".

    %P
        Like %p but in lowercase: "am" or "pm" or a corresponding
        string for the current locale.

    %r
        The time in a.m. or p.m. notation. In the POSIX locale this
        is equivalent to %I:%M:%S %p.

    %R
        The time in 24-hour notation (%H:%M). For a version including
        the seconds, see %T below.

    %s
        The number of seconds since the Epoch, 1970-01-01 00:00:00
        +0000 (UTC).

    %S
        The second as a decimal number (range 00 to 60). (The range
        is up to 60 to allow for occasional leap seconds.)

    %t
        A tab character. Bokeh text does not currently support tab
        characters.

    %T
        The time in 24-hour notation (%H:%M:%S).

    %u
        The day of the week as a decimal, range 1 to 7, Monday being 1.
        See also %w.

    %U
        The week number of the current year as a decimal number, range
        00 to 53, starting with the first Sunday as the first day of
        week 01. See also %V and %W.

    %V
        The ISO 8601 week number (see NOTES) of the current year as a
        decimal number, range 01 to 53, where week 1 is the first week
        that has at least 4 days in the new year. See also %U and %W.

    %w
        The day of the week as a decimal, range 0 to 6, Sunday being 0.
        See also %u.

    %W
        The week number of the current year as a decimal number, range
        00 to 53, starting with the first Monday as the first day of
        week 01.

    %x
        The preferred date representation for the current locale
        without the time.

    %X
        The preferred time representation for the current locale
        without the date.

    %y
        The year as a decimal number without a century (range 00 to 99).

    %Y
        The year as a decimal number including the century.

    %z
        The +hhmm or -hhmm numeric timezone (that is, the hour and
        minute offset from UTC).

    %Z
        The timezone name or abbreviation.

    %%
        A literal '%' character.

    .. warning::
        The client library BokehJS uses the `timezone`_ library to
        format datetimes. The inclusion of the list below is based on the
        claim that `timezone`_ makes to support "the full compliment
        of GNU date format specifiers." However, this claim has not
        been tested exhaustively against this list. If you find formats
        that do not function as expected, please submit a `github issue`_,
        so that the documentation can be updated appropriately.

    .. _strftime: http://man7.org/linux/man-pages/man3/strftime.3.html
    .. _timezone: http://bigeasy.github.io/timezone/
    .. _github issue: https://github.com/bokeh/bokeh/issues

    """)
class HTMLTemplateFormatter(CellFormatter):
    ''' HTML formatter using a template.

    This uses Underscore's `template` method and syntax. http://underscorejs.org/#template

    The formatter has access to other items in the row via the `dataContext` object passed to the formatter.
    So, for example, if another column in the datasource was named `url`, the template could access it as:

    .. code-block:: jinja

        <a href="<%= url %>"><%= value %></a>

    To use a different set of template delimiters, pass the appropriate values for `evaluate`, `interpolate`,
    or `escape`. See the Underscore `template` documentation for more information. http://underscorejs.org/#template

    Example: Simple HTML template to format the column value as code.

    .. code-block:: python

        HTMLTemplateFormatter(template='<code><%= value %></code>')

    Example: Use values from other columns (`manufacturer` and `model`) to build a hyperlink.

    .. code-block:: python

        HTMLTemplateFormatter(template=
            '<a href="https://www.google.com/search?q=<%= manufacturer %>+<%= model %>" target="_blank"><%= value %></a>'
        )
    '''

    # Underscore.js template applied per cell; `<%= value %>` interpolates the cell value.
    template = String('<%= value %>', help="""
    Template string to be used by Underscore's template method.
    """)
class StringEditor(CellEditor):
    ''' Basic string cell editor with auto-completion.
    '''

    # Candidate strings offered as auto-complete suggestions while typing.
    completions = List(String, help="""
    An optional list of completion strings.
    """)
class TextEditor(CellEditor):
    ''' Multi-line string cell editor.
    '''
class SelectEditor(CellEditor):
    ''' Select cell editor.

    Edits the cell by choosing one value from a fixed drop-down list.
    '''

    # The allowed values presented in the drop-down.
    options = List(String, help="""
    The list of options to select from.
    """)
class PercentEditor(CellEditor):
    ''' ``IntEditor`` optimized for editing percentages.
    '''
class CheckboxEditor(CellEditor):
    ''' Boolean value cell editor.
    '''
class IntEditor(CellEditor):
    ''' Spinner-based integer cell editor.
    '''

    # Increment applied by the spinner controls.
    step = Int(1, help="""
    The major step value.
    """)
class NumberEditor(CellEditor):
    ''' Spinner-based number cell editor.
    '''

    # Increment applied by the spinner controls.
    step = Float(0.01, help="""
    The major step value.
    """)
class TimeEditor(CellEditor):
    ''' Spinner-based time cell editor.
    '''
class DateEditor(CellEditor):
    ''' Calendar-based date cell editor.
    '''
class TableColumn(Model):
    ''' Table column widget.

    Describes a single column of a ``DataTable``: which data source
    field it displays, how cells are formatted and edited, and how the
    column participates in sorting.
    '''

    # Column name in the data source this column renders.
    field = String(help="""
    The name of the field mapping to a column in the data source.
    """)

    # Header label; falls back to ``field`` when unset.
    title = String(help="""
    The title of this column. If not set, column's data field is
    used instead.
    """)

    width = Int(300, help="""
    The width or maximum width (depending on data table's configuration)
    in pixels of this column.
    """)

    # Default factories (lambdas) give each column its own formatter/editor instance.
    formatter = Instance(CellFormatter, lambda: StringFormatter(), help="""
    The cell formatter for this column. By default, a simple string
    formatter is used.
    """)

    editor = Instance(CellEditor, lambda: StringEditor(), help="""
    The cell editor for this column. By default, a simple string editor
    is used.
    """)

    sortable = Bool(True, help="""
    Whether this column is sortable or not. Note that data table has
    to have sorting enabled to allow sorting in general.
    """)

    default_sort = Enum("ascending", "descending", help="""
    The default sorting order. By default ``ascending`` order is used.
    """)
@abstract
class TableWidget(Widget):
    ''' Abstract base class for data table (data grid) widgets.
    '''

    # The data source whose columns the table displays.
    source = Instance(DataSource, help="""
    The source of data for the widget.
    """)

    view = Instance(CDSView, help="""
    A view into the data source to use when rendering table rows. A default view
    of the entire data source is created if a view is not passed in during
    initialization.
    """)

    def __init__(self, **kw):
        """Create the widget; synthesize a full-source ``CDSView`` if none was given."""
        super(TableWidget, self).__init__(**kw)
        # Only build the default view when the caller did not supply one.
        if "view" not in kw:
            self.view = CDSView(source=self.source)
class DataTable(TableWidget):
    ''' Two dimensional grid for visualisation and editing large amounts
    of data.

    Rows come from the widget's ``source``/``view`` (inherited from
    ``TableWidget``); columns are described by ``TableColumn`` models.
    '''

    # The per-column configuration (field, formatter, editor, ...).
    columns = List(Instance(TableColumn), help="""
    The list of child column widgets.
    """)

    fit_columns = Bool(True, help="""
    Whether columns should be fit to the available width. This results in no
    horizontal scrollbar showing up, but data can get unreadable if there is
    no enough space available. If set to ``True``, columns' width is
    understood as maximum width.
    """)

    sortable = Bool(True, help="""
    Allows to sort table's contents. By default natural order is preserved.
    To sort a column, click on it's header. Clicking one more time changes
    sort direction. Use Ctrl + click to return to natural order. Use
    Shift + click to sort multiple columns simultaneously.
    """)

    reorderable = Bool(True, help="""
    Allows the reordering of a tables's columns. To reorder a column,
    click and drag a table's header to the desired location in the table.
    The columns on either side will remain in their previous order.
    """)

    editable = Bool(False, help="""
    Allows to edit table's contents. Needs cell editors to be configured on
    columns that are required to be editable.
    """)

    selectable = Either(Bool(True), Enum("checkbox"), help="""
    Whether a table's rows can be selected or not. Using ``checkbox`` is
    equivalent to ``True``, but makes selection visible through a checkbox
    for each row, instead of highlighting rows. Multiple selection is
    allowed and can be achieved by either clicking multiple checkboxes (if
    enabled) or using Shift + click on rows.
    """)

    index_position = Int(0, help="""
    Where among the list of columns to insert a column displaying the row
    index. Negative indices are supported, and specify an index position
    from the end of the list of columns (i.e. standard Python behaviour).

    To prevent the index column from being added, set to None.

    If the absolute value of index_position is larger than the length of
    the columns, then the index will appear at the beginning or end, depending
    on the sign.
    """)

    index_header = String("#", help="""
    The column header to display for the index column, if it is present.
    """)

    index_width = Int(40, help="""
    The width of the index column, if present.
    """)

    scroll_to_selection = Bool(True, help="""
    Whenever a selection is made on the data source, scroll the selected
    rows into the table's viewport if none of the selected rows are already
    in the viewport.
    """)

    header_row = Bool(True, help="""
    Whether to show a header row with column names at the top of the table.
    """)

    # Data tables get a taller default height than the generic Widget default.
    height = Override(default=400)
| |
#!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to download all datasets and create .tfrecord files.
"""
import collections
import gzip
import itertools
import os
import tarfile
import tempfile
import zipfile
from functools import partial, reduce
from urllib import request
import wget
import h5py
import numpy as np
import scipy.io
import tensorflow as tf
from PIL import Image
from absl import app
from google_drive_downloader import GoogleDriveDownloader as gdd
from objax.util.image import to_png
from tqdm import trange, tqdm
from shared.data import core as libml_data
# Where raw downloads are cached; overridable via environment variable.
if 'NEXTMATCH_DOWNLOAD_PATH' in os.environ:
    DOWNLOAD_DIR = os.environ['NEXTMATCH_DOWNLOAD_PATH']
else:
    DOWNLOAD_DIR = os.path.join(libml_data.DATA_DIR, 'Downloads')

# Source URLs per dataset. DomainNet entries are %-templates taking a
# (prefix, suffix) pair; mnist/svhn/syndigit entries are {}-templates
# taking a split/file name; office31 holds a Google Drive file id.
URLS = {
    'cifar10': 'https://www.cs.toronto.edu/~kriz/cifar-10-matlab.tar.gz',
    'cifar100': 'https://www.cs.toronto.edu/~kriz/cifar-100-matlab.tar.gz',
    'domainnet': {
        'clipart': 'http://csr.bu.edu/ftp/visda/2019/multi-source/groundtruth/%sclipart%s',
        'infograph': 'http://csr.bu.edu/ftp/visda/2019/multi-source/%sinfograph%s',
        'painting': 'http://csr.bu.edu/ftp/visda/2019/multi-source/groundtruth/%spainting%s',
        'quickdraw': 'http://csr.bu.edu/ftp/visda/2019/multi-source/%squickdraw%s',
        'real': 'http://csr.bu.edu/ftp/visda/2019/multi-source/%sreal%s',
        'sketch': 'http://csr.bu.edu/ftp/visda/2019/multi-source/%ssketch%s'
    },
    'mnist': 'http://yann.lecun.com/exdb/mnist/{}',
    'office31': dict(images='0B4IapRTv9pJ1WGZVd1VDMmhwdlE'),
    'svhn': 'http://ufldl.stanford.edu/housenumbers/{}_32x32.mat',
    'mnistm': 'https://www.dropbox.com/s/rb7pr65fo26h9lh/mnist_m.tar.gz?dl=1',
    'syndigit': 'https://storage.googleapis.com/kihyuks-0001/SynDigits/synth_{}_32x32.mat',
    'usps': 'https://storage.googleapis.com/kihyuks-0001/usps.h5',
}
def _encode_png(images):
    """Encode every image in the leading axis of *images* as PNG bytes.

    Shows a transient tqdm progress bar while encoding.
    """
    encoded = []
    for idx in trange(images.shape[0], desc='PNG Encoding', leave=False):
        encoded.append(to_png(images[idx]))
    return encoded
def _image_resize(x, size: int):
    """Resizing that tries to minimize artifacts.

    Upscales small images directly with bicubic; for larger images it
    first shrinks bilinearly to the nearest multiple of ``size`` so the
    final BOX reduction averages whole pixel blocks.
    x: a PIL Image (assumed roughly square — TODO confirm for non-square inputs).
    Returns a PIL Image of size (size, size).
    """
    original = max(x.size)
    # Small images: plain bicubic upscale.
    if original < size:
        return x.resize((size, size), Image.BICUBIC)
    # Largest multiple of `size` not exceeding the original edge length.
    nearest = original - (original % size)
    if nearest != original:
        x = x.resize((nearest, nearest), Image.BILINEAR)
    if nearest != size:
        # Integer-factor reduction: BOX averages exact pixel blocks.
        x = x.resize((size, size), Image.BOX)
    # Guard for non-square intermediates.
    if x.size[0] != x.size[1]:
        x = x.resize((size, size), Image.BICUBIC)
    return x
def _load_cifar10():
    """Download CIFAR-10 (matlab archive) and return PNG-encoded train/test splits."""

    def unflatten(images):
        # (N, 3072) plane-major rows -> (N, 32, 32, 3) HWC images.
        return np.transpose(images.reshape((images.shape[0], 3, 32, 32)), [0, 2, 3, 1])

    with tempfile.NamedTemporaryFile() as f:
        request.urlretrieve(URLS['cifar10'], f.name)
        tar = tarfile.open(fileobj=f)
        train_data_batches, train_data_labels = [], []
        # CIFAR-10 training data ships as five 10k-image batches.
        for batch in range(1, 6):
            data_dict = scipy.io.loadmat(tar.extractfile('cifar-10-batches-mat/data_batch_{}.mat'.format(batch)))
            train_data_batches.append(data_dict['data'])
            train_data_labels.append(data_dict['labels'].flatten())
        train_set = {'images': np.concatenate(train_data_batches, axis=0),
                     'labels': np.concatenate(train_data_labels, axis=0)}
        data_dict = scipy.io.loadmat(tar.extractfile('cifar-10-batches-mat/test_batch.mat'))
        test_set = {'images': data_dict['data'],
                    'labels': data_dict['labels'].flatten()}
        train_set['images'] = _encode_png(unflatten(train_set['images']))
        test_set['images'] = _encode_png(unflatten(test_set['images']))
        return dict(train=train_set, test=test_set)
def _load_cifar100():
    """Download CIFAR-100 (matlab archive); uses the 100 fine labels."""

    def unflatten(images):
        # (N, 3072) plane-major rows -> (N, 32, 32, 3) HWC images.
        return np.transpose(images.reshape((images.shape[0], 3, 32, 32)), [0, 2, 3, 1])

    with tempfile.NamedTemporaryFile() as f:
        request.urlretrieve(URLS['cifar100'], f.name)
        tar = tarfile.open(fileobj=f)
        data_dict = scipy.io.loadmat(tar.extractfile('cifar-100-matlab/train.mat'))
        train_set = {'images': data_dict['data'],
                     'labels': data_dict['fine_labels'].flatten()}
        data_dict = scipy.io.loadmat(tar.extractfile('cifar-100-matlab/test.mat'))
        test_set = {'images': data_dict['data'],
                    'labels': data_dict['fine_labels'].flatten()}
        train_set['images'] = _encode_png(unflatten(train_set['images']))
        test_set['images'] = _encode_png(unflatten(test_set['images']))
        return dict(train=train_set, test=test_set)
def _load_domainnet(domain: str, size: int) -> dict:
    """Download one DomainNet domain and return resized, PNG-encoded splits.

    Args:
        domain: one of the six DomainNet domains.
        size: target square image edge length in pixels.

    Returns:
        dict with 'train', 'test' and 'all' splits, each mapping
        'images' -> list of PNG bytes and 'labels' -> int array.
    """
    assert domain in ('clipart', 'infograph', 'painting', 'quickdraw', 'real', 'sketch')
    path = os.path.join(DOWNLOAD_DIR, 'DomainNet')
    os.makedirs(path, exist_ok=True)
    # files[0] = image zip, files[1]/files[2] = train/test split listings.
    prefixes = '', 'txt/', 'txt/'
    suffixes = '.zip', '_train.txt', '_test.txt'
    files = [os.path.join(path, f'{domain}{suffix}') for suffix in suffixes]
    for f, prefix, suffix in zip(files, prefixes, suffixes):
        if not os.path.exists(f):
            print(f'Downloading {URLS["domainnet"][domain] % (prefix, suffix)}')
            request.urlretrieve(URLS['domainnet'][domain] % (prefix, suffix), f)

    def _read_split(filename):
        # Each listing line is "<path/in/zip> <integer label>".
        # Fix: the original left these file handles unclosed.
        with open(filename, 'r') as fh:
            return [(k, int(v)) for k, v in (line.split() for line in fh)]

    train = _read_split(files[1])
    test = _read_split(files[2])
    image = {}
    # Fix: close the ZipFile deterministically via a context manager.
    with zipfile.ZipFile(files[0]) as zipped:
        for info in tqdm(zipped.infolist(), 'Resizing images', leave=False):
            if info.is_dir():
                continue
            with zipped.open(info) as f:
                x = np.array(_image_resize(Image.open(f), size))
            image[info.filename] = to_png(x)
    # Deterministic shuffle of the training split only.
    np.random.seed(0)
    np.random.shuffle(train)
    return dict(all=dict(images=[image[k] for k, _ in train + test], labels=np.array([v for _, v in train + test])),
                test=dict(images=[image[k] for k, _ in test], labels=np.array([v for _, v in test])),
                train=dict(images=[image[k] for k, _ in train], labels=np.array([v for _, v in train])))
def _load_mnist():
    """Download MNIST IDX files and return PNG-encoded 28x28x1 train/test splits."""
    image_filename = '{}-images-idx3-ubyte.gz'
    label_filename = '{}-labels-idx1-ubyte.gz'
    # (our split name, remote file prefix)
    split_files = [('train', 'train'), ('test', 't10k')]
    splits = {}
    for split, split_file in split_files:
        with tempfile.NamedTemporaryFile() as f:
            url = URLS['mnist'].format(image_filename.format(split_file))
            print(url)
            request.urlretrieve(url, f.name)
            with gzip.GzipFile(fileobj=f, mode='r') as data:
                assert _read32(data) == 2051  # IDX magic number for image files
                n_images = _read32(data)
                row = _read32(data)
                col = _read32(data)
                images = np.frombuffer(data.read(n_images * row * col), dtype=np.uint8)
                images = images.reshape((n_images, row, col, 1))
        with tempfile.NamedTemporaryFile() as f:
            request.urlretrieve(URLS['mnist'].format(label_filename.format(split_file)), f.name)
            with gzip.GzipFile(fileobj=f, mode='r') as data:
                assert _read32(data) == 2049  # IDX magic number for label files
                n_labels = _read32(data)
                labels = np.frombuffer(data.read(n_labels), dtype=np.uint8)
        splits[split] = {'images': _encode_png(images), 'labels': labels}
    return splits
def _load_mnist32():
    """Like ``_load_mnist`` but zero-pads each image by 2px per side to 32x32."""
    image_filename = '{}-images-idx3-ubyte.gz'
    label_filename = '{}-labels-idx1-ubyte.gz'
    # (our split name, remote file prefix)
    split_files = [('train', 'train'), ('test', 't10k')]
    splits = {}
    for split, split_file in split_files:
        with tempfile.NamedTemporaryFile() as f:
            url = URLS['mnist'].format(image_filename.format(split_file))
            print(url)
            request.urlretrieve(url, f.name)
            with gzip.GzipFile(fileobj=f, mode='r') as data:
                assert _read32(data) == 2051  # IDX magic number for image files
                n_images = _read32(data)
                row = _read32(data)
                col = _read32(data)
                images = np.frombuffer(data.read(n_images * row * col), dtype=np.uint8)
                images = images.reshape((n_images, row, col, 1))
        # Pad 2x2 so that it becomes 32x32 (2 zero pixels on every side).
        images_pad = np.zeros((images.shape[0],
                               images.shape[1] + 4,
                               images.shape[2] + 4,
                               images.shape[3])).astype(np.uint8)
        images_pad[:, 2:-2, 2:-2, :] = images
        with tempfile.NamedTemporaryFile() as f:
            request.urlretrieve(URLS['mnist'].format(label_filename.format(split_file)), f.name)
            with gzip.GzipFile(fileobj=f, mode='r') as data:
                assert _read32(data) == 2049  # IDX magic number for label files
                n_labels = _read32(data)
                labels = np.frombuffer(data.read(n_labels), dtype=np.uint8)
        splits[split] = {'images': _encode_png(images_pad), 'labels': labels}
    return splits
def _load_mnistm():
    """Download MNIST-M (tar.gz of images + label listings) and return splits."""
    with tempfile.NamedTemporaryFile() as f:
        request.urlretrieve(URLS['mnistm'], f.name)
        tar = tarfile.open(fileobj=f)
        splits = {}
        for split in ['train', 'test']:
            prefix = f'mnist_m/mnist_m_{split}'
            # Each listing line: "<image filename> <label>" (bytes).
            img_list = tar.extractfile(f'{prefix}_labels.txt').readlines()
            images = []
            labels = []
            for img_path in tqdm(img_list, f'Loading mnistm {split} images and labels', leave=False):
                images.append(np.array(Image.open(tar.extractfile(os.path.join(
                    prefix, img_path.split()[0].decode('utf-8'))))))
                labels.append(int(img_path.split()[1].decode('utf-8')))
            images = np.stack(images, axis=0)
            splits[split] = {'images': _encode_png(images), 'labels': labels}
    return splits
def _load_syndigit():
    """Load SynDigits .mat splits, downloading to the working directory if missing."""
    splits = {}
    for split in ['train', 'test']:
        filename = 'synth_{}_32x32.mat'.format(split)
        if not os.path.exists(filename):
            wget.download(URLS['syndigit'].format(split), out=filename)
        data_dict = scipy.io.loadmat(filename)
        # .mat stores HWCN; move the sample axis first -> NHWC.
        images = np.transpose(data_dict['X'], (3, 0, 1, 2))
        labels = data_dict['y'].flatten()
        splits[split] = {'images': _encode_png(images), 'labels': labels}
    return splits
def _load_usps():
    """Load USPS digits from HDF5, upscale 16x16 grayscale to 32x32 RGB."""

    def _hdf5(path, data_key="data", target_key="target", flatten=True):
        """
        loads data from hdf5:
        - hdf5 should have 'train' and 'test' groups
        - each group should have 'data' and 'target' dataset or specify the key
        - flatten means to flatten images N * (C * H * W) as N * D array
        code from: https://www.kaggle.com/bistaumanga/usps-getting-started?scriptVersionId=3215146&cellId=3
        """
        with h5py.File(path, 'r') as hf:
            train = hf.get('train')
            X_tr = train.get(data_key)[:]
            y_tr = train.get(target_key)[:]
            test = hf.get('test')
            X_te = test.get(data_key)[:]
            y_te = test.get(target_key)[:]
            if flatten:
                X_tr = X_tr.reshape(X_tr.shape[0], reduce(lambda a, b: a * b, X_tr.shape[1:]))
                X_te = X_te.reshape(X_te.shape[0], reduce(lambda a, b: a * b, X_te.shape[1:]))
        return X_tr, y_tr, X_te, y_te

    filename = 'usps.h5'
    if not os.path.exists(filename):
        wget.download(URLS['usps'], out=filename)
    X_tr, y_tr, X_te, y_te = _hdf5(filename)
    # Scale [0,1] floats to uint8, replicate the channel x3 (gray -> RGB), resize to 32x32.
    X_tr = np.concatenate([(255.0 * X_tr).astype(np.uint8).reshape(-1, 16, 16, 1)] * 3, axis=-1)
    X_tr = np.stack([np.array(_image_resize(Image.fromarray(x), 32)) for x in X_tr], axis=0)
    X_te = np.concatenate([(255.0 * X_te).astype(np.uint8).reshape(-1, 16, 16, 1)] * 3, axis=-1)
    X_te = np.stack([np.array(_image_resize(Image.fromarray(x), 32)) for x in X_te], axis=0)
    splits = {'train': {'images': _encode_png(X_tr), 'labels': y_tr},
              'test': {'images': _encode_png(X_te), 'labels': y_te}}
    return splits
def _load_digitfive(domain: str, size: int) -> dict:
    """Dispatch to the loader for one DigitFive domain (32x32 only)."""
    assert size == 32
    loaders = {
        'mnist': _load_mnist32,
        'svhn': _load_svhn,
        'usps': _load_usps,
        'mnistm': _load_mnistm,
        'syndigit': _load_syndigit,
    }
    assert domain in loaders
    return loaders[domain]()
def _load_office31(domain: str, size: int) -> dict:
    """Load one Office-31 domain from a shared Google Drive tarball.

    Args:
        domain: 'amazon', 'dslr' or 'webcam'.
        size: target square image edge length.

    Returns:
        dict(train=..., test=...) with PNG-encoded images and int32 labels.

    Raises:
        FileNotFoundError: when Google Drive refuses the download (quota).
    """
    assert domain in 'amazon dslr webcam'.split()
    path = os.path.join(DOWNLOAD_DIR, 'office31_images.tgz')
    if not os.path.exists(path):
        gdd.download_file_from_google_drive(file_id=URLS['office31']['images'], dest_path=path, overwrite=True)
        # Drive serves an HTML error page instead of the tarball when over quota.
        # Fix: close the probe file handle (was left open by a bare open().read()).
        with open(path, 'rb') as probe:
            head = probe.read(1024)
        if b'Quota exceeded' in head:
            os.remove(path)
            raise FileNotFoundError('Quota exceeded: File office31_images.tgz for Office31 could not be downloaded from'
                                    ' Google drive. Try again later.')
    data = collections.defaultdict(list)
    with tarfile.open(name=path, mode='r:gz') as tar:
        # Archive layout: <domain>/<something>/<class>/<image name>.
        for entry in tar.getmembers():
            domain_, _, class_, name = entry.name.split('/')
            if domain == domain_:
                data[class_].append((class_, name, entry))
        # Deterministic 80/20 per-class train/test split.
        np.random.seed(0)
        train, test = [], []
        for class_ in data.keys():
            np.random.shuffle(data[class_])
            total_num_frames = len(data[class_])
            num_train_frames = int(0.8 * total_num_frames)
            train_frames = data[class_][:num_train_frames]
            test_frames = data[class_][num_train_frames:]
            assert len(train_frames) + len(test_frames) == total_num_frames
            train += train_frames
            test += test_frames
        train_images, train_labels, train_label_set = [], [], set()
        for class_, name, entry in tqdm(train, leave=False, desc='Resizing train images'):
            train_images.append(np.array(_image_resize(Image.open(tar.extractfile(entry)), size)))
            assert train_images[-1].shape == (size, size, 3)
            train_labels.append(class_)
            train_label_set.add(class_)
        train_label_id = {x: p for p, x in enumerate(sorted(train_label_set))}
        test_images, test_labels, test_label_set = [], [], set()
        # Fix: progress bar previously said 'Resizing train images' here too.
        for class_, name, entry in tqdm(test, leave=False, desc='Resizing test images'):
            test_images.append(np.array(_image_resize(Image.open(tar.extractfile(entry)), size)))
            assert test_images[-1].shape == (size, size, 3)
            test_labels.append(class_)
            test_label_set.add(class_)
        test_label_id = {x: p for p, x in enumerate(sorted(test_label_set))}
    return dict(train=dict(images=_encode_png(np.stack(train_images)),
                           labels=np.array([train_label_id[x] for x in train_labels], 'int32')),
                test=dict(images=_encode_png(np.stack(test_images)),
                          labels=np.array([test_label_id[x] for x in test_labels], 'int32')))
def _load_svhn():
    """Download SVHN cropped-digit .mat files: train, test and extra splits."""
    splits = collections.OrderedDict()
    for split in ['train', 'test', 'extra']:
        with tempfile.NamedTemporaryFile() as f:
            request.urlretrieve(URLS['svhn'].format(split), f.name)
            data_dict = scipy.io.loadmat(f.name)
        dataset = {}
        # .mat stores HWCN; move the sample axis first -> NHWC.
        dataset['images'] = np.transpose(data_dict['X'], [3, 0, 1, 2])
        dataset['images'] = _encode_png(dataset['images'])
        dataset['labels'] = data_dict['y'].reshape((-1))
        # SVHN raw data uses labels from 1 to 10; use 0 to 9 instead.
        dataset['labels'] %= 10  # Label number 10 is for 0.
        splits[split] = dataset
    return splits
def _read32(data):
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(data.read(4), dtype=dt)[0]
def _int64_feature(value):
    """Wrap a scalar int in a single-element tf.train.Feature (int64)."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
    """Wrap a bytes value in a single-element tf.train.Feature (bytes)."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _save_as_tfrecord(data, filename):
    """Serialize {'images': [...], 'labels': [...]} into DATA_DIR/<filename>.tfrecord."""
    assert len(data['images']) == len(data['labels'])
    filename = os.path.join(libml_data.DATA_DIR, filename + '.tfrecord')
    print('Saving dataset:', filename)
    with tf.io.TFRecordWriter(filename) as writer:
        for x in trange(len(data['images']), desc='Building records'):
            # One Example per sample: PNG bytes + integer label.
            feat = dict(image=_bytes_feature(data['images'][x]),
                        label=_int64_feature(data['labels'][x]))
            record = tf.train.Example(features=tf.train.Features(feature=feat))
            writer.write(record.SerializeToString())
    print('Saved:', filename)
def _is_installed(name, checksums):
    """Return True iff every subset's .tfrecord file for *name* already exists.

    Only the subset names of *checksums* are consulted; the checksum
    values themselves are currently unused placeholders.
    """
    # Fix: iterate keys directly instead of .items() with a discarded value.
    for subset in checksums:
        filename = os.path.join(libml_data.DATA_DIR, '%s-%s.tfrecord' % (name, subset))
        if not tf.io.gfile.exists(filename):
            return False
    return True
def _save_files(files, *args, **kwargs):
    """Write a {relative_path: text} mapping under DATA_DIR, creating folders first."""
    # Extra positional/keyword args are accepted only for saver-interface
    # compatibility and deliberately ignored.
    del args, kwargs
    for folder in frozenset(os.path.dirname(x) for x in files):
        tf.io.gfile.makedirs(os.path.join(libml_data.DATA_DIR, folder))
    for filename, contents in files.items():
        with tf.io.gfile.GFile(os.path.join(libml_data.DATA_DIR, filename), 'w') as f:
            f.write(contents)
def _is_installed_folder(name, folder):
    """Return True iff DATA_DIR/<name>/<folder> already exists."""
    return tf.io.gfile.exists(os.path.join(libml_data.DATA_DIR, name, folder))
# Registry of installable datasets: maps dataset name to its loader and the
# subsets expected on disk (checksum values are unused placeholders).
CONFIGS = {
    'cifar10': dict(loader=_load_cifar10, checksums=dict(train=None, test=None)),
    'cifar100': dict(loader=_load_cifar100, checksums=dict(train=None, test=None)),
    'mnist': dict(loader=_load_mnist, checksums=dict(train=None, test=None)),
    'svhn': dict(loader=_load_svhn, checksums=dict(train=None, test=None, extra=None)),
}
# DomainNet: every (size, domain) combination, e.g. 'domainnet32_clipart'.
CONFIGS.update({
    f'domainnet{size}_{domain}': dict(loader=partial(_load_domainnet, domain=domain, size=size),
                                      checksums=dict(train=None, test=None, all=None))
    for size, domain in
    itertools.product((32, 64, 128, 224), 'clipart infograph painting quickdraw real sketch'.split())
})
# Office-31: every (size, domain) combination, e.g. 'office31224_amazon'.
CONFIGS.update({
    f'office31{size}_{domain}': dict(loader=partial(_load_office31, domain=domain, size=size),
                                     checksums=dict(train=None))
    for size, domain in itertools.product((32, 64, 128, 224), 'amazon dslr webcam'.split())
})
# DigitFive: fixed at 32x32, e.g. 'digitfive32_mnist'.
CONFIGS.update({
    f'digitfive{size}_{domain}': dict(loader=partial(_load_digitfive, domain=domain, size=size),
                                      checksums=dict(train=None))
    for size, domain in itertools.product((32,), 'mnist svhn usps mnistm syndigit'.split())
})
def main(argv):
    """Install the datasets named on the command line (all of them by default)."""
    # argv[0] is the program name; any further args select specific datasets.
    if len(argv[1:]):
        subset = set(argv[1:])
    else:
        subset = set(CONFIGS.keys())
    tf.io.gfile.makedirs(libml_data.DATA_DIR)
    # Validate every requested name before doing any work.
    for name in subset:
        assert name in CONFIGS, f'Dataset not recognized {name}'
    for name, config in CONFIGS.items():
        if name not in subset:
            continue
        # A config may provide a custom installed-check; otherwise check that
        # all expected .tfrecord subsets exist.
        if 'is_installed' in config:
            if config['is_installed']():
                print('Skipping already installed:', name)
                continue
        elif _is_installed(name, config['checksums']):
            print('Skipping already installed:', name)
            continue
        print('Preparing', name)
        datas = config['loader']()
        saver = config.get('saver', _save_as_tfrecord)
        for sub_name, data in datas.items():
            # Two magic subset names bypass the tfrecord saver:
            # 'readme' -> plain text file, 'files' -> raw binary blobs.
            if sub_name == 'readme':
                filename = os.path.join(libml_data.DATA_DIR, '%s-%s.txt' % (name, sub_name))
                with tf.io.gfile.GFile(filename, 'w') as f:
                    f.write(data)
            elif sub_name == 'files':
                for file_and_data in data:
                    path = os.path.join(libml_data.DATA_DIR, file_and_data.filename)
                    with tf.io.gfile.GFile(path, "wb") as f:
                        f.write(file_and_data.data)
            else:
                saver(data, '%s-%s' % (name, sub_name))
# Script entry point: absl.app handles flag parsing and passes argv to main.
if __name__ == '__main__':
    app.run(main)
| |
from __future__ import absolute_import
from bokeh.io import save
from bokeh.models import (
BoxZoomTool,
ColumnDataSource,
DataRange1d,
PanTool,
Plot,
Range1d,
Rect,
LinearAxis
)
from selenium.webdriver.common.action_chains import ActionChains
from tests.integration.utils import has_no_console_errors, wait_for_canvas_resize
import pytest
pytestmark = pytest.mark.integration
def make_plot(xr=None, yr=None):
    """Build a 400x400 test plot with two rects, pan/box-zoom tools and axes.

    xr/yr override the default unbounded Range1d(0, 3) ranges.
    """
    x_range = xr if xr is not None else Range1d(0, 3, bounds=None)
    y_range = yr if yr is not None else Range1d(0, 3, bounds=None)
    source = ColumnDataSource(dict(x=[1, 2], y=[1, 1]))
    # explicitly set plot.id so that the plot can be accessed from Bokeh.index in browser
    plot = Plot(id='plot-id', plot_height=400, plot_width=400,
                x_range=x_range, y_range=y_range, min_border=0)
    plot.add_glyph(source, Rect(x='x', y='y', width=0.9, height=0.9))
    plot.add_tools(PanTool(), BoxZoomTool())
    plot.add_layout(LinearAxis(), 'below')
    plot.add_layout(LinearAxis(), 'left')
    return plot
def pan_plot(selenium, pan_x=None, pan_y=None):
    """Activate the pan tool and drag the plot canvas by (pan_x, pan_y) pixels."""
    canvas = selenium.find_element_by_tag_name('canvas')
    wait_for_canvas_resize(canvas, selenium)
    # Enable the pan tool
    pan_buttons = selenium.find_elements_by_css_selector('.bk-button-bar-list[type="pan"] .bk-toolbar-button')
    pan_button = pan_buttons[0]
    if 'active' not in pan_button.get_attribute('class'):
        pan_button.click()
    # Drag from (200, 200) — the centre of the 400x400 canvas — by the offset.
    actions = ActionChains(selenium)
    actions.move_to_element_with_offset(canvas, 200, 200)
    actions.click_and_hold()
    actions.move_by_offset(pan_x, pan_y)
    actions.release()
    actions.perform()
def test_x_range_does_not_pan_left_of_x_min(output_file_url, selenium):
    """Panning right (view moves left) must clamp x_range.start at the lower bound."""
    x_range_min = -1
    plot = make_plot(xr=Range1d(0, 3, bounds=(x_range_min, None)))
    save(plot)
    selenium.get(output_file_url)
    assert has_no_console_errors(selenium)

    # Pan plot and test for new range value
    pan_plot(selenium, pan_x=150, pan_y=0)
    # Read the model value back out of the browser via an alert box.
    new_range_start = float(selenium.execute_script("""alert(Bokeh.index['plot-id'].model.x_range.start)"""))
    selenium.switch_to_alert().dismiss()
    assert round(new_range_start) == x_range_min
def test_x_range_does_not_pan_right_of_x_max(output_file_url, selenium):
    """Panning left (view moves right) must clamp x_range.end at the upper bound."""
    x_range_max = 4
    plot = make_plot(xr=Range1d(0, 3, bounds=(None, x_range_max)))
    save(plot)
    selenium.get(output_file_url)
    assert has_no_console_errors(selenium)

    # Pan plot and test for new range value
    pan_plot(selenium, pan_x=-150, pan_y=0)
    new_range_end = float(selenium.execute_script("""alert(Bokeh.index['plot-id'].model.x_range.end)"""))
    selenium.switch_to_alert().dismiss()  # This is not necessary but assists debugging
    assert round(new_range_end) == x_range_max
def test_y_range_does_not_pan_below_y_min(output_file_url, selenium):
    """Panning up (view moves down) must clamp y_range.start at the lower bound."""
    y_range_min = -1
    plot = make_plot(yr=Range1d(0, 3, bounds=(y_range_min, None)))
    save(plot)
    selenium.get(output_file_url)
    assert has_no_console_errors(selenium)

    # Pan plot and test for new range value
    pan_plot(selenium, pan_x=50, pan_y=-150)
    new_range_start = float(selenium.execute_script("""alert(Bokeh.index['plot-id'].model.y_range.start)"""))
    selenium.switch_to_alert().dismiss()  # This is not necessary but assists debugging
    assert round(new_range_start) == y_range_min
def test_y_range_does_not_pan_above_y_max(output_file_url, selenium):
    """y_range.end must clamp at the configured upper bound when panning."""
    upper_bound = 4
    save(make_plot(yr=Range1d(0, 3, bounds=(None, upper_bound))))

    selenium.get(output_file_url)
    assert has_no_console_errors(selenium)

    # Pan, then read the JS-side range end back through an alert round-trip.
    pan_plot(selenium, pan_x=50, pan_y=150)
    end = float(selenium.execute_script("""alert(Bokeh.index['plot-id'].model.y_range.end)"""))
    selenium.switch_to_alert().dismiss()  # This is not necessary but assists debugging
    assert round(end) == upper_bound
############################
# Test reversed ranges
############################
def test_reversed_x_range_does_not_pan_right_of_x_min(output_file_url, selenium):
    """A reversed x range must still respect its lower bound when panning."""
    lower_bound = -1
    save(make_plot(xr=Range1d(3, 0, bounds=(lower_bound, None))))

    selenium.get(output_file_url)
    assert has_no_console_errors(selenium)

    # Pan, then read back the JS-side range minimum via an alert round-trip.
    pan_plot(selenium, pan_x=-150, pan_y=0)
    range_min = float(selenium.execute_script("""alert(Bokeh.index['plot-id'].model.x_range.min)"""))
    selenium.switch_to_alert().dismiss()
    assert round(range_min) == lower_bound
def test_reversed_x_range_does_not_pan_left_of_x_max(output_file_url, selenium):
    """A reversed x range must still respect its upper bound when panning."""
    upper_bound = 4
    save(make_plot(xr=Range1d(3, 0, bounds=(None, upper_bound))))

    selenium.get(output_file_url)
    assert has_no_console_errors(selenium)

    # Pan, then read back the JS-side range maximum via an alert round-trip.
    pan_plot(selenium, pan_x=150, pan_y=0)
    range_max = float(selenium.execute_script("""alert(Bokeh.index['plot-id'].model.x_range.max)"""))
    selenium.switch_to_alert().dismiss()  # This is not necessary but assists debugging
    assert round(range_max) == upper_bound
def test_reversed_y_range_does_not_pan_above_y_min(output_file_url, selenium):
    """A reversed y range must still respect its lower bound when panning."""
    lower_bound = -1
    save(make_plot(yr=Range1d(3, 0, bounds=(lower_bound, None))))

    selenium.get(output_file_url)
    assert has_no_console_errors(selenium)

    # Pan, then read back the JS-side range minimum via an alert round-trip.
    pan_plot(selenium, pan_x=50, pan_y=150)
    range_min = float(selenium.execute_script("""alert(Bokeh.index['plot-id'].model.y_range.min)"""))
    selenium.switch_to_alert().dismiss()
    assert round(range_min) == lower_bound
def test_reversed_y_range_does_not_pan_below_y_max(output_file_url, selenium):
    """A reversed y range must still respect its upper bound when panning."""
    upper_bound = 4
    save(make_plot(yr=Range1d(3, 0, bounds=(None, upper_bound))))

    selenium.get(output_file_url)
    assert has_no_console_errors(selenium)

    # Pan, then read back the JS-side range maximum via an alert round-trip.
    pan_plot(selenium, pan_x=50, pan_y=-150)
    range_max = float(selenium.execute_script("""alert(Bokeh.index['plot-id'].model.y_range.max)"""))
    selenium.switch_to_alert().dismiss()
    assert round(range_max) == upper_bound
############################
# Test auto bounds
############################
def zoom_plot(selenium):
    """Box-zoom into the plot by dragging a 200x200 selection from (30, 30)."""
    canvas = selenium.find_element_by_tag_name('canvas')
    wait_for_canvas_resize(canvas, selenium)

    # The box zoom tool is the second button in the "pan" button group.
    buttons = selenium.find_elements_by_css_selector('.bk-button-bar-list[type="pan"] .bk-toolbar-button')
    zoom_button = buttons[1]
    if 'active' not in zoom_button.get_attribute('class'):
        zoom_button.click()

    drag = ActionChains(selenium)
    drag.move_to_element_with_offset(canvas, 30, 30)
    drag.click_and_hold()
    drag.move_by_offset(200, 200)
    drag.release()
    drag.perform()
def _assert_autorange_prevents_panning_but_can_zoom(output_file_url, selenium):
    """Shared body: with bounds='auto', zooming works but panning is clamped."""
    selenium.get(output_file_url)
    assert has_no_console_errors(selenium)

    # Zoom in first so there is room to pan within the auto bounds.
    zoom_plot(selenium)

    # A small pan should move the view freely.
    pan_plot(selenium, pan_x=-50, pan_y=0)
    start = float(selenium.execute_script("""alert(Bokeh.index['plot-id'].model.x_range.start)"""))
    selenium.switch_to_alert().dismiss()
    assert start > 0.5

    # A large pan back must stop at the automatically computed bound.
    pan_plot(selenium, pan_x=100, pan_y=0)
    start = float(selenium.execute_script("""alert(Bokeh.index['plot-id'].model.x_range.start)"""))
    selenium.switch_to_alert().dismiss()
    assert 0.4 < start < 0.5
def test_autorange_prevents_panning_but_can_zoom_in_with_datarange1d(output_file_url, selenium):
    """bounds='auto' on DataRange1d axes clamps panning but allows zooming."""
    save(make_plot(xr=DataRange1d(bounds='auto'), yr=DataRange1d(bounds='auto')))
    _assert_autorange_prevents_panning_but_can_zoom(output_file_url, selenium)
def test_autorange_prevents_panning_but_can_zoom_in_with_range1d(output_file_url, selenium):
    """bounds='auto' on a Range1d x axis clamps panning but allows zooming."""
    save(make_plot(xr=Range1d(0.45, 3, bounds='auto'), yr=DataRange1d(0, 3, bounds='auto')))
    _assert_autorange_prevents_panning_but_can_zoom(output_file_url, selenium)
############################
# Test no bounds
############################
#def _assert_no_bounds_allows_unlimited_panning(output_file_url, selenium):
# selenium.get(output_file_url)
#
# pan_plot(selenium, pan_x=-1000, pan_y=2000)
#
# x_range_start = float(selenium.execute_script("""alert(window.get_x_range_start())"""))
# selenium.switch_to_alert().dismiss()
# assert x_range_start > 5
#
# y_range_start = float(selenium.execute_script("""alert(window.get_y_range_start())"""))
# selenium.switch_to_alert().dismiss()
# assert y_range_start > 5
#
#
#def test_no_bounds_allows_unlimited_panning_with_datarange1d(output_file_url, selenium):
# plot = make_plot_with_callback(xr=DataRange1d(bounds=None), yr=DataRange1d(bounds=None))
# save(plot)
# _assert_no_bounds_allows_unlimited_panning(output_file_url, selenium)
#
#
#def test_no_bounds_allows_unlimited_panning_with_range1d(output_file_url, selenium):
# plot = make_plot_with_callback(xr=Range1d(0.45, 3, bounds=None), yr=DataRange1d(0, 3, bounds=None))
# save(plot)
# _assert_no_bounds_allows_unlimited_panning(output_file_url, selenium)
| |
"""
Common utilities for the shell manager.
"""
import json
import logging
import os
import re
import shutil
import string
from os import chmod, listdir, sep, unlink
from os.path import isdir, isfile, join
from shutil import copy2, copytree
from hashlib import md5
from voluptuous import (
All,
ALLOW_EXTRA,
Length,
MultipleInvalid,
Optional,
Range,
Required,
Schema,
)
# Module-level logger for the shell manager utilities.
logger = logging.getLogger(__name__)
# Directories used to store server state.
# Most resources (installed problems and bundles, config, etc.) are stored
# within the SHARED_ROOT directory, which can be located on a network
# filesystem and mounted onto several shell servers to sync state.
# Deployed problem instances, however, are separate to each server
# (although the same problem instance will share its flag/port across servers).
SHARED_ROOT = "/opt/hacksports/shared/"
LOCAL_ROOT = "/opt/hacksports/local/"
# Subdirectories of SHARED_ROOT (synced across servers).
PROBLEM_ROOT = join(SHARED_ROOT, "sources")
EXTRA_ROOT = join(SHARED_ROOT, "extra")
STAGING_ROOT = join(SHARED_ROOT, "staging")
BUNDLE_ROOT = join(SHARED_ROOT, "bundles")
DEB_ROOT = join(SHARED_ROOT, "debs")
# Subdirectory of LOCAL_ROOT (specific to this server).
DEPLOYED_ROOT = join(LOCAL_ROOT, "deployed")
class ConfigDict(dict):
    """A dict whose keys can also be read and written as attributes.

    Lets configuration fields be accessed as e.g. ``config.hostname``
    instead of ``config["hostname"]``.
    """

    def __getattr__(self, attr):
        # Translate a missing key into AttributeError so attribute-protocol
        # users (hasattr, copy, pickle) behave correctly; a bare
        # ``return self[attr]`` would leak KeyError out of attribute lookup.
        try:
            return self[attr]
        except KeyError:
            raise AttributeError(attr) from None

    def __setattr__(self, attr, value):
        # Store attribute assignments as dictionary items.
        self[attr] = value
# Default shared configuration, written to SHARED_ROOT by get_shared_config()
# the first time it runs with no existing config file.
default_shared_config = ConfigDict(
    {
        # secret used for deterministic deployment
        "deploy_secret": "qwertyuiop",
        # the default username for files to be owned by
        "default_user": "hacksports",
        # the root of the web server running to serve static files
        # make sure this is consistent with what config/shell.nginx
        # specifies.
        "web_root": "/usr/share/nginx/html/",
        # the root of the problem directories for the instances
        "problem_directory_root": "/problems/",
        # "obfuscate" problem directory names
        "obfuscate_problem_directories": False,
        # list of port ranges that should not be assigned to any instances
        # this bans the first ports 0-1024 and 4242 for wetty
        "banned_ports": [{"start": 0, "end": 1024}, {"start": 4242, "end": 4242}],
    }
)
# Default local configuration, written to LOCAL_ROOT by get_local_config()
# the first time it runs with no existing config file.
default_local_config = ConfigDict(
    {
        # the externally accessible address of this server
        "hostname": "127.0.0.1",
        # the url of the web server
        "web_server": "http://127.0.0.1",
        # key allowing requests to bypass rate limiting; included here because
        # local_config_schema marks it Required (with extra=False), so a
        # generated default config without it would fail its own validation
        "rate_limit_bypass_key": "",
    }
)
# Schema for a problem specification (problem.json). Unknown keys are
# allowed (extra=ALLOW_EXTRA) so problems may carry additional metadata.
problem_schema = Schema(
    {
        Required("author"): All(str, Length(min=1, max=32)),
        Required("score"): All(int, Range(min=0)),
        Required("name"): All(str, Length(min=1, max=32)),
        Required("description"): str,
        Required("category"): All(str, Length(min=1, max=32)),
        Required("hints"): list,
        Required("organization"): All(str, Length(min=1, max=32)),
        Required("event"): All(str, Length(min=1, max=32)),
        "unique_name": str,
        "static_flag": bool,
        "walkthrough": All(str, Length(min=1, max=512)),
        "version": All(str, Length(min=1, max=8)),
        "tags": list,
        "pkg_description": All(str, Length(min=1, max=256)),
        "pkg_name": All(str, Length(min=1, max=32)),
        "pkg_dependencies": list,
        "pip_requirements": list,
        "pip_python_version": All(str, Length(min=1, max=3)),
    },
    extra=ALLOW_EXTRA,
)
# Schema for a bundle specification (bundle.json).
bundle_schema = Schema(
    {
        Required("author"): All(str, Length(min=1, max=32)),
        Required("name"): All(str, Length(min=1, max=32)),
        Required("description"): str,
        "dependencies": dict,
    }
)
# Schema for the cross-server shared configuration; extra=False rejects
# unknown keys.
shared_config_schema = Schema(
    {
        Required("deploy_secret"): str,
        Required("default_user"): str,
        Required("web_root"): str,
        Required("problem_directory_root"): str,
        Required("obfuscate_problem_directories"): bool,
        Required("banned_ports"): list,
    },
    extra=False,
)
# Schema for the per-server local configuration; extra=False rejects
# unknown keys.
local_config_schema = Schema(
    {
        Required("hostname"): str,
        Required("web_server"): str,
        Required("rate_limit_bypass_key"): str,
        Optional("docker_host"): str,
        Optional("docker_ca_cert"): str,
        Optional("docker_client_cert"): str,
        Optional("docker_client_key"): str
    },
    extra=False,
)
# Schema for a single entry of the shared config's "banned_ports" list.
port_range_schema = Schema(
    {
        Required("start"): All(int, Range(min=0, max=65535)),
        Required("end"): All(int, Range(min=0, max=65535)),
    }
)
class FatalException(Exception):
    """Raised for unrecoverable errors; callers are expected to abort."""
    pass
def get_attributes(obj):
    """
    Returns all attributes of an object, excluding those that start with
    an underscore.

    Args:
        obj: the object

    Returns:
        A dictionary of attributes
    """
    # Filter private names out entirely rather than mapping them to None,
    # matching what the docstring promises.
    return {
        key: getattr(obj, key) for key in dir(obj) if not key.startswith("_")
    }
def sanitize_name(name):
    """
    Sanitizes the given name such that it conforms to unix policy.

    Args:
        name: the name to sanitize.

    Returns:
        The sanitized form of name.
    """
    if not name:
        raise Exception("Can not sanitize an empty field.")

    # Lowercase, then replace anything outside [a-z0-9+-] with a dash.
    cleaned = re.sub(r"[^a-z0-9\+-]", "-", name.lower())

    # Unix names should not begin with a digit; prefix with 'p' if needed.
    return "p" + cleaned if cleaned[0] in string.digits else cleaned
# I will never understand why the shutil functions act the way they do...
def full_copy(source, destination, ignore=None):
    """Copy the entries of directory *source* into directory *destination*.

    Args:
        source: directory whose entries are copied.
        destination: existing directory to copy into.
        ignore: optional list of entry names to skip.

    NOTE(review): a subdirectory that already exists in the destination is
    skipped entirely (copytree refuses to copy onto an existing directory),
    so nested contents are not merged -- confirm this is intended.
    """
    if ignore is None:
        ignore = []
    for f in listdir(source):
        if f in ignore:
            continue
        source_item = join(source, f)
        destination_item = join(destination, f)
        if isdir(source_item):
            # copytree requires that the destination directory not exist yet.
            if not isdir(destination_item):
                copytree(source_item, destination_item)
        else:
            copy2(source_item, destination_item)
def move(source, destination, clobber=True):
    """Move *source* into directory *destination*.

    Args:
        source: path of the file to move.
        destination: directory to move it into.
        clobber: when True, an existing file at the target path is removed
            first so shutil.move cannot fail on it.
    """
    file_name = source.split(sep)[-1] if sep in source else source
    target = join(destination, file_name)
    if clobber and isfile(target):
        unlink(target)
    shutil.move(source, destination)
def get_problem_root(problem_name, absolute=False):
    """
    Installation location for a given problem.

    Args:
        problem_name: the problem name.
        absolute: should return an absolute path.

    Returns:
        The tentative installation location.
    """
    root = join(PROBLEM_ROOT, sanitize_name(problem_name))
    assert root.startswith(sep)
    # Strip the leading separator when a relative path is requested.
    return root if absolute else root[len(sep):]
def get_problem_root_hashed(problem, absolute=False):
    """
    Installation location for a given problem.

    Args:
        problem: the problem object.
        absolute: should return an absolute path.

    Returns:
        The tentative installation location.
    """
    dirname = "{}-{}".format(sanitize_name(problem["name"]), get_pid_hash(problem, True))
    root = join(PROBLEM_ROOT, dirname)
    assert root.startswith(sep)
    # Strip the leading separator when a relative path is requested.
    return root if absolute else root[len(sep):]
def get_problem(problem_path):
    """
    Returns a problem spec from a given problem directory.

    Args:
        problem_path: path to the root of the problem directory.

    Returns:
        A problem object.

    Raises:
        FatalException: if the JSON is malformed or fails schema validation.
    """
    json_path = join(problem_path, "problem.json")
    try:
        # Use a context manager so the file handle is always closed
        # (the previous open().read() leaked the handle).
        with open(json_path, "r") as f:
            problem = json.load(f)
    except json.decoder.JSONDecodeError as e:
        logger.critical(f"Error reading JSON file {json_path}")
        logger.critical(e)
        raise FatalException
    # Derive the stable unique name from the sanitized name plus short hash.
    problem["unique_name"] = "{}-{}".format(
        sanitize_name(problem["name"]), get_pid_hash(problem, True)
    )
    try:
        problem_schema(problem)
    except MultipleInvalid as e:
        logger.critical("Error validating problem object at '%s'!", json_path)
        logger.critical(e)
        raise FatalException
    return problem
def get_bundle_root(bundle_name, absolute=False):
    """
    Installation location for a given bundle.

    Args:
        bundle_name: the bundle name.
        absolute: should return an absolute path.

    Returns:
        The tentative installation location.

    NOTE(review): the returned path includes the "bundle.json" filename,
    not just the bundle directory -- confirm callers rely on this.
    """
    root = join(BUNDLE_ROOT, sanitize_name(bundle_name), "bundle.json")
    assert root.startswith(sep)
    # Strip the leading separator when a relative path is requested.
    return root if absolute else root[len(sep):]
def get_bundle(bundle_path):
    """
    Returns a bundle spec from a bundle JSON file.

    Args:
        bundle_path: path to the bundle JSON file.

    Returns:
        A bundle object.

    Raises:
        FatalException: if the bundle fails schema validation.
    """
    # Use a context manager so the file handle is always closed
    # (the previous open().read() leaked the handle).
    with open(bundle_path, "r") as f:
        bundle = json.load(f)
    try:
        bundle_schema(bundle)
    except MultipleInvalid as e:
        logger.critical("Error validating bundle object at '%s'!", bundle_path)
        logger.critical(e)
        raise FatalException
    return bundle
def verify_shared_config(shared_config_object):
    """
    Verifies the given shared configuration dict against
    the shared_config_schema and the port_range_schema.

    Args:
        shared_config_object: The configuration options in a dict

    Raises:
        FatalException: if failed.
    """
    try:
        shared_config_schema(shared_config_object)
    except MultipleInvalid as e:
        logger.critical("Error validating shared config file!")
        logger.critical(e)
        raise FatalException
    for port_range in shared_config_object["banned_ports"]:
        try:
            port_range_schema(port_range)
        except MultipleInvalid as e:
            logger.critical("Error validating port range in shared config file!")
            logger.critical(e)
            raise FatalException
        # Validate ordering with an explicit check instead of ``assert``,
        # which is stripped when Python runs with -O.
        if port_range["start"] > port_range["end"]:
            logger.critical(
                "Invalid port range: (%d -> %d)", port_range["start"], port_range["end"]
            )
            raise FatalException
def verify_local_config(local_config_object):
    """
    Verifies the given local configuration dict against
    the local_config_schema.

    Args:
        local_config_object: The configuration options in a dict

    Raises:
        FatalException: if failed.
    """
    try:
        local_config_schema(local_config_object)
    except MultipleInvalid as e:
        # Log the full validation error before aborting so the operator can
        # see which field was rejected.
        logger.critical("Error validating local config file!")
        logger.critical(e)
        raise FatalException
def write_configuration_file(path, config_dict):
    """
    Writes the options in config_dict to the specified path as JSON.

    Args:
        path: the path of the output JSON file
        config_dict: the configuration dictionary
    """
    serialized = json.dumps(
        config_dict, sort_keys=True, indent=4, separators=(",", ": ")
    )
    with open(path, "w") as f:
        f.write(serialized)
def get_shared_config():
    """
    Returns the shared configuration options from the file in SHARED_ROOT.
    """
    shared_config_location = join(SHARED_ROOT, "shared_config.json")
    try:
        with open(shared_config_location) as f:
            loaded = json.load(f)
        verify_shared_config(loaded)
        config = ConfigDict()
        config.update(loaded)
        return config
    except PermissionError:
        logger.error("You must run shell_manager with sudo.")
        raise FatalException
    except FileNotFoundError:
        # First run: write the defaults and ask the operator to edit them.
        write_configuration_file(shared_config_location, default_shared_config)
        chmod(shared_config_location, 0o640)
        logger.info(
            "There was no default configuration. One has been created for you. Please edit it accordingly using the 'shell_manager config' subcommand before deploying any instances."
        )
        raise FatalException
def get_local_config():
    """
    Returns the local configuration options from the file in LOCAL_ROOT.
    """
    local_config_location = join(LOCAL_ROOT, "local_config.json")
    try:
        with open(local_config_location) as f:
            loaded = json.load(f)
        verify_local_config(loaded)
        config = ConfigDict()
        config.update(loaded)
        return config
    except PermissionError:
        logger.error("You must run shell_manager with sudo.")
        raise FatalException
    except FileNotFoundError:
        # First run: write the defaults and ask the operator to edit them.
        write_configuration_file(local_config_location, default_local_config)
        chmod(local_config_location, 0o640)
        logger.info(
            "There was no default configuration. One has been created for you. Please edit it accordingly using the 'shell_manager config' subcommand before deploying any instances."
        )
        raise FatalException
def set_shared_config(config_dict):
    """
    Validates and writes the options in config_dict to the shared config file.

    Args:
        config_dict: the configuration dictionary
    """
    # Validate before writing so a bad config never reaches disk.
    verify_shared_config(config_dict)
    target = join(SHARED_ROOT, "shared_config.json")
    write_configuration_file(target, config_dict)
def set_local_config(config_dict):
    """
    Validates and writes the options in config_dict to the local config file.

    Args:
        config_dict: the configuration dictionary
    """
    # Validate before writing so a bad config never reaches disk.
    verify_local_config(config_dict)
    target = join(LOCAL_ROOT, "local_config.json")
    write_configuration_file(target, config_dict)
def get_pid_hash(problem, short=False):
    """
    Returns a hash of a given problem.

    Args:
        problem: a valid problem object.
        short: shorten the return value (first 7 characters)

    Returns:
        Hex digest of the MD5 hash
    """
    try:
        problem_schema(problem)
    except MultipleInvalid as e:
        logger.critical("Error validating problem object!")
        logger.critical(e)
        raise FatalException
    # The identity is derived from the fields that uniquely identify a
    # problem. Named ``fingerprint`` to avoid shadowing the builtin input().
    fingerprint = "{}-{}-{}-{}".format(
        problem["name"], problem["author"], problem["organization"], problem["event"]
    )
    digest = md5(fingerprint.encode("utf-8")).hexdigest()
    return digest[:7] if short else digest
def acquire_lock():
    """Acquire the problem installation/deployment lock."""
    # The lock is a sentinel file under SHARED_ROOT; its mere existence
    # signals an in-progress install/deploy to other invocations.
    lock_file = join(SHARED_ROOT, "deploy.lock")
    if isfile(lock_file):
        logger.error(
            "Another problem installation or deployment appears in progress. If you believe this to be an error, "
            "run 'shell_manager clean'"
        )
        raise FatalException
    # NOTE(review): check-then-create is not atomic; two concurrent callers
    # could both pass the isfile() check -- confirm this race is acceptable.
    with open(lock_file, "w") as f:
        f.write("1")
    logger.debug(f"Obtained lock file ({str(lock_file)})")
def release_lock():
    """Release the problem installation/deployment lock."""
    # Removing the sentinel file releases the lock; a missing file is not an
    # error so release is safe to call even when no lock is held.
    lock_file = join(SHARED_ROOT, "deploy.lock")
    if isfile(lock_file):
        os.remove(lock_file)
        logger.debug(f"Released lock file ({str(lock_file)})")
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.asset_v1.types import asset_service
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import AssetServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import AssetServiceGrpcTransport
class AssetServiceGrpcAsyncIOTransport(AssetServiceTransport):
"""gRPC AsyncIO backend transport for AssetService.
Asset service definition.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
    @classmethod
    def create_channel(cls,
                       host: str = 'cloudasset.googleapis.com',
                       credentials: Optional[ga_credentials.Credentials] = None,
                       credentials_file: Optional[str] = None,
                       scopes: Optional[Sequence[str]] = None,
                       quota_project_id: Optional[str] = None,
                       **kwargs) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        # Delegate to the helper, falling back to the transport's default
        # auth scopes and default host when none are supplied.
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs
        )
    def __init__(self, *,
            host: str = 'cloudasset.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            channel: aio.Channel = None,
            api_mtls_endpoint: str = None,
            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
            ssl_channel_credentials: grpc.ChannelCredentials = None,
            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
            quota_project_id=None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
              creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        # Created lazily by the operations_client property.
        self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        # A user-provided channel takes precedence over all TLS configuration.
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            # Deprecated path: api_mtls_endpoint replaces the host outright.
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                # Current mTLS path: explicit ssl_channel_credentials wins over
                # the cert-source callback.
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @property
    def grpc_channel(self) -> aio.Channel:
        """Return the channel used to connect to this service.

        The channel itself is created once in ``__init__``; this property
        only returns that cached instance.
        """
        # Return the channel from cache.
        return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def export_assets(self) -> Callable[
[asset_service.ExportAssetsRequest],
Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the export assets method over gRPC.
Exports assets with time and resource types to a given Cloud
Storage location/BigQuery table. For Cloud Storage location
destinations, the output format is newline-delimited JSON. Each
line represents a
[google.cloud.asset.v1.Asset][google.cloud.asset.v1.Asset] in
the JSON format; for BigQuery table destinations, the output
table stores the fields in asset proto as columns. This API
implements the
[google.longrunning.Operation][google.longrunning.Operation] API
, which allows you to keep track of the export. We recommend
intervals of at least 2 seconds with exponential retry to poll
the export operation result. For regular-size resource parent,
the export operation usually finishes within 5 minutes.
Returns:
Callable[[~.ExportAssetsRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'export_assets' not in self._stubs:
self._stubs['export_assets'] = self.grpc_channel.unary_unary(
'/google.cloud.asset.v1.AssetService/ExportAssets',
request_serializer=asset_service.ExportAssetsRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['export_assets']
@property
def list_assets(self) -> Callable[
[asset_service.ListAssetsRequest],
Awaitable[asset_service.ListAssetsResponse]]:
r"""Return a callable for the list assets method over gRPC.
Lists assets with time and resource types and returns
paged results in response.
Returns:
Callable[[~.ListAssetsRequest],
Awaitable[~.ListAssetsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_assets' not in self._stubs:
self._stubs['list_assets'] = self.grpc_channel.unary_unary(
'/google.cloud.asset.v1.AssetService/ListAssets',
request_serializer=asset_service.ListAssetsRequest.serialize,
response_deserializer=asset_service.ListAssetsResponse.deserialize,
)
return self._stubs['list_assets']
@property
def batch_get_assets_history(self) -> Callable[
[asset_service.BatchGetAssetsHistoryRequest],
Awaitable[asset_service.BatchGetAssetsHistoryResponse]]:
r"""Return a callable for the batch get assets history method over gRPC.
Batch gets the update history of assets that overlap a time
window. For IAM_POLICY content, this API outputs history when
the asset and its attached IAM POLICY both exist. This can
create gaps in the output history. Otherwise, this API outputs
history with asset in both non-delete or deleted status. If a
specified asset does not exist, this API returns an
INVALID_ARGUMENT error.
Returns:
Callable[[~.BatchGetAssetsHistoryRequest],
Awaitable[~.BatchGetAssetsHistoryResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'batch_get_assets_history' not in self._stubs:
self._stubs['batch_get_assets_history'] = self.grpc_channel.unary_unary(
'/google.cloud.asset.v1.AssetService/BatchGetAssetsHistory',
request_serializer=asset_service.BatchGetAssetsHistoryRequest.serialize,
response_deserializer=asset_service.BatchGetAssetsHistoryResponse.deserialize,
)
return self._stubs['batch_get_assets_history']
@property
def create_feed(self) -> Callable[
[asset_service.CreateFeedRequest],
Awaitable[asset_service.Feed]]:
r"""Return a callable for the create feed method over gRPC.
Creates a feed in a parent
project/folder/organization to listen to its asset
updates.
Returns:
Callable[[~.CreateFeedRequest],
Awaitable[~.Feed]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_feed' not in self._stubs:
self._stubs['create_feed'] = self.grpc_channel.unary_unary(
'/google.cloud.asset.v1.AssetService/CreateFeed',
request_serializer=asset_service.CreateFeedRequest.serialize,
response_deserializer=asset_service.Feed.deserialize,
)
return self._stubs['create_feed']
@property
def get_feed(self) -> Callable[
[asset_service.GetFeedRequest],
Awaitable[asset_service.Feed]]:
r"""Return a callable for the get feed method over gRPC.
Gets details about an asset feed.
Returns:
Callable[[~.GetFeedRequest],
Awaitable[~.Feed]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_feed' not in self._stubs:
self._stubs['get_feed'] = self.grpc_channel.unary_unary(
'/google.cloud.asset.v1.AssetService/GetFeed',
request_serializer=asset_service.GetFeedRequest.serialize,
response_deserializer=asset_service.Feed.deserialize,
)
return self._stubs['get_feed']
@property
def list_feeds(self) -> Callable[
[asset_service.ListFeedsRequest],
Awaitable[asset_service.ListFeedsResponse]]:
r"""Return a callable for the list feeds method over gRPC.
Lists all asset feeds in a parent
project/folder/organization.
Returns:
Callable[[~.ListFeedsRequest],
Awaitable[~.ListFeedsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_feeds' not in self._stubs:
self._stubs['list_feeds'] = self.grpc_channel.unary_unary(
'/google.cloud.asset.v1.AssetService/ListFeeds',
request_serializer=asset_service.ListFeedsRequest.serialize,
response_deserializer=asset_service.ListFeedsResponse.deserialize,
)
return self._stubs['list_feeds']
@property
def update_feed(self) -> Callable[
[asset_service.UpdateFeedRequest],
Awaitable[asset_service.Feed]]:
r"""Return a callable for the update feed method over gRPC.
Updates an asset feed configuration.
Returns:
Callable[[~.UpdateFeedRequest],
Awaitable[~.Feed]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_feed' not in self._stubs:
self._stubs['update_feed'] = self.grpc_channel.unary_unary(
'/google.cloud.asset.v1.AssetService/UpdateFeed',
request_serializer=asset_service.UpdateFeedRequest.serialize,
response_deserializer=asset_service.Feed.deserialize,
)
return self._stubs['update_feed']
@property
def delete_feed(self) -> Callable[
[asset_service.DeleteFeedRequest],
Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete feed method over gRPC.
Deletes an asset feed.
Returns:
Callable[[~.DeleteFeedRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_feed' not in self._stubs:
self._stubs['delete_feed'] = self.grpc_channel.unary_unary(
'/google.cloud.asset.v1.AssetService/DeleteFeed',
request_serializer=asset_service.DeleteFeedRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['delete_feed']
@property
def search_all_resources(self) -> Callable[
[asset_service.SearchAllResourcesRequest],
Awaitable[asset_service.SearchAllResourcesResponse]]:
r"""Return a callable for the search all resources method over gRPC.
Searches all Cloud resources within the specified scope, such as
a project, folder, or organization. The caller must be granted
the ``cloudasset.assets.searchAllResources`` permission on the
desired scope, otherwise the request will be rejected.
Returns:
Callable[[~.SearchAllResourcesRequest],
Awaitable[~.SearchAllResourcesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'search_all_resources' not in self._stubs:
self._stubs['search_all_resources'] = self.grpc_channel.unary_unary(
'/google.cloud.asset.v1.AssetService/SearchAllResources',
request_serializer=asset_service.SearchAllResourcesRequest.serialize,
response_deserializer=asset_service.SearchAllResourcesResponse.deserialize,
)
return self._stubs['search_all_resources']
@property
def search_all_iam_policies(self) -> Callable[
[asset_service.SearchAllIamPoliciesRequest],
Awaitable[asset_service.SearchAllIamPoliciesResponse]]:
r"""Return a callable for the search all iam policies method over gRPC.
Searches all IAM policies within the specified scope, such as a
project, folder, or organization. The caller must be granted the
``cloudasset.assets.searchAllIamPolicies`` permission on the
desired scope, otherwise the request will be rejected.
Returns:
Callable[[~.SearchAllIamPoliciesRequest],
Awaitable[~.SearchAllIamPoliciesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'search_all_iam_policies' not in self._stubs:
self._stubs['search_all_iam_policies'] = self.grpc_channel.unary_unary(
'/google.cloud.asset.v1.AssetService/SearchAllIamPolicies',
request_serializer=asset_service.SearchAllIamPoliciesRequest.serialize,
response_deserializer=asset_service.SearchAllIamPoliciesResponse.deserialize,
)
return self._stubs['search_all_iam_policies']
@property
def analyze_iam_policy(self) -> Callable[
[asset_service.AnalyzeIamPolicyRequest],
Awaitable[asset_service.AnalyzeIamPolicyResponse]]:
r"""Return a callable for the analyze iam policy method over gRPC.
Analyzes IAM policies to answer which identities have
what accesses on which resources.
Returns:
Callable[[~.AnalyzeIamPolicyRequest],
Awaitable[~.AnalyzeIamPolicyResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'analyze_iam_policy' not in self._stubs:
self._stubs['analyze_iam_policy'] = self.grpc_channel.unary_unary(
'/google.cloud.asset.v1.AssetService/AnalyzeIamPolicy',
request_serializer=asset_service.AnalyzeIamPolicyRequest.serialize,
response_deserializer=asset_service.AnalyzeIamPolicyResponse.deserialize,
)
return self._stubs['analyze_iam_policy']
@property
def analyze_iam_policy_longrunning(self) -> Callable[
[asset_service.AnalyzeIamPolicyLongrunningRequest],
Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the analyze iam policy longrunning method over gRPC.
Analyzes IAM policies asynchronously to answer which identities
have what accesses on which resources, and writes the analysis
results to a Google Cloud Storage or a BigQuery destination. For
Cloud Storage destination, the output format is the JSON format
that represents a
[AnalyzeIamPolicyResponse][google.cloud.asset.v1.AnalyzeIamPolicyResponse].
This method implements the
[google.longrunning.Operation][google.longrunning.Operation],
which allows you to track the operation status. We recommend
intervals of at least 2 seconds with exponential backoff retry
to poll the operation result. The metadata contains the request
to help callers to map responses to requests.
Returns:
Callable[[~.AnalyzeIamPolicyLongrunningRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'analyze_iam_policy_longrunning' not in self._stubs:
self._stubs['analyze_iam_policy_longrunning'] = self.grpc_channel.unary_unary(
'/google.cloud.asset.v1.AssetService/AnalyzeIamPolicyLongrunning',
request_serializer=asset_service.AnalyzeIamPolicyLongrunningRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['analyze_iam_policy_longrunning']
    def close(self):
        """Close the underlying gRPC channel; returns the channel's
        (awaitable) close result."""
        return self.grpc_channel.close()
# Public API of this module: only the async transport class is exported.
__all__ = (
    'AssetServiceGrpcAsyncIOTransport',
)
| |
""" Cisco_IOS_XR_lpts_lib_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR lpts\-lib package configuration.
This module contains definitions
for the following management objects\:
lpts\: lpts configuration commands
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
from ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_pre_ifib_cfg import LptsFlowEnum
from ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_pre_ifib_cfg import LptsPreIFibPrecedenceNumberEnum
# NOTE: ydk auto-generated binding for the Cisco-IOS-XR-lpts-lib-cfg YANG
# model.  The nested classes mirror the YANG container hierarchy:
#   Lpts -> Ipolicer -> {Ipv4Acls -> Ipv4Acl, Flows -> Flow -> Precedences}
# Each class exposes the generated ydk protocol: `_common_path` (XPath of
# the node), `is_config()`, `_has_data()` and `_meta_info()`.
class Lpts(object):
    """lpts configuration commands.

    Attributes:
        ipolicer: Pre-IFIB configuration (:class:`Lpts.Ipolicer` presence
            container), or ``None`` when not configured.
    """

    # YANG module prefix / revision this binding was generated from.
    _prefix = 'lpts-lib-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.ipolicer = None

    class Ipolicer(object):
        """Pre-IFIB configuration (YANG *presence* container).

        Attributes:
            enable: ``Empty`` leaf; mandatory in the YANG model.
            flows: :class:`Lpts.Ipolicer.Flows` table of flow configs.
            ipv4acls: :class:`Lpts.Ipolicer.Ipv4Acls` table of ACL rates.
            _is_presence: True -- the container's mere existence carries
                meaning even when no child data is set.
        """

        _prefix = 'lpts-pre-ifib-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self._is_presence = True
            self.enable = None
            self.flows = Lpts.Ipolicer.Flows()
            self.flows.parent = self
            self.ipv4acls = Lpts.Ipolicer.Ipv4Acls()
            self.ipv4acls.parent = self

        class Ipv4Acls(object):
            """Table for ACLs (YANG list ``ipv4acl``)."""

            _prefix = 'lpts-pre-ifib-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.ipv4acl = YList()
                self.ipv4acl.parent = self
                self.ipv4acl.name = 'ipv4acl'

            class Ipv4Acl(object):
                """One ACL entry.

                Attributes:
                    acl_name: list key; ACL name (str, model pattern
                        restricted).
                    acl_rate: pre-IFIB policer rate (int, 0..100000).
                """

                _prefix = 'lpts-pre-ifib-cfg'
                _revision = '2015-11-09'

                def __init__(self):
                    self.parent = None
                    self.acl_name = None
                    self.acl_rate = None

                @property
                def _common_path(self):
                    # The list key must be set before an XPath can be built.
                    if self.acl_name is None:
                        raise YPYModelError('Key property acl_name is None')

                    return '/Cisco-IOS-XR-lpts-lib-cfg:lpts/Cisco-IOS-XR-lpts-pre-ifib-cfg:ipolicer/Cisco-IOS-XR-lpts-pre-ifib-cfg:ipv4acls/Cisco-IOS-XR-lpts-pre-ifib-cfg:ipv4acl[Cisco-IOS-XR-lpts-pre-ifib-cfg:acl-name = ' + str(self.acl_name) + ']'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return True

                def _has_data(self):
                    # An entry carries data when any of its leaves is set.
                    if not self.is_config():
                        return False
                    if self.acl_name is not None:
                        return True

                    if self.acl_rate is not None:
                        return True

                    return False

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lpts_lib_cfg as meta
                    return meta._meta_table['Lpts.Ipolicer.Ipv4Acls.Ipv4Acl']['meta_info']

            @property
            def _common_path(self):

                return '/Cisco-IOS-XR-lpts-lib-cfg:lpts/Cisco-IOS-XR-lpts-pre-ifib-cfg:ipolicer/Cisco-IOS-XR-lpts-pre-ifib-cfg:ipv4acls'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return True

            def _has_data(self):
                # The table has data when any child list entry does.
                if not self.is_config():
                    return False
                if self.ipv4acl is not None:
                    for child_ref in self.ipv4acl:
                        if child_ref._has_data():
                            return True

                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lpts_lib_cfg as meta
                return meta._meta_table['Lpts.Ipolicer.Ipv4Acls']['meta_info']

        class Flows(object):
            """Table for flows (YANG list ``flow``)."""

            _prefix = 'lpts-pre-ifib-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.flow = YList()
                self.flow.parent = self
                self.flow.name = 'flow'

            class Flow(object):
                """One selected flow type.

                Attributes:
                    flow_type: list key; ``LptsFlowEnum`` value.
                    precedences: :class:`Precedences` leaf-list wrapper for
                        TOS precedence values.
                    rate: configured rate value (int).
                """

                _prefix = 'lpts-pre-ifib-cfg'
                _revision = '2015-11-09'

                def __init__(self):
                    self.parent = None
                    self.flow_type = None
                    self.precedences = Lpts.Ipolicer.Flows.Flow.Precedences()
                    self.precedences.parent = self
                    self.rate = None

                class Precedences(object):
                    """TOS precedence value(s): leaf-list whose items are
                    ``LptsPreIFibPrecedenceNumberEnum`` values or ints 0..7."""

                    _prefix = 'lpts-pre-ifib-cfg'
                    _revision = '2015-11-09'

                    def __init__(self):
                        self.parent = None
                        self.precedence = YLeafList()
                        self.precedence.parent = self
                        self.precedence.name = 'precedence'

                    @property
                    def _common_path(self):
                        # The leaf-list path hangs off the parent Flow entry,
                        # so the parent link must already be wired up.
                        if self.parent is None:
                            raise YPYModelError('parent is not set . Cannot derive path.')

                        return self.parent._common_path +'/Cisco-IOS-XR-lpts-pre-ifib-cfg:precedences'

                    def is_config(self):
                        ''' Returns True if this instance represents config data else returns False '''
                        return True

                    def _has_data(self):
                        if not self.is_config():
                            return False
                        if self.precedence is not None:
                            for child in self.precedence:
                                if child is not None:
                                    return True

                        return False

                    @staticmethod
                    def _meta_info():
                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lpts_lib_cfg as meta
                        return meta._meta_table['Lpts.Ipolicer.Flows.Flow.Precedences']['meta_info']

                @property
                def _common_path(self):
                    # The list key must be set before an XPath can be built.
                    if self.flow_type is None:
                        raise YPYModelError('Key property flow_type is None')

                    return '/Cisco-IOS-XR-lpts-lib-cfg:lpts/Cisco-IOS-XR-lpts-pre-ifib-cfg:ipolicer/Cisco-IOS-XR-lpts-pre-ifib-cfg:flows/Cisco-IOS-XR-lpts-pre-ifib-cfg:flow[Cisco-IOS-XR-lpts-pre-ifib-cfg:flow-type = ' + str(self.flow_type) + ']'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return True

                def _has_data(self):
                    if not self.is_config():
                        return False
                    if self.flow_type is not None:
                        return True

                    if self.precedences is not None and self.precedences._has_data():
                        return True

                    if self.rate is not None:
                        return True

                    return False

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lpts_lib_cfg as meta
                    return meta._meta_table['Lpts.Ipolicer.Flows.Flow']['meta_info']

            @property
            def _common_path(self):

                return '/Cisco-IOS-XR-lpts-lib-cfg:lpts/Cisco-IOS-XR-lpts-pre-ifib-cfg:ipolicer/Cisco-IOS-XR-lpts-pre-ifib-cfg:flows'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return True

            def _has_data(self):
                if not self.is_config():
                    return False
                if self.flow is not None:
                    for child_ref in self.flow:
                        if child_ref._has_data():
                            return True

                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lpts_lib_cfg as meta
                return meta._meta_table['Lpts.Ipolicer.Flows']['meta_info']

        @property
        def _common_path(self):

            return '/Cisco-IOS-XR-lpts-lib-cfg:lpts/Cisco-IOS-XR-lpts-pre-ifib-cfg:ipolicer'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            # Presence container: it reports data as soon as it exists,
            # independent of its children.
            if self._is_presence:
                return True
            if self.enable is not None:
                return True

            if self.flows is not None and self.flows._has_data():
                return True

            if self.ipv4acls is not None and self.ipv4acls._has_data():
                return True

            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lpts_lib_cfg as meta
            return meta._meta_table['Lpts.Ipolicer']['meta_info']

    @property
    def _common_path(self):

        return '/Cisco-IOS-XR-lpts-lib-cfg:lpts'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.ipolicer is not None and self.ipolicer._has_data():
            return True

        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lpts_lib_cfg as meta
        return meta._meta_table['Lpts']['meta_info']
| |
# coding=utf-8
import os
import os.path
import sys
import json
import re
import threading
import subprocess
import tempfile
import collections
import platform
import semver
import time
import zipfile
is_python3 = sys.version_info[0] > 2
if is_python3:
import urllib.request as url_req
import urllib.error as url_err
import urllib.parse as url_parse
else:
import urllib
import urllib2
url_req = urllib2
url_err = urllib2
url_parse = urllib2
CHECK_INTERVAL = 60 * 60 * 24
# PACKAGES_URL = 'https://api.github.com/repos/emmetio/pyv8-binaries/downloads'
PACKAGES_URL = 'https://api.github.com/repos/emmetio/pyv8-binaries/contents'
def load(dest_path, delegate=None):
    """
    Main function that attempts to load or update PyV8 binary.
    First, it loads list of available PyV8 modules and check if
    PyV8 should be downloaded or updated.
    @param dest_path: Path where PyV8 lib should be downloaded
    @param delegate: instance of LoaderDelegate that will receive
    loader progress events
    @returns: `True` if download progress was initiated
    """
    if delegate is None:
        delegate = LoaderDelegate()

    config = get_loader_config(dest_path)

    if 'PyV8' in sys.modules and (config['skip_update'] or time.time() < config['last_update'] + CHECK_INTERVAL):
        # No need to load anything: user already has PyV8 binary
        # or decided to disable update process
        delegate.log('No need to update PyV8')
        return False

    def on_complete(result, *args, **kwargs):
        # `result` is the sha of the freshly downloaded bundle, or None
        # when nothing new was fetched.
        if result is not None:
            # Most recent version was downloaded
            config['last_id'] = result

            if 'PyV8' not in sys.modules:
                # PyV8 is not loaded yet, we can safely unpack it
                unpack_pyv8(dest_path)

        config['last_update'] = time.time()
        save_loader_config(dest_path, config)
        delegate.on_complete(*args, **kwargs)

    # try to download most recent version of PyV8
    # As PyV8 for Sublime Text spreads the world, it's possible
    # that multiple distinct PyV8Loader's may start doing the same
    # job at the same time. In this case, we should check if there's
    # already a thread that load PyV8 and hook on existing thread
    # rather that creating a new one
    thread = None
    thread_exists = False
    for t in threading.enumerate():
        if hasattr(t, 'is_pyv8_thread'):
            print('PyV8: Reusing thread')
            thread = t
            thread_exists = True
            break

    if not thread:
        print('PyV8: Creating new thread')
        thread = PyV8Loader(get_arch(), dest_path, config, delegate=delegate)
        thread.start()

    delegate.on_start()

    # watch on download progress
    prog = ThreadProgress(thread, delegate, thread_exists)
    prog.on('complete', on_complete if not thread_exists else delegate.on_complete)
    prog.on('error', delegate.on_error)
    # BUGFIX: the docstring (and the `return False` early exit above)
    # promise a `True` result when a download was initiated, but the
    # function previously fell off the end and returned None -- which is
    # falsy and indistinguishable from the "nothing to do" case.
    return True
def get_arch():
    "Returns architecture name for PyV8 binary"
    suffix = '-p3' if is_python3 else ''

    def tagged(name):
        # Append the Python-3 marker to the platform tag when needed.
        return '%s%s' % (name, suffix)

    is_64bit = sys.maxsize > 2**32
    system_name = platform.system()

    if system_name == 'Darwin':
        try:
            # Pre-10.7 OS X machines need a dedicated binary.
            if semver.match(platform.mac_ver()[0], '<10.7.0'):
                return tagged('mac106')
        except:
            pass

        return tagged('osx')
    if system_name == 'Windows':
        return tagged('win64' if is_64bit else 'win32')
    if system_name == 'Linux':
        return tagged('linux64' if is_64bit else 'linux32')
def get_loader_config(path):
    """Return the loader state dict for `path`.

    Starts from built-in defaults and overlays whatever is stored in
    ``<path>/config.json`` (if that file exists).
    """
    config = {
        "last_id": 0,
        "last_update": 0,
        "skip_update": False
    }

    config_path = os.path.join(path, 'config.json')
    if os.path.exists(config_path):
        with open(config_path) as fd:
            config.update(json.load(fd))

    return config
def save_loader_config(path, data):
    """Persist loader state `data` as JSON to ``<path>/config.json``,
    creating `path` first if it does not exist."""
    config_path = os.path.join(path, 'config.json')

    if not os.path.exists(path):
        os.makedirs(path)

    # BUGFIX/idiom: use a context manager so the file handle is closed
    # even if serialization or the write raises mid-way.
    with open(config_path, 'w') as fp:
        fp.write(json.dumps(data))
def clean_old_data():
    """Best-effort removal of everything in the current directory except
    ``config.json`` and ``pack.zip`` (case-insensitive)."""
    keep = ('config.json', 'pack.zip')
    for entry in os.listdir('.'):
        if entry.lower() in keep:
            continue
        try:
            os.remove(entry)
        except Exception:
            # Deliberately best-effort: leftovers are harmless.
            pass
def unpack_pyv8(package_dir):
    """Unpack ``<package_dir>/pack.zip`` into `package_dir` and delete the
    archive afterwards.

    Previously extracted files (except config.json/pack.zip) are removed
    first.  If the archive contains a single root directory, that
    directory level is stripped while extracting.  No-op when pack.zip is
    absent.
    """
    f = os.path.join(package_dir, 'pack.zip')
    if not os.path.exists(f):
        return

    package_zip = zipfile.ZipFile(f, 'r')

    root_level_paths = []
    last_path = None
    for path in package_zip.namelist():
        last_path = path
        if path.find('/') in [len(path) - 1, -1]:
            root_level_paths.append(path)
        if path[0] == '/' or path.find('../') != -1 or path.find('..\\') != -1:
            # BUGFIX: raising a plain string is a TypeError on Python 2.6+
            # and a SyntaxError-adjacent failure on Python 3; raise a real
            # exception object instead.
            raise ValueError('The PyV8 package contains files outside of the package dir and cannot be safely installed.')

    if last_path and len(root_level_paths) == 0:
        root_level_paths.append(last_path[0:last_path.find('/') + 1])

    prev_dir = os.getcwd()
    os.chdir(package_dir)

    clean_old_data()

    # Here we don't use .extractall() since it was having issues on OS X
    skip_root_dir = len(root_level_paths) == 1 and \
        root_level_paths[0].endswith('/')
    extracted_paths = []
    for path in package_zip.namelist():
        dest = path

        if not is_python3:
            try:
                if not isinstance(dest, unicode):
                    dest = unicode(dest, 'utf-8', 'strict')
            except UnicodeDecodeError:
                dest = unicode(dest, 'cp1252', 'replace')

        if os.name == 'nt':
            regex = ':|\*|\?|"|<|>|\|'
            if re.search(regex, dest) != None:
                # BUGFIX: the old `print (...) % (...)` applied `%` to
                # print's return value (None) under Python 3 and crashed.
                print(('%s: Skipping file from package named %s due to '
                       'an invalid filename') % (__name__, path))
                continue

        # If there was only a single directory in the package, we remove
        # that folder name from the paths as we extract entries
        if skip_root_dir:
            dest = dest[len(root_level_paths[0]):]

        if os.name == 'nt':
            dest = dest.replace('/', '\\')
        else:
            dest = dest.replace('\\', '/')

        dest = os.path.join(package_dir, dest)

        def add_extracted_dirs(dir):
            # Record every ancestor directory of `dir` (up to package_dir)
            # exactly once.
            while dir not in extracted_paths:
                extracted_paths.append(dir)
                dir = os.path.dirname(dir)
                if dir == package_dir:
                    break

        if path.endswith('/'):
            if not os.path.exists(dest):
                os.makedirs(dest)
            add_extracted_dirs(dest)
        else:
            dest_dir = os.path.dirname(dest)
            if not os.path.exists(dest_dir):
                os.makedirs(dest_dir)
            add_extracted_dirs(dest_dir)
            extracted_paths.append(dest)
            try:
                open(dest, 'wb').write(package_zip.read(path))
            except (IOError, UnicodeDecodeError):
                # Same BUGFIX as above: build the message before printing.
                print(('%s: Skipping file from package named %s due to '
                       'an invalid filename') % (__name__, path))

    package_zip.close()

    os.chdir(prev_dir)
    os.remove(f)
class LoaderDelegate():
    """
    Abstract class used to display PyV8 binary download progress,
    and provide some settings for downloader
    """
    def __init__(self, settings=None):
        # BUGFIX: the old signature used a mutable default argument
        # (settings={}), so every delegate created with the default shared
        # one dict -- a mutation through one instance leaked into all
        # others.  Passing `{}` explicitly still behaves as before.
        self.settings = {} if settings is None else settings

    def on_start(self, *args, **kwargs):
        "Invoked when download process is initiated"
        pass

    def on_progress(self, *args, **kwargs):
        "Invoked on download progress"
        pass

    def on_complete(self, *args, **kwargs):
        "Invoked when download process was finished successfully"
        pass

    def on_error(self, *args, **kwargs):
        "Invoked when error occured during download process"
        pass

    def setting(self, name, default=None):
        "Returns specified setting name"
        return self.settings[name] if name in self.settings else default

    def log(self, message):
        "No-op logger; subclasses override to surface messages"
        pass
class ThreadProgress():
    """Polls a worker thread and dispatches 'progress'/'complete'/'error'
    events to registered callbacks and to the delegate's ``on_<event>``
    hooks."""

    def __init__(self, thread, delegate, is_background=False):
        self.thread = thread
        self.delegate = delegate
        self.is_background = is_background
        self._callbacks = {}
        # Kick off polling on a timer thread so the constructor returns
        # immediately.
        threading.Timer(0, self.run).start()

    def run(self):
        """Poll the thread every 0.1s until it finishes, then fire the
        terminal event based on its exit_code."""
        if not self.thread.is_alive():
            if self.thread.exit_code != 0:
                return self.trigger('error', exit_code=self.thread.exit_code, progress=self)
            return self.trigger('complete', result=self.thread.result, progress=self)

        self.trigger('progress', progress=self)
        threading.Timer(0.1, self.run).start()

    def on(self, event_name, callback):
        "Register `callback` for `event_name`; silently ignores non-callables."
        if event_name not in self._callbacks:
            self._callbacks[event_name] = []

        # BUGFIX: `collections.Callable` was deprecated in Python 3.3 and
        # removed from the `collections` namespace in 3.10; the builtin
        # callable() works on both Python 2 and 3.
        if callable(callback):
            self._callbacks[event_name].append(callback)

        return self

    def trigger(self, event_name, *args, **kwargs):
        "Fire `event_name`: call registered callbacks, then the delegate hook."
        if event_name in self._callbacks:
            for c in self._callbacks[event_name]:
                c(*args, **kwargs)

        if self.delegate and hasattr(self.delegate, 'on_%s' % event_name):
            getattr(self.delegate, 'on_%s' % event_name)(*args, **kwargs)

        return self
class BinaryNotFoundError(Exception):
    """Raised when a required command-line binary cannot be found on PATH."""
    pass
class NonCleanExitError(Exception):
    """Raised when a spawned process exits with a non-zero return code.

    Callers may attach the captured process output as an ``output``
    attribute before raising.
    """
    def __init__(self, returncode):
        # Exit status of the failed process.
        self.returncode = returncode

    def __str__(self):
        return repr(self.returncode)
class CliDownloader():
    """Base class for downloaders that shell out to a command-line tool
    (curl, wget)."""

    def __init__(self, settings):
        self.settings = settings

    def find_binary(self, name):
        """Return the full path of `name` found on PATH; raise
        BinaryNotFoundError when no directory contains it."""
        for directory in os.environ['PATH'].split(os.pathsep):
            candidate = os.path.join(directory, name)
            if os.path.exists(candidate):
                return candidate

        raise BinaryNotFoundError('The binary %s could not be located' % name)

    def execute(self, args):
        """Run `args` and return its combined stdout+stderr bytes; raise
        NonCleanExitError (with the output attached) on non-zero exit."""
        proc = subprocess.Popen(args, stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

        output = proc.stdout.read()
        returncode = proc.wait()
        if returncode == 0:
            return output

        error = NonCleanExitError(returncode)
        error.output = output
        raise error
class WgetDownloader(CliDownloader):
    """Downloader that shells out to ``wget``.

    Construction raises BinaryNotFoundError when wget is not on PATH,
    letting callers fall back to another downloader.
    """
    def __init__(self, settings):
        self.settings = settings
        self.wget = self.find_binary('wget')

    def clean_tmp_file(self):
        # Remove the log file wget wrote via the ``-o`` option.
        os.remove(self.tmp_file)

    def download(self, url, error_message, timeout, tries):
        """Fetch `url`, retrying up to `tries` times on rate limiting or
        timeouts; return the payload (bytes) or False on failure."""
        if not self.wget:
            return False

        # wget writes its log to tmp_file (-o) and streams the payload to
        # stdout (-O -), which self.execute() captures.
        self.tmp_file = tempfile.NamedTemporaryFile().name
        command = [self.wget, '--connect-timeout=' + str(int(timeout)), '-o',
            self.tmp_file, '-O', '-', '-U', 'Emmet PyV8 Loader',
            '--no-check-certificate']

        command.append(url)

        # Proxies are handed to wget through the environment.
        if self.settings.get('http_proxy'):
            os.putenv('http_proxy', self.settings.get('http_proxy'))
            if not self.settings.get('https_proxy'):
                os.putenv('https_proxy', self.settings.get('http_proxy'))
        if self.settings.get('https_proxy'):
            os.putenv('https_proxy', self.settings.get('https_proxy'))

        while tries > 0:
            tries -= 1
            try:
                result = self.execute(command)
                self.clean_tmp_file()
                return result
            except NonCleanExitError as e:
                # Dig the first ERROR/failed line out of wget's log to
                # build a readable message; wget exit code 8 = server
                # error, 4 = network failure.
                error_line = ''
                with open(self.tmp_file) as f:
                    for line in list(f):
                        if re.search('ERROR[: ]|failed: ', line):
                            error_line = line
                            break

                if e.returncode == 8:
                    regex = re.compile('^.*ERROR (\d+):.*', re.S)
                    if re.sub(regex, '\\1', error_line) == '503':
                        # GitHub and BitBucket seem to rate limit via 503
                        print('%s: Downloading %s was rate limited, trying again' % (__name__, url))
                        continue
                    error_string = 'HTTP error ' + re.sub('^.*? ERROR ', '',
                        error_line)

                elif e.returncode == 4:
                    error_string = re.sub('^.*?failed: ', '', error_line)
                    # GitHub and BitBucket seem to time out a lot
                    if error_string.find('timed out') != -1:
                        print('%s: Downloading %s timed out, trying again' % (__name__, url))
                        continue

                else:
                    error_string = re.sub('^.*?(ERROR[: ]|failed: )', '\\1',
                        error_line)

                error_string = re.sub('\\.?\s*\n\s*$', '', error_string)
                print('%s: %s %s downloading %s.' % (__name__, error_message,
                    error_string, url))
            self.clean_tmp_file()
            break
        return False
class CurlDownloader(CliDownloader):
    """Downloader that shells out to ``curl``.

    Construction raises BinaryNotFoundError when curl is not on PATH,
    letting callers fall back to another downloader.
    """
    def __init__(self, settings):
        self.settings = settings
        self.curl = self.find_binary('curl')

    def download(self, url, error_message, timeout, tries):
        """Fetch `url`, retrying up to `tries` times on rate limiting or
        timeouts; return the payload (bytes) or False on failure."""
        if not self.curl:
            return False
        # -f: fail on HTTP errors (exit 22), -sSL: silent, show errors,
        # follow redirects.
        command = [self.curl, '-f', '--user-agent', 'Emmet PyV8 Loader',
            '--connect-timeout', str(int(timeout)), '-sSL']

        command.append(url)

        # curl honours proxy settings from the environment.
        if self.settings.get('http_proxy'):
            os.putenv('http_proxy', self.settings.get('http_proxy'))
            if not self.settings.get('https_proxy'):
                os.putenv('HTTPS_PROXY', self.settings.get('http_proxy'))
        if self.settings.get('https_proxy'):
            os.putenv('HTTPS_PROXY', self.settings.get('https_proxy'))

        while tries > 0:
            tries -= 1
            try:
                return self.execute(command)
            except NonCleanExitError as e:
                # curl exit codes: 22 = HTTP error, 6 = host not found,
                # 28 = timeout.
                # NOTE(review): e.output is bytes under Python 3, so this
                # re.sub with a str pattern would raise -- confirm intended
                # runtime before relying on this branch.
                if e.returncode == 22:
                    code = re.sub('^.*?(\d+)\s*$', '\\1', e.output)
                    if code == '503':
                        # GitHub and BitBucket seem to rate limit via 503
                        print('%s: Downloading %s was rate limited, trying again' % (__name__, url))
                        continue
                    error_string = 'HTTP error ' + code
                elif e.returncode == 6:
                    error_string = 'URL error host not found'
                elif e.returncode == 28:
                    # GitHub and BitBucket seem to time out a lot
                    print('%s: Downloading %s timed out, trying again' % (__name__, url))
                    continue
                else:
                    error_string = e.output.rstrip()

                print('%s: %s %s downloading %s.' % (__name__, error_message, error_string, url))
            break
        return False
class UrlLib2Downloader():
    """Downloader backed by urllib.request (py3) / urllib2 (py2), reached
    through the module-level ``url_req``/``url_err`` aliases."""

    def __init__(self, settings):
        self.settings = settings

    def download(self, url, error_message, timeout, tries):
        """Fetch `url`, retrying up to `tries` times on 503s and timeouts;
        return the response body (bytes) or False on failure."""
        http_proxy = self.settings.get('http_proxy')
        https_proxy = self.settings.get('https_proxy')
        if http_proxy or https_proxy:
            proxies = {}
            if http_proxy:
                proxies['http'] = http_proxy
                # Reuse the HTTP proxy for HTTPS unless one is given.
                if not https_proxy:
                    proxies['https'] = http_proxy
            if https_proxy:
                proxies['https'] = https_proxy
            proxy_handler = url_req.ProxyHandler(proxies)
        else:
            proxy_handler = url_req.ProxyHandler()
        handlers = [proxy_handler]

        # NOTE(review): certificate verification below is disabled
        # (commented out upstream); HTTPS responses are not authenticated.
        # secure_url_match = re.match('^https://([^/]+)', url)
        # if secure_url_match != None:
        #    secure_domain = secure_url_match.group(1)
        #    bundle_path = self.check_certs(secure_domain, timeout)
        #    if not bundle_path:
        #        return False
        #    handlers.append(VerifiedHTTPSHandler(ca_certs=bundle_path))
        url_req.install_opener(url_req.build_opener(*handlers))

        while tries > 0:
            tries -= 1
            try:
                request = url_req.Request(url, headers={"User-Agent":
                    "Emmet PyV8 Loader"})
                http_file = url_req.urlopen(request, timeout=timeout)
                return http_file.read()

            except url_err.HTTPError as e:
                # Bitbucket and Github ratelimit using 503 a decent amount
                if str(e.code) == '503':
                    print('%s: Downloading %s was rate limited, trying again' % (__name__, url))
                    continue
                print('%s: %s HTTP error %s downloading %s.' % (__name__, error_message, str(e.code), url))

            except url_err.URLError as e:
                # Bitbucket and Github timeout a decent amount
                if str(e.reason) == 'The read operation timed out' or \
                    str(e.reason) == 'timed out':
                    print('%s: Downloading %s timed out, trying again' % (__name__, url))
                    continue
                print('%s: %s URL error %s downloading %s.' % (__name__, error_message, str(e.reason), url))
            break
        return False
class PyV8Loader(threading.Thread):
    """Background thread that downloads the PyV8 binary bundle for `arch`
    into `download_path` (as pack.zip) when a newer build is available.

    Exit codes after run(): 0 = ok / nothing to do, 1 = package list
    empty, 2 = no bundle for this arch, 3 = bundle download failed,
    4 = package-list download raised.  On success, ``result`` holds the
    sha of the downloaded bundle.
    """

    def __init__(self, arch, download_path, config, delegate=None):
        self.arch = arch
        self.config = config
        self.download_path = download_path
        self.exit_code = 0
        self.result = None
        self.delegate = delegate or LoaderDelegate()
        # Marker attribute used by load() to detect and reuse an already
        # running loader thread.
        self.is_pyv8_thread = True
        threading.Thread.__init__(self)

        self.delegate.log('Creating thread')

    def download_url(self, url, error_message):
        """Download `url` with the best available backend; return the body
        bytes or False."""
        # TODO add settings
        has_ssl = 'ssl' in sys.modules and hasattr(url_req, 'HTTPSHandler')
        is_ssl = re.search('^https://', url) != None
        # BUGFIX: `downloader` must be pre-initialized; when neither curl
        # nor wget could be found below, every loop iteration raised
        # BinaryNotFoundError and the old code hit a NameError on the
        # `if not downloader` check instead of reporting the failure.
        downloader = None

        if (is_ssl and has_ssl) or not is_ssl:
            downloader = UrlLib2Downloader(self.delegate.settings)
        else:
            # No SSL support in this interpreter: fall back to CLI tools.
            for downloader_class in [CurlDownloader, WgetDownloader]:
                try:
                    downloader = downloader_class(self.delegate.settings)
                    break
                except BinaryNotFoundError:
                    pass

        if not downloader:
            self.delegate.log('Unable to download PyV8 binary due to invalid downloader')
            return False

        timeout = self.delegate.settings.get('timeout', 60)
        return downloader.download(url.replace(' ', '%20'), error_message, timeout, 3)

    def run(self):
        """Thread body: fetch the package index, pick the bundle for this
        arch, and save it as pack.zip unless it is already current."""
        # get list of available packages first
        self.delegate.log('Loading %s' % PACKAGES_URL)
        try:
            packages = self.download_url(PACKAGES_URL, 'Unable to download packages list.')
        except Exception as e:
            self.delegate.log('Unable to download file: %s' % e)
            self.exit_code = 4
            return

        if not packages:
            self.exit_code = 1
            return

        if isinstance(packages, bytes):
            packages = packages.decode('utf-8')

        files = json.loads(packages)

        # find package for current architecture
        cur_item = None
        bundle_name = 'pyv8-%s.zip' % self.arch
        for item in files:
            if bundle_name == item['name']:
                cur_item = item
                break

        if not cur_item:
            self.delegate.log('Unable to find binary for %s architecture' % self.arch)
            self.exit_code = 2
            return

        if cur_item['sha'] == self.config['last_id']:
            self.delegate.log('You have the most recent PyV8 binary')
            return

        url = 'https://raw.github.com/emmetio/pyv8-binaries/master/%s' % cur_item['name']
        self.delegate.log('Loading PyV8 binary from %s' % url)
        package = self.download_url(url, 'Unable to download package from %s' % url)
        if not package:
            self.exit_code = 3
            return

        # we should only save downloaded package and delegate module
        # loading/unloading to main thread since improper PyV8 unload
        # may cause editor crash
        try:
            os.makedirs(self.download_path)
        except Exception as e:
            pass

        # Idiom: context manager guarantees the archive is flushed/closed.
        with open(os.path.join(self.download_path, 'pack.zip'), 'wb') as fp:
            fp.write(package)

        self.result = cur_item['sha']
        # Done!
| |
# coding: utf-8
"""
======================
Using display.specshow
======================
This notebook gives a more in-depth demonstration of all things that `specshow`
can do to help generate beautiful visualizations of spectro-temporal data.
"""
# Code source: Brian McFee
# License: ISC
# sphinx_gallery_thumbnail_number = 6
# %%
# All of librosa's plotting functions rely on matplotlib.
# To demonstrate everything we can do, it will help to
# import matplotlib's pyplot API here.
import numpy as np
import matplotlib.pyplot as plt
import librosa
import librosa.display
# %%
# First, we'll load in a demo track
y, sr = librosa.load(librosa.ex('trumpet'))
# %%
# The first thing we might want to do is display an ordinary
# (linear) spectrogram.
# We'll do this by first computing the short-time Fourier
# transform, and then mapping the magnitudes to a decibel
# scale.
#
D = librosa.stft(y) # STFT of y
# S_db: magnitudes in dB, referenced to the spectrogram's peak (ref=np.max)
S_db = librosa.amplitude_to_db(np.abs(D), ref=np.max)
# %%
# If you're familiar with matplotlib already, you may know
# that there are two ways of using it: the `pyplot` interface
# and the object-oriented interface.
# Both are supported by librosa, as we'll show here.
#
# First, the pyplot interface:
plt.figure()
librosa.display.specshow(S_db)
plt.colorbar()
# %%
# And now the object-oriented interface
fig, ax = plt.subplots()
img = librosa.display.specshow(S_db, ax=ax)
fig.colorbar(img, ax=ax)
# %%
# Both figures are identical, but they use different programming
# interfaces to construct. Most people find the pyplot interface
# to be quicker to learn, but the object-oriented interface can
# be a little more flexible for complex figures.
#
# For the remainder of this example, we'll use the object-oriented
# interface.
# %%
# Decorating your plot
# --------------------
# The figure above conveys the basic content of the spectrogram,
# but it's missing axis labels. Without that information, it's
# impossible for a reader to know how to interpret the visualization.
#
# specshow provides many helpers to automatically decorate the axes
# of your plot. For the plot above, our x-axis corresponds to time,
# and our y-axis corresponds to linearly spaced frequencies produced
# by the discrete Fourier transform.
# We can tell specshow to decorate the axes accordingly:
fig, ax = plt.subplots()
img = librosa.display.specshow(S_db, x_axis='time', y_axis='linear', ax=ax)
ax.set(title='Now with labeled axes!')
fig.colorbar(img, ax=ax, format="%+2.f dB")
# %%
# This is much better already! Note that we also added a format string
# to the colorbar, so readers know how to read the color scale.
# %%
# Changing axis scales
# --------------------
# The linear frequency scale is sometimes helpful, but often it can be
# difficult to read. Alternatively, it is common to use a logarithmic
# frequency axis. This has the benefit that every octave occupies
# a constant vertical extent.
#
# We can tell specshow to use log-scaled frequency axes just as above:
fig, ax = plt.subplots()
img = librosa.display.specshow(S_db, x_axis='time', y_axis='log', ax=ax)
ax.set(title='Using a logarithmic frequency axis')
fig.colorbar(img, ax=ax, format="%+2.f dB")
# %%
# Changing the analysis parameters
# --------------------------------
# The default parameter settings used by librosa (e.g., `sr=22050`, `hop_length=512`,
# etc) may not be appropriate for every signal.
# If you change a parameter from its default value, e.g. when computing an STFT,
# you can pass that same parameter to `specshow`.
# This ensures that axis scales (e.g. time or frequency) are computed correctly.
fig, ax = plt.subplots()
D_highres = librosa.stft(y, hop_length=256, n_fft=4096)
S_db_hr = librosa.amplitude_to_db(np.abs(D_highres), ref=np.max)
# Pass the same hop_length used for the STFT so the time ticks line up.
img = librosa.display.specshow(S_db_hr, hop_length=256, x_axis='time', y_axis='log',
                              ax=ax)
ax.set(title='Higher time and frequency resolution')
fig.colorbar(img, ax=ax, format="%+2.f dB")
# %%
# Note that only the parameters which are strictly necessary are supported by
# `specshow`. For example, without the `hop_length`, we wouldn't know how to
# translate frame indices to time indices. However, `n_fft` is *not* needed,
# because it can be inferred from the shape of the input spectrogram.
#
# A full list of the supported parameters is provided in the
# `librosa.display.specshow` documentation.
# %%
# Other types of spectral data
# ----------------------------
# The examples above illustrate how to plot linear spectrograms,
# but librosa provides many kinds of spectral representations:
# Mel-scaled, constant-Q, variable-Q, chromagrams, tempograms, etc.
#
# specshow can plot these just as well. For example, a Mel spectrogram
# can be displayed as follows:
fig, ax = plt.subplots()
M = librosa.feature.melspectrogram(y=y, sr=sr)
# Mel power spectrogram in dB, referenced to its peak.
M_db = librosa.power_to_db(M, ref=np.max)
img = librosa.display.specshow(M_db, y_axis='mel', x_axis='time', ax=ax)
ax.set(title='Mel spectrogram display')
fig.colorbar(img, ax=ax, format="%+2.f dB")
# %%
# Constant-Q plots, and other logarithmically scaled frequency representations
# such as Variable-Q or `iirt` can be decorated using either the frequencies (Hz)
# or their note names in scientific pitch notation:
C = librosa.cqt(y=y, sr=sr)
# Constant-Q magnitudes in dB, referenced to the peak.
C_db = librosa.amplitude_to_db(np.abs(C), ref=np.max)
fig, ax = plt.subplots()
librosa.display.specshow(C_db, y_axis='cqt_hz', x_axis='time', ax=ax)
ax.set(title='Frequency (Hz) axis decoration')
fig, ax = plt.subplots()
librosa.display.specshow(C_db, y_axis='cqt_note', x_axis='time', ax=ax)
ax.set(title='Pitch axis decoration')
# %%
# In the latter case, the underlying data representation is still measured in
# Hz; only the tick labels are changed.
# %%
# Chroma representations don't have a fixed frequency axis, and instead aggregate
# information across all frequencies corresponding to a given pitch class.
# specshow can plot these too:
chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
fig, ax = plt.subplots()
img = librosa.display.specshow(chroma, y_axis='chroma', x_axis='time', ax=ax)
ax.set(title='Chromagram demonstration')
fig.colorbar(img, ax=ax)
# %%
# If you also happen to know the key of the piece being analyzed, you can
# pass this to specshow and it will spell the notes properly:
fig, ax = plt.subplots()
img = librosa.display.specshow(chroma, y_axis='chroma', x_axis='time',
                               key='Eb:maj', ax=ax)
ax.set(title='Chromagram explicitly in Eb:maj')
fig.colorbar(img, ax=ax)
# %%
# This will also work for 'cqt_note' mode.
# %%
# Indian notation systems
# -----------------------
# The examples above use Western music notation to identify pitch classes, but we can
# also decorate axes with either Hindustani or Carnatic svara classes.
#
# These are specified by using `y_axis='chroma_h'` or `'chroma_c'`, respectively.
#
# Just as with key identification in the chroma example above, you can specify the
# thaat (Hindustani) or melakarta number or name (Carnatic) to notate the plot.
# %%
# For example, the example above is in Eb:maj (or, more accurately, F:dorian),
# which we can also represent in Hindustani notation as Sa=5 (F) and 'kafi' thaat:
fig, ax = plt.subplots()
img = librosa.display.specshow(chroma, y_axis='chroma_h', x_axis='time',
                               Sa=5, thaat='kafi', ax=ax)
ax.set(title='Chromagram with Hindustani notation')
fig.colorbar(img, ax=ax)
# %%
# In Carnatic notation, we would use melakarta #22.
# Note: `thaat` is optional for Hindustani notation, but `mela` is required for
# Carnatic.
fig, ax = plt.subplots()
img = librosa.display.specshow(chroma, y_axis='chroma_c', x_axis='time',
                               Sa=5, mela=22, ax=ax)
ax.set(title='Chromagram with Carnatic notation')
fig.colorbar(img, ax=ax)
# %%
# These notation schemes can also be used in cqt plots by specifying
# `y_axis='cqt_svara'`.
#
# In this mode, `Sa` must be specified in Hz. Carnatic notation is used
# if `mela` is provided, and Hindustani is used if not.
#
# Individual svara are only notated if the display range is sufficiently small,
# so we'll zoom into a single octave for this example.
# Unlike the chroma plots above (where Sa was an index), here Sa is a
# frequency in Hz, as required by cqt_svara mode.
Sa = librosa.note_to_hz('F4')
fig, ax = plt.subplots()
librosa.display.specshow(C_db, y_axis='cqt_svara', Sa=Sa, x_axis='time', ax=ax)
ax.set(title='Hindustani decoration',
       ylim=[Sa, 2*Sa])
fig, ax = plt.subplots()
librosa.display.specshow(C_db, y_axis='cqt_svara', Sa=Sa, mela=22, x_axis='time', ax=ax)
ax.set(title='Carnatic decoration',
       ylim=[Sa, 2*Sa])
# %%
# Non-spectral data
# -----------------
# specshow can also be used for data that isn't exactly spectro-temporal.
# One common application is recurrence (self-similarity) plots, which
# are time-by-time, as illustrated below.
# R: time-by-time affinity (self-similarity) matrix.
R = librosa.segment.recurrence_matrix(chroma, mode='affinity')
fig, ax = plt.subplots()
img = librosa.display.specshow(R, y_axis='time', x_axis='time', ax=ax)
ax.set(title='Recurrence / self-similarity')
fig.colorbar(img, ax=ax)
# %%
# In this example, notice that we used 'time' for both axis labels.
# In general, any of the supported modes can be used for either axis.
# For example, we could also plot the chroma covariance plot with
# chroma decorations on each axis:
ccov = np.cov(chroma)
fig, ax = plt.subplots()
img = librosa.display.specshow(ccov, y_axis='chroma', x_axis='chroma',
                               key='Eb:maj', ax=ax)
ax.set(title='Chroma covariance')
fig.colorbar(img, ax=ax)
# %%
# Certain plots (e.g. covariance, self-similarity) are automatically
# squared by `specshow`. To override that, pass `auto_scale=False`.
# %%
# Color maps
# ----------
# You may have noticed that the color mappings for the images above
# were selected automatically by `specshow`.
# This is done by `librosa.display.cmap` according to the following heuristic:
#
# - If the data is boolean, use black-and-white
# - If the data is (mostly) positive or (mostly) negative, use a sequential
# colormap
# - If the data contains both positive and negative values, use a diverging
# colormap.
#
# The default sequential colormap is 'magma', which is perceptually uniform and
# converts gracefully to grayscale.
#
# You can always override this automatic colormap selection by setting an
# explicit `cmap`:
fig, ax = plt.subplots()
img = librosa.display.specshow(S_db, cmap='gray_r', y_axis='log', x_axis='time', ax=ax)
ax.set(title='Inverted grayscale')
fig.colorbar(img, ax=ax, format="%+2.f dB")
# %%
# `specshow` uses `matplotlib.pyplot.pcolormesh` to generate the underlying image.
# Any parameters to `pcolormesh` can be passed through from `specshow`, for example,
# to set explicit bounds on the minimum and maximum ranges for colors.
# This can be helpful when centering divergent colormaps around 0 (or some other
# reference point).
max_var = np.max(np.abs(ccov))
fig, ax = plt.subplots()
# Symmetric vmin/vmax keep zero at the center of the diverging colormap.
img = librosa.display.specshow(ccov, vmin=-max_var, vmax=max_var,
                               y_axis='chroma', x_axis='chroma',
                               key='Eb:maj', ax=ax)
ax.set(title='Chroma covariance')
fig.colorbar(img, ax=ax)
# %%
# Multiple plots
# --------------
# Often, we'll want to show multiple synchronized features simultaneously.
# This can be done using matplotlib's `subplot` mechanism and sharing axes.
# There are many examples of this throughout the librosa documentation, but
# here we'll go through it step by step.
# Construct a subplot grid with 3 rows and 1 column, sharing the x-axis
# Note: sharex=True links the time axes, so the xlim set at the end
# applies to all three rows at once.
fig, ax = plt.subplots(nrows=3, ncols=1, sharex=True)
# On the first subplot, show the original spectrogram
img1 = librosa.display.specshow(S_db, x_axis='time', y_axis='log', ax=ax[0])
ax[0].set(title='STFT (log scale)')
# On the second subplot, show the mel spectrogram
img2 = librosa.display.specshow(M_db, x_axis='time', y_axis='mel', ax=ax[1])
ax[1].set(title='Mel')
# On the third subplot, show the chroma features
img3 = librosa.display.specshow(chroma, x_axis='time', y_axis='chroma',
                                key='Eb:maj', ax=ax[2])
ax[2].set(title='Chroma')
# To eliminate redundant axis labels, we'll use "label_outer" on all subplots:
for ax_i in ax:
    ax_i.label_outer()
# And we can share colorbars:
fig.colorbar(img1, ax=[ax[0], ax[1]])
# Or have individual colorbars:
fig.colorbar(img3, ax=[ax[2]])
# We can then even do fancy things like zoom into a particular time and frequency
# region. Since the axes are shared, this will apply to all three subplots at once.
ax[0].set(xlim=[1, 3]) # Zoom to seconds 1-3
# %%
# Non-uniform axes
# ----------------
# All of the examples so far have used either uniformly, linearly, or geometrically
# spaced axes. But sometimes, we have non-uniform sampling of data, and we'd like
# to plot it in natural coordinates.
#
# One example of this is when using beat-synchronous features in the common case
# where the tempo is not exactly fixed. To demonstrate this, we'll use a longer
# example clip.
#
# To specify non-uniform axis sampling, you will need to provide the `x_coords`
# (or `y_coords`) array indicating the position of each sample, as demonstrated
# below.
y, sr = librosa.load(librosa.ex('nutcracker'))
chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
# beats contains the frame indices of each detected beat
# for synchronization and visualization, we'll need to expand this
# to cover the limits of the data. This can be done as follows:
beats = librosa.util.fix_frames(beats, x_min=0, x_max=chroma.shape[1])
# Now beat-synchronize the chroma features
chroma_sync = librosa.util.sync(chroma, beats, aggregate=np.median)
# For visualization, we can convert to time (in seconds)
beat_times = librosa.frames_to_time(beats)
# We'll plot the synchronized and unsynchronized features next
# to each other
fig, ax = plt.subplots(nrows=2, sharex=True)
img = librosa.display.specshow(chroma, y_axis='chroma', x_axis='time', ax=ax[0],
                               key='Eb:maj')
ax[0].set(title='Uniform time sampling')
ax[0].label_outer()
# x_coords supplies the non-uniform positions (beat times, in seconds)
# for each synchronized column.
librosa.display.specshow(chroma_sync, y_axis='chroma', x_axis='time',
                         x_coords=beat_times, ax=ax[1], key='Eb:maj')
ax[1].set(title='Beat-synchronous sampling')
fig.colorbar(img, ax=ax)
# For clarity, we'll zoom in on a 15-second patch
ax[1].set(xlim=[10, 25])
# %%
# Conclusion
# ----------
# This series of examples demonstrates most of the functionality of
# `librosa.display.specshow`, but it does not exhaustively show
# every option, e.g., for axis decoration.
# Interested readers should look through the rest of the API
# documentation to see how these other options can be used
# effectively.
| |
""" Unit tests for the txOauth2 module. """
import time
from uuid import uuid4
try:
from urlparse import urlparse, parse_qs
except ImportError:
# noinspection PyUnresolvedReferences
from urllib.parse import urlparse, parse_qs
try:
# noinspection PyProtectedMember
from base64 import encodebytes as _encodeBase64
except ImportError:
# noinspection PyProtectedMember
from base64 import encodestring as _encodeBase64
from twisted.trial.unittest import TestCase
from twisted.internet.defer import succeed, inlineCallbacks, returnValue
from twisted.web import server
from twisted.web.test.test_web import DummyRequest
from txoauth2 import GrantTypes
from txoauth2.token import TokenFactory, UserPasswordManager, PersistentStorage
from txoauth2.clients import ClientStorage, PasswordClient
class classProperty(object): # pylint: disable=invalid-name
    """ @property for class variables. """
    def __init__(self, func):
        # Wrapping in classmethod makes __get__ bind the owning class
        # rather than an instance.
        self.func = classmethod(func)
    def __get__(self, *args):
        # noinspection PyUnresolvedReferences
        # Bind through the classmethod descriptor, then call immediately
        # so attribute access yields the computed value.
        return self.func.__get__(*args)()
class TwistedTestCase(TestCase):
    """ An abstract base class for the test cases. """
    longMessage = True
    @classProperty
    def __test__(self):
        # pylint: disable=no-member
        # NOTE(review): presumably consumed by the test collector so that
        # abstract base classes are not collected as tests -- confirm.
        return not (self.__name__.startswith('Abstract') or self.__name__ == 'TwistedTestCase')
class MockRequest(DummyRequest):
    """ A request that can be used for testing. """
    def __init__(self, method, url, arguments=None, headers=None, isSecure=True):
        """
        :param method: The HTTP method (str or bytes).
        :param url: The request url (str or bytes); its query string is
                    parsed into request arguments.
        :param arguments: Optional dict of request arguments.
        :param headers: Optional dict of request headers.
        :param isSecure: Whether the request uses a secure transport.
        """
        url = ensureByteString(url)
        method = ensureByteString(method)
        parsedUrl = urlparse(url)
        super(MockRequest, self).__init__(parsedUrl.path.split(b'/'))
        self.uri = url
        self.user = b''
        self.password = b''
        self._isSecure = isSecure
        self.method = method
        if headers is not None:
            for key, value in headers.items():
                self.requestHeaders.addRawHeader(key, value)
        if arguments is not None:
            for key, value in arguments.items():
                self.addArg(key, value)
        # Query-string parameters are exposed as request arguments too.
        for key, value in parse_qs(parsedUrl.query).items():
            self.addArg(key, value)
    def addArg(self, name, value):
        """
        Add an argument to the request. List values are added one by one;
        repeated names accumulate into a list.
        :param name: The name of the argument
        :param value: The value of the argument.
        """
        name = ensureByteString(name)
        if isinstance(value, list):
            for val in value:
                self.addArg(name, val)
        elif name in self.args:
            self.args[name].append(ensureByteString(value))
        else:
            super(MockRequest, self).addArg(name, ensureByteString(value))
    def addAuthorization(self, username, password, authType='Basic'):
        """
        Add authorization to the request (stores the credentials and sets
        the Authorization header with base64-encoded user:password).
        :param username: The username.
        :param password: The password.
        :param authType: The type of authorization.
        """
        self.user = ensureByteString(username)
        self.password = ensureByteString(password)
        # pylint: disable=deprecated-method
        self.setRequestHeader(b'Authorization', authType.encode('utf-8') + b' ' +
                              _encodeBase64(self.user + b':' + self.password))
    def getUser(self):
        """
        :return: The user authenticated by the request or None.
        """
        return self.user
    def getPassword(self):
        """
        :return: The password transmitted with the request or None.
        """
        return self.password
    def getResponse(self):
        """
        :return: The data that has been written to the request as a response.
        """
        return b''.join(self.written)
    def prePathURL(self):
        """
        :return: The pre path url of the request.
        """
        # Scheme depends on the transport security of this mock request.
        transport = b'https' if self.isSecure() else b'http'
        return transport + b'://server.com/' + self.uri
    def isSecure(self):
        """
        :return: Whether the request is made over a secure transport.
        """
        return self._isSecure
    def getResponseHeader(self, name):
        """
        :param name: The name of the response header (case-insensitive).
        :return: The value of the response header.
        """
        return self.responseHeaders.getRawHeaders(name.lower(), [None])[0]
    def setRequestHeader(self, name, value):
        """
        :param name: The name of the header.
        :param value: The value of the header.
        """
        return self.requestHeaders.addRawHeader(name, value)
class MockSite(server.Site):
    """ A site that can be used for testing. """
    def makeRequest(self, request):
        """
        Execute a request for this site.
        :param request: The request to execute.
        :return: The result of the request.
        """
        resource = self.getResourceFor(request)
        return self._render(resource, request)
    @inlineCallbacks
    def makeSynchronousRequest(self, request):
        """
        Make a synchronous request to the site.
        :param request: The request.
        :return: The result of the request.
        """
        result = yield self.makeRequest(request)
        returnValue(result)
    @staticmethod
    def _render(resource, request):
        """
        Execute the rendering of a request.
        :param resource: The resource to render.
        :param request: The request.
        :return: The result.
        """
        result = resource.render(request)
        if isinstance(result, bytes):
            # Synchronous result: write it out and finish the request.
            request.write(result)
            request.finish()
            return succeed(None)
        elif result is server.NOT_DONE_YET:
            if request.finished:
                return succeed(result)
            # Asynchronous rendering: resolve when the request finishes.
            return request.notifyFinish()
        raise ValueError("Unexpected return value: {result!r}".format(result=result))
class TestTokenFactory(TokenFactory):
    """ A token factory that can be used for tests.

    Expected tokens and their generateToken parameters are enqueued with
    expectTokenRequest and consumed in FIFO order. When generateToken is
    called without a queued expectation, the request is recorded and can
    be verified afterwards with expectedTokenRequest.
    """
    # NOTE: these are class-level until reset() rebinds them per instance;
    # call reset(testCase) at the start of every test.
    _tokens = []
    _requestedTokens = []
    _testCase = None
    def generateToken(self, lifetime, client, scope, additionalData=None):
        if len(self._tokens) == 0:
            # No expectation queued: record the request for later verification.
            token = str(uuid4())
            self._requestedTokens.append((token, lifetime, client, scope, additionalData))
        else:
            token, expectedLifetime, expectedClient, expectedScope, expectedAdditionalData,\
                validScope = self._tokens.pop(0)
            self._validateParameter(token, lifetime, expectedLifetime, client, expectedClient,
                                    scope, expectedScope, additionalData, expectedAdditionalData)
            if not validScope:
                raise ValueError('scope')
        return token
    def expectTokenRequest(self, token, lifetime, client, scope,
                           additionalData=None, validScope=True):
        """
        Enqueue a token and its expected parameters.
        The token is returned by the generateToken method after it has checked
        that it was called with the same parameters that are supplied to this call.
        Tokens are used in the order they are expected.
        :param token: The token that should get returned from the expected generateToken call.
        :param lifetime: The lifetime that should get passed to the generateToken function.
        :param client: The client that should get passed to the generateToken function.
        :param scope: The scope that should get passed to the generateToken function.
        :param additionalData: The additional data that should get
                               passed to the generateToken function.
        :param validScope: If the scope should be treated as invalid.
        """
        self._tokens.append((token, lifetime, client, scope, additionalData, validScope))
    def expectedTokenRequest(self, lifetime, client, scope, additionalData=None):
        """
        Ensure that the generateToken method was called with the expected parameters.
        :param lifetime: The lifetime that should have been passed to the generateToken function.
        :param client: The client that should have been passed to the generateToken function.
        :param scope: The scope that should have been passed to the generateToken function.
        :param additionalData: The additional data that should have been
                               passed to the generateToken function.
        :return: The token that was returned from the generateToken call.
        """
        token, actualLifetime, actualClient, actualScope, actualAdditionalData\
            = self._requestedTokens.pop(0)
        self._validateParameter(token, actualLifetime, lifetime, actualClient, client, actualScope,
                                scope, actualAdditionalData, additionalData)
        return token
    def assertAllTokensRequested(self):
        """ Assert that all expected tokens have been requested from the token factory. """
        self._testCase.assertTrue(
            len(self._tokens) == 0,
            msg='Not all expected tokens have been requested from the token factory: '
                '{tokens}'.format(tokens=', '.join(data[0] for data in self._tokens)))
        # Fixed: report the surplus *requested* tokens here (the original
        # message formatted self._tokens, which is empty in this branch).
        self._testCase.assertTrue(
            len(self._requestedTokens) == 0,
            msg='More tokens have been requested from the token factory than expected: '
                '{tokens}'.format(tokens=', '.join(data[0] for data in self._requestedTokens)))
    def reset(self, testCase):
        """
        Reset the token factory.
        :param testCase: The current test case.
        """
        self._tokens = []
        self._requestedTokens = []
        self._testCase = testCase
    def _validateParameter(self, token, lifetime, expectedLifetime, client, expectedClient, scope,
                           expectedScope, additionalData, expectedAdditionalData):
        """ Validate that the actual parameters to generateToken match the expected. """
        self._testCase.assertEquals(
            lifetime, expectedLifetime,
            msg='generateToken was called with a different lifetime than '
                'expected for the requested token {token}'.format(token=token))
        assertClientEquals(self._testCase, client, expectedClient,
                           message='generateToken was called with a different client than '
                                   'expected for the requested token {token}'.format(token=token))
        self._testCase.assertListEqual(
            scope, expectedScope,
            msg='generateToken was called with a different scope than '
                'expected for the requested token {token}'.format(token=token))
        self._testCase.assertEquals(
            additionalData, expectedAdditionalData,
            msg='generateToken was called with different additional data than '
                'expected for the requested token {token}'.format(token=token))
class TestClientStorage(ClientStorage):
    """ A client storage that can be used for tests. """
    # NOTE: class-level dict, shared between instances of this storage.
    _clients = {}
    def addClient(self, client):
        """
        Add a new client to the storage.
        :param client: The new client.
        """
        self._clients[client.id] = client
    def getClient(self, clientId):
        # Raises KeyError for unknown client ids.
        return self._clients[clientId]
class TestPasswordManager(UserPasswordManager):
    """ A password manager that can be used for tests.

    Expected credentials are enqueued with expectAuthenticateRequest and
    consumed (one use each) by authenticate. Usernames and passwords are
    expected to be byte strings (error messages concatenate bytes).
    """
    _passwords = {}
    # Sentinel: an authenticate call for this user is expected to fail.
    INVALID_PASSWORD = object()
    def authenticate(self, username, password):
        # Each expected credential may only be used once (pop).
        psw = self._passwords.pop(username, None)
        if psw is None:
            raise AssertionError(b'Got an authenticate request for an unexpected user ' + username)
        if psw is not self.INVALID_PASSWORD and psw != password:
            raise AssertionError(b'Got an authenticate request for ' + username + b' with an '
                                 b'invalid password: Expected ' + psw + b', got ' + password)
        # False when the sentinel was queued, True for a matching password.
        return psw == password
    def expectAuthenticateRequest(self, username, password):
        """
        Enqueue an expected authentication request.
        :param username: The expected username.
        :param password: The expected password.
        """
        self._passwords[username] = password
    def allPasswordsChecked(self):
        """
        :return: Whether or not all expected passwords have been checked via authenticate.
        """
        passwordsLeft = len(self._passwords)
        self._passwords.clear()
        return passwordsLeft == 0
class TestPersistentStorage(PersistentStorage):
    """ A persistent storage that can be used in tests. """
    _data = {}
    _expireTime = {}
    def pop(self, key):
        # Remove and return the stored data; drop its expire time as well.
        del self._expireTime[key]
        return self._data.pop(key)
    def getExpireTime(self, key):
        """
        :param key: The data key.
        :return: The expireTime of the data.
        """
        return self._expireTime[key]
    def put(self, key, data, expireTime=None):
        # Store the remaining lifetime in whole seconds relative to now
        # (or None when no expireTime was given); data is shallow-copied.
        self._expireTime[key] = None if expireTime is None else expireTime - int(time.time())
        self._data[key] = dict(data)
def getTestPasswordClient(clientId=None, authorizedGrantTypes=None):
    """
    :param clientId: The client id or None for a random client id.
    :param authorizedGrantTypes: The grant types the clients will be authorized to use,
                                 None for all.
    :return: A dummy password client that can be used in the tests.
    """
    if clientId is None:
        clientId = str(uuid4())
    if authorizedGrantTypes is None:
        # noinspection PyTypeChecker
        authorizedGrantTypes = list(GrantTypes)
    # Redirect uri points to a nonexistent host; the secret is a fixed value.
    return PasswordClient(
        clientId, ['https://return.nonexistent'], authorizedGrantTypes, secret='ClientSecret')
def assertClientEquals(testCase, client, expectedClient, message):
    """
    Assert that the client equals the expected client.
    Every attribute of the expected client must exist on the client
    and hold an equal value.
    :param testCase: The current test case.
    :param client: The client to compare.
    :param expectedClient: The client to compare the first client against.
    :param message: The assertion message.
    """
    # Strip a single trailing period so the suffixes below read naturally.
    prefix = (message[:-1] if message.endswith('.') else message) + ': '
    for name, expected in vars(expectedClient).items():
        testCase.assertTrue(
            hasattr(client, name),
            msg=prefix + 'Missing attribute "{name}"'.format(name=name))
        testCase.assertEquals(
            expected, getattr(client, name),
            msg=prefix + 'Attribute "{name}" differs from expected value'.format(name=name))
def ensureByteString(string):
    """
    :param string: A string.
    :return: The string as a byte string (utf-8 encoded when necessary).
    """
    if isinstance(string, bytes):
        return string
    return string.encode('utf-8')
| |
import socket
from os import mkdir
from os.path import join, exists
from sys import platform
from asyncio import sleep
from math import sqrt
from uuid import uuid4
from enum import Enum
from csv import DictReader
from cyrandom import choice, shuffle, uniform
from time import time
from pickle import dump as pickle_dump, load as pickle_load, HIGHEST_PROTOCOL
from geopy import Point
from geopy.distance import distance
from aiopogo import utilities as pgoapi_utils
from pogeo import get_distance
from . import bounds, sanitized as conf
# Maps iPhone model identifiers to their internal hardware codes; used to
# populate the 'model'/'hardware' fields of spoofed device info
# (see get_device_info).
IPHONES = {'iPhone5,1': 'N41AP',
           'iPhone5,2': 'N42AP',
           'iPhone5,3': 'N48AP',
           'iPhone5,4': 'N49AP',
           'iPhone6,1': 'N51AP',
           'iPhone6,2': 'N53AP',
           'iPhone7,1': 'N56AP',
           'iPhone7,2': 'N61AP',
           'iPhone8,1': 'N71AP',
           'iPhone8,2': 'N66AP',
           'iPhone8,4': 'N69AP',
           'iPhone9,1': 'D10AP',
           'iPhone9,2': 'D11AP',
           'iPhone9,3': 'D101AP',
           'iPhone9,4': 'D111AP'}
class Units(Enum):
    """Supported distance units."""
    miles = 1
    kilometers = 2
    meters = 3
def best_factors(n):
    """Return the most square-like factor pair ``(a, b)`` of ``n`` (a <= b).

    Scans downward from ``floor(sqrt(n))`` for the largest divisor; raises
    StopIteration when none is found (e.g. for n <= 0).
    """
    root = int(n ** 0.5)
    return next((i, n // i) for i in range(root, 0, -1) if n % i == 0)
def percentage_split(seq, percentages):
    """Yield consecutive slices of *seq* covering the given fractions.

    The last percentage is padded so the fractions sum to 1.0, ensuring the
    final slice reaches the end of *seq*.

    :param seq: A sliceable sequence.
    :param percentages: Fractions (summing to at most 1.0) of *seq* that
                        each yielded slice should cover. Not modified.
    """
    # Work on a copy: the original implementation mutated the caller's list
    # via ``percentages[-1] += ...``.
    percentages = list(percentages)
    percentages[-1] += 1.0 - sum(percentages)
    prv = 0
    size = len(seq)
    cum_percentage = 0
    for p in percentages:
        cum_percentage += p
        nxt = int(cum_percentage * size)
        yield seq[prv:nxt]
        prv = nxt
def get_start_coords(worker_no, grid=conf.GRID, bounds=bounds):
    """Returns center of square for given worker"""
    # Workers are laid out row-major on a grid[0] x grid[1] grid.
    per_row = int((grid[0] * grid[1]) / grid[0])
    column = worker_no % per_row
    row = worker_no // per_row
    # Step sizes of one grid cell in each direction.
    lat_step = (bounds.south - bounds.north) / grid[0]
    lon_step = (bounds.east - bounds.west) / grid[1]
    start_lat = bounds.north + lat_step * row + lat_step / 2
    start_lon = bounds.west + lon_step * column + lon_step / 2
    return start_lat, start_lon
def float_range(start, end, step):
    """range for floats, also capable of iterating backwards"""
    if start > end:
        # Descending: negate the step once and count down to `end`.
        step = -step
        while end <= start:
            yield start
            start += step
    else:
        while start <= end:
            yield start
            start += step
def get_gains(dist=70):
    """Returns lat and lon gain
    Gain is space between circles.

    :param dist: Circle radius in meters; spacing is derived from it.
    :return: (lat_gain, lon_gain) as degree deltas at the area's center.
    """
    start = Point(*bounds.center)
    # Column spacing and row height derived from the radius; the geometry
    # presumably targets overlapping-circle coverage -- confirm.
    base = dist * sqrt(3)
    height = base * sqrt(3) / 2
    dis_a = distance(meters=base)
    dis_h = distance(meters=height)
    # Convert metric distances into degree deltas by projecting from the
    # center point east (bearing 90) and north (bearing 0).
    lon_gain = dis_a.destination(point=start, bearing=90).longitude
    lat_gain = dis_h.destination(point=start, bearing=0).latitude
    return abs(start.latitude - lat_gain), abs(start.longitude - lon_gain)
def round_coords(point, precision, _round=round):
    """Round a (lat, lon) pair to *precision* decimal places.

    `_round` is bound as a default for a fast local lookup.
    """
    return (_round(point[0], precision),
            _round(point[1], precision))
def get_bootstrap_points(bounds):
    """Return a shuffled list of (lat, lon) scan points covering *bounds*.

    Point spacing comes from get_gains(conf.BOOTSTRAP_RADIUS); every other
    row is shifted by half a longitude step. Multi-polygon bounds are
    handled recursively, one polygon at a time.
    """
    coords = []
    if bounds.multi:
        for b in bounds.polygons:
            coords.extend(get_bootstrap_points(b))
        return coords
    lat_gain, lon_gain = get_gains(conf.BOOTSTRAP_RADIUS)
    west, east = bounds.west, bounds.east
    bound = bool(bounds)
    for map_row, lat in enumerate(
        float_range(bounds.south, bounds.north, lat_gain)
    ):
        row_start_lon = west
        if map_row % 2 != 0:
            # Offset odd rows by half a step for tighter coverage.
            row_start_lon -= 0.5 * lon_gain
        for lon in float_range(row_start_lon, east, lon_gain):
            point = lat, lon
            # Falsy bounds mean "unbounded": accept every point.
            if not bound or point in bounds:
                coords.append(point)
    shuffle(coords)
    return coords
def get_device_info(account):
    """Build the device_info dict for an account's (spoofed) iPhone.

    If the account lacks valid 'model'/'iOS' fields, fresh device info is
    generated and the lookup is retried.
    """
    info = {'brand': 'Apple',
            'device': 'iPhone',
            'manufacturer': 'Apple'}
    try:
        ios_version = account['iOS']
        # iOS 10+ reports its product name as plain 'iOS'.
        info['product'] = 'iOS' if ios_version.startswith('1') else 'iPhone OS'
        info['hardware'] = account['model'] + '\x00'
        info['model'] = IPHONES[account['model']] + '\x00'
    except (KeyError, AttributeError):
        # Missing or malformed device fields: regenerate them and retry.
        return get_device_info(generate_device_info(account))
    info['version'] = account['iOS']
    info['device_id'] = account['id']
    return info
def generate_device_info(account):
    """Assign a random but plausible model/iOS/id combination to *account*."""
    ios8 = ('8.0', '8.0.1', '8.0.2', '8.1', '8.1.1', '8.1.2', '8.1.3', '8.2', '8.3', '8.4', '8.4.1')
    ios9 = ('9.0', '9.0.1', '9.0.2', '9.1', '9.2', '9.2.1', '9.3', '9.3.1', '9.3.2', '9.3.3', '9.3.4', '9.3.5')
    # 10.0 was only for iPhone 7 and 7 Plus, and is rare
    ios10 = ('10.0.1', '10.0.2', '10.0.3', '10.1', '10.1.1', '10.2', '10.2.1', '10.3', '10.3.1', '10.3.2', '10.3.3')
    model = choice(tuple(IPHONES.keys()))
    account['model'] = model
    account['id'] = uuid4().hex
    if model in ('iPhone9,1', 'iPhone9,2', 'iPhone9,3', 'iPhone9,4'):
        # iPhone 7 family shipped with iOS 10.
        versions = ios10
    elif model in ('iPhone8,1', 'iPhone8,2'):
        versions = ios9 + ios10
    elif model == 'iPhone8,4':
        # iPhone SE started on 9.3
        versions = ('9.3', '9.3.1', '9.3.2', '9.3.3', '9.3.4', '9.3.5') + ios10
    else:
        versions = ios8 + ios9 + ios10
    account['iOS'] = choice(versions)
    return account
def create_account_dict(account):
    """Normalize a raw account tuple/list into the account dict format.

    Accepted lengths:
      1: username only (conf.PASS and conf.PROVIDER must be set)
      3: username, password, provider
      4: username + device info (conf.PASS and conf.PROVIDER must be set)
      6: username, password, provider + device info (model, iOS, id)

    Missing device info is generated randomly.

    Raises:
        TypeError: if *account* is not a tuple or list.
        ValueError: if the length is unsupported or required config is missing.
    """
    if isinstance(account, (tuple, list)):
        length = len(account)
    else:
        raise TypeError('Account must be a tuple or list.')
    if length not in (1, 3, 4, 6):
        # Fixed: the old message claimed only 3 or 6 values were accepted,
        # contradicting the check above.
        raise ValueError(
            'Each account should have 1, 3, 4, or 6 values: username, '
            'optionally password and provider, optionally device info '
            '(model, iOS version, device id).')
    if length in (1, 4) and (not conf.PASS or not conf.PROVIDER):
        raise ValueError('No default PASS or PROVIDER are set.')
    entry = {'username': account[0]}
    if length in (1, 4):
        entry['password'], entry['provider'] = conf.PASS, conf.PROVIDER
    else:
        entry['password'], entry['provider'] = account[1:3]
    if length in (4, 6):
        entry['model'], entry['iOS'], entry['id'] = account[-3:]
    else:
        entry = generate_device_info(entry)
    # Bookkeeping fields used by the workers.
    entry['time'] = 0
    entry['captcha'] = False
    entry['banned'] = False
    return entry
def accounts_from_config(pickled_accounts=None):
    """Build the accounts dict from conf.ACCOUNTS, reusing pickled state.

    Usernames already present in *pickled_accounts* keep their saved state;
    password/provider are refreshed from config entries that carry them
    (length 3 or 6). New usernames get a freshly created account dict.
    """
    accounts = {}
    for entry in conf.ACCOUNTS:
        username = entry[0]
        if pickled_accounts and username in pickled_accounts:
            accounts[username] = pickled_accounts[username]
            if len(entry) in (3, 6):
                accounts[username]['password'] = entry[1]
                accounts[username]['provider'] = entry[2]
        else:
            accounts[username] = create_account_dict(entry)
    return accounts
def accounts_from_csv(new_accounts, pickled_accounts):
    """Merge accounts loaded from CSV with previously pickled state.

    A pickled entry's state is reused when the password still matches
    (when it changed, the CSV password wins). Accounts without pickled
    state get default bookkeeping fields and, if any of model/iOS/id is
    missing, generated device info.
    """
    accounts = {}
    for username, account in new_accounts.items():
        pickled = pickled_accounts.get(username) if pickled_accounts else None
        if pickled:
            if pickled['password'] != account['password']:
                # The CSV password is authoritative when it changed.
                del pickled['password']
            account.update(pickled)
            accounts[username] = account
            continue
        account['provider'] = account.get('provider') or 'ptc'
        if not all(account.get(field) for field in ('model', 'iOS', 'id')):
            account = generate_device_info(account)
        account['time'] = 0
        account['captcha'] = False
        account['banned'] = False
        accounts[username] = account
    return accounts
def get_current_hour(now=None, _time=time):
    """Return the epoch timestamp of the start of the hour containing *now*.

    Args:
        now: epoch seconds; defaults to the current time when None.
            (Fixed: a falsy check previously made the valid timestamp 0
            fall through to the current time.)
        _time: clock function, injectable for testing.
    """
    if now is None:
        now = _time()
    return round(now - (now % 3600))
def time_until_time(seconds, seen=None, _time=time):
    """Return how long to wait until second-of-hour *seconds* next occurs.

    Args:
        seconds: target offset within an hour, in seconds.
        seen: current offset within the hour; defaults to now when None.
            (Fixed: a falsy check previously made the valid offset 0 fall
            through to the current time.)
        _time: clock function, injectable for testing.

    Returns:
        Seconds to wait, interpreting *seconds* relative to the previous,
        current or next hour as appropriate.
    """
    current_seconds = _time() % 3600 if seen is None else seen
    if current_seconds > seconds:
        # Already past the target this hour; wait for the next hour's.
        return seconds + 3600 - current_seconds
    if current_seconds + 3600 < seconds:
        # Target refers to the previous hour's cycle.
        return seconds - 3600 - current_seconds
    return seconds - current_seconds
def get_address():
    """Return the address the manager socket should use.

    Preference order: explicit conf.MANAGER_ADDRESS, a named pipe on
    Windows, a unix domain socket where supported, else local TCP.
    """
    if conf.MANAGER_ADDRESS:
        return conf.MANAGER_ADDRESS
    if platform == 'win32':
        return r'\\.\pipe\monocle'
    if hasattr(socket, 'AF_UNIX'):
        return join(conf.DIRECTORY, 'monocle.sock')
    return ('127.0.0.1', 5001)
def load_pickle(name, raise_exception=False):
    """Load `pickles/<name>.pickle` from the working directory.

    Returns None when the file is missing or truncated, unless
    *raise_exception* is set, in which case FileNotFoundError is raised.
    """
    path = join(conf.DIRECTORY, 'pickles', '{}.pickle'.format(name))
    try:
        with open(path, 'rb') as handle:
            return pickle_load(handle)
    except (FileNotFoundError, EOFError):
        if raise_exception:
            raise FileNotFoundError
        return None
def dump_pickle(name, var):
    """Serialize *var* to `pickles/<name>.pickle`, creating the folder."""
    folder = join(conf.DIRECTORY, 'pickles')
    try:
        mkdir(folder)
    except FileExistsError:
        # Folder already there: nothing to do.
        pass
    except Exception as e:
        raise OSError("Failed to create 'pickles' folder, please create it manually") from e
    path = join(folder, '{}.pickle'.format(name))
    with open(path, 'wb') as handle:
        pickle_dump(var, handle, HIGHEST_PROTOCOL)
def load_accounts():
    """Load accounts from CSV or config, reusing the pickle when current.

    The pickled account state is returned as-is when its usernames match
    the configured ones; otherwise the sources are merged and the pickle
    refreshed on disk.
    """
    pickled = load_pickle('accounts')
    if conf.ACCOUNTS_CSV:
        accounts = load_accounts_csv()
        if pickled and set(pickled) == set(accounts):
            return pickled
        accounts = accounts_from_csv(accounts, pickled)
    elif conf.ACCOUNTS:
        if pickled and set(pickled) == set(acc[0] for acc in conf.ACCOUNTS):
            return pickled
        accounts = accounts_from_config(pickled)
    else:
        raise ValueError('Must provide accounts in a CSV or your config file.')
    dump_pickle('accounts', accounts)
    return accounts
def load_accounts_csv():
    """Read conf.ACCOUNTS_CSV into a dict of account dicts keyed by username."""
    path = join(conf.DIRECTORY, conf.ACCOUNTS_CSV)
    accounts = {}
    with open(path, 'rt') as csv_file:
        for row in DictReader(csv_file):
            accounts[row['username']] = dict(row)
    return accounts
def randomize_point(point, amount=0.0003, randomize=uniform):
    '''Return *point* jittered by up to *amount* degrees (~47 meters by default).'''
    lat, lon = point
    jittered_lat = randomize(lat - amount, lat + amount)
    jittered_lon = randomize(lon - amount, lon + amount)
    return jittered_lat, jittered_lon
def calc_pokemon_level(cp_multiplier):
    """Derive a Pokemon's integer level from its CP multiplier.

    Uses a quadratic fit below the 0.734 multiplier and a linear fit above.
    """
    if cp_multiplier < 0.734:
        level = (58.35178527 * cp_multiplier * cp_multiplier
                 - 2.838007664 * cp_multiplier
                 + 0.8539209906)
    else:
        level = 171.0112688 * cp_multiplier - 95.20425243
    # NOTE(review): (round(level) * 2) / 2 collapses to round(level), so
    # half levels are discarded — confirm whether round(level * 2) / 2
    # was intended.
    return int((round(level) * 2) / 2)
| |
'''@file test_reconstruction.py
This file tests the reconstruction on its own by computing the
loss on the test set.'''
import os
from six.moves import configparser
import tensorflow as tf
import numpy as np
from nabu.neuralnetworks.classifiers.asr import asr_factory
from nabu.neuralnetworks import ops
from nabu.processing import feature_reader
# Command-line flag pointing at the experiment directory that holds the
# config files and the trained model checkpoint.
tf.app.flags.DEFINE_string('expdir', 'expdir', 'The experiments directory')
FLAGS = tf.app.flags.FLAGS
def main(_):
    '''Compute the reconstruction loss of a trained model on the test set.

    Reads the experiment configuration from FLAGS.expdir, loads the test
    features (and, for audio reconstruction, the quantized audio samples),
    rebuilds the model graph, restores the trained weights and averages the
    reconstruction loss over the whole test set, printing the result.
    '''
    #read the database config file
    parsed_database_cfg = configparser.ConfigParser()
    parsed_database_cfg.read(os.path.join(FLAGS.expdir, 'database.cfg'))
    database_cfg = dict(parsed_database_cfg.items('database'))

    #read the features config file
    parsed_feat_cfg = configparser.ConfigParser()
    parsed_feat_cfg.read(os.path.join(FLAGS.expdir, 'model', 'features.cfg'))
    feat_cfg = dict(parsed_feat_cfg.items('features'))

    #read the asr config file
    parsed_nnet_cfg = configparser.ConfigParser()
    parsed_nnet_cfg.read(os.path.join(FLAGS.expdir, 'model', 'asr.cfg'))
    nnet_cfg = dict(parsed_nnet_cfg.items('asr'))

    # read the trainer config file
    parsed_trainer_cfg = configparser.ConfigParser()
    parsed_trainer_cfg.read(os.path.join(FLAGS.expdir, 'trainer.cfg'))
    trainer_cfg = dict(parsed_trainer_cfg.items('trainer'))

    # check on what features the reconstruction is made: raw audio samples
    # or the input features themselves
    if 'reconstruction_features' in trainer_cfg:
        if trainer_cfg['reconstruction_features'] == 'audio_samples':
            audio_used = True
        else:
            audio_used = False
    else:
        raise Exception('no reconstruction features specified, something wrong')

    #read the quantization config file if necessary
    if audio_used:
        parsed_quant_cfg = configparser.ConfigParser()
        parsed_quant_cfg.read(os.path.join(FLAGS.expdir,
                                           'model', 'quantization.cfg'))
        quant_cfg = dict(parsed_quant_cfg.items('features'))

    #create a feature reader
    featdir = os.path.join(database_cfg['test_dir'], feat_cfg['name'])

    with open(os.path.join(featdir, 'maxlength'), 'r') as fid:
        max_length_feat = int(fid.read())

    feat_reader = feature_reader.FeatureReader(
        scpfile=os.path.join(featdir, 'feats.scp'),
        cmvnfile=os.path.join(featdir, 'cmvn.scp'),
        utt2spkfile=os.path.join(featdir, 'utt2spk'),
        max_length=max_length_feat)

    #create an audio sample reader if necessary
    if audio_used:
        audiodir = os.path.join(database_cfg['test_dir'], quant_cfg['name'])

        with open(os.path.join(audiodir, 'maxlength'), 'r') as fid:
            max_length_audio = int(fid.read())

        audio_reader = feature_reader.FeatureReader(
            scpfile=os.path.join(audiodir, 'feats.scp'),
            cmvnfile=None,
            utt2spkfile=None,
            max_length=max_length_audio)

    #check number of test examples
    number_examples = feat_reader.num_utt

    # set a batch_size to determine how many test examples are
    # processed in each steps
    # this doesn't really matter, only for memory issues
    # take the same one as used in training
    batch_size = int(trainer_cfg['batch_size'])

    # create a ndarray of all of the features, padded with zeros to the
    # maximum feature length; the true lengths are kept separately
    _, features, _ = feat_reader.get_utt()
    features = features.reshape(1, -1, features.shape[1])
    features_lengths = features.shape[1]*np.ones([1], dtype=np.int32)
    features = np.concatenate([features,
                               np.zeros([features.shape[0],
                                         max_length_feat-features.shape[1],
                                         features.shape[2]])], 1)
    looped = False
    while not looped:
        _, temp, looped = feat_reader.get_utt()
        temp = temp.reshape(1, -1, temp.shape[1])
        features_lengths = np.concatenate(
            [features_lengths, temp.shape[1]*np.ones([1], dtype=np.int32)], 0)
        temp = np.concatenate(
            [temp, np.zeros([temp.shape[0],
                             max_length_feat-temp.shape[1], temp.shape[2]])], 1)
        features = np.concatenate([features, temp], 0)

    # create a ndarray of all of the targets (zero-padded audio samples
    # when reconstructing audio, dummies otherwise)
    if audio_used:
        _, audio, _ = audio_reader.get_utt()
        audio = audio.reshape(1, -1, audio.shape[1])
        audio_lengths = audio.shape[1]*np.ones([1], dtype=np.int32)
        audio = np.concatenate(
            [audio, np.zeros([audio.shape[0],
                              max_length_audio-audio.shape[1],
                              audio.shape[2]])], 1)
        looped = False
        while not looped:
            _, temp, looped = audio_reader.get_utt()
            temp = temp.reshape(1, -1, temp.shape[1])
            audio_lengths = np.concatenate(
                [audio_lengths,
                 temp.shape[1]*np.ones([1], dtype=np.int32)], 0)
            temp = np.concatenate(
                [temp, np.zeros([temp.shape[0],
                                 max_length_audio-temp.shape[1],
                                 temp.shape[2]])], 1)
            audio = np.concatenate([audio, temp], 0)

        # store dimensions
        max_audio_length = audio.shape[1]
    else:
        # placeholders still need to be fed something
        audio = np.zeros([number_examples, 1, 1])
        max_audio_length = 1
        audio_lengths = np.ones([number_examples])

    # store dimensions
    max_feature_length = features.shape[1]
    feature_dim = features.shape[2]

    #create a graph
    graph = tf.Graph()

    with graph.as_default():
        # create the classifier; the output dimension is the number of
        # quantization levels for audio, else the feature dimension
        if audio_used:
            outputdim = int(quant_cfg['quant_levels'])
        else:
            outputdim = feature_dim
        classifier = asr_factory.factory(
            conf=nnet_cfg,
            output_dim=(1, outputdim))

        # create placeholders for reconstruction and features
        features_ph = tf.placeholder(
            tf.float32,
            shape=[batch_size, max_feature_length, feature_dim],
            name='features')
        audio_ph = tf.placeholder(
            tf.int32,
            shape=[batch_size, max_audio_length, 1],
            name='audio')
        audio_lengths_ph = tf.placeholder(
            tf.int32, shape=[batch_size], name='audio_lenght')
        feature_lengths_ph = tf.placeholder(
            tf.int32, shape=[batch_size], name='feat_lenght')

        # decide what to give as targets
        if audio_used:
            rec_ph = audio_ph
            rec_l_ph = audio_lengths_ph
        else:
            rec_ph = features_ph
            # NOTE(review): feature targets are paired with the *audio*
            # lengths here; this looks like it should be
            # feature_lengths_ph — confirm before relying on this path.
            rec_l_ph = audio_lengths_ph

        #create the logits for reconstructed audio samples
        logits, logits_lengths = classifier(
            inputs=features_ph,
            input_seq_length=feature_lengths_ph,
            targets=(None, rec_ph),
            target_seq_length=(None, rec_l_ph),
            is_training=False)

        #compute the loss score
        score = compute_loss((None, rec_ph), logits, logits_lengths,
                             (None, rec_l_ph), audio_used)

        saver = tf.train.Saver(tf.trainable_variables())

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True #pylint: disable=E1101
    config.allow_soft_placement = True

    with tf.Session(graph=graph, config=config) as sess:
        #create a saver and load the model
        saver.restore(sess, os.path.join(FLAGS.expdir, 'model', 'network.ckpt'))

        all_processed = False
        number_batch = 0
        total_elements = 0
        avrg_loss = 0.0
        total_steps = int(np.ceil(number_examples/batch_size))

        # process the loss on the test set batch by batch
        while not all_processed:
            # put a part of the features and audio samples in a batch
            start = number_batch*batch_size
            end = (number_batch+1)*batch_size
            if end >= number_examples:
                end = number_examples
                all_processed = True
            part_features = features[start:end, :, :]
            part_features_lengths = features_lengths[start:end]
            part_audio = audio[start:end, :, :]
            part_audio_lengths = audio_lengths[start:end]

            # pad with zeros if the last batch isn't completely filled
            if all_processed:
                elements_last_batch = end-start
                to_add = batch_size - elements_last_batch
                part_features = np.concatenate(
                    [part_features,
                     np.zeros([to_add, max_feature_length, feature_dim])], 0)
                part_features_lengths = np.concatenate(
                    [part_features_lengths,
                     np.zeros([to_add], dtype=np.int32)], 0)
                part_audio = np.concatenate(
                    [part_audio,
                     np.zeros([to_add, max_audio_length, 1], dtype=np.int32)],
                    0)
                part_audio_lengths = np.concatenate(
                    [part_audio_lengths,
                     np.zeros([to_add], dtype=np.int32)], 0)

            # number of real (non-padding) elements in the current batch
            numel = end-start

            # compute loss on this batch
            loss = sess.run(
                score,
                feed_dict={features_ph:part_features,
                           audio_ph:part_audio,
                           feature_lengths_ph:part_features_lengths,
                           audio_lengths_ph: part_audio_lengths})

            # update the running average, weighting by batch size
            avrg_loss = ((total_elements*avrg_loss + numel*loss)/
                         (numel + total_elements))
            total_elements += numel
            number_batch = number_batch+1

            # print some info about how we're proceeding
            print 'Computing loss on test set: step %d of %d' \
                %(number_batch, total_steps)

    # sanity check: every test example must have been seen exactly once
    if not total_elements == number_examples:
        raise Exception(
            'something went wrong in loop where test loss is calculated')

    # print eventual result
    print '========================================'
    print 'The loss on the test set: %f' % avrg_loss
    print '========================================'
def compute_loss(targets, logits, logit_seq_length,
                 target_seq_length, audio_used):
    '''
    Create the operation that computes the reconstruction loss.

    Args:
        targets: a tuple of targets, the first one being a
            [batch_size, max_target_length] tensor containing the real
            targets, the second one being a [batch_size, max_audioseq_length]
            tensor containing the audio samples or other extra information.
        logits: a tuple of [batch_size, max_logit_length, dim] tensors
            containing the logits for the text and the audio samples.
        logit_seq_length: the length of all the logit sequences as a tuple
            of [batch_size] vectors.
        target_seq_length: the length of all the target sequences as a
            tuple of two [batch_size] vectors, one per element of the
            targets tuple.
        audio_used: whether the reconstruction is based on audio samples
            (True) or on the input features (False).

    Returns:
        a scalar value containing the loss
    '''
    if audio_used:
        # audio reconstruction: cross entropy over the quantization levels
        audio_targets = tf.cast(targets[1], tf.int32)
        audio_logits = logits[1]
        return ops.cross_entropy_integers_logits(
            audio_targets, audio_logits,
            logit_seq_length[1], target_seq_length[1])
    # feature reconstruction: mean squared error against the approximation
    return ops.mse(targets[1], logits[1], target_seq_length[1])
if __name__ == '__main__':
    # tf.app.run parses the command-line flags and then calls main().
    tf.app.run()
| |
import os
import robot
from robot.errors import DataError
from selenium import webdriver
from Selenium2Library import webdrivermonkeypatches
from Selenium2Library.utils import BrowserCache
from Selenium2Library.locators import WindowManager
from keywordgroup import KeywordGroup
# Package root, resolved relative to this module; the bundled default
# Firefox profile lives under <root>/resources/firefoxprofile.
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
FIREFOX_PROFILE_DIR = os.path.join(ROOT_DIR, 'resources', 'firefoxprofile')
# Maps user-supplied browser names (normalized to lowercase without spaces)
# to the _BrowserManagementKeywords factory method that creates them.
BROWSER_NAMES = {'ff': "_make_ff",
                 'firefox': "_make_ff",
                 'ie': "_make_ie",
                 'internetexplorer': "_make_ie",
                 'googlechrome': "_make_chrome",
                 'gc': "_make_chrome",
                 'chrome': "_make_chrome",
                 'opera' : "_make_opera",
                 'phantomjs' : "_make_phantomjs",
                 'htmlunit' : "_make_htmlunit",
                 'htmlunitwithjs' : "_make_htmlunitwithjs",
                 'android': "_make_android",
                 'iphone': "_make_iphone",
                 'safari': "_make_safari"
                }
class _BrowserManagementKeywords(KeywordGroup):
def __init__(self):
    # Keeps every opened browser, addressable by index and optional alias.
    self._cache = BrowserCache()
    self._window_manager = WindowManager()
    # Library-wide execution settings; applied to each newly opened browser.
    self._speed_in_secs = float(0)
    self._timeout_in_secs = float(5)
    self._implicit_wait_in_secs = float(0)
# Public, open and close
def close_all_browsers(self):
    """Closes all open browsers and resets the browser cache.

    After this keyword new indexes returned from `Open Browser` keyword
    are reset to 1. Best used in a test or suite teardown to make sure
    no browser is left open.
    """
    self._debug('Closing all browsers')
    self._cache.close_all()
def close_browser(self):
    """Closes the currently active browser, if any."""
    current = self._cache.current
    if current:
        self._debug('Closing browser with session id %s'
                    % current.session_id)
        self._cache.close()
def open_browser(self, url, browser='firefox', alias=None, remote_url=False,
                 desired_capabilities=None, ff_profile_dir=None):
    """Opens a new browser instance to given URL.

    Returns the index of this browser instance which can be used later to
    switch back to it. Index starts from 1 and is reset back to it when
    `Close All Browsers` keyword is used. See `Switch Browser` for
    example.

    Optional alias is an alias for the browser instance and it can be used
    for switching between browsers (just as index can be used). See `Switch
    Browser` for more details.

    Possible values for `browser` are as follows:

    | firefox          | FireFox   |
    | ff               | FireFox   |
    | internetexplorer | Internet Explorer |
    | ie               | Internet Explorer |
    | googlechrome     | Google Chrome |
    | gc               | Google Chrome |
    | chrome           | Google Chrome |
    | opera            | Opera         |
    | phantomjs        | PhantomJS     |
    | htmlunit         | HTMLUnit      |
    | htmlunitwithjs   | HTMLUnit with Javascipt support |
    | android          | Android       |
    | iphone           | Iphone        |
    | safari           | Safari        |

    Note, that you will encounter strange behavior, if you open
    multiple Internet Explorer browser instances. That is also why
    `Switch Browser` only works with one IE browser at most.
    For more information see:
    http://selenium-grid.seleniumhq.org/faq.html#i_get_some_strange_errors_when_i_run_multiple_internet_explorer_instances_on_the_same_machine

    Optional 'remote_url' is the url for a remote selenium server for example
    http://127.0.0.1/wd/hub. If you specify a value for remote you can
    also specify 'desired_capabilities' which is a string in the form
    key1:val1,key2:val2 that will be used to specify desired_capabilities
    to the remote server. This is useful for doing things like specify a
    proxy server for internet explorer or for specify browser and os if your
    using saucelabs.com. 'desired_capabilities' can also be a dictonary
    (created with 'Create Dictionary') to allow for more complex configurations.

    Optional 'ff_profile_dir' is the path to the firefox profile dir if you
    wish to overwrite the default.
    """
    if remote_url:
        self._info("Opening browser '%s' to base url '%s' through remote server at '%s'"
                   % (browser, url, remote_url))
    else:
        self._info("Opening browser '%s' to base url '%s'" % (browser, url))
    browser_name = browser
    # _make_browser also applies the global speed/timeout/implicit-wait
    # settings to the newly created driver.
    browser = self._make_browser(browser_name, desired_capabilities,
                                 ff_profile_dir, remote_url)
    browser.get(url)
    self._debug('Opened browser with session id %s'
                % browser.session_id)
    return self._cache.register(browser, alias)
def create_webdriver(self, driver_name, alias=None, kwargs=None, **init_kwargs):
    """Creates an instance of a WebDriver.

    Like `Open Browser`, but allows passing arguments to a WebDriver's
    __init__. _Open Browser_ is preferred over _Create Webdriver_ when
    feasible.

    Returns the index of this browser instance which can be used later to
    switch back to it. Index starts from 1 and is reset back to it when
    `Close All Browsers` keyword is used. See `Switch Browser` for
    example.

    `driver_name` must be the exact name of a WebDriver in
    _selenium.webdriver_ to use. WebDriver names include: Firefox, Chrome,
    Ie, Opera, Safari, PhantomJS, and Remote.

    Use keyword arguments to specify the arguments you want to pass to
    the WebDriver's __init__. The values of the arguments are not
    processed in any way before being passed on. For Robot Framework
    < 2.8, which does not support keyword arguments, create a keyword
    dictionary and pass it in as argument `kwargs`. See the
    [http://selenium.googlecode.com/git/docs/api/py/api.html|Selenium API Documentation]
    for information about argument names and appropriate argument values.

    Examples:
    | # use proxy for Firefox     |                |                              |                       |
    | ${proxy}=                   | Evaluate       | sys.modules['selenium.webdriver'].Proxy() | sys, selenium.webdriver |
    | ${proxy.http_proxy}=        | Set Variable   | localhost:8888               |                       |
    | Create Webdriver            | Firefox        | proxy=${proxy}               |                       |
    | # use a proxy for PhantomJS |                |                              |                       |
    | ${service args}=            | Create List    | --proxy=192.168.132.104:8888 |                       |
    | Create Webdriver            | PhantomJS      | service_args=${service args} |                       |

    Example for Robot Framework < 2.8:
    | # debug IE driver |                   |           |       |          |                      |
    | ${kwargs}=        | Create Dictionary | log_level | DEBUG | log_file | %{HOMEPATH}${/}ie.log |
    | Create Webdriver  | Ie                | kwargs=${kwargs} | |          |                      |
    """
    # Fixed: kwargs previously defaulted to a shared mutable dict ({});
    # use None as the default and create a fresh dict per call instead.
    if kwargs is None:
        kwargs = {}
    if not isinstance(kwargs, dict):
        raise RuntimeError("kwargs must be a dictionary.")
    # Merge the explicit kwargs dict into the **init_kwargs, rejecting
    # duplicates so a value cannot be silently overwritten.
    for arg_name in kwargs:
        if arg_name in init_kwargs:
            raise RuntimeError("Got multiple values for argument '%s'." % arg_name)
        init_kwargs[arg_name] = kwargs[arg_name]
    driver_name = driver_name.strip()
    try:
        creation_func = getattr(webdriver, driver_name)
    except AttributeError:
        raise RuntimeError("'%s' is not a valid WebDriver name" % driver_name)
    self._info("Creating an instance of the %s WebDriver" % driver_name)
    driver = creation_func(**init_kwargs)
    self._debug("Created %s WebDriver instance with session id %s" % (driver_name, driver.session_id))
    return self._cache.register(driver, alias)
def switch_browser(self, index_or_alias):
    """Switches between active browsers using index or alias.

    Index is returned from `Open Browser` and alias can be given to it.

    Example:
    | Open Browser        | http://google.com | ff       |
    | Location Should Be  | http://google.com |          |
    | Open Browser        | http://yahoo.com  | ie       | 2nd conn |
    | Location Should Be  | http://yahoo.com  |          |
    | Switch Browser      | 1                 | # index  |
    | Page Should Contain | I'm feeling lucky |          |
    | Switch Browser      | 2nd conn          | # alias  |
    | Page Should Contain | More Yahoo!       |          |
    | Close All Browsers  |                   |          |

    Above example expects that there was no other open browsers when
    opening the first one because it used index '1' when switching to it
    later. If you aren't sure about that you can store the index into
    a variable as below.

    | ${id} =            | Open Browser  | http://google.com | *firefox |
    | # Do something ... |
    | Switch Browser     | ${id}         |                   |          |
    """
    try:
        self._cache.switch(index_or_alias)
        self._debug('Switched to browser with Selenium session id %s'
                    % self._cache.current.session_id)
    except (RuntimeError, DataError):  # RF 2.6 uses RE, earlier DE
        # Normalize the cache's failure into one predictable error type.
        raise RuntimeError("No browser with index or alias '%s' found."
                           % index_or_alias)
# Public, window management
def close_window(self):
    """Closes the currently opened pop-up window."""
    browser = self._current_browser()
    browser.close()
def get_window_identifiers(self):
    """Returns and logs the id attribute of every window known to the browser."""
    ids = self._window_manager.get_window_ids(self._current_browser())
    return self._log_list(ids)
def get_window_names(self):
    """Returns and logs the javascript name of every window known to the browser."""
    names = self._window_manager.get_window_names(self._current_browser())
    # Selenium 1 always reported the main window under this constant
    # value; keep returning it for backward compatibility.
    if names and names[0] == 'undefined':
        names[0] = 'selenium_main_app_window'
    return self._log_list(names)
def get_window_titles(self):
    """Returns and logs the title of every window known to the browser."""
    titles = self._window_manager.get_window_titles(self._current_browser())
    return self._log_list(titles)
def maximize_browser_window(self):
    """Maximizes the currently active browser window."""
    browser = self._current_browser()
    browser.maximize_window()
def get_window_size(self):
    """Returns the current window size as `width` then `height`.

    Example:
    | ${width} | ${height}= | Get Window Size |
    """
    dimensions = self._current_browser().get_window_size()
    return dimensions['width'], dimensions['height']
def set_window_size(self, width, height):
    """Resizes the current window to `width` x `height` pixels.

    Example:
    | Set Window Size | ${800}     | ${600}          |
    | ${width}        | ${height}= | Get Window Size |
    | Should Be Equal | ${width}   | ${800}          |
    | Should Be Equal | ${height}  | ${600}          |
    """
    browser = self._current_browser()
    return browser.set_window_size(width, height)
def select_frame(self, locator):
    """Sets the frame identified by `locator` as the current frame.

    Key attributes for frames are `id` and `name.` See `introduction` for
    details about locating elements.
    """
    self._info("Selecting frame '%s'." % locator)
    frame = self._element_find(locator, True, True)
    self._current_browser().switch_to_frame(frame)
def select_window(self, locator=None):
    """Selects the window found with `locator` as the context of actions.

    If the window is found, all subsequent commands use that window, until
    this keyword is used again. If the window is not found, this keyword fails.

    By default, when a locator value is provided,
    it is matched against the title of the window and the
    javascript name of the window. If multiple windows with
    same identifier are found, the first one is selected.

    Special locator `main` (default) can be used to select the main window.

    It is also possible to specify the approach Selenium2Library should take
    to find a window by specifying a locator strategy:

    | *Strategy* | *Example*                               | *Description*                        |
    | title      | Select Window `|` title=My Document     | Matches by window title              |
    | name       | Select Window `|` name=${name}          | Matches by window javascript name    |
    | url        | Select Window `|` url=http://google.com | Matches by window's current URL      |

    Example:
    | Click Link      | popup_link | # opens new window |
    | Select Window   | popupName  |
    | Title Should Be | Popup Title |
    | Select Window   |            | | # Chooses the main window again |
    """
    self._window_manager.select(self._current_browser(), locator)
def unselect_frame(self):
    """Makes the top frame the current frame again."""
    browser = self._current_browser()
    browser.switch_to_default_content()
# Public, browser/current page properties
def get_location(self):
    """Returns the current browser URL."""
    browser = self._current_browser()
    return browser.get_current_url()
def get_source(self):
    """Returns the entire html source of the current page or frame."""
    browser = self._current_browser()
    return browser.get_page_source()
def get_title(self):
    """Returns the title of the current page."""
    browser = self._current_browser()
    return browser.get_title()
def location_should_be(self, url):
    """Verifies that the current URL is exactly `url`."""
    current = self.get_location()
    if current != url:
        raise AssertionError("Location should have been '%s' but was '%s'"
                             % (url, current))
    self._info("Current location is '%s'." % url)
def location_should_contain(self, expected):
    """Verifies that the current URL contains `expected`."""
    current = self.get_location()
    if expected not in current:
        raise AssertionError("Location should have contained '%s' "
                             "but it was '%s'." % (expected, current))
    self._info("Current location contains '%s'." % expected)
def log_location(self):
    """Logs and returns the current location."""
    current = self.get_location()
    self._info(current)
    return current
def log_source(self, loglevel='INFO'):
    """Logs and returns the entire html source of the current page or frame.

    The `loglevel` argument defines the used log level. Valid log levels
    are `WARN`, `INFO` (default), `DEBUG`, `TRACE` and `NONE` (no logging).
    """
    html = self.get_source()
    self._log(html, loglevel.upper())
    return html
def log_title(self):
    """Logs and returns the title of the current page."""
    current_title = self.get_title()
    self._info(current_title)
    return current_title
def title_should_be(self, title):
    """Verifies that the current page title equals `title`."""
    current = self.get_title()
    if current != title:
        raise AssertionError("Title should have been '%s' but was '%s'"
                             % (title, current))
    self._info("Page title is '%s'." % title)
# Public, navigation
def go_back(self):
    """Simulates the user clicking the "back" button on their browser."""
    browser = self._current_browser()
    browser.back()
def go_to(self, url):
    """Navigates the active browser instance to the provided URL."""
    self._info("Opening url '%s'" % url)
    browser = self._current_browser()
    browser.get(url)
def reload_page(self):
    """Simulates the user reloading the page."""
    browser = self._current_browser()
    browser.refresh()
# Public, execution properties
def get_selenium_speed(self):
    """Gets the delay that is waited after each Selenium command.

    See `Set Selenium Speed` for an explanation.
    """
    return robot.utils.secs_to_timestr(self._speed_in_secs)
def get_selenium_timeout(self):
    """Gets the timeout that is used by various keywords.

    See `Set Selenium Timeout` for an explanation.
    """
    return robot.utils.secs_to_timestr(self._timeout_in_secs)
def get_selenium_implicit_wait(self):
    """Gets the implicit wait used by Selenium.

    See `Set Selenium Implicit Wait` for an explanation.
    """
    return robot.utils.secs_to_timestr(self._implicit_wait_in_secs)
def set_selenium_speed(self, seconds):
    """Sets the delay that is waited after each Selenium command.

    Mainly useful for slowing down test execution so it can be watched.
    `seconds` may be given in Robot Framework time format. Returns the
    previous speed value.

    Example:
    | Set Selenium Speed | .5 seconds |
    """
    previous = self.get_selenium_speed()
    self._speed_in_secs = robot.utils.timestr_to_secs(seconds)
    # Push the new speed to every cached browser.
    for browser in self._cache.browsers:
        browser.set_speed(self._speed_in_secs)
    return previous
def set_selenium_timeout(self, seconds):
    """Sets the timeout used by various keywords.

    There are several `Wait ...` keywords that take timeout as an
    argument. All of these timeout arguments are optional; this keyword
    sets the global default used when the argument is omitted. See
    `introduction` for more information about timeouts. The default
    timeout is 5 seconds, but it can be altered in `importing`.

    The previous timeout value is returned and can be used to restore
    the old value later.

    Example:
    | ${orig timeout} =           | Set Selenium Timeout | 15 seconds |
    | Open page that loads slowly |
    | Set Selenium Timeout        | ${orig timeout}      |
    """
    previous = self.get_selenium_timeout()
    self._timeout_in_secs = robot.utils.timestr_to_secs(seconds)
    # Push the new script timeout to every open browser.
    for browser in self._cache.get_open_browsers():
        browser.set_script_timeout(self._timeout_in_secs)
    return previous
def set_selenium_implicit_wait(self, seconds):
    """Sets Selenium 2's default implicit wait and applies it to all open browsers.

    From the Selenium 2 documentation: 'Sets a sticky timeout to implicitly
    wait for an element to be found, or a command to complete.
    This method only needs to be called one time per session.'

    Returns the previous wait value so it can be restored later.

    Example:
    | ${orig wait} =                | Set Selenium Implicit Wait | 10 seconds |
    | Perform AJAX call that is slow |
    | Set Selenium Implicit Wait    | ${orig wait}               |
    """
    previous = self.get_selenium_implicit_wait()
    self._implicit_wait_in_secs = robot.utils.timestr_to_secs(seconds)
    for browser in self._cache.get_open_browsers():
        browser.implicitly_wait(self._implicit_wait_in_secs)
    return previous
def set_browser_implicit_wait(self, seconds):
    """Sets the implicit wait of the current browser only.

    From the Selenium 2 documentation: 'Sets a sticky timeout to implicitly
    wait for an element to be found, or a command to complete.
    This method only needs to be called one time per session.'

    Example:
    | Set Browser Implicit Wait | 10 seconds |

    See also `Set Selenium Implicit Wait`.
    """
    wait_in_secs = robot.utils.timestr_to_secs(seconds)
    self._current_browser().implicitly_wait(wait_in_secs)
# Private
def _current_browser(self):
    """Return the active browser, or raise if none is open."""
    current = self._cache.current
    if not current:
        raise RuntimeError('No browser is open')
    return current
def _get_browser_creation_function(self, browser_name):
    """Map a user-supplied browser name to its _make_* method, or None."""
    normalized = browser_name.lower().replace(' ', '')
    func_name = BROWSER_NAMES.get(normalized)
    if func_name:
        return getattr(self, func_name)
    return None
def _make_browser(self, browser_name, desired_capabilities=None,
                  profile_dir=None, remote=None):
    """Create, configure and return a WebDriver for `browser_name`."""
    creation_func = self._get_browser_creation_function(browser_name)
    if creation_func is None:
        raise ValueError(browser_name + " is not a supported browser.")
    browser = creation_func(remote, desired_capabilities, profile_dir)
    # Apply the library-wide execution settings to the new browser.
    browser.set_speed(self._speed_in_secs)
    browser.set_script_timeout(self._timeout_in_secs)
    browser.implicitly_wait(self._implicit_wait_in_secs)
    return browser
def _make_ff(self , remote , desired_capabilites , profile_dir):
if not profile_dir: profile_dir = FIREFOX_PROFILE_DIR
profile = webdriver.FirefoxProfile(profile_dir)
if remote:
browser = self._create_remote_web_driver(webdriver.DesiredCapabilities.FIREFOX ,
remote , desired_capabilites , profile)
else:
browser = webdriver.Firefox(firefox_profile=profile)
return browser
    # The following factories all delegate to _generic_make_browser with the
    # appropriate webdriver class and capability set.  Browsers without a
    # dedicated local driver (HtmlUnit, Android, iPhone, ...) use
    # webdriver.Remote.  profile_dir is accepted for interface uniformity but
    # only used by the Firefox factory above.
    def _make_ie(self , remote , desired_capabilities , profile_dir):
        return self._generic_make_browser(webdriver.Ie,
                webdriver.DesiredCapabilities.INTERNETEXPLORER, remote, desired_capabilities)
    def _make_chrome(self , remote , desired_capabilities , profile_dir):
        return self._generic_make_browser(webdriver.Chrome,
                webdriver.DesiredCapabilities.CHROME, remote, desired_capabilities)
    def _make_opera(self , remote , desired_capabilities , profile_dir):
        return self._generic_make_browser(webdriver.Opera,
                webdriver.DesiredCapabilities.OPERA, remote, desired_capabilities)
    def _make_phantomjs(self , remote , desired_capabilities , profile_dir):
        return self._generic_make_browser(webdriver.PhantomJS,
                webdriver.DesiredCapabilities.PHANTOMJS, remote, desired_capabilities)
    def _make_htmlunit(self , remote , desired_capabilities , profile_dir):
        return self._generic_make_browser(webdriver.Remote,
                webdriver.DesiredCapabilities.HTMLUNIT, remote, desired_capabilities)
    def _make_htmlunitwithjs(self , remote , desired_capabilities , profile_dir):
        return self._generic_make_browser(webdriver.Remote,
                webdriver.DesiredCapabilities.HTMLUNITWITHJS, remote, desired_capabilities)
    def _make_android(self , remote , desired_capabilities , profile_dir):
        return self._generic_make_browser(webdriver.Remote,
                webdriver.DesiredCapabilities.ANDROID, remote, desired_capabilities)
    def _make_iphone(self , remote , desired_capabilities , profile_dir):
        return self._generic_make_browser(webdriver.Remote,
                webdriver.DesiredCapabilities.IPHONE, remote, desired_capabilities)
    def _make_safari(self , remote , desired_capabilities , profile_dir):
        return self._generic_make_browser(webdriver.Safari,
                webdriver.DesiredCapabilities.SAFARI, remote, desired_capabilities)
def _generic_make_browser(self, webdriver_type , desired_cap_type, remote_url, desired_caps):
'''most of the make browser functions just call this function which creates the
appropriate web-driver'''
if not remote_url:
browser = webdriver_type()
else:
browser = self._create_remote_web_driver(desired_cap_type,remote_url , desired_caps)
return browser
def _create_remote_web_driver(self , capabilities_type , remote_url , desired_capabilities=None , profile=None):
'''parses the string based desired_capabilities if neccessary and
creates the associated remote web driver'''
desired_capabilities_object = capabilities_type.copy()
if type(desired_capabilities) in (str, unicode):
desired_capabilities = self._parse_capabilities_string(desired_capabilities)
desired_capabilities_object.update(desired_capabilities or {})
return webdriver.Remote(desired_capabilities=desired_capabilities_object,
command_executor=str(remote_url), browser_profile=profile)
def _parse_capabilities_string(self, capabilities_string):
'''parses the string based desired_capabilities which should be in the form
key1:val1,key2:val2
'''
desired_capabilities = {}
if not capabilities_string:
return desired_capabilities
for cap in capabilities_string.split(","):
(key, value) = cap.split(":", 1)
desired_capabilities[key.strip()] = value.strip()
return desired_capabilities
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains Google Kubernetes Engine operators.
"""
import os
import subprocess
import tempfile
from typing import Dict, Optional, Union
from google.cloud.container_v1.types import Cluster
from airflow import AirflowException
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
from airflow.gcp.hooks.base import CloudBaseHook
from airflow.gcp.hooks.kubernetes_engine import GKEClusterHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class GKEClusterDeleteOperator(BaseOperator):
    """
    Deletes a Google Kubernetes Engine cluster, including its Kubernetes
    endpoint and all worker nodes.

    ``project_id``, ``name`` and ``location`` must all identify the cluster,
    alongside the usual ``task_id``:

    **Operator Creation**: ::

        operator = GKEClusterDeleteOperator(
                    task_id='cluster_delete',
                    project_id='my-project',
                    location='cluster-location'
                    name='cluster-name')

    .. seealso::
        For more detail about deleting clusters have a look at the reference:
        https://google-cloud-python.readthedocs.io/en/latest/container/gapic/v1/api.html#google.cloud.container_v1.ClusterManagerClient.delete_cluster

    :param project_id: The Google Developers Console [project ID or project number]
    :type project_id: str
    :param name: The name of the resource to delete, in this case cluster name
    :type name: str
    :param location: The name of the Google Compute Engine zone in which the cluster
        resides.
    :type location: str
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
    :type gcp_conn_id: str
    :param api_version: The api version to use
    :type api_version: str
    """
    template_fields = ['project_id', 'gcp_conn_id', 'name', 'location', 'api_version']

    @apply_defaults
    def __init__(self,
                 name: str,
                 location: str,
                 project_id: Optional[str] = None,
                 gcp_conn_id: str = 'google_cloud_default',
                 api_version: str = 'v2',
                 *args,
                 **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.name = name
        self.location = location
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.api_version = api_version
        # Validate eagerly, at DAG-parse time, rather than at execution.
        self._check_input()

    def _check_input(self):
        # All three identifiers are needed to address the cluster.
        if all([self.project_id, self.name, self.location]):
            return
        self.log.error(
            'One of (project_id, name, location) is missing or incorrect')
        raise AirflowException('Operator has incorrect or missing input.')

    def execute(self, context):
        hook = GKEClusterHook(gcp_conn_id=self.gcp_conn_id, location=self.location)
        return hook.delete_cluster(name=self.name, project_id=self.project_id)
class GKEClusterCreateOperator(BaseOperator):
    """
    Create a Google Kubernetes Engine Cluster of specified dimensions, waiting
    until the cluster is up before the task completes.

    The **minimum** cluster definition is a name plus an initial node count,
    either as a plain ``dict`` ::

        cluster_def = {'name': 'my-cluster-name',
                       'initial_node_count': 1}

    or as a ``Cluster`` proto ::

        from google.cloud.container_v1.types import Cluster

        cluster_def = Cluster(name='my-cluster-name', initial_node_count=1)

    **Operator Creation**: ::

        operator = GKEClusterCreateOperator(
                    task_id='cluster_create',
                    project_id='my-project',
                    location='my-location'
                    body=cluster_def)

    .. seealso::
        For more detail about creating clusters have a look at the reference:
        :class:`google.cloud.container_v1.types.Cluster`

    :param project_id: The Google Developers Console [project ID or project number]
    :type project_id: str
    :param location: The name of the Google Compute Engine zone in which the cluster
        resides.
    :type location: str
    :param body: The Cluster definition to create, can be protobuf or python dict, if
        dict it must match protobuf message Cluster
    :type body: dict or google.cloud.container_v1.types.Cluster
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
    :type gcp_conn_id: str
    :param api_version: The api version to use
    :type api_version: str
    """
    template_fields = ['project_id', 'gcp_conn_id', 'location', 'api_version', 'body']

    @apply_defaults
    def __init__(self,
                 location: str,
                 body: Optional[Union[Dict, Cluster]],
                 project_id: Optional[str] = None,
                 gcp_conn_id: str = 'google_cloud_default',
                 api_version: str = 'v2',
                 *args,
                 **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.location = location
        self.body = body
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.api_version = api_version
        # Validate eagerly, at DAG-parse time, rather than at execution.
        self._check_input()

    def _check_input(self):
        def _body_has_minimum_fields(body):
            # dict bodies must carry both keys; proto bodies must have both
            # attributes set to truthy values.
            if isinstance(body, dict):
                return "name" in body and "initial_node_count" in body
            return bool(getattr(body, "name", None)) and \
                bool(getattr(body, "initial_node_count", None))

        if all([self.project_id, self.location, self.body]) and \
                _body_has_minimum_fields(self.body):
            return
        self.log.error(
            "One of (project_id, location, body, body['name'], "
            "body['initial_node_count']) is missing or incorrect"
        )
        raise AirflowException("Operator has incorrect or missing input.")

    def execute(self, context):
        hook = GKEClusterHook(gcp_conn_id=self.gcp_conn_id, location=self.location)
        return hook.create_cluster(cluster=self.body, project_id=self.project_id)
# Environment variable read by gcloud to decide where to write the kubeconfig.
KUBE_CONFIG_ENV_VAR = "KUBECONFIG"
class GKEPodOperator(KubernetesPodOperator):
    """
    Executes a task in a Kubernetes pod in the specified Google Kubernetes
    Engine cluster
    This Operator assumes that the system has gcloud installed and has configured a
    connection id with a service account.
    The **minimum** required to define a cluster to create are the variables
    ``task_id``, ``project_id``, ``location``, ``cluster_name``, ``name``,
    ``namespace``, and ``image``
    **Operator Creation**: ::
        operator = GKEPodOperator(task_id='pod_op',
                                  project_id='my-project',
                                  location='us-central1-a',
                                  cluster_name='my-cluster-name',
                                  name='task-name',
                                  namespace='default',
                                  image='perl')
    .. seealso::
        For more detail about application authentication have a look at the reference:
        https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application
    :param location: The name of the Google Kubernetes Engine zone in which the
        cluster resides, e.g. 'us-central1-a'
    :type location: str
    :param cluster_name: The name of the Google Kubernetes Engine cluster the pod
        should be spawned in
    :type cluster_name: str
    :param project_id: The Google Developers Console project id
    :type project_id: str
    :param gcp_conn_id: The google cloud connection id to use. This allows for
        users to specify a service account.
    :type gcp_conn_id: str
    """
    template_fields = ('project_id', 'location',
                       'cluster_name') + KubernetesPodOperator.template_fields
    @apply_defaults
    def __init__(self,
                 location: str,
                 cluster_name: str,
                 project_id: Optional[str] = None,
                 gcp_conn_id: str = 'google_cloud_default',
                 *args,
                 **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.project_id = project_id
        self.location = location
        self.cluster_name = cluster_name
        self.gcp_conn_id = gcp_conn_id
        # An explicit connection id is required; passing None is rejected so
        # callers migrate to a (possibly empty) named connection instead.
        if self.gcp_conn_id is None:
            raise AirflowException(
                "The gcp_conn_id parameter has become required. If you want to use Application Default "
                "Credentials (ADC) strategy for authorization, create an empty connection "
                "called `google_cloud_default`.",
            )
    def execute(self, context):
        hook = CloudBaseHook(gcp_conn_id=self.gcp_conn_id)
        # Fall back to the project configured on the connection when no
        # explicit project_id was given to the operator.
        self.project_id = self.project_id or hook.project_id
        if not self.project_id:
            raise AirflowException("The project id must be passed either as "
                                   "keyword project_id parameter or as project_id extra "
                                   "in GCP connection definition. Both are not set!")
        # Write config to a temp file and set the environment variable to point to it.
        # This is to avoid race conditions of reading/writing a single file
        with tempfile.NamedTemporaryFile() as conf_file:
            os.environ[KUBE_CONFIG_ENV_VAR] = conf_file.name
            with hook.provide_gcp_credential_file_as_context():
                # Attempt to get/update credentials
                # We call gcloud directly instead of using google-cloud-python api
                # because there is no way to write kubernetes config to a file, which is
                # required by KubernetesPodOperator.
                # The gcloud command looks at the env variable `KUBECONFIG` for where to save
                # the kubernetes config file.
                subprocess.check_call(
                    ["gcloud", "container", "clusters", "get-credentials",
                     self.cluster_name,
                     "--zone", self.location,
                     "--project", self.project_id])
            # Tell `KubernetesPodOperator` where the config file is located
            self.config_file = os.environ[KUBE_CONFIG_ENV_VAR]
            return super().execute(context)
| |
# coding: utf-8
# Copyright 2009 Alexandre Fiori
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division
import time
from io import BytesIO as StringIO
import os
from bson import objectid, timestamp, SON
from pymongo.write_concern import WriteConcern
import txmongo
from txmongo import database, collection, filter as qf
from txmongo.gridfs import GridFS, GridIn, GridOut, GridOutIterator, errors
from txmongo._gridfs.errors import NoFile
from twisted.trial import unittest
from twisted.internet import defer
from twisted.python.compat import _PY3
try:
from twisted._version import version as twisted_version
except ImportError:
from twisted._version import __version__ as twisted_version
mongo_host = "127.0.0.1"
mongo_port = 27017
class TestMongoObjects(unittest.TestCase):
    # Integration tests: these require a MongoDB server listening on
    # mongo_host:mongo_port.
    @defer.inlineCallbacks
    def test_MongoObjects(self):
        """ Tests creating mongo objects """
        conn = yield txmongo.MongoConnection(mongo_host, mongo_port)
        mydb = conn.mydb
        self.assertEqual(isinstance(mydb, database.Database), True)
        # repr() embeds the db name as str on py3 and unicode (u'...') on py2.
        if _PY3:
            self.assertEqual(repr(mydb), "Database(Connection('127.0.0.1', 27017), 'mydb')")
        else:
            self.assertEqual(repr(mydb), "Database(Connection('127.0.0.1', 27017), u'mydb')")
        # Calling a database object is equivalent to attribute-style access.
        self.assertEqual(repr(mydb("mydb2")), repr(mydb.__call__("mydb2")))
        mycol = mydb.mycol
        self.assertEqual(isinstance(mycol, collection.Collection), True)
        mycol2 = yield mydb.create_collection("mycol2")
        self.assertEqual(isinstance(mycol2, collection.Collection), True)
        # Collection creation with options (capped size).
        mycol3 = yield mydb.create_collection("mycol3", {"size": 1000})
        self.assertEqual(isinstance(mycol3, collection.Collection), True)
        # drop_collection accepts both a name and a Collection object.
        yield mydb.drop_collection("mycol3")
        yield mydb.drop_collection(mycol3)
        self.assertRaises(TypeError, mydb.drop_collection, None)
        yield conn.disconnect()
    @defer.inlineCallbacks
    def test_Properties(self):
        # Checks name/full_name/database/write_concern/connection properties.
        conn = txmongo.MongoConnection(mongo_host, mongo_port)
        db = conn.mydb
        coll = db.mycol
        try:
            # names
            self.assertEqual(db.name, u"mydb")
            self.assertEqual(coll.name, u"mycol")
            self.assertEqual(coll.full_name, u"mydb.mycol")
            self.assertEqual(coll.subcoll.name, u"mycol.subcoll")
            self.assertEqual(coll.subcoll.full_name, u"mydb.mycol.subcoll")
            # database
            self.assertTrue(coll.database is db)
            # Write concern
            w2 = coll.with_options(write_concern=WriteConcern(w=2))
            dbw2 = database.Database(conn, "mydb", write_concern=WriteConcern(w=2))
            self.assertEqual(w2.write_concern, WriteConcern(w=2))
            self.assertEqual(dbw2.write_concern, WriteConcern(w=2))
            # Connection
            self.assertTrue(db.connection is conn)
        finally:
            yield conn.disconnect()
    @defer.inlineCallbacks
    def test_MongoOperations(self):
        """ Tests mongo operations """
        conn = yield txmongo.MongoConnection(mongo_host, mongo_port)
        test = conn.foo.test
        # insert
        doc = {"foo": "bar", "items": [1, 2, 3]}
        yield test.insert(doc, safe=True)
        result = yield test.find_one(doc)
        self.assertEqual("_id" in result, True)
        self.assertEqual(result["foo"], "bar")
        self.assertEqual(result["items"], [1, 2, 3])
        # insert preserves object id
        doc.update({"_id": objectid.ObjectId()})
        yield test.insert(doc, safe=True)
        result = yield test.find_one(doc)
        self.assertEqual(result.get("_id"), doc.get("_id"))
        self.assertEqual(result["foo"], "bar")
        self.assertEqual(result["items"], [1, 2, 3])
        # update
        yield test.update({"_id": result["_id"]}, {"$set": {"one": "two"}}, safe=True)
        result = yield test.find_one({"_id": result["_id"]})
        self.assertEqual(result["one"], "two")
        # delete
        yield test.remove(result["_id"], safe=True)
        # disconnect
        yield conn.disconnect()
    @defer.inlineCallbacks
    def test_Timestamps(self):
        """Tests mongo operations with Timestamps"""
        conn = yield txmongo.MongoConnection(mongo_host, mongo_port)
        test = conn.foo.test_ts
        test.drop()
        # insert with specific timestamp
        doc1 = {"_id": objectid.ObjectId(),
                "ts": timestamp.Timestamp(1, 2)}
        yield test.insert(doc1, safe=True)
        result = yield test.find_one(doc1)
        self.assertEqual(result.get("ts").time, 1)
        self.assertEqual(result.get("ts").inc, 2)
        # insert with specific timestamp
        doc2 = {"_id": objectid.ObjectId(),
                "ts": timestamp.Timestamp(2, 1)}
        yield test.insert(doc2, safe=True)
        # the objects come back sorted by ts correctly.
        # (test that we stored inc/time in the right fields)
        result = yield test.find(filter=qf.sort(qf.ASCENDING("ts")))
        self.assertEqual(len(result), 2)
        self.assertEqual(result[0]["_id"], doc1["_id"])
        self.assertEqual(result[1]["_id"], doc2["_id"])
        # insert with null timestamp: the server fills in the current time.
        doc3 = {"_id": objectid.ObjectId(),
                "ts": timestamp.Timestamp(0, 0)}
        yield test.insert(doc3, safe=True)
        # time field loaded correctly
        result = yield test.find_one(doc3["_id"])
        now = time.time()
        self.assertTrue(now - 2 <= result["ts"].time <= now)
        # delete
        yield test.remove(doc1["_id"], safe=True)
        yield test.remove(doc2["_id"], safe=True)
        yield test.remove(doc3["_id"], safe=True)
        # disconnect
        yield conn.disconnect()
class TestGridFsObjects(unittest.TestCase):
    """ Test the GridFS operations from txmongo._gridfs """
    # Integration tests: these require a MongoDB server listening on
    # mongo_host:mongo_port.
    @defer.inlineCallbacks
    def _disconnect(self, conn):
        """ Disconnect the connection """
        yield conn.disconnect()
    def _drop_gridfs(self, db):
        """
        Drop the default gridfs instance (i.e. ``fs``) associated with this database
        """
        return defer.gatherResults([
            db.drop_collection('fs.files'),
            db.drop_collection('fs.chunks')
        ])
    @defer.inlineCallbacks
    def test_GridFileObjects(self):
        """ Tests gridfs objects """
        conn = yield txmongo.MongoConnection(mongo_host, mongo_port)
        db = conn.test
        yield self._drop_gridfs(db)
        self.assertRaises(TypeError, GridFS, None)
        _ = GridFS(db) # Default collection
        self.assertRaises(TypeError, GridIn, None)
        # GridIn supports use as a context manager (auto-close).
        with GridIn(db.fs, filename="test_with", contentType="text/plain", chunk_size=1024):
            pass
        grid_in_file = GridIn(db.fs, filename="test_1", contentType="text/plain",
                              content_type="text/plain", chunk_size=65536, length=1048576,
                              upload_date="20150101")
        self.assertFalse(grid_in_file.closed)
        # assertRaises-as-context-manager needs Twisted >= 15.
        if twisted_version.major >= 15:
            with self.assertRaises(TypeError):
                yield grid_in_file.write(1)
            with self.assertRaises(TypeError):
                yield grid_in_file.write(u"0xDEADBEEF")
            with self.assertRaises(AttributeError):
                _ = grid_in_file.test
        grid_in_file.test = 1
        yield grid_in_file.write(b"0xDEADBEEF")
        yield grid_in_file.write(b"0xDEADBEEF"*1048576)
        # NOTE(review): assertTrue with two args treats the second as the
        # failure *message*; this was probably meant to be assertEqual.
        self.assertTrue("20150101", grid_in_file.upload_date)
        yield grid_in_file.writelines([b"0xDEADBEEF", b"0xDEADBEEF"])
        yield grid_in_file.close()
        # After close, metadata becomes read-only and writes are rejected.
        if twisted_version.major >= 15:
            with self.assertRaises(AttributeError):
                grid_in_file.length = 1
        self.assertEqual(1, grid_in_file.test)
        if twisted_version.major >= 15:
            with self.assertRaises(AttributeError):
                _ = grid_in_file.test_none
        self.assertTrue(grid_in_file.closed)
        if twisted_version.major >= 15:
            with self.assertRaises(ValueError):
                yield grid_in_file.write(b"0xDEADBEEF")
        doc = yield db.fs.files.find_one({"_id": grid_in_file._id})
        grid_out_file = GridOut(db.fs, doc)
        grid_out_file.seek(0, os.SEEK_END)
        # Total bytes written above: 10-byte chunks x (1 + 1048576 + 2).
        self.assertEqual(10 * (1 + 1048576 + 2), grid_out_file.tell())
        yield grid_out_file.close()
        # Read the file back chunk by chunk through the iterator.
        data = b''
        for block_dfr in GridOutIterator(grid_out_file, db.fs.chunks):
            block = yield block_dfr
            if block:
                data += block
            else:
                break
        self.assertEqual(data, b"0xDEADBEEF" * (1 + 1048576 + 2))
        # Seek semantics against a synthetic (not stored) file document.
        fake_doc = {"_id": "test_id", "length": 1048576, "filename": "test",
                    "upload_date": "20150101"}
        self.assertRaises(TypeError, GridOut, None, None)
        grid_out_file = GridOut(db.fs, fake_doc)
        if twisted_version.major >= 15:
            with self.assertRaises(AttributeError):
                _ = grid_out_file.testing
        self.assertEqual("test", grid_out_file.filename)
        self.assertEqual(0, grid_out_file.tell())
        grid_out_file.seek(1024)
        self.assertEqual(1024, grid_out_file.tell())
        grid_out_file.seek(1024, os.SEEK_CUR)
        self.assertEqual(2048, grid_out_file.tell())
        grid_out_file.seek(0, os.SEEK_END)
        self.assertEqual(1048576, grid_out_file.tell())
        # Invalid whence and negative absolute positions are rejected.
        self.assertRaises(IOError, grid_out_file.seek, 0, 4)
        self.assertRaises(IOError, grid_out_file.seek, -1)
        self.assertTrue("'_id': 'test_id'" in repr(grid_out_file))
        yield conn.disconnect()
    @defer.inlineCallbacks
    def test_GridFsObjects(self):
        """ Tests gridfs objects """
        conn = yield txmongo.MongoConnection(mongo_host, mongo_port)
        db = conn.test
        yield self._drop_gridfs(db)
        gfs = GridFS(db) # Default collection
        # Deleting a missing file is a no-op.
        yield gfs.delete(u"test")
        _ = gfs.new_file(filename="test_1", contentType="text/plain", chunk_size=65536)
        yield conn.disconnect()
        conn = yield txmongo.MongoConnection(mongo_host, mongo_port)
        db = conn.test
        yield self._drop_gridfs(db)
        gfs = GridFS(db) # Default collection
        # put() is the one-shot write API.
        _ = yield gfs.put(b"0xDEADBEEF", filename="test_2", contentType="text/plain",
                          chunk_size=65536)
        # disconnect
        yield conn.disconnect()
        conn = yield txmongo.MongoConnection(mongo_host, mongo_port)
        db = conn.test
        gfs = GridFS(db) # Default collection
        # Missing file raises error
        yield self.assertFailure(gfs.get("test_3"), NoFile)
        # disconnect
        yield conn.disconnect()
    @defer.inlineCallbacks
    def test_GridFsIteration(self):
        """ Tests gridfs iterator """
        conn = yield txmongo.MongoConnection(mongo_host, mongo_port)
        db = conn.test
        yield self._drop_gridfs(db)
        gfs = GridFS(db) # Default collection
        new_file = gfs.new_file(filename="testName", contentType="text/plain", length=1048576,
                                chunk_size=4096)
        yield new_file.write(b"0xDEADBEEF"*4096*2)
        yield new_file.close()
        # Iterate two 4096-byte chunks, then exhaustion yields None.
        fake_doc = {"_id": new_file._id, "name": "testName", "length": 4096*2, "chunkSize": 4096,
                    "contentType": "text/plain"}
        grid_out_file = GridOut(db.fs, fake_doc)
        iterator = GridOutIterator(grid_out_file, db.fs.chunks)
        next_it = yield next(iterator)
        self.assertEqual(len(next_it), 4096)
        _ = yield next(iterator)
        next_it = yield next(iterator)
        self.assertEqual(next_it, None)
        # A document pointing at non-existent chunks is reported as corrupt.
        fake_bad_doc = {"_id": "bad_id", "name": "testName", "length": 4096*2,
                        "chunkSize": 4096, "contentType": "text/plain"}
        grid_bad_out_file = GridOut(db.fs, fake_bad_doc)
        bad_iterator = GridOutIterator(grid_bad_out_file, db.fs.chunks)
        if twisted_version.major >= 15:
            with self.assertRaises(errors.CorruptGridFile):
                next_it = yield bad_iterator.next()
        # disconnect
        yield conn.disconnect()
    @defer.inlineCallbacks
    def test_GridFsOperations(self):
        """ Tests gridfs operations """
        conn = yield txmongo.MongoConnection(mongo_host, mongo_port)
        db = conn.test
        # Drop files first TODO: iterate through files and delete them
        yield self._drop_gridfs(db)
        # Don't forget to disconnect
        self.addCleanup(self._disconnect, conn)
        try:
            in_file = StringIO(b"Test input string")
            out_file = StringIO()
        except Exception as e:
            self.fail("Failed to create memory files for testing: %s" % e)
        g_out = None
        try:
            # Tests writing to a new gridfs file
            gfs = GridFS(db) # Default collection
            if twisted_version.major >= 15:
                with self.assertRaises(NoFile):
                    yield gfs.get_last_version("optest")
            g_in = gfs.new_file(filename="optest", contentType="text/plain",
                                chunk_size=65536) # non-default chunk size used
            # yielding to ensure writes complete before we close and close before we try to read
            yield g_in.write(in_file.read())
            yield g_in.close()
            # Tests reading from an existing gridfs file
            g_out = yield gfs.get_last_version("optest")
            data = yield g_out.read()
            out_file.write(data)
            _id = g_out._id
        except Exception as e:
            self.fail("Failed to communicate with the GridFS. " +
                      "Is MongoDB running? %s" % e)
        else:
            self.assertEqual(in_file.getvalue(), out_file.getvalue(),
                             "Could not read the value from writing an input")
        finally:
            in_file.close()
            out_file.close()
            if g_out:
                g_out.close()
        listed_files = yield gfs.list()
        self.assertEqual(["optest"], listed_files,
                         "`optest` is the only expected file and we received %s" % listed_files)
        yield gfs.delete(_id)
    @defer.inlineCallbacks
    def test_GridFsIndexesCreation(self):
        """ Tests gridfs indexes creation"""
        conn = yield txmongo.MongoConnection(mongo_host, mongo_port)
        db = conn.test
        yield self._drop_gridfs(db)
        # Create a new GridFS instance should trigger indexes creation
        gfs = GridFS(db)
        # Multiple calls should return multiple defer not to mix between them
        self.assertNotEqual(gfs.indexes_created(), gfs.indexes_created())
        yield gfs.indexes_created()
        # The canonical GridFS indexes: (filename, uploadDate) on fs.files
        # and (files_id, n) on fs.chunks.
        indexes = yield db.fs.files.index_information()
        self.assertTrue(any(key["key"] == SON([("filename", 1), ("uploadDate", 1)])
                            for key in indexes.values()))
        indexes = yield db.fs.chunks.index_information()
        self.assertTrue(any(key["key"] == SON([("files_id", 1), ("n", 1)])
                            for key in indexes.values()))
        yield conn.disconnect()
| |
# This file is part of the ISIS IBEX application.
# Copyright (C) 2012-2016 Science & Technology Facilities Council.
# All rights reserved.
#
# This program is distributed in the hope that it will be useful.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution.
# EXCEPT AS EXPRESSLY SET FORTH IN THE ECLIPSE PUBLIC LICENSE V1.0, THE PROGRAM
# AND ACCOMPANYING MATERIALS ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND. See the Eclipse Public License v1.0 for more details.
#
# You should have received a copy of the Eclipse Public License v1.0
# along with this program; if not, you can obtain a copy from
# https://www.eclipse.org/org/documents/epl-v10.php or
# http://opensource.org/licenses/eclipse-1.0.php
import unittest
import json
import os
from mock import Mock
from parameterized import parameterized
from BlockServer.config.block import Block
from BlockServer.config.configuration import Configuration
from BlockServer.config.ioc import IOC
from BlockServer.core.active_config_holder import (ActiveConfigHolder, _blocks_changed, _blocks_changed_in_config,
_compare_ioc_properties)
from BlockServer.mocks.mock_ioc_control import MockIocControl
from BlockServer.core.macros import MACROS
from BlockServer.mocks.mock_file_manager import MockConfigurationFileManager
from BlockServer.test_modules.helpers import modify_active
from server_common.constants import IS_LINUX
CONFIG_PATH = "./test_configs/"
BASE_PATH = "./example_base/"
# Helper methods
def quick_block_to_json(name, pv, group, local=True):
    """Build the dict representation of a block as expected by add_block."""
    return dict(name=name, pv=pv, group=group, local=local)
def add_basic_blocks_and_iocs(config_holder):
    """Populate *config_holder* with four standard test blocks and two IOCs."""
    standard_blocks = [
        ("TESTBLOCK1", "PV1", "GROUP1"),
        ("TESTBLOCK2", "PV2", "GROUP2"),
        ("TESTBLOCK3", "PV3", "GROUP2"),
        ("TESTBLOCK4", "PV4", "NONE"),
    ]
    for block_name, pv, group in standard_blocks:
        config_holder.add_block(quick_block_to_json(block_name, pv, group, True))
    for ioc_name in ("SIMPLE1", "SIMPLE2"):
        config_holder._add_ioc(ioc_name)
def get_groups_and_blocks(jsondata):
    """Deserialise a JSON string of group/block data into Python objects."""
    return json.loads(jsondata)
def create_grouping(groups):
    """Serialise ``{group_name: [block, ...]}`` into the JSON grouping format:
    a list of ``{"name": ..., "blocks": [...]}`` objects."""
    grouping = []
    for group_name, block_list in groups.items():
        grouping.append({"name": group_name, "blocks": block_list})
    return json.dumps(grouping)
def create_dummy_component():
    """Build a minimal component Configuration (two blocks, one IOC) for tests."""
    config = Configuration(MACROS)
    config.add_block("COMPBLOCK1", "PV1", "GROUP1", True)
    config.add_block("COMPBLOCK2", "PV2", "COMPGROUP", True)
    config.add_ioc("COMPSIMPLE1")
    # Mark it as a component rather than a full configuration.
    config.is_component = True
    return config
# Note that the ActiveConfigServerManager contains an instance of the Configuration class and hands a lot of
# work off to this object. Rather than testing whether the functionality in the configuration class works
# correctly (e.g. by checking that a block has been edited properly after calling configuration.edit_block),
# we should instead test that ActiveConfigServerManager passes the correct parameters to the Configuration object.
# We are testing that ActiveConfigServerManager correctly interfaces with Configuration, not testing the
# functionality of Configuration, which is done in Configuration's own suite of tests.
class TestActiveConfigHolderSequence(unittest.TestCase):
def setUp(self):
# Note: All configurations are saved in memory
self.mock_archive = Mock()
self.mock_archive.update_archiver = Mock()
self.mock_file_manager = MockConfigurationFileManager()
self.active_config_holder = self.create_active_config_holder()
def create_active_config_holder(self):
config_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "settings")
return ActiveConfigHolder(MACROS, self.mock_archive, self.mock_file_manager, MockIocControl(""), config_dir)
def test_add_ioc(self):
config_holder = self.active_config_holder
iocs = config_holder.get_ioc_names()
self.assertEqual(len(iocs), 0)
config_holder._add_ioc("SIMPLE1")
config_holder._add_ioc("SIMPLE2")
iocs = config_holder.get_ioc_names()
self.assertTrue("SIMPLE1" in iocs)
self.assertTrue("SIMPLE2" in iocs)
@unittest.skipIf(IS_LINUX, "Unable to save config on Linux")
def test_save_config(self):
config_holder = self.active_config_holder
add_basic_blocks_and_iocs(config_holder)
try:
config_holder.save_active("TEST_CONFIG")
except Exception as e:
self.fail(f"test_save_config raised Exception unexpectedly: {e}")
@unittest.skipIf(IS_LINUX, "Location of last_config.txt not correctly configured on Linux")
def test_load_config(self):
config_holder = self.active_config_holder
add_basic_blocks_and_iocs(config_holder)
config_holder.save_active("TEST_CONFIG")
config_holder.clear_config()
blocks = config_holder.get_blocknames()
self.assertEqual(len(blocks), 0)
iocs = config_holder.get_ioc_names()
self.assertEqual(len(iocs), 0)
config_holder.load_active("TEST_CONFIG")
blocks = config_holder.get_blocknames()
self.assertEqual(len(blocks), 4)
self.assertTrue('TESTBLOCK1' in blocks)
self.assertTrue('TESTBLOCK2' in blocks)
self.assertTrue('TESTBLOCK3' in blocks)
self.assertTrue('TESTBLOCK4' in blocks)
iocs = config_holder.get_ioc_names()
self.assertTrue("SIMPLE1" in iocs)
self.assertTrue("SIMPLE2" in iocs)
@unittest.skipIf(IS_LINUX, "Location of last_config.txt not correctly configured on Linux")
def test_GIVEN_load_config_WHEN_load_config_again_THEN_no_ioc_changes(self):
# This test is checking that a load will correctly cache the IOCs that are running so that a comparison will
# return no change
config_holder = self.active_config_holder
add_basic_blocks_and_iocs(config_holder)
config_holder.save_active("TEST_CONFIG")
config_holder.clear_config()
blocks = config_holder.get_blocknames()
self.assertEqual(len(blocks), 0)
iocs = config_holder.get_ioc_names()
self.assertEqual(len(iocs), 0)
config_holder.load_active("TEST_CONFIG")
config_holder.load_active("TEST_CONFIG")
iocs_to_start, iocs_to_restart, iocs_to_stop = config_holder.iocs_changed()
self.assertEqual(len(iocs_to_start), 0)
self.assertEqual(len(iocs_to_restart), 0)
self.assertEqual(len(iocs_to_stop), 0)
def test_load_notexistant_config(self):
config_holder = self.active_config_holder
self.assertRaises(IOError, lambda: config_holder.load_active("DOES_NOT_EXIST"))
def test_save_as_component(self):
config_holder = self.active_config_holder
try:
config_holder.save_active("TEST_CONFIG1", as_comp=True)
except Exception as e:
self.fail(f"test_save_as_component raised Exception unexpectedly: {e}")
@unittest.skipIf(IS_LINUX, "Unable to save config on Linux")
def test_save_config_for_component(self):
config_holder = self.active_config_holder
config_holder.save_active("TEST_CONFIG1", as_comp=True)
try:
config_holder.save_active("TEST_CONFIG1")
except Exception as e:
self.fail(f"test_save_config_for_component raised Exception unexpectedly: {e}")
def test_load_component_fails(self):
    """A component cannot be loaded as an active config (raises IOError)."""
    config_holder = self.active_config_holder
    add_basic_blocks_and_iocs(config_holder)
    config_holder.save_active("TEST_COMPONENT", as_comp=True)
    config_holder.clear_config()
    self.assertRaises(IOError, lambda: config_holder.load_active("TEST_COMPONENT"))
@unittest.skipIf(IS_LINUX, "Location of last_config.txt not correctly configured on Linux")
def test_load_last_config(self):
    """load_last_config restores the blocks, groups and IOCs of the last save."""
    config_holder = self.active_config_holder
    add_basic_blocks_and_iocs(config_holder)
    config_holder.save_active("TEST_CONFIG")
    config_holder.clear_config()
    blocks = config_holder.get_blocknames()
    self.assertEqual(len(blocks), 0)
    iocs = config_holder.get_ioc_names()
    self.assertEqual(len(iocs), 0)
    config_holder.load_last_config()
    grps = config_holder.get_group_details()
    self.assertTrue(len(grps) == 3)
    blocks = config_holder.get_blocknames()
    self.assertEqual(len(blocks), 4)
    self.assertTrue('TESTBLOCK1' in blocks)
    self.assertTrue('TESTBLOCK2' in blocks)
    self.assertTrue('TESTBLOCK3' in blocks)
    self.assertTrue('TESTBLOCK4' in blocks)
    iocs = config_holder.get_ioc_names()
    self.assertTrue("SIMPLE1" in iocs)
    self.assertTrue("SIMPLE2" in iocs)
def test_reloading_current_config_with_blank_name_does_nothing(self):
    """Reloading when no config is active issues no load requests."""
    # arrange
    config_name = self.active_config_holder.get_config_name()
    self.assertEqual(config_name, "")
    load_requests = self.mock_file_manager.get_load_config_history()
    self.assertEqual(len(load_requests), 0)
    # act
    self.active_config_holder.reload_current_config()
    # assert
    load_requests = self.mock_file_manager.get_load_config_history()
    self.assertEqual(len(load_requests), 0)
@unittest.skipIf(IS_LINUX, "Location of last_config.txt not correctly configured on Linux")
def test_reloading_current_config_sends_load_request_correctly(self):
    """Reloading an active config issues exactly one load request for it."""
    # arrange
    config_holder = self.active_config_holder
    config_name = "TEST_CONFIG"
    add_basic_blocks_and_iocs(config_holder)
    config_holder.save_active(config_name)
    load_requests = self.mock_file_manager.get_load_config_history()
    self.assertEqual(len(load_requests), 0)
    # act
    config_holder.reload_current_config()
    # assert
    load_requests = self.mock_file_manager.get_load_config_history()
    self.assertEqual(load_requests.count(config_name), 1)
def _modify_active(self, config_holder, new_details, name="config1"):
    """Helper: push *new_details* into the holder via the shared modify_active fixture."""
    modify_active(name, MACROS, self.mock_file_manager, new_details, config_holder)
def test_iocs_changed_no_changes(self):
    """Re-applying identical details yields no IOC deltas."""
    # Arrange
    config_holder = self.create_active_config_holder()
    details = config_holder.get_config_details()
    self._modify_active(config_holder, details)
    # Assert
    start, restart, stop = config_holder.iocs_changed()
    self.assertEqual(len(start), 0)
    self.assertEqual(len(restart), 0)
    self.assertEqual(len(stop), 0)
def test_iocs_changed_ioc_added(self):
    """Adding an IOC to the details marks exactly one IOC to start."""
    # Arrange
    config_holder = self.create_active_config_holder()
    details = config_holder.get_config_details()
    # Act
    details['iocs'].append(IOC("NAME"))
    self._modify_active(config_holder, details)
    # Assert
    start, restart, stop = config_holder.iocs_changed()
    self.assertEqual(len(start), 1)
    self.assertEqual(len(restart), 0)
    self.assertEqual(len(stop), 0)
def test_iocs_changed_ioc_removed(self):
    """Removing an IOC from the details marks exactly one IOC to stop."""
    # Arrange
    config_holder = self.create_active_config_holder()
    details = config_holder.get_config_details()
    details['iocs'].append(IOC("NAME"))
    self._modify_active(config_holder, details)
    # Act
    details['iocs'].pop(0)
    self._modify_active(config_holder, details)
    # Assert
    start, restart, stop = config_holder.iocs_changed()
    self.assertEqual(len(start), 0)
    self.assertEqual(len(restart), 0)
    self.assertEqual(len(stop), 1)
def test_GIVEN_an_ioc_defined_in_a_component_WHEN_the_component_is_removed_THEN_the_ioc_is_stopped(self):
    """Removing a component stops the IOC that only it defined."""
    # Arrange
    config_holder = self.create_active_config_holder()
    component = create_dummy_component()
    component.iocs = {"DUMMY_IOC": IOC("dummyname")}
    self.mock_file_manager.comps["component_name"] = component
    config_holder.add_component("component_name")
    details = config_holder.get_config_details()
    details["blocks"] = [block for block in details["blocks"] if block["component"] is None]
    self._modify_active(config_holder, details)
    # Act
    config_holder.remove_comp("component_name")
    # Assert
    start, restart, stop = config_holder.iocs_changed()
    self.assertEqual(len(start), 0)
    self.assertEqual(len(restart), 0)
    self.assertEqual(len(stop), 1)
def test_GIVEN_an_ioc_defined_in_a_component_WHEN_the_ioc_simlevel_is_changed_THEN_the_ioc_is_restarted(self):
    """Swapping a component for one whose IOC has a different simlevel restarts the IOC."""
    # Arrange
    config_holder = self.create_active_config_holder()
    component = create_dummy_component()
    component.iocs = {"DUMMY_IOC": IOC("dummyname", simlevel="devsim")}
    self.mock_file_manager.comps["component_name"] = component
    config_holder.add_component("component_name")
    details = config_holder.get_config_details()
    details["blocks"] = [block for block in details["blocks"] if block["component"] is None]
    self._modify_active(config_holder, details)
    # Act
    config_holder.remove_comp("component_name")
    new_component = create_dummy_component()
    new_component.iocs = {"DUMMY_IOC": IOC("dummyname", simlevel="recsim")}  # Change simlevel
    self.mock_file_manager.comps["component_name"] = new_component
    config_holder.add_component("component_name")
    # Assert
    start, restart, stop = config_holder.iocs_changed()
    self.assertEqual(len(start), 0)
    self.assertEqual(len(restart), 1)
    self.assertEqual(len(stop), 0)
def test_GIVEN_an_ioc_defined_in_a_component_WHEN_the_ioc_macros_are_changed_THEN_the_ioc_is_restarted(self):
    """Swapping a component for one whose IOC has different macros restarts the IOC."""
    # Arrange
    config_holder = self.create_active_config_holder()
    component = create_dummy_component()
    component.iocs = {"DUMMY_IOC": IOC("dummyname", macros={"macros": {"A_MACRO": "VALUE1"}})}
    self.mock_file_manager.comps["component_name"] = component
    config_holder.add_component("component_name")
    details = config_holder.get_config_details()
    details["blocks"] = [block for block in details["blocks"] if block["component"] is None]
    self._modify_active(config_holder, details)
    # Act
    config_holder.remove_comp("component_name")
    new_component = create_dummy_component()
    new_component.iocs = {"DUMMY_IOC": IOC("dummyname", macros={"macros": {"A_MACRO": "VALUE2"}})}
    self.mock_file_manager.comps["component_name"] = new_component
    config_holder.add_component("component_name")
    # Assert
    start, restart, stop = config_holder.iocs_changed()
    self.assertEqual(len(start), 0)
    self.assertEqual(len(restart), 1)
    self.assertEqual(len(stop), 0)
def test_GIVEN_an_ioc_defined_in_a_component_WHEN_the_ioc_macros_are_not_changed_THEN_the_ioc_is_not_restarted(self):
    """Swapping a component for an identical one causes no IOC deltas."""
    # Arrange
    config_holder = self.create_active_config_holder()
    component = create_dummy_component()
    component.iocs = {"DUMMY_IOC": IOC("dummyname", macros={"macros": {"A_MACRO": "VALUE1"}})}
    self.mock_file_manager.comps["component_name"] = component
    config_holder.add_component("component_name")
    details = config_holder.get_config_details()
    details["blocks"] = [block for block in details["blocks"] if block["component"] is None]
    self._modify_active(config_holder, details)
    # Act
    config_holder.remove_comp("component_name")
    new_component = create_dummy_component()
    new_component.iocs = {"DUMMY_IOC": IOC("dummyname", macros={"macros": {"A_MACRO": "VALUE1"}})}
    self.mock_file_manager.comps["component_name"] = new_component
    config_holder.add_component("component_name")
    # Assert
    start, restart, stop = config_holder.iocs_changed()
    self.assertEqual(len(start), 0)
    self.assertEqual(len(restart), 0)
    self.assertEqual(len(stop), 0)
def test_GIVEN_an_ioc_defined_in_the_top_level_config_WHEN_the_ioc_is_removed_THEN_the_ioc_is_stopped(self):
    """Removing a top-level IOC marks it to be stopped."""
    # Arrange
    config_holder = self.create_active_config_holder()
    details = config_holder.get_config_details()
    details['iocs'].append(IOC("NAME"))
    self._modify_active(config_holder, details)
    # Act
    details['iocs'].pop(0)
    self._modify_active(config_holder, details)
    # Assert
    start, restart, stop = config_holder.iocs_changed()
    self.assertEqual(len(start), 0)
    self.assertEqual(len(restart), 0)
    self.assertEqual(len(stop), 1)
def test_given_empty_config_when_block_added_then_blocks_changed_returns_true(self):
    """Adding a block to an empty config flags blocks_changed."""
    # Arrange
    config_holder = self.create_active_config_holder()
    details = config_holder.get_config_details()
    # Act
    details['blocks'].append(Block(name="TESTNAME", pv="TESTPV").to_dict())
    self._modify_active(config_holder, details)
    # Assert
    self.assertTrue(config_holder.blocks_changed())
def test_given_config_when_block_params_changed_then_blocks_changed_returns_true(self):
    """Changing an existing block's attribute flags blocks_changed."""
    # Arrange
    config_holder = self.create_active_config_holder()
    details = config_holder.get_config_details()
    details['blocks'].append(Block(name="TESTNAME", pv="TESTPV").to_dict())
    self._modify_active(config_holder, details)
    # Act
    details['blocks'][0]['local'] = False
    self._modify_active(config_holder, details)
    # Assert
    self.assertTrue(config_holder.blocks_changed())
def test_given_config_with_one_block_when_block_removed_then_blocks_changed_returns_true(self):
    """Removing the only block flags blocks_changed."""
    # Arrange
    config_holder = self.create_active_config_holder()
    details = config_holder.get_config_details()
    details['blocks'].append(Block(name="TESTNAME", pv="TESTPV").to_dict())
    self._modify_active(config_holder, details)
    # Act
    details['blocks'].pop(0)
    self._modify_active(config_holder, details)
    # Assert
    self.assertTrue(config_holder.blocks_changed())
def test_given_empty_config_when_component_added_then_blocks_changed_returns_true(self):
    """Adding a component (with its blocks) flags blocks_changed."""
    # Arrange
    config_holder = self.create_active_config_holder()
    # Act
    self.mock_file_manager.comps["component_name"] = create_dummy_component()
    config_holder.add_component("component_name")
    # Assert
    self.assertTrue(config_holder.blocks_changed())
def test_given_empty_config_when_no_change_then_blocks_changed_returns_false(self):
    """Re-applying an empty config unchanged does not flag blocks_changed."""
    # Arrange
    config_holder = self.create_active_config_holder()
    details = config_holder.get_config_details()
    # Act
    self._modify_active(config_holder, details)
    # Assert
    self.assertFalse(config_holder.blocks_changed())
def test_given_config_when_no_change_then_blocks_changed_returns_false(self):
    """Re-applying a non-empty config unchanged does not flag blocks_changed."""
    # Arrange
    config_holder = self.create_active_config_holder()
    details = config_holder.get_config_details()
    details['blocks'].append(Block(name="TESTNAME", pv="TESTPV").to_dict())
    self._modify_active(config_holder, details)
    # Act
    self._modify_active(config_holder, details)
    # Assert
    self.assertFalse(config_holder.blocks_changed())
def test_given_no_blocks_changed_when_update_archiver_archiver_not_restarted(self):
    """update_archiver is a no-op when the blocks did not change."""
    # Arrange
    config_holder = self.create_active_config_holder()
    details = config_holder.get_config_details()
    details['blocks'].append(Block(name="TESTNAME", pv="TESTPV").to_dict())
    self._modify_active(config_holder, details)
    # Act
    self._modify_active(config_holder, details)
    config_holder.update_archiver()
    # Assert
    self.assertFalse(self.mock_archive.update_archiver.called)
def test_given_blocks_changed_when_update_archiver_archiver_is_restarted(self):
    """update_archiver restarts the archiver when blocks changed."""
    # Arrange
    config_holder = self.create_active_config_holder()
    details = config_holder.get_config_details()
    details['blocks'].append(Block(name="TESTNAME", pv="TESTPV").to_dict())
    self._modify_active(config_holder, details)
    # Act
    details['blocks'].append(Block(name="TESTNAME2", pv="TESTPV2").to_dict())
    self._modify_active(config_holder, details)
    config_holder.update_archiver()
    # Assert
    self.assertTrue(self.mock_archive.update_archiver.called)
def test_given_no_blocks_changed_but_full_init_when_update_archiver_archiver_is_restarted(self):
    """update_archiver(True) forces a restart even without block changes."""
    # Arrange
    config_holder = self.create_active_config_holder()
    details = config_holder.get_config_details()
    details['blocks'].append(Block(name="TESTNAME", pv="TESTPV").to_dict())
    self._modify_active(config_holder, details)
    # Act
    self._modify_active(config_holder, details)
    config_holder.update_archiver(True)
    # Assert
    self.assertTrue(self.mock_archive.update_archiver.called)
@parameterized.expand([
    # Each pair differs in exactly one attribute; all must compare as changed.
    (Block(name="name", pv="pv"), Block(name="other", pv="pv")),
    (Block(name="name", pv="pv"), Block(name="name", pv="other")),
    (Block(name="name", pv="pv", local=True), Block(name="name", pv="pv", local=False)),
    (Block(name="name", pv="pv", component="A"), Block(name="name", pv="pv", component="B")),
    (Block(name="name", pv="pv", runcontrol=True), Block(name="name", pv="pv", runcontrol=False)),
    (Block(name="name", pv="pv", lowlimit=True), Block(name="name", pv="pv", lowlimit=False)),
    (Block(name="name", pv="pv", highlimit=True), Block(name="name", pv="pv", highlimit=False)),
    (Block(name="name", pv="pv", log_periodic=True), Block(name="name", pv="pv", log_periodic=False)),
    (Block(name="name", pv="pv", log_rate=True), Block(name="name", pv="pv", log_rate=False)),
    (Block(name="name", pv="pv", log_deadband=True), Block(name="name", pv="pv", log_deadband=False)),
])
def test_WHEN_block_attributes_different_THEN_blocks_changed_returns_true(self, block1, block2):
    """_blocks_changed detects a difference in any single block attribute."""
    self.assertTrue(_blocks_changed(block1, block2))
def test_WHEN_block_attributes_different_THEN_blocks_changed_returns_false(self):
    """_blocks_changed returns False for identical blocks.

    NOTE(review): the method name says "different" but the blocks compared
    here are identical — the name looks copy-pasted from the test above.
    """
    self.assertFalse(_blocks_changed(Block(name="name", pv="pv"), Block(name="name", pv="pv")))
def test_WHEN_blocks_changed_in_config_called_for_configs_which_contain_same_blocks_THEN_returns_false(self):
    """Configs with equal block maps compare as unchanged."""
    config1 = Mock()
    config1.blocks = {"a": Block(name="a", pv="pv")}
    config2 = Mock()
    config2.blocks = {"a": Block(name="a", pv="pv")}
    self.assertFalse(_blocks_changed_in_config(config1, config2))
def test_WHEN_blocks_changed_in_config_called_for_configs_with_removed_blocks_THEN_returns_true(self):
    """A block present only in the old config counts as a change."""
    config1 = Mock()
    config1.blocks = {"a": Block(name="a", pv="pv")}
    config2 = Mock()
    config2.blocks = {}
    self.assertTrue(_blocks_changed_in_config(config1, config2))
def test_WHEN_blocks_changed_in_config_called_for_configs_with_added_blocks_THEN_returns_true(self):
    """A block present only in the new config counts as a change."""
    config1 = Mock()
    config1.blocks = {}
    config2 = Mock()
    config2.blocks = {"a": Block(name="a", pv="pv")}
    self.assertTrue(_blocks_changed_in_config(config1, config2))
def test_WHEN_blocks_changed_in_config_called_and_block_comparator_says_they_are_different_THEN_returns_true(self):
    """The injected block comparator's verdict (different) is honoured."""
    config1 = Mock()
    config1.blocks = {"a": Block(name="a", pv="pv")}
    config2 = Mock()
    config2.blocks = {"a": Block(name="a", pv="pv")}
    self.assertTrue(_blocks_changed_in_config(config1, config2, block_comparator=lambda block1, block2: True))
def test_WHEN_blocks_changed_in_config_called_and_block_comparator_says_they_are_the_same_THEN_returns_false(self):
    """The injected block comparator's verdict (same) is honoured."""
    config1 = Mock()
    config1.blocks = {"a": Block(name="a", pv="pv")}
    config2 = Mock()
    config2.blocks = {"a": Block(name="a", pv="pv")}
    self.assertFalse(_blocks_changed_in_config(config1, config2, block_comparator=lambda block1, block2: False))
def test_WHEN_compare_ioc_properties_called_with_the_same_ioc_then_returns_empty_set_of_iocs_to_start_restart(self):
    """Identical IOC maps produce empty start and restart sets."""
    old_config = Mock()
    old_config.iocs = {"a": IOC("a")}
    new_config = Mock()
    new_config.iocs = {"a": IOC("a")}
    start, restart = _compare_ioc_properties(old_config, new_config)
    self.assertEqual(len(start), 0)
    self.assertEqual(len(restart), 0)
@parameterized.expand([
    # Each pair differs in a single IOC property; all must trigger a restart.
    ({"a": IOC("a", macros=True)}, {"a": IOC("a", macros=False)}),
    ({"a": IOC("a", pvs=True)}, {"a": IOC("a", pvs=False)}),
    ({"a": IOC("a", pvsets=True)}, {"a": IOC("a", pvsets=False)}),
    ({"a": IOC("a", simlevel="recsim")}, {"a": IOC("a", simlevel="devsim")}),
    ({"a": IOC("a", restart=True)}, {"a": IOC("a", restart=False)}),
])
def test_WHEN_compare_ioc_properties_called_with_different_then_restarts_ioc(self, old_iocs, new_iocs):
    """Any changed IOC property schedules exactly one restart and no starts."""
    old_config = Mock()
    old_config.iocs = old_iocs
    new_config = Mock()
    new_config.iocs = new_iocs
    start, restart = _compare_ioc_properties(old_config, new_config)
    self.assertEqual(len(start), 0)
    self.assertEqual(len(restart), 1)
def test_WHEN_compare_ioc_properties_called_with_new_ioc_then_starts_new_ioc(self):
    """An IOC only in the new config is scheduled to start, not restart."""
    old_config = Mock()
    old_config.iocs = {}
    new_config = Mock()
    new_config.iocs = {"a": IOC("a", macros=True)}
    start, restart = _compare_ioc_properties(old_config, new_config)
    self.assertEqual(len(start), 1)
    self.assertEqual(len(restart), 0)
if __name__ == '__main__':
    # Run tests via the standard unittest runner when executed directly.
    unittest.main()
| |
from components import *
from utils.ebs import Applicator, System
import pyglet.graphics
from pyglet.gl import *
from pyglet.window import key
from pymunk import Body as pymunk_body
from pymunk import BB
from pymunk import Circle as pymunk_circle
from pymunk import Poly as pymunk_poly
from pymunk import moment_for_circle
from functions import get_dist, get_angle, smooth_in_out
import math
from timer import Timer
# import aistates
class MoveSystem(System):
    """Integrates each entity's Position by its Velocity, scaled by world.dt."""

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (Position, Velocity)

    def process(self, world, componentsets):
        dt = world.dt
        for position, velocity, *_ in componentsets:
            position.set(
                position.x + velocity.x * dt,
                position.y + velocity.y * dt,
            )
class RenderSystem(System):
    """Clears the frame and draws all sprites, with optional stencil lighting.

    When ``world.cfg["lighting_enabled"]`` is set, the visibility polygon in
    ``world.viewlines`` is rasterised into the stencil buffer; enemies are
    then drawn only where the stencil is set, and everything outside the
    polygon is darkened with a translucent quad.
    """

    def __init__(self, world):
        self.componenttypes = (Sprite,)
        # self.tt = TaskTimer()

    def create_triangles(self, wl, ox, oy):
        """Build a GL_TRIANGLES vertex list (6 floats per segment) from the
        view lines, joining each segment with the next one's end point and
        wrapping from the last segment back to the first."""
        vertices = []
        for i, l in enumerate(wl):
            x1, y1, x2, y2 = l
            try:
                vertices += [
                    x1 + ox,
                    y1 + oy,
                    x2 + ox,
                    y2 + oy,
                    wl[i+1][2] + ox,
                    wl[i+1][3] + oy,
                ]
            except IndexError:
                # Last segment: close the loop with the first segment.
                vertices += [
                    x1 + ox,
                    y1 + oy,
                    x2 + ox,
                    y2 + oy,
                    wl[0][2] + ox,
                    wl[0][3] + oy,
                ]
        return vertices

    def create_fan(self, wl, ox, oy):
        """Build a GL_TRIANGLE_FAN vertex list from the view lines.

        Produces ``2 * len(wl) + 4`` floats: fan origin, first two edge end
        points, one point per remaining segment, then back to the start.
        """
        vertices = []
        start = wl[0]
        end = wl[1]
        vertices += [start[0] + ox, start[1] + oy]
        vertices += [start[2] + ox, start[3] + oy]
        vertices += [end[2] + ox, end[3] + oy]
        for l in wl[2:]:
            vertices += [l[2] + ox, l[3] + oy]
        vertices += [start[2] + ox, start[3] + oy]
        return vertices

    def draw_triangles(self, wl, ox, oy):
        """Render the visibility polygon as a triangle fan (stencil pass)."""
        vc = len(wl) * 2 + 4  # must match the count produced by create_fan
        vertices_gl = (
            GLfloat * vc
        )(*self.create_fan(wl, ox, oy))
        glEnableClientState(GL_VERTEX_ARRAY)
        glVertexPointer(2, GL_FLOAT, 0, vertices_gl)
        glColor3f(1, 1, 1)
        glDrawArrays(GL_TRIANGLE_FAN, 0, vc // 2)

    def draw_triangles_2(self, wl, ox, oy):
        """Alternative polygon renderer using independent triangles
        (currently unused; kept for experimentation)."""
        vc = len(wl) * 6
        vertices_gl = (
            GLfloat * vc
        )(*self.create_triangles(wl, ox, oy))
        glEnableClientState(GL_VERTEX_ARRAY)
        glVertexPointer(2, GL_FLOAT, 0, vertices_gl)
        glColor3f(1, 1, 1)
        glDrawArrays(GL_TRIANGLES, 0, vc // 2)

    # @profile
    def process(self, world, components):
        light = world.cfg["lighting_enabled"]
        # BUGFIX: bind the view lines unconditionally.  They were previously
        # only assigned inside the lighting branch, so enabling the
        # "show_rays" debug overlay with lighting disabled raised NameError.
        wl = world.viewlines
        ox, oy = world.window.offset_x, world.window.offset_y
        glClearColor(0.2, 0.2, 0.2, 1)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()  # was called twice; once is sufficient
        if light:
            # Pass 1: write the visibility polygon into the stencil buffer
            # only (color and depth writes disabled).
            glEnable(GL_STENCIL_TEST)
            glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE)
            glDepthMask(GL_FALSE)
            glStencilFunc(GL_ALWAYS, 1, 0xFF)
            glStencilOp(GL_KEEP, GL_KEEP, GL_REPLACE)
            glStencilMask(0xFF)
            glClear(GL_STENCIL_BUFFER_BIT)
            if wl:
                # self.draw_triangles_2(wl, ox, oy)
                self.draw_triangles(wl, ox, oy)
            glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE)
            glDepthMask(GL_TRUE)
            glStencilMask(0x00)
        glEnable(GL_BLEND)
        glBlendFunc(
            GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA
        )
        if light:
            glDisable(GL_STENCIL_TEST)
        # shader.bind()
        for s in components:
            if s.batchless:
                s.sprite.draw()
        for k, v in world.batches.items():
            if k == "enemies":
                # Enemies are only drawn where the stencil (lit area) is set.
                if light:
                    glEnable(GL_STENCIL_TEST)
                    glStencilFunc(GL_EQUAL, 1, 0xFF)
                v.draw()
                if light:
                    glDisable(GL_STENCIL_TEST)
            else:
                v.draw()
        # glUseProgram(0)
        if light:
            # Pass 2: darken everything outside the visibility polygon.
            glEnable(GL_STENCIL_TEST)
            glStencilFunc(GL_EQUAL, 0, 0xFF)
            glColor4f(0.15, 0.10, 0.05, 0.8)
            x1, x2, y1, y2 = world.view_area
            pyglet.graphics.draw(
                4, GL_QUADS,
                ('v2f', [
                    x1, y1, x2, y1, x2, y2, x1, y2
                ])
            )
            glDisable(GL_STENCIL_TEST)
        if world.cfg["show_rays"]:
            # Debug overlay: draw each raw visibility ray.
            glColor4f(1, 1, 1, 0.4)
            iox, ioy = int(ox), int(oy)
            for l in wl:
                x1, y1, x2, y2 = l
                pyglet.graphics.draw(
                    2, GL_LINES,
                    ('v2i', (
                        int(x1) + iox,
                        int(y1) + ioy,
                        int(x2) + iox,
                        int(y2) + ioy
                    ))
                )
        glDisable(GL_BLEND)
class MobNamingSystem(System):
    """Placeholder for assigning default names to mobs.

    Currently a no-op: the naming logic is commented out, so the system
    only iterates its (CharName, IsMob) pairs without changing anything.
    """
    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (CharName, IsMob)
    def process(self, world, sets):
        for n, *rest in sets:
            pass
            # if not n.name:
            # n.name = "Enemy"
class SpritePosSystem(Applicator):
    """Moves each pyglet sprite to its entity's window-space position."""

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (Sprite, WindowPosition)

    def process(self, world, sets):
        for spr, target in sets:
            # Skip the assignment when the sprite is already in place.
            if (spr.sprite.x, spr.sprite.y) != (target.x, target.y):
                spr.sprite.x = target.x
                spr.sprite.y = target.y
class GlowPosSystem(Applicator):
    """Keeps a glow-effect sprite glued to its parent sprite."""

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (GlowEffect, Sprite)

    def process(self, world, sets):
        for glow, parent in sets:
            src = parent.sprite
            dst = glow.sprite
            # Position and anchors are only copied when the parent moved.
            if (dst.x, dst.y) != (src.x, src.y):
                dst.x = src.x
                dst.y = src.y
                dst.image.anchor_x = src.image.anchor_x
                dst.image.anchor_y = src.image.anchor_y
class HideSpriteSystem(Applicator):
    """Toggles sprite visibility based on overlap with the camera view area."""

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (Sprite, WindowPosition)

    def process(self, world, sets):
        w_min, w_max, h_min, h_max = world.view_area
        for spr, wp in sets:
            offscreen = (
                wp.x + spr.sprite.width < w_min
                or wp.y + spr.sprite.height < h_min
                or wp.x - spr.sprite.width > w_max
                or wp.y - spr.sprite.height > h_max
            )
            # Only write the flag when the visibility state actually flips.
            if spr.sprite.visible == offscreen:
                spr.sprite.visible = not offscreen
class StaticSpritePosSystem(Applicator):
    """Pins entities with a StaticPosition to fixed screen coordinates.

    The entity's world-space movement delta is instead fed into the window
    offset, scrolling the world underneath the static entity (camera-follow).
    """
    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (Position, StaticPosition, Sprite)
    def process(self, world, sets):
        for pos, s_pos, sprite in sets:
            sprite.sprite.x = s_pos.x
            sprite.sprite.y = s_pos.y
            # NOTE(review): if several entities carried StaticPosition their
            # deltas would all accumulate into the one window offset —
            # presumably only the player does; confirm.
            world.window.offset_x += (pos.old_x - pos.x)
            world.window.offset_y += (pos.old_y - pos.y)
class StaticGlowEffectPosSystem(Applicator):
    """Copies sprite position and anchors onto glow effects of static entities."""

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (GlowEffect, Sprite, StaticPosition)

    def process(self, world, sets):
        for glow, spr, *_ in sets:
            source = spr.sprite
            glow.sprite.x = source.x
            glow.sprite.y = source.y
            glow.sprite.image.anchor_x = source.image.anchor_x
            glow.sprite.image.anchor_y = source.image.anchor_y
class PulseAnimationSystem(Applicator):
    """Drives a looping opacity/scale pulse on each animation's owner sprite."""

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (PulseAnimation,)

    def process(self, world, sets):
        dt = world.dt
        for anim, *_ in sets:
            if anim.settle or anim.cur_time >= anim.max_time:
                # Cycle finished (or settling requested): restart the pulse.
                anim.cur_time = 0
            else:
                progress = smooth_in_out(anim.cur_time / anim.max_time)
                sprite = anim.owner.sprite
                sprite.opacity = progress * (anim.max_opacity * 255)
                sprite.scale = anim.scale_min + progress * (
                    anim.scale_max - anim.scale_min
                )
                anim.cur_time += dt
class HeadBobbingSystem(Applicator):
    """Bobs a character sprite's anchor while its physics body is moving.

    The bob eases in/out over ``hb.max_time``; once the body slows below the
    threshold the offset decays back to zero.
    """

    # |vx| + |vy| above which the bob animation runs (Manhattan speed is a
    # cheap stand-in for velocity magnitude here).
    MIN_BOB_SPEED = 30

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (HeadBobbing, Sprite, PhysBody)

    def process(self, world, sets):
        dt = world.dt
        for hb, s, pb in sets:
            if pb.body:
                v = abs(pb.body.velocity.x) + abs(pb.body.velocity.y)
            else:
                v = 0
            hb.settle = v < self.MIN_BOB_SPEED
            if hb.cur_time < hb.max_time and not hb.settle:
                hb.offset_y = hb.max_offset * smooth_in_out(
                    hb.cur_time / hb.max_time
                )
                hb.cur_time += dt
            elif hb.settle:
                # BUGFIX: clamp at zero so the decay can never overshoot
                # below 0 for a frame (which pushed the anchor past its rest
                # position and made the sprite jitter when stopping).
                hb.offset_y = max(0, hb.offset_y - hb.max_offset * dt * 2)
            else:
                hb.cur_time = 0
            s.sprite.image.anchor_y = 16 - hb.offset_y
class SpriteBatchSystem(Applicator):
    """Assigns batched sprites to their configured pyglet batch and draw group."""

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (Sprite, Batch)

    def process(self, world, sets):
        for spr, meta in sets:
            if spr.batchless:
                continue
            if not spr.sprite.batch:
                try:
                    spr.sprite.batch = world.batches[meta.batch]
                except KeyError:
                    # Unknown batch name: create it on the fly.
                    print(
                        "No such batch defined: {0}, creating.".format(
                            meta.batch
                        )
                    )
                    world.batches[meta.batch] = pyglet.graphics.Batch()
                    spr.sprite.batch = world.batches[meta.batch]
                spr.sprite.group = pyglet.graphics.OrderedGroup(meta.group)
class GlowBatchSystem(Applicator):
    """Assigns glow-effect sprites to their configured pyglet batch."""

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (GlowEffect, Batch)

    def process(self, world, sets):
        for glow, meta in sets:
            if glow.batchless:
                continue
            if not glow.sprite.batch:
                try:
                    glow.sprite.batch = world.batches[meta.batch]
                except KeyError:
                    # Unknown batch name: create it on the fly.
                    print(
                        "No such batch defined: {0}, creating.".format(
                            meta.batch
                        )
                    )
                    world.batches[meta.batch] = pyglet.graphics.Batch()
                    glow.sprite.batch = world.batches[meta.batch]
# class ApplyGlowEffectSystem(Applicator):
# def __init__(self, world):
# self.is_applicator = True
# self.componenttypes = (GlowEffect,)
# def process(self, world, sets):
# for g, *rest in sets:
# if not g.sprite.opacity == g.opacity:
# g.sprite.opacity = g.opacity
# if not g.sprite.color == g.color:
# g.sprite.color = g.color
# if not g.sprite.scale == g.scale:
# g.sprite.scale = g.scale
class PhysicsSystem(System):
    """Steps the pymunk space and keeps PhysBody/Position pairs in sync.

    Bodies are created lazily the first time an entity's PhysBody has no
    body yet; afterwards the entity Position mirrors the body position.
    At a fixed interval the system also prunes "ghost" bodies whose
    owning entities no longer exist.
    """
    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (PhysBody, Position)
        # Seconds between ghost-body cleanup sweeps.
        self.interval = 0.1
        self.interval_counter = 0
    def process(self, world, sets):
        # NOTE(review): 10 sub-steps of dt/5 integrate 2*dt of simulated
        # time per frame — confirm the doubled simulation rate is intended.
        for x in range(10):
            world.phys_space.step(world.dt / 5)
        for b, p in sets:
            if not b.body:
                # First sighting: build the body/shape and register it.
                b.body, shape, static = self.create_body(b, p)
                if static:
                    # Static geometry: only the shape goes into the space.
                    world.phys_space.add(shape)
                else:
                    world.phys_space.add(b.body, shape)
            else:
                p.set(*b.body.position)
                # Kill tiny residual velocities so bodies come to rest.
                if abs(b.body.velocity.x) + abs(b.body.velocity.y) <= 0.1:
                    b.body.velocity.x = 0
                    b.body.velocity.y = 0
        # Checks if there are any ghost bodies in the physics engine
        if self.interval_counter >= self.interval:
            self.interval_counter = 0
            if not (
                len(world.get_components(PhysBody)) ==
                len(world.phys_space.bodies)
            ):
                body_checklist = [b.body for b in world.get_components(PhysBody)]
                self.cleanup_bodies(world, body_checklist)
        else:
            self.interval_counter += world.dt
    def create_body(self, b, p):
        """Build a pymunk body+shape for *b* at position *p*.

        Returns (body, shape, static) where static indicates the body
        should not itself be added to the space.
        """
        if b.shape == "circle":
            static = False
            inertia = moment_for_circle(b.mass, 0, b.width / 2, (0, 0))
            body = pymunk_body(b.mass, inertia)
            # Tiny offset avoids perfectly overlapping spawn positions.
            body.position = (p.x + 0.001, p.y + 0.001)
            shape = pymunk_circle(body, b.width / 2, (0, 0))
            shape.elasticity = 0.2
            shape.group = 0
        elif b.shape == "square":
            static = True
            body = pymunk_body()
            w, h = b.width, b.height
            body.position = p.x, p.y
            box_points = [(0, 0), (0, h), (w, h), (w, 0)]
            shape = pymunk_poly(body, box_points, (0, 0))
        else:
            raise PhysicsError("No method to handle {0}".format(b.shape))
        return body, shape, static
    def cleanup_bodies(self, world, checklist):
        """Remove space bodies (and their shapes) not present in *checklist*."""
        for body in world.phys_space.bodies:
            if body not in checklist:
                for s in body.shapes:
                    world.phys_space.remove(s)
                world.phys_space.remove(body)
                print("Removed a physical body.")
class WindowPosSystem(System):
    """Derives window-space coordinates from world position plus camera offset."""

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (Position, WindowPosition)

    def process(self, world, sets):
        # The offsets are constant for the frame; read them once.
        off_x = world.window.offset_x
        off_y = world.window.offset_y
        for world_pos, win_pos in sets:
            win_pos.x = world_pos.x + off_x
            win_pos.y = world_pos.y + off_y
class InputMovementSystem(System):
    """Accelerates player-controlled bodies according to WASD key state."""

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (Input, PhysBody, Movement)

    def process(self, world, sets):
        pressed = world.input_keys
        for _inp, phys, move in sets:
            body = phys.body
            if not body:
                continue
            # Stop accelerating once the (Manhattan) speed cap is reached.
            if abs(body.velocity.x) + abs(body.velocity.y) >= move.max_speed:
                continue
            acc = move.acceleration * move.max_speed * world.dt
            if pressed[key.W]:
                body.velocity.y += acc
            if pressed[key.S]:
                body.velocity.y -= acc
            if pressed[key.A]:
                body.velocity.x -= acc
            if pressed[key.D]:
                body.velocity.x += acc
class LevelUpSystem(Applicator):
    """Levels an entity up once its XP reaches the next-level threshold."""

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (XP, Level)

    def process(self, world, sets):
        for xp, level in sets:
            if xp.count < level.lvlup_xp:
                continue
            # Carry surplus XP into the new level; threshold grows by 25%.
            leftover = xp.count - level.lvlup_xp
            level.lvl += 1
            level.lvlup_xp = int(level.lvlup_xp * 1.25)
            xp.count = leftover
class ApplyTargetEffectsSystem(Applicator):
    """Stub: intended to apply TargetEffects to EffectTarget entities.

    Currently a no-op — process does nothing.
    """
    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (TargetEffects, EffectTarget)
    def process(self, world, sets):
        # Not implemented yet.
        pass
class InitializeEffectiveStatsSystem(Applicator):
    """Seeds EffectiveStats from BaseStats exactly once per entity."""

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (BaseStats, EffectiveStats)

    def process(self, world, sets):
        for base, effective in sets:
            if effective.initialized:
                continue
            # Copy so later stat modifications never mutate the base values.
            effective.type = base.type.copy()
            effective.initialized = True
class ApplyAttributeStatsSystem(System):
    """Recomputes derived stats from attribute points whenever they change."""

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (BaseStats, Attributes, EffectiveStats)

    def process(self, world, sets):
        for base, attrs, eff in sets:
            if not attrs.updated:
                continue
            strength = attrs.points["str"]
            agility = attrs.points["agi"]
            intellect = attrs.points["int"]
            b = base.type
            e = eff.type
            # Strength modifiers
            e["hp_max"] = b["hp_max"] + strength * 5
            e["dmg"] = b["dmg"] + strength * 2
            # Agility modifiers
            e["sta_max"] = b["sta_max"] + agility * 5
            e["armor"] = b["armor"] + agility * 0.5
            e["aspd"] = b["aspd"] + agility * 0.5
            e["crit_chance"] = b["crit_chance"] + agility * 0.2
            # Intelligence modifiers
            e["mana_max"] = b["mana_max"] + intellect * 5
            e["sp"] = b["sp"] + intellect * 2
            e["sp_crit_chance"] = b["sp_crit_chance"] + intellect * 0.2
            attrs.updated = False
class ApplyHPSystem(System):
    """Syncs each entity's maximum HP from its effective stats."""

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (EffectiveStats, HP)

    def process(self, world, sets):
        for stats, hit_points in sets:
            hit_points.max = stats.type["hp_max"]
class ApplyStaminaSystem(System):
    """Syncs each entity's maximum stamina from its effective stats."""

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (EffectiveStats, Stamina)

    def process(self, world, sets):
        for stats, stamina in sets:
            stamina.max = stats.type["sta_max"]
class ApplyManaSystem(System):
    """Syncs each entity's maximum mana from its effective stats."""

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (EffectiveStats, Mana)

    def process(self, world, sets):
        for stats, mana in sets:
            mana.max = stats.type["mana_max"]
class ApplyBasicAttackSystem(System):
    """Syncs basic-attack damage and speed from effective stats."""

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (EffectiveStats, BasicAttack)

    def process(self, world, sets):
        for stats, attack in sets:
            attack.dmg = stats.type["dmg"]
            attack.spd = stats.type["aspd"]
class CheckDeadSystem(System):
    """Deletes any entity whose HP has dropped to zero or below."""
    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (HP,)
    def process(self, world, sets):
        for hp, *rest in sets:
            if hp.value <= 0:
                # NOTE(review): entities are deleted while their component
                # set is being iterated — confirm delete_entities defers
                # removal or that `sets` is a snapshot.
                p = world.get_entities(hp)
                print("Killed! {0}".format(p))
                world.delete_entities(p)
class CheckAttackTargetSystem(System):
    """Drops AttackTarget components whose target entity no longer exists."""

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (AttackTarget,)

    def process(self, world, sets):
        for target, *_ in sets:
            if target.who in world.entities:
                continue
            owner = world.get_entities(target)[0]
            delattr(owner, "attacktarget")
class CheckAutoAttackTargetSystem(System):
    """Drops AutoAttackTarget components whose target entity no longer exists."""

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (AutoAttackTarget,)

    def process(self, world, sets):
        for target, *_ in sets:
            if target.who in world.entities:
                continue
            owner = world.get_entities(target)[0]
            delattr(owner, "autoattacktarget")
class CheckFollowTargetSystem(System):
    """Drops FollowTarget components whose target entity no longer exists."""

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (FollowTarget,)

    def process(self, world, sets):
        for target, *_ in sets:
            if target.who in world.entities:
                continue
            owner = world.get_entities(target)[0]
            delattr(owner, "followtarget")
class AutoAttackInRangeSystem(System):
    """Grants/revokes AutoAttackTarget based on distance to the AttackTarget."""

    # Maximum distance at which auto-attacking is possible.
    AUTO_ATTACK_RANGE = 32

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (AttackTarget,)

    def process(self, world, sets):
        for at, *_ in sets:
            e = world.get_entities(at)[0]
            dist = get_dist(
                e.position.x, e.position.y,
                at.who.position.x, at.who.position.y
            )
            # BUGFIX: pass a default so entities without an existing
            # AutoAttackTarget don't raise AttributeError here — previously
            # that prevented the elif branch from ever granting the target.
            if getattr(e, "autoattacktarget", None):
                if dist > self.AUTO_ATTACK_RANGE:
                    delattr(e, "autoattacktarget")
            elif dist <= self.AUTO_ATTACK_RANGE:
                e.autoattacktarget = AutoAttackTarget(target=at.who)
class SearchTargetSystem(System):
    """Periodically scans for a hostile target with a clear line of sight.

    Every ``interval`` seconds each searching entity raycasts to every
    physics body of a different allegiance within 150 units; the first one
    not blocked by level geometry becomes its AttackTarget.
    """
    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (SearchingTarget, Allegiance)
        # Seconds between scans (raycasting every frame would be costly).
        self.interval = 0.25
        self.interval_counter = 0
    def process(self, world, sets):
        if self.interval_counter >= self.interval:
            # print("Now!")
            self.interval_counter = 0
            for st, a in sets:
                o = world.get_entities(st)[0]
                pos = getattr(o, "physbody")
                if pos:
                    if pos.body:
                        p1 = pos.body.position.x, pos.body.position.y
                        for tpos, ta in world.combined_components(
                            (PhysBody, Allegiance)
                        ):
                            # Only consider entities of a different allegiance.
                            if not ta.value == a.value:
                                if tpos.body:
                                    p2 = (
                                        tpos.body.position.x,
                                        tpos.body.position.y
                                    )
                                    if get_dist(
                                        *p1, *p2
                                    ) <= 150:
                                        # Temporarily move both bodies' shapes
                                        # into group 2 so the segment query
                                        # ignores them and only hits geometry.
                                        # NOTE(review): pos_old/tpos_old hold
                                        # only the LAST shape's group and are
                                        # unbound if a body has no shapes —
                                        # confirm bodies always have exactly
                                        # one shape here.
                                        for s in pos.body.shapes:
                                            pos_old = s.group
                                            s.group = 2
                                        for s in tpos.body.shapes:
                                            tpos_old = s.group
                                            s.group = 2
                                        ps = world.phys_space
                                        c = ps.segment_query_first(
                                            p1, p2, group=2
                                        )
                                        for s in pos.body.shapes:
                                            s.group = pos_old
                                        for s in tpos.body.shapes:
                                            s.group = tpos_old
                                        # print(c)
                                        if not c:
                                            # Clear line of sight: lock on.
                                            t = world.get_entities(tpos)[0]
                                            o.attacktarget = AttackTarget(t)
                                            delattr(o, "searchingtarget")
                                            print("Found target!")
        else:
            self.interval_counter += world.dt
class AutoAttackSystem(System):
    """Applies basic-attack damage to the auto-attack target on cooldown."""

    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (BasicAttack, AutoAttackTarget)

    def process(self, world, sets):
        for attack, target in sets:
            if attack.cd > 0:
                attack.cd -= world.dt
                continue
            victim = target.who
            if victim and hasattr(victim, "hp"):
                victim.hp.value -= attack.dmg
                print(victim.hp.value)
                # Attack speed shortens the 3-second base cooldown.
                attack.cd = 3 - 3 / 100 * attack.spd
class ApplyMovementSpeedSystem(System):
    """Copies the effective movement-speed stat onto the Movement component."""
    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (EffectiveStats, Movement)
    def process(self, world, sets):
        for stats, movement in sets:
            movement.max_speed = stats.type["ms"]
            # Acceleration is derived as a fixed fraction of top speed.
            movement.acceleration = movement.max_speed / 15
class FollowSystem(System):
    """Steers an entity toward its follow target once outside follow range."""
    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (FollowTarget, Movement, Position, PhysBody)
    def process(self, world, sets):
        for follow, movement, position, phys in sets:
            if not (follow.who and phys.body):
                continue
            separation = get_dist(
                follow.who.position.x, follow.who.position.y,
                position.x, position.y
            )
            if separation > follow.range:
                # Nudge velocity toward the target, damped by a factor of 10.
                phys.body.velocity.x += (follow.who.position.x - position.x) / 10
                phys.body.velocity.y += (follow.who.position.y - position.y) / 10
class TargetMobSystem(System):
    """Resolves player mouse clicks against mob positions.

    A click within 32 units of a mob either targets it (button 1) or orders
    an attack (button 4); the click is then flagged as handled.
    """
    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (IsPlayer, MouseClicked)
    def process(self, world, sets):
        enemies = world.combined_components((IsMob, Position))
        for player, click in sets:
            for mob, mob_pos in enemies:
                if get_dist(click.x, click.y, mob_pos.x, mob_pos.y) > 32:
                    continue
                if click.button == 1:
                    print("Targeted.")
                elif click.button == 4:
                    print("Attack!")
                click.handled = True
class CleanupClickSystem(System):
    """Strips MouseClicked components from every entity at end of frame."""
    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (MouseClicked,)
    def process(self, world, sets):
        for click, *_ in sets:
            entity = world.get_entities(click)[0]
            delattr(entity, "mouseclicked")
            print("Removed")
class LightingSystem(System):
    """Computes 2D visibility ("viewlines") for light-source entities.

    Gathers the bounding-box corners of nearby non-circle shapes, casts a
    ray toward each corner (plus two rays nudged by a tiny angle to slip
    past the corner), and stores the angle-sorted hit points in
    ``world.viewlines``.  Work is skipped on alternating frames and when
    the light source has not moved.
    """
    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (LightSource, PhysBody)
        # Recompute lighting only every (skipframes + 1) frames.
        self.skipframes = 1
        self.counter = 0
        # Last light position processed; used to skip redundant recomputes.
        self.old_pos = (0, 0)
    # @profile
    def create_midpoints(self, shapes):
        """Collect bounding-box corner points of the given shape records.

        ``shapes`` is expected to be an iterable of dicts with a "shape"
        key (as returned by pymunk's nearest_point_query).  Circles are
        skipped and moved into collision group 2 instead.
        """
        l = []
        for sd in shapes:
            s = sd["shape"]
            if not isinstance(s, pymunk_circle):
                if not s.group == 2:
                    # Four corners of the shape's axis-aligned bounding box.
                    l += [
                        (s.bb.left, s.bb.bottom),
                        (s.bb.left, s.bb.top),
                        (s.bb.right, s.bb.top),
                        (s.bb.right, s.bb.bottom)
                    ]
            else:
                # NOTE(review): circles are forced into group 2 here as a
                # side effect; confirm this is intentional and not a
                # misplaced else-branch.
                s.group = 2
        return l
    def move_in_angle(self, a, p, d):
        """Return point p advanced distance d along angle a (radians)."""
        return p[0] + d*math.cos(a), p[1] + d*math.sin(a)
    def cast_ray(self, phys_space, origin, targets, single=False):
        """Cast rays from origin toward each target point.

        For every target, one ray is cast at the exact angle and (unless
        ``single``) two more at +/- a tiny offset so rays graze corners.
        Returns a list of (x, y, angle) tuples: the hit point if the ray
        collided, else the point at max view distance.
        """
        collisions = []
        view_dist = 500
        a_offset = 0.00001
        col_group = 2
        for target in targets:
            a = -get_angle(*origin, *target)
            c1p = self.move_in_angle(a, origin, view_dist)
            c1 = phys_space.segment_query_first(
                origin, c1p, group=col_group
            )
            if c1:
                hp = c1.get_hit_point()
                collisions.append((hp.x, hp.y, a))
            else:
                collisions.append((c1p[0], c1p[1], a))
            if not single:
                a2 = a - a_offset
                c2p = self.move_in_angle(a2, origin, view_dist)
                # print(pos, mp, c2p)
                c2 = phys_space.segment_query_first(
                    origin, c2p, group=col_group
                )
                a3 = a + a_offset
                c3p = self.move_in_angle(a3, origin, view_dist)
                c3 = phys_space.segment_query_first(
                    origin, c3p, group=col_group
                )
                if c2:
                    hp = c2.get_hit_point()
                    collisions.append((hp.x, hp.y, a2))
                else:
                    collisions.append((*c2p, a2))
                if c3:
                    hp = c3.get_hit_point()
                    collisions.append((hp.x, hp.y, a3))
                else:
                    collisions.append((*c3p, a3))
        return collisions
    def process(self, world, sets):
        if self.counter < self.skipframes:
            self.counter += 1
        else:
            self.counter = 0
            # Radius within which shapes contribute occlusion geometry.
            dist = 600
            # world.viewlines = []
            midpoints = []
            collisions = []
            if world.cfg["lighting_enabled"]:
                for ls, pb in sets:
                    pos = (pb.body.position.x, pb.body.position.y)
                    if pos == self.old_pos:
                        # Light has not moved; keep the cached viewlines.
                        # print("OLD")
                        continue
                    else:
                        world.viewlines = []
                        self.old_pos = pos
                    midpoints = self.create_midpoints(
                        world.phys_space.nearest_point_query(
                            pos, dist, group=2
                        )
                    )
                    collisions += self.cast_ray(
                        world.phys_space, pos, midpoints
                    )
                    # Reset the light body's own shapes to the default group.
                    for s in pb.body.shapes:
                        s.group = 0
                if not world.viewlines:
                    # Sort by angle so the fan of viewlines is contiguous.
                    collisions.sort(key=lambda x: x[2], reverse=True)
                    world.viewlines = [(*pos, c[0], c[1]) for c in collisions]
class AIBehaviorSystem(System):
    """Ticks every AIBehavior component once per frame."""
    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = (AIBehavior,)
    def process(self, world, sets):
        for behavior, *_ in sets:
            behavior.update()
class Ray:
    """A directed segment from p1 to p2 with its precomputed angle."""
    def __init__(self, p1, p2):
        self.p1 = p1
        self.p2 = p2
        self.angle = get_angle(*p1, *p2)
class BaseSystem(System):
    """Do-nothing system template: matches no components, processes nothing."""
    def __init__(self, world):
        self.is_applicator = True
        self.componenttypes = ()
    def process(self, world, sets):
        # Intentionally a no-op.
        pass
| |
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.datastore
:platform: Unix
:synopsis: Contains the SQLAlchemy models and a few helper methods.
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <pkelley@netflix.com> @monkeysecurity
"""
from security_monkey import db, app
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy import Column, Integer, String, DateTime, Boolean, Unicode
from sqlalchemy.schema import ForeignKey
from sqlalchemy.orm import relationship
from flask.ext.security import UserMixin, RoleMixin
import datetime
# Many-to-many join table linking users to the accounts they may access.
association_table = db.Table(
    'association',
    Column('user_id', Integer, ForeignKey('user.id')),
    Column('account_id', Integer, ForeignKey('account.id')),
)
class Account(db.Model):
    """
    Meant to model AWS accounts.
    """
    __tablename__ = "account"
    id = Column(Integer, primary_key=True)
    active = Column(Boolean())
    # NOTE(review): presumably marks accounts owned by others that are
    # tracked for cross-account references -- confirm.
    third_party = Column(Boolean())
    name = Column(String(32))
    notes = Column(String(256))
    s3_name = Column(String(32))
    number = Column(String(12))  # Not stored as INT because of potential leading-zeros.
    # One-to-many: all Items discovered in this account.
    items = relationship("Item", backref="account")
class Technology(db.Model):
    """
    Meant to model AWS primitives (elb, s3, iamuser, iamgroup, etc.)
    """
    __tablename__ = 'technology'
    id = Column(Integer, primary_key=True)
    name = Column(String(32))  # elb, s3, iamuser, iamgroup, etc.
    # One-to-many: all Items of this technology type.
    items = relationship("Item", backref="technology")
# Many-to-many join table between users and their Flask-Security roles.
roles_users = db.Table(
    'roles_users',
    db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
    db.Column('role_id', db.Integer(), db.ForeignKey('role.id')),
)
class Role(db.Model, RoleMixin):
    """
    Currently unused. Will soon have roles for limited users and
    admin users.
    """
    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(80), unique=True)
    description = db.Column(db.String(255))
class User(db.Model, UserMixin):
    """
    Used by Flask-Security and Flask-Login.
    Represents a user of Security Monkey.
    """
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(255), unique=True)
    password = db.Column(db.String(255))
    active = db.Column(db.Boolean())
    confirmed_at = db.Column(db.DateTime())
    daily_audit_email = Column(Boolean())
    change_reports = Column(String(32))  # All, OnlyWithIssues, None
    # Accounts this user may view (via the 'association' join table).
    accounts = relationship("Account", secondary=association_table)
    # NOTE(review): uselist=False makes this a scalar even though a user
    # could plausibly justify many audits -- confirm intended.
    item_audits = relationship("ItemAudit", uselist=False, backref="user")
    revision_comments = relationship("ItemRevisionComment", backref="user")
    item_comments = relationship("ItemComment", backref="user")
    roles = db.relationship('Role', secondary=roles_users,
                            backref=db.backref('users', lazy='dynamic'))
    def __str__(self):
        return '<User id=%s email=%s>' % (self.id, self.email)
class ItemAudit(db.Model):
    """
    Meant to model an issue attached to a single item.
    """
    __tablename__ = "itemaudit"
    id = Column(Integer, primary_key=True)
    score = Column(Integer)
    issue = Column(String(512))
    notes = Column(String(512))
    # Justification fields: set when a user accepts/waives the issue.
    justified = Column(Boolean)
    justified_user_id = Column(Integer, ForeignKey("user.id"), nullable=True)
    justification = Column(String(512))
    justified_date = Column(DateTime(), default=datetime.datetime.utcnow, nullable=True)
    item_id = Column(Integer, ForeignKey("item.id"), nullable=False)
class Item(db.Model):
    """
    Meant to model a specific item, like an instance of a security group.
    """
    __tablename__ = "item"
    id = Column(Integer, primary_key=True)
    cloud = Column(String(32))  # AWS, Google, Other
    region = Column(String(32))
    name = Column(String(128))
    tech_id = Column(Integer, ForeignKey("technology.id"), nullable=False)
    account_id = Column(Integer, ForeignKey("account.id"), nullable=False)
    # Revisions ordered newest-first, so revisions[0] is the most recent.
    revisions = relationship("ItemRevision", backref="item", cascade="all, delete, delete-orphan", order_by="desc(ItemRevision.date_created)")
    issues = relationship("ItemAudit", backref="item", cascade="all, delete, delete-orphan")
    latest_revision_id = Column(Integer, nullable=True)
    # NOTE(review): backref is named "revision" although it points back at
    # an Item -- looks like it should be "item"; confirm before changing.
    comments = relationship("ItemComment", backref="revision", cascade="all, delete, delete-orphan", order_by="ItemComment.date_created")
class ItemComment(db.Model):
    """
    The Web UI allows users to add comments to items.
    """
    __tablename__ = "itemcomment"
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    item_id = Column(Integer, ForeignKey('item.id'), nullable=False)
    date_created = Column(DateTime(), default=datetime.datetime.utcnow, nullable=False)
    text = Column(Unicode(1024))
class ItemRevision(db.Model):
    """
    Every new configuration for an item is saved in a new ItemRevision.
    """
    __tablename__ = "itemrevision"
    id = Column(Integer, primary_key=True)
    active = Column(Boolean())
    # Full item configuration snapshot, stored as native PostgreSQL JSON.
    config = Column(JSON)
    date_created = Column(DateTime(), default=datetime.datetime.utcnow, nullable=False)
    item_id = Column(Integer, ForeignKey("item.id"), nullable=False)
    comments = relationship("ItemRevisionComment", backref="revision", cascade="all, delete, delete-orphan", order_by="ItemRevisionComment.date_created")
class ItemRevisionComment(db.Model):
    """
    The Web UI allows users to add comments to revisions.
    """
    __tablename__ = "itemrevisioncomment"
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    revision_id = Column(Integer, ForeignKey('itemrevision.id'), nullable=False)
    date_created = Column(DateTime(), default=datetime.datetime.utcnow, nullable=False)
    text = Column(Unicode(1024))
class Datastore(object):
    """Query/persistence helpers for Items, ItemRevisions and ItemAudits."""
    def __init__(self, debug=False):
        pass
    def get_all_ctype_filtered(self, tech=None, account=None, region=None, name=None, include_inactive=False):
        """
        Returns a dict mapping each matching Item to its most recent
        ItemRevision, potentially filtered by the criteria above.
        """
        item_map = {}
        query = Item.query
        if tech:
            query = query.join((Technology, Item.tech_id == Technology.id)).filter(Technology.name == tech)
        if account:
            query = query.join((Account, Item.account_id == Account.id)).filter(Account.name == account)
        # Build the kwargs dict up-front instead of deleting keys while
        # iterating it: `del` inside `.items()` iteration raises
        # RuntimeError on Python 3.
        filter_by = {k: v for k, v in (('region', region), ('name', name)) if v}
        query = query.filter_by(**filter_by)
        attempt = 1
        # Retry indefinitely on transient DB errors (preserves the
        # original unbounded-retry behavior).
        while True:
            try:
                items = query.all()
                break
            except Exception as e:
                app.logger.warn("Database Exception in Datastore::get_all_ctype_filtered. Sleeping for a few seconds. Attempt {}.".format(attempt))
                app.logger.debug("Exception: {}".format(e))
                import time
                time.sleep(5)
                attempt = attempt + 1
        for item in items:
            if len(item.revisions) == 0:
                app.logger.debug("There are no itemrevisions for this item: {}".format(item.id))
                continue
            # Item.revisions is ordered newest-first, so [0] is the latest.
            most_recent = item.revisions[0]
            if not most_recent.active and not include_inactive:
                continue
            item_map[item] = most_recent
        return item_map
    def get(self, ctype, region, account, name):
        """
        Returns a list of all revisions for the given item.
        """
        item = self._get_item(ctype, region, account, name)
        return item.revisions
    def get_audit_issues(self, ctype, region, account, name):
        """
        Returns a list of ItemAudit objects associated with a given Item.
        """
        item = self._get_item(ctype, region, account, name)
        return item.issues
    def store(self, ctype, region, account, name, active_flag, config, new_issues=None):
        """
        Saves an itemrevision. Create the item if it does not already exist.

        Issues are keyed on "issue/notes": incoming issues not already
        attached are added, and attached issues absent from new_issues
        are deleted.
        """
        # None default avoids the shared mutable-default-argument pitfall.
        new_issues = new_issues if new_issues is not None else []
        item = self._get_item(ctype, region, account, name)
        item_revision = ItemRevision(active=active_flag, config=config)
        item.revisions.append(item_revision)
        # Precompute key sets once instead of rebuilding a list
        # comprehension on every loop iteration (was O(n*m)).
        existing_keys = {"{}/{}".format(i.issue, i.notes) for i in item.issues}
        incoming_keys = {"{}/{}".format(i.issue, i.notes) for i in new_issues}
        # Add new issues
        for new_issue in new_issues:
            nk = "{}/{}".format(new_issue.issue, new_issue.notes)
            if nk not in existing_keys:
                # Track the key so duplicate entries within new_issues are
                # only attached once (matches the original behavior).
                existing_keys.add(nk)
                item.issues.append(new_issue)
                db.session.add(new_issue)
        # Delete old issues that are no longer reported.
        for old_issue in list(item.issues):
            if "{}/{}".format(old_issue.issue, old_issue.notes) not in incoming_keys:
                db.session.delete(old_issue)
        db.session.add(item)
        db.session.add(item_revision)
        db.session.commit()
        self._set_latest_revision(item)
    def _set_latest_revision(self, item):
        """Points item.latest_revision_id at the newest revision and commits."""
        sorted_revisions = sorted(item.revisions, key=lambda revision: revision.date_created)
        latest_revision = sorted_revisions[-1]
        item.latest_revision_id = latest_revision.id
        db.session.add(item)
        db.session.commit()
    def _get_item(self, technology, region, account, name):
        """
        Returns the first item with matching parameters.
        Creates item if it doesn't exist.

        Raises an Exception if the account is unknown or if multiple
        items match (which indicates a corrupted database).
        """
        account_result = Account.query.filter(Account.name == account).first()
        if not account_result:
            raise Exception("Account with name [{}] not found.".format(account))
        matches = Item.query.join((Technology, Item.tech_id == Technology.id)) \
            .join((Account, Item.account_id == Account.id)) \
            .filter(Technology.name == technology) \
            .filter(Account.name == account) \
            .filter(Item.region == region) \
            .filter(Item.name == name) \
            .all()
        if len(matches) > 1:
            # DB needs to be cleaned up and a bug needs to be found if this ever happens.
            raise Exception("Found multiple items for tech: {} region: {} account: {} and name: {}"
                            .format(technology, region, account, name))
        item = matches[0] if matches else None
        if not item:
            technology_result = Technology.query.filter(Technology.name == technology).first()
            if not technology_result:
                technology_result = Technology(name=technology)
                db.session.add(technology_result)
                db.session.commit()
                app.logger.info("Creating a new Technology: {} - ID: {}"
                                .format(technology, technology_result.id))
            item = Item(tech_id=technology_result.id, region=region, account_id=account_result.id, name=name)
        return item
| |
from __future__ import annotations
from inspect import getmembers, isclass, isdatadescriptor
from os import environ
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Sequence, Union
from sanic.errorpages import DEFAULT_FORMAT, check_error_format
from sanic.helpers import Default, _default
from sanic.http import Http
from sanic.log import deprecation, error_logger
from sanic.utils import load_module_from_file_location, str_to_bool
# Environment variables with this prefix are loaded into the config.
SANIC_PREFIX = "SANIC_"
# Baseline settings applied to every Config before user defaults and
# environment variables are merged on top.
DEFAULT_CONFIG = {
    "_FALLBACK_ERROR_FORMAT": _default,
    "ACCESS_LOG": True,
    "AUTO_EXTEND": True,
    "AUTO_RELOAD": False,
    "EVENT_AUTOREGISTER": False,
    "FORWARDED_FOR_HEADER": "X-Forwarded-For",
    "FORWARDED_SECRET": None,
    "GRACEFUL_SHUTDOWN_TIMEOUT": 15.0,  # 15 sec
    "KEEP_ALIVE_TIMEOUT": 5,  # 5 seconds
    "KEEP_ALIVE": True,
    "MOTD": True,
    "MOTD_DISPLAY": {},
    "NOISY_EXCEPTIONS": False,
    "PROXIES_COUNT": None,
    "REAL_IP_HEADER": None,
    "REGISTER": True,
    "REQUEST_BUFFER_SIZE": 65536,  # 64 KiB
    "REQUEST_MAX_HEADER_SIZE": 8192,  # 8 KiB, but cannot exceed 16384
    "REQUEST_ID_HEADER": "X-Request-ID",
    "REQUEST_MAX_SIZE": 100000000,  # 100 megabytes
    "REQUEST_TIMEOUT": 60,  # 60 seconds
    "RESPONSE_TIMEOUT": 60,  # 60 seconds
    "USE_UVLOOP": _default,
    "WEBSOCKET_MAX_SIZE": 2 ** 20,  # 1 megabyte
    "WEBSOCKET_PING_INTERVAL": 20,
    "WEBSOCKET_PING_TIMEOUT": 20,
}
# These values will be removed from the Config object in v22.6 and moved
# to the application state
DEPRECATED_CONFIG = ("SERVER_RUNNING", "RELOADER_PROCESS", "RELOADED_FILES")
class DescriptorMeta(type):
    """Metaclass that records which members are settable data descriptors.

    At class-creation time it stores in ``__setters__`` the names of all
    data descriptors exposing a ``setter`` attribute (e.g. properties),
    so ``Config.__setattr__`` can route those through the descriptor.
    """
    def __init__(cls, *_):
        setter_names = set()
        for name, _member in getmembers(cls, cls._is_setter):
            setter_names.add(name)
        cls.__setters__ = setter_names
    @staticmethod
    def _is_setter(member: object):
        return isdatadescriptor(member) and hasattr(member, "setter")
class Config(dict, metaclass=DescriptorMeta):
    """Dict-backed configuration store for a Sanic application.

    Attribute access is aliased onto item access; all writes funnel
    through ``update``/``_post_set`` so that dependent state (header
    size limits, error format) stays consistent.
    """
    # Known settings, annotated for IDEs and type checkers.
    ACCESS_LOG: bool
    AUTO_EXTEND: bool
    AUTO_RELOAD: bool
    EVENT_AUTOREGISTER: bool
    FORWARDED_FOR_HEADER: str
    FORWARDED_SECRET: Optional[str]
    GRACEFUL_SHUTDOWN_TIMEOUT: float
    KEEP_ALIVE_TIMEOUT: int
    KEEP_ALIVE: bool
    NOISY_EXCEPTIONS: bool
    MOTD: bool
    MOTD_DISPLAY: Dict[str, str]
    PROXIES_COUNT: Optional[int]
    REAL_IP_HEADER: Optional[str]
    REGISTER: bool
    REQUEST_BUFFER_SIZE: int
    REQUEST_MAX_HEADER_SIZE: int
    REQUEST_ID_HEADER: str
    REQUEST_MAX_SIZE: int
    REQUEST_TIMEOUT: int
    RESPONSE_TIMEOUT: int
    SERVER_NAME: str
    USE_UVLOOP: Union[Default, bool]
    WEBSOCKET_MAX_SIZE: int
    WEBSOCKET_PING_INTERVAL: int
    WEBSOCKET_PING_TIMEOUT: int
    def __init__(
        self,
        defaults: Dict[str, Union[str, bool, int, float, None]] = None,
        env_prefix: Optional[str] = SANIC_PREFIX,
        keep_alive: Optional[bool] = None,
        *,
        converters: Optional[Sequence[Callable[[str], Any]]] = None,
    ):
        """Seed the config from DEFAULT_CONFIG, caller defaults, and env vars.

        ``converters`` are extra string-to-type casters tried when loading
        environment variables; ``env_prefix`` selects which variables are
        read (falsy disables env loading entirely).
        """
        defaults = defaults or {}
        super().__init__({**DEFAULT_CONFIG, **defaults})
        # Converters are tried in reverse order when hydrating env vars.
        self._converters = [str, str_to_bool, float, int]
        self._LOGO = ""
        if converters:
            for converter in converters:
                self.register_type(converter)
        if keep_alive is not None:
            self.KEEP_ALIVE = keep_alive
        if env_prefix != SANIC_PREFIX:
            if env_prefix:
                self.load_environment_vars(env_prefix)
        else:
            self.load_environment_vars(SANIC_PREFIX)
        self._configure_header_size()
        self._check_error_format()
        # Marks initialization complete: _post_set side effects activate.
        self._init = True
    def __getattr__(self, attr):
        # Missing keys surface as AttributeError so attribute semantics hold.
        try:
            return self[attr]
        except KeyError as ke:
            raise AttributeError(f"Config has no '{ke.args[0]}'")
    def __setattr__(self, attr, value) -> None:
        # Settable properties (collected by DescriptorMeta) go through the
        # descriptor; everything else is stored as a dict item via update().
        if attr in self.__class__.__setters__:
            try:
                super().__setattr__(attr, value)
            except AttributeError:
                ...
            else:
                return None
        self.update({attr: value})
    def __setitem__(self, attr, value) -> None:
        # Route item assignment through update() so _post_set runs.
        self.update({attr: value})
    def update(self, *other, **kwargs) -> None:
        """dict.update plus per-key post-processing via _post_set."""
        other_mapping = {k: v for item in other for k, v in dict(item).items()}
        super().update(*other, **kwargs)
        for attr, value in {**other_mapping, **kwargs}.items():
            self._post_set(attr, value)
    def _post_set(self, attr, value) -> None:
        # Side effects only after __init__ has finished (_init is set).
        if self.get("_init"):
            if attr in (
                "REQUEST_MAX_HEADER_SIZE",
                "REQUEST_BUFFER_SIZE",
                "REQUEST_MAX_SIZE",
            ):
                self._configure_header_size()
            elif attr == "LOGO":
                self._LOGO = value
                deprecation(
                    "Setting the config.LOGO is deprecated and will no longer "
                    "be supported starting in v22.6.",
                    22.6,
                )
    @property
    def LOGO(self):
        """Deprecated startup logo text (see _post_set)."""
        return self._LOGO
    @property
    def FALLBACK_ERROR_FORMAT(self) -> str:
        """Error-page format, falling back to DEFAULT_FORMAT when unset."""
        if self._FALLBACK_ERROR_FORMAT is _default:
            return DEFAULT_FORMAT
        return self._FALLBACK_ERROR_FORMAT
    @FALLBACK_ERROR_FORMAT.setter
    def FALLBACK_ERROR_FORMAT(self, value):
        # Validate before assignment; warn when overwriting an explicit value.
        self._check_error_format(value)
        if (
            self._FALLBACK_ERROR_FORMAT is not _default
            and value != self._FALLBACK_ERROR_FORMAT
        ):
            error_logger.warning(
                "Setting config.FALLBACK_ERROR_FORMAT on an already "
                "configured value may have unintended consequences."
            )
        self._FALLBACK_ERROR_FORMAT = value
    def _configure_header_size(self):
        # Push the current size limits down into the HTTP protocol layer.
        Http.set_header_max_size(
            self.REQUEST_MAX_HEADER_SIZE,
            self.REQUEST_BUFFER_SIZE - 4096,
            self.REQUEST_MAX_SIZE,
        )
    def _check_error_format(self, format: Optional[str] = None):
        """Raises if the given (or current) error format is not recognized."""
        check_error_format(format or self.FALLBACK_ERROR_FORMAT)
    def load_environment_vars(self, prefix=SANIC_PREFIX):
        """
        Looks for prefixed environment variables and applies them to the
        configuration if present. This is called automatically when Sanic
        starts up to load environment variables into config.
        It will automatically hydrate the following types:
        - ``int``
        - ``float``
        - ``bool``
        Anything else will be imported as a ``str``. If you would like to add
        additional types to this list, you can use
        :meth:`sanic.config.Config.register_type`. Just make sure that they
        are registered before you instantiate your application.
        .. code-block:: python
            class Foo:
                def __init__(self, name) -> None:
                    self.name = name
            config = Config(converters=[Foo])
            app = Sanic(__name__, config=config)
        `See user guide re: config
        <https://sanicframework.org/guide/deployment/configuration.html>`__
        """
        lower_case_var_found = False
        for key, value in environ.items():
            if not key.startswith(prefix):
                continue
            if not key.isupper():
                lower_case_var_found = True
            _, config_key = key.split(prefix, 1)
            # Try the most recently registered converter first; the first
            # one that does not raise ValueError wins.
            for converter in reversed(self._converters):
                try:
                    self[config_key] = converter(value)
                    break
                except ValueError:
                    pass
        if lower_case_var_found:
            deprecation(
                "Lowercase environment variables will not be "
                "loaded into Sanic config beginning in v22.9.",
                22.9,
            )
    def update_config(self, config: Union[bytes, str, dict, Any]):
        """
        Update app.config.
        .. note::
            Only upper case settings are considered
        You can upload app config by providing path to py file
        holding settings.
        .. code-block:: python
            # /some/py/file
            A = 1
            B = 2
        .. code-block:: python
            config.update_config("${some}/py/file")
        Yes you can put environment variable here, but they must be provided
        in format: ``${some_env_var}``, and mark that ``$some_env_var`` is
        treated as plain string.
        You can upload app config by providing dict holding settings.
        .. code-block:: python
            d = {"A": 1, "B": 2}
            config.update_config(d)
        You can upload app config by providing any object holding settings,
        but in such case config.__dict__ will be used as dict holding settings.
        .. code-block:: python
            class C:
                A = 1
                B = 2
            config.update_config(C)
        `See user guide re: config
        <https://sanicframework.org/guide/deployment/configuration.html>`__
        """
        if isinstance(config, (bytes, str, Path)):
            config = load_module_from_file_location(location=config)
        if not isinstance(config, dict):
            cfg = {}
            if not isclass(config):
                # Instance: merge class-level attributes with instance dict.
                cfg.update(
                    {
                        key: getattr(config, key)
                        for key in config.__class__.__dict__.keys()
                    }
                )
            config = dict(config.__dict__)
            config.update(cfg)
        # Only ALL-CAPS keys are treated as settings.
        config = dict(filter(lambda i: i[0].isupper(), config.items()))
        self.update(config)
    # Convenience alias for update_config.
    load = update_config
    def register_type(self, converter: Callable[[str], Any]) -> None:
        """
        Allows for adding custom function to cast from a string value to any
        other type. The function should raise ValueError if it is not the
        correct type.
        """
        if converter in self._converters:
            error_logger.warning(
                f"Configuration value converter '{converter.__name__}' has "
                "already been registered"
            )
            return
        self._converters.append(converter)
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module that handles tee-ing output to a file."""
from __future__ import print_function
import errno
import fcntl
import os
import multiprocessing
import select
import signal
import sys
import traceback
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
# Max amount of data we're hold in the buffer at a given time.
_BUFSIZE = 1024
# Custom signal handlers so we can catch the exception and handle it.
class ToldToDie(Exception):
  """Exception thrown via signal handlers."""

  def __init__(self, signum):
    # Record which signal triggered the shutdown in the message.
    super(ToldToDie, self).__init__('We received signal %i' % (signum,))
def _TeeProcessSignalHandler(signum, _frame):
  """TeeProcess custom signal handler.

  This is used to decide whether or not to kill our parent.

  Raises:
    ToldToDie: always; converts SIGINT/SIGTERM into an exception so the
      tee main loop can perform an orderly shutdown.
  """
  raise ToldToDie(signum)
def _output(line, output_files, complain):
  """Print line to output_files.

  Args:
    line: Line to print.
    output_files: List of files to print to.
    complain: Print a warning if we get EAGAIN errors. Only one error
      is printed per line.
  """
  for f in output_files:
    offset = 0
    while offset < len(line):
      # Block until the fd is writable, then write as much as the OS takes;
      # os.write may perform a short write on pipes.
      select.select([], [f], [])
      try:
        offset += os.write(f.fileno(), line[offset:])
      except OSError as ex:
        if ex.errno == errno.EINTR:
          # Interrupted by a signal; just retry the write.
          continue
        elif ex.errno != errno.EAGAIN:
          raise
      if offset < len(line) and complain:
        # Short write: warn once (recursively, with complain=False so the
        # warning itself cannot trigger another warning).
        flags = fcntl.fcntl(f.fileno(), fcntl.F_GETFL, 0)
        if flags & os.O_NONBLOCK:
          warning = '\nWarning: %s/%d is non-blocking.\n' % (f.name,
                                                             f.fileno())
          _output(warning, output_files, False)
        warning = '\nWarning: Short write for %s/%d.\n' % (f.name, f.fileno())
        _output(warning, output_files, False)
def _tee(input_fd, output_files, complain):
  """Read data from |input_fd| and write to |output_files|.

  Args:
    input_fd: File descriptor to read from until EOF.
    output_files: List of file objects every chunk is replicated to.
    complain: Forwarded to _output; warn on EAGAIN/short writes.
  """
  while True:
    # We need to use os.read() directly because it will return to us when the
    # other side has flushed its output (and is shorter than _BUFSIZE). If we
    # use python's file object helpers (like read() and readline()), it will
    # not return until either the full buffer is filled or a newline is hit.
    data = os.read(input_fd, _BUFSIZE)
    if not data:
      # EOF: writer closed its end of the pipe.
      return
    _output(data, output_files, complain)
class _TeeProcess(multiprocessing.Process):
  """Replicate output to multiple file handles."""

  def __init__(self, output_filenames, complain, error_fd,
               master_pid):
    """Write to stdout and supplied filenames.

    Args:
      output_filenames: List of filenames to print to.
      complain: Print a warning if we get EAGAIN errors.
      error_fd: The fd to write exceptions/errors to during
        shutdown.
      master_pid: Pid to SIGTERM if we shutdown uncleanly.
    """
    # Data written to writer_pipe (by the parent) is read back out of
    # _reader_pipe inside the subprocess.
    self._reader_pipe, self.writer_pipe = os.pipe()
    self._output_filenames = output_filenames
    self._complain = complain
    # Dupe the fd on the offchance it's stdout/stderr,
    # which we screw with.
    # Not passing 3 argument (0) for unbuffered output because this is not
    # supported in Python 3 and there are issues in Python 2 -- see
    # https://bugs.python.org/issue17404.
    self._error_handle = os.fdopen(os.dup(error_fd), 'w')
    self.master_pid = master_pid
    multiprocessing.Process.__init__(self)

  def _CloseUnnecessaryFds(self):
    """Close every fd except stdout/stderr, the error handle, and our pipe."""
    # For python2 we were relying on subprocess.MAXFD but that does not exist
    # in python3. However, the calculation below is how it was being computed.
    try:
      max_fd_value = os.sysconf('SC_OPEN_MAX')
    except ValueError:
      max_fd_value = 256
    preserve = set([1, 2, self._error_handle.fileno(), self._reader_pipe,
                    max_fd_value])
    preserve = iter(sorted(preserve))
    fd = 0
    while fd < max_fd_value:
      current_low = next(preserve)
      if fd != current_low:
        # Close the contiguous range between the fds we want to keep.
        os.closerange(fd, current_low)
        fd = current_low
      fd += 1

  def run(self):
    """Main function for tee subprocess."""
    failed = True
    input_fd = None
    try:
      signal.signal(signal.SIGINT, _TeeProcessSignalHandler)
      signal.signal(signal.SIGTERM, _TeeProcessSignalHandler)
      # Cleanup every fd except for what we use.
      self._CloseUnnecessaryFds()
      # Read from the pipe.
      input_fd = self._reader_pipe
      # Create list of files to write to.
      # Not passing 3 argument (0) for unbuffered output because this is not
      # supported in Python 3 and there are issues in Python 2 -- see
      # https://bugs.python.org/issue17404.
      output_files = [os.fdopen(sys.stdout.fileno(), 'w')]
      for filename in self._output_filenames:
        output_files.append(open(filename, 'w'))
      # Send all data from the one input to all the outputs.
      _tee(input_fd, output_files, self._complain)
      failed = False
    except ToldToDie:
      # Orderly shutdown requested via SIGINT/SIGTERM.
      failed = False
    except Exception:
      # Exception binding was unused; formats the traceback instead.
      tb = traceback.format_exc()
      logging.PrintBuildbotStepFailure(self._error_handle)
      # Fixed typo: "occured" -> "occurred".
      self._error_handle.write(
          'Unhandled exception occurred in tee:\n%s\n' % (tb,))
      # Try to signal the parent telling them of our
      # imminent demise.
    finally:
      # Close input.
      if input_fd:
        os.close(input_fd)
      if failed:
        try:
          os.kill(self.master_pid, signal.SIGTERM)
        except Exception as e:
          self._error_handle.write('\nTee failed signaling %s\n' % e)
      # Finally, kill ourself.
      # Specifically do it in a fashion that ensures no inherited
      # cleanup code from our parent process is ran- leave that to
      # the parent.
      # pylint: disable=protected-access
      os._exit(0)
class Tee(cros_build_lib.MasterPidContextManager):
  """Class that handles tee-ing output to a file.

  While active, everything written to this process's stdout/stderr is
  also replicated into ``output_file`` by a helper subprocess.
  """
  def __init__(self, output_file):
    """Initializes object with path to log file."""
    cros_build_lib.MasterPidContextManager.__init__(self)
    self._file = output_file
    self._old_stdout = None
    self._old_stderr = None
    self._old_stdout_fd = None
    self._old_stderr_fd = None
    self._tee = None
  def start(self):
    """Start tee-ing all stdout and stderr output to the file."""
    # Flush and save old file descriptors.
    sys.stdout.flush()
    sys.stderr.flush()
    self._old_stdout_fd = os.dup(sys.stdout.fileno())
    self._old_stderr_fd = os.dup(sys.stderr.fileno())
    # Save file objects
    self._old_stdout = sys.stdout
    self._old_stderr = sys.stderr
    # Replace std[out|err] with unbuffered file objects
    # Not passing 3 argument (0) for unbuffered output because this is not
    # supported in Python 3 and there are issues in Python 2 -- see
    # https://bugs.python.org/issue17404.
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w')
    sys.stderr = os.fdopen(sys.stderr.fileno(), 'w')
    # Create a tee subprocess.
    self._tee = _TeeProcess([self._file], True, self._old_stderr_fd,
                            os.getpid())
    self._tee.start()
    # Redirect stdout and stderr to the tee subprocess.
    writer_pipe = self._tee.writer_pipe
    os.dup2(writer_pipe, sys.stdout.fileno())
    os.dup2(writer_pipe, sys.stderr.fileno())
    # Our duplicate of the pipe fd is no longer needed after dup2.
    os.close(writer_pipe)
  def stop(self):
    """Restores old stdout and stderr handles and waits for tee proc to exit."""
    # Close unbuffered std[out|err] file objects, as well as the tee's stdin.
    sys.stdout.close()
    sys.stderr.close()
    # Restore file objects
    sys.stdout = self._old_stdout
    sys.stderr = self._old_stderr
    # Restore old file descriptors.
    os.dup2(self._old_stdout_fd, sys.stdout.fileno())
    os.dup2(self._old_stderr_fd, sys.stderr.fileno())
    os.close(self._old_stdout_fd)
    os.close(self._old_stderr_fd)
    # Closing the pipe above EOFs the subprocess, so join() terminates.
    self._tee.join()
  def _enter(self):
    # MasterPidContextManager hook: begin teeing on context entry.
    self.start()
  def _exit(self, exc_type, exc, exc_tb):
    # MasterPidContextManager hook: stop teeing, then make sure the
    # subprocess is gone even if stop() raised.
    try:
      self.stop()
    finally:
      if self._tee is not None:
        self._tee.terminate()
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Deploy a Framework-prequantized Model with TVM - Part 3 (TFLite)
================================================================
**Author**: `Siju Samuel <https://github.com/siju-samuel>`_
Welcome to part 3 of the Deploy Framework-Prequantized Model with TVM tutorial.
In this part, we will start with a Quantized TFLite graph and then compile and execute it via TVM.
For more details on quantizing the model using TFLite, readers are encouraged to
go through `Converting Quantized Models
<https://www.tensorflow.org/lite/convert/quantization>`_.
The TFLite models can be downloaded from this `link
<https://www.tensorflow.org/lite/guide/hosted_models>`_.
To get started, Tensorflow and TFLite package needs to be installed as prerequisite.
.. code-block:: bash
# install tensorflow and tflite
pip install tensorflow==2.1.0
pip install tflite==2.1.0
Now please check if TFLite package is installed successfully, ``python -c "import tflite"``
"""
###############################################################################
# Necessary imports
# -----------------
import os
import numpy as np
import tflite
import tvm
from tvm import relay
######################################################################
# Download pretrained Quantized TFLite model
# ------------------------------------------
# Download mobilenet V2 TFLite model provided by Google
from tvm.contrib.download import download_testdata
model_url = (
    "https://storage.googleapis.com/download.tensorflow.org/models/"
    "tflite_11_05_08/mobilenet_v2_1.0_224_quant.tgz"
)
# Download model tar file and extract it to get mobilenet_v2_1.0_224.tflite
# (download_testdata returns the local cached path of the archive).
model_path = download_testdata(
    model_url, "mobilenet_v2_1.0_224_quant.tgz", module=["tf", "official"]
)
model_dir = os.path.dirname(model_path)
######################################################################
# Utils for downloading and extracting zip files
# ----------------------------------------------
def extract(path):
    """Extract a gzipped tarball into the directory that contains it.

    Args:
        path: Filesystem path to a ``.tgz``/``.gz`` tar archive.

    Raises:
        RuntimeError: If ``path`` does not end in ``tgz``/``gz``.
    """
    import tarfile

    if path.endswith("tgz") or path.endswith("gz"):
        dir_path = os.path.dirname(path)
        # Use a context manager so the archive handle is closed even if
        # extraction fails part-way through (the original leaked it on error).
        with tarfile.open(path) as tar:
            tar.extractall(path=dir_path)
    else:
        raise RuntimeError("Could not decompress the file: " + path)
# Unpack the downloaded archive so the .tflite file is available on disk.
extract(model_path)
######################################################################
# Load a test image
# -----------------
#######################################################################
# Get a real image for e2e testing
# --------------------------------
def get_real_image(im_height, im_width):
    """Download a sample elephant photo and return it as a uint8 NHWC batch.

    Args:
        im_height: Target image height in pixels.
        im_width: Target image width in pixels.

    Returns:
        A numpy array with shape (1, im_height, im_width, 3) and dtype uint8.
    """
    from PIL import Image

    repo_base = "https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/"
    img_name = "elephant-299.jpg"
    image_url = os.path.join(repo_base, img_name)
    img_path = download_testdata(image_url, img_name, module="data")
    resized = Image.open(img_path).resize((im_height, im_width))
    pixels = np.array(resized).astype("uint8")
    # Add the leading batch dimension expected by the model.
    return pixels.reshape(1, im_height, im_width, 3)
data = get_real_image(224, 224)
######################################################################
# Load a tflite model
# -------------------
######################################################################
# Now we can open mobilenet_v2_1.0_224.tflite
tflite_model_file = os.path.join(model_dir, "mobilenet_v2_1.0_224_quant.tflite")
tflite_model_buf = open(tflite_model_file, "rb").read()
# Get TFLite model from buffer.  Newer tflite packages expose
# GetRootAsModel directly on tflite.Model; older ones nest it one level
# deeper under tflite.Model.Model, hence the AttributeError fallback.
try:
    import tflite
    tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
except AttributeError:
    import tflite.Model
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
###############################################################################
# Lets run TFLite pre-quantized model inference and get the TFLite prediction.
def run_tflite_model(tflite_model_buf, input_data):
    """Execute a serialized TFLite model with the TFLite interpreter.

    Args:
        tflite_model_buf: Raw flatbuffer bytes of the model.
        input_data: One array, or a list of arrays (one per model input).

    Returns:
        A list containing one numpy array per model output.
    """
    try:
        from tensorflow import lite as interpreter_wrapper
    except ImportError:
        from tensorflow.contrib import lite as interpreter_wrapper

    inputs = input_data if isinstance(input_data, list) else [input_data]

    interpreter = interpreter_wrapper.Interpreter(model_content=tflite_model_buf)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    # Feed exactly one array per declared graph input.
    assert len(inputs) == len(input_details)
    for detail, value in zip(input_details, inputs):
        interpreter.set_tensor(detail["index"], value)

    interpreter.invoke()

    # Collect every graph output in declaration order.
    return [interpreter.get_tensor(detail["index"]) for detail in output_details]
###############################################################################
# Lets run TVM compiled pre-quantized model inference and get the TVM prediction.
def run_tvm(lib):
    """Run the TVM-compiled module on the global ``data`` sample.

    Args:
        lib: Factory module produced by ``relay.build_module.build``.

    Returns:
        Tuple of (top-5 predicted label indices, the graph-executor module).
    """
    from tvm.contrib import graph_executor

    module = graph_executor.GraphModule(lib["default"](tvm.cpu(0)))
    module.set_input("input", data)
    module.run()
    raw_output = module.get_output(0).numpy()
    # Indices of the five largest scores, highest first.
    top5 = np.squeeze(raw_output).argsort()[-5:][::-1]
    return top5, module
###############################################################################
# TFLite inference
# ----------------
###############################################################################
# Run TFLite inference on the quantized model.
tflite_res = run_tflite_model(tflite_model_buf, data)
tflite_pred = np.squeeze(tflite_res).argsort()[-5:][::-1]
###############################################################################
# TVM compilation and inference
# -----------------------------
###############################################################################
# We use the TFLite-Relay parser to convert the TFLite pre-quantized graph into Relay IR. Note that
# frontend parser call for a pre-quantized model is exactly same as frontend parser call for a FP32
# model. We encourage you to remove the comment from print(mod) and inspect the Relay module. You
# will see many QNN operators, like, Requantize, Quantize and QNN Conv2D.
dtype_dict = {"input": data.dtype.name}
shape_dict = {"input": data.shape}
mod, params = relay.frontend.from_tflite(tflite_model, shape_dict=shape_dict, dtype_dict=dtype_dict)
# print(mod)
###############################################################################
# Let's now compile the Relay module. We use the "llvm" target here. Please replace it with the
# target platform that you are interested in.
target = "llvm"
with tvm.transform.PassContext(opt_level=3):
    lib = relay.build_module.build(mod, target=target, params=params)
###############################################################################
# Finally, lets call inference on the TVM compiled module.
tvm_pred, rt_mod = run_tvm(lib)
###############################################################################
# Accuracy comparison
# -------------------
###############################################################################
# Print the top-5 labels for TFLite and TVM inference.
# Checking the labels because the requantize implementation is different between
# TFLite and Relay. This causes final output numbers to mismatch. So, testing accuracy via labels.
print("TVM Top-5 labels:", tvm_pred)
print("TFLite Top-5 labels:", tflite_pred)
##########################################################################
# Measure performance
# -------------------
# Here we give an example of how to measure performance of TVM compiled models.
n_repeat = 100  # should be bigger to make the measurement more accurate
dev = tvm.cpu(0)
print(rt_mod.benchmark(dev, number=1, repeat=n_repeat))
######################################################################
# .. note::
#
# Unless the hardware has special support for fast 8 bit instructions, quantized models are
# not expected to be any faster than FP32 models. Without fast 8 bit instructions, TVM does
# quantized convolution in 16 bit, even if the model itself is 8 bit.
#
# For x86, the best performance can be achieved on CPUs with AVX512 instructions set.
# In this case, TVM utilizes the fastest available 8 bit instructions for the given target.
# This includes support for the VNNI 8 bit dot product instruction (CascadeLake or newer).
# For EC2 C5.12x large instance, TVM latency for this tutorial is ~2 ms.
#
# Intel conv2d NCHWc schedule on ARM gives better end-to-end latency compared to ARM NCHW
# conv2d spatial pack schedule for many TFLite networks. ARM winograd performance is higher but
# it has a high memory footprint.
#
# Moreover, the following general tips for CPU performance equally applies:
#
# * Set the environment variable TVM_NUM_THREADS to the number of physical cores
# * Choose the best target for your hardware, such as "llvm -mcpu=skylake-avx512" or
# "llvm -mcpu=cascadelake" (more CPUs with AVX512 would come in the future)
# * Perform autotuning - :ref:`Auto-tuning a convolution network for x86 CPU
# <tune_relay_x86>`.
# * To get best inference performance on ARM CPU, change target argument
# according to your device and follow :ref:`Auto-tuning a convolution
# network for ARM CPU <tune_relay_arm>`.
| |
#!/usr/bin/env python3
# Copyright (c) 2009-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for auroracoind node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
import collections
import shlex
import sys
from .authproxy import JSONRPCException
from .util import (
MAX_NODES,
append_config,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
# How long (in seconds) wait_until_stopped() waits for auroracoind to exit.
AURORACOIND_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
    """Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
    """How assert_start_raises_init_error() compares expected_msg to stderr."""
    FULL_TEXT = 1
    FULL_REGEX = 2
    PARTIAL_REGEX = 3
class TestNode():
    """A class for representing a auroracoind node under test.
    This class contains:
    - state about the node (whether it's running, etc)
    - a Python subprocess.Popen object representing the running process
    - an RPC connection to the node
    - one or more P2P connections to the node
    To make things easier for the test writer, any unrecognised messages will
    be dispatched to the RPC connection."""
    def __init__(self, i, datadir, *, chain, rpchost, timewait, auroracoind, auroracoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False):
        """
        Kwargs:
            start_perf (bool): If True, begin profiling the node with `perf` as soon as
                the node starts.
        """
        # Static configuration: index, data directory and derived paths.
        self.index = i
        self.datadir = datadir
        self.auroracoinconf = os.path.join(self.datadir, "auroracoin.conf")
        self.stdout_dir = os.path.join(self.datadir, "stdout")
        self.stderr_dir = os.path.join(self.datadir, "stderr")
        self.chain = chain
        self.rpchost = rpchost
        self.rpc_timeout = timewait
        self.binary = auroracoind
        self.coverage_dir = coverage_dir
        self.cwd = cwd
        if extra_conf is not None:
            append_config(datadir, extra_conf)
        # Most callers will just need to add extra args to the standard list below.
        # For those callers that need more flexibility, they can just set the args property directly.
        # Note that common args are set in the config file (see initialize_datadir)
        self.extra_args = extra_args
        # Configuration for logging is set as command-line args rather than in the auroracoin.conf file.
        # This means that starting a auroracoind using the temp dir to debug a failed test won't
        # spam debug.log.
        self.args = [
            self.binary,
            "-datadir=" + self.datadir,
            "-logtimemicros",
            "-logthreadnames",
            "-debug",
            "-debugexclude=libevent",
            "-debugexclude=leveldb",
            "-uacomment=testnode%d" % i,
        ]
        self.cli = TestNodeCLI(auroracoin_cli, self.datadir)
        self.use_cli = use_cli
        self.start_perf = start_perf
        # Runtime state: populated by start() / wait_for_rpc_connection()
        # and cleared again by is_node_stopped().
        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.url = None
        self.log = logging.getLogger('TestFramework.node%d' % i)
        self.cleanup_on_exit = True # Whether to kill the node when this object goes away
        # Cache perf subprocesses here by their data output filename.
        self.perf_subprocesses = {}
        self.p2ps = []
    AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
    # Hard-coded deterministic keypairs, one per supported node index; used
    # by get_deterministic_priv_key().  Keep the table length equal to MAX_NODES.
    PRIV_KEYS = [
        # address , privkey
        AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
        AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
        AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
        AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
        AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
        AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
        AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
        AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
        AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
        AddressKeyPair('mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'),
        AddressKeyPair('mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'),
        AddressKeyPair('mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'),
    ]
def get_deterministic_priv_key(self):
"""Return a deterministic priv key in base58, that only depends on the node's index"""
assert len(self.PRIV_KEYS) == MAX_NODES
return self.PRIV_KEYS[self.index]
def get_mem_rss_kilobytes(self):
"""Get the memory usage (RSS) per `ps`.
Returns None if `ps` is unavailable.
"""
assert self.running
try:
return int(subprocess.check_output(
["ps", "h", "-o", "rss", "{}".format(self.process.pid)],
stderr=subprocess.DEVNULL).split()[-1])
# Avoid failing on platforms where ps isn't installed.
#
# We could later use something like `psutils` to work across platforms.
except (FileNotFoundError, subprocess.SubprocessError):
self.log.exception("Unable to get memory usage")
return None
def _node_msg(self, msg: str) -> str:
"""Return a modified msg that identifies this node by its index as a debugging aid."""
return "[node %d] %s" % (self.index, msg)
    def _raise_assertion_error(self, msg: str):
        """Raise an AssertionError with msg modified to identify this node."""
        # Central helper so every test failure message carries the node index.
        raise AssertionError(self._node_msg(msg))
def __del__(self):
# Ensure that we don't leave any auroracoind processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print(self._node_msg("Cleaning up leftover process"))
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
return getattr(self.rpc, name)
    def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs):
        """Start the node.

        Args:
            extra_args: command-line args for this start only; defaults to
                the self.extra_args given at construction time.
            cwd, stdout, stderr: overrides for the child process; fresh
                temporary files are created for stdout/stderr when not given.
            kwargs: forwarded to subprocess.Popen.
        """
        if extra_args is None:
            extra_args = self.extra_args
        # Add a new stdout and stderr file each time auroracoind is started
        if stderr is None:
            stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
        if stdout is None:
            stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
        self.stderr = stderr
        self.stdout = stdout
        if cwd is None:
            cwd = self.cwd
        # Delete any existing cookie file -- if such a file exists (eg due to
        # unclean shutdown), it will get overwritten anyway by auroracoind, and
        # potentially interfere with our attempt to authenticate
        delete_cookie_file(self.datadir, self.chain)
        # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
        subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
        self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs)
        self.running = True
        self.log.debug("auroracoind started, waiting for RPC to come up")
        if self.start_perf:
            self._start_perf()
    def wait_for_rpc_connection(self):
        """Sets up an RPC connection to the auroracoind process. Returns False if unable to connect."""
        # Poll at a rate of four times per second
        poll_per_s = 4
        for _ in range(poll_per_s * self.rpc_timeout):
            if self.process.poll() is not None:
                # The process died before the RPC port ever came up.
                raise FailedToStartError(self._node_msg(
                    'auroracoind exited with status {} during initialization'.format(self.process.returncode)))
            try:
                rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.chain, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
                rpc.getblockcount()
                # If the call to getblockcount() succeeds then the RPC connection is up
                self.log.debug("RPC successfully started")
                if self.use_cli:
                    return
                self.rpc = rpc
                self.rpc_connected = True
                self.url = self.rpc.url
                return
            except IOError as e:
                if e.errno != errno.ECONNREFUSED: # Port not yet open?
                    raise # unknown IO error
            except JSONRPCException as e: # Initialization phase
                # -28 RPC in warmup
                # -342 Service unavailable, RPC server started but is shutting down due to error
                if e.error['code'] != -28 and e.error['code'] != -342:
                    raise # unknown JSON RPC exception
            except ValueError as e: # cookie file not found and no rpcuser or rpcpassword. auroracoind still starting
                if "No RPC credentials" not in str(e):
                    raise
            time.sleep(1.0 / poll_per_s)
        self._raise_assertion_error("Unable to connect to auroracoind")
def generate(self, nblocks, maxtries=1000000):
self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`")
return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries)
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return self.cli("-rpcwallet={}".format(wallet_name))
else:
assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
return self.rpc / wallet_path
def stop_node(self, expected_stderr='', wait=0):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
# If there are any running perf processes, stop them.
for profile_name in tuple(self.perf_subprocesses.keys()):
self._stop_perf(profile_name)
# Check that stderr is as expected
self.stderr.seek(0)
stderr = self.stderr.read().decode('utf-8').strip()
if stderr != expected_stderr:
raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
self.stdout.close()
self.stderr.close()
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert return_code == 0, self._node_msg(
"Node returned non-zero exit code (%d) when stopping" % return_code)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=AURORACOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
    @contextlib.contextmanager
    def assert_debug_log(self, expected_msgs, timeout=2):
        """Context manager asserting that every string in expected_msgs is
        written to debug.log while the managed block runs (the log is polled
        for up to `timeout` seconds after the block finishes)."""
        time_end = time.time() + timeout
        debug_log = os.path.join(self.datadir, self.chain, 'debug.log')
        with open(debug_log, encoding='utf-8') as dl:
            # Remember the current end of file; only log text written after
            # this point counts towards the expected messages.
            dl.seek(0, 2)
            prev_size = dl.tell()
        yield
        while True:
            found = True
            with open(debug_log, encoding='utf-8') as dl:
                dl.seek(prev_size)
                log = dl.read()
            print_log = " - " + "\n - ".join(log.splitlines())
            for expected_msg in expected_msgs:
                if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
                    found = False
            if found:
                return
            if time.time() >= time_end:
                break
            # The node may not have flushed the log yet - retry shortly.
            time.sleep(0.05)
        self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log))
@contextlib.contextmanager
def assert_memory_usage_stable(self, *, increase_allowed=0.03):
"""Context manager that allows the user to assert that a node's memory usage (RSS)
hasn't increased beyond some threshold percentage.
Args:
increase_allowed (float): the fractional increase in memory allowed until failure;
e.g. `0.12` for up to 12% increase allowed.
"""
before_memory_usage = self.get_mem_rss_kilobytes()
yield
after_memory_usage = self.get_mem_rss_kilobytes()
if not (before_memory_usage and after_memory_usage):
self.log.warning("Unable to detect memory usage (RSS) - skipping memory check.")
return
perc_increase_memory_usage = (after_memory_usage / before_memory_usage) - 1
if perc_increase_memory_usage > increase_allowed:
self._raise_assertion_error(
"Memory usage increased over threshold of {:.3f}% from {} to {} ({:.3f}%)".format(
increase_allowed * 100, before_memory_usage, after_memory_usage,
perc_increase_memory_usage * 100))
@contextlib.contextmanager
def profile_with_perf(self, profile_name):
"""
Context manager that allows easy profiling of node activity using `perf`.
See `test/functional/README.md` for details on perf usage.
Args:
profile_name (str): This string will be appended to the
profile data filename generated by perf.
"""
subp = self._start_perf(profile_name)
yield
if subp:
self._stop_perf(profile_name)
    def _start_perf(self, profile_name=None):
        """Start a perf process to profile this node.

        Returns the subprocess running perf, or None if perf can't be used."""
        subp = None
        def test_success(cmd):
            # Return True when `cmd` exits with status 0.
            return subprocess.call(
                # shell=True required for pipe use below
                cmd, shell=True,
                stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0
        if not sys.platform.startswith('linux'):
            self.log.warning("Can't profile with perf; only available on Linux platforms")
            return None
        if not test_success('which perf'):
            self.log.warning("Can't profile with perf; must install perf-tools")
            return None
        # Profiling still works without debug symbols, so only warn here.
        if not test_success('readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))):
            self.log.warning(
                "perf output won't be very useful without debug symbols compiled into auroracoind")
        # Unique output file per profile; kept on disk (delete=False) so it
        # can be inspected with `perf report` after the test.
        output_path = tempfile.NamedTemporaryFile(
            dir=self.datadir,
            prefix="{}.perf.data.".format(profile_name or 'test'),
            delete=False,
        ).name
        cmd = [
            'perf', 'record',
            '-g',                     # Record the callgraph.
            '--call-graph', 'dwarf',  # Compatibility for gcc's --fomit-frame-pointer.
            '-F', '101',              # Sampling frequency in Hz.
            '-p', str(self.process.pid),
            '-o', output_path,
        ]
        subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.perf_subprocesses[profile_name] = subp
        return subp
def _stop_perf(self, profile_name):
"""Stop (and pop) a perf subprocess."""
subp = self.perf_subprocesses.pop(profile_name)
output_path = subp.args[subp.args.index('-o') + 1]
subp.terminate()
subp.wait(timeout=10)
stderr = subp.stderr.read().decode()
if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr:
self.log.warning(
"perf couldn't collect data! Try "
"'sudo sysctl -w kernel.perf_event_paranoid=-1'")
else:
report_cmd = "perf report -i {}".format(output_path)
self.log.info("See perf output by running '{}'".format(report_cmd))
    def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
        """Attempt to start the node and expect it to raise an error.

        extra_args: extra arguments to pass through to auroracoind
        expected_msg: regex that stderr should match when auroracoind fails

        Will throw if auroracoind starts without an error.
        Will throw if an expected_msg is provided and it does not match auroracoind's stdout."""
        with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
             tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
            try:
                self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
                self.wait_for_rpc_connection()
                self.stop_node()
                self.wait_until_stopped()
            except FailedToStartError as e:
                # Expected path: the node refused to start.
                self.log.debug('auroracoind failed to start: %s', e)
                self.running = False
                self.process = None
                # Check stderr for expected message
                if expected_msg is not None:
                    log_stderr.seek(0)
                    stderr = log_stderr.read().decode('utf-8').strip()
                    if match == ErrorMatch.PARTIAL_REGEX:
                        if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
                            self._raise_assertion_error(
                                'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
                    elif match == ErrorMatch.FULL_REGEX:
                        if re.fullmatch(expected_msg, stderr) is None:
                            self._raise_assertion_error(
                                'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
                    elif match == ErrorMatch.FULL_TEXT:
                        if expected_msg != stderr:
                            self._raise_assertion_error(
                                'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
            else:
                # The node started successfully - that is itself a failure here.
                if expected_msg is None:
                    assert_msg = "auroracoind should have exited with an error"
                else:
                    assert_msg = "auroracoind should have exited with expected error " + expected_msg
                self._raise_assertion_error(assert_msg)
def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(**kwargs, net=self.chain)()
self.p2ps.append(p2p_conn)
if wait_for_verack:
p2p_conn.wait_for_verack()
return p2p_conn
    @property
    def p2p(self):
        """Return the first p2p connection

        Convenience property - most tests only use a single p2p connection to each
        node, so this saves having to write node.p2ps[0] many times."""
        # At least one connection must have been added via add_p2p_connection().
        assert self.p2ps, self._node_msg("No p2p connection")
        return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
class TestNodeCLIAttr:
    """Callable proxy representing a single auroracoin-cli command."""

    def __init__(self, cli, command):
        self.cli = cli
        self.command = command

    def __call__(self, *args, **kwargs):
        # Forward straight to the owning CLI wrapper.
        return self.cli.send_cli(self.command, *args, **kwargs)

    def get_request(self, *args, **kwargs):
        """Return a zero-argument thunk, suitable for TestNodeCLI.batch()."""
        return lambda: self(*args, **kwargs)
def arg_to_cli(arg):
    """Convert a Python value into the string form auroracoin-cli expects."""
    if isinstance(arg, bool):
        # auroracoin-cli wants lowercase true/false, not Python's True/False.
        return str(arg).lower()
    if isinstance(arg, (dict, list)):
        # Structured values are passed as JSON.
        return json.dumps(arg)
    return str(arg)
class TestNodeCLI():
    """Interface to auroracoin-cli for an individual node"""
    def __init__(self, binary, datadir):
        self.options = []
        self.binary = binary
        self.datadir = datadir
        self.input = None
        self.log = logging.getLogger('TestFramework.auroracoincli')
    def __call__(self, *options, input=None):
        # TestNodeCLI is callable with auroracoin-cli command-line options;
        # returns a fresh instance carrying those options (self is unchanged).
        cli = TestNodeCLI(self.binary, self.datadir)
        cli.options = [str(o) for o in options]
        cli.input = input
        return cli
    def __getattr__(self, command):
        # Unknown attributes become CLI commands, e.g. cli.getblockcount().
        return TestNodeCLIAttr(self, command)
    def batch(self, requests):
        # Run a sequence of zero-argument request thunks, collecting each
        # result (or JSONRPCException) without aborting the batch.
        results = []
        for request in requests:
            try:
                results.append(dict(result=request()))
            except JSONRPCException as e:
                results.append(dict(error=e))
        return results
    def send_cli(self, command=None, *args, **kwargs):
        """Run auroracoin-cli command. Deserializes returned string as python object."""
        pos_args = [arg_to_cli(arg) for arg in args]
        named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()]
        assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same auroracoin-cli call"
        p_args = [self.binary, "-datadir=" + self.datadir] + self.options
        if named_args:
            p_args += ["-named"]
        if command is not None:
            p_args += [command]
        p_args += pos_args + named_args
        self.log.debug("Running auroracoin-cli command: %s" % command)
        process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        cli_stdout, cli_stderr = process.communicate(input=self.input)
        returncode = process.poll()
        if returncode:
            # Try to surface RPC-style errors as JSONRPCException, matching
            # the behavior of the direct RPC transport.
            match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
            if match:
                code, message = match.groups()
                raise JSONRPCException(dict(code=int(code), message=message))
            # Ignore cli_stdout, raise with cli_stderr
            raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
        try:
            return json.loads(cli_stdout, parse_float=decimal.Decimal)
        except json.JSONDecodeError:
            # Not JSON (e.g. a bare hex string) - return the raw text.
            return cli_stdout.rstrip("\n")
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Constraints: functions that impose constraints on weight values.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras._impl.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.constraints.Constraint')
class Constraint(object):
  """Base class for weight constraints; the default is a no-op."""
  def __call__(self, w):
    # Subclasses override this to return a constrained version of `w`.
    return w
  def get_config(self):
    # Subclasses override this to expose their constructor arguments.
    return {}
@tf_export('keras.constraints.MaxNorm', 'keras.constraints.max_norm')
class MaxNorm(Constraint):
  """MaxNorm weight constraint.

  Constrains the weights incident to each hidden unit
  to have a norm less than or equal to a desired value.

  Arguments:
      max_value: the maximum norm for the incoming weights.
      axis: integer, axis along which to calculate weight norms.
          For instance, in a `Dense` layer the weight matrix
          has shape `(input_dim, output_dim)`,
          set `axis` to `0` to constrain each weight vector
          of length `(input_dim,)`.
          In a `Conv2D` layer with `data_format="channels_last"`,
          the weight tensor has shape
          `(rows, cols, input_depth, output_depth)`,
          set `axis` to `[0, 1, 2]`
          to constrain the weights of each filter tensor of size
          `(rows, cols, input_depth)`.
  """

  def __init__(self, max_value=2, axis=0):
    self.max_value = max_value
    self.axis = axis

  def __call__(self, w):
    # L2 norm of each weight vector along `axis`; any vector whose norm
    # exceeds `max_value` is rescaled back onto the ball of that radius.
    w_norm = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
    clipped = K.clip(w_norm, 0, self.max_value)
    return w * (clipped / (K.epsilon() + w_norm))

  def get_config(self):
    return {'max_value': self.max_value, 'axis': self.axis}
@tf_export('keras.constraints.NonNeg', 'keras.constraints.non_neg')
class NonNeg(Constraint):
  """Constrains the weights to be non-negative."""

  def __call__(self, w):
    # Zero out negative entries; the boolean mask is cast back to the
    # floating-point dtype so the multiply preserves w's type.
    nonneg_mask = K.cast(K.greater_equal(w, 0.), K.floatx())
    return w * nonneg_mask
@tf_export('keras.constraints.UnitNorm', 'keras.constraints.unit_norm')
class UnitNorm(Constraint):
  """Constrains the weights incident to each hidden unit to have unit norm.

  Arguments:
      axis: integer, axis along which to calculate weight norms.
          For instance, in a `Dense` layer the weight matrix
          has shape `(input_dim, output_dim)`,
          set `axis` to `0` to constrain each weight vector
          of length `(input_dim,)`.
          In a `Conv2D` layer with `data_format="channels_last"`,
          the weight tensor has shape
          `(rows, cols, input_depth, output_depth)`,
          set `axis` to `[0, 1, 2]`
          to constrain the weights of each filter tensor of size
          `(rows, cols, input_depth)`.
  """

  def __init__(self, axis=0):
    self.axis = axis

  def __call__(self, w):
    # Divide each weight vector by its L2 norm (epsilon avoids divide-by-zero).
    w_norm = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
    return w / (K.epsilon() + w_norm)

  def get_config(self):
    return {'axis': self.axis}
@tf_export('keras.constraints.MinMaxNorm', 'keras.constraints.min_max_norm')
class MinMaxNorm(Constraint):
  """MinMaxNorm weight constraint.

  Constrains the weights incident to each hidden unit
  to have the norm between a lower bound and an upper bound.

  Arguments:
      min_value: the minimum norm for the incoming weights.
      max_value: the maximum norm for the incoming weights.
      rate: rate for enforcing the constraint: weights will be
          rescaled to yield
          `(1 - rate) * norm + rate * norm.clip(min_value, max_value)`.
          Effectively, this means that rate=1.0 stands for strict
          enforcement of the constraint, while rate<1.0 means that
          weights will be rescaled at each step to slowly move
          towards a value inside the desired interval.
      axis: integer, axis along which to calculate weight norms.
          For instance, in a `Dense` layer the weight matrix
          has shape `(input_dim, output_dim)`,
          set `axis` to `0` to constrain each weight vector
          of length `(input_dim,)`.
          In a `Conv2D` layer with `data_format="channels_last"`,
          the weight tensor has shape
          `(rows, cols, input_depth, output_depth)`,
          set `axis` to `[0, 1, 2]`
          to constrain the weights of each filter tensor of size
          `(rows, cols, input_depth)`.
  """

  def __init__(self, min_value=0.0, max_value=1.0, rate=1.0, axis=0):
    self.min_value = min_value
    self.max_value = max_value
    self.rate = rate
    self.axis = axis

  def __call__(self, w):
    norms = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
    clipped = K.clip(norms, self.min_value, self.max_value)
    # Interpolate between the current norm and its clipped value by `rate`.
    target = self.rate * clipped + (1 - self.rate) * norms
    return w * (target / (K.epsilon() + norms))

  def get_config(self):
    return {
        'min_value': self.min_value,
        'max_value': self.max_value,
        'rate': self.rate,
        'axis': self.axis
    }
# Aliases: lowercase function-style names mirroring the class exports.
max_norm = MaxNorm
non_neg = NonNeg
unit_norm = UnitNorm
min_max_norm = MinMaxNorm
# Legacy aliases kept for backwards compatibility with older Keras code.
maxnorm = max_norm
nonneg = non_neg
unitnorm = unit_norm
@tf_export('keras.constraints.serialize')
def serialize(constraint):
  """Serialize a constraint instance into a Keras-compatible config."""
  return serialize_keras_object(constraint)
@tf_export('keras.constraints.deserialize')
def deserialize(config, custom_objects=None):
  """Re-create a constraint instance from its serialized `config` dict."""
  lookup_kwargs = {
      'module_objects': globals(),
      'custom_objects': custom_objects,
      'printable_module_name': 'constraint',
  }
  return deserialize_keras_object(config, **lookup_kwargs)
@tf_export('keras.constraints.get')
def get(identifier):
  """Resolve a constraint from None, a config dict, a name, or a callable."""
  if identifier is None:
    return None
  if isinstance(identifier, dict):
    return deserialize(identifier)
  if isinstance(identifier, six.string_types):
    # Bare name: treat it as a class name with default constructor args.
    return deserialize({'class_name': str(identifier), 'config': {}})
  if callable(identifier):
    return identifier
  raise ValueError('Could not interpret constraint identifier: ' +
                   str(identifier))
| |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on October 28, 2015
"""
import os
import numpy as np
from scipy import interpolate
import copy
import importlib
from .PostProcessorReadyInterface import PostProcessorReadyInterface
from utils import InputData, InputTypes
class HistorySetSnapShot(PostProcessorReadyInterface):
  """
    This Post-Processor performs the conversion from HistorySet to PointSet
    The conversion is made so that each history H is converted to a single point P.
    Assume that each history H is a dict of n output variables x_1=[...],x_n=[...],
    then the resulting point P can be as follows accordingly to the specified type:
    - type = timeSlice: at time instant t: P=[x_1[t],...,x_n[t]]
    - type = min, max, average, value
  """

  @classmethod
  def getInputSpecification(cls):
    """
      Method to get a reference to a class that specifies the input data for
      class cls.
      @ In, cls, the class for which we are retrieving the specification
      @ Out, inputSpecification, InputData.ParameterInput, class to use for
        specifying input of cls.
    """
    inputSpecification = super().getInputSpecification()
    HSSSTypeType = InputTypes.makeEnumType("HSSSType", "HSSSTypeType", ['min','max','average','value','timeSlice','mixed'])
    inputSpecification.addSub(InputData.parameterInputFactory("type", contentType=HSSSTypeType))
    inputSpecification.addSub(InputData.parameterInputFactory("numberOfSamples", contentType=InputTypes.IntegerType))
    HSSSExtensionType = InputTypes.makeEnumType("HSSSExtension", "HSSSExtensionType", ['zeroed','extended'])
    inputSpecification.addSub(InputData.parameterInputFactory("extension", contentType=HSSSExtensionType))
    inputSpecification.addSub(InputData.parameterInputFactory("pivotParameter", contentType=InputTypes.StringType))
    inputSpecification.addSub(InputData.parameterInputFactory("pivotVar", contentType=InputTypes.StringType))
    inputSpecification.addSub(InputData.parameterInputFactory("pivotVal", contentType=InputTypes.FloatType))
    inputSpecification.addSub(InputData.parameterInputFactory("timeInstant", contentType=InputTypes.IntegerType))
    inputSpecification.addSub(InputData.parameterInputFactory("mixed", contentType=InputTypes.StringListType))
    for tag in ['min','max','average']:
      inputSpecification.addSub(InputData.parameterInputFactory(tag, contentType=InputTypes.StringListType))
    valueSub = InputData.parameterInputFactory("value")
    valueSub.addParam("pivotVar", InputTypes.StringType)
    valueSub.addParam("pivotVal", InputTypes.StringType)
    inputSpecification.addSub(valueSub)
    return inputSpecification

  def __init__(self):
    """
      Constructor
      @ In, None
      @ Out, None
    """
    super().__init__()
    self.setInputDataType('dict')
    self.keepInputMeta(True)
    self.outputMultipleRealizations = True # True indicate multiple realizations are returned
    self.validDataType = ['PointSet'] # The list of accepted types of DataObject
    self.type = None            # snapshot type: min|max|average|value|timeSlice|mixed
    self.pivotParameter = None  #pivotParameter identify the ID of the temporal variable
    self.pivotVar = None        # variable whose min/max/value crossing selects the slice
    self.pivotVal = None        # value of pivotVar at which to slice (type 'value')
    self.timeInstant = None     # time index for type 'timeSlice'
    self.numberOfSamples = None # number of samples for timeSlice synchronization
    # BUGFIX: 'extension' was never initialized, so reading self.extension in
    # initialize() raised AttributeError instead of the intended input error
    # when the <extension> node was absent.
    self.extension = None       # extension method ('zeroed'/'extended') for syncing
    self.interpolation = None
    self.classifiers = {} #for "mixed" mode

  def initialize(self, runInfo, inputs, initDict=None):
    """
      Method to initialize the HistorySetSnapShot post-processor.
      @ In, runInfo, dict, dictionary of run info (e.g. working dir, etc)
      @ In, inputs, list, list of inputs
      @ In, initDict, dict, optional, dictionary with initialization options
      @ Out, None
    """
    super().initialize(runInfo, inputs, initDict)
    if len(inputs)>1:
      self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only one dataObject')
    if inputs[0].type != 'HistorySet':
      self.raiseAnError(IOError, 'Post-Processor', self.name, 'accepts only HistorySet dataObject, but got "{}"'.format(inputs[0].type))
    #sync if needed
    if self.type == 'timeSlice':
      #for syncing, need numberOfSamples, extension
      if self.numberOfSamples is None:
        # BUGFIX: was self.raiseIOError(IOError, ...), which is not a defined
        # method (compare the raiseAnError call just below).
        self.raiseAnError(IOError,'When using "timeSlice" a "numberOfSamples" must be specified for synchronizing!')
      if self.extension is None:
        self.raiseAnError(IOError,'When using "timeSlice" an "extension" method must be specified for synchronizing!')
      #perform sync
      # Delayed import, import HistorySetSync as HSS
      from .Factory import factory as interfaceFactory
      self.HSsyncPP = interfaceFactory.returnInstance('HistorySetSync')
      self.HSsyncPP.setParams(self.numberOfSamples,self.pivotParameter,self.extension,syncMethod='grid')
      self.HSsyncPP.initialize(runInfo, inputs, initDict)

  def _handleInput(self, paramInput):
    """
      Function to handle the parameter input.
      @ In, paramInput, ParameterInput, the already parsed input.
      @ Out, None
    """
    for child in paramInput.subparts:
      tag = child.getName()
      if tag =='type':
        self.type = child.value
      elif tag == 'numberOfSamples':
        self.numberOfSamples = child.value
      elif tag == 'extension':
        self.extension = child.value
      elif tag == 'pivotParameter':
        self.pivotParameter = child.value
      elif tag == 'pivotVar':
        self.pivotVar = child.value
      elif tag == 'pivotVal':
        self.pivotVal = child.value
      elif tag == 'timeInstant':
        self.timeInstant = child.value
      elif self.type == 'mixed':
        # In "mixed" mode each remaining node names a snapshot method and the
        # variables it applies to.
        entries = child.value
        if tag not in self.classifiers.keys():
          self.classifiers[tag] = []
        #min,max,avg need no additional information to run, so list is [varName, varName, ...]
        if tag in ['min','max','average']:
          self.classifiers[tag].extend(entries)
        #for now we remove timeSlice in mixed mode, until we recall why it might be desirable for a user
        #timeSlice requires the time at which to slice, so list is [ (varName,time), (varName,time), ...]
        #elif tag in ['timeSlice']:
        #  time = child.attrib.get('value',None)
        #  if time is None:
        #    self.raiseAnError('For "mixed" mode, must specify "value" as an attribute for each "timeSlice" node!')
        #  for entry in entries:
        #    self.classifiers[tag].append( (entry,float(time)) )
        #value requires the dependent variable and dependent value, so list is [ (varName,depVar,depVal), ...]
        elif tag == 'value':
          depVar = child.parameterValues.get('pivotVar',None)
          depVal = child.parameterValues.get('pivotVal',None)
          if depVar is None or depVal is None:
            self.raiseAnError('For "mixed" mode, must specify both "pivotVar" and "pivotVal" as an attribute for each "value" node!')
          for entry in entries:
            self.classifiers[tag].append( (entry,depVar,float(depVal)) )
        elif tag != 'method':
          self.raiseAnError(IOError,'Unrecognized node for HistorySetSnapShot in "mixed" mode:',tag)
      else:
        self.raiseAnError(IOError, 'HistorySetSnapShot Interfaced Post-Processor ' + str(self.name) + ' : XML node ' + str(child.tag) + ' is not recognized')
    needspivotParameter = ['average','timeSlice']
    if self.type in needspivotParameter or any(mode in self.classifiers.keys() for mode in needspivotParameter):
      if self.pivotParameter is None:
        self.raiseAnError(IOError,'"pivotParameter" is required for',needspivotParameter,'but not provided!')

  def run(self,inputIn, pivotVal=None):
    """
      Method to post-process the dataObjects
      @ In, inputIn, dict, dictionaries which contains the data inside the input DataObjects
        inputIn = {'Data':listData, 'Files':listOfFiles},
        listData has the following format: (listOfInputVars, listOfOutVars, DataDict) with
        DataDict is a dictionary that has the format
            dataDict['dims']     = dict {varName:independentDimensions}
            dataDict['metadata'] = dict {metaVarName:metaVarValue}
            dataDict['type'] = str TypeOfDataObject
            dataDict['inpVars'] = list of input variables
            dataDict['outVars'] = list of output variables
            dataDict['numberRealizations'] = int SizeOfDataObject
            dataDict['name'] = str DataObjectName
            dataDict['metaKeys'] = list of meta variables
            dataDict['data'] = dict {varName: varValue(1-D or 2-D numpy array)}
      @ In, pivotVal, float, value associated to the variable considered (default None)
      @ Out, outputPSDic, dict, output dictionary
    """
    _, _, inputDic = inputIn['Data'][0]
    #for timeSlice we call historySetWindow
    if self.type == 'timeSlice':
      outputHSDic = self.HSsyncPP.run(inputIn)
      outDict = historySetWindow(outputHSDic,self.timeInstant,inputDic['inpVars'],inputDic['outVars'],inputDic['numberRealizations'],self.pivotParameter)
      for key in inputDic['metaKeys']:
        outDict['data'][key] = inputDic['data'][key]
      return outDict
    #for other non-mixed methods we call historySnapShot
    elif self.type != 'mixed':
      outputPSDic = historySnapShot(inputDic,self.pivotVar,self.type,self.pivotVal,self.pivotParameter)
      return outputPSDic
    # mixed is more complicated: we pull out values by method instead of a single slice type
    # We use the same methods to get slices, then pick out only the requested variables
    else:
      #establish the output dict
      outDict = {'data':{}}
      #replicate input space
      for var in inputDic['inpVars']:
        outDict['data'][var] = inputDic['data'][var]
      # replicate metadata (add meta variables back)
      for key in inputDic['metaKeys']:
        outDict['data'][key] = inputDic['data'][key]
      outDict['dims'] = {key:[] for key in inputDic['dims'].keys()}
      #loop over the methods requested to fill output space
      for method,entries in self.classifiers.items():
        #min, max take no special effort
        if method in ['min','max']:
          for var in entries:
            getDict = historySnapShot(inputDic,var,method)
            outDict['data'][var] = getDict['data'][var]
        #average requires the pivotParameter
        elif method == 'average':
          # NOTE(review): pivotVal is set to the pivotParameter name here;
          # historySnapShot ignores pivotVal for 'average', so this is
          # harmless — confirm against historySnapShot's signature.
          for var in entries:
            getDict = historySnapShot(inputDic,var,method,tempID=self.pivotParameter,pivotVal=self.pivotParameter)
            outDict['data'][var] = getDict['data'][var]
        #timeSlice requires the time value
        #functionality removed for now until we recall why it's desirable
        #elif method == 'timeSlice':
        #  for var,time in entries:
        #    getDict = historySetWindow(inputDic,time,self.pivotParameter)
        #value requires the dependent variable and value
        elif method == 'value':
          for var,depVar,depVal in entries:
            getDict = historySnapShot(inputDic,depVar,method,pivotVal=depVal)
            outDict['data'][var] = getDict['data'][var]
      return outDict
def historySnapShot(inputDic, pivotVar, snapShotType, pivotVal=None, tempID=None):
  """
    Method to compute a conversion from HistorySet to PointSet using the methods: min,max,average,value
    @ In, inputDic, dict, it is an historySet
    @ In, pivotVar, string, variable considered
    @ In, snapShotType, string, type of snapShot: min, max, average, value
    @ In, pivotVal, float, optional, value associated to the variable considered (default None, used by 'value')
    @ In, tempID, string, optional, name of the temporal variable (default None, used by 'average')
    @ Out, outputDic, dict, it contains the temporal slice of all histories
  """
  # place to store data results
  outputDic={'data':{}}
  # collect metadata, if it exists, to pass through
  for key in inputDic['metaKeys']:
    outputDic['data'][key] = inputDic['data'][key]
  # place to store dimensionalities
  outputDic['dims'] = {key: [] for key in inputDic['dims'].keys()}
  # the input space is replicated unchanged
  for var in inputDic['inpVars']:
    outputDic['data'][var] = inputDic['data'][var]
  # candidate output variables: everything except probability weights and 'prefix'
  outVars = [var for var in inputDic['data'].keys() if 'Probability' not in var]
  try:
    outVars.remove('prefix')
  except ValueError:
    pass
  # BUGFIX: renamed 'vars' -> 'snapVars' to stop shadowing the builtin vars()
  snapVars = [var for var in outVars if var not in inputDic['inpVars']]
  for var in snapVars:
    outputDic['data'][var] = np.zeros(inputDic['numberRealizations'], dtype=object)
    for history in range(inputDic['numberRealizations']):
      # BUGFIX(style): the 'max' branch was a stray 'if' after the 'min' if;
      # the branches are mutually exclusive, so this is now a single elif chain.
      if snapShotType == 'min':
        idx = np.argmin(inputDic['data'][pivotVar][history])
        outputDic['data'][var][history] = inputDic['data'][var][history][idx]
      elif snapShotType == 'max':
        idx = np.argmax(inputDic['data'][pivotVar][history])
        outputDic['data'][var][history] = inputDic['data'][var][history][idx]
      elif snapShotType == 'value':
        # linearly interpolate var at the first crossing of pivotVal by pivotVar
        # NOTE(review): if the history never crosses pivotVal, idx is -1 and the
        # last element is silently used — confirm this is the intended fallback.
        idx = returnIndexFirstPassage(inputDic['data'][pivotVar][history],pivotVal)
        if inputDic['data'][pivotVar][history][idx]>pivotVal:
          intervalFraction = (pivotVal-inputDic['data'][pivotVar][history][idx-1])/(inputDic['data'][pivotVar][history][idx]-inputDic['data'][pivotVar][history][idx-1])
          outputDic['data'][var][history] = inputDic['data'][var][history][idx-1] + (inputDic['data'][var][history][idx]-inputDic['data'][var][history][idx-1])*intervalFraction
        else:
          intervalFraction = (pivotVal-inputDic['data'][pivotVar][history][idx])/(inputDic['data'][pivotVar][history][idx+1]-inputDic['data'][pivotVar][history][idx])
          outputDic['data'][var][history] = inputDic['data'][var][history][idx] + (inputDic['data'][var][history][idx+1]-inputDic['data'][var][history][idx])*intervalFraction
      elif snapShotType == 'average':
        # trapezoidal time-average of var over the pivot (temporal) variable
        cumulative=0.0
        for t in range(1,len(inputDic['data'][tempID][history])):
          cumulative += (inputDic['data'][var][history][t] + inputDic['data'][var][history][t-1]) / 2.0 * (inputDic['data'][tempID][history][t] - inputDic['data'][tempID][history][t-1])
        outputDic['data'][var][history] = cumulative / (inputDic['data'][tempID][history][-1] - inputDic['data'][tempID][history][0])
  return outputDic
def historySetWindow(inputDic, timeStepID, inpVars, outVars, N, pivotParameter):
  """
    Method to compute a conversion from HistorySet to PointSet using the temporal slice of the historySet
    @ In, inputDic, dict, it is an historySet
    @ In, timeStepID, int, number of time sample of each history
    @ In, inpVars, list, list of input variables
    @ In, outVars, list, list of output variables
    @ In, N, int, number of realizations
    @ In, pivotParameter, string, ID name of the temporal variable
      (kept for interface compatibility; not referenced in the body)
    @ Out, outputDic, dict, it contains the temporal slice of all histories
  """
  outputDic = {'data': {}, 'dims': {key: [] for key in inputDic['dims'].keys()}}
  # input variables are copied through untouched
  for inVar in inpVars:
    outputDic['data'][inVar] = inputDic['data'][inVar]
  # outputs: pick the requested time index out of every realization
  for outVar in outVars:
    sliced = np.zeros(N, dtype=object)
    for rlz in range(N):
      sliced[rlz] = inputDic['data'][outVar][rlz][timeStepID]
    outputDic['data'][outVar] = sliced
  return outputDic
def returnIndexFirstPassage(array, value):
  """
    Function that returns the index of the element at which the array first
    crosses the query value, in either direction (rising or falling).
    @ In, array, np.array, array to be considered in the search
    @ In, value, double, query value
    @ Out, index, int, index of the first element past the crossing, or -1
      if the array never crosses the value
  """
  before, after = array[:-1], array[1:]
  # A crossing at slot i means value lies between array[i] and array[i+1]
  # (endpoints included), regardless of direction.
  crossing = ((after >= value) & (before <= value)) | \
             ((after <= value) & (before >= value))
  hits = np.nonzero(crossing)[0]
  return int(hits[0]) + 1 if hits.size > 0 else -1
| |
# Standard lib
import os, re, glob, json, shutil
import argparse
import json
# 3rd-party modules
import phonenumbers # https://github.com/daviddrysdale/python-phonenumbers
from phonenumbers import geocoder, Leniency
from phonenumbers import carrier
from operator import attrgetter, itemgetter
from pyspark import SparkContext, SparkConf
def dump(x):
    # JSON-serialize one record; used as the final Spark map step before save.
    return json.dumps(x)
# Regex designed to find phone number candidates (i.e., patterns of at least seven digits,
# possibly separated by a parenthesis, space, period, or hyphen). For more info on the ways phone
# numbers are formatted throughout the world, see
# http://en.wikipedia.org/wiki/National_conventions_for_writing_telephone_numbers
#
# IMPORTANT: Keep this regex compatible with ElasticSearch! For example, don't use \d for digits
# because ES doesn't support it. For more info on ElasticSearch's regex syntax support see
# http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-regexp-query.html
#
# Regex breakdown:
#
# # Zero or one instance of a plus or an open parenthesis
# [+(]?
#
# # followed by 6-12 instances of this group: a digit followed by one of the allowed separators.
# # Keep in mind that AFTER this pattern we are matching a single digit, so the overall effect of
# # 6-12 instances of this group PLUS the digit means SEVEN digits "555-5555" to THIRTEEN digits
# # (e.g., 3-digit country code plus the standard 10-digit number, like +123 456-789-0123).
# (
# [0-9] # A digit (ElasticSearch doesn't support '\d' for digit)
# [- .()]? # followed by zero or one of a hyphen, period, parentheses, or space
# ){6,12}
#
# # followed by a digit
# [0-9]
# Original
# phonenum_candidate_regex_str = "[+(]?([0-9][- .()]?){6,12}[0-9]"
# TODO if updating regex also update the regex in Newman UI es_queries
phonenum_candidate_regex_str = "([+]?[0-9]?[0-9]?[0-9- .()]{6,15})"
# phonenum_candidate_regex_str = "^\s*(?:\+?(\d{1,3}))?[-. (]*(\d{3})[-. )]*(\d{3})[-. ]*(\d{4})(?: *x(\d+))?\s*$"
# Human-readable label for the entity type produced by this extractor.
entity_descr = "phone numbers"
# Characters of surrounding context captured on each side of a match.
EXCERPT_CHAR_BUFFER = 50
def find_phone_numbers(source_txt):
    """Scan source_txt for phone-number candidates and return a list of
    tagged entity dicts (raw and normalized value, body offsets, a one-line
    excerpt, and optional area/carrier notes from the phonenumbers module).
    """
    # logger = get_logger()
    tagged_phone_entities = []
    for match in re.finditer(phonenum_candidate_regex_str, source_txt, re.MULTILINE):
        # Extract the full-text value and the "normalized" value (i.e., digits only)
        value = source_txt[match.start() : match.end()]
        value_normalized = re.sub(u'[^\d]', u'', value)
        # Extract an excerpt of text that precedes the match
        excerpt_start = match.start() - EXCERPT_CHAR_BUFFER
        if excerpt_start < 0:
            excerpt_start = 0
        excerpt_prefix = source_txt[ excerpt_start : match.start() ]
        phone_number_obj = None
        try:
            # print "phone guess: %s"%value_normalized
            # Try using the 'phonenumbers' module to parse the number. Note that we need to prefix
            # the phone number string with "+" for parse() to work properly. If the number can't
            # be parsed it will throw a NumberParseException.
            phone_number_obj = phonenumbers.parse(u'+'+value_normalized, None)
            # More lenient than valid_num
            possible_num = phonenumbers.is_possible_number(phone_number_obj)
            valid_num= phonenumbers.is_valid_number(phone_number_obj)
            # print "possible=%s valid=%s"%(str(possible_num), str(valid_num))
            # If the phonenumbers module thinks the number is invalid BUT it is preceded by text
            # with a phone-related keyword, we'll accept the number. If the number is invalid and
            # doesn't have a phone-related keyword, however, skip it.
            if (not possible_num or not valid_num) and not contains_tel_keyword(excerpt_prefix[-15:]):
                continue
        except phonenumbers.phonenumberutil.NumberParseException as err:
            # The phonenumbers modules couldn't parse the number; however, if it's preceded by text
            # with a phone-related keyword we'll still accept it. Or put another way, if it is NOT
            # preceded by a phone keyword, skip it.
            if not contains_tel_keyword(excerpt_prefix[-15:]):
                continue
        # There seems to be a bug with some strings that cause partial numbers to be returned
        # (too-short digit runs are rejected outright).
        if len(value_normalized) < 7:
            continue
        # If we got this far, it means we're accepting the number as a phone number. Extract a snippet
        # of text that follows the match.
        excerpt_stop = match.end() + EXCERPT_CHAR_BUFFER
        if excerpt_stop > len(source_txt):
            excerpt_stop = len(source_txt)
        excerpt = source_txt[excerpt_start:excerpt_stop]
        excerpt_value_start = match.start() - excerpt_start
        # NOTE(review): this is the number of excerpt characters AFTER the
        # match, not an offset into the excerpt — confirm consumers expect that.
        excerpt_value_stop = excerpt_stop - match.end()
        # Remove carriage returns, replace multiple consecutive whitespace with a single space
        # so the excerpt will be compact and one line.
        excerpt = re.sub('\r+', u'', excerpt)
        excerpt = re.sub('\n+', u' ', excerpt)
        excerpt = re.sub(u'\s+', u' ', excerpt)
        print(u"Phone #: %s, \"%s\"" % (value_normalized, excerpt))
        entity_dict = {
            u"type": u"phone_number",
            u"value": value,
            u"value_normalized": value_normalized,
            u"note": None,
            u"body_offset_start": match.start(),
            u"body_offset_stop": match.end(),
            u"excerpt": excerpt,
            u"excerpt_value_start": excerpt_value_start,
            u"excerpt_value_stop": excerpt_value_stop,
            u"possible_area": None,
            u"possible_carrier": None
        }
        # If the phonenumbers module was able to construct an actual phone number object, attempt to
        # add some notes about the possible geographic region and telco carrier.
        if phone_number_obj is not None:
            area_name = geocoder.description_for_number(phone_number_obj, "en")
            if area_name:
                entity_dict[u'possible_area'] = u"Possible area: %s. " % area_name
            carrier_name = carrier.name_for_number(phone_number_obj, "en")
            if carrier_name:
                entity_dict[u'possible_carrier'] = u"Possible carrier: %s." % carrier_name
        tagged_phone_entities.append(entity_dict)
    return tagged_phone_entities
def contains_tel_keyword(sample_str):
    """Return True when sample_str contains any phone-related keyword
    (case-insensitive substring match)."""
    lowered = sample_str.lower()
    keywords = ('call', 'tel', 'cel', 'mob', 'line', 'desk', 'office',
                'home', 'work', 'phone', 'fax')
    return any(keyword in lowered for keyword in keywords)
def process_email(email, keys=None):
    """Scan the given fields of an email dict for phone numbers and store the
    normalized values in email["phone_numbers"].

    email -- dict with at least an 'id' key and the fields named in *keys*
    keys  -- list of field names to scan (defaults to ['body'])
    Returns the (mutated) email dict.
    """
    # BUGFIX: was keys=['body'], a shared mutable default argument.
    if keys is None:
        keys = ['body']
    doc = {}
    doc['id'] = email['id']
    email["phone_numbers"] = []
    for key in keys:
        try:
            if key in email:
                phones = find_phone_numbers(email[key])
                # TODO extract attachment numbers
                email["phone_numbers"] += [phone["value_normalized"] for phone in phones]
        except Exception:
            # BUGFIX: narrowed from a bare 'except:' so KeyboardInterrupt /
            # SystemExit propagate; parse failures are still best-effort.
            print("Failed to process email {}".format(doc['id']))
    return email
def process_patition(emails):
    """Generator: run process_email over every email in a Spark partition.

    (The misspelled name is kept — the driver code references it by name.)
    """
    for message in emails:
        yield process_email(message)
def test():
    # Ad-hoc manual smoke test (Python 2 print syntax); not invoked by the
    # Spark job below — run by hand when tweaking the regex.
    print find_phone_numbers( "PHONE: 1021-34662020/21/22/23/24")
    print find_phone_numbers( "1021-34662020")
    print "done.."
    text = "Call me at ++1510-748-8230 if it's before 9:30, or on +703-4800500 after 10am. +971-9-4662020"
    for match in phonenumbers.PhoneNumberMatcher(text, "US"):
        print match
    # text = "PHONE: +971-9-4662020"
    # for match in phonenumbers.PhoneNumberMatcher(text, None, leniency=Leniency.VALID):
    #     print match
    #     print geocoder.description_for_number(match, "en")
if __name__ == '__main__':
    desc='regexp extraction'
    parser = argparse.ArgumentParser(
        description=desc,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=desc)
    # TEST
    # test()
    # print "done"
    # SPARK
    #
    parser.add_argument("input_content_path", help="input email or attachment content path")
    parser.add_argument("output_content_with_phone_numbers", help="output text body enriched with phone number tags and possibly text locations.")
    args = parser.parse_args()
    conf = SparkConf().setAppName("Newman extract phone numbers")
    sc = SparkContext(conf=conf)
    # Each input line is one JSON-encoded email; tag phone numbers per
    # partition, then re-serialize and save.
    rdd_emails = sc.textFile(args.input_content_path).coalesce(50).map(lambda x: json.loads(x))
    rdd_emails.mapPartitions(lambda docs: process_patition(docs)).map(dump).saveAsTextFile(args.output_content_with_phone_numbers)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from unittest import TestCase
try:
from unittest import SkipTest
except:
try:
from unittest2 import SkipTest
except:
class SkipTest(Exception):
pass
import sys, os, subprocess
from random import randint
from threading import Thread
from socket import socket, AF_INET, SOCK_STREAM
from subprocess import Popen,PIPE,STDOUT
from string import Template
from proton import SASL, SSL
from proton.reactor import Container
from proton.handlers import Handshaker, FlowController
def free_tcp_ports(count=1):
    """ return a list of 'count' TCP ports that are free to used (ie. unbound)

    Every probe socket is held open until the end of the call so an
    already-found port cannot be handed out twice within one invocation.
    Raises AssertionError after 100 consecutive failed bind attempts.
    """
    retry = 0
    ports = []
    sockets = []
    while len(ports) != count:
        port = randint(49152, 65535)  # IANA dynamic/ephemeral range
        sockets.append( socket( AF_INET, SOCK_STREAM ) )
        try:
            sockets[-1].bind( ("0.0.0.0", port ) )
            ports.append( port )
            retry = 0
        except (OSError, IOError):
            # BUGFIX: narrowed from a bare 'except:' — bind failures surface
            # as socket.error, a subclass of OSError (py3) / IOError (py2).
            retry += 1
            assert retry != 100, "No free sockets available for test!"
    for s in sockets:
        s.close()
    return ports
def free_tcp_port():
    """Convenience wrapper returning a single unbound TCP port."""
    (port,) = free_tcp_ports(count=1)
    return port
def pump_uni(src, dst, buffer_size=1024):
    """Move up to buffer_size pending bytes one way, from transport src to
    transport dst.  Returns True if any progress was made (bytes moved or an
    end-of-stream propagated), False when there is nothing left to do.

    NOTE(review): relies on the proton convention that pending()/capacity()
    return a negative value once that side of the stream is closed — confirm
    against the proton Transport API docs.
    """
    p = src.pending()
    c = dst.capacity()
    if c < 0:
        # dst can accept no more input...
        if p < 0:
            # ...and src has nothing pending: both directions done.
            return False
        else:
            # dst closed while src still open: shut src's output side.
            src.close_head()
            return True
    if p < 0:
        # src is finished: signal end-of-input to dst (then report progress).
        dst.close_tail()
    elif p == 0 or c == 0:
        # Nothing pending or no room this round.
        return False
    else:
        # Copy a bounded chunk; peek/pop so bytes are only consumed from src
        # after they have been pushed into dst.
        binary = src.peek(min(c, buffer_size))
        dst.push(binary)
        src.pop(len(binary))
    return True
def pump(transport1, transport2, buffer_size=1024):
    """ Transfer all pending bytes between two Proton engines
    by repeatedly calling peek/pop and push.
    Loops until neither direction can make further progress.
    """
    made_progress = True
    while made_progress:
        made_progress = (pump_uni(transport1, transport2, buffer_size) or
                         pump_uni(transport2, transport1, buffer_size))
def findfileinpath(filename, searchpath):
    """Find filename in the os.pathsep-separated searchpath.
    Return the absolute path of the first match, or None if absent.
    """
    for directory in searchpath.split(os.pathsep):
        candidate = os.path.join(directory, filename)
        if os.path.exists(candidate):
            return os.path.abspath(candidate)
    return None
def isSSLPresent():
    # True when the proton build has SSL/TLS support compiled in.
    return SSL.present()
createdSASLDb = False
def _cyrusSetup(conf_dir):
    """Write out simple SASL config.

    Creates conf_dir (wiping any previous contents), writes a Cyrus SASL
    server config plus a one-user password db, and points proton at it via
    the PN_SASL_CONFIG_PATH environment variable.  No-op when the
    saslpasswd2 tool cannot be located.
    """
    saslpasswd = ""
    if 'SASLPASSWD' in os.environ:
        saslpasswd = os.environ['SASLPASSWD']
    else:
        saslpasswd = findfileinpath('saslpasswd2', os.getenv('PATH')) or ""
    if os.path.exists(saslpasswd):
        t = Template("""sasldb_path: ${db}
mech_list: EXTERNAL DIGEST-MD5 SCRAM-SHA-1 CRAM-MD5 PLAIN ANONYMOUS
""")
        abs_conf_dir = os.path.abspath(conf_dir)
        subprocess.call(args=['rm','-rf',abs_conf_dir])
        os.mkdir(abs_conf_dir)
        db = os.path.join(abs_conf_dir,'proton.sasldb')
        conf = os.path.join(abs_conf_dir,'proton-server.conf')
        # BUGFIX: use a context manager so the handle is closed even if the
        # write fails (previously open/write/close with no finally).
        with open(conf, 'w') as f:
            f.write(t.substitute(db=db))
        # NOTE(security): shell=True is needed for the pipe; both substituted
        # values are locally derived paths, not untrusted input, and the
        # password is a fixed test credential.
        cmd_template = Template("echo password | ${saslpasswd} -c -p -f ${db} -u proton user")
        cmd = cmd_template.substitute(db=db, saslpasswd=saslpasswd)
        subprocess.call(args=cmd, shell=True)
        os.environ['PN_SASL_CONFIG_PATH'] = abs_conf_dir
        global createdSASLDb
        createdSASLDb = True
# Globally initialize Cyrus SASL configuration
# (import-time side effect: creates a 'sasl-conf' directory and sets
# PN_SASL_CONFIG_PATH whenever extended SASL support is available).
if SASL.extended():
    _cyrusSetup('sasl-conf')
def ensureCanTestExtendedSASL():
    """Raise Skipped unless extended (Cyrus) SASL is available and the test
    auth database was created at import time."""
    if SASL.extended():
        if createdSASLDb:
            return
        raise Skipped("Can't Test Extended SASL: Couldn't create auth db")
    raise Skipped('Extended SASL not supported')
class DefaultConfig:
    # Fallback configuration with no 'defines' overrides; shared class-level
    # dict is never mutated by Test (configure() replaces the whole object).
    defines = {}
class Test(TestCase):
    """TestCase with profile-aware configuration defaults pulled from
    self.config.defines."""

    config = DefaultConfig()

    def __init__(self, name):
        super(Test, self).__init__(name)
        self.name = name

    def configure(self, config):
        """Install the configuration object consulted by default()."""
        self.config = config

    def default(self, name, value, **profiles):
        """Resolve *name*: explicit define wins, then the active profile's
        override from *profiles*, then the supplied *value*."""
        profile = self.config.defines.get("profile")
        fallback = profiles.get(profile, value) if profile else value
        return self.config.defines.get(name, fallback)

    @property
    def delay(self):
        return float(self.default("delay", "1", fast="0.1"))

    @property
    def timeout(self):
        return float(self.default("timeout", "60", fast="10"))

    @property
    def verbose(self):
        return int(self.default("verbose", 0))
class Skipped(SkipTest):
    # Raising this skips the current test; the 'skipped' marker attribute is
    # presumably inspected by older test drivers — TODO confirm.
    skipped = True
class TestServer(object):
    """ Base class for creating test-specific message servers.

    Runs a proton reactor Container on a daemon thread; subclasses add
    behaviour by overriding the on_* event callbacks.  Recognized kwargs:
    host, port, idle_timeout.
    """
    def __init__(self, **kwargs):
        self.args = kwargs
        self.reactor = Container(self)
        self.host = "127.0.0.1"
        self.port = 0
        if "host" in kwargs:
            self.host = kwargs["host"]
        if "port" in kwargs:
            self.port = kwargs["port"]
        # Auto-replenish credit and complete the AMQP handshake for us.
        self.handlers = [FlowController(10), Handshaker()]
        self.thread = Thread(name="server-thread", target=self.run)
        self.thread.daemon = True
        self.running = True
        self.conditions = []

    def start(self):
        """Create the acceptor and start the reactor thread."""
        self.reactor.start()
        if self.port == 0:
            # No port requested: probe random ephemeral ports until one binds.
            self.port = str(randint(49152, 65535))
            retry = 10
        else:
            # BUGFIX: with an explicit port, retry was left at 0, so the
            # acceptor loop never ran and the assert below always fired.
            # Make a single attempt on the caller-specified port.
            retry = 1
        while retry > 0:
            try:
                self.acceptor = self.reactor.acceptor(self.host, self.port)
                break
            except IOError:
                self.port = str(randint(49152, 65535))
                retry -= 1
        assert retry > 0, "No free port for server to listen on!"
        self.thread.start()

    def stop(self):
        """Request shutdown and wait for the reactor thread to exit."""
        self.running = False
        self.reactor.wakeup()
        self.thread.join()

    # Note: all following methods all run under the thread:

    def run(self):
        # Periodic timeout so the loop notices self.running going False.
        self.reactor.timeout = 3.14159265359
        while self.reactor.process():
            if not self.running:
                self.acceptor.close()
                self.reactor.stop()
                break

    def on_connection_bound(self, event):
        if "idle_timeout" in self.args:
            event.transport.idle_timeout = self.args["idle_timeout"]

    def on_connection_local_close(self, event):
        # Record the close condition for later inspection by tests.
        self.conditions.append(event.connection.condition)

    def on_delivery(self, event):
        event.delivery.settle()
#
# Classes that wrap the messenger applications msgr-send and msgr-recv.
# These applications reside in the c/tools directory
#
class MessengerApp(object):
    """ Interface to control a MessengerApp

    Wraps one msgr-send/msgr-recv child process: subclasses build the
    command line (_build_command) and signal readiness (_ready); this class
    launches the process, collects stdout, and exposes its exit status.
    """
    def __init__(self):
        self._cmdline = None
        # options common to Receivers and Senders:
        self.ca_db = None
        self.certificate = None
        self.privatekey = None
        self.password = None
        self._output = None

    def start(self, verbose=False):
        """ Begin executing the test """
        cmd = self.cmdline()
        self._verbose = verbose
        if self._verbose:
            print("COMMAND='%s'" % str(cmd))
        #print("ENV='%s'" % str(os.environ.copy()))
        try:
            # Handle python launch by replacing script 'filename' with
            # 'python abspath-to-filename' in cmdline arg list.
            if cmd[0].endswith('.py'):
                foundfile = findfileinpath(cmd[0], os.getenv('PATH'))
                if foundfile is None:
                    msg = "Unable to locate file '%s' in PATH" % cmd[0]
                    raise Skipped("Skipping test - %s" % msg)
                del cmd[0:1]
                cmd.insert(0, foundfile)
                cmd.insert(0, sys.executable)
            # NOTE: only stdout is piped; stderr is inherited from the parent.
            self._process = Popen(cmd, stdout=PIPE,
                                  bufsize=4096, universal_newlines=True)
        except OSError:
            e = sys.exc_info()[1]
            print("ERROR: '%s'" % e)
            msg = "Unable to execute command '%s', is it in your PATH?" % cmd[0]
            # NOTE(flaper87): Skip the test if the command is not found.
            if e.errno == 2:
                raise Skipped("Skipping test - %s" % msg)
            assert False, msg
        self._ready()  # wait for it to initialize

    def stop(self):
        """ Signal the client to start clean shutdown """
        pass

    def wait(self):
        """ Wait for client to complete """
        self._output = self._process.communicate()
        if self._verbose:
            print("OUTPUT='%s'" % self.stdout())

    def status(self):
        """ Return status from client process """
        return self._process.returncode

    def stdout(self):
        #self._process.communicate()[0]
        if not self._output or not self._output[0]:
            return "*** NO STDOUT ***"
        return self._output[0]

    def stderr(self):
        # NOTE(review): Popen above does not pipe stderr, so _output[1] is
        # always None and this always reports NO STDERR — confirm intended.
        if not self._output or not self._output[1]:
            return "*** NO STDERR ***"
        return self._output[1]

    def cmdline(self):
        # Lazily build (and cache) the argv list.
        if not self._cmdline:
            self._build_command()
        return self._cmdline

    def _build_command(self):
        assert False, "_build_command() needs override"

    def _ready(self):
        assert False, "_ready() needs override"

    def _do_common_options(self):
        """ Common option handling """
        if self.ca_db is not None:
            self._cmdline.append("-T")
            self._cmdline.append(str(self.ca_db))
        if self.certificate is not None:
            self._cmdline.append("-C")
            self._cmdline.append(str(self.certificate))
        if self.privatekey is not None:
            self._cmdline.append("-K")
            self._cmdline.append(str(self.privatekey))
        if self.password is not None:
            self._cmdline.append("-P")
            self._cmdline.append("pass:" + str(self.password))
class MessengerSender(MessengerApp):
    """ Interface to configure a sending MessengerApp """
    def __init__(self):
        MessengerApp.__init__(self)
        self._command = None
        # @todo make these properties
        self.targets = []
        self.send_count = None
        self.msg_size = None
        self.send_batch = None
        self.outgoing_window = None
        self.report_interval = None
        self.get_reply = False
        self.timeout = None
        self.incoming_window = None
        self.recv_count = None
        self.name = None

    # command string?
    def _build_command(self):
        # NOTE: intentionally aliases (not copies) self._command, matching the
        # original behaviour of mutating the shared command list.
        self._cmdline = self._command
        self._do_common_options()
        assert self.targets, "Missing targets, required for sender!"
        self._cmdline.append("-a")
        self._cmdline.append(",".join(self.targets))
        # Value-carrying flags, emitted in the documented msgr-send order.
        for flag, setting in (("-c", self.send_count),
                              ("-b", self.msg_size),
                              ("-p", self.send_batch),
                              ("-w", self.outgoing_window),
                              ("-e", self.report_interval)):
            if setting is not None:
                self._cmdline.append(flag)
                self._cmdline.append(str(setting))
        if self.get_reply:
            self._cmdline.append("-R")
        for flag, setting in (("-t", self.timeout),
                              ("-W", self.incoming_window),
                              ("-B", self.recv_count),
                              ("-N", self.name)):
            if setting is not None:
                self._cmdline.append(flag)
                self._cmdline.append(str(setting))

    def _ready(self):
        pass
class MessengerReceiver(MessengerApp):
    """ Interface to configure a receiving MessengerApp """
    def __init__(self):
        MessengerApp.__init__(self)
        self._command = None
        # @todo make these properties
        self.subscriptions = []
        self.receive_count = None
        self.recv_count = None
        self.incoming_window = None
        self.timeout = None
        self.report_interval = None
        self.send_reply = False
        self.outgoing_window = None
        self.forwards = []
        self.name = None

    # command string?
    def _build_command(self):
        """Assemble the receiver command line from the configured options."""
        cmd = os.environ.get("TEST_EXE_PREFIX", "").split()
        cmd += self._command
        self._cmdline = cmd
        self._do_common_options()
        # ask the client to announce when its subscriptions are active
        cmd += ["-X", "READY"]
        assert self.subscriptions, "Missing subscriptions, required for receiver!"
        cmd += ["-a", ",".join(self.subscriptions)]
        # simple "flag value" options, in the original emission order
        for flag, value in (("-c", self.receive_count),
                            ("-b", self.recv_count),
                            ("-w", self.incoming_window),
                            ("-t", self.timeout),
                            ("-e", self.report_interval)):
            if value is not None:
                cmd += [flag, str(value)]
        if self.send_reply:
            cmd.append("-R")
        if self.outgoing_window is not None:
            cmd += ["-W", str(self.outgoing_window)]
        if self.forwards:
            cmd += ["-F", ",".join(self.forwards)]
        if self.name is not None:
            cmd += ["-N", str(self.name)]

    def _ready(self):
        """ wait for subscriptions to complete setup. """
        line = self._process.stdout.readline()
        assert line.strip() == "READY", \
            "Unexpected input while waiting for receiver to initialize: %s" % line
class MessengerSenderC(MessengerSender):
    """Sender backed by the C msgr-send example client."""
    def __init__(self):
        MessengerSender.__init__(self)
        self._command = ["msgr-send"]
class MessengerReceiverC(MessengerReceiver):
    """Receiver backed by the C msgr-recv example client."""
    def __init__(self):
        MessengerReceiver.__init__(self)
        self._command = ["msgr-recv"]
class ReactorSenderC(MessengerSender):
    """Sender backed by the C reactor-send example client."""
    def __init__(self):
        MessengerSender.__init__(self)
        self._command = ["reactor-send"]
class ReactorReceiverC(MessengerReceiver):
    """Receiver backed by the C reactor-recv example client."""
    def __init__(self):
        MessengerReceiver.__init__(self)
        self._command = ["reactor-recv"]
| |
# Copyright (c) 2017 Jon Cooper
#
# This file is part of pygame-xbox360controller.
# Documentation, related files, and licensing can be found at
#
# <https://github.com/joncoop/pygame-xbox360controller>.
import pygame
import xbox360_controller
# color palette as (R, G, B) tuples
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 65, 65)
GREEN = (75, 225, 25)
BLUE = (65, 65, 255)
AMBER = (255, 175, 0)
GREY = (175, 175, 175)

pygame.init()

# window setup: the gamepad picture is drawn at the top, the live
# value readout below it
size = [600, 670]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("X-Box 360 Controller")

# frame-rate cap used with clock.tick() in the game loop
FPS = 60
clock = pygame.time.Clock()

# make a controller (should this be in the game loop?)
controller = xbox360_controller.Controller()
def display_text(screen, text, x, y):
    """Render *text* in white at (x, y) on *screen*.

    The Font object is created once and cached on the function itself:
    the original constructed a new Font on every call, which happens
    roughly twenty times per frame in the loop below.
    """
    font = getattr(display_text, "_font", None)
    if font is None:
        font = pygame.font.Font(None, 30)
        display_text._font = font
    output = font.render(text, True, WHITE)
    screen.blit(output, [x, y])
# game loop: poll the controller every frame and redraw the gamepad
# picture plus a textual readout of every button/axis value
running = True
while running:
    # event handling
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False

    # joystick stuff
    pressed = controller.get_buttons()

    a_btn = pressed[xbox360_controller.A]
    b_btn = pressed[xbox360_controller.B]
    x_btn = pressed[xbox360_controller.X]
    y_btn = pressed[xbox360_controller.Y]
    back = pressed[xbox360_controller.BACK]
    start = pressed[xbox360_controller.START]
    # guide = pressed[xbox360_controller.GUIDE]
    lt_bump = pressed[xbox360_controller.LEFT_BUMP]
    rt_bump = pressed[xbox360_controller.RIGHT_BUMP]
    lt_stick_btn = pressed[xbox360_controller.LEFT_STICK_BTN]
    rt_stick_btn = pressed[xbox360_controller.RIGHT_STICK_BTN]

    lt_x, lt_y = controller.get_left_stick()
    rt_x, rt_y = controller.get_right_stick()
    triggers = controller.get_triggers()
    pad_up, pad_right, pad_down, pad_left = controller.get_pad()

    # drawing: pressed widgets are drawn filled, released ones outlined
    screen.fill(BLACK)

    # controller outline
    pygame.draw.rect(screen, GREY, [40, 20, 520, 320], 3)

    # a, b, x, y
    x, y = 450, 120
    if a_btn == 1:
        pygame.draw.ellipse(screen, GREEN, [x + 30, y + 60, 25, 25])
    else:
        pygame.draw.ellipse(screen, GREEN, [x + 30, y + 60, 25, 25], 2)
    if b_btn == 1:
        pygame.draw.ellipse(screen, RED, [x + 60, y + 30, 25, 25])
    else:
        pygame.draw.ellipse(screen, RED, [x + 60, y + 30, 25, 25], 2)
    if x_btn == 1:
        pygame.draw.ellipse(screen, BLUE, [x, y + 30, 25, 25])
    else:
        pygame.draw.ellipse(screen, BLUE, [x, y + 30, 25, 25], 2)
    if y_btn == 1:
        pygame.draw.ellipse(screen, AMBER, [x + 30, y, 25, 25])
    else:
        pygame.draw.ellipse(screen, AMBER, [x + 30, y, 25, 25], 2)

    # back, start
    x, y = 250, 145
    if back == 1:
        pygame.draw.ellipse(screen, WHITE, [x, y, 25, 20])
    else:
        pygame.draw.ellipse(screen, WHITE, [x, y, 25, 20], 2)
    pygame.draw.ellipse(screen, GREY, [x + 40, y - 10, 40, 40])
    if start == 1:
        pygame.draw.ellipse(screen, WHITE, [x + 95, y, 25, 20])
    else:
        pygame.draw.ellipse(screen, WHITE, [x + 95, y, 25, 20], 2)

    # bumpers
    # BUG FIX: the left-bumper rects were written [x, 50, y, 25] -- the
    # y coordinate and width were transposed; it only drew correctly
    # because y happens to equal 50 here.
    x, y = 100, 50
    if lt_bump == 1:
        pygame.draw.rect(screen, WHITE, [x, y, 50, 25])
    else:
        pygame.draw.rect(screen, WHITE, [x, y, 50, 25], 2)
    if rt_bump == 1:
        pygame.draw.rect(screen, WHITE, [x + 365, y, 50, 25])
    else:
        pygame.draw.rect(screen, WHITE, [x + 365, y, 50, 25], 2)

    # triggers: a tick slides along the line with the combined axis value
    x, y = 210, 60
    trigger_x = x + 100 + round(triggers * 100)
    pygame.draw.line(screen, WHITE, [x, y], [x + 200, y])
    pygame.draw.line(screen, WHITE, [trigger_x, y - 10], [trigger_x, y + 10])

    # left stick
    x, y = 65, 100
    left_x = x + 50 + round(lt_x * 50)
    left_y = y + 50 + round(lt_y * 50)
    pygame.draw.line(screen, WHITE, [x + 60, y], [x + 60, y + 120], 1)
    pygame.draw.line(screen, WHITE, [x, y + 60], [x + 120, y + 60], 1)
    if lt_stick_btn == 0:
        pygame.draw.ellipse(screen, WHITE, [left_x, left_y, 20, 20], 2)
    else:
        pygame.draw.ellipse(screen, WHITE, [left_x, left_y, 20, 20])

    # right stick
    x, y = 330, 190
    right_x = x + 50 + round(rt_x * 50)
    right_y = y + 50 + round(rt_y * 50)
    pygame.draw.line(screen, WHITE, [x + 60, y], [x + 60, y + 120], 1)
    pygame.draw.line(screen, WHITE, [x, y + 60], [x + 120, y + 60], 1)
    if rt_stick_btn == 0:
        pygame.draw.ellipse(screen, WHITE, [right_x, right_y, 20, 20], 2)
    else:
        pygame.draw.ellipse(screen, WHITE, [right_x, right_y, 20, 20])

    # hat (d-pad)
    x, y = 180, 200
    pygame.draw.ellipse(screen, WHITE, [x, y, 100, 100])
    if pad_up:
        pygame.draw.ellipse(screen, GREY, [x + 40, y, 20, 20])
    if pad_right:
        pygame.draw.ellipse(screen, GREY, [x + 80, y + 40, 20, 20])
    if pad_down:
        pygame.draw.ellipse(screen, GREY, [x + 40, y + 80, 20, 20])
    if pad_left:
        pygame.draw.ellipse(screen, GREY, [x, y + 40, 20, 20])

    # joystick values
    x, y = 50, 370
    display_text(screen, "BUTTONS", x, y)
    display_text(screen, "A: {}".format(a_btn), x, y + 25)
    display_text(screen, "B: {}".format(b_btn), x, y + 50)
    display_text(screen, "X: {}".format(x_btn), x, y + 75)
    display_text(screen, "Y: {}".format(y_btn), x, y + 100)
    display_text(screen, "LB: {}".format(lt_bump), x, y + 125)
    display_text(screen, "RB: {}".format(rt_bump), x, y + 150)
    display_text(screen, "Back: {}".format(back), x, y + 175)
    display_text(screen, "Start: {}".format(start), x, y + 200)
    display_text(screen, "LT Stick Btn: {}".format(lt_stick_btn), x, y + 225)
    display_text(screen, "RT Stick Btn: {}".format(rt_stick_btn), x, y + 250)

    display_text(screen, "AXES", x + 275, y)
    display_text(screen, "Left Stick: ({}, {})".format(round(lt_x, 2), round(lt_y, 2)), x + 275, y + 25)
    display_text(screen, "Right Stick: ({}, {})".format(round(rt_x, 2), round(rt_y, 2)), x + 275, y + 50)
    display_text(screen, "Triggers: {}".format(round(triggers, 2)), x + 275, y + 75)

    display_text(screen, "D-PAD", x + 275, y + 125)
    display_text(screen, "Up: {}".format(pad_up), x + 275, y + 150)
    display_text(screen, "Right: {}".format(pad_right), x + 275, y + 175)
    display_text(screen, "Down: {}".format(pad_down), x + 275, y + 200)
    display_text(screen, "Left: {}".format(pad_left), x + 275, y + 225)

    # update screen -- a single flip per frame (the original flipped twice)
    pygame.display.flip()
    clock.tick(FPS)

# close window on quit
pygame.quit()
| |
"""
This implements the WOFF specification dated September 16, 2009.
The main object is the WOFFFont. It is a subclass for the FontTools
TTFont object, so it has very similar functionality. The WOFFReader
and WOFFWriter are also available for use outside of this module.
Those objects are much faster than WOFFFont, but they require much
more care.
"""
import zlib
import struct
from fontTools.misc import sstruct
from cStringIO import StringIO
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, debugmsg, sortedTagList
from fontTools.ttLib.sfnt import getSearchRange, calcChecksum, SFNTDirectoryEntry, \
sfntDirectoryFormat, sfntDirectorySize, sfntDirectoryEntryFormat, sfntDirectoryEntrySize
# -----------
# Main Object
# -----------
class WOFFFont(TTFont):
    """
    This object represents a WOFF file. It is a subclass of
    the FontTools TTFont object, so the same API applies.
    For information about the arguments in __init__,
    refer to the TTFont documentation.

    This object has two special attributes: metadata and privateData.
    The metadata attribute returns an ElementTree Element object
    representing the metadata stored in the font. To set new metadata
    in the font, you must use this object. The privateData attribute
    returns the private data stored in the font. To set private data,
    set a string to font.privateData.
    """

    def __init__(self, file=None, flavor="\000\001\000\000",
            checkChecksums=0, verbose=False, recalcBBoxes=True,
            allowVID=False, ignoreDecompileErrors=False):
        # can't use the TTFont __init__ because it goes directly to the SFNTReader.
        # see that method for details about all of this.
        self.verbose = verbose
        self.recalcBBoxes = recalcBBoxes
        self.tables = {}
        self.reader = None

        self.last_vid = 0xFFFE
        self.reverseVIDDict = {}
        self.VIDDict = {}
        self.allowVID = allowVID
        self.ignoreDecompileErrors = ignoreDecompileErrors

        # WOFF-specific header fields not present in a plain SFNT
        self.flavor = flavor
        self.majorVersion = 0
        self.minorVersion = 0
        self._metadata = None
        self._tableOrder = None

        if file is not None:
            if not hasattr(file, "read"):
                file = open(file, "rb")
            self.reader = WOFFReader(file, checkChecksums=checkChecksums)
            self.flavor = self.reader.flavor
            self.majorVersion = self.reader.majorVersion
            self.minorVersion = self.reader.minorVersion
            self._tableOrder = self.reader.keys()
        else:
            # a brand new font: empty metadata element, no private data.
            # when reading from a file, metadata and privateData are loaded
            # lazily through __getattr__ below.
            self._metadata = ElementTree.Element("metadata", version="1.0")
            self.privateData = None

    def __getattr__(self, attr):
        # only "metadata" and "privateData" are loaded lazily
        if attr not in ("privateData", "metadata"):
            raise AttributeError(attr)
        # metadata
        if attr == "metadata":
            if self._metadata is not None:
                return self._metadata
            if self.reader is not None:
                text = self.reader.metadata
                if text:
                    metadata = ElementTree.fromstring(text)
                else:
                    metadata = ElementTree.Element("metadata", version="1.0")
                self._metadata = metadata
                return self._metadata
            return None
        # private data
        elif attr == "privateData":
            # BUG FIX: the original tested hasattr(self, "privateData") here.
            # Since __getattr__ only runs when the attribute is missing, that
            # hasattr re-entered __getattr__ and recursed to the stack limit
            # (it only "worked" because hasattr swallowed the error).
            # Checking the instance dict directly avoids the recursion.
            if "privateData" not in self.__dict__:
                privateData = None
                if self.reader is not None:
                    privateData = self.reader.privateData
                self.privateData = privateData
            return self.privateData
        # fallback to None
        return None

    def keys(self):
        """
        Return a list of all tables in the font. If a table order
        has been set manually or as the result of opening an existing
        WOFF file, the set table order will be in the list first.
        Tables not defined in an existing order will be sorted following
        the suggested ordering in the OTF/OFF specification.
        The first table listed in all cases is the GlyphOrder pseudo table.
        """
        tags = set(self.tables.keys())
        if self.reader is not None:
            tags = tags | set(self.reader.keys())
        tags = list(tags)
        if "GlyphOrder" in tags:
            tags.remove("GlyphOrder")
        return ["GlyphOrder"] + sortedTagList(tags, self._tableOrder)

    def setTableOrder(self, order):
        """
        Set the order in which tables should be written
        into the font. This is required if a DSIG table
        is in the font.
        """
        self._tableOrder = order

    def save(self, file, compressionLevel=9, recompressTables=False, reorderTables=True, recalculateHeadChecksum=True):
        """
        Save a WOFF to the file object specified by the
        file argument. Optionally, file can be a path and a
        new file will be created at that location.

        compressionLevel is the compression level to be
        used with zlib. This must be an int between 1 and 9.
        The default is 9, the highest compression, but slowest
        compression time.

        Set recompressTables to True if you want any already
        compressed tables to be decompressed and then recompressed
        using the level specified by compressionLevel.

        If you want the tables in the WOFF reordered following
        the suggested optimal table orderings described in the
        OTF/OFF specification, set reorderTables to True.
        Tables cannot be reordered if a DSIG table is in the font.

        If you change any of the SFNT data or reorder the tables,
        the head table checkSumAdjustment must be recalculated.
        If you are not changing any of the SFNT data, you can set
        recalculateHeadChecksum to False to prevent the recalculation.
        This must be set to False if the font contains a DSIG table.
        """
        # if DSIG is to be written, the table order
        # must be completely specified. otherwise the
        # DSIG may not be valid after decoding the WOFF.
        tags = self.keys()
        if "GlyphOrder" in tags:
            tags.remove("GlyphOrder")
        if "DSIG" in tags:
            if self._tableOrder is None or (set(self._tableOrder) != set(tags)):
                raise WOFFLibError("A complete table order must be supplied when saving a font with a 'DSIG' table.")
            elif reorderTables:
                raise WOFFLibError("Tables can not be reordered when a 'DSIG' table is in the font. Set reorderTables to False.")
            elif recalculateHeadChecksum:
                raise WOFFLibError("The 'head' table checkSumAdjustment can not be recalculated when a 'DSIG' table is in the font.")
        # sort the tags if necessary
        if reorderTables:
            tags = sortedTagList(tags)
        # open a file if necessary
        closeStream = False
        if not hasattr(file, "write"):
            closeStream = True
            file = open(file, "wb")
        # write the table data
        # (defensive: GlyphOrder was already stripped above)
        if "GlyphOrder" in tags:
            tags.remove("GlyphOrder")
        numTables = len(tags)
        writer = WOFFWriter(file, numTables, flavor=self.flavor,
            majorVersion=self.majorVersion, minorVersion=self.minorVersion,
            compressionLevel=compressionLevel, recalculateHeadChecksum=recalculateHeadChecksum,
            verbose=self.verbose)
        for tag in tags:
            origData = None
            origLength = None
            origChecksum = None
            compLength = None
            # table is loaded
            if self.isLoaded(tag):
                origData = self.getTableData(tag)
            # table is in reader
            elif self.reader is not None:
                if recompressTables:
                    origData = self.getTableData(tag)
                else:
                    if self.verbose:
                        debugmsg("Reading '%s' table from disk" % tag)
                    origData, origLength, origChecksum, compLength = self.reader.getCompressedTableData(tag)
            # add to writer
            writer.setTable(tag, origData, origLength=origLength, origChecksum=origChecksum, compLength=compLength)
        # write the metadata
        metadata = None
        metaOrigLength = None
        metaLength = None
        if hasattr(self, "metadata"):
            declaration = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
            tree = ElementTree.ElementTree(self.metadata)
            f = StringIO()
            tree.write(f, encoding="utf-8")
            metadata = f.getvalue()
            # make sure the metadata starts with the declaration
            if not metadata.startswith(declaration):
                metadata = declaration + metadata
            del f
        elif self.reader is not None:
            if recompressTables:
                metadata = self.reader.metadata
            else:
                metadata, metaOrigLength, metaLength = self.reader.getCompressedMetadata()
        if metadata:
            writer.setMetadata(metadata, metaOrigLength=metaOrigLength, metaLength=metaLength)
        # write the private data
        privData = self.privateData
        if privData:
            writer.setPrivateData(privData)
        # close the writer
        writer.close()
        # close the file
        if closeStream:
            file.close()

    def saveXML(self):
        # TTX round-tripping is not supported for WOFF
        raise NotImplementedError

    def importXML(self):
        # TTX round-tripping is not supported for WOFF
        raise NotImplementedError
# ------
# Reader
# ------
woffHeaderFormat = """
> # big endian
signature: 4s
flavor: 4s
length: L
numTables: H
reserved: H
totalSFNTSize: L
majorVersion: H
minorVersion: H
metaOffset: L
metaLength: L
metaOrigLength: L
privOffset: L
privLength: L
"""
woffHeaderSize = sstruct.calcsize(woffHeaderFormat)
class WOFFReader(object):
    """
    Low-level reader for a binary WOFF file.

    The header and table directory are parsed immediately in __init__;
    table data, metadata and private data are read from the file lazily
    on access.
    """

    def __init__(self, file, checkChecksums=1):
        self.file = file
        # 0 = don't check checksums, 1 = warn on mismatch, >1 = assert
        self.checkChecksums = checkChecksums
        # unpack the header
        self.file.seek(0)
        bytes = self.file.read(woffHeaderSize)
        if len(bytes) != woffHeaderSize:
            raise WOFFLibError("Not a properly formatted WOFF file.")
        sstruct.unpack(woffHeaderFormat, bytes, self)
        if self.signature != "wOFF":
            raise WOFFLibError("Not a properly formatted WOFF file.")
        # unpack the directory
        self.tables = {}
        for i in range(self.numTables):
            entry = WOFFDirectoryEntry()
            entry.fromFile(self.file)
            self.tables[entry.tag] = entry

    def close(self):
        # closes the underlying file object
        self.file.close()

    def __contains__(self, tag):
        return tag in self.tables
    has_key = __contains__

    def keys(self):
        """
        This returns a list of all tables in the WOFF
        sorted in ascending order based on the offset
        of each table.
        """
        sorter = []
        for tag, entry in self.tables.items():
            sorter.append((entry.offset, tag))
        order = [tag for offset, tag in sorted(sorter)]
        return order

    def __getitem__(self, tag):
        """Return the decompressed data for the table with *tag*."""
        entry = self.tables[tag]
        self.file.seek(entry.offset)
        data = self.file.read(entry.compLength)
        # decompress if necessary
        # (per the spec, compLength == origLength means the table is
        # stored uncompressed)
        if entry.compLength < entry.origLength:
            data = zlib.decompress(data)
        else:
            data = data[:entry.origLength]
        # compare the checksums
        if self.checkChecksums:
            checksum = calcTableChecksum(tag, data)
            if self.checkChecksums > 1:
                assert checksum == entry.origChecksum, "bad checksum for '%s' table" % tag
            elif checksum != entry.origChecksum:
                print "bad checksum for '%s' table" % tag
                print
        return data

    def getCompressedTableData(self, tag):
        """Return (rawData, origLength, origChecksum, compLength) for *tag*
        without decompressing the stored data."""
        entry = self.tables[tag]
        self.file.seek(entry.offset)
        data = self.file.read(entry.compLength)
        return data, entry.origLength, entry.origChecksum, entry.compLength

    def getCompressedMetadata(self):
        """Return (rawMetadata, metaOrigLength, metaLength) without
        decompressing the stored metadata."""
        self.file.seek(self.metaOffset)
        data = self.file.read(self.metaLength)
        return data, self.metaOrigLength, self.metaLength

    def __getattr__(self, attr):
        # "metadata" and "privateData" are read from the file on demand
        if attr not in ("privateData", "metadata"):
            raise AttributeError(attr)
        if attr == "privateData":
            self.file.seek(self.privOffset)
            return self.file.read(self.privLength)
        if attr == "metadata":
            self.file.seek(self.metaOffset)
            data = self.file.read(self.metaLength)
            if self.metaLength:
                data = zlib.decompress(data)
                assert len(data) == self.metaOrigLength
            return data

    def __delitem__(self, tag):
        # drop the directory entry; the file itself is untouched
        del self.tables[tag]
# ------
# Writer
# ------
class WOFFWriter(object):
    """
    Low-level writer for a binary WOFF file.

    Tables, metadata and private data are accumulated in memory (the
    table data must be held so the head checkSumAdjustment can be
    recomputed) and written out when close() is called.
    """

    def __init__(self, file, numTables, flavor="\000\001\000\000",
            majorVersion=0, minorVersion=0, compressionLevel=9,
            recalculateHeadChecksum=True,
            verbose=False):
        self.signature = "wOFF"
        self.flavor = flavor
        # running totals; grown as table data, metadata and private data
        # are written out
        self.length = woffHeaderSize + (numTables * woffDirectoryEntrySize)
        self.totalSFNTSize = sfntDirectorySize + (numTables * sfntDirectoryEntrySize)
        self.numTables = numTables
        self.majorVersion = majorVersion
        self.minorVersion = minorVersion
        self.metaOffset = 0
        self.metaOrigLength = 0
        self.metaLength = 0
        self.privOffset = 0
        self.privLength = 0
        self.reserved = 0
        self.file = file
        self.compressionLevel = compressionLevel
        self.recalculateHeadChecksum = recalculateHeadChecksum
        self.verbose = verbose
        # the data is held to facilitate the
        # head checkSumAdjustment calculation.
        # maps tag -> (insertion index, WOFFDirectoryEntry, data)
        self.tables = {}
        self.metadata = None
        self.privateData = None
        self.tableDataEnd = 0
        self.metadataEnd = 0

    def _tableOrder(self):
        # tables are written in insertion order (the stored index;
        # indexes are unique so the tuple sort never compares entries)
        return [entry.tag for index, entry, data in sorted(self.tables.values())]

    def setTable(self, tag, data, origLength=None, origChecksum=None, compLength=None):
        """Store one table for writing. origLength/origChecksum/compLength
        may be supplied when *data* is already compressed."""
        # don't compress the head if the checkSumAdjustment needs to be recalculated
        # the compression will be handled later.
        if self.recalculateHeadChecksum and tag == "head":
            # decompress
            if compLength is not None and compLength < origLength:
                data = zlib.decompress(data)
            entry = self._prepTable(tag, data, origLength=len(data), entryOnly=True)
        # compress
        else:
            entry, data = self._prepTable(tag, data=data, origLength=origLength, origChecksum=origChecksum, compLength=compLength)
        # store
        self.tables[tag] = (len(self.tables), entry, data)

    def setMetadata(self, data, metaOrigLength=None, metaLength=None):
        """Store the (XML) metadata block, compressing it if the caller
        did not supply an already-compressed blob."""
        if not data:
            return
        if metaLength is None:
            if self.verbose:
                debugmsg("compressing metadata")
            metaOrigLength = len(data)
            data = zlib.compress(data, self.compressionLevel)
            metaLength = len(data)
        # set the header values
        self.metaOrigLength = metaOrigLength
        self.metaLength = metaLength
        # store
        self.metadata = data

    def setPrivateData(self, data):
        """Store the private data block (written uncompressed)."""
        if not data:
            return
        privLength = len(data)
        # set the header value
        self.privLength = privLength
        # store
        self.privateData = data

    def close(self):
        """Write the header, directory, tables, metadata and private data."""
        if self.numTables != len(self.tables):
            raise WOFFLibError("wrong number of tables; expected %d, found %d" % (self.numTables, len(self.tables)))
        # first, handle the checkSumAdjustment
        if self.recalculateHeadChecksum and "head" in self.tables:
            self._handleHeadChecksum()
        # check the table directory conformance
        for tag, (index, entry, data) in sorted(self.tables.items()):
            self._checkTableConformance(entry, data)
        # write a placeholder header; the final values (length, offsets)
        # are only known after the data has been written, so the header
        # is rewritten by _writeHeader() below.
        header = sstruct.pack(woffHeaderFormat, self)
        self.file.seek(0)
        self.file.write(header)
        # update the directory offsets
        offset = woffHeaderSize + (woffDirectoryEntrySize * self.numTables)
        order = self._tableOrder()
        for tag in order:
            index, entry, data = self.tables[tag]
            entry.offset = offset
            offset += calc4BytePaddedLength(entry.compLength) # ensure byte alignment
        # write the directory
        self._writeTableDirectory()
        # write the table data
        self._writeTableData()
        # write the metadata
        self._writeMetadata()
        # write the private data
        self._writePrivateData()
        # write the header
        self._writeHeader()
        # go to the beginning of the file
        self.file.seek(0)

    # header support

    def _writeHeader(self):
        # rewrite the header now that all totals and offsets are final
        header = sstruct.pack(woffHeaderFormat, self)
        self.file.seek(0)
        self.file.write(header)

    # sfnt support

    def _prepTable(self, tag, data, origLength=None, origChecksum=None, compLength=None, entryOnly=False):
        """Build a WOFFDirectoryEntry for *data*, compressing the data when
        that makes it smaller. Returns entry (entryOnly) or (entry, data)."""
        # skip data prep
        if entryOnly:
            origChecksum = calcTableChecksum(tag, data)
            # compLength stays 0 until the table is compressed for real
            compLength = 0
        # prep the data
        else:
            # compress
            if compLength is None:
                origData = data
                origLength = len(origData)
                origChecksum = calcTableChecksum(tag, data)
                if self.verbose:
                    debugmsg("compressing '%s' table" % tag)
                compData = zlib.compress(origData, self.compressionLevel)
                compLength = len(compData)
                # keep whichever form is smaller; ties go to the raw data
                if origLength <= compLength:
                    data = origData
                    compLength = origLength
                else:
                    data = compData
        # make the directory entry
        entry = WOFFDirectoryEntry()
        entry.tag = tag
        entry.offset = 0
        entry.origLength = origLength
        entry.origChecksum = origChecksum
        entry.compLength = compLength
        # return
        if entryOnly:
            return entry
        return entry, data

    def _checkTableConformance(self, entry, data):
        """
        Check the conformance of the table directory entries.
        These must be checked because the origChecksum, origLength
        and compLength can be set by an outside caller.
        """
        if self.verbose:
            debugmsg("checking conformance of '%s' table" % entry.tag)
        # compLength must be less than or equal to origLength
        # (the original comment here stated the reverse of the check)
        if entry.origLength < entry.compLength:
            raise WOFFLibError("origLength and compLength are not correct in the '%s' table entry." % entry.tag)
        # unpack the data as needed
        if entry.origLength > entry.compLength:
            origData = zlib.decompress(data)
            compData = data
        else:
            origData = data
            compData = data
        # the origLength entry must match the actual length
        if entry.origLength != len(origData):
            raise WOFFLibError("origLength is not correct in the '%s' table entry." % entry.tag)
        # the checksum must be correct
        if entry.origChecksum != calcTableChecksum(entry.tag, origData):
            raise WOFFLibError("origChecksum is not correct in the '%s' table entry." % entry.tag)
        # the compLength must be correct
        if entry.compLength != len(compData):
            raise WOFFLibError("compLength is not correct in the '%s' table entry." % entry.tag)

    def _handleHeadChecksum(self):
        """Recompute head.checkSumAdjustment over the decoded SFNT layout
        and splice it into the stored head table data."""
        if self.verbose:
            debugmsg("updating head checkSumAdjustment")
        # get the value
        tables = {}
        offset = sfntDirectorySize + (sfntDirectoryEntrySize * self.numTables)
        for (index, entry, data) in sorted(self.tables.values()):
            tables[entry.tag] = dict(offset=offset, length=entry.origLength, checkSum=entry.origChecksum)
            offset += calc4BytePaddedLength(entry.origLength)
        checkSumAdjustment = calcHeadCheckSumAdjustment(self.flavor, tables)
        # set the value in the head table
        # (checkSumAdjustment occupies bytes 8-11 of the head table)
        index, entry, data = self.tables["head"]
        data = data[:8] + struct.pack(">L", checkSumAdjustment) + data[12:]
        # compress the data
        newEntry, data = self._prepTable("head", data)
        # update the entry data
        # (the table checksum ignores bytes 8-11, so it must not change)
        assert entry.origChecksum == newEntry.origChecksum
        entry.origLength = newEntry.origLength
        entry.compLength = newEntry.compLength
        # store
        self.tables["head"] = (index, entry, data)

    def _writeTableDirectory(self):
        # directory entries are written sorted by tag, per the WOFF spec
        if self.verbose:
            debugmsg("writing table directory")
        self.file.seek(woffHeaderSize)
        for tag, (index, entry, data) in sorted(self.tables.items()):
            entry = sstruct.pack(woffDirectoryEntryFormat, entry)
            self.file.write(entry)

    def _writeTableData(self):
        # (the original also computed this value into an unused local)
        offset = woffHeaderSize + (woffDirectoryEntrySize * self.numTables)
        self.file.seek(offset)
        for tag in self._tableOrder():
            if self.verbose:
                debugmsg("writing '%s' table" % tag)
            index, entry, data = self.tables[tag]
            data += "\0" * (calc4BytePaddedLength(entry.compLength) - entry.compLength) # ensure byte alignment
            self.file.write(data)
            self.length += calc4BytePaddedLength(entry.compLength) # ensure byte alignment
            self.totalSFNTSize += calc4BytePaddedLength(entry.origLength) # ensure byte alignment
        # store the end for use by metadata or private data
        self.tableDataEnd = self.length

    # metadata support

    def _writeMetadata(self):
        if self.metadata is None:
            return
        if self.verbose:
            debugmsg("writing metadata")
        self.length += self.metaLength
        self.metaOffset = self.tableDataEnd
        self.file.seek(self.metaOffset)
        self.file.write(self.metadata)
        # store the end for use by private data
        self.metadataEnd = self.metaOffset + self.metaLength
        # if private data exists, pad to a four byte boundary
        if self.privateData is not None:
            padding = calc4BytePaddedLength(self.metaLength) - self.metaLength
            self.metadataEnd += padding
            self.length += padding
            padding = "\0" * padding
            if padding:
                self.file.write(padding)

    # private data support

    def _writePrivateData(self):
        if self.privateData is None:
            return
        if self.verbose:
            debugmsg("writing private data")
        if self.metadata is not None:
            self.privOffset = self.metadataEnd
        else:
            self.privOffset = self.tableDataEnd
        self.length += self.privLength
        self.file.seek(self.privOffset)
        self.file.write(self.privateData)
# ---------
# Directory
# ---------
woffDirectoryEntryFormat = """
> # big endian
tag: 4s
offset: L
compLength: L
origLength: L
origChecksum: L
"""
woffDirectoryEntrySize = sstruct.calcsize(woffDirectoryEntryFormat)
class WOFFDirectoryEntry(object):
    """One WOFF table directory entry: tag, offset, compLength,
    origLength and origChecksum attributes (set by unpacking)."""

    def fromFile(self, file):
        """Unpack the entry from the current position of an open file."""
        sstruct.unpack(woffDirectoryEntryFormat, file.read(woffDirectoryEntrySize), self)

    def fromString(self, data):
        """Unpack the entry from a raw string.

        (The parameter was renamed from ``str``, which shadowed the builtin.)
        """
        sstruct.unpack(woffDirectoryEntryFormat, data, self)

    def toString(self):
        """Pack the entry back to its binary form."""
        return sstruct.pack(woffDirectoryEntryFormat, self)

    def __repr__(self):
        # the tag attribute only exists after unpacking
        if hasattr(self, "tag"):
            return "<WOFFDirectoryEntry '%s' at %x>" % (self.tag, id(self))
        else:
            return "<WOFFDirectoryEntry at %x>" % id(self)
# -------
# Helpers
# -------
class WOFFLibError(Exception):
    """Raised for any WOFF reading or writing error in this module."""
    pass
def calc4BytePaddedLength(length):
    """Round *length* up to the next multiple of four bytes."""
    remainder = length % 4
    if remainder:
        return length + (4 - remainder)
    return length
def calcTableChecksum(tag, data):
    """Return the 32-bit SFNT checksum of *data*.

    For the head table the checkSumAdjustment field (bytes 8-11) is
    treated as zero, per the SFNT specification.
    """
    if tag == "head":
        data = data[:8] + '\0\0\0\0' + data[12:]
    return calcChecksum(data) & 0xffffffff
def calcHeadCheckSumAdjustment(flavor, tables):
    """Compute the head checkSumAdjustment for the decoded SFNT.

    *tables* maps tag -> dict(offset=..., length=..., checkSum=...)
    describing the font as it will appear after WOFF decoding.
    """
    numTables = len(tables)
    searchRange, entrySelector, rangeShift = getSearchRange(numTables)
    # rebuild the sfnt header exactly as it will appear after decoding
    directory = sstruct.pack(sfntDirectoryFormat, dict(
        sfntVersion=flavor,
        numTables=numTables,
        searchRange=searchRange,
        entrySelector=entrySelector,
        rangeShift=rangeShift
    ))
    # rebuild the sfnt table directory, sorted by tag
    for tag in sorted(tables):
        entry = tables[tag]
        sfntEntry = SFNTDirectoryEntry()
        sfntEntry.tag = tag
        sfntEntry.checkSum = entry["checkSum"]
        sfntEntry.offset = entry["offset"]
        sfntEntry.length = entry["length"]
        directory += sfntEntry.toString()
    # calculate the checkSumAdjustment: the directory checksum plus
    # every table checksum, subtracted from the SFNT magic constant
    total = calcChecksum(directory)
    for entry in tables.values():
        total += entry["checkSum"]
    return (0xB1B0AFBA - total) & 0xffffffff
# ----------------
# SFNT Conformance
# ----------------
def checkSFNTConformance(file):
    """
    This function checks a SFNT file to see if it meets
    the conformance recommendations in the WOFF specification.
    This includes:
    - searchRange must be correct.
    - entrySelector must be correct.
    - rangeShift must be correct.
    - offset to each table must be after the table directory
    and before the end of the file.
    - offset + length of each table must not extend past
    the end of the file.
    - the table directory must be in ascending order.
    - tables must be padded to 4 byte boundaries.
    - the final table must be padded to a 4 byte boundary.
    - the gaps between table data blocks must not be more
    than necessary to pad the table to a 4 byte boundary.
    - the gap between the end of the final table and
    the end of the file must not be more than necessary
    to pad the table to a four byte boundary.
    - the checksums for each table in the table directory
    must be correct.
    - the head checkSumAdjustment must be correct.
    - the padding bytes must be null.
    The returned value of this function will be a list.
    If any errors were found, they will be represented
    as strings in the list.
    """
    # load the data
    closeFile = False
    if not hasattr(file, "read"):
        file = open(file, "rb")
        closeFile = True
    data = file.read()
    if closeFile:
        file.close()
    # storage
    # each _test* helper below returns a (possibly empty) list of
    # human-readable error strings which are accumulated here
    errors = []
    # unpack the header
    headerData = data[:sfntDirectorySize]
    header = sstruct.unpack(sfntDirectoryFormat, headerData)
    # unpack the table directory
    numTables = header["numTables"]
    directoryData = data[sfntDirectorySize : sfntDirectorySize + (sfntDirectoryEntrySize * numTables)]
    tableDirectory = []
    for index in range(numTables):
        entry = sstruct.unpack(sfntDirectoryEntryFormat, directoryData[:sfntDirectoryEntrySize])
        tableDirectory.append(entry)
        directoryData = directoryData[sfntDirectoryEntrySize:]
    # sanity testing
    errors += _testOffsetBoundaryValidity(len(data), tableDirectory)
    errors += _testLengthBoundaryValidity(len(data), tableDirectory)
    # if one or more errors have already been found, something
    # is very wrong and this should come to a screeching halt.
    if errors:
        return errors
    # junk at the beginning of the file
    errors += _testJunkAtTheBeginningOfTheFile(header)
    # test directory order
    errors += _testDirectoryOrder(tableDirectory)
    # load the table data
    for entry in tableDirectory:
        offset = entry["offset"]
        length = entry["length"]
        entry["data"] = data[offset:offset+length]
    # test for overlaps
    errors += _testOverlaps(tableDirectory)
    # test for padding
    errors += _testOffsets(tableDirectory)
    # test the final table padding
    errors += _testFinalTablePadding(len(data), numTables, tableDirectory[-1]["tag"])
    # test for gaps
    errors += _testGaps(tableDirectory)
    # test for a gap at the end of the file
    errors += _testGapAfterFinalTable(len(data), tableDirectory)
    # test padding value
    errors += _testPaddingValue(tableDirectory, data)
    # validate checksums
    errors += _testCheckSums(tableDirectory)
    errors += _testHeadCheckSum(header, tableDirectory)
    # done.
    return errors
def _testOffsetBoundaryValidity(dataLength, tableDirectory):
    """
    >>> test = [
    ...     dict(tag="test", offset=44)
    ... ]
    >>> bool(_testOffsetBoundaryValidity(45, test))
    False
    >>> test = [
    ...     dict(tag="test", offset=1)
    ... ]
    >>> bool(_testOffsetBoundaryValidity(45, test))
    True
    >>> test = [
    ...     dict(tag="test", offset=46)
    ... ]
    >>> bool(_testOffsetBoundaryValidity(45, test))
    True
    """
    # The smallest legal offset is the first byte past the header and
    # the table directory itself.
    minOffset = sfntDirectorySize + (sfntDirectoryEntrySize * len(tableDirectory))
    errors = []
    for entry in tableDirectory:
        tag = entry["tag"]
        offset = entry["offset"]
        # Reject offsets pointing into the header/directory area.
        if offset < minOffset:
            errors.append("The offset to the %s table is not valid." % tag)
        # Reject offsets pointing past the end of the data.
        if offset > dataLength:
            errors.append("The offset to the %s table is not valid." % tag)
    return errors
def _testLengthBoundaryValidity(dataLength, tableDirectory):
"""
>>> test = [
... dict(tag="test", offset=44, length=1)
... ]
>>> bool(_testLengthBoundaryValidity(45, test))
False
>>> test = [
... dict(tag="test", offset=44, length=2)
... ]
>>> bool(_testLengthBoundaryValidity(45, test))
True
"""
errors = []
entries = [(entry["offset"], entry) for entry in tableDirectory]
for o, entry in sorted(entries):
offset = entry["offset"]
length = entry["length"]
tag = entry["tag"]
end = offset + length
if end > dataLength:
errors.append("The length of the %s table is not valid." % tag)
return errors
def _testJunkAtTheBeginningOfTheFile(header):
    """
    >>> test = dict(numTables=5, searchRange=64, entrySelector=2, rangeShift=16)
    >>> bool(_testJunkAtTheBeginningOfTheFile(test))
    False
    >>> test = dict(numTables=5, searchRange=0, entrySelector=2, rangeShift=16)
    >>> bool(_testJunkAtTheBeginningOfTheFile(test))
    True
    >>> test = dict(numTables=5, searchRange=64, entrySelector=0, rangeShift=16)
    >>> bool(_testJunkAtTheBeginningOfTheFile(test))
    True
    >>> test = dict(numTables=5, searchRange=64, entrySelector=2, rangeShift=0)
    >>> bool(_testJunkAtTheBeginningOfTheFile(test))
    True
    """
    # Derive the expected binary-search fields from numTables and compare
    # each against the stored header value.
    expected = getSearchRange(header["numTables"])
    errors = []
    for fieldName, want in zip(("searchRange", "entrySelector", "rangeShift"), expected):
        if header[fieldName] != want:
            errors.append("The %s value is incorrect." % fieldName)
    return errors
def _testDirectoryOrder(tableDirectory):
"""
>>> test = [
... dict(tag="aaaa"),
... dict(tag="bbbb")
... ]
>>> bool(_testDirectoryOrder(test))
False
>>> test = [
... dict(tag="bbbb"),
... dict(tag="aaaa")
... ]
>>> bool(_testDirectoryOrder(test))
True
"""
order = [entry["tag"] for entry in tableDirectory]
if order != list(sorted(order)):
return ["The table directory is not in ascending order."]
return []
def _testOverlaps(tableDirectory):
"""
>>> test = [
... dict(tag="aaaa", offset=0, length=100),
... dict(tag="bbbb", offset=1000, length=100),
... ]
>>> bool(_testOverlaps(test))
False
>>> test = [
... dict(tag="aaaa", offset=0, length=100),
... dict(tag="bbbb", offset=50, length=100),
... ]
>>> bool(_testOverlaps(test))
True
>>> test = [
... dict(tag="aaaa", offset=0, length=100),
... dict(tag="bbbb", offset=0, length=100),
... ]
>>> bool(_testOverlaps(test))
True
>>> test = [
... dict(tag="aaaa", offset=0, length=100),
... dict(tag="bbbb", offset=0, length=150),
... ]
>>> bool(_testOverlaps(test))
True
"""
# gather the edges
edges = {}
for entry in tableDirectory:
start = entry["offset"]
end = start + entry["length"]
edges[entry["tag"]] = (start, end)
# look for overlaps
overlaps = set()
for tag, (start, end) in edges.items():
for otherTag, (otherStart, otherEnd) in edges.items():
tag = tag.strip()
otherTag = otherTag.strip()
if tag == otherTag:
continue
if start >= otherStart and start < otherEnd:
l = sorted((tag, otherTag))
overlaps.add(tuple(l))
if end > otherStart and end <= otherEnd:
l = sorted((tag, otherTag))
overlaps.add(tuple(l))
# report
errors = []
if overlaps:
for t1, t2 in sorted(overlaps):
errors.append("The tables %s and %s overlap." % (t1, t2))
return errors
def _testOffsets(tableDirectory):
"""
>>> test = [
... dict(tag="test", offset=1)
... ]
>>> bool(_testOffsets(test))
True
>>> test = [
... dict(tag="test", offset=2)
... ]
>>> bool(_testOffsets(test))
True
>>> test = [
... dict(tag="test", offset=3)
... ]
>>> bool(_testOffsets(test))
True
>>> test = [
... dict(tag="test", offset=4)
... ]
>>> bool(_testOffsets(test))
False
"""
errors = []
# make the entries sortable
entries = [(entry["offset"], entry) for entry in tableDirectory]
for o, entry in sorted(entries):
offset = entry["offset"]
if offset % 4:
errors.append("The %s table does not begin on a 4-byte boundary." % entry["tag"].strip())
return errors
def _testFinalTablePadding(dataLength, numTables, finalTableTag):
    """
    >>> bool(_testFinalTablePadding(
    ...     sfntDirectorySize + sfntDirectoryEntrySize + 1,
    ...     1,
    ...     "test"
    ... ))
    True
    >>> bool(_testFinalTablePadding(
    ...     sfntDirectorySize + sfntDirectoryEntrySize + 2,
    ...     1,
    ...     "test"
    ... ))
    True
    >>> bool(_testFinalTablePadding(
    ...     sfntDirectorySize + sfntDirectoryEntrySize + 3,
    ...     1,
    ...     "test"
    ... ))
    True
    >>> bool(_testFinalTablePadding(
    ...     sfntDirectorySize + sfntDirectoryEntrySize + 4,
    ...     1,
    ...     "test"
    ... ))
    False
    """
    # The table-data region (everything after the header and directory)
    # must be a multiple of 4 bytes, i.e. the final table must be padded.
    tableDataLength = dataLength - (sfntDirectorySize + (sfntDirectoryEntrySize * numTables))
    if tableDataLength % 4:
        return ["The final table (%s) is not properly padded." % finalTableTag]
    return []
def _testGaps(tableDirectory):
    """
    >>> start = sfntDirectorySize + (sfntDirectoryEntrySize * 2)
    >>> test = [
    ...     dict(offset=start, length=4, tag="test1"),
    ...     dict(offset=start+4, length=4, tag="test2"),
    ... ]
    >>> bool(_testGaps(test))
    False
    >>> test = [
    ...     dict(offset=start, length=4, tag="test1"),
    ...     dict(offset=start+5, length=4, tag="test2"),
    ... ]
    >>> bool(_testGaps(test))
    True
    >>> test = [
    ...     dict(offset=start, length=4, tag="test1"),
    ...     dict(offset=start+8, length=4, tag="test2"),
    ... ]
    >>> bool(_testGaps(test))
    True
    """
    # Walk the tables in ascending offset order; each table must begin
    # exactly at the padded end of the previous one.
    errors = []
    ordered = sorted((entry["offset"], entry) for entry in tableDirectory)
    prevTag = None
    prevEnd = None
    for offset, entry in ordered:
        paddedLength = calc4BytePaddedLength(entry["length"])
        tag = entry["tag"]
        if prevEnd is not None and offset != prevEnd:
            errors.append("Improper padding between the %s and %s tables." % (prevTag, tag))
        prevEnd = offset + paddedLength
        prevTag = tag
    return errors
def _testGapAfterFinalTable(dataLength, tableDirectory):
    """
    >>> start = sfntDirectorySize + (sfntDirectoryEntrySize * 2)
    >>> test = [
    ...     dict(offset=start, length=1, tag="test")
    ... ]
    >>> bool(_testGapAfterFinalTable(start + 4, test))
    False
    >>> test = [
    ...     dict(offset=start, length=1, tag="test")
    ... ]
    >>> bool(_testGapAfterFinalTable(start + 5, test))
    True
    >>> test = [
    ...     dict(offset=start, length=1, tag="test")
    ... ]
    >>> bool(_testGapAfterFinalTable(start + 8, test))
    True
    """
    # Find the table with the highest offset; nothing may follow its
    # padded end.
    finalOffset, finalEntry = max((entry["offset"], entry) for entry in tableDirectory)
    paddedEnd = finalOffset + calc4BytePaddedLength(finalEntry["length"])
    if dataLength - paddedEnd > 0:
        return ["Improper padding at the end of the file."]
    return []
def _testCheckSums(tableDirectory):
    """
    >>> data = "0" * 44
    >>> checkSum = calcTableChecksum("test", data)
    >>> test = [
    ...     dict(data=data, checkSum=checkSum, tag="test")
    ... ]
    >>> bool(_testCheckSums(test))
    False
    >>> test = [
    ...     dict(data=data, checkSum=checkSum+1, tag="test")
    ... ]
    >>> bool(_testCheckSums(test))
    True
    """
    # Recompute each table's checksum and compare against the stored one.
    errors = []
    for entry in tableDirectory:
        tag = entry["tag"]
        expected = calcTableChecksum(tag, entry["data"])
        if entry["checkSum"] != expected:
            errors.append("Invalid checksum for the %s table." % tag)
    return errors
def _testHeadCheckSum(header, tableDirectory):
    """
    >>> header = dict(sfntVersion="OTTO")
    >>> tableDirectory = [
    ...     dict(tag="head", offset=100, length=100, checkSum=123, data="00000000"+struct.pack(">L", 925903070)),
    ...     dict(tag="aaab", offset=200, length=100, checkSum=456),
    ...     dict(tag="aaac", offset=300, length=100, checkSum=789),
    ... ]
    >>> bool(_testHeadCheckSum(header, tableDirectory))
    """
    tables = dict((entry["tag"], entry) for entry in tableDirectory)
    # checkSumAdjustment is stored at bytes 8-11 of the head table data.
    stored = struct.unpack(">L", tables["head"]["data"][8:12])[0]
    expected = calcHeadCheckSumAdjustment(header["sfntVersion"], tables)
    if stored != expected:
        return ["The head checkSumAdjustment value is incorrect."]
    return []
def _testPaddingValue(tableDirectory, data):
    """
    # before first table
    >>> testDirectory = [dict(tag="aaaa", offset=28, length=4)]
    >>> bool(_testPaddingValue(testDirectory, "\x01" * 32))
    False
    >>> testDirectory = [dict(tag="aaaa", offset=32, length=4)]
    >>> bool(_testPaddingValue(testDirectory, "\x01" * 36))
    True
    # between tables
    >>> testDirectory = [dict(tag="aaaa", offset=44, length=4), dict(tag="bbbb", offset=48, length=4)]
    >>> bool(_testPaddingValue(testDirectory, "\x01" * 52))
    False
    >>> testDirectory = [dict(tag="aaaa", offset=44, length=4), dict(tag="bbbb", offset=52, length=4)]
    >>> bool(_testPaddingValue(testDirectory, "\x01" * 56))
    True
    # after final table
    >>> testDirectory = [dict(tag="aaaa", offset=28, length=4)]
    >>> bool(_testPaddingValue(testDirectory, "\x01" * 32))
    False
    >>> testDirectory = [dict(tag="aaaa", offset=28, length=4)]
    >>> bool(_testPaddingValue(testDirectory, "\x01" * 36))
    True
    """
    # NOTE(review): padding is compared against "\0" string values, as in
    # the doctests — presumably data arrives as a str here; verify against
    # the caller if bytes input is possible.
    errors = []
    # check the gap between the directory and the first table, and the
    # gaps between all subsequent tables
    entries = [(entry["offset"], entry) for entry in tableDirectory]
    prevName = "table directory"
    prevEnd = sfntDirectorySize + (sfntDirectoryEntrySize * len(tableDirectory))
    for _, entry in sorted(entries):
        tag = entry["tag"]
        offset = entry["offset"]
        if offset > prevEnd:
            # every byte between the previous end and this offset must be null
            padding = data[prevEnd:offset].replace("\0", "")
            if padding:
                errors.append("Bytes between %s and %s are not null." % (prevName, tag))
        # shift for the next table
        prevName = tag
        prevEnd = offset + entry["length"]
    # check the padding after the last table
    lastEntry = sorted(entries)[-1][1]
    trailing = data[lastEntry["offset"] + lastEntry["length"]:].replace("\0", "")
    if trailing:
        errors.append("Bytes after final table (%s) are not null." % lastEntry["tag"])
    return errors
if __name__ == "__main__":
    # Run this module's embedded doctests when executed directly.
    import doctest
    doctest.testmod(verbose=False)
| |
import threading
from peewee import _atomic
from peewee import SqliteDatabase
from peewee import transaction
from playhouse.tests.base import database_class
from playhouse.tests.base import mock
from playhouse.tests.base import ModelTestCase
from playhouse.tests.base import skip_if
from playhouse.tests.base import test_db
from playhouse.tests.models import *
class TestTransaction(ModelTestCase):
    """Tests for peewee transactions: the transaction()/_atomic() context
    managers, autocommit handling, decorators, and nesting behavior."""
    requires = [User, Blog]

    def tearDown(self):
        super(TestTransaction, self).tearDown()
        # Some tests disable autocommit; always restore it.
        test_db.set_autocommit(True)

    def test_transaction_connection_handling(self):
        """transaction() begins on entry, commits on clean exit, and rolls
        back when commit raises."""
        patch = 'peewee.Database'
        db = SqliteDatabase(':memory:')
        with mock.patch(patch, wraps=db) as patched_db:
            with transaction(patched_db):
                # Inside the block: only begin() has been called so far.
                patched_db.begin.assert_called_once_with()
                self.assertEqual(patched_db.commit.call_count, 0)
                self.assertEqual(patched_db.rollback.call_count, 0)
            # Clean exit: exactly one commit, no rollback.
            patched_db.begin.assert_called_once_with()
            patched_db.commit.assert_called_once_with()
            self.assertEqual(patched_db.rollback.call_count, 0)
        with mock.patch(patch, wraps=db) as patched_db:
            def _test_patched():
                # Force commit() to fail so the context manager must
                # fall back to rollback().
                patched_db.commit.side_effect = ValueError
                with transaction(patched_db):
                    pass
            self.assertRaises(ValueError, _test_patched)
            patched_db.begin.assert_called_once_with()
            patched_db.commit.assert_called_once_with()
            patched_db.rollback.assert_called_once_with()

    def test_atomic_nesting(self):
        """_atomic() uses a real transaction at the outermost level and
        savepoints for each nested level."""
        db = SqliteDatabase(':memory:')
        db_patches = mock.patch.multiple(
            db,
            begin=mock.DEFAULT,
            commit=mock.DEFAULT,
            execute_sql=mock.DEFAULT,
            rollback=mock.DEFAULT)
        with mock.patch('peewee.Database', wraps=db) as patched_db:
            with db_patches as db_mocks:
                begin = db_mocks['begin']
                commit = db_mocks['commit']
                execute_sql = db_mocks['execute_sql']
                rollback = db_mocks['rollback']
                with _atomic(patched_db):
                    # Outermost level: one transaction, no savepoints yet.
                    patched_db.transaction.assert_called_once_with()
                    begin.assert_called_once_with()
                    self.assertEqual(patched_db.savepoint.call_count, 0)
                    with _atomic(patched_db):
                        # First nested level: one savepoint, still no
                        # commit/rollback.
                        patched_db.transaction.assert_called_once_with()
                        begin.assert_called_once_with()
                        patched_db.savepoint.assert_called_once_with()
                        self.assertEqual(commit.call_count, 0)
                        self.assertEqual(rollback.call_count, 0)
                        with _atomic(patched_db):
                            # Second nested level: a second savepoint.
                            patched_db.transaction.assert_called_once_with()
                            begin.assert_called_once_with()
                            self.assertEqual(
                                patched_db.savepoint.call_count,
                                2)
                        begin.assert_called_once_with()
                        self.assertEqual(commit.call_count, 0)
                        self.assertEqual(rollback.call_count, 0)
                # Only the outermost exit commits.
                commit.assert_called_once_with()
                self.assertEqual(rollback.call_count, 0)

    def test_autocommit(self):
        """With autocommit off, inserts are invisible to other connections
        until an explicit commit."""
        test_db.set_autocommit(False)
        test_db.begin()
        u1 = User.create(username='u1')
        u2 = User.create(username='u2')
        # Open a new connection to the database; it won't see the
        # uncommitted user inserts yet.
        new_db = self.new_connection()
        res = new_db.execute_sql('select count(*) from users;')
        self.assertEqual(res.fetchone()[0], 0)
        # Commit our user inserts.
        test_db.commit()
        # Now the rows are query-able from another connection.
        res = new_db.execute_sql('select count(*) from users;')
        self.assertEqual(res.fetchone()[0], 2)

    def test_transactions(self):
        """A transaction suspended inside a generator keeps its writes
        invisible to other connections until the generator finishes."""
        def transaction_generator():
            with test_db.transaction():
                User.create(username='u1')
                yield
                User.create(username='u2')
        gen = transaction_generator()
        next(gen)
        # Mid-transaction: another connection sees nothing, but the
        # current connection sees its own uncommitted row.
        conn2 = self.new_connection()
        res = conn2.execute_sql('select count(*) from users;').fetchone()
        self.assertEqual(res[0], 0)
        self.assertEqual(User.select().count(), 1)
        # Consume the rest of the generator.
        for _ in gen:
            pass
        self.assertEqual(User.select().count(), 2)
        res = conn2.execute_sql('select count(*) from users;').fetchone()
        self.assertEqual(res[0], 2)

    def test_manual_commit_rollback(self):
        """commit()/rollback() on the transaction handle checkpoint and
        restart the transaction mid-block."""
        def assertUsers(expected):
            query = User.select(User.username).order_by(User.username)
            self.assertEqual(
                [username for username, in query.tuples()],
                expected)
        with test_db.transaction() as txn:
            User.create(username='charlie')
            txn.commit()  # 'charlie' is now durable
            User.create(username='huey')
            txn.rollback()  # discards 'huey' only
        assertUsers(['charlie'])
        with test_db.transaction() as txn:
            User.create(username='huey')
            txn.rollback()
            User.create(username='zaizee')  # committed on block exit
        assertUsers(['charlie', 'zaizee'])

    def test_transaction_decorator(self):
        """transaction() also works as a function decorator."""
        @test_db.transaction()
        def create_user(username):
            User.create(username=username)
        create_user('charlie')
        self.assertEqual(User.select().count(), 1)

    def test_commit_on_success(self):
        """commit_on_success rolls back everything when the wrapped
        function raises, and commits when it succeeds."""
        self.assertTrue(test_db.get_autocommit())
        @test_db.commit_on_success
        def will_fail():
            User.create(username='u1')
            Blog.create()  # missing required fields, will raise an error
        self.assertRaises(IntegrityError, will_fail)
        self.assertEqual(User.select().count(), 0)
        self.assertEqual(Blog.select().count(), 0)
        @test_db.commit_on_success
        def will_succeed():
            u = User.create(username='u1')
            Blog.create(title='b1', user=u)
        will_succeed()
        self.assertEqual(User.select().count(), 1)
        self.assertEqual(Blog.select().count(), 1)

    def test_context_mgr(self):
        """Same semantics via the context-manager form."""
        def do_will_fail():
            with test_db.transaction():
                User.create(username='u1')
                Blog.create()  # missing required fields, will raise an error
        self.assertRaises(IntegrityError, do_will_fail)
        self.assertEqual(Blog.select().count(), 0)
        def do_will_succeed():
            with transaction(test_db):
                u = User.create(username='u1')
                Blog.create(title='b1', user=u)
        do_will_succeed()
        self.assertEqual(User.select().count(), 1)
        self.assertEqual(Blog.select().count(), 1)
        def do_manual_rollback():
            with test_db.transaction() as txn:
                User.create(username='u2')
                txn.rollback()
        do_manual_rollback()
        # The rolled-back 'u2' never lands; earlier rows are untouched.
        self.assertEqual(User.select().count(), 1)
        self.assertEqual(Blog.select().count(), 1)

    def test_nesting_transactions(self):
        """Nested commit_on_success calls share one transaction: a failure
        anywhere rolls back everything."""
        @test_db.commit_on_success
        def outer(should_fail=False):
            self.assertEqual(test_db.transaction_depth(), 1)
            User.create(username='outer')
            inner(should_fail)
            self.assertEqual(test_db.transaction_depth(), 1)
        @test_db.commit_on_success
        def inner(should_fail):
            self.assertEqual(test_db.transaction_depth(), 2)
            User.create(username='inner')
            if should_fail:
                raise ValueError('failing')
        self.assertRaises(ValueError, outer, should_fail=True)
        self.assertEqual(User.select().count(), 0)
        self.assertEqual(test_db.transaction_depth(), 0)
        outer(should_fail=False)
        self.assertEqual(User.select().count(), 2)
        self.assertEqual(test_db.transaction_depth(), 0)
class TestExecutionContext(ModelTestCase):
    """Tests for execution_context(): depth tracking, per-context
    connections, and thread isolation."""
    requires = [User]

    def test_context_simple(self):
        """Depth is 1 inside a context, 0 outside; data written in one
        context is visible to the next."""
        with test_db.execution_context():
            User.create(username='charlie')
            self.assertEqual(test_db.execution_context_depth(), 1)
        self.assertEqual(test_db.execution_context_depth(), 0)
        with test_db.execution_context():
            self.assertTrue(
                User.select().where(User.username == 'charlie').exists())
            self.assertEqual(test_db.execution_context_depth(), 1)
        self.assertEqual(test_db.execution_context_depth(), 0)
        # NOTE(review): result unused — presumably a leftover from a
        # removed assertion.
        queries = self.queries()

    def test_context_ext(self):
        """Nested contexts stack; get_conn() returns the innermost active
        context's connection after an inner context exits."""
        with test_db.execution_context():
            with test_db.execution_context() as inner_ctx:
                with test_db.execution_context():
                    User.create(username='huey')
                    self.assertEqual(test_db.execution_context_depth(), 3)
                conn = test_db.get_conn()
                self.assertEqual(conn, inner_ctx.connection)
                self.assertTrue(
                    User.select().where(User.username == 'huey').exists())
        self.assertEqual(test_db.execution_context_depth(), 0)

    def test_context_multithreaded(self):
        """An execution context in another thread gets its own connection
        and does not affect the main thread's depth."""
        conn = test_db.get_conn()
        # evt2 signals "worker context entered"; evt releases the worker.
        evt = threading.Event()
        evt2 = threading.Event()
        def create():
            with test_db.execution_context() as ctx:
                database = ctx.database
                self.assertEqual(database.execution_context_depth(), 1)
                evt2.set()
                evt.wait()
                # Worker thread uses a different connection than main.
                self.assertNotEqual(conn, ctx.connection)
                User.create(username='huey')
        create_t = threading.Thread(target=create)
        create_t.daemon = True
        create_t.start()
        evt2.wait()
        # Main thread's depth is unaffected by the worker's context.
        self.assertEqual(test_db.execution_context_depth(), 0)
        evt.set()
        create_t.join()
        self.assertEqual(test_db.execution_context_depth(), 0)
        self.assertEqual(User.select().count(), 1)

    def test_context_concurrency(self):
        """Several threads using nested contexts concurrently all persist
        their writes."""
        def create(i):
            with test_db.execution_context():
                with test_db.execution_context() as ctx:
                    User.create(username='u%s' % i)
                    self.assertEqual(ctx.database.execution_context_depth(), 2)
        threads = [threading.Thread(target=create, args=(i,))
                   for i in range(5)]
        for thread in threads:
            thread.start()
        [thread.join() for thread in threads]
        self.assertEqual(
            [user.username for user in User.select().order_by(User.username)],
            ['u0', 'u1', 'u2', 'u3', 'u4'])
class TestAutoRollback(ModelTestCase):
    """Tests for the database's autorollback option, which rolls back a
    failed implicit transaction automatically."""
    requires = [User, Blog]

    def setUp(self):
        # Enable autorollback before the base class opens connections.
        test_db.autorollback = True
        super(TestAutoRollback, self).setUp()

    def tearDown(self):
        # Restore defaults for subsequent test classes.
        test_db.autorollback = False
        test_db.set_autocommit(True)
        super(TestAutoRollback, self).tearDown()

    def test_auto_rollback(self):
        # Exceptions are still raised.
        self.assertRaises(IntegrityError, Blog.create)
        # The transaction should have been automatically rolled-back, allowing
        # us to create new objects (in a new transaction).
        u = User.create(username='u')
        self.assertTrue(u.id)
        # No-op, the previous INSERT was already committed.
        test_db.rollback()
        # Ensure we can get our user back.
        u_db = User.get(User.username == 'u')
        self.assertEqual(u.id, u_db.id)

    def test_transaction_ctx_mgr(self):
        'Only auto-rollback when autocommit is enabled.'
        def create_error():
            self.assertRaises(IntegrityError, Blog.create)
        # autocommit is disabled in a transaction ctx manager.
        with test_db.transaction():
            # Error occurs, but exception is caught, leaving the current txn
            # in a bad state.
            create_error()
            try:
                create_error()
            except Exception as exc:
                # Subsequent call will raise an InternalError with postgres.
                self.assertTrue(isinstance(exc, InternalError))
            else:
                self.assertFalse(
                    issubclass(database_class, PostgresqlDatabase))
        # New transactions are not affected.
        self.test_auto_rollback()

    def test_manual(self):
        """With autocommit and autorollback both off, a failed statement
        leaves the transaction in a bad state until manual rollback."""
        test_db.set_autocommit(False)
        # Will not be rolled back.
        self.assertRaises(IntegrityError, Blog.create)
        if issubclass(database_class, PostgresqlDatabase):
            # Postgres aborts the transaction; recover manually.
            self.assertRaises(InternalError, User.create, username='u')
            test_db.rollback()
        u = User.create(username='u')
        test_db.commit()
        u_db = User.get(User.username == 'u')
        self.assertEqual(u.id, u_db.id)
class TestSavepoints(ModelTestCase):
    """Exercises nested savepoints and their rollback semantics."""
    requires = [User]

    def _outer(self, fail_outer=False, fail_inner=False):
        # Outer savepoint: creates one user, swallows a failing inner
        # savepoint, and may then fail itself.
        with test_db.savepoint():
            User.create(username='outer')
            try:
                self._inner(fail_inner)
            except ValueError:
                pass
            if fail_outer:
                raise ValueError

    def _inner(self, fail_inner):
        # Inner savepoint: creates one user and optionally fails.
        with test_db.savepoint():
            User.create(username='inner')
            if fail_inner:
                raise ValueError('failing')

    def assertNames(self, expected):
        rows = User.select().order_by(User.username)
        self.assertEqual([row.username for row in rows], expected)

    def test_success(self):
        # Neither savepoint fails: both rows survive.
        with test_db.transaction():
            self._outer()
            self.assertEqual(User.select().count(), 2)
        self.assertNames(['inner', 'outer'])

    def test_inner_failure(self):
        # Only the inner savepoint is rolled back.
        with test_db.transaction():
            self._outer(fail_inner=True)
            self.assertEqual(User.select().count(), 1)
        self.assertNames(['outer'])

    def test_outer_failure(self):
        # Because the outer savepoint is rolled back, we'll lose the
        # inner savepoint as well.
        with test_db.transaction():
            self.assertRaises(ValueError, self._outer, fail_outer=True)
            self.assertEqual(User.select().count(), 0)

    def test_failure(self):
        # Both savepoints fail: nothing survives.
        with test_db.transaction():
            self.assertRaises(
                ValueError, self._outer, fail_outer=True, fail_inner=True)
            self.assertEqual(User.select().count(), 0)
class TestAtomic(ModelTestCase):
    """Tests for db.atomic(): transaction at the outermost level,
    savepoints when nested, usable as a decorator."""
    requires = [User, UniqueModel]

    def test_atomic(self):
        """Rolled-back nested atomic blocks discard only their own rows."""
        with test_db.atomic():
            User.create(username='u1')
            with test_db.atomic():
                User.create(username='u2')
                with test_db.atomic() as txn3:
                    User.create(username='u3')
                    txn3.rollback()  # discards u3 only
                with test_db.atomic():
                    User.create(username='u4')
            with test_db.atomic() as txn5:
                User.create(username='u5')
                txn5.rollback()  # discards u5 only
            User.create(username='u6')
        query = User.select().order_by(User.username)
        self.assertEqual(
            [u.username for u in query],
            ['u1', 'u2', 'u4', 'u6'])

    def test_atomic_second_connection(self):
        """Nothing is visible to a second connection until the outermost
        atomic block commits."""
        def test_separate_conn(expected):
            # Query the users table over a brand-new connection.
            new_db = self.new_connection()
            cursor = new_db.execute_sql('select username from users;')
            usernames = sorted(row[0] for row in cursor.fetchall())
            self.assertEqual(usernames, expected)
            new_db.close()
        with test_db.atomic():
            User.create(username='u1')
            test_separate_conn([])
            with test_db.atomic():
                User.create(username='u2')
                with test_db.atomic() as tx3:
                    User.create(username='u3')
                    tx3.rollback()
                test_separate_conn([])
            users = User.select(User.username).order_by(User.username)
            self.assertEqual(
                [user.username for user in users],
                ['u1', 'u2'])
        # After commit the same rows are visible.
        users = User.select(User.username).order_by(User.username)
        self.assertEqual(
            [user.username for user in users],
            ['u1', 'u2'])

    def test_atomic_decorator(self):
        """atomic() also works as a function decorator."""
        @test_db.atomic()
        def create_user(username):
            User.create(username=username)
        create_user('charlie')
        self.assertEqual(User.select().count(), 1)

    def test_atomic_decorator_nesting(self):
        """A failure in a nested atomic function rolls back only that
        function's savepoint."""
        @test_db.atomic()
        def create_unique(name):
            UniqueModel.create(name=name)
        @test_db.atomic()
        def create_both(username):
            User.create(username=username)
            try:
                create_unique(username)
            except IntegrityError:
                pass  # duplicate name: only the inner savepoint rolls back
        create_unique('huey')
        self.assertEqual(UniqueModel.select().count(), 1)
        create_both('charlie')
        self.assertEqual(User.select().count(), 1)
        self.assertEqual(UniqueModel.select().count(), 2)
        create_both('huey')
        self.assertEqual(User.select().count(), 2)
        self.assertEqual(UniqueModel.select().count(), 2)

    def test_atomic_rollback(self):
        """A failed nested atomic block leaves the enclosing block usable."""
        with test_db.atomic():
            UniqueModel.create(name='charlie')
            try:
                with test_db.atomic():
                    UniqueModel.create(name='charlie')
            except IntegrityError:
                pass
            else:
                assert False
            with test_db.atomic():
                UniqueModel.create(name='zaizee')
            try:
                with test_db.atomic():
                    UniqueModel.create(name='zaizee')
            except IntegrityError:
                pass
            else:
                assert False
            UniqueModel.create(name='mickey')
            UniqueModel.create(name='huey')
        names = [um.name for um in
                 UniqueModel.select().order_by(UniqueModel.name)]
        self.assertEqual(names, ['charlie', 'huey', 'mickey', 'zaizee'])
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import run_infer_type
from tvm.contrib import graph_runtime
from tvm.relay.testing.temp_op_attr import TempOpAttr
# We use llvm target for testing functionality. `llvm` points to an older Intel
# generation machine, that legalizes to a simple lowering. Therefore, the
# legalization is overwritten such that it can be skipped and we use the
# QNNCanonicalizeOps lowering for the testing.
def legalize_qnn_conv2d(attrs, inputs, types):
    """Legalization override for qnn.conv2d that declines to rewrite
    (returning None skips legalization, per the note above)."""
    return None
def get_ref_func(
    data,
    kernel,
    input_zero_point,
    kernel_zero_point,
    input_scale,
    kernel_scale,
    kernel_size,
    padding,
    strides,
    dilation,
    data_layout,
    kernel_layout,
    out_dtype,
    groups,
    channels=None,
):
    """Build the reference Relay function: cast both operands to int32,
    subtract their zero points, and run a plain nn.conv2d.

    The scales are accepted for signature parity but not used here.
    """
    data_i32 = relay.op.cast(data, "int32")
    kernel_i32 = relay.op.cast(kernel, "int32")
    shifted_data = relay.op.subtract(data_i32, relay.const(input_zero_point, "int32"))
    shifted_kernel = relay.op.subtract(kernel_i32, relay.const(kernel_zero_point, "int32"))
    conv = relay.op.nn.conv2d(
        shifted_data,
        shifted_kernel,
        padding=padding,
        strides=strides,
        dilation=dilation,
        groups=groups,
        channels=channels,
        kernel_size=kernel_size,
        out_dtype=out_dtype,
        data_layout=data_layout,
        kernel_layout=kernel_layout,
    )
    return relay.Function(relay.analysis.free_vars(conv), conv)
def get_qnn_func(
    data,
    kernel,
    input_zero_point,
    kernel_zero_point,
    input_scale,
    kernel_scale,
    kernel_size,
    padding,
    strides,
    dilation,
    data_layout,
    kernel_layout,
    out_dtype,
    channels,
    groups,
):
    """Build an IRModule containing a single qnn.conv2d with the given
    quantization parameters (zero points / scales passed as constants)."""
    conv = relay.qnn.op.conv2d(
        data,
        kernel,
        input_zero_point=relay.const(input_zero_point, "int32"),
        kernel_zero_point=relay.const(kernel_zero_point, "int32"),
        input_scale=relay.const(input_scale, "float32"),
        kernel_scale=relay.const(kernel_scale, "float32"),
        kernel_size=kernel_size,
        strides=strides,
        dilation=dilation,
        padding=padding,
        out_dtype=out_dtype,
        groups=groups,
        channels=channels,
        data_layout=data_layout,
        kernel_layout=kernel_layout,
    )
    func = relay.Function(relay.analysis.free_vars(conv), conv)
    return tvm.IRModule.from_expr(func)
def get_funcs(
    data_shape,
    data_dtype,
    kernel_shape,
    kernel_dtype,
    input_zero_point,
    kernel_zero_point,
    input_scale,
    kernel_scale,
    kernel_size,
    padding,
    strides,
    dilation,
    data_layout,
    kernel_layout,
    out_dtype,
    groups=1,
    channels=None,
):
    """Create fresh data/kernel variables and return the pair
    (reference module, qnn module) built over them."""
    data_var = relay.var("data", shape=data_shape, dtype=data_dtype)
    kernel_var = relay.var("kernel", shape=kernel_shape, dtype=kernel_dtype)
    reference = get_ref_func(
        data_var,
        kernel_var,
        input_zero_point,
        kernel_zero_point,
        input_scale,
        kernel_scale,
        kernel_size,
        padding,
        strides,
        dilation,
        data_layout,
        kernel_layout,
        out_dtype,
        groups,
        channels,
    )
    # Infer types on the reference function before wrapping it in a module.
    reference = tvm.IRModule.from_expr(run_infer_type(reference))
    quantized = get_qnn_func(
        data_var,
        kernel_var,
        input_zero_point,
        kernel_zero_point,
        input_scale,
        kernel_scale,
        kernel_size,
        padding,
        strides,
        dilation,
        data_layout,
        kernel_layout,
        out_dtype,
        channels,
        groups,
    )
    return (reference, quantized)
def verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype):
    """Run ref_func and qnn_func on the same random inputs and require
    exact (integer) agreement between their outputs."""

    def _value_range(dtype):
        # randint's high bound is exclusive, mirroring the ranges used
        # by the original test helper.
        return (0, 255) if dtype == "uint8" else (-128, 127)

    def _random_inputs():
        # Keeping inputs multiple of 4 because of a bug in Average Pool2d
        # https://discuss.tvm.ai/t/pool2d-gives-bad-output-for-integer-inputs/3377
        lo, hi = _value_range(data_dtype)
        golden_data = np.random.randint(low=lo, high=hi, size=data_shape).astype(data_dtype)
        lo, hi = _value_range(kernel_dtype)
        golden_weight = np.random.randint(low=lo, high=hi, size=kernel_shape).astype(
            kernel_dtype
        )
        return (golden_data, golden_weight)

    def _run(func, golden_inputs):
        # Compile for llvm, bind the weight as a param, feed the data,
        # and return the first output as a numpy array.
        with tvm.transform.PassContext(opt_level=2):
            golden_data, golden_weight = golden_inputs
            params = {"kernel": golden_weight}
            graph, lib, params = relay.build(func, "llvm", params=params)
            runtime_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
            runtime_mod.set_input("data", golden_data)
            runtime_mod.set_input(**params)
            runtime_mod.run()
            return runtime_mod.get_output(0).asnumpy()

    golden_inputs = _random_inputs()
    golden_output = _run(ref_func, golden_inputs)
    qnn_output = _run(qnn_func, golden_inputs)
    np.testing.assert_equal(qnn_output, golden_output)
def test_no_zero_point():
    """With both zero points at 0, qnn.conv2d must match plain conv2d
    for uint8 and int8 inputs alike."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        # Same shapes for both cases; only the dtype changes.
        for dtype in ("uint8", "int8"):
            data_shape = (2, 1, 2, 4)
            kernel_shape = (3, 1, 2, 2)
            ref_func, qnn_func = get_funcs(
                data_shape=data_shape,
                data_dtype=dtype,
                kernel_shape=kernel_shape,
                kernel_dtype=dtype,
                input_zero_point=0,
                kernel_zero_point=0,
                input_scale=1.0,
                kernel_scale=1.0,
                kernel_size=(2, 2),
                padding=(0, 0),
                strides=(1, 1),
                dilation=(1, 1),
                data_layout="NCHW",
                kernel_layout="OIHW",
                out_dtype="int32",
            )
            verify(ref_func, qnn_func, data_shape, dtype, kernel_shape, dtype)
def test_kernel_zero_point():
    """Nonzero kernel zero point (input zero point fixed at 0) for uint8
    and int8 inputs."""
    # (dtype, data_shape, kernel_shape, kernel_zero_point)
    cases = [
        ("uint8", (2, 4, 2, 4), (3, 4, 2, 2), 1),
        ("int8", (2, 1, 2, 4), (3, 1, 2, 2), 5),
    ]
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        for dtype, data_shape, kernel_shape, kernel_zp in cases:
            ref_func, qnn_func = get_funcs(
                data_shape=data_shape,
                data_dtype=dtype,
                kernel_shape=kernel_shape,
                kernel_dtype=dtype,
                input_zero_point=0,
                kernel_zero_point=kernel_zp,
                input_scale=1.0,
                kernel_scale=1.0,
                kernel_size=(2, 2),
                padding=(0, 0),
                strides=(1, 1),
                dilation=(1, 1),
                data_layout="NCHW",
                kernel_layout="OIHW",
                out_dtype="int32",
            )
            verify(ref_func, qnn_func, data_shape, dtype, kernel_shape, dtype)
def test_input_zero_point():
    """Legalized qnn.conv2d must match the reference when only the input
    zero point is non-zero (kernel zero point stays at 0)."""
    common = dict(
        data_shape=(2, 4, 2, 4),
        kernel_shape=(3, 4, 2, 2),
        input_zero_point=5,
        kernel_zero_point=0,
        input_scale=1.0,
        kernel_scale=1.0,
        kernel_size=(2, 2),
        padding=(0, 0),
        strides=(1, 1),
        dilation=(1, 1),
        data_layout="NCHW",
        kernel_layout="OIHW",
        out_dtype="int32",
    )
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        # identical configuration, exercised for both element types
        for dtype in ("uint8", "int8"):
            ref_func, qnn_func = get_funcs(data_dtype=dtype, kernel_dtype=dtype, **common)
            verify(ref_func, qnn_func, (2, 4, 2, 4), dtype, (3, 4, 2, 2), dtype)
def test_both_zero_point():
    """Legalized qnn.conv2d must match the reference when both the input and
    the kernel zero points are non-zero."""
    common = dict(
        data_shape=(2, 4, 2, 4),
        kernel_shape=(3, 4, 2, 2),
        input_zero_point=5,
        kernel_zero_point=3,
        input_scale=1.0,
        kernel_scale=1.0,
        kernel_size=(2, 2),
        padding=(0, 0),
        strides=(1, 1),
        dilation=(1, 1),
        data_layout="NCHW",
        kernel_layout="OIHW",
        out_dtype="int32",
    )
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        # identical configuration, exercised for both element types
        for dtype in ("uint8", "int8"):
            ref_func, qnn_func = get_funcs(data_dtype=dtype, kernel_dtype=dtype, **common)
            verify(ref_func, qnn_func, (2, 4, 2, 4), dtype, (3, 4, 2, 2), dtype)
def test_layout():
    """Legalization handles NHWC data with an HWIO (regular) kernel layout and
    with an HWOI kernel layout as used by depthwise convolutions."""
    common = dict(
        data_dtype="uint8",
        kernel_dtype="uint8",
        input_zero_point=5,
        kernel_zero_point=3,
        input_scale=1.0,
        kernel_scale=1.0,
        kernel_size=(2, 2),
        padding=(0, 0),
        strides=(1, 1),
        dilation=(1, 1),
        data_layout="NHWC",
        out_dtype="int32",
    )
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        # NHWC data / HWIO kernel
        data_shape = (2, 2, 4, 4)
        kernel_shape = (2, 2, 4, 3)
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            kernel_shape=kernel_shape,
            kernel_layout="HWIO",
            **common
        )
        verify(ref_func, qnn_func, data_shape, "uint8", kernel_shape, "uint8")

        # NHWC data / HWOI kernel — used in depthwise conv (groups == channels)
        data_shape = (2, 2, 4, 3)
        kernel_shape = (2, 2, 3, 1)
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            kernel_shape=kernel_shape,
            kernel_layout="HWOI",
            groups=3,
            **common
        )
        verify(ref_func, qnn_func, data_shape, "uint8", kernel_shape, "uint8")
def test_padding():
    """Exercise symmetric and asymmetric spatial padding in both NCHW/OIHW
    and NHWC/HWIO layouts."""
    common = dict(
        data_dtype="uint8",
        kernel_dtype="uint8",
        input_zero_point=8,
        input_scale=1.0,
        kernel_scale=1.0,
        kernel_size=(2, 2),
        strides=(1, 1),
        dilation=(1, 1),
        out_dtype="int32",
    )
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        # symmetric padding, NCHW/OIHW
        data_shape, kernel_shape = (1, 4, 2, 2), (3, 4, 2, 2)
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            kernel_shape=kernel_shape,
            kernel_zero_point=5,
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            **common
        )
        verify(ref_func, qnn_func, data_shape, "uint8", kernel_shape, "uint8")

        # symmetric padding, NHWC/HWIO
        data_shape, kernel_shape = (2, 2, 4, 4), (2, 2, 4, 3)
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            kernel_shape=kernel_shape,
            kernel_zero_point=3,
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
            **common
        )
        verify(ref_func, qnn_func, data_shape, "uint8", kernel_shape, "uint8")

        # asymmetric padding, NHWC/HWIO (same shapes as above)
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            kernel_shape=kernel_shape,
            kernel_zero_point=3,
            padding=(1, 1, 2, 2),
            data_layout="NHWC",
            kernel_layout="HWIO",
            **common
        )
        verify(ref_func, qnn_func, data_shape, "uint8", kernel_shape, "uint8")
def test_dilation():
    """Dilated (2, 2) convolutions through both legalization paths: the
    simpler fallback used for non-zero zero points, and the path taken when
    both zero points are 0."""
    common = dict(
        data_shape=(2, 4, 4, 4),
        data_dtype="uint8",
        kernel_shape=(3, 4, 2, 2),
        kernel_dtype="uint8",
        input_scale=1.0,
        kernel_scale=1.0,
        kernel_size=(2, 2),
        padding=(0, 0),
        strides=(1, 1),
        dilation=(2, 2),
        data_layout="NCHW",
        kernel_layout="OIHW",
        out_dtype="int32",
    )
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        # Non-zero kernel point - fall back to simpler lowering.
        ref_func, qnn_func = get_funcs(input_zero_point=5, kernel_zero_point=3, **common)
        verify(ref_func, qnn_func, (2, 4, 4, 4), "uint8", (3, 4, 2, 2), "uint8")

        # Zero kernel point
        ref_func, qnn_func = get_funcs(input_zero_point=0, kernel_zero_point=0, **common)
        verify(ref_func, qnn_func, (2, 4, 4, 4), "uint8", (3, 4, 2, 2), "uint8")
def test_const_folding():
    """A legalized qnn.conv2d with a constant kernel must fold completely:
    no residual reshape ops may survive FoldConstant."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        data_shape = (2, 4, 2, 4)
        kernel_shape = (3, 4, 2, 2)
        weight_np = np.random.randint(low=0, high=255, size=kernel_shape).astype("uint8")
        data_var = relay.var("data", shape=data_shape, dtype="uint8")
        const_kernel = relay.const(weight_np)
        qnn_func = get_qnn_func(
            data_var,
            const_kernel,
            input_zero_point=8,
            kernel_zero_point=3,
            kernel_size=(2, 2),
            input_scale=1.0,
            kernel_scale=1.0,
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
            channels=kernel_shape[0],
            groups=1,
        )
        folded_mod = transform.FoldConstant()(qnn_func)
        assert "reshape" not in folded_mod["main"].astext()
def test_kernel_size_1x1():
    """A 1x1 kernel must take the dedicated lowering (no avg_pool2d inserted)
    and still agree with the reference implementation."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        data_shape, kernel_shape = (2, 4, 2, 4), (3, 4, 1, 1)
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype="uint8",
            kernel_shape=kernel_shape,
            kernel_dtype="uint8",
            input_zero_point=5,
            kernel_zero_point=3,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(1, 1),
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
        )
        assert "avg_pool2d" not in qnn_func.astext()
        verify(ref_func, qnn_func, data_shape, "uint8", kernel_shape, "uint8")
def test_kernel_size_1x1_strides_2():
    """Same 1x1-kernel check as above, but with (2, 2) strides: still no
    avg_pool2d, still matching the reference."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        data_shape, kernel_shape = (2, 4, 2, 4), (3, 4, 1, 1)
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype="uint8",
            kernel_shape=kernel_shape,
            kernel_dtype="uint8",
            input_zero_point=5,
            kernel_zero_point=3,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(1, 1),
            padding=(0, 0),
            strides=(2, 2),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
        )
        assert "avg_pool2d" not in qnn_func.astext()
        verify(ref_func, qnn_func, data_shape, "uint8", kernel_shape, "uint8")
def test_tflite_large_irregular():
    """Regression test mirroring a large, irregularly shaped TFLite conv:
    1001 output channels over a 1024-channel 1x1 input. With data and kernel
    both pinned at their zero point (127), every output must be exactly 0."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        data_shape = (1, 1024, 1, 1)
        kernel_shape = (1001, 1024, 1, 1)
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype="uint8",
            kernel_shape=kernel_shape,
            kernel_dtype="uint8",
            input_zero_point=127,
            kernel_zero_point=127,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(1, 1),
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
        )
        golden_data = np.full(data_shape, 127).astype("uint8")
        golden_weight = np.full(kernel_shape, 127).astype("uint8")
        with tvm.transform.PassContext(opt_level=2):
            graph, lib, params = relay.build(
                qnn_func, "llvm", params={"kernel": golden_weight}
            )
            runtime_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
            runtime_mod.set_input("data", golden_data)
            runtime_mod.set_input(**params)
            runtime_mod.run()
            qnn_output = runtime_mod.get_output(0).asnumpy()
        expected = np.zeros((1, 1001, 1, 1), dtype="uint8")
        np.testing.assert_equal(qnn_output, expected)
def test_tflite_output_multiplier_greater_than_one():
    """End-to-end check against golden values for a conv whose requantization
    multiplier exceeds one (both zero points at 128), as produced by TFLite."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        data_shape = (2, 1, 2, 4)
        kernel_shape = (3, 1, 2, 2)
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype="uint8",
            kernel_shape=kernel_shape,
            kernel_dtype="uint8",
            input_scale=1.0,
            kernel_scale=1.0,
            input_zero_point=128,
            kernel_zero_point=128,
            kernel_size=(2, 2),
            padding=(0, 0),
            strides=(2, 2),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
        )
        # inputs/weights are offset by the zero point (128) before casting
        raw_data = (1, 1, 1, 1, 2, 2, 2, 2, 1, 2, 3, 4, 1, 2, 3, 4)
        golden_data = (128 + np.array(raw_data).reshape(data_shape)).astype("uint8")
        raw_weight = (1, 2, 3, 4, -1, 1, -1, 1, -1, -1, 1, 1)
        golden_weight = (128 + np.array(raw_weight).reshape(kernel_shape)).astype("uint8")
        with tvm.transform.PassContext(opt_level=2):
            graph, lib, params = relay.build(
                qnn_func, "llvm", params={"kernel": golden_weight}
            )
            runtime_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
            runtime_mod.set_input("data", golden_data)
            runtime_mod.set_input(**params)
            runtime_mod.run()
            qnn_output = runtime_mod.get_output(0).asnumpy()
        expected = np.array((17, 17, 0, 0, 2, 2, 16, 36, 2, 2, 0, 0)).reshape(2, 3, 1, 2)
        np.testing.assert_equal(qnn_output, expected)
def test_tflite_anistropic_strides():
    """Golden-value check for anisotropic strides (1 vertical, 3 horizontal),
    mirroring TFLite behaviour with both zero points at 127."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        data_shape = (1, 1, 3, 6)
        kernel_shape = (1, 1, 2, 2)
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype="uint8",
            kernel_shape=kernel_shape,
            kernel_dtype="uint8",
            input_zero_point=127,
            kernel_zero_point=127,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=(0, 0),
            strides=(1, 3),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
        )
        golden_data = np.array(
            (133, 131, 129, 125, 123, 121,
             135, 133, 131, 123, 121, 119,
             137, 135, 133, 121, 119, 117)
        ).reshape(data_shape).astype("uint8")
        golden_weight = np.array((129, 131, 133, 135)).reshape(kernel_shape).astype("uint8")
        with tvm.transform.PassContext(opt_level=2):
            graph, lib, params = relay.build(
                qnn_func, "llvm", params={"kernel": golden_weight}
            )
            runtime_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
            runtime_mod.set_input("data", golden_data)
            runtime_mod.set_input(**params)
            runtime_mod.run()
            qnn_output = runtime_mod.get_output(0).asnumpy()
        expected = np.array((124, -92, 164, -132)).reshape(1, 1, 2, 2)
        np.testing.assert_equal(qnn_output, expected)
def test_broadcast_layout():
    """NHWC legalization must keep bias-add broadcasting valid on both sides
    of the conv output; the module must still build for an AVX-512 target."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        data_shape = (1, 229, 229, 3)  # NHWC
        kernel_shape = (7, 7, 3, 64)  # HWIO
        _, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype="uint8",
            kernel_shape=kernel_shape,
            kernel_dtype="int8",
            input_zero_point=8,
            kernel_zero_point=3,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(7, 7),
            padding=(1, 1),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
            out_dtype="int32",
        )
        body = qnn_func["main"].body
        bias = relay.var("bias", shape=(64,), dtype="int32")
        bias2 = relay.var("bias2", shape=(1, 225, 225, 1), dtype="int32")
        # Check broadcast support on both lhs and rhs
        body = relay.add(body, bias2)
        body = relay.add(bias2, body)
        body = relay.add(bias, body)
        body = relay.add(body, bias)
        func = relay.Function(relay.analysis.free_vars(body), body)
        mod = tvm.IRModule.from_expr(func)
        with tvm.transform.PassContext(opt_level=3):
            graph, lib, params = relay.build(mod, "llvm -mcpu=skylake-avx512")
def test_depthwise_depth_multiplier():
    """Depthwise convolutions (groups == input channels) with depth
    multipliers of 1 and 2, in both NCHW/OIHW and NHWC/HWOI layouts."""
    common = dict(
        data_dtype="uint8",
        kernel_dtype="uint8",
        input_zero_point=5,
        kernel_zero_point=3,
        input_scale=1.0,
        kernel_scale=1.0,
        kernel_size=(3, 3),
        padding=(0, 0),
        strides=(1, 1),
        dilation=(1, 1),
        out_dtype="int32",
        groups=4,
    )
    cases = [
        # (data_shape, kernel_shape, data_layout, kernel_layout, extra kwargs)
        ((2, 4, 16, 16), (4, 1, 3, 3), "NCHW", "OIHW", {}),                   # multiplier 1
        ((10, 4, 16, 16), (4, 2, 3, 3), "NCHW", "OIHW", {"channels": 8}),     # multiplier 2
        ((2, 16, 16, 4), (3, 3, 4, 1), "NHWC", "HWOI", {}),                   # multiplier 1
        ((2, 16, 16, 4), (3, 3, 4, 2), "NHWC", "HWOI", {"channels": 8}),      # multiplier 2
    ]
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        for data_shape, kernel_shape, data_layout, kernel_layout, extra in cases:
            ref_func, qnn_func = get_funcs(
                data_shape=data_shape,
                kernel_shape=kernel_shape,
                data_layout=data_layout,
                kernel_layout=kernel_layout,
                **dict(common, **extra)
            )
            verify(ref_func, qnn_func, data_shape, "uint8", kernel_shape, "uint8")
def test_per_channel_kernel_scale():
    """qnn.conv2d must accept a vector kernel_scale (one scale per output
    channel); this only checks that the expression builds into a module."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        data_shape = (2, 1, 2, 4)
        kernel_shape = (3, 1, 2, 2)
        data = relay.var("data", shape=data_shape, dtype="uint8")
        kernel = relay.var("kernel", shape=kernel_shape, dtype="uint8")
        # one scale per output channel
        per_channel_scales = relay.const(np.array([2, 2, 2]).astype("float32"))
        expr = relay.qnn.op.conv2d(
            data,
            kernel,
            input_zero_point=relay.const(0, "int32"),
            kernel_zero_point=relay.const(0, "int32"),
            input_scale=relay.const(2.0, "float32"),
            kernel_scale=per_channel_scales,
            kernel_size=(2, 2),
            channels=kernel_shape[0],
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
        )
        func = relay.Function(relay.analysis.free_vars(expr), expr)
        mod = tvm.IRModule.from_expr(func)
if __name__ == "__main__":
    # Run every test in the same order the original script used.
    for test_fn in (
        test_no_zero_point,
        test_input_zero_point,
        test_kernel_zero_point,
        test_both_zero_point,
        test_layout,
        test_padding,
        test_dilation,
        test_const_folding,
        test_kernel_size_1x1,
        test_kernel_size_1x1_strides_2,
        test_tflite_large_irregular,
        test_broadcast_layout,
        test_tflite_output_multiplier_greater_than_one,
        test_tflite_anistropic_strides,
        test_depthwise_depth_multiplier,
        test_per_channel_kernel_scale,
    ):
        test_fn()
| |
import collections.abc
import claripy
class SimVariable:
    """Base class for all variable kinds tracked by the analyses."""

    __slots__ = ['ident', 'name', 'region', 'category']

    def __init__(self, ident=None, name=None, region=None, category=None):
        """
        :param ident:    A unique identifier provided by user or the program. Usually a string.
        :param str name: Name of this variable.
        :param region:   Region the variable belongs to; defaults to the empty string.
        :param category: Optional category tag for the variable.
        """
        self.ident = ident
        self.name = name
        self.region = "" if region is None else region
        self.category = category

    #
    # Operations
    #

    def __add__(self, other):
        # Adding the integer 0 is an identity; any other operand is undefined.
        return self if (isinstance(other, int) and other == 0) else None

    def __sub__(self, other):
        # Subtracting the integer 0 is an identity; any other operand is undefined.
        return self if (isinstance(other, int) and other == 0) else None
class SimConstantVariable(SimVariable):
    """A variable that wraps a known constant value."""

    __slots__ = ['value', '_hash']

    def __init__(self, ident=None, value=None, region=None):
        """
        :param ident:  A unique identifier, usually a string.
        :param value:  The constant value, or None when it is unknown.
        :param region: Region the variable belongs to.
        """
        super(SimConstantVariable, self).__init__(ident=ident, region=region)
        self.value = value
        self._hash = None  # lazily computed hash cache

    def __repr__(self):
        s = "<%s|const %s>" % (self.region, self.value)
        return s

    def __eq__(self, other):
        if not isinstance(other, SimConstantVariable):
            return False
        if self.value is None or other.value is None:
            # they may or may not represent the same constant. return not equal to be safe
            return False
        return self.ident == other.ident and self.value == other.value and self.region == other.region

    def __hash__(self):
        if self._hash is None:
            # FIX: the hashed tuple used to include self.ident twice; the
            # duplicate was redundant and is dropped.
            self._hash = hash(('const', self.value, self.ident, self.region))
        return self._hash
class SimTemporaryVariable(SimVariable):
    """A VEX-style temporary variable, identified solely by its temp ID."""

    __slots__ = ['tmp_id', '_hash']

    def __init__(self, tmp_id):
        """
        :param int tmp_id: ID of the temporary.
        """
        SimVariable.__init__(self)
        self.tmp_id = tmp_id
        self._hash = None  # lazily computed hash cache

    def __repr__(self):
        s = "<tmp %d>" % (self.tmp_id)
        return s

    def __hash__(self):
        if self._hash is None:
            self._hash = hash('tmp_%d' % (self.tmp_id))
        return self._hash

    def __eq__(self, other):
        # FIX: compare the IDs directly instead of comparing hashes — hash
        # equality does not imply equality and a collision would make two
        # unrelated temporaries compare equal.
        if isinstance(other, SimTemporaryVariable):
            return self.tmp_id == other.tmp_id
        return False
class SimRegisterVariable(SimVariable):
    """A variable backed by a register, identified by its register-file offset."""

    __slots__ = ['reg', 'size', '_hash']

    def __init__(self, reg_offset, size, ident=None, name=None, region=None, category=None):
        """
        :param reg_offset: Offset of the register in the register file.
        :param size:       Size of the variable, in bytes.
        """
        SimVariable.__init__(self, ident=ident, name=name, region=region, category=category)
        self.reg = reg_offset
        self.size = size
        self._hash = None  # lazily computed hash cache

    def __repr__(self):
        ident_part = "[%s]" % self.ident if self.ident else ""
        region_part = hex(self.region) if isinstance(self.region, int) else self.region
        return "<%s%s|Reg %s, %sB>" % (region_part, ident_part, self.reg, self.size)

    def __hash__(self):
        if self._hash is None:
            self._hash = hash(('reg', self.region, self.reg, self.size, self.ident))
        return self._hash

    def __eq__(self, other):
        if not isinstance(other, SimRegisterVariable):
            return False
        return (self.ident == other.ident
                and self.reg == other.reg
                and self.size == other.size
                and self.region == other.region)
class SimMemoryVariable(SimVariable):
    # A variable backed by a memory location. `addr` may be a concrete int, an
    # AddressWrapper, or a claripy AST (see __hash__ for the cases handled).
    __slots__ = ['addr', 'size', '_hash']
    def __init__(self, addr, size, ident=None, name=None, region=None, category=None):
        """
        :param addr:     Address of the variable in memory.
        :param size:     Size in bytes; a non-symbolic claripy BV is concretized to an int.
        :param ident:    A unique identifier, usually a string.
        :param str name: Name of this variable.
        :param region:   Region the variable belongs to.
        :param category: Optional category tag.
        """
        SimVariable.__init__(self, ident=ident, name=name, region=region, category=category)
        self.addr = addr
        if isinstance(size, claripy.ast.BV) and not size.symbolic:
            # Convert it to a concrete number
            size = size._model_concrete.value
        self.size = size
        self._hash = None  # lazily computed hash cache
    def __repr__(self):
        # size may be an int or a symbolic expression; format accordingly
        if type(self.size) is int:
            size = '%d' % self.size
        else:
            size = '%s' % self.size
        if type(self.addr) is int:
            s = "<%s|Mem %#x %s>" % (self.region, self.addr, size)
        else:
            s = "<%s|Mem %s %s>" % (self.region, self.addr, size)
        return s
    def __hash__(self):
        if self._hash is not None:
            return self._hash
        # The address can come in several representations; pick a hash source
        # that is stable for each of them.
        if isinstance(self.addr, AddressWrapper):
            addr_hash = hash(self.addr)
        elif type(self.addr) is int:
            addr_hash = self.addr
        elif self.addr._model_concrete is not self.addr:
            # claripy AST backed by a concrete model
            addr_hash = hash(self.addr._model_concrete)
        elif self.addr._model_vsa is not self.addr:
            # claripy AST backed by a VSA model
            addr_hash = hash(self.addr._model_vsa)
        elif self.addr._model_z3 is not self.addr:
            # claripy AST backed by a Z3 model
            addr_hash = hash(self.addr._model_z3)
        else:
            addr_hash = hash(self.addr)
        self._hash = hash((addr_hash, hash(self.size), self.ident))
        return self._hash
    def __eq__(self, other):
        # NOTE(review): unlike SimRegisterVariable.__eq__, region is not
        # compared here — confirm this asymmetry is intentional.
        if isinstance(other, SimMemoryVariable):
            return self.ident == other.ident and \
                   self.addr == other.addr and \
                   self.size == other.size
        return False
    @property
    def bits(self):
        # Size in bits (self.size is stored in bytes).
        return self.size * 8
class SimStackVariable(SimMemoryVariable):
    # A variable that lives on the stack, addressed as a base register plus an
    # offset (e.g. sp+0x10 or bp-0x8).
    __slots__ = ['base', 'offset']
    def __init__(self, offset, size, base='sp', base_addr=None, ident=None, name=None, region=None, category=None):
        """
        :param offset:    Offset from the base register. Very large positive
                          offsets are reinterpreted as wrapped-around negative
                          two's-complement values (see below).
        :param size:      Size of the variable, in bytes.
        :param base:      Name of the base register, e.g. 'sp'.
        :param base_addr: Concrete address of the base, if known.
        :param ident:     A unique identifier, usually a string.
        :param str name:  Name of this variable.
        :param region:    Region the variable belongs to.
        :param category:  Optional category tag.
        """
        if isinstance(offset, int) and offset > 0x1000000:
            # I don't think any positive stack offset will be greater than that...
            # convert it to a negative number
            mask = (1 << offset.bit_length()) - 1
            offset = - ((0 - offset) & mask)
        if base_addr is not None:
            addr = offset + base_addr
        else:
            # TODO: this is not optimal
            # without a concrete base, fall back to using the raw offset as the address
            addr = offset
        super(SimStackVariable, self).__init__(addr, size, ident=ident, name=name, region=region, category=category)
        self.base = base
        self.offset = offset
    def __repr__(self):
        # size may be an int or a symbolic expression
        if type(self.size) is int:
            size = '%d' % self.size
        else:
            size = '%s' % self.size
        prefix = "%s(stack)" % self.name if self.name is not None else "Stack"
        ident = "[%s]" % self.ident if self.ident else ""
        region_str = hex(self.region) if isinstance(self.region, int) else self.region
        if type(self.offset) is int:
            # render the offset with an explicit sign ('' when zero)
            if self.offset < 0:
                offset = "%#x" % self.offset
            elif self.offset > 0:
                offset = "+%#x" % self.offset
            else:
                offset = ""
            s = "<%s%s|%s %s%s, %s B>" % (region_str, ident, prefix, self.base, offset, size)
        else:
            # symbolic offset: show the computed address instead
            s = "<%s%s|%s %s%s, %s B>" % (region_str, ident, prefix, self.base, self.addr, size)
        return s
    def __eq__(self, other):
        # NOTE: exact type check — a SimStackVariable never equals a plain
        # SimMemoryVariable or a subclass instance.
        if type(other) is not SimStackVariable:
            return False
        return self.ident == other.ident and \
               self.base == other.base and \
               self.offset == other.offset and \
               self.size == other.size
    def __hash__(self):
        return hash((self.ident, self.base, self.offset, self.size))
class SimVariableSet(collections.abc.MutableSet):
    """
    A collection of SimVariables.

    Register and memory variables are kept in separate sets, and mirror sets
    of plain integers (register offsets / memory byte addresses) are
    maintained alongside them so membership tests are O(1) integer lookups.
    """
    def __init__(self):
        self.register_variables = set()
        # For the sake of performance optimization, all elements in register_variables must be concrete integers which
        # representing register offsets..
        # There shouldn't be any problem apart from GetI/PutI instructions. We simply ignore them for now.
        # TODO: Take care of register offsets that are not aligned to (arch.bytes)
        # TODO: arch.bits/what? That number has no power here anymore.
        self.register_variable_offsets = set()

        # memory_variables holds SimMemoryVariable objects
        self.memory_variables = set()
        # For the sake of performance, we have another set that stores memory addresses of memory_variables
        self.memory_variable_addresses = set()

    def add(self, item):  # pylint:disable=arguments-differ
        # Exact type checks are deliberate: subclasses (e.g. SimStackVariable)
        # are not supported by the per-byte address bookkeeping below.
        if type(item) is SimRegisterVariable:
            if not self.contains_register_variable(item):
                self.add_register_variable(item)
        elif type(item) is SimMemoryVariable:
            if not self.contains_memory_variable(item):
                self.add_memory_variable(item)
        else:
            # FIX: replaced the placeholder Exception('WTF') with a typed,
            # descriptive error (TypeError is still an Exception subclass).
            raise TypeError("Unsupported variable type %s" % type(item))

    def add_register_variable(self, reg_var):
        self.register_variables.add(reg_var)
        self.register_variable_offsets.add(reg_var.reg)

    def add_memory_variable(self, mem_var):
        self.memory_variables.add(mem_var)
        base_address = mem_var.addr.address  # Dealing with AddressWrapper
        # Track every byte the variable covers so overlap checks are O(1).
        for i in range(mem_var.size):
            self.memory_variable_addresses.add(base_address + i)

    def discard(self, item):  # pylint:disable=arguments-differ
        if type(item) is SimRegisterVariable:
            if self.contains_register_variable(item):
                self.discard_register_variable(item)
        elif isinstance(item, SimMemoryVariable):
            if self.contains_memory_variable(item):
                self.discard_memory_variable(item)
        else:
            raise TypeError("Unsupported variable type %s" % type(item))

    def discard_register_variable(self, reg_var):
        self.register_variables.remove(reg_var)
        self.register_variable_offsets.remove(reg_var.reg)

    def discard_memory_variable(self, mem_var):
        self.memory_variables.remove(mem_var)
        for i in range(mem_var.size):
            self.memory_variable_addresses.remove(mem_var.addr.address + i)

    def __len__(self):
        return len(self.register_variables) + len(self.memory_variables)

    def __iter__(self):
        for i in self.register_variables: yield i
        for i in self.memory_variables: yield i

    def add_memory_variables(self, addrs, size):
        for a in addrs:
            var = SimMemoryVariable(a, size)
            self.add_memory_variable(var)

    def copy(self):
        s = SimVariableSet()
        s.register_variables |= self.register_variables
        s.register_variable_offsets |= self.register_variable_offsets
        s.memory_variables |= self.memory_variables
        s.memory_variable_addresses |= self.memory_variable_addresses
        return s

    def complement(self, other):
        """
        Calculate the complement of `self` and `other`.

        :param other: Another SimVariableSet instance.
        :return: The complement result.
        """
        s = SimVariableSet()
        s.register_variables = self.register_variables - other.register_variables
        s.register_variable_offsets = self.register_variable_offsets - other.register_variable_offsets
        s.memory_variables = self.memory_variables - other.memory_variables
        s.memory_variable_addresses = self.memory_variable_addresses - other.memory_variable_addresses
        return s

    def contains_register_variable(self, reg_var):
        reg_offset = reg_var.reg
        # TODO: Make sure reg_offset is aligned to machine-word length
        return reg_offset in self.register_variable_offsets

    def contains_memory_variable(self, mem_var):
        a = mem_var.addr
        if type(a) in (tuple, list): a = a[-1]
        return a in self.memory_variable_addresses

    def __ior__(self, other):
        # In-place union; `other` must be a SimVariableSet.
        self.register_variables |= other.register_variables
        self.register_variable_offsets |= other.register_variable_offsets
        self.memory_variables |= other.memory_variables
        self.memory_variable_addresses |= other.memory_variable_addresses
        # BUGFIX: __ior__ must return self — otherwise `a |= b` rebinds a to None.
        return self

    def __contains__(self, item):
        if type(item) is SimRegisterVariable:
            return self.contains_register_variable(item)
        elif type(item) is SimMemoryVariable:
            # TODO: Make it better!
            return self.contains_memory_variable(item)
        else:
            # FIX: removed a leftover `__import__('ipdb').set_trace()` debugging
            # hook and raised a typed error instead.
            raise TypeError("Unsupported variable type %s" % type(item))
from .storage.memory import AddressWrapper
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from future.utils import iteritems
import os
import re
import sys
import glob
import codecs
from sqlalchemy.sql import select
from collections import defaultdict
from ...db_helpers import reload_annotator_labels
from ...parser import TextDocPreprocessor, CorpusParser
from ...models import Candidate, StableLabel, Document, TemporarySpan, Sentence, candidate_subclass, GoldLabel
class BratProject(object):
"""
Snorkel Import/Export for
Brat Rapid Annotation Tool
http://brat.nlplab.org/
Brat uses standoff annotation format (see: http://brat.nlplab.org/standoff.html)
Annotation ID Types
T: text-bound annotation
R: relation
E: event
A: attribute
M: modification (alias for attribute, for backward compatibility)
N: normalization [new in v1.3]
#: note
    Many of the advanced schema abilities of BRAT are not implemented, so
mind the following caveats:
(1) We do not currently support hierarchical entity definitions, e.g.,
!Anatomical_entity
!Anatomical_structure
Organism_subdivision
Anatomical_system
Organ
(2) All relations must be binary with a single argument type
(3) Attributes, normalization, and notes are added as candidate meta information
"""
TEXT_BOUND_ID = 'T'
RELATION_ID = 'R'
EVENT_ID = 'E'
ATTRIB_ID = 'A'
MOD_ID = 'M'
NORM_ID = 'N'
NOTE_ID = '#'
def __init__(self, session, tmpl_path='annotation.config.tmpl', encoding="utf-8", verbose=True):
"""
Initialize BRAT import tools.
:param session:
:param tmpl_path: annotation config template. don't change this.
:param encoding:
:param verbose:
"""
self.session = session
self.encoding = encoding
self.verbose = verbose
# load brat config template
mod_path = "{}/{}".format(os.path.abspath(os.path.dirname(__file__)), tmpl_path)
self.brat_tmpl = "".join(open(mod_path, "rU").readlines())
# snorkel dynamic types
self.subclasses = {}
def import_project(self, input_dir, annotations_only=True, annotator_name='brat', num_threads=1, parser=None):
"""
Import BART project,
:param input_dir:
:param autoreload:
:param num_threads:
:param parser:
:return:
"""
config_path = "{}/{}".format(input_dir, "annotation.conf")
if not os.path.exists(config_path):
print("Fatal error: missing 'annotation.conf' file", file=sys.stderr)
return
# load brat config (this defines relation and argument types)
config = self._parse_config(config_path)
anno_filelist = set([os.path.basename(fn).strip(".ann") for fn in glob.glob(input_dir + "/*.ann")])
# import standoff annotations for all documents
annotations = {}
for fn in anno_filelist:
txt_fn = "{}/{}.txt".format(input_dir, fn)
ann_fn = "{}/{}.ann".format(input_dir, fn)
if os.path.exists(txt_fn) and os.path.exists(ann_fn):
annotations[fn] = self._parse_annotations(txt_fn, ann_fn)
# by default, we parse and import all project documents
if not annotations_only:
self._parse_documents(input_dir + "/*.txt", num_threads, parser)
# create types
self._create_candidate_subclasses(config)
# create candidates
self._create_candidates(annotations, annotator_name)
def export_project(self, output_dir, positive_only_labels=True):
"""
:param output_dir:
:return:
"""
candidates = self.session.query(Candidate).all()
documents = self.session.query(Document).all()
gold_labels = {label.candidate_id: label for label in self.session.query(GoldLabel).all()}
gold_labels = {uid:label for uid, label in iteritems(gold_labels)
if (positive_only_labels and label.value == 1) or not positive_only_labels}
doc_index = {doc.name:doc for doc in documents}
cand_index = _group_by_document(candidates)
snorkel_types = {type(c): 1 for c in candidates}
for name in doc_index:
doc_anno = self._build_doc_annotations(cand_index[name], gold_labels) if name in cand_index else []
fname = "{}{}".format(output_dir,name)
# write .ann files
with codecs.open(fname + ".ann",'w',self.encoding) as fp:
fp.write("\n".join(doc_anno))
# write documents
with codecs.open(fname + ".txt",'w',self.encoding) as fp:
fp.write(doc_to_text(doc_index[name]))
# export config file
config = self._create_config(snorkel_types)
config_path = "{}annotation.conf".format(output_dir)
with codecs.open(config_path, 'w', self.encoding) as fp:
fp.write(config)
if self.verbose:
print("Export complete")
print("\t {} documents".format(len(doc_index)))
print("\t {} annotations".format( sum([len(cand_index[name]) for name in cand_index] )))
def _get_arg_type(self, c, span, use_titlecase=True):
"""
Given a span object, determine it's internal type
TODO: What is a better way of doing this?
:param c:
:param span:
:param use_titlecase:
:return:
"""
for key in c.__dict__.keys():
if c.__dict__[key] == span:
key = map(lambda x:x[0].upper()+x[1:], re.split("[-_]",key))
return "".join(key)
return None
def _get_normed_rela_name(self, name):
name = re.split("[-_]", name)
if len(name) == 1:
return name[0]
name = map(lambda x: x.lower(), name)
return "".join(map(lambda x: x[0].upper() + x[1:], name))
def _build_doc_annotations(self, cands, gold_labels=[]):
    """
    Build BRAT .ann lines (entities first, then relations) for one document.
    Assume binary relation defs
    :param cands: candidates belonging to a single document
    :param gold_labels: mapping of candidate_id -> label; candidates whose id
        is not present are skipped (NOTE: default is a shared mutable list;
        `c.id not in gold_labels` is then a containment test on it)
    :return: list of tab-separated BRAT annotation strings
    """
    entities,relations,types = {},{},{}
    for i,c in enumerate(cands):
        if c.id not in gold_labels:
            continue
        for span in c:
            if span not in entities:
                types[span] = self._get_arg_type(c,span)
                # BRAT text-bound ids: T1, T2, ...
                entities[span] = ("T",len(entities)+1)
        arg1 = "{}{}".format(*entities[c[0]])
        arg2 = "{}{}".format(*entities[c[1]])
        # BRAT relation ids: R1, R2, ...; relation type is the candidate class name
        relations[('R',len(relations)+1)] = "{} Arg1:{} Arg2:{}".format(type(c).__name__, arg1, arg2)
    # invert to uid -> span (Python 2 `iteritems`)
    entities = {uid:span for span,uid in iteritems(entities)}
    annotations = []
    # export entities (relation arguments)
    for uid in sorted(entities, key=lambda x:x[-1]):
        span = entities[uid]
        # char offsets are the last two ':'-delimited fields of the stable id;
        # presumably inclusive there while BRAT ends are exclusive, hence the
        # +1 below — TODO confirm against the stable_id convention
        char_start, char_end = map(int,span.stable_id.split(":")[-2:])
        char_end += 1
        arg_id = "{}{}".format(*uid)
        annotations.append("{}\t{} {} {}\t{}".format(arg_id, types[span], char_start, char_end, span.get_span()))
    # export relations
    for uid in sorted(relations, key=lambda x:x[-1]):
        arg_id = "{}{}".format(*uid)
        annotations.append("{}\t{}".format(arg_id, relations[uid]))
    # candidate attributes
    # TODO
    return annotations
def _parse_documents(self, input_path, num_threads, parser):
    """
    Load and parse all text documents under `input_path` into the session.

    :param input_path: path to the input text documents
    :param num_threads: parallelism passed through to the corpus parser
    :param parser: parser instance wrapped by CorpusParser
    :return: None
    """
    preprocessor = TextDocPreprocessor(path=input_path, encoding=self.encoding)
    CorpusParser(parser).apply(preprocessor, parallelism=num_threads)
def _parse_annotations(self, txt_filename, ann_filename):
    """
    Use parser to import BRAT standoff format
    TODO: Currently only supports Entities & Relations
    :param txt_filename: path to the document text (.txt)
    :param ann_filename: path to the BRAT annotations (.ann)
    :return: dict of annotation id -> entity dict or (rela_type, arg1, arg2) tuple
    """
    annotations = {}
    # load document as a list of tokenized sentences (one per line)
    doc = []
    with codecs.open(txt_filename, "rU", encoding=self.encoding) as fp:
        for line in fp:
            doc += [line.strip().split()]
    # build doc string and char to word index: every character position of
    # every token maps back to its (sentence, word) coordinates
    doc_str = ""
    char_idx = {}
    for i, sent in enumerate(doc):
        for j in range(0, len(sent)):
            char_idx[len(doc_str)] = (i, j)
            for ch in sent[j]:
                doc_str += ch
                char_idx[len(doc_str)] = (i, j)
            # single space between tokens, newline after the last token
            doc_str += " " if j != len(sent) - 1 else "\n"
    doc_str = doc_str.strip()
    # load annotations
    with codecs.open(ann_filename, "rU", encoding=self.encoding) as fp:
        for line in fp:
            row = line.strip().split("\t")
            # first character of the id encodes the annotation kind (T/R/E/A)
            anno_id_prefix = row[0][0]
            # parse each entity/relation type
            if anno_id_prefix == Brat.TEXT_BOUND_ID:
                anno_id, entity, text = row
                entity_type = entity.split()[0]
                # NOTE(review): map() is lazy on Python 3, so len(spans) below
                # would raise — this code path assumes Python 2
                spans = map(lambda x: map(int, x.split()),
                            entity.lstrip(entity_type).split(";"))
                # discontinuous mentions
                if len(spans) != 1:
                    # NOTE(review): missing file= keyword — this prints the
                    # repr of sys.stderr to stdout rather than writing to stderr
                    print("NotImplementedError: Discontinuous Spans", sys.stderr)
                    continue
                entity = []
                for (i, j) in spans:
                    if i in char_idx:
                        mention = doc_str[i:j]
                        tokens = mention.split()
                        sent_id, word_offset = char_idx[i]
                        word_mention = doc[sent_id][word_offset:word_offset + len(tokens)]
                        parts = {"sent_id":sent_id,"char_start":i,"char_end":j, "entity_type":entity_type,
                                 "idx_span":(word_offset, word_offset + len(tokens)), "span":word_mention}
                        entity += [parts]
                    else:
                        # span start does not align with any token boundary
                        print("SUB SPAN ERROR {} ({},{})".format(text, i, j), file=sys.stderr)
                        continue
                # TODO: we assume continuous spans here
                annotations[anno_id] = entity if not entity else entity[0]
            elif anno_id_prefix in [Brat.RELATION_ID,'*']:
                anno_id, rela = row
                rela_type, arg1, arg2 = rela.split()
                # arguments may be "Arg1:T3" or bare ids
                arg1 = arg1.split(":")[1] if ":" in arg1 else arg1
                arg2 = arg2.split(":")[1] if ":" in arg2 else arg2
                annotations[anno_id] = (rela_type, arg1, arg2)
            elif anno_id_prefix == Brat.EVENT_ID:
                print("NotImplementedError: Events", file=sys.stderr)
                raise NotImplementedError
            elif anno_id_prefix == Brat.ATTRIB_ID:
                print("NotImplementedError: Attributes", file=sys.stderr)
    return annotations
def _parse_config(self, filename):
"""
Parse BRAT
:param filename:
:return:
"""
config = defaultdict(list)
with open(filename, "rU") as fp:
curr = None
for line in fp:
# skip comments
line = line.strip()
if not line or line[0] == '#':
continue
# brat definition?
m = re.search("^\[(.+)\]$", line)
if m:
curr = m.group(1)
continue
config[curr].append(line)
# type-specific parsing
tmp = []
for item in config['relations']:
m = re.search("^(.+)\s+Arg1:(.+),\s*Arg2:(.+),*\s*(.+)*$", item)
name, arg1, arg2 = m.group(1).strip(), m.group(2).strip(), m.group(3).strip()
# convert relations to camel case
name = self._get_normed_rela_name(name)
arg2 = arg2.split(",")[0] # strip any <rel-type> defs
arg1 = arg1.split("|")
arg2 = arg2.split("|")
tmp.append((name,arg1,arg2))
config['relations'] = tmp
tmp = []
for item in config['attributes']:
name, arg = item.split()
arg = arg.split(":")[-1]
tmp.append((name, arg))
config['attributes'] = tmp
return config
def _create_candidate_subclasses(self, config):
    """
    Given a BRAT config file, create Snorkel candidate subclasses.
    NOTE: This method has a lot of hacks to deal with the schema definition limitations in Snorkel
    :param config: parsed annotation.conf dict (see _parse_config)
    :return: None; populates self.subclasses
    """
    for class_name in config['entities']:
        try:
            # TODO: we strip nesting of entity defs, since Snorkel doesn't support hierarchical entity types
            class_name = class_name.strip()
            # see http://brat.nlplab.org/configuration.html#advanced-entities for advanced entity config
            # skip disabled types or seperators (these only display in the BRAT is-a hierarchy)
            if class_name[0] in ['!','-']:
                continue
            self.subclasses[class_name] = candidate_subclass(class_name, [class_name.lower()])
            print('CREATED TYPE Entity({},[{}])'.format(class_name, class_name.lower()))
        except:
            # NOTE(review): bare except silently swallows ALL errors here
            # (including duplicate type definitions) — consider narrowing
            pass
    # NOTE: relations must be uniquely named
    for item in config['relations']:
        name, arg1, arg2 = item
        # Skip <ENTITY> tags; the generic entity argument (currently unsupported)
        ignore_args = set(['<ENTITY>'])
        if ignore_args.intersection(arg1) or ignore_args.intersection(arg2):
            continue
        # TODO: Assume simple relation types *without* multiple argument types
        if (len(arg1) > 1 or len(arg2) > 1) and arg1 != arg2:
            print("Error: Snorkel currently does not support multiple argument types per relation", file=sys.stderr)
        try:
            args = sorted(set(arg1 + arg2))
            # fix for relations across the same type: disambiguate as Type1/Type2
            if len(arg1 + arg2) > 1 and len(set(arg1 + arg2)) == 1:
                args = ["{}1".format(args[0]),"{}2".format(args[0])]
            # NOTE(review): map() is lazy on Python 3; candidate_subclass
            # presumably expects a list — verify under py3
            args = map(lambda x:x.lower(),args)
            name = name.replace("-","_")
            self.subclasses[name] = candidate_subclass(name, args)
            print('CREATED TYPE Relation({},{})'.format(name, args))
        except Exception as e:
            print(e)
def _create_config(self, candidate_types):
"""
Export a minimal BRAT configuration schema defining
a binary relation and two argument types.
TODO: Model richer semantics here (asymmetry, n-arity relations)
:param candidate_type:
:return:
"""
entity_defs, rela_defs = [], []
for stype in candidate_types:
rel_type = str(stype.type).rstrip(".type")
arg_types = [key.rstrip("_id") for key in stype.__dict__ if "_id" in key]
arg_types = [name[0].upper()+name[1:] for name in arg_types]
entity_defs.extend(arg_types)
if len(arg_types) > 1:
rela_name = [str(stype.type).replace(".type","")] + arg_types
rela_defs.append("{}\tArg1:{}, Arg2:{}".format(*rela_name))
entity_defs = set(entity_defs)
rela_defs = set(rela_defs)
return self.brat_tmpl.format("\n".join(entity_defs), "\n".join(rela_defs), "", "")
def _create_candidates(self, annotations, annotator_name, clear=True):
    """
    Convert parsed BRAT annotations into StableLabel, TemporarySpan, and
    Candidate rows in the session.
    TODO: Add simpler candidate instantiation helper functions
    :param annotations: dict of doc name -> {anno_id: entity dict or relation tuple}
    :param annotator_name: name recorded on the created StableLabel rows
    :param clear: if True, delete existing candidates in each split before adding
    :return: None
    """
    # create stable annotation labels
    stable_labels_by_type = defaultdict(list)
    for name in annotations:
        if annotations[name]:
            # BRAT ids starting with 'T' are text-bound spans; 'R' are relations
            spans = [key for key in annotations[name] if key[0] == Brat.TEXT_BOUND_ID]
            relations = [key for key in annotations[name] if key[0] in [Brat.RELATION_ID]]
            # create span labels of the form "<doc>::span:<start>:<end>"
            spans = {key:"{}::span:{}:{}".format(name, annotations[name][key]["char_start"],
                                                 annotations[name][key]["char_end"]) for key in spans}
            for key in spans:
                entity_type = annotations[name][key]['entity_type']
                stable_labels_by_type[entity_type].append(spans[key])
            # create relation labels
            for key in relations:
                rela_type, arg1, arg2 = annotations[name][key]
                # sort arguments by entity type for a canonical ordering
                rela = sorted([[annotations[name][arg1]["entity_type"], spans[arg1]],
                               [annotations[name][arg2]["entity_type"],spans[arg2]]])
                # NOTE(review): zip(...)[1] requires Python 2 (zip is lazy in py3)
                stable_labels_by_type[rela_type].append("~~".join(zip(*rela)[1]))
    # create stable labels
    # NOTE: we store each label class type in a different split so that it is compatible with
    # the current version of 'reload_annotator_labels', where we create candidates by split id
    for i, class_type in enumerate(stable_labels_by_type):
        for context_stable_id in stable_labels_by_type[class_type]:
            # skip labels already stored by this annotator
            query = self.session.query(StableLabel).filter(StableLabel.context_stable_ids == context_stable_id)
            query = query.filter(StableLabel.annotator_name == annotator_name)
            if query.count() != 0:
                continue
            self.session.add(StableLabel(context_stable_ids=context_stable_id, split=i,
                                         annotator_name=annotator_name, value=1))
    abs_offsets = {}
    entity_types = defaultdict(list)
    for i, class_type in enumerate(stable_labels_by_type):
        # resolve the candidate subclass registered for this label type
        if class_type in self.subclasses:
            class_name = self.subclasses[class_type]
        else:
            class_name = self.subclasses[self._get_normed_rela_name(class_type)]
        for et in stable_labels_by_type[class_type]:
            contexts = et.split('~~')
            spans = []
            for c,et in zip(contexts,class_name.__argnames__):
                stable_id = c.split(":")
                name, offsets = stable_id[0], stable_id[-2:]
                span = map(int, offsets)
                doc = self.session.query(Document).filter(Document.name == name).one()
                # cache per-document absolute sentence offsets
                if name not in abs_offsets:
                    abs_offsets[name] = abs_doc_offsets(doc)
                # locate the sentence containing this span and convert the
                # absolute char offsets to sentence-relative (end inclusive)
                for j,offset in enumerate(abs_offsets[name]):
                    if span[0] >= offset[0] and span[1] <= offset[1]:
                        try:
                            tc = TemporarySpan(char_start=span[0]-offset[0], char_end=span[1]-offset[0]-1,
                                               sentence=doc.sentences[j])
                            tc.load_id_or_insert(self.session)
                            spans.append(tc)
                        except Exception as e:
                            print("BRAT candidate conversion error {} {}".format(len(doc.sentences), j))
                            print(e)
            entity_types[class_type].append(spans)
    for i, class_type in enumerate(stable_labels_by_type):
        if class_type in self.subclasses:
            class_name = self.subclasses[class_type]
        else:
            class_name = self.subclasses[self._get_normed_rela_name(class_type)]
        if clear:
            # wipe any existing candidates in this split before re-adding
            self.session.query(Candidate).filter(Candidate.split == i).delete()
        candidate_args = {'split': i}
        for args in entity_types[class_type]:
            for j, arg_name in enumerate(class_name.__argnames__):
                candidate_args[arg_name + '_id'] = args[j].id
            candidate = class_name(**candidate_args)
            self.session.add(candidate)
    self.session.commit()
def _group_by_document(candidates):
"""
:param candidates:
:return:
"""
doc_index = defaultdict(list)
for c in candidates:
name = c[0].sentence.document.name
doc_index[name].append(c)
return doc_index
def abs_doc_offsets(doc):
    """
    Return absolute [char_start, char_end] offsets for each sentence in `doc`,
    taken from the last two ':'-delimited fields of each sentence's stable_id.

    :param doc: document whose sentences carry ':'-delimited stable_id strings
    :return: list of [start, end] integer pairs, one per sentence
    """
    abs_char_offsets = []
    for sent in doc.sentences:
        stable_id = sent.stable_id.split(":")
        # BUGFIX (py3): wrap map() in list() so callers can index the result;
        # under Python 3 a bare map object is not subscriptable
        offsets = list(map(int, stable_id[-2:]))
        abs_char_offsets.append(offsets)
    return abs_char_offsets
def doc_to_text(doc, sent_delim='\n'):
    """
    Convert document object to original text representation.
    Assumes parser offsets map to original document offsets

    :param doc: document with sentences carrying stable_id char offsets
    :param sent_delim: delimiter repeated once per character of gap between
        consecutive sentences
    :return: reconstructed document text
    """
    spans = []
    for sent in doc.sentences:
        char_start, char_end = map(int, sent.stable_id.split(":")[-2:])
        spans.append({"text": sent.text, "char_start": char_start, "char_end": char_end})
    s = ""
    for i in range(len(spans) - 1):
        # pad with sent_delim to preserve the original character gap
        gap = spans[i + 1]['char_start'] - spans[i]['char_end']
        s += spans[i]['text'] + (sent_delim * gap)
    # BUGFIX: the original loop dropped the final sentence entirely,
    # truncating every exported document
    if spans:
        s += spans[-1]['text']
    return s
| |
#! /usr/bin/env python
import os
import itertools as it
import sys
import textwrap
#import gtk
import numpy as np
import sympy as sy
import sympy.stats
import odespy as ode
import matplotlib
import matplotlib.pyplot as plt
import sympy.physics.mechanics as mech
"""
Pretty plotting code.
"""
_all_spines = ["top", "right", "bottom", "left"]
def hide_spines(s=["top", "right"]):
"""Hides the top and rightmost axis spines from view for all active
figures and their respective axes."""
global _all_spines
# Retrieve a list of all current figures.
figures = [x for x in matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
for figure in figures:
# Get all Axis instances related to the figure.
for ax in figure.canvas.figure.get_axes():
for spine in _all_spines :
if spine in s :
ax.spines[spine].set_color('none')
if "top" in s and "bottom" in s :
ax.xaxis.set_ticks_position('none')
elif "top" in s :
ax.xaxis.set_ticks_position('bottom')
elif "bottom" in s :
ax.xaxis.set_ticks_position('top')
else :
ax.xaxis.set_ticks_position('both')
if "left" in s and "right" in s :
ax.yaxis.set_ticks_position('none')
elif "left" in s :
ax.yaxis.set_ticks_position('right')
elif "right" in s :
ax.yaxis.set_ticks_position('left')
else :
ax.yaxis.set_ticks_position('both')
"""
FORTRAN compilation code.
"""
def find_matching_parentheses(s, popen="(", pclose=")") :
i_start = s.find(popen)
i_end = -1
count = 0
s_frame = s[i_start:]
for i in xrange(len(s_frame)) :
char = s_frame[i]
if char == popen :
count += 1
elif char == pclose :
count -= 1
if count == 0 :
i_end = i + i_start + 1
break
return i_start, i_end
def parse_merge(H, s) :
    """
    Parse the first FORTRAN merge statement found within s.
    H is the name of a hidden variable which will be used to store the value of
    the piecewise function defined by the merge statement.

    :return: (start, end, f_code) — the span of the merge statement in `s` and
        an equivalent FORTRAN IF/ELSE block assigning to H
    """
    # extract the bracketed body "(expr1,expr2,cond)" of the first merge
    i_merge_start = s.find("merge")
    tail = s[i_merge_start:]
    i_start, i_end = find_matching_parentheses(tail)
    m_statement = tail[i_start:i_end]
    # split on the first and last commas: merge(expr1, expr2, cond)
    i1 = m_statement.find(",")
    i2 = m_statement.rfind(",")
    expr1 = m_statement[1:i1]
    expr2 = m_statement[i1 + 1:i2]
    cond = m_statement[i2 + 1:-1]
    # each branch may itself contain a nested merge: recurse if so,
    # otherwise emit a direct assignment to the hidden variable
    if expr1.find("merge") != -1 :
        branch1 = parse_merge(H, expr1)[-1]
        branch1 = "".join([" " + line + "\n" for line in branch1.splitlines()])
    else :
        branch1 = " " + H + "=" + expr1
    if expr2.find("merge") != -1 :
        branch2 = parse_merge(H, expr2)[-1]
        branch2 = "".join([" " + line + "\n" for line in branch2.splitlines()])
    else :
        branch2 = " " + H + "=" + expr2
    # assemble the FORTRAN IF-THEN-ELSE equivalent of the merge
    f_code = (" IF (" + cond.strip() + ") THEN \n" + branch1 + "\n" +
              " ELSE \n" + branch2 + "\n" +
              " ENDIF \n")
    return i_merge_start, i_merge_start + i_end, f_code
def FORTRAN_f(x, f, parameters=[], verbose=False) :
    """
    Produce FORTRAN function for evaluating a vector-valued SymPy expression f
    given a state vector x.
    The FORTRAN function will have the signature f_f77(neq, t, X, Y) where neq
    is hidden and Y is an output matrix.

    :param x: symbols making up the state vector
    :param f: SymPy expressions, one per state derivative
    :param parameters: extra symbols appended to the state and held constant
        (their derivatives are emitted as 0)
    :param verbose: print progress messages (Python 2 print statements)
    """
    # TODO remove code for dealing with stochastic systems -- it is not used in
    # this paper
    x = list(x) + list(parameters)
    f = list(f) + [0]*len(parameters)
    # collect any random variables appearing in the dynamics
    rv = list(set((np.concatenate([sy.stats.random_symbols(f_i) for f_i in f]))))
    NR = len(rv)
    if NR > 0 :
        # stochastic systems get dt and a RNG seed appended to the state
        x += [sy.symbols("dt"), sy.symbols("seed")]
        f += [0, 0]
    NX = len(x)
    NY = len(f)
    if NX != NY :
        raise Exception("System is not square!")
    if verbose : print "generating FORTRAN matrices..."
    # X and R are 1-indexed FORTRAN arrays standing in for the state vector
    # and the per-step random draws respectively
    _X = sy.tensor.IndexedBase("X", shape=(NX, ))
    X = [_X[i + 1] for i in xrange(NX)]
    _R = sy.tensor.IndexedBase("R", shape=(NR, ))
    R = [_R[i + 1] for i in xrange(NR)]
    if type(f) != sy.Matrix : f = sy.Matrix(f)
    # WARNING : These substitution steps are VERY SLOW!!! It might be wise to
    # parallelise them in the future, or at least substitute into one dynamical
    # equation at a time so that progress can be monitored.
    if verbose : print "substituting matrix elements for original state variables and parameters (WARNING: SLOW)..."
    f_sub = f.subs(zip(x, X))
    if verbose : print "substituting matrix elements for random variables (WARNING: SLOW)..."
    f_sub = f_sub.subs(zip(rv, R))
    # generate FORTRAN code
    if verbose : print "generating FORTRAN code from dynamics equations..."
    fstrs = [sy.fcode(fi, standard=95) for fi in f_sub]
    # remove whitespace and newlines
    if verbose : print "removing whitespace and newlines..."
    fstrs = ["".join(fi.split()) for fi in fstrs]
    # remove all @ (FORTRAN line continuation indicator)
    if verbose : print "removing line continuations..."
    fstrs = [fi.replace("@", "") for fi in fstrs]
    # find FORTRAN inline merge statements and replace with a hidden "switch"
    # variable whose value is set by a full IF statement at the start of the
    # function call.
    # -- this is needed because FORTRAN77 doesn't support inline merge statements
    Hstrs = [] # to hold hidden switch expressions
    if verbose : print "formatting piecewise functions..."
    for i in xrange(len(fstrs)) :
        while fstrs[i].find("merge") != -1 :
            # H(k) is the k-th hidden switch variable
            H = "H(" + str(len(Hstrs) + 1) + ")"
            i_merge_start, i_merge_end, Hstr = parse_merge(H, fstrs[i])
            fstrs[i] = fstrs[i][:i_merge_start] + H + fstrs[i][i_merge_end:]
            Hstrs.append(Hstr)
    NH = len(Hstrs)
    # format the fstrs to FORTRAN 77 line-length limits
    wrapper = textwrap.TextWrapper(expand_tabs=True,
                                   replace_whitespace=True,
                                   initial_indent=" ",
                                   subsequent_indent=" @ ",
                                   width=60)
    if verbose : print "formatting state equations..."
    for i in xrange(len(fstrs)) :
        fstrs[i] = wrapper.fill("Y(" + str(i + 1) + ")=" + fstrs[i]) + "\n"
    # put the above elements together into a FORTRAN subroutine
    if verbose : print "formatting preamble..."
    hdr = " subroutine f_f77(neq, t, X, Y) \n" +\
          "Cf2py intent(hide) neq \n" +\
          "Cf2py intent(out) Y \n" +\
          " integer neq \n" +\
          " double precision t, X, Y \n" +\
          " dimension X(neq), Y(neq) \n"
    if NH > 0 : hdr += " real, dimension(" + str(NH) + ") :: H \n"
    # TODO fix the following -- assumes dt = 0.01
    # NOTE this is only important when dealing with stochastic systems
    if NR > 0 : hdr += " real, dimension(" + str(NR) + ") :: R \n" +\
                       " integer :: SEED \n" +\
                       " real :: RTRASH \n" +\
                       " SEED = INT((t/" + sy.fcode(X[-2]).strip() +\
                       ") + " + sy.fcode(X[-1]).strip() + ") \n" +\
                       " CALL SRAND(SEED) \n" +\
                       " DO i=1,4 \n" +\
                       " RTRASH=RAND(0) \n" +\
                       " END DO \n"
    # one fresh uniform draw per random variable each call
    R_block = "".join([sy.fcode(R_i) + "=RAND(0) \n" for R_i in R])
    H_block = "".join(Hstrs)
    Y_block = "".join(fstrs)
    if verbose : print "assembling source code blocks..."
    fcode = hdr + R_block + H_block + Y_block + " return \n" + " end \n"
    # final formatting
    if verbose : print "final source code formatting..."
    wrapper = textwrap.TextWrapper(expand_tabs=True, replace_whitespace=True,
                                   initial_indent="", subsequent_indent=" @ ", width=60)
    fcode = "".join([wrapper.fill(src) + "\n" for src in fcode.split("\n")])
    return fcode
def FORTRAN_jacobian(x, jac, parameters=[]) :
    """
    Produce a FORTRAN jac_f77 subroutine evaluating the Jacobian `jac` of a
    system with state vector `x` (plus optional constant `parameters`).
    """
    # TODO document
    # TODO remove this function if unused in paper
    NX = len(x)
    NP = len(parameters)
    Nrowpd = jac.shape[0]
    Ncolpd = jac.shape[1]
    # chained comparison: True when NX != Nrowpd OR Nrowpd != Ncolpd,
    # i.e. the Jacobian must be square and match the state dimension
    if NX != Nrowpd != Ncolpd :
        raise Exception("System is not square!")
    # X is a 1-indexed FORTRAN array standing in for state then parameters
    _X = sy.tensor.IndexedBase("X", shape=(NX, ))
    X = [_X[i + 1] for i in xrange(NX)]
    X = X + [_X[NX + i + 1] for i in xrange(NP)]
    # NOTE(review): this `== sy.Matrix` check looks inverted relative to
    # FORTRAN_f, which converts when type(f) != sy.Matrix — verify intent
    if type(jac) == sy.Matrix : jac = sy.Matrix(jac)
    jac_sub = jac.subs(zip(list(x) + list(parameters), X))
    # (row, col) index pairs in row-major order, matching jac_sub iteration
    ijs = [i for i in it.product(xrange(Nrowpd), xrange(Ncolpd))]
    # generate FORTRAN code
    fstrs = [sy.fcode(jac_ij) for jac_ij in jac_sub]
    # remove whitespace and newlines
    fstrs = ["".join(jac_ij.split()) for jac_ij in fstrs]
    # remove all @ (FORTRAN line continuation indicator)
    fstrs = [jac_ij.replace("@", "") for jac_ij in fstrs]
    # find FORTRAN inline merge statements and replace with a hidden "switch"
    # variable whose value is set by a full IF statement at the start of the
    # function call.
    # -- this is needed because FORTRAN77 doesn't support inline merge statements
    Hstrs = [] # to hold hidden switch expressions
    for i in xrange(len(fstrs)) :
        while fstrs[i].find("merge") != -1 :
            H = "H(" + str(len(Hstrs) + 1) + ")"
            i_merge_start, i_merge_end, Hstr = parse_merge(H, fstrs[i])
            fstrs[i] = fstrs[i][:i_merge_start] + H + fstrs[i][i_merge_end:]
            Hstrs.append(Hstr)
    NH = len(Hstrs)
    # format the fstrs to FORTRAN 77 line-length limits
    wrapper = textwrap.TextWrapper(expand_tabs=True,
                                   replace_whitespace=True,
                                   initial_indent=" ",
                                   subsequent_indent=" @ ",
                                   width=60)
    for k in xrange(len(fstrs)) :
        i, j = ijs[k]
        fstrs[k] = wrapper.fill("pd(" + str(i + 1) + "," + str(j + 1) + ")=" + fstrs[k]) + "\n"
    # put the above elements together into a FORTRAN subroutine
    hdr = " subroutine jac_f77(neq, t, X, ml, mu, pd, nrowpd) \n" +\
          "Cf2py intent(hide) neq, ml, mu, nrowpd \n" +\
          "Cf2py intent(out) pd \n" +\
          " integer neq, ml, mu, nrowpd \n" +\
          " double precision t, X, pd \n" +\
          " dimension X(neq), pd(neq, neq) \n"
    if NH > 0 : hdr += " real, dimension(" + str(NH) + ") :: H \n"
    H_block = "".join(Hstrs)
    pd_block = "".join(fstrs)
    fcode = hdr + H_block + pd_block + " return \n" + " end \n"
    return fcode
def FORTRAN_compile(fcode) :
    """
    Compile FORTRAN source through odespy's f2py wrapper and clean up the
    temporary shared library it leaves behind; return the callable.
    """
    compiled = ode.compile_f77(fcode)
    os.remove("tmp_callback.so")
    return compiled
"""
Numerical integration code.
"""
def FORTRAN_integrate(t, x0, f, p0=[], jac=None, rtol=0.0001, atol=0.0001) :
    """
    Integrate the compiled FORTRAN system `f` over times `t` starting from
    state `x0`; parameters `p0` are appended to the state vector.
    Returns the solution array.
    """
    solver = ode.Lsodes(f=None, f_f77=f, jac_f77=jac, rtol=rtol, atol=atol)
    solver.set_initial_condition(list(x0) + list(p0))
    solution, _ = solver.solve(t)
    return solution
| |
import os
import sys
import difflib
import __builtin__
import re
import pydoc
import inspect
import keyword
import unittest
import xml.etree
import test.test_support
from collections import namedtuple
from test.script_helper import assert_python_ok
from test.test_support import (
TESTFN, rmtree, reap_children, captured_stdout)
from test import pydoc_mod
expected_text_pattern = \
"""
NAME
test.pydoc_mod - This is a test module for test_pydoc
FILE
%s
%s
CLASSES
__builtin__.object
B
A
\x20\x20\x20\x20
class A
| Hello and goodbye
|\x20\x20
| Methods defined here:
|\x20\x20
| __init__()
| Wow, I have no function!
\x20\x20\x20\x20
class B(__builtin__.object)
| Data descriptors defined here:
|\x20\x20
| __dict__
| dictionary for instance variables (if defined)
|\x20\x20
| __weakref__
| list of weak references to the object (if defined)
|\x20\x20
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|\x20\x20
| NO_MEANING = 'eggs'
FUNCTIONS
doc_func()
This function solves all of the world's problems:
hunger
lack of Python
war
\x20\x20\x20\x20
nodoc_func()
DATA
__author__ = 'Benjamin Peterson'
__credits__ = 'Nobody'
__version__ = '1.2.3.4'
VERSION
1.2.3.4
AUTHOR
Benjamin Peterson
CREDITS
Nobody
""".strip()
expected_html_pattern = \
"""
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="#7799ee">
<td valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"> <br><big><big><strong><a href="test.html"><font color="#ffffff">test</font></a>.pydoc_mod</strong></big></big> (version 1.2.3.4)</font></td
><td align=right valign=bottom
><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="file:%s">%s</a>%s</font></td></tr></table>
<p><tt>This is a test module for test_pydoc</tt></p>
<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ee77aa">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#ee77aa"><tt> </tt></td><td> </td>
<td width="100%%"><dl>
<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
</font></dt><dd>
<dl>
<dt><font face="helvetica, arial"><a href="test.pydoc_mod.html#B">B</a>
</font></dt></dl>
</dd>
<dt><font face="helvetica, arial"><a href="test.pydoc_mod.html#A">A</a>
</font></dt></dl>
<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ffc8d8">
<td colspan=3 valign=bottom> <br>
<font color="#000000" face="helvetica, arial"><a name="A">class <strong>A</strong></a></font></td></tr>
\x20\x20\x20\x20
<tr bgcolor="#ffc8d8"><td rowspan=2><tt> </tt></td>
<td colspan=2><tt>Hello and goodbye<br> </tt></td></tr>
<tr><td> </td>
<td width="100%%">Methods defined here:<br>
<dl><dt><a name="A-__init__"><strong>__init__</strong></a>()</dt><dd><tt>Wow, I have no function!</tt></dd></dl>
</td></tr></table> <p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ffc8d8">
<td colspan=3 valign=bottom> <br>
<font color="#000000" face="helvetica, arial"><a name="B">class <strong>B</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#ffc8d8"><tt> </tt></td><td> </td>
<td width="100%%">Data descriptors defined here:<br>
<dl><dt><strong>__dict__</strong></dt>
<dd><tt>dictionary for instance variables (if defined)</tt></dd>
</dl>
<dl><dt><strong>__weakref__</strong></dt>
<dd><tt>list of weak references to the object (if defined)</tt></dd>
</dl>
<hr>
Data and other attributes defined here:<br>
<dl><dt><strong>NO_MEANING</strong> = 'eggs'</dl>
</td></tr></table></td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#eeaa77">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#eeaa77"><tt> </tt></td><td> </td>
<td width="100%%"><dl><dt><a name="-doc_func"><strong>doc_func</strong></a>()</dt><dd><tt>This function solves all of the world's problems:<br>
hunger<br>
lack of Python<br>
war</tt></dd></dl>
<dl><dt><a name="-nodoc_func"><strong>nodoc_func</strong></a>()</dt></dl>
</td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#55aa55">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#55aa55"><tt> </tt></td><td> </td>
<td width="100%%"><strong>__author__</strong> = 'Benjamin Peterson'<br>
<strong>__credits__</strong> = 'Nobody'<br>
<strong>__version__</strong> = '1.2.3.4'</td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#7799ee">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Author</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#7799ee"><tt> </tt></td><td> </td>
<td width="100%%">Benjamin Peterson</td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#7799ee">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Credits</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#7799ee"><tt> </tt></td><td> </td>
<td width="100%%">Nobody</td></tr></table>
""".strip()
# output pattern for missing module
missing_pattern = "no Python documentation found for '%s'"
# output pattern for module with bad imports
badimport_pattern = "problem in %s - <type 'exceptions.ImportError'>: No module named %s"
def run_pydoc(module_name, *args, **env):
    """
    Runs pydoc on the specified module. Returns the stripped
    output of pydoc.
    """
    # the module name goes last on the command line, after any flags
    cmd_args = args + (module_name,)
    # -B: do not write bytecode files to avoid caching errors
    rc, out, err = assert_python_ok('-B', pydoc.__file__, *cmd_args, **env)
    return out.strip()
def get_pydoc_html(module):
    "Returns pydoc generated output as html, plus the module-docs link"
    renderer = pydoc.HTMLDoc()
    html = renderer.docmodule(module)
    loc = renderer.getdocloc(pydoc_mod) or ""
    if loc:
        loc = "<br><a href=\"" + loc + "\">Module Docs</a>"
    return html.strip(), loc
def get_pydoc_text(module):
    "Returns pydoc generated output as text, plus the module-docs link"
    renderer = pydoc.TextDoc()
    loc = renderer.getdocloc(pydoc_mod) or ""
    if loc:
        loc = "\nMODULE DOCS\n " + loc + "\n"
    raw = renderer.docmodule(module)
    # cleanup pydoc's overstrike bold/underline formatting: '\b' is a
    # literal backspace character, so each backspace+char pair is deleted
    cleaned = re.sub('\b.', '', raw)
    return cleaned.strip(), loc
def print_diffs(text1, text2):
    "Prints unified diffs for two texts"
    # keepends=True so difflib diffs whole lines faithfully
    lines1 = text1.splitlines(True)
    lines2 = text2.splitlines(True)
    # n=0: no context lines around the differences
    diffs = difflib.unified_diff(lines1, lines2, n=0, fromfile='expected',
                                 tofile='got')
    # Python 2 print statement
    print '\n' + ''.join(diffs)
class PyDocDocTest(unittest.TestCase):
    """Checks pydoc's text and HTML renderings of the test.pydoc_mod sample
    module against the expected patterns defined above."""

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_html_doc(self):
        # render the sample module and compare against the expected HTML
        result, doc_loc = get_pydoc_html(pydoc_mod)
        mod_file = inspect.getabsfile(pydoc_mod)
        if sys.platform == 'win32':
            import nturl2path
            mod_url = nturl2path.pathname2url(mod_file)
        else:
            mod_url = mod_file
        expected_html = expected_html_pattern % (mod_url, mod_file, doc_loc)
        if result != expected_html:
            print_diffs(expected_html, result)
            self.fail("outputs are not equal, see diff above")

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_text_doc(self):
        # render the sample module and compare against the expected plain text
        result, doc_loc = get_pydoc_text(pydoc_mod)
        expected_text = expected_text_pattern % \
                        (inspect.getabsfile(pydoc_mod), doc_loc)
        if result != expected_text:
            print_diffs(expected_text, result)
            self.fail("outputs are not equal, see diff above")

    def test_issue8225(self):
        # Test issue8225 to ensure no doc link appears for xml.etree
        result, doc_loc = get_pydoc_text(xml.etree)
        self.assertEqual(doc_loc, "", "MODULE DOCS incorrectly includes a link")

    def test_not_here(self):
        # pydoc on a nonexistent module reports "no documentation found"
        missing_module = "test.i_am_not_here"
        result = run_pydoc(missing_module)
        expected = missing_pattern % missing_module
        self.assertEqual(expected, result,
                         "documentation for missing module found")

    def test_input_strip(self):
        # surrounding whitespace in the module name should be stripped
        missing_module = " test.i_am_not_here "
        result = run_pydoc(missing_module)
        expected = missing_pattern % missing_module.strip()
        self.assertEqual(expected, result,
                         "white space was not stripped from module name "
                         "or other error output mismatch")

    def test_stripid(self):
        # test with strings, other implementations might have different repr()
        stripid = pydoc.stripid
        # strip the id
        self.assertEqual(stripid('<function stripid at 0x88dcee4>'),
                         '<function stripid>')
        self.assertEqual(stripid('<function stripid at 0x01F65390>'),
                         '<function stripid>')
        # nothing to strip, return the same text
        self.assertEqual(stripid('42'), '42')
        self.assertEqual(stripid("<type 'exceptions.Exception'>"),
                         "<type 'exceptions.Exception'>")
class PydocImportTest(unittest.TestCase):
    """Regression tests for pydoc's handling of broken or unreadable imports."""

    def setUp(self):
        # NOTE(review): os.mkdir returns None, so self.test_dir is always
        # None; the tests use the TESTFN path directly
        self.test_dir = os.mkdir(TESTFN)
        self.addCleanup(rmtree, TESTFN)

    def test_badimport(self):
        # This tests the fix for issue 5230, where if pydoc found the module
        # but the module had an internal import error pydoc would report no doc
        # found.
        modname = 'testmod_xyzzy'
        testpairs = (
            ('i_am_not_here', 'i_am_not_here'),
            ('test.i_am_not_here_either', 'test.i_am_not_here_either'),
            ('test.i_am_not_here.neither_am_i', 'test.i_am_not_here'),
            ('i_am_not_here.{}'.format(modname), 'i_am_not_here'),
            ('test.{}'.format(modname), 'test.{}'.format(modname)),
        )
        sourcefn = os.path.join(TESTFN, modname) + os.extsep + "py"
        for importstring, expectedinmsg in testpairs:
            # the generated module imports a missing module, triggering
            # the internal ImportError pydoc must report
            with open(sourcefn, 'w') as f:
                f.write("import {}\n".format(importstring))
            result = run_pydoc(modname, PYTHONPATH=TESTFN)
            expected = badimport_pattern % (modname, expectedinmsg)
            self.assertEqual(expected, result)

    def test_apropos_with_bad_package(self):
        # Issue 7425 - pydoc -k failed when bad package on path
        pkgdir = os.path.join(TESTFN, "syntaxerr")
        os.mkdir(pkgdir)
        badsyntax = os.path.join(pkgdir, "__init__") + os.extsep + "py"
        with open(badsyntax, 'w') as f:
            f.write("invalid python syntax = $1\n")
        result = run_pydoc('zqwykjv', '-k', PYTHONPATH=TESTFN)
        self.assertEqual('', result)

    def test_apropos_with_unreadable_dir(self):
        # Issue 7367 - pydoc -k failed when unreadable dir on path
        self.unreadable_dir = os.path.join(TESTFN, "unreadable")
        # mode 0: directory created with no permissions at all
        os.mkdir(self.unreadable_dir, 0)
        self.addCleanup(os.rmdir, self.unreadable_dir)
        # Note, on Windows the directory appears to be still
        # readable so this is not really testing the issue there
        result = run_pydoc('zqwykjv', '-k', PYTHONPATH=TESTFN)
        self.assertEqual('', result)
class TestDescriptions(unittest.TestCase):
    """Checks pydoc.describe/render_doc summaries for modules, classic and
    new-style classes, and namedtuples."""

    def test_module(self):
        # Check that pydocfodder module can be described
        from test import pydocfodder
        doc = pydoc.render_doc(pydocfodder)
        self.assertIn("pydocfodder", doc)

    def test_classic_class(self):
        class C: "Classic class"
        c = C()
        self.assertEqual(pydoc.describe(C), 'class C')
        # classic (old-style) instances are described as "instance of ..."
        self.assertEqual(pydoc.describe(c), 'instance of C')
        expected = 'instance of C in module %s' % __name__
        self.assertIn(expected, pydoc.render_doc(c))

    def test_class(self):
        class C(object): "New-style class"
        c = C()
        self.assertEqual(pydoc.describe(C), 'class C')
        # new-style instances are described by bare class name
        self.assertEqual(pydoc.describe(c), 'C')
        expected = 'C in module %s object' % __name__
        self.assertIn(expected, pydoc.render_doc(c))

    def test_namedtuple_public_underscore(self):
        # rename=True replaces the keyword 'def' with positional name '_1';
        # such underscore names must still appear in help() output
        NT = namedtuple('NT', ['abc', 'def'], rename=True)
        with captured_stdout() as help_io:
            help(NT)
        helptext = help_io.getvalue()
        self.assertIn('_1', helptext)
        self.assertIn('_replace', helptext)
        self.assertIn('_asdict', helptext)
class TestHelper(unittest.TestCase):
def test_keywords(self):
self.assertEqual(sorted(pydoc.Helper.keywords),
sorted(keyword.kwlist))
def test_builtin(self):
for name in ('str', 'str.translate', '__builtin__.str',
'__builtin__.str.translate'):
# test low-level function
self.assertIsNotNone(pydoc.locate(name))
# test high-level function
try:
pydoc.render_doc(name)
except ImportError:
self.fail('finding the doc of {!r} failed'.format(o))
for name in ('not__builtin__', 'strrr', 'strr.translate',
'str.trrrranslate', '__builtin__.strrr',
'__builtin__.str.trrranslate'):
self.assertIsNone(pydoc.locate(name))
self.assertRaises(ImportError, pydoc.render_doc, name)
def test_main():
    """Run every pydoc test case, reaping child processes afterwards."""
    suites = (PyDocDocTest,
              PydocImportTest,
              TestDescriptions,
              TestHelper)
    try:
        test.test_support.run_unittest(*suites)
    finally:
        reap_children()


if __name__ == "__main__":
    test_main()
| |
from __future__ import division
import datetime
import multiprocessing
from multiprocessing import sharedctypes
import signal
import sys
import threading
import warnings
import numpy
import six
from chainer.dataset import iterator
from chainer.iterators._statemachine import (IteratorState,
iterator_statemachine)
from chainer.iterators.order_samplers import ShuffleOrderSampler
_response_time = 0.1
def _raise_timeout_warning():
    """Emit ``MultiprocessIterator.TimeoutWarning`` for a stalled dataset."""
    message = (
        'Stalled dataset is detected. '
        'See the documentation of MultiprocessIterator for common causes and '
        'workarounds:\n'
        'https://docs.chainer.org/en/stable/reference/generated/'
        'chainer.iterators.MultiprocessIterator.html')
    warnings.warn(message, MultiprocessIterator.TimeoutWarning)
class MultiprocessIterator(iterator.Iterator):

    """Dataset iterator that loads examples in parallel.

    This is an implementation of :class:`~chainer.dataset.Iterator` that loads
    examples with worker processes. It uses the standard :mod:`multiprocessing`
    module to parallelize the loading. The dataset is sent to the worker
    processes in the standard way using pickle.

    Note that this iterator effectively prefetches the examples for the next
    batch asynchronously after the current batch is returned.

    This iterator saves ``-1`` instead of ``None`` in snapshots since some
    serializers do not support ``None``.

    .. note::

        When you are using OpenCV somewhere in your code and the
        ``MultiprocessIterator`` is used in the training code, the
        training loop may get stuck at some point. In such situation,
        there are several workarounds to prevent the process got stuck.

        1. Set the environment variable as follows: ``OMP_NUM_THREADS=1``
        2. Add ``cv2.setNumThreads(0)`` right after ``import cv2`` in your
           training script.
        3. Use :class:`~chainer.iterators.MultithreadIterator` instead of
           ``MultiprocessIterator``.

    Args:
        dataset (~chainer.dataset.Dataset): Dataset to iterate.
        batch_size (int): Number of examples within each batch.
        repeat (bool): If ``True``, it infinitely loops over the dataset.
            Otherwise, it stops iteration at the end of the first epoch.
        shuffle (bool): If ``True``, the order of examples is shuffled at the
            beginning of each epoch. Otherwise, examples are extracted in the
            order of indexes. If ``None`` and no ``order_sampler`` is given,
            the behavior is the same as the case with ``shuffle=True``.
        n_processes (int): Number of worker processes. The number of CPUs is
            used by default.
        n_prefetch (int): Number of prefetch batches.
        shared_mem (int): The size of using shared memory per data.
            If ``None``, size is adjusted automatically.
        dataset_timeout (float): :class:`MultiprocessIterator.TimeoutWarning`
            will be issued after this time in seconds elapsed in each dataset
            realization. ``None`` to disable the warning. You can turn this
            warning into an error by using :func:`warnings.simplefilter`::

                warnings.simplefilter(
                    'error',
                    chainer.iterators.MultiprocessIterator.TimeoutWarning)

        order_sampler (callable): A callable that generates the order
            of the indices to sample in the next epoch when a epoch finishes.
            This function should take two arguments: the current order
            and the current position of the iterator.
            This should return the next order. The size of the order
            should remain constant.
            This option cannot be used when ``shuffle`` is not ``None``.
        maxtasksperchild (int): Number of tasks a worker of prefetch process
            can complete before it will exit and be replaced with a fresh
            worker process, to enable unused resources to be freed. If
            ``None``, worker processes will live as long as the pool.

    """

    class TimeoutWarning(RuntimeWarning):
        pass

    _interruption_testing = False  # for testing
    # Class-level defaults keep __del__ safe even if __init__ fails part-way.
    _finalized = False
    _prefetch_loop = None
    _comm = None

    def __init__(self, dataset, batch_size, repeat=True, shuffle=None,
                 n_processes=None, n_prefetch=1, shared_mem=None,
                 order_sampler=None, dataset_timeout=30.0,
                 maxtasksperchild=None):
        self.dataset = dataset
        self.batch_size = batch_size
        self.repeat = repeat
        self.shuffle = shuffle
        self.n_processes = n_processes or multiprocessing.cpu_count()
        self.n_prefetch = max(n_prefetch, 1)
        self.shared_mem = shared_mem
        self.dataset_timeout = dataset_timeout
        self._maxtasksperchild = maxtasksperchild

        # `shuffle` and a custom `order_sampler` are mutually exclusive.
        if self.shuffle is not None:
            if order_sampler is not None:
                raise ValueError('`shuffle` is not `None` and a custom '
                                 '`order_sampler` is set. Please set '
                                 '`shuffle` to `None` to use the custom '
                                 'order sampler.')
            else:
                if self.shuffle:
                    order_sampler = ShuffleOrderSampler()
        else:
            if order_sampler is None:
                order_sampler = ShuffleOrderSampler()
        self.order_sampler = order_sampler

        self._comm = _Communicator(self.n_prefetch, dataset_timeout)
        self.reset()

        self._prefetch_loop = _PrefetchLoop(
            self.dataset, self.batch_size, self.repeat,
            self.n_processes, self.n_prefetch, self.shared_mem,
            self._comm, self.order_sampler,
            self._interruption_testing, self._maxtasksperchild)
        # defer launching prefetch thread until creating the worker pool,
        # not to leave a background thread in forked processes.

    def __next__(self):
        """Return the next batch, lazily starting the prefetch machinery."""
        measure_mode = False
        if self._prefetch_loop.thread is None:
            if self._prefetch_loop.measure_required():
                # First batch is fetched synchronously so the required
                # shared-memory size can be measured before the pool starts.
                measure_mode = True
                batch, state = self._prefetch_loop.measure(
                    self.dataset_timeout)
            self._prefetch_loop.launch_thread()

        if not measure_mode:
            batch, state = self._comm.get()

        self._previous_epoch_detail = self.epoch_detail
        self._state = state

        if batch is None:
            raise StopIteration
        else:
            return batch

    next = __next__

    def __del__(self):
        """Shut down the communicator and prefetch loop (idempotent)."""
        if self._finalized:
            return

        if self._comm is not None:
            self._comm.terminate()

        if self._prefetch_loop is not None:
            self._prefetch_loop.terminate()

        self._comm = None
        self._prefetch_loop = None
        self._finalized = True

    finalize = __del__

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.finalize()

    def __copy__(self):
        # This function is implemented for backward compatibility.
        # Please use `reset` normally.
        other = MultiprocessIterator(
            self.dataset, self.batch_size, self.repeat, shuffle=None,
            n_processes=self.n_processes, n_prefetch=self.n_prefetch,
            shared_mem=self.shared_mem, order_sampler=self.order_sampler)

        other._reset_state(self.current_position, self.epoch,
                           self.is_new_epoch, self._state.order)
        other._previous_epoch_detail = self._previous_epoch_detail
        return other

    @property
    def current_position(self):
        return self._state.current_position

    @property
    def epoch(self):
        return self._state.epoch

    @property
    def is_new_epoch(self):
        return self._state.is_new_epoch

    @property
    def epoch_detail(self):
        # Fractional epoch progress, e.g. 1.5 == half-way through 2nd epoch.
        return self.epoch + self.current_position / self._epoch_size

    @property
    def previous_epoch_detail(self):
        # -1 is the serialized stand-in for "no previous epoch yet" (None).
        if self._previous_epoch_detail < 0:
            return None
        return self._previous_epoch_detail

    def serialize(self, serializer):
        """Save or restore the iterator state through *serializer*."""
        current_position = serializer('current_position',
                                      self.current_position)
        epoch = serializer('epoch', self.epoch)
        is_new_epoch = serializer('is_new_epoch', self.is_new_epoch)
        order = self._state.order.copy()
        try:
            serializer('order', order)
        except KeyError:
            # Older snapshots stored the order under '_order'.
            serializer('_order', order)
        self._reset_state(current_position, epoch, is_new_epoch, order)
        try:
            self._previous_epoch_detail = serializer(
                'previous_epoch_detail', self._previous_epoch_detail)
        except KeyError:
            # guess previous_epoch_detail for older version
            self._previous_epoch_detail = self.epoch + \
                (self.current_position - self.batch_size) / self._epoch_size
            if self.epoch_detail > 0:
                self._previous_epoch_detail = max(
                    self._previous_epoch_detail, 0.)
            else:
                self._previous_epoch_detail = -1.

    def reset(self):
        """Reset the iterator to the beginning of a fresh epoch sequence."""
        if self.order_sampler is None:
            order = None
        else:
            order = self.order_sampler(numpy.arange(len(self.dataset)), 0)
        self._reset_state(0, 0, False, order)
        self._previous_epoch_detail = -1.

    def _reset_state(self, current_position, epoch, is_new_epoch, order):
        # Rebuild the immutable iterator state and push it to the
        # prefetch side through the communicator.
        if self._finalized:
            raise NotImplementedError(
                'Reset of finalized MultiProcessIterator is currently not '
                'supported.')
        self._state = IteratorState(current_position, epoch, is_new_epoch,
                                    order)
        self._comm.reset(self._state)

    @property
    def _epoch_size(self):
        # Without an order sampler an epoch spans the whole dataset.
        order = self._state.order
        if order is None:
            epoch_size = len(self.dataset)
        else:
            epoch_size = len(order)
        return epoch_size
class _Communicator(object):

    """Thread-safe mailbox between the iterator and the prefetch thread.

    The iterator consumes batches via :meth:`get` and posts control
    signals (reset/terminate); the prefetch thread polls :meth:`check`
    for those signals and delivers batches via :meth:`put`.
    """

    STATUS_CONTINUE = 0
    STATUS_RESET = 1
    STATUS_TERMINATE = 2

    def __init__(self, n_prefetch, dataset_timeout):
        self.n_prefetch = n_prefetch          # max queued batches
        self.dataset_timeout = dataset_timeout  # seconds before warning

        # Both condition variables share one lock so queue contents and
        # status transitions are always observed consistently.
        self._lock = threading.Lock()
        self._not_empty_cond = threading.Condition(self._lock)
        self._not_full_cond = threading.Condition(self._lock)

        self._batch_queue = []
        self._status = _Communicator.STATUS_CONTINUE
        self._reset_count = 0

    @property
    def is_terminated(self):
        with self._lock:
            return self._status == _Communicator.STATUS_TERMINATE

    # called from iterator
    def get(self):
        """Block until a batch is available; return (batch, prefetch_state)."""
        with self._lock:
            start = datetime.datetime.now()
            while len(self._batch_queue) == 0:
                # Wake periodically so a stalled dataset can be reported.
                self._not_empty_cond.wait(_response_time)
                dt = datetime.datetime.now() - start
                if (self.dataset_timeout is not None
                        and dt > datetime.timedelta(
                            seconds=self.dataset_timeout)):
                    _raise_timeout_warning()
            batch, prefetch_state = self._batch_queue.pop(0)
            self._not_full_cond.notify()
            return batch, prefetch_state

    # called from iterator
    def reset(self, prefetch_state):
        """Ask the prefetch thread to restart from *prefetch_state*."""
        with self._lock:
            self._status = _Communicator.STATUS_RESET
            self._prefetch_state = prefetch_state
            self._batch_queue = []
            self._not_full_cond.notify()
            # Bump the epoch counter so stale in-flight batches are dropped.
            self._reset_count += 1

    # called from iterator
    def terminate(self):
        """Ask the prefetch thread to stop; drop any queued batches."""
        with self._lock:
            self._status = _Communicator.STATUS_TERMINATE
            self._batch_queue = []
            self._not_full_cond.notify()
            self._reset_count += 1

    # called from thread
    def check(self):
        """Consume and return the pending control signal.

        Returns ``(status, prefetch_state, reset_count)``; the state is
        only non-None when status is ``STATUS_RESET``.
        """
        with self._lock:
            status = self._status
            self._status = _Communicator.STATUS_CONTINUE
            prefetch_state = None
            if status == _Communicator.STATUS_RESET:
                prefetch_state = self._prefetch_state
            return status, prefetch_state, self._reset_count

    # called from thread
    def put(self, batch, prefetch_state, reset_count):
        """Queue a fetched batch unless a reset happened while fetching."""
        with self._lock:
            if len(self._batch_queue) == self.n_prefetch:
                self._not_full_cond.wait()
            # A reset during the fetch makes this batch stale; drop it.
            if reset_count == self._reset_count:
                self._batch_queue.append((batch, prefetch_state))
                self._not_empty_cond.notify()
class _PrefetchLoop(object):

    """Background producer that fetches batches through a worker pool.

    A daemon thread runs :meth:`_run`, repeatedly asking the iterator
    state machine for the next indices, dispatching the fetch to the
    multiprocessing pool, and handing finished batches to the
    communicator.
    """

    _thread = None
    _pool = None
    _terminating = False

    def __init__(self, dataset, batch_size, repeat,
                 n_processes, n_prefetch, mem_size, comm,
                 order_sampler,
                 _interruption_testing, maxtasksperchild):
        self.dataset = dataset
        self.batch_size = batch_size
        self.repeat = repeat
        self.n_processes = n_processes
        self.mem_size = mem_size
        self._comm = comm
        self.order_sampler = order_sampler
        self.maxtasksperchild = maxtasksperchild

        self._allocate_shared_memory()

        self._interruption_testing = _interruption_testing

    def terminate(self):
        self._terminating = True

        # Terminate the thread first because it depends on the pool.
        if self._thread is not None:
            while self._thread.is_alive():
                self._thread.join(_response_time)

        if self._pool is not None:
            self._pool.terminate()

        self._thread = None
        self._pool = None

    @property
    def thread(self):
        return self._thread

    def measure_required(self):
        # True until the per-example shared-memory size has been measured.
        return self.mem_size is None

    def measure(self, dataset_timeout):
        """Fetch one batch synchronously to size the shared-memory buffer.

        ``dataset_timeout`` is a timeout in seconds or None.
        """
        status, prefetch_state, _ = self._comm.check()
        if status == _Communicator.STATUS_RESET:
            self.prefetch_state = prefetch_state

        self.prefetch_state, indices = iterator_statemachine(
            self.prefetch_state, self.batch_size, self.repeat,
            self.order_sampler, len(self.dataset))
        if indices is None:  # stop iteration
            batch = None
        else:
            # No pool yet: fetch in this process, optionally in a watchdog
            # thread so a stalled dataset can be reported.
            batch_ret = [None]

            def fetch_batch():
                batch_ret[0] = [self.dataset[idx] for idx in indices]

            if dataset_timeout is None:
                # Timeout is not set: fetch synchronously
                fetch_batch()
            else:
                # Timeout is set: fetch asynchronously and watch for timeout
                thr = threading.Thread(target=fetch_batch)
                thr.daemon = True
                thr.start()
                thr.join(dataset_timeout)
                if thr.is_alive():
                    _raise_timeout_warning()
                thr.join()
            batch = batch_ret[0]

            # Size the shared buffer by the largest example in this batch.
            self.mem_size = max(map(_measure, batch))
            self._allocate_shared_memory()

        return batch, self.prefetch_state

    def _allocate_shared_memory(self):
        if self.measure_required():
            self.mem_bulk = None
        else:
            # One contiguous byte buffer with a mem_size slot per example.
            self.mem_bulk = \
                sharedctypes.RawArray('b', self.batch_size * self.mem_size)

    def launch_thread(self):
        """Create the worker pool and start the prefetch daemon thread."""
        self._pool = multiprocessing.Pool(
            processes=self.n_processes,
            initializer=_fetch_setup,
            initargs=(self.dataset, self.mem_size, self.mem_bulk),
            maxtasksperchild=self.maxtasksperchild)
        if self._interruption_testing:
            pids = self._pool.map(_report_pid, range(self.n_processes))
            print(' '.join(map(str, pids)))
            sys.stdout.flush()

        thread = threading.Thread(target=self._run, name='prefetch_loop')
        thread.setDaemon(True)
        thread.start()

        self._thread = thread
        return thread

    def _run(self):
        # The entry routine of the prefetch thread.
        alive = True
        try:
            while alive:
                if self._terminating:
                    break
                alive = self._task()
        finally:
            self._pool.close()
            self._pool.join()

    def _task(self):
        # Do a single task in the prefetch thread.
        # Returns a bool indicating whether the loop should continue running.
        status, prefetch_state, reset_count = self._comm.check()
        if status == _Communicator.STATUS_RESET:
            self.prefetch_state = prefetch_state
        elif status == _Communicator.STATUS_TERMINATE:
            return False  # stop loop

        self.prefetch_state, indices = iterator_statemachine(
            self.prefetch_state, self.batch_size, self.repeat,
            self.order_sampler, len(self.dataset))
        if indices is None:  # stop iteration
            batch = None
        else:
            future = self._pool.map_async(_fetch_run, enumerate(indices))
            while True:
                try:
                    data_all = future.get(_response_time)
                except multiprocessing.TimeoutError:
                    # Poll so a terminate request is noticed while the
                    # workers are still busy.
                    if self._comm.is_terminated:
                        return False
                else:
                    break
            batch = [_unpack(data, self.mem_bulk) for data in data_all]

        self._comm.put(batch, self.prefetch_state, reset_count)
        return True
# Using a `parameterized` function (e.g. a bound method) with Pool is tricky
# because of restrictions imposed by pickle, and the set of picklable types
# differs across Python versions. Using top-level functions with module
# globals is the safest approach. This neither breaks thread safety nor leaks
# the globals between workers: each worker process has its own address space.
# To make the static linter happy, we first initialize the global variables.
# Worker-process globals, populated by _fetch_setup via the Pool initializer.
_fetch_dataset = None
_fetch_mem_size = None
_fetch_mem_bulk = None


def _fetch_setup(dataset, mem_size, mem_bulk):
    """Pool initializer: stash the fetch state in worker-process globals."""
    global _fetch_dataset, _fetch_mem_size, _fetch_mem_bulk
    # Workers ignore SIGINT; the parent process handles KeyboardInterrupt.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    _fetch_dataset = dataset
    _fetch_mem_size = mem_size
    _fetch_mem_bulk = mem_bulk
def _fetch_run(inputs):
    """Worker-side fetch of one example, packed into shared memory if set up.

    *inputs* is a ``(slot, dataset_index)`` pair; the slot selects which
    region of the shared bulk buffer this example may occupy.
    """
    slot, dataset_index = inputs
    example = _fetch_dataset[dataset_index]
    if _fetch_mem_bulk is None:
        return example
    start = slot * _fetch_mem_size
    return _pack(example, _fetch_mem_bulk, start, start + _fetch_mem_size)
def _report_pid(_): # for testing
return multiprocessing.current_process().pid
class _PackedNdarray(object):
def __init__(self, array, mem, offset):
self.shape = array.shape
self.dtype = array.dtype
self.nbytes = array.nbytes
self.size = array.size
self.offset = offset
total = self.offset + self.nbytes
if total > len(mem):
raise ValueError(
'Shared memory size is too small. expect:{}, actual:{}'.format(
total, len(mem)))
target = numpy.frombuffer(mem, self.dtype, self.size, self.offset)
target[...] = array.ravel()
def unpack(self, mem):
ret = numpy.frombuffer(mem, self.dtype, self.size, self.offset)
ret = ret.reshape(self.shape).copy()
return ret
def _measure(data):
expect = 0
t = type(data)
if t is tuple or t is list or t is dict:
for v in data:
if isinstance(v, numpy.ndarray):
expect += v.nbytes
return expect
def _pack(data, mem, offset, limit):
    """Pack the ndarrays inside *data* into the shared buffer *mem*.

    Arrays found in tuples, lists, dicts (one level deep) or given as a
    bare array are replaced by ``_PackedNdarray`` placeholders occupying
    ``mem[offset:limit]``. Arrays that do not fit are left as-is (they
    will be pickled) and a warning is emitted.
    """
    if len(mem) == 0:
        # Shared memory disabled; send the data as-is (pickled).
        return data
    t = type(data)
    over = False
    if t is tuple or t is list:
        ret = []
        for v in data:
            if isinstance(v, numpy.ndarray):
                if v.nbytes + offset > limit:
                    over = True
                else:
                    v = _PackedNdarray(v, mem, offset)
                    offset += v.nbytes
            ret.append(v)
        data = t(ret)
    elif t is dict:
        ret = {}
        for k, v in six.iteritems(data):
            if isinstance(v, numpy.ndarray):
                if v.nbytes + offset > limit:
                    over = True
                else:
                    v = _PackedNdarray(v, mem, offset)
                    offset += v.nbytes
            ret[k] = v
        data = ret
    elif t is numpy.ndarray:
        if data.nbytes + offset > limit:
            over = True
        else:
            data = _PackedNdarray(data, mem, offset)
            offset += data.nbytes
    if over:
        # NOTE(review): `data` is already partially packed here, so
        # _measure only counts the arrays that did NOT fit, and
        # `limit - offset` is the remaining space — confirm the sizes in
        # this warning before relying on them.
        expect = _measure(data)
        warnings.warn(
            'Shared memory size is too small.\n' +
            'Please set shared_mem option for MultiprocessIterator.\n' +
            'Expect shared memory size: {} bytes.\n'.format(expect) +
            'Actual shared memory size: {} bytes.'.format(limit - offset),
            UserWarning)
    return data
def _unpack(data, mem):
if len(mem) == 0:
return data
t = type(data)
if t is tuple or t is list:
ret = []
for v in data:
if isinstance(v, _PackedNdarray):
v = v.unpack(mem)
ret.append(v)
data = t(ret)
elif t is dict:
ret = {}
for k, v in six.iteritems(data):
if isinstance(v, _PackedNdarray):
v = v.unpack(mem)
ret[k] = v
data = ret
elif t is _PackedNdarray:
data = data.unpack(mem)
return data
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2014 Ivan Cai
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, with_statement
import os
import sys
import argparse
import logging
import datetime
import json
__author__ = 'Ivan Cai'
__version__ = '0.0.2'
# The ConfigParser module has been renamed to configparser in Python 3.
try:
import configparser
except ImportError:
import ConfigParser as configparser
try:
import pymongo
import iptc
except ImportError:
sys.exit("You don't have the required Python packages installed.")
def get_version():
    """Return a list which contains version number. Order is major, minor, micro."""
    components = __version__.split('.')
    return components
def check_python():
    """Exit unless the running interpreter is Python 2.6+ or 3.3+."""
    major, minor = sys.version_info[0], sys.version_info[1]
    if major == 2:
        if minor < 6:
            sys.exit("Python 2.6+ required")
    elif major == 3:
        if minor < 3:
            sys.exit("Python 3.3+ required")
    else:
        sys.exit("Python version not supported")
def check_root():
    """Exit unless the script runs with root privileges (euid 0)."""
    if os.geteuid() != 0:
        sys.exit("You need to have root privileges to run this script.")
def init(group_list):
    """Write /etc/tcpstat.sh, a script installing the accounting rules."""
    lines = [
        # Set shebang
        "#!/bin/bash\n",
        # Create new chain for security reason
        "/sbin/iptables -N ACCT\n",
        # Flush existing rules in our custom chain
        "/sbin/iptables -F ACCT\n",
        # Attach new chain
        "/sbin/iptables -A FORWARD -j ACCT\n",
        "/sbin/iptables -A INPUT -j ACCT\n",
        "/sbin/iptables -A OUTPUT -j ACCT\n",
    ]
    # One counting rule per direction for every configured port.
    for group in group_list:
        for port in group["Port"]:
            lines.append(
                "/sbin/iptables -A ACCT -p tcp --dport " + str(port) + "\n")
            lines.append(
                "/sbin/iptables -A ACCT -p tcp --sport " + str(port) + "\n")
    with open("/etc/tcpstat.sh", "w") as iptables_init_script:
        iptables_init_script.writelines(lines)
    os.system("chmod +x /etc/tcpstat.sh")
def find_config(args):
    """Return the path of the config file to use, or None if none exists.

    An explicitly given ``--config`` path takes precedence; otherwise the
    system default ``/etc/tcpstat/config`` is used when present.
    """
    default_path = "/etc/tcpstat/config"
    if args.config is None:
        # BUG FIX: the original fell through to os.path.exists(None) when
        # no --config was given and the default file was absent, raising
        # TypeError instead of returning None.
        return default_path if os.path.exists(default_path) else None
    if os.path.exists(args.config):
        return args.config
    return None
def check_port_validity(port):
    """Return True if *port* parses to an integer in the range [0, 65535]."""
    return 0 <= int(port) <= 65535
def read_config(path):
    """Parse the config file at *path* into a list of group dicts.

    Each result entry has the shape
    ``{"Name": ..., "Port": [int, ...], "Webhook": ...}``. Port entries
    may be single numbers or inclusive ranges like ``"1000-1010"``;
    invalid ports are logged and skipped. Exits if *path* is None.
    """
    if path is None:
        sys.exit("Config file doesn't exist.")
    config = configparser.ConfigParser()
    config.read(path)
    logging.info("Config file loaded.")
    groupname_list = config.get("Groups", "Name").split(",")
    group_list = []
    for groups in groupname_list:
        # In config file
        # [Gp1]
        # Port:-1,2,65534-65537
        # Webhook:http://localhost/api/v1/tcpstats
        # To
        # {"Name": "Gp1", "Port": [2,65534,65535], "Webhook": "http://localhost/api/v1/tcpstats"}
        logging.debug("Loading Group " + groups)
        temp_dict = {"Name": groups}
        port_list = []
        logging.debug(config.get(groups, "Port"))
        logging.debug(config.get(groups, "Port").split(","))
        for port_str in config.get(groups, "Port").split(","):
            logging.debug("Catch a port str " + port_str)
            if '-' not in port_str:
                if port_str.isdigit() and check_port_validity(port_str):
                    logging.debug("Appended a port " + port_str)
                    port_list.append(int(port_str))
                else:
                    logging.error("You entered " + port_str +
                                  " which is not valid port number.")
            else:
                logging.debug("Catch a port range " + port_str)
                head = int(port_str.split('-')[0])
                tail = int(port_str.split('-')[1])
                # BUG FIX: map() is lazy on Python 3, so the original
                # map(port_list.append, ...) was never evaluated and port
                # ranges were silently dropped. Extend the list eagerly.
                port_list.extend(
                    p for p in range(head, tail + 1)
                    if check_port_validity(p))
        temp_dict.update({"Port": port_list})
        # BUG FIX: config.get(section, option, '') raises TypeError on
        # Python 3 (raw/vars are keyword-only) and the '' was never a
        # fallback on Python 2 either. Default a missing Webhook to ''.
        try:
            webhook = config.get(groups, 'Webhook')
        except configparser.NoOptionError:
            webhook = ''
        temp_dict.update({"Webhook": webhook})
        group_list.append(temp_dict)
    logging.debug("Final list of groups")
    logging.debug(group_list)
    return group_list
def check_migration_lock():
    """Abort if a database migration is currently in progress."""
    if not os.path.exists("/var/lock/tcpstat.lock"):
        return
    logging.error("Migration lock found. Quit.")
    sys.exit("Migration lock found. Quit.")
def update_db(group_list):
    """Fold the current iptables ACCT byte counters into today's documents.

    For every accounting rule, add its byte counter to the matching
    port's TX (source-port rules) or RX (destination-port rules) total in
    MongoDB, then zero the kernel counters.
    """
    check_migration_lock()
    table = iptc.Table(iptc.Table.FILTER)
    client = pymongo.MongoClient('mongodb://localhost:27017/')
    db = client['tcpstat']
    collection = db['accounting']
    today_str = str(datetime.date.today())
    logging.info("Connect to db")
    # BUG FIX: the original referenced `group` here before any loop had
    # bound it, raising NameError on every run. Migrate when any
    # configured group is missing today's document.
    if any(collection.find_one({"Name": g["Name"], "Time": today_str}) is None
           for g in group_list):
        migrate_db(group_list)
    check_migration_lock()
    chain = iptc.Chain(table, 'ACCT')
    for group in group_list:
        for rule in chain.rules:
            for match in rule.matches:
                # A rule carries either --sport (TX) or --dport (RX).
                if not match.sport:
                    port_number = str(match.dport)
                    rule_type = "RX"
                else:
                    port_number = str(match.sport)
                    rule_type = "TX"
                entry = collection.find_one(
                    {"Name": group["Name"], "Time": today_str})
                # Only count ports that belong to this group's document.
                if port_number in entry.keys():
                    # Counters in bytes
                    if rule_type == "TX":
                        # Fetch entries in db
                        entry = collection.find_one(
                            {"Name": group["Name"], "Time": today_str})
                        TX = rule.get_counters()[1] + entry[port_number]["TX"]
                        RX = entry[port_number]["RX"]
                        logging.debug(
                            "This record is TX " + str(TX) + " for " + port_number)
                        # Modify data in db
                        collection.update({"Name": group["Name"],
                                           "Time": today_str},
                                          {"$set":
                                           {port_number: {
                                               "TX": int(TX), "RX": int(RX)}}
                                           }
                                          )
                    else:
                        entry = collection.find_one(
                            {"Name": group["Name"], "Time": today_str})
                        # Fetch entries in db
                        RX = rule.get_counters()[1] + entry[port_number]["RX"]
                        TX = entry[port_number]["TX"]
                        logging.debug(
                            "This record is RX " + str(RX) + " for " + port_number)
                        collection.update({"Name": group["Name"],
                                           "Time": today_str},
                                          {"$set":
                                           {port_number: {
                                               "TX": int(TX), "RX": int(RX)}}
                                           }
                                          )
    chain.zero_counters()
def migrate_db(group_list):
    """Bring today's MongoDB documents in line with the configured groups.

    Creates today's document for each group when missing and adds zeroed
    TX/RX counters for newly configured ports. A lock file guards the
    database against concurrent updates while the migration runs.
    """
    # Migration lock: its presence makes check_migration_lock() abort
    # other invocations until we finish.
    open('/var/lock/tcpstat.lock', 'a').close()
    client = pymongo.MongoClient('mongodb://localhost:27017/')
    db = client['tcpstat']
    collection = db['accounting']
    today_str = str(datetime.date.today())
    for group in group_list:
        entry = collection.find_one({"Name": group["Name"], "Time": today_str})
        if entry:
            logging.info("Find an existing entry. Let's migrate it.")
            # Add zeroed counters only for ports not yet in the document.
            for port in group["Port"]:
                if str(port) not in entry.keys():
                    collection.update({"Name": group["Name"], "Time": today_str},
                                      {"$set": {str(port): {"TX": 0, "RX": 0}}})
        else:
            logging.info("Create a new entry with new schema.")
            temp_dict = {}
            temp_dict.update({"Name": group["Name"], "Time": today_str})
            for port in group["Port"]:
                temp_dict.update({str(port): {"TX": 0, "RX": 0}})
            collection.insert(temp_dict)
    # Remove migration lock
    os.remove('/var/lock/tcpstat.lock')
def main():
    """Entry point: validate the environment, parse args, dispatch."""
    # Root privileges and a supported interpreter are hard requirements.
    check_root()
    check_python()

    # All diagnostics go to the system-wide log file.
    logging.basicConfig(filename='/var/log/tcpstat.log',
                        format='%(asctime)s %(levelname)s: %(message)s',
                        level=logging.DEBUG)
    logging.info(" ".join(("Started. Version:", __version__)))

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-c", "--config", type=str,
                            help="Path of config file. Default /etc/tcpstat/config")
    mode_group = arg_parser.add_mutually_exclusive_group()
    mode_group.add_argument("-v", "--version", help="Show version.",
                            action="store_true")
    mode_group.add_argument("-i", "--init", help="Init iptables rules.",
                            action="store_true")
    mode_group.add_argument("-u", "--update", help="Update db with latest data.",
                            action="store_true")
    mode_group.add_argument("-m", "--migrate", help="Migrate db with new config.",
                            action="store_true")
    args = arg_parser.parse_args()

    if args.version:
        print(" ".join(("Tcpstat\nVersion:", __version__)))
    # Init rules in /etc/tcpstat.sh which will be included in /etc/rc.local
    if args.init:
        init(read_config(find_config(args)))
    if args.update:
        update_db(read_config(find_config(args)))
    if args.migrate:
        migrate_db(read_config(find_config(args)))
    logging.info("Exit.")


if __name__ == "__main__":
    main()
| |
# This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
from nose.tools import eq_, assert_almost_equal, raises
from mapproxy.grid import (
MetaGrid,
TileGrid,
_create_tile_list,
bbox_intersects,
bbox_contains,
NoTiles,
tile_grid,
resolutions,
ResolutionRange,
resolution_range,
merge_resolution_range,
)
from mapproxy.srs import SRS, TransformationError
class TestResolution(object):
    """Tests for the ``resolutions`` helper (resolution pyramid creation)."""

    def test_min_res(self):
        conf = dict(min_res=1000)
        res = resolutions(**conf)
        eq_(res[:5], [1000, 500.0, 250.0, 125.0, 62.5])
        eq_(len(res), 20)  # default number of levels

    def test_min_res_max_res(self):
        # max_res truncates the pyramid before reaching it.
        conf = dict(min_res=1000, max_res=80)
        res = resolutions(**conf)
        eq_(res, [1000, 500.0, 250.0, 125.0])

    def test_min_res_levels(self):
        conf = dict(min_res=1600, num_levels=5)
        res = resolutions(**conf)
        eq_(res, [1600, 800.0, 400.0, 200.0, 100.0])

    def test_min_res_levels_res_factor(self):
        conf = dict(min_res=1600, num_levels=4, res_factor=4.0)
        res = resolutions(**conf)
        eq_(res, [1600, 400.0, 100.0, 25.0])

    def test_min_res_levels_sqrt2(self):
        # 'sqrt2' halves the resolution every second level.
        conf = dict(min_res=1600, num_levels=5, res_factor='sqrt2')
        res = resolutions(**conf)
        eq_(list(map(round, res)), [1600.0, 1131.0, 800.0, 566.0, 400.0])

    def test_min_res_max_res_levels(self):
        conf = dict(min_res=1600, max_res=10, num_levels=10)
        res = resolutions(**conf)
        eq_(len(res), 10)
        # will calculate log10 based factor of 1.75752...
        assert_almost_equal(res[0], 1600)
        assert_almost_equal(res[1], 1600/1.75752, 2)
        assert_almost_equal(res[8], 1600/1.75752**8, 2)
        assert_almost_equal(res[9], 10)

    def test_bbox_levels(self):
        # Level-0 resolution derived from the bbox width and tile size.
        conf = dict(bbox=[0,40,15,50], num_levels=10, tile_size=(256, 256))
        res = resolutions(**conf)
        eq_(len(res), 10)
        assert_almost_equal(res[0], 15/256)
        assert_almost_equal(res[1], 15/512)
class TestAlignedGrid(object):
    """Tests for sub-grids aligned with (derived from) a base grid."""

    def test_epsg_4326_bbox(self):
        base = tile_grid(srs='epsg:4326')
        bbox = (10.0, -20.0, 40.0, 10.0)
        sub = tile_grid(align_with=base, bbox=bbox)

        eq_(sub.bbox, bbox)
        eq_(sub.resolution(0), 180/256/8)
        # Affected area is snapped outward to full tiles of the sub grid.
        abbox, grid_size, tiles = sub.get_affected_level_tiles(bbox, 0)
        eq_(abbox, (10.0, -20.0, 55.0, 25.0))
        eq_(grid_size, (2, 2))
        eq_(list(tiles), [(0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0)])

    def test_epsg_4326_bbox_from_sqrt2(self):
        # A factor-2 sub grid matches every second level of a sqrt2 base.
        base = tile_grid(srs='epsg:4326', res_factor='sqrt2')
        bbox = (10.0, -20.0, 40.0, 10.0)
        sub = tile_grid(align_with=base, bbox=bbox, res_factor=2.0)

        eq_(sub.bbox, bbox)
        eq_(sub.resolution(0), base.resolution(8))
        eq_(sub.resolution(1), base.resolution(10))
        eq_(sub.resolution(2), base.resolution(12))

    def test_epsg_4326_bbox_to_sqrt2(self):
        # The inverse: a sqrt2 sub grid on a factor-2 base; every second
        # sub level coincides with a base level.
        base = tile_grid(srs='epsg:4326', res_factor=2.0)
        bbox = (10.0, -20.0, 40.0, 10.0)
        sub = tile_grid(align_with=base, bbox=bbox, res_factor='sqrt2')

        eq_(sub.bbox, bbox)
        eq_(sub.resolution(0), base.resolution(4))
        eq_(sub.resolution(2), base.resolution(5))
        eq_(sub.resolution(4), base.resolution(6))

        assert sub.resolution(0) > sub.resolution(1) > sub.resolution(3)
        eq_(sub.resolution(3)/2, sub.resolution(5))
def test_metagrid_tiles():
    # 2x2 meta tiles: level 0 holds a single tile; from level 1 on, four
    # tiles are grouped with their pixel offsets inside the meta tile.
    mgrid = MetaGrid(grid=TileGrid(), meta_size=(2, 2))
    assert list(mgrid.meta_tile((0, 0, 0)).tile_patterns) == \
        [((0, 0, 0), (0, 0))]
    assert list(mgrid.meta_tile((0, 1, 1)).tile_patterns) == \
        [((0, 1, 1), (0, 0)), ((1, 1, 1), (256, 0)),
         ((0, 0, 1), (0, 256)), ((1, 0, 1), (256, 256))]
    assert list(mgrid.meta_tile((1, 2, 2)).tile_patterns) == \
        [((0, 3, 2), (0, 0)), ((1, 3, 2), (256, 0)),
         ((0, 2, 2), (0, 256)), ((1, 2, 2), (256, 256))]
def test_metagrid_tiles_w_meta_size():
    # Non-square 4x2 meta size groups eight tiles per meta tile.
    mgrid = MetaGrid(grid=TileGrid(), meta_size=(4, 2))
    assert list(mgrid.meta_tile((1, 2, 2)).tile_patterns) == \
        [((0, 3, 2), (0, 0)), ((1, 3, 2), (256, 0)),
         ((2, 3, 2), (512, 0)), ((3, 3, 2), (768, 0)),
         ((0, 2, 2), (0, 256)), ((1, 2, 2), (256, 256)),
         ((2, 2, 2), (512, 256)), ((3, 2, 2), (768, 256))]
class TestMetaGridGeodetic(object):
    """Meta tiles on a geodetic (EPSG:4326) grid with a 10px meta buffer."""

    def setup(self):
        self.mgrid = MetaGrid(grid=tile_grid('EPSG:4326'), meta_size=(2, 2), meta_buffer=10)

    def test_meta_bbox_level_0(self):
        # With limit_to_bbox (default) the buffer is clipped at the grid
        # bbox; the second tuple holds the resulting pixel buffers.
        eq_(self.mgrid._meta_bbox((0, 0, 0)), ((-180, -90, 180, 90), (0, 0, 0, -128)))
        eq_(self.mgrid._meta_bbox((0, 0, 0), limit_to_bbox=False),
            ((-194.0625, -104.0625, 194.0625, 284.0625), (10, 10, 10, 10)))
        eq_(self.mgrid.meta_tile((0, 0, 0)).size, (256, 128))

    def test_tiles_level_0(self):
        # Level 0 is a single 256x128 tile.
        meta_tile = self.mgrid.meta_tile((0, 0, 0))
        eq_(meta_tile.size, (256, 128))
        eq_(meta_tile.grid_size, (1, 1))
        eq_(meta_tile.tile_patterns, [((0, 0, 0), (0, -128))])

    def test_meta_bbox_level_1(self):
        eq_(self.mgrid._meta_bbox((0, 0, 1)), ((-180, -90, 180, 90), (0, 0, 0, 0)))
        eq_(self.mgrid._meta_bbox((0, 0, 1), limit_to_bbox=False),
            ((-187.03125, -97.03125, 187.03125, 97.03125), (10, 10, 10, 10)))
        eq_(self.mgrid.meta_tile((0, 0, 1)).size, (512, 256))

    def test_tiles_level_1(self):
        eq_(list(self.mgrid.meta_tile((0, 0, 1)).tile_patterns),
            [
                ((0, 0, 1), (0, 0)),
                ((1, 0, 1), (256, 0))
            ])

    def test_tile_list_level_1(self):
        eq_(list(self.mgrid.tile_list((0, 0, 1))),
            [(0, 0, 1), (1, 0, 1)])

    def test_meta_bbox_level_2(self):
        # From level 2 on, only the grid edges are clipped: the right
        # buffer of the leftmost meta tile extends 10px into the grid.
        eq_(self.mgrid._meta_bbox((0, 0, 2)), ((-180, -90, 3.515625, 90), (0, 0, 10, 0)))
        eq_(self.mgrid._meta_bbox((0, 0, 2), limit_to_bbox=False),
            ((-183.515625, -93.515625, 3.515625, 93.515625), (10, 10, 10, 10)))
        eq_(self.mgrid.meta_tile((0, 0, 2)).size, (522, 512))

        eq_(self.mgrid._meta_bbox((2, 0, 2)), ((-3.515625, -90, 180, 90), (10, 0, 0, 0)))
        meta_tile = self.mgrid.meta_tile((2, 0, 2))
        eq_(meta_tile.size, (522, 512))
        eq_(meta_tile.grid_size, (2, 2))

    def test_tiles_level_2(self):
        eq_(list(self.mgrid.meta_tile((0, 0, 2)).tile_patterns),
            [
                ((0, 1, 2), (0, 0)),
                ((1, 1, 2), (256, 0)),
                ((0, 0, 2), (0, 256)),
                ((1, 0, 2), (256, 256)),
            ])
        eq_(list(self.mgrid.meta_tile((2, 0, 2)).tile_patterns),
            [
                ((2, 1, 2), (10, 0)),
                ((3, 1, 2), (266, 0)),
                ((2, 0, 2), (10, 256)),
                ((3, 0, 2), (266, 256)),
            ])

    def test_tile_list_level_2(self):
        # Any tile inside a meta tile maps to the same tile list.
        eq_(list(self.mgrid.tile_list((0, 0, 2))),
            [(0, 1, 2), (1, 1, 2), (0, 0, 2), (1, 0, 2)])
        eq_(list(self.mgrid.tile_list((1, 1, 2))),
            [(0, 1, 2), (1, 1, 2), (0, 0, 2), (1, 0, 2)])

    def test_tiles_level_3(self):
        eq_(list(self.mgrid.meta_tile((2, 0, 3)).tile_patterns),
            [
                ((2, 1, 3), (10, 10)),
                ((3, 1, 3), (266, 10)),
                ((2, 0, 3), (10, 266)),
                ((3, 0, 3), (266, 266)),
            ])
        eq_(list(self.mgrid.meta_tile((2, 2, 3)).tile_patterns),
            [
                ((2, 3, 3), (10, 0)),
                ((3, 3, 3), (266, 0)),
                ((2, 2, 3), (10, 256)),
                ((3, 2, 3), (266, 256)),
            ])
class TestMetaGridGeodeticUL(object):
    """Same layout checks as TestMetaGridGeodetic, but with upper-left origin
    (tile row 0 is at the top, so pattern rows are not flipped)."""

    def setup(self):
        self.tile_grid = tile_grid('EPSG:4326', origin='ul')
        self.mgrid = MetaGrid(grid=self.tile_grid, meta_size=(2, 2), meta_buffer=10)

    def test_tiles_level_0(self):
        meta_tile = self.mgrid.meta_tile((0, 0, 0))
        eq_(meta_tile.bbox, (-180, -90, 180, 90))
        eq_(meta_tile.size, (256, 128))
        eq_(meta_tile.grid_size, (1, 1))
        eq_(meta_tile.tile_patterns, [((0, 0, 0), (0, 0))])

    def test_tiles_level_1(self):
        meta_tile = self.mgrid.meta_tile((0, 0, 1))
        eq_(meta_tile.bbox, (-180, -90, 180, 90))
        eq_(meta_tile.size, (512, 256))
        eq_(meta_tile.grid_size, (2, 1))
        eq_(list(meta_tile.tile_patterns),
            [
                ((0, 0, 1), (0, 0)),
                ((1, 0, 1), (256, 0))
            ])

    def test_tile_list_level_1(self):
        eq_(list(self.mgrid.tile_list((0, 0, 1))),
            [(0, 0, 1), (1, 0, 1)])

    def test_tiles_level_2(self):
        meta_tile = self.mgrid.meta_tile((0, 0, 2))
        eq_(meta_tile.bbox, (-180, -90, 3.515625, 90))
        eq_(meta_tile.size, (522, 512))
        eq_(meta_tile.grid_size, (2, 2))
        eq_(meta_tile.tile_patterns,
            [
                ((0, 0, 2), (0, 0)),
                ((1, 0, 2), (256, 0)),
                ((0, 1, 2), (0, 256)),
                ((1, 1, 2), (256, 256)),
            ])
        # interior meta tile: x offsets shifted by the 10px buffer
        eq_(list(self.mgrid.meta_tile((2, 0, 2)).tile_patterns),
            [
                ((2, 0, 2), (10, 0)),
                ((3, 0, 2), (266, 0)),
                ((2, 1, 2), (10, 256)),
                ((3, 1, 2), (266, 256)),
            ])

    def test_tile_list_level_2(self):
        eq_(list(self.mgrid.tile_list((0, 0, 2))),
            [(0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2)])
        eq_(list(self.mgrid.tile_list((1, 1, 2))),
            [(0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2)])

    def test_tiles_level_3(self):
        meta_tile = self.mgrid.meta_tile((2, 0, 3))
        eq_(meta_tile.bbox, (-91.7578125, -1.7578125, 1.7578125, 90))
        eq_(meta_tile.size, (532, 522))
        eq_(meta_tile.grid_size, (2, 2))
        eq_(list(self.mgrid.meta_tile((2, 0, 3)).tile_patterns),
            [
                ((2, 0, 3), (10, 0)),
                ((3, 0, 3), (266, 0)),
                ((2, 1, 3), (10, 256)),
                ((3, 1, 3), (266, 256)),
            ])
        eq_(list(self.mgrid.meta_tile((2, 2, 3)).tile_patterns),
            [
                ((2, 2, 3), (10, 10)),
                ((3, 2, 3), (266, 10)),
                ((2, 3, 3), (10, 266)),
                ((3, 3, 3), (266, 266)),
            ])
class TestMetaTile(object):
    """Basic MetaTile properties: size, bbox, and grid_size."""

    def setup(self):
        self.mgrid = MetaGrid(grid=tile_grid('EPSG:4326'), meta_size=(2, 2), meta_buffer=10)

    def test_meta_tile(self):
        meta_tile = self.mgrid.meta_tile((2, 0, 2))
        eq_(meta_tile.size, (522, 512))

    def test_metatile_bbox(self):
        mgrid = MetaGrid(grid=TileGrid(), meta_size=(2, 2))
        meta_tile = mgrid.meta_tile((0, 0, 2))
        assert meta_tile.bbox == (-20037508.342789244, -20037508.342789244, 0.0, 0.0)
        # any tile inside the same meta tile maps to the same meta bbox
        meta_tile = mgrid.meta_tile((1, 1, 2))
        assert meta_tile.bbox == (-20037508.342789244, -20037508.342789244, 0.0, 0.0)
        meta_tile = mgrid.meta_tile((4, 5, 3))
        assert_almost_equal_bbox(meta_tile.bbox, (0.0, 0.0, 10018754.171394622, 10018754.171394622))

    def test_metatile_non_default_meta_size(self):
        mgrid = MetaGrid(grid=TileGrid(), meta_size=(4, 2))
        meta_tile = mgrid.meta_tile((4, 5, 3))
        assert_almost_equal_bbox(meta_tile.bbox, (0.0, 0.0, 20037508.342789244, 10018754.171394622))
        eq_(meta_tile.size, (1024, 512))
        eq_(meta_tile.grid_size, (4, 2))
class TestMetaTileSQRT2(object):
    """Meta tiles on a sqrt2 res-factor grid, where adjacent levels share
    (almost) the same bbox but differ in pixel size."""

    def setup(self):
        self.grid = tile_grid('EPSG:4326', res_factor='sqrt2')
        self.mgrid = MetaGrid(grid=self.grid, meta_size=(4, 4), meta_buffer=10)

    def test_meta_tile(self):
        meta_tile = self.mgrid.meta_tile((0, 0, 8))
        eq_(meta_tile.size, (1034, 1034))

    def test_metatile_bbox(self):
        meta_tile = self.mgrid.meta_tile((0, 0, 2))
        eq_(meta_tile.bbox, (-180, -90, 180, 90))
        eq_(meta_tile.size, (512, 256))
        eq_(meta_tile.grid_size, (2, 1))
        eq_(meta_tile.tile_patterns, [((0, 0, 2), (0, 0)), ((1, 0, 2), (256, 0))])
        meta_tile = self.mgrid.meta_tile((1, 0, 2))
        eq_(meta_tile.bbox, (-180.0, -90, 180.0, 90.0))
        eq_(meta_tile.size, (512, 256))
        eq_(meta_tile.grid_size, (2, 1))
        # level 3 (sqrt2 step): tiles do not align with the meta bbox,
        # hence the negative/partial y offsets
        meta_tile = self.mgrid.meta_tile((0, 0, 3))
        eq_(meta_tile.bbox, (-180.0, -90, 180.0, 90.0))
        eq_(meta_tile.size, (724, 362))
        eq_(meta_tile.tile_patterns, [((0, 1, 3), (0, -149)), ((1, 1, 3), (256, -149)),
            ((2, 1, 3), (512, -149)), ((0, 0, 3), (0, 107)), ((1, 0, 3), (256, 107)),
            ((2, 0, 3), (512, 107))])

    def test_metatile_non_default_meta_size(self):
        mgrid = MetaGrid(grid=self.grid, meta_size=(4, 2), meta_buffer=0)
        meta_tile = mgrid.meta_tile((4, 3, 6))
        eq_(meta_tile.bbox, (0.0, 0.0, 180.0, 90.0))
        eq_(meta_tile.size, (1024, 512))
        eq_(meta_tile.grid_size, (4, 2))
        eq_(meta_tile.tile_patterns, [((4, 3, 6), (0, 0)), ((5, 3, 6), (256, 0)),
            ((6, 3, 6), (512, 0)), ((7, 3, 6), (768, 0)), ((4, 2, 6), (0, 256)),
            ((5, 2, 6), (256, 256)), ((6, 2, 6), (512, 256)), ((7, 2, 6), (768, 256))])
class TestMinimalMetaTile(object):
    """minimal_meta_tile shrinks a meta tile so it just covers the given tiles
    (plus the meta buffer)."""

    def setup(self):
        self.mgrid = MetaGrid(grid=tile_grid('EPSG:4326'), meta_size=(2, 2), meta_buffer=10)

    def test_minimal_tiles(self):
        sgrid = self.mgrid.minimal_meta_tile([(0, 0, 2), (1, 0, 2)])
        eq_(sgrid.grid_size, (2, 1))
        eq_(list(sgrid.tile_patterns),
            [
                ((0, 0, 2), (0, 10)),
                ((1, 0, 2), (256, 10)),
            ]
        )
        eq_(sgrid.bbox, (-180.0, -90.0, 3.515625, 3.515625))

    def test_minimal_tiles_fragmented(self):
        # scattered tiles: the minimal meta tile must span all of them,
        # including the untouched tiles in between
        sgrid = self.mgrid.minimal_meta_tile(
            [
                (2, 3, 3),
                (1, 2, 3),
                (2, 1, 3),
            ])
        eq_(sgrid.grid_size, (2, 3))
        eq_(list(sgrid.tile_patterns),
            [
                ((1, 3, 3), (10, 0)), ((2, 3, 3), (266, 0)),
                ((1, 2, 3), (10, 256)), ((2, 2, 3), (266, 256)),
                ((1, 1, 3), (10, 512)), ((2, 1, 3), (266, 512)),
            ]
        )
        eq_(sgrid.bbox, (-136.7578125, -46.7578125, -43.2421875, 90.0))

    def test_minimal_tiles_fragmented_ul(self):
        # same scenario with an upper-left origin grid
        self.mgrid = MetaGrid(grid=tile_grid('EPSG:4326', origin='ul'),
                              meta_size=(2, 2), meta_buffer=10)
        sgrid = self.mgrid.minimal_meta_tile(
            [
                (2, 0, 3),
                (1, 1, 3),
                (2, 2, 3),
            ])
        eq_(sgrid.grid_size, (2, 3))
        eq_(list(sgrid.tile_patterns),
            [
                ((1, 0, 3), (10, 0)), ((2, 0, 3), (266, 0)),
                ((1, 1, 3), (10, 256)), ((2, 1, 3), (266, 256)),
                ((1, 2, 3), (10, 512)), ((2, 2, 3), (266, 512)),
            ]
        )
        eq_(sgrid.bbox, (-136.7578125, -46.7578125, -43.2421875, 90.0))
class TestMetaGridLevelMetaTiles(object):
    """get_affected_level_tiles on a web-mercator meta grid."""

    def __init__(self):
        # NOTE(review): uses __init__ instead of setup; the test runner
        # instantiates the class per test, so this acts as the fixture
        self.meta_grid = MetaGrid(TileGrid(), meta_size=(2, 2))

    def test_full_grid_0(self):
        bbox = (-20037508.34, -20037508.34, 20037508.34, 20037508.34)
        abbox, tile_grid, meta_tiles = \
            self.meta_grid.get_affected_level_tiles(bbox, 0)
        meta_tiles = list(meta_tiles)
        assert_almost_equal_bbox(bbox, abbox)
        eq_(len(meta_tiles), 1)
        eq_(meta_tiles[0], (0, 0, 0))

    def test_full_grid_2(self):
        bbox = (-20037508.34, -20037508.34, 20037508.34, 20037508.34)
        abbox, tile_grid, meta_tiles = \
            self.meta_grid.get_affected_level_tiles(bbox, 2)
        meta_tiles = list(meta_tiles)
        assert_almost_equal_bbox(bbox, abbox)
        eq_(tile_grid, (2, 2))
        eq_(len(meta_tiles), 4)
        eq_(meta_tiles[0], (0, 2, 2))
        eq_(meta_tiles[1], (2, 2, 2))
        eq_(meta_tiles[2], (0, 0, 2))
        eq_(meta_tiles[3], (2, 0, 2))
class TestMetaGridLevelMetaTilesGeodetic(object):
    """get_affected_level_tiles on a geodetic meta grid."""

    def __init__(self):
        # NOTE(review): __init__ doubles as the per-test fixture here
        self.meta_grid = MetaGrid(TileGrid(is_geodetic=True), meta_size=(2, 2))

    def test_full_grid_2(self):
        bbox = (-180.0, -90.0, 180.0, 90)
        abbox, tile_grid, meta_tiles = \
            self.meta_grid.get_affected_level_tiles(bbox, 2)
        meta_tiles = list(meta_tiles)
        assert_almost_equal_bbox(bbox, abbox)
        eq_(tile_grid, (2, 1))
        eq_(len(meta_tiles), 2)
        eq_(meta_tiles[0], (0, 0, 2))
        eq_(meta_tiles[1], (2, 0, 2))

    def test_partial_grid_3(self):
        # request bbox is expanded to the covering meta tile bbox
        bbox = (0.0, 5.0, 45, 40)
        abbox, tile_grid, meta_tiles = \
            self.meta_grid.get_affected_level_tiles(bbox, 3)
        meta_tiles = list(meta_tiles)
        assert_almost_equal_bbox((0.0, 0.0, 90.0, 90.0), abbox)
        eq_(tile_grid, (1, 1))
        eq_(len(meta_tiles), 1)
        eq_(meta_tiles[0], (4, 2, 3))
def assert_grid_size(grid, level, grid_size):
    """Assert that *grid* reports *grid_size* at *level* and that the tile
    raster at that level's resolution covers the whole grid bbox.
    """
    actual = grid.grid_sizes[level]
    print(actual, "==", grid_size)
    assert actual == grid_size
    tiles_x, tiles_y = grid_size
    res = grid.resolutions[level]
    bbox_width = grid.bbox[2] - grid.bbox[0]
    bbox_height = grid.bbox[3] - grid.bbox[1]
    # 256px tiles at this resolution must span the full extent in both axes
    assert res * tiles_x * 256 >= bbox_width
    assert res * tiles_y * 256 >= bbox_height
class TileGridTest(object):
    """Mixin for grid tests; subclasses create ``self.grid`` in ``setup``."""

    def check_grid(self, level, grid_size):
        # delegates to the module-level helper; used via yield-based
        # test generators in the subclasses
        assert_grid_size(self.grid, level, grid_size)
class TestTileGridResolutions(object):
    """Resolution configuration: explicit lists, numeric factors, and sqrt2."""

    def test_explicit_grid(self):
        grid = TileGrid(res=[0.1, 0.05, 0.01])
        eq_(grid.resolution(0), 0.1)
        eq_(grid.resolution(1), 0.05)
        eq_(grid.resolution(2), 0.01)
        eq_(grid.closest_level(0.00001), 2)

    def test_factor_grid(self):
        # res given as a single number is used as the per-level factor
        grid = TileGrid(is_geodetic=True, res=1/0.75, tile_size=(360, 180))
        eq_(grid.resolution(0), 1.0)
        eq_(grid.resolution(1), 0.75)
        eq_(grid.resolution(2), 0.75*0.75)

    def test_sqrt_grid(self):
        grid = TileGrid(is_geodetic=True, res='sqrt2', tile_size=(360, 180))
        eq_(grid.resolution(0), 1.0)
        # every second sqrt2 level halves the resolution
        assert_almost_equal(grid.resolution(2), 0.5)
        assert_almost_equal(grid.resolution(4), 0.25)
class TestWGS84TileGrid(object):
    """Default geodetic grid: one tile at level 0, two side-by-side at level 1."""

    def setup(self):
        self.grid = TileGrid(is_geodetic=True)

    def test_resolution(self):
        assert_almost_equal(self.grid.resolution(0), 1.40625)
        assert_almost_equal(self.grid.resolution(1), 1.40625/2)

    def test_bbox(self):
        eq_(self.grid.bbox, (-180.0, -90.0, 180.0, 90.0))

    def test_grid_size(self):
        eq_(self.grid.grid_sizes[0], (1, 1))
        eq_(self.grid.grid_sizes[1], (2, 1))
        eq_(self.grid.grid_sizes[2], (4, 2))

    def test_affected_tiles(self):
        bbox, grid, tiles = self.grid.get_affected_tiles((-180,-90,180,90), (512,256))
        eq_(bbox, (-180.0, -90.0, 180.0, 90.0))
        eq_(grid, (2, 1))
        eq_(list(tiles), [(0, 0, 1), (1, 0, 1)])

    def test_affected_level_tiles(self):
        bbox, grid, tiles = self.grid.get_affected_level_tiles((-180,-90,180,90), 1)
        eq_(grid, (2, 1))
        eq_(bbox, (-180.0, -90.0, 180.0, 90.0))
        eq_(list(tiles), [(0, 0, 1), (1, 0, 1)])
        bbox, grid, tiles = self.grid.get_affected_level_tiles((0,0,180,90), 2)
        eq_(grid, (2, 1))
        eq_(bbox, (0.0, 0.0, 180.0, 90.0))
        eq_(list(tiles), [(2, 1, 2), (3, 1, 2)])
class TestWGS83TileGridUL(object):
    """Geodetic grid with upper-left origin: tile row 0 is at the top."""
    # NOTE(review): class name looks like a typo for "WGS84"; kept as-is so
    # existing test IDs stay stable.

    def setup(self):
        self.grid = TileGrid(4326, bbox=(-180, -90, 180, 90), origin='ul')

    def test_resolution(self):
        assert_almost_equal(self.grid.resolution(0), 1.40625)
        assert_almost_equal(self.grid.resolution(1), 1.40625/2)

    def test_bbox(self):
        eq_(self.grid.bbox, (-180.0, -90.0, 180.0, 90.0))

    def test_tile_bbox(self):
        # unlimited level-0 tile bbox extends below the grid (ul origin)
        eq_(self.grid.tile_bbox((0, 0, 0)), (-180.0, -270.0, 180.0, 90.0))
        eq_(self.grid.tile_bbox((0, 0, 0), limit=True), (-180.0, -90.0, 180.0, 90.0))
        eq_(self.grid.tile_bbox((0, 0, 1)), (-180.0, -90.0, 0.0, 90.0))

    def test_tile(self):
        eq_(self.grid.tile(-170, -80, 0), (0, 0, 0))
        eq_(self.grid.tile(-170, -80, 1), (0, 0, 1))
        eq_(self.grid.tile(-170, -80, 2), (0, 1, 2))

    def test_grid_size(self):
        eq_(self.grid.grid_sizes[0], (1, 1))
        eq_(self.grid.grid_sizes[1], (2, 1))
        eq_(self.grid.grid_sizes[2], (4, 2))

    def test_affected_tiles(self):
        bbox, grid, tiles = self.grid.get_affected_tiles((-180,-90,180,90), (512,256))
        eq_(bbox, (-180.0, -90.0, 180.0, 90.0))
        eq_(grid, (2, 1))
        eq_(list(tiles), [(0, 0, 1), (1, 0, 1)])
        bbox, grid, tiles = self.grid.get_affected_tiles((-180,-90,0,90), (512, 512))
        eq_(bbox, (-180.0, -90.0, 0.0, 90.0))
        eq_(grid, (2, 2))
        eq_(list(tiles), [(0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2)])

    def test_affected_level_tiles(self):
        bbox, grid, tiles = self.grid.get_affected_level_tiles((-180,-90,180,90), 1)
        eq_(grid, (2, 1))
        eq_(bbox, (-180.0, -90.0, 180.0, 90.0))
        eq_(list(tiles), [(0, 0, 1), (1, 0, 1)])
        bbox, grid, tiles = self.grid.get_affected_level_tiles((0,0,180,90), 2)
        eq_(grid, (2, 1))
        eq_(list(tiles), [(2, 0, 2), (3, 0, 2)])
        eq_(bbox, (0.0, 0.0, 180.0, 90.0))
        bbox, grid, tiles = self.grid.get_affected_level_tiles((0,-90,180,90), 2)
        eq_(grid, (2, 2))
        eq_(list(tiles), [(2, 0, 2), (3, 0, 2), (2, 1, 2), (3, 1, 2)])
        eq_(bbox, (0.0, -90.0, 180.0, 90.0))
class TestGKTileGrid(TileGridTest):
    """EPSG:31467 grid with a custom, non-square bbox."""

    def setup(self):
        self.grid = TileGrid(SRS(31467), bbox=(3250000, 5230000, 3930000, 6110000))

    def test_bbox(self):
        assert self.grid.bbox == (3250000, 5230000, 3930000, 6110000)

    def test_resolution(self):
        # level-0 resolution derives from the larger (height) extent
        res = self.grid.resolution(0)
        width = self.grid.bbox[2] - self.grid.bbox[0]
        height = self.grid.bbox[3] - self.grid.bbox[1]
        assert height == 880000.0 and width == 680000.0
        assert res == 880000.0/256

    def test_tile_bbox(self):
        # level-0 tile is square, so it extends beyond the bbox width
        tile_bbox = self.grid.tile_bbox((0, 0, 0))
        assert tile_bbox == (3250000.0, 5230000.0, 4130000.0, 6110000.0)

    def test_tile(self):
        x, y = 3450000, 5890000
        assert [self.grid.tile(x, y, level) for level in range(5)] == \
            [(0, 0, 0), (0, 1, 1), (0, 3, 2), (1, 6, 3), (3, 12, 4)]

    def test_grids(self):
        # yield-based test generator; check_grid comes from TileGridTest
        for level, grid_size in [(0, (1, 1)), (1, (2, 2)), (2, (4, 4)), (3, (7, 8))]:
            yield self.check_grid, level, grid_size

    def test_closest_level(self):
        assert self.grid.closest_level(880000.0/256) == 0
        assert self.grid.closest_level(600000.0/256) == 1
        assert self.grid.closest_level(440000.0/256) == 1
        assert self.grid.closest_level(420000.0/256) == 1

    def test_adjacent_tile_bbox(self):
        # horizontally adjacent tiles share an edge; (0,1) sits above (0,0)
        t1 = self.grid.tile_bbox((0, 0, 1))
        t2 = self.grid.tile_bbox((1, 0, 1))
        t3 = self.grid.tile_bbox((0, 1, 1))
        assert t1[1] == t2[1]
        assert t1[3] == t2[3]
        assert t1[2] == t2[0]
        assert t1[0] == t3[0]
        assert t1[3] == t3[1]
class TestGKTileGridUL(TileGridTest):
    """
    Custom EPSG:31467 grid with ul origin and an explicit resolution list.
    """

    def setup(self):
        self.grid = TileGrid(SRS(31467),
            bbox=(3300000, 5300000, 3900000, 6000000), origin='ul',
            res=[1500, 1000, 500, 300, 150, 100])

    def test_bbox(self):
        assert self.grid.bbox == (3300000, 5300000, 3900000, 6000000)

    def test_tile_bbox(self):
        # ul origin: tile row 0 starts at the top; unlimited bboxes may
        # extend beyond the grid bbox to the right/bottom
        eq_(self.grid.tile_bbox((0, 0, 0)),
            (3300000.0, 5616000.0, 3684000.0, 6000000.0))
        eq_(self.grid.tile_bbox((1, 0, 0)),
            (3684000.0, 5616000.0, 4068000.0, 6000000.0))
        eq_(self.grid.tile_bbox((1, 1, 0)),
            (3684000.0, 5232000.0, 4068000.0, 5616000.0))

    def test_tile(self):
        x, y = 3310000, 5990000
        eq_(self.grid.tile(x, y, 0), (0, 0, 0))
        eq_(self.grid.tile(x, y, 1), (0, 0, 1))
        eq_(self.grid.tile(x, y, 2), (0, 0, 2))
        x, y = 3890000, 5310000
        eq_(self.grid.tile(x, y, 0), (1, 1, 0))
        eq_(self.grid.tile(x, y, 1), (2, 2, 1))
        eq_(self.grid.tile(x, y, 2), (4, 5, 2))

    def test_grids(self):
        assert_grid_size(self.grid, 0, (2, 2))
        assert_grid_size(self.grid, 1, (3, 3))
        assert_grid_size(self.grid, 2, (5, 6))

    def test_closest_level(self):
        assert self.grid.closest_level(1500) == 0
        assert self.grid.closest_level(1000) == 1
        assert self.grid.closest_level(900) == 1
        assert self.grid.closest_level(600) == 2

    def test_adjacent_tile_bbox(self):
        # with ul origin, (0,1) sits below (0,0) — note the flipped y check
        t1 = self.grid.tile_bbox((0, 0, 1))
        t2 = self.grid.tile_bbox((1, 0, 1))
        t3 = self.grid.tile_bbox((0, 1, 1))
        assert t1[1] == t2[1]
        assert t1[3] == t2[3]
        assert t1[2] == t2[0]
        assert t1[0] == t3[0]
        assert t1[1] == t3[3]
class TestClosestLevelTinyResFactor(object):
    """closest_level with many closely spaced explicit resolutions."""

    def setup(self):
        self.grid = TileGrid(SRS(31467),
            bbox=[420000,30000,900000,350000], origin='ul',
            res=[4000,3750,3500,3250,3000,2750,2500,2250,2000,1750,1500,1250,1000,750,650,500,250,100,50,20,10,5,2.5,2,1.5,1,0.5],
        )

    def test_closest_level(self):
        # each requested resolution maps to its exact level; values above
        # the first resolution clamp to level 0
        eq_(self.grid.closest_level(5000), 0)
        eq_(self.grid.closest_level(4000), 0)
        eq_(self.grid.closest_level(3750), 1)
        eq_(self.grid.closest_level(3500), 2)
        eq_(self.grid.closest_level(3250), 3)
        eq_(self.grid.closest_level(3000), 4)
class TestOrigins(object):
    """supports_access_with_origin: whether a grid can be addressed from the
    other origin without tile offsets."""

    def test_basic(self):
        grid = tile_grid(4326, bbox=(-180, -90, 180, 90), origin='ll')
        assert grid.supports_access_with_origin('ll')
        assert not grid.supports_access_with_origin('ul')
        grid = tile_grid(4326, bbox=(-180, -90, 180, 90), origin='ul')
        assert not grid.supports_access_with_origin('ll')
        assert grid.supports_access_with_origin('ul')

    def test_basic_no_level_zero(self):
        # with min_res starting past level 0, both origins happen to align
        grid = tile_grid(4326, bbox=(-180, -90, 180, 90), origin='ll',
            min_res=360/256/2)
        assert grid.supports_access_with_origin('ll')
        assert grid.supports_access_with_origin('ul')
        grid = tile_grid(4326, bbox=(-180, -90, 180, 90), origin='ul',
            min_res=360/256/2)
        assert grid.supports_access_with_origin('ll')
        assert grid.supports_access_with_origin('ul')

    def test_basic_mixed_name(self):
        # compass aliases: 'sw' == 'll', 'nw' == 'ul'
        grid = tile_grid(4326, bbox=(-180, -90, 180, 90), origin='ll')
        assert grid.supports_access_with_origin('sw')
        assert not grid.supports_access_with_origin('nw')
        grid = tile_grid(4326, bbox=(-180, -90, 180, 90), origin='ul')
        assert not grid.supports_access_with_origin('sw')
        assert grid.supports_access_with_origin('nw')

    def test_custom_with_match(self):
        # height is divisible by res*tile_size
        grid = tile_grid(4326, bbox=(0, 0, 1024, 1024), origin='ll',
            min_res=1)
        assert grid.supports_access_with_origin('ll')
        assert grid.supports_access_with_origin('ul')
        grid = tile_grid(4326, bbox=(0, 0, 1024, 1024), origin='ul',
            min_res=1)
        assert grid.supports_access_with_origin('ll')
        assert grid.supports_access_with_origin('ul')

    def test_custom_without_match(self):
        # height is not divisible by res*tile_size
        grid = tile_grid(4326, bbox=(0, 0, 1024, 1000), origin='ll',
            min_res=1)
        assert grid.supports_access_with_origin('ll')
        assert not grid.supports_access_with_origin('ul')
        grid = tile_grid(4326, bbox=(0, 0, 1024, 1000), origin='ul',
            min_res=1)
        assert not grid.supports_access_with_origin('ll')
        assert grid.supports_access_with_origin('ul')

    def test_custom_res_with_match(self):
        grid = tile_grid(4326, bbox=(0, 0, 1024, 1024), origin='ll',
            res=[1, 0.5, 0.25])
        assert grid.supports_access_with_origin('ll')
        assert grid.supports_access_with_origin('ul')
        grid = tile_grid(4326, bbox=(0, 0, 1024, 1024), origin='ul',
            res=[1, 0.5, 0.25])
        assert grid.supports_access_with_origin('ll')
        assert grid.supports_access_with_origin('ul')

    def test_custom_res_without_match(self):
        grid = tile_grid(4326, bbox=(0, 0, 1024, 1023), origin='ll',
            res=[1, 0.5, 0.25])
        assert grid.supports_access_with_origin('ll')
        assert not grid.supports_access_with_origin('ul')
        grid = tile_grid(4326, bbox=(0, 0, 1024, 1023), origin='ul',
            res=[1, 0.5, 0.25])
        assert not grid.supports_access_with_origin('ll')
        assert grid.supports_access_with_origin('ul')
class TestFixedResolutionsTileGrid(TileGridTest):
    """Grid with an explicit, non-factor resolution list."""

    def setup(self):
        self.res = [1000.0, 500.0, 200.0, 100.0, 50.0, 20.0, 5.0]
        bbox = (3250000, 5230000, 3930000, 6110000)
        self.grid = TileGrid(SRS(31467), bbox=bbox, res=self.res)

    def test_resolution(self):
        for level, res in enumerate(self.res):
            assert res == self.grid.resolution(level)

    def test_closest_level(self):
        assert self.grid.closest_level(2000) == 0
        assert self.grid.closest_level(1000) == 0
        assert self.grid.closest_level(950) == 0
        assert self.grid.closest_level(210) == 2

    def test_affected_tiles(self):
        req_bbox = (3250000, 5230000, 3930000, 6110000)
        self.grid.max_shrink_factor = 10
        bbox, grid_size, tiles = \
            self.grid.get_affected_tiles(req_bbox, (256, 256))
        # result bbox is expanded to full 1000m/256px tiles (3x4 tiles)
        assert bbox == (req_bbox[0], req_bbox[1],
                        req_bbox[0]+1000*256*3, req_bbox[1]+1000*256*4)
        assert grid_size == (3, 4)
        tiles = list(tiles)
        assert tiles == [(0, 3, 0), (1, 3, 0), (2, 3, 0),
                         (0, 2, 0), (1, 2, 0), (2, 2, 0),
                         (0, 1, 0), (1, 1, 0), (2, 1, 0),
                         (0, 0, 0), (1, 0, 0), (2, 0, 0),
                         ]

    def test_affected_tiles_2(self):
        req_bbox = (3250000, 5230000, 3930000, 6110000)
        # with a small shrink factor this request cannot be satisfied
        self.grid.max_shrink_factor = 2.0
        try:
            bbox, grid_size, tiles = \
                self.grid.get_affected_tiles(req_bbox, (256, 256))
        except NoTiles:
            pass
        else:
            assert False, 'got no exception'

    def test_grid(self):
        for level, grid_size in [(0, (3, 4)), (1, (6, 7)), (2, (14, 18))]:
            yield self.check_grid, level, grid_size

    def test_tile_bbox(self):
        tile_bbox = self.grid.tile_bbox((0, 0, 0))  # w: 1000x256
        assert tile_bbox == (3250000.0, 5230000.0, 3506000.0, 5486000.0)
        tile_bbox = self.grid.tile_bbox((0, 0, 1))  # w: 500x256
        assert tile_bbox == (3250000.0, 5230000.0, 3378000.0, 5358000.0)
        tile_bbox = self.grid.tile_bbox((0, 0, 2))  # w: 200x256
        assert tile_bbox == (3250000.0, 5230000.0, 3301200.0, 5281200.0)
class TestGeodeticTileGrid(TileGridTest):
    """Geodetic grid basics: auto resolution, tile lookup, affected tiles."""

    def setup(self):
        self.grid = TileGrid(is_geodetic=True, )

    def test_auto_resolution(self):
        grid = TileGrid(is_geodetic=True, bbox=(-10, 30, 10, 40), tile_size=(20, 20))
        # level-0 tile covers the full width; bbox extends past the grid top
        tile_bbox = grid.tile_bbox((0, 0, 0))
        assert tile_bbox == (-10, 30, 10, 50)
        assert grid.resolution(0) == 1.0

    def test_grid(self):
        for level, grid_size in [(0, (1, 1)), (1, (2, 1)), (2, (4, 2))]:
            yield self.check_grid, level, grid_size

    def test_adjacent_tile_bbox(self):
        grid = TileGrid(is_geodetic=True, bbox=(-10, 30, 10, 40), tile_size=(20, 20))
        t1 = grid.tile_bbox((0, 0, 2))
        t2 = grid.tile_bbox((1, 0, 2))
        t3 = grid.tile_bbox((0, 1, 2))
        assert t1[1] == t2[1]
        assert t1[3] == t2[3]
        assert t1[2] == t2[0]
        assert t1[0] == t3[0]
        assert t1[2] == t3[2]
        assert t1[3] == t3[1]

    def test_w_resolution(self):
        res = [1, 0.5, 0.2]
        grid = TileGrid(is_geodetic=True, bbox=(-10, 30, 10, 40), tile_size=(20, 20), res=res)
        assert grid.grid_sizes[0] == (1, 1)
        assert grid.grid_sizes[1] == (2, 1)
        assert grid.grid_sizes[2] == (5, 3)

    def test_tile(self):
        assert self.grid.tile(-180, -90, 0) == (0, 0, 0)
        assert self.grid.tile(-180, -90, 1) == (0, 0, 1)
        assert self.grid.tile(-180, -90, 2) == (0, 0, 2)
        # points just inside the upper-right corner still map to valid tiles
        assert self.grid.tile(180-0.001, 90-0.001, 0) == (0, 0, 0)
        assert self.grid.tile(10, 50, 1) == (1, 0, 1)

    def test_affected_tiles(self):
        bbox, grid_size, tiles = \
            self.grid.get_affected_tiles((-45,-45,45,45), (512,512))
        assert self.grid.grid_sizes[3] == (8, 4)
        assert bbox == (-45.0, -45.0, 45.0, 45.0)
        assert grid_size == (2, 2)
        tiles = list(tiles)
        assert tiles == [(3, 2, 3), (4, 2, 3), (3, 1, 3), (4, 1, 3)]
class TestTileGrid(object):
    """Edge cases: coordinates and requests outside the grid bounds."""

    def test_tile_out_of_grid_bounds(self):
        # out-of-bounds coordinates yield negative tile indices
        grid = TileGrid(is_geodetic=True)
        eq_(grid.tile(-180.01, 50, 1), (-1, 0, 1))

    def test_affected_tiles_out_of_grid_bounds(self):
        grid = TileGrid()
        # bbox from open layers
        req_bbox = (-30056262.509599999, -10018754.170400001, -20037508.339999996, -0.00080000050365924835)
        bbox, grid_size, tiles = \
            grid.get_affected_tiles(req_bbox, (256, 256))
        assert_almost_equal_bbox(bbox, req_bbox)
        eq_(grid_size, (1, 1))
        tiles = list(tiles)
        # out-of-grid tiles are reported as None placeholders
        eq_(tiles, [None])

    def test_broken_bbox(self):
        grid = TileGrid()
        # broken request from "ArcGIS Client Using WinInet"
        req_bbox = (-10000855.0573254,2847125.18913603,-9329367.42767611,4239924.78564583)
        try:
            grid.get_affected_tiles(req_bbox, (256, 256), req_srs=SRS(31467))
        except TransformationError:
            pass
        else:
            assert False, 'Expected TransformationError'
class TestTileGridThreshold(object):
    """threshold_res overrides the stretch-factor based level selection."""

    def test_lower_bound(self):
        # thresholds near the next lower res value
        grid = TileGrid(res=[1000, 500, 250, 100, 50], threshold_res=[300, 110])
        grid.stretch_factor = 1.1
        eq_(grid.closest_level(1100), 0)
        # regular transition (w/stretch factor)
        eq_(grid.closest_level(950), 0)
        eq_(grid.closest_level(800), 1)
        eq_(grid.closest_level(500), 1)
        # transition at threshold
        eq_(grid.closest_level(301), 1)
        eq_(grid.closest_level(300), 2)
        eq_(grid.closest_level(250), 2)
        # transition at threshold
        eq_(grid.closest_level(111), 2)
        eq_(grid.closest_level(110), 3)
        eq_(grid.closest_level(100), 3)
        # regular transition (w/stretch factor)
        eq_(grid.closest_level(92), 3)
        eq_(grid.closest_level(90), 4)

    def test_upper_bound(self):
        # thresholds near the next upper res value (within threshold)
        grid = TileGrid(res=[1000, 500, 250, 100, 50], threshold_res=[495, 240])
        grid.stretch_factor = 1.1
        eq_(grid.closest_level(1100), 0)
        # regular transition (w/stretch factor)
        eq_(grid.closest_level(950), 0)
        eq_(grid.closest_level(800), 1)
        eq_(grid.closest_level(500), 1)
        # transition at threshold
        eq_(grid.closest_level(496), 1)
        eq_(grid.closest_level(495), 2)
        eq_(grid.closest_level(250), 2)
        # transition at threshold (within stretch factor)
        eq_(grid.closest_level(241), 2)
        eq_(grid.closest_level(240), 3)
        eq_(grid.closest_level(100), 3)
        # regular transition (w/stretch factor)
        eq_(grid.closest_level(92), 3)
        eq_(grid.closest_level(90), 4)

    def test_above_first_res(self):
        # thresholds above the first resolution have no effect on level 0
        grid = TileGrid(res=[1000, 500, 250, 100, 50], threshold_res=[1100, 750])
        grid.stretch_factor = 1.1
        eq_(grid.closest_level(1200), 0)
        eq_(grid.closest_level(1100), 0)
        eq_(grid.closest_level(1000), 0)
        eq_(grid.closest_level(800), 0)
        eq_(grid.closest_level(750.1), 0)
        eq_(grid.closest_level(750), 1)
class TestCreateTileList(object):
def test(self):
xs = list(range(-1, 2))
ys = list(range(-2, 3))
grid_size = (1, 2)
tiles = list(_create_tile_list(xs, ys, 3, grid_size))
expected = [None, None, None,
None, None, None,
None, (0, 0, 3), None,
None, (0, 1, 3), None,
None, None, None]
eq_(expected, tiles)
def _create_tile_list(self, xs, ys, level, grid_size):
x_limit = grid_size[0]
y_limit = grid_size[1]
for y in ys:
for x in xs:
if x < 0 or y < 0 or x >= x_limit or y >= y_limit:
yield None
else:
yield x, y, level
class TestBBOXIntersects(object):
    """bbox_intersects is symmetric and false for merely touching bboxes."""

    def _check_intersecting(self, b1, b2):
        # intersection is symmetric: verify both argument orders
        assert bbox_intersects(b1, b2)
        assert bbox_intersects(b2, b1)

    def _check_disjoint(self, b1, b2):
        assert not bbox_intersects(b1, b2)
        assert not bbox_intersects(b2, b1)

    def test_no_intersect(self):
        self._check_disjoint((0, 0, 10, 10), (20, 20, 30, 30))

    def test_no_intersect_only_vertical(self):
        self._check_disjoint((0, 0, 10, 10), (20, 0, 30, 10))

    def test_no_intersect_touch_point(self):
        # sharing a single corner point does not count as intersecting
        self._check_disjoint((0, 0, 10, 10), (10, 10, 20, 20))

    def test_no_intersect_touch_side(self):
        # sharing an edge does not count as intersecting
        self._check_disjoint((0, 0, 10, 10), (0, 10, 10, 20))

    def test_full_contains(self):
        self._check_intersecting((0, 0, 10, 10), (2, 2, 8, 8))

    def test_overlap(self):
        self._check_intersecting((0, 0, 10, 10), (-5, -5, 5, 5))
class TestBBOXContains(object):
    """bbox_contains: strict containment; touching from the inside is fine."""

    def _check_contains(self, outer, inner):
        # containment is one-directional: outer contains inner, never the reverse
        assert bbox_contains(outer, inner)
        assert not bbox_contains(inner, outer)

    def _check_neither_contains(self, b1, b2):
        assert not bbox_contains(b1, b2)
        assert not bbox_contains(b2, b1)

    def test_no_intersect(self):
        self._check_neither_contains((0, 0, 10, 10), (20, 20, 30, 30))

    def test_no_intersect_only_vertical(self):
        self._check_neither_contains((0, 0, 10, 10), (20, 0, 30, 10))

    def test_no_intersect_touch_point(self):
        self._check_neither_contains((0, 0, 10, 10), (10, 10, 20, 20))

    def test_no_intersect_touch_side(self):
        self._check_neither_contains((0, 0, 10, 10), (0, 10, 10, 20))

    def test_full_contains(self):
        self._check_contains((0, 0, 10, 10), (2, 2, 8, 8))

    def test_contains_touch(self):
        # inner bbox may touch the outer boundary and still be contained
        self._check_contains((0, 0, 10, 10), (0, 0, 8, 8))

    def test_overlap(self):
        # partial overlap is not containment in either direction
        self._check_neither_contains((0, 0, 10, 10), (-5, -5, 5, 5))
def assert_almost_equal_bbox(bbox1, bbox2, places=2):
    """Assert that two bboxes match coordinate-wise to *places* decimals."""
    message = '%s != %s' % (bbox1, bbox2)
    for left, right in zip(bbox1, bbox2):
        assert_almost_equal(left, right, places, msg=message)
class TestResolutionRange(object):
    """ResolutionRange.contains plus the resolution_range factory and merging."""

    def test_meter(self):
        res_range = ResolutionRange(1000, 10)
        assert not res_range.contains([0, 0, 100000, 100000], (10, 10), SRS(900913))
        assert not res_range.contains([0, 0, 100000, 100000], (99, 99), SRS(900913))
        # min is exclusive but there is a delta
        assert res_range.contains([0, 0, 100000, 100000], (100, 100), SRS(900913))
        assert res_range.contains([0, 0, 100000, 100000], (1000, 1000), SRS(900913))
        # max is inclusive
        assert res_range.contains([0, 0, 100000, 100000], (10000, 10000), SRS(900913))
        assert not res_range.contains([0, 0, 100000, 100000], (10001, 10001), SRS(900913))

    def test_deg(self):
        # geodetic SRS: resolutions are converted from degrees
        res_range = ResolutionRange(100000, 1000)
        assert not res_range.contains([0, 0, 10, 10], (10, 10), SRS(4326))
        assert not res_range.contains([0, 0, 10, 10], (11, 11), SRS(4326))
        assert res_range.contains([0, 0, 10, 10], (12, 12), SRS(4326))
        assert res_range.contains([0, 0, 10, 10], (100, 100), SRS(4326))
        assert res_range.contains([0, 0, 10, 10], (1000, 1000), SRS(4326))
        assert res_range.contains([0, 0, 10, 10], (1100, 1100), SRS(4326))
        assert not res_range.contains([0, 0, 10, 10], (1200, 1200), SRS(4326))

    def test_no_min(self):
        res_range = ResolutionRange(None, 10)
        assert res_range.contains([0, 0, 100000, 100000], (1, 1), SRS(900913))
        assert res_range.contains([0, 0, 100000, 100000], (10, 10), SRS(900913))
        assert res_range.contains([0, 0, 100000, 100000], (99, 99), SRS(900913))
        assert res_range.contains([0, 0, 100000, 100000], (100, 100), SRS(900913))
        assert res_range.contains([0, 0, 100000, 100000], (1000, 1000), SRS(900913))
        # max is inclusive
        assert res_range.contains([0, 0, 100000, 100000], (10000, 10000), SRS(900913))
        assert not res_range.contains([0, 0, 100000, 100000], (10001, 10001), SRS(900913))

    def test_no_max(self):
        res_range = ResolutionRange(1000, None)
        assert not res_range.contains([0, 0, 100000, 100000], (10, 10), SRS(900913))
        assert not res_range.contains([0, 0, 100000, 100000], (99, 99), SRS(900913))
        # min is exclusive but there is a delta
        assert res_range.contains([0, 0, 100000, 100000], (100, 100), SRS(900913))
        assert res_range.contains([0, 0, 100000, 100000], (1000, 1000), SRS(900913))
        assert res_range.contains([0, 0, 100000, 100000], (10000, 10000), SRS(900913))
        assert res_range.contains([0, 0, 100000, 100000], (10001, 10001), SRS(900913))
        assert res_range.contains([0, 0, 100000, 100000], (1000000, 100000), SRS(900913))

    def test_none(self):
        # no limits at all yields no range object
        res_range = resolution_range(None, None)
        assert res_range == None

    def test_from_scale(self):
        res_range = resolution_range(max_scale=1e6, min_scale=1e3)
        assert_almost_equal(res_range.min_res, 280)
        assert_almost_equal(res_range.max_res, 0.28)

    @raises(ValueError)
    def check_invalid_combination(self, min_res, max_res, max_scale, min_scale):
        resolution_range(min_res, max_res, max_scale, min_scale)

    def test_invalid_combinations(self):
        # mixing res-based and scale-based limits must raise ValueError
        yield self.check_invalid_combination, 10, None, 10, None
        yield self.check_invalid_combination, 10, 20, 10, None
        yield self.check_invalid_combination, 10, None, 10, 20
        yield self.check_invalid_combination, 10, 20, 10, 20

    @raises(AssertionError)
    def test_wrong_order_res(self):
        resolution_range(min_res=10, max_res=100)

    @raises(AssertionError)
    def test_wrong_order_scale(self):
        resolution_range(min_scale=100, max_scale=10)

    def test_merge_resolutions(self):
        # merging with an open-ended range in both directions yields None
        res_range = merge_resolution_range(
            ResolutionRange(None, 10), ResolutionRange(1000, None))
        eq_(res_range, None)
        res_range = merge_resolution_range(
            ResolutionRange(10000, 10), ResolutionRange(1000, None))
        eq_(res_range.min_res, 10000)
        eq_(res_range.max_res, None)
        res_range = merge_resolution_range(
            ResolutionRange(10000, 10), ResolutionRange(1000, 1))
        eq_(res_range.min_res, 10000)
        eq_(res_range.max_res, 1)
        res_range = merge_resolution_range(
            ResolutionRange(10000, 10), ResolutionRange(None, None))
        eq_(res_range, None)
        res_range = merge_resolution_range(
            None, ResolutionRange(None, None))
        eq_(res_range, None)
        res_range = merge_resolution_range(
            ResolutionRange(10000, 10), None)
        eq_(res_range, None)

    def test_eq(self):
        assert resolution_range(None, None) == resolution_range(None, None)
        assert resolution_range(None, 100) == resolution_range(None, 100.0)
        assert resolution_range(None, 100) != resolution_range(None, 100.1)
        assert resolution_range(1000, 100) == resolution_range(1000, 100)
        assert resolution_range(1000, 100) == resolution_range(1000.0, 100)
        assert resolution_range(1000, 100) != resolution_range(1000.1, 100)
class TestGridSubset(object):
    """is_subset_of: whether tiles of one grid can be reused for another."""

    def test_different_srs(self):
        g1 = tile_grid(SRS(4326))
        g2 = tile_grid(SRS(3857))
        assert not g1.is_subset_of(g2)

    def test_same_grid(self):
        g1 = tile_grid(SRS(900913))
        assert g1.is_subset_of(g1)

    def test_similar_srs(self):
        # 900913 is the legacy code for web mercator (3857)
        g1 = tile_grid(SRS(900913))
        g2 = tile_grid(SRS(3857))
        assert g1.is_subset_of(g2)

    def test_less_levels(self):
        g1 = tile_grid(SRS(3857), num_levels=10)
        g2 = tile_grid(SRS(3857))
        assert g1.is_subset_of(g2)

    def test_more_levels(self):
        g1 = tile_grid(SRS(3857))
        g2 = tile_grid(SRS(3857), num_levels=10)
        assert not g1.is_subset_of(g2)

    def test_res_subset(self):
        # g1's resolutions are a subset of g2's, even though their level-0
        # tile bboxes differ
        g1 = tile_grid(SRS(3857), res=[50000, 10000, 100, 1])
        g2 = tile_grid(SRS(3857), res=[100000, 50000, 10000, 1000, 100, 10, 1, 0.5])
        assert g1.tile_bbox((0, 0, 0)) != g2.tile_bbox((0, 0, 0))
        assert g1.is_subset_of(g2)
        g1 = tile_grid(SRS(3857), bbox=[0, 0, 20037508.342789244, 20037508.342789244],
            min_res=78271.51696402048, num_levels=18)
        g2 = tile_grid(SRS(3857), origin='nw')
        assert g1.is_subset_of(g2)

    def test_subbbox(self):
        g2 = tile_grid(SRS(4326))
        g1 = tile_grid(SRS(4326), num_levels=10, min_res=g2.resolutions[3], bbox=(0, 0, 180, 90))
        assert g1.is_subset_of(g2)

    def test_incompatible_subbbox(self):
        # bbox not aligned with g2's tile boundaries -> not a subset
        g2 = tile_grid(SRS(4326))
        g1 = tile_grid(SRS(4326), min_res=g2.resolutions[3], num_levels=10, bbox=(-10, 0, 180, 90))
        assert not g1.is_subset_of(g2)

    def test_tile_size(self):
        g1 = tile_grid(SRS(4326), tile_size=(128, 128))
        g2 = tile_grid(SRS(4326))
        assert not g1.is_subset_of(g2)

    def test_non_matching_bboxfor_origins(self):
        g1 = tile_grid(SRS(21781), bbox=[420000, 30000, 900000, 360000],
            res=[250], origin='nw')
        g2 = tile_grid(SRS(21781), bbox=[420000, 30000, 900000, 360000],
            res=[250], origin='sw')
        assert not g1.is_subset_of(g2)

    def test_no_tile_errors(self):
        # g1 is not a subset, check that we don't get any NoTile errors
        g1 = tile_grid(SRS(3857), res=[100000, 50000, 10000, 1000, 100, 10, 1, 0.5])
        g2 = tile_grid(SRS(3857), res=[100, 1])
        assert not g1.is_subset_of(g2)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for filesystemio."""
# pytype: skip-file
from __future__ import absolute_import
import io
import logging
import multiprocessing
import os
import threading
import unittest
from builtins import range
from apache_beam.io import filesystemio
# Module-scoped logger, named after this test module.
_LOGGER = logging.getLogger(__name__)
class FakeDownloader(filesystemio.Downloader):
  """In-memory Downloader test double.

  Serves reads from a bytes buffer and records the size of the most
  recent get_range() request so tests can observe read granularity.
  """
  def __init__(self, data):
    self._data = data
    self.last_read_size = -1  # -1 until the first get_range() call

  @property
  def size(self):
    return len(self._data)

  def get_range(self, start, end):
    self.last_read_size = end - start
    return self._data[start:end]
class FakeUploader(filesystemio.Uploader):
  """In-memory Uploader test double.

  Accumulates written bytes in ``data`` and records the size of the most
  recent put() so tests can observe write granularity.
  """
  def __init__(self):
    self.data = b''
    self.last_write_size = -1  # -1 until the first put() call
    self.finished = False  # set by finish(); put() is forbidden afterwards

  def last_error(self):
    # This fake never fails.
    return None

  def put(self, data):
    assert not self.finished
    # ``data`` arrives as a memoryview; copy it out of the caller's buffer.
    self.data += data.tobytes()
    self.last_write_size = len(data)

  def finish(self):
    self.finished = True
class TestDownloaderStream(unittest.TestCase):
  """Tests for DownloaderStream, a read-only adapter over a Downloader."""

  def test_file_attributes(self):
    # The stream advertises itself as a readable, seekable binary file.
    stream = filesystemio.DownloaderStream(FakeDownloader(data=None))
    self.assertEqual('rb', stream.mode)
    self.assertTrue(stream.readable())
    self.assertFalse(stream.writable())
    self.assertTrue(stream.seekable())

  def test_read_empty(self):
    # An empty source yields b'' immediately.
    stream = filesystemio.DownloaderStream(FakeDownloader(data=b''))
    self.assertEqual(b'', stream.read())

  def test_read(self):
    # Read size is exactly what was passed to read() (unbuffered).
    payload = b'abcde'
    fake = FakeDownloader(payload)
    stream = filesystemio.DownloaderStream(fake)
    self.assertEqual(payload[0:1], stream.read(1))
    self.assertEqual(1, fake.last_read_size)
    self.assertEqual(payload[1:], stream.read())
    self.assertEqual(len(payload) - 1, fake.last_read_size)

  def test_read_buffered(self):
    # Verify that buffering works and is reading ahead.
    payload = b'abcde'
    fake = FakeDownloader(payload)
    buffer_size = 2
    reader = io.BufferedReader(
        filesystemio.DownloaderStream(fake), buffer_size)
    self.assertEqual(payload[0:1], reader.read(1))
    self.assertEqual(buffer_size, fake.last_read_size)
    self.assertEqual(payload[1:], reader.read())
class TestUploaderStream(unittest.TestCase):
  """Tests for UploaderStream, a write-only adapter over an Uploader."""

  def test_file_attributes(self):
    # The stream advertises itself as a writable, non-seekable binary file.
    uploader = FakeUploader()
    stream = filesystemio.UploaderStream(uploader)
    self.assertEqual(stream.mode, 'wb')
    self.assertFalse(stream.readable())
    self.assertTrue(stream.writable())
    self.assertFalse(stream.seekable())

  def test_write_empty(self):
    uploader = FakeUploader()
    stream = filesystemio.UploaderStream(uploader)
    data = b''
    stream.write(memoryview(data))
    self.assertEqual(uploader.data, data)

  def test_write(self):
    data = b'abcde'
    uploader = FakeUploader()
    stream = filesystemio.UploaderStream(uploader)
    # Unbuffered writes.
    stream.write(memoryview(data[0:1]))
    self.assertEqual(uploader.data[0], data[0])
    self.assertEqual(uploader.last_write_size, 1)
    stream.write(memoryview(data[1:]))
    self.assertEqual(uploader.data, data)
    self.assertEqual(uploader.last_write_size, len(data) - 1)

  def test_write_buffered(self):
    data = b'abcde'
    uploader = FakeUploader()
    buffer_size = 2
    stream = io.BufferedWriter(
        filesystemio.UploaderStream(uploader), buffer_size)
    # Verify that buffering works: doesn't write to uploader until buffer is
    # filled.
    stream.write(data[0:1])
    self.assertEqual(-1, uploader.last_write_size)
    stream.write(data[1:])
    # close() flushes the remaining buffered bytes through to the uploader.
    stream.close()
    self.assertEqual(data, uploader.data)
class TestPipeStream(unittest.TestCase):
  """Tests for PipeStream, which adapts a multiprocessing pipe connection
  to a readable, tell()-able stream."""

  def _read_and_verify(self, stream, expected, buffer_size, success):
    # Worker-thread body: drain the stream in buffer_size chunks and check
    # content, tell(), and the short-read contract.
    data_list = []
    bytes_read = 0
    seen_last_block = False
    while True:
      data = stream.read(buffer_size)
      self.assertLessEqual(len(data), buffer_size)
      if len(data) < buffer_size:
        # Test the constraint that the pipe stream returns less than the buffer
        # size only when at the end of the stream.
        if data:
          self.assertFalse(seen_last_block)
        seen_last_block = True
      if not data:
        break
      data_list.append(data)
      bytes_read += len(data)
      self.assertEqual(stream.tell(), bytes_read)
    self.assertEqual(b''.join(data_list), expected)
    # Report success back to the main thread; an exception above would
    # otherwise only kill this worker thread silently.
    success[0] = True

  def _read_and_seek(self, stream, expected, buffer_size, success):
    # Like _read_and_verify, but also exercises the limited seek support:
    # only rewinding to the position of the last read is allowed.
    data_list = []
    bytes_read = 0
    while True:
      data = stream.read(buffer_size)
      # Test bad seek positions.
      with self.assertRaises(NotImplementedError):
        stream.seek(bytes_read + 1)
      with self.assertRaises(NotImplementedError):
        stream.seek(bytes_read - 1)
      # Rewind stream and test that it reads back the same data again.
      stream.seek(bytes_read)
      data2 = stream.read(buffer_size)
      self.assertEqual(data, data2)
      if not data:
        break
      data_list.append(data)
      bytes_read += len(data)
      self.assertEqual(stream.tell(), bytes_read)
    self.assertEqual(len(b''.join(data_list)), len(expected))
    self.assertEqual(b''.join(data_list), expected)
    success[0] = True

  def test_pipe_stream(self):
    # Feed random blocks of widely varying sizes (4**0 .. 4**11 bytes)
    # through the pipe while a reader thread consumes them.
    block_sizes = list(4**i for i in range(0, 12))
    data_blocks = list(os.urandom(size) for size in block_sizes)
    expected = b''.join(data_blocks)
    buffer_sizes = [100001, 512 * 1024, 1024 * 1024]
    for buffer_size in buffer_sizes:
      for target in [self._read_and_verify, self._read_and_seek]:
        _LOGGER.info('buffer_size=%s, target=%s' % (buffer_size, target))
        parent_conn, child_conn = multiprocessing.Pipe()
        stream = filesystemio.PipeStream(child_conn)
        success = [False]
        child_thread = threading.Thread(
            target=target, args=(stream, expected, buffer_size, success))
        child_thread.start()
        for data in data_blocks:
          parent_conn.send_bytes(data)
        parent_conn.close()
        child_thread.join()
        self.assertTrue(success[0], 'error in test thread')
if __name__ == '__main__':
  # INFO level makes the per-iteration progress log lines visible when the
  # module is run directly.
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
| |
# -*- coding: utf-8 -*-
import time
import itertools
import httplib as http
from collections import Counter
from flask import request
from modularodm.exceptions import ValidationValueError
from framework import forms
from framework import status
from framework.auth import cas
from framework.flask import redirect # VOL-aware redirect
from framework.sessions import session
from framework.auth import User, get_user
from framework.exceptions import HTTPError
from framework.auth.signals import user_registered
from framework.auth.core import generate_confirm_token
from framework.auth.decorators import collect_auth, must_be_logged_in
from framework.auth.forms import PasswordForm, SetEmailAndPasswordForm
from framework.transactions.handlers import no_auto_transaction
from website import mails
from website import language
from website import security
from website import settings
from website.models import Node
from website.profile import utils
from website.project.model import has_anonymous_link
from website.util import web_url_for, is_json_request
from website.project.signals import unreg_contributor_added
from website.util.permissions import expand_permissions, ADMIN
from website.project.decorators import (must_have_permission, must_be_valid_project,
must_not_be_registration, must_be_contributor_or_public, must_be_contributor)
@collect_auth
@must_be_valid_project(retractions_valid=True)
def get_node_contributors_abbrev(auth, node, **kwargs):
    """Return an abbreviated listing of a node's visible contributors.

    At most ``max_count`` (default 3) contributors are returned, each with
    a display separator ('' for the last shown, '&' before the last or
    before the "others" suffix, ',' otherwise). ``others_count`` is the
    number of contributors not shown ('' when all fit).

    :raises: HTTPError(403) when the viewer is anonymous via a view-only
        link or cannot view the node.
    """
    anonymous = has_anonymous_link(node, auth)
    max_count = kwargs.get('max_count', 3)
    if 'user_ids' in kwargs:
        users = [
            User.load(user_id) for user_id in kwargs['user_ids']
            if user_id in node.visible_contributor_ids
        ]
    else:
        users = node.visible_contributors
    if anonymous or not node.can_view(auth):
        raise HTTPError(http.FORBIDDEN)
    contributors = []
    n_contributors = len(users)
    others_count = ''
    for index, user in enumerate(users[:max_count]):
        if index == max_count - 1 and len(users) > max_count:
            separator = '&'
            # Bug fix: the hidden-contributor count is relative to how many
            # are displayed (max_count), not the hard-coded default of 3.
            others_count = str(n_contributors - max_count)
        elif index == len(users) - 1:
            separator = ''
        elif index == len(users) - 2:
            separator = '&'
        else:
            separator = ','
        contributors.append({
            'user_id': user._primary_key,
            'separator': separator,
        })
    return {
        'contributors': contributors,
        'others_count': others_count,
    }
@collect_auth
@must_be_valid_project(retractions_valid=True)
def get_contributors(auth, node, **kwargs):
    """Return the serialized visible contributors of a node.

    The optional ``limit`` query parameter caps how many contributors are
    returned; when present, the response also carries a ``more`` count of
    the contributors that were cut off.

    :raises: HTTPError(400) for a non-integer ``limit``; HTTPError(403)
        when the viewer is anonymous (view-only link) or cannot view the
        node.
    """
    # Can set limit to only receive a specified number of contributors in a call to this route
    if request.args.get('limit'):
        try:
            limit = int(request.args['limit'])
        except ValueError:
            raise HTTPError(http.BAD_REQUEST, data=dict(
                message_long='Invalid value for "limit": {}'.format(request.args['limit'])
            ))
    else:
        limit = None
    anonymous = has_anonymous_link(node, auth)
    if anonymous or not node.can_view(auth):
        raise HTTPError(http.FORBIDDEN)
    # Limit is either an int or None:
    # if int, contribs list is sliced to specified length
    # if None, contribs list is not sliced
    contribs = utils.serialize_contributors(
        node.visible_contributors[0:limit],
        node=node,
    )
    # Will either return just contributor list or contributor list + 'more' element
    if limit:
        return {
            'contributors': contribs,
            'more': max(0, len(node.visible_contributors) - limit)
        }
    else:
        return {'contributors': contribs}
@must_be_logged_in
@must_be_valid_project
def get_contributors_from_parent(auth, node, **kwargs):
    """Serialize the parent node's visible contributors that are not yet
    visible contributors of ``node``.

    :raises: HTTPError(400) when the node has no parent; HTTPError(403)
        when the caller cannot view the node.
    """
    parent = node.parent_node
    if not parent:
        raise HTTPError(http.BAD_REQUEST)
    if not node.can_view(auth):
        raise HTTPError(http.FORBIDDEN)
    serialized = []
    for candidate in parent.visible_contributors:
        # Skip anyone already visible on this node.
        if candidate._id in node.visible_contributor_ids:
            continue
        serialized.append(utils.add_contributor_json(candidate))
    return {'contributors': serialized}
@must_be_contributor_or_public
def get_most_in_common_contributors(auth, node, **kwargs):
    """Return active users who most often co-contribute with ``auth.user``
    on other nodes but are not yet contributors on this one.

    The optional ``max`` query parameter bounds the number of results;
    a missing or invalid value falls back to
    settings.MAX_MOST_IN_COMMON_LENGTH.
    """
    node_contrib_ids = set(node.contributors._to_primary_keys())
    try:
        n_contribs = int(request.args.get('max', None))
    except (TypeError, ValueError):
        n_contribs = settings.MAX_MOST_IN_COMMON_LENGTH
    # Count how often each user id appears across all nodes the current
    # user contributes to, excluding users already on this node.
    # NOTE: the generator's loop variable shadows the ``node`` argument.
    contrib_counts = Counter(contrib_id
                             for node in auth.user.node__contributed
                             for contrib_id in node.contributors._to_primary_keys()
                             if contrib_id not in node_contrib_ids)
    # Python 2 lazy filter over (user_id, count) pairs: active users only.
    active_contribs = itertools.ifilter(
        lambda c: User.load(c[0]).is_active,
        contrib_counts.most_common()
    )
    limited = itertools.islice(active_contribs, n_contribs)
    contrib_objs = [(User.load(_id), count) for _id, count in limited]
    # Order by descending shared-node count, ties broken by full name.
    contribs = [
        utils.add_contributor_json(most_contrib, auth.user)
        for most_contrib, count in sorted(contrib_objs, key=lambda t: (-t[1], t[0].fullname))
    ]
    return {'contributors': contribs}
@must_be_contributor_or_public
def get_recently_added_contributors(auth, node, **kwargs):
    """Return the current user's recently added collaborators that are
    active and not already contributors on ``node``.

    The optional ``max`` query parameter limits the result count.

    :raises: HTTPError(400) for a non-integer ``max``.
    """
    max_results = request.args.get('max')
    if max_results:
        try:
            max_results = int(max_results)
        except (TypeError, ValueError):
            raise HTTPError(http.BAD_REQUEST)
    if not max_results:
        # No (or zero) limit requested: return the whole history.
        max_results = len(auth.user.recently_added)
    # only include active contributors
    active_contribs = itertools.ifilter(
        lambda c: c.is_active and c._id not in node.contributors,
        auth.user.recently_added
    )
    # Limit to max_results
    limited_contribs = itertools.islice(active_contribs, max_results)
    contribs = [
        utils.add_contributor_json(contrib, auth.user)
        for contrib in limited_contribs
    ]
    return {'contributors': contribs}
@must_be_valid_project  # returns project
@must_be_contributor
@must_not_be_registration
def project_before_remove_contributor(auth, node, **kwargs):
    """Return the confirmation prompts to show before removing a
    contributor (the actual removal is done by project_removecontributor).

    :raises: HTTPError(403) when a non-admin tries to remove someone other
        than herself, or when the removal would drop the last
        bibliographic (visible) contributor.
    """
    contributor = User.load(request.json.get('id'))
    # Forbidden unless user is removing herself
    if not node.has_permission(auth.user, 'admin'):
        if auth.user != contributor:
            raise HTTPError(http.FORBIDDEN)
    if len(node.visible_contributor_ids) == 1 \
            and node.visible_contributor_ids[0] == contributor._id:
        raise HTTPError(http.FORBIDDEN, data={
            'message_long': 'Must have at least one bibliographic contributor'
        })
    # Let addons and other hooks contribute their own warning prompts.
    prompts = node.callback(
        'before_remove_contributor', removed=contributor,
    )
    if auth.user == contributor:
        prompts.insert(
            0,
            'Are you sure you want to remove yourself from this project?'
        )
    return {'prompts': prompts}
@must_be_valid_project  # returns project
@must_be_contributor
@must_not_be_registration
def project_removecontributor(auth, node, **kwargs):
    """Remove a contributor from a node.

    Non-admins may only remove themselves, and the last bibliographic
    (visible) contributor can never be removed. When the user removed
    herself, the response contains a redirect to the dashboard.

    :raises: HTTPError(400) for an unknown user id or when the removal
        would leave the node without an admin; HTTPError(403) when the
        caller lacks permission.
    """
    contributor = User.load(request.json['id'])
    if contributor is None:
        raise HTTPError(http.BAD_REQUEST)
    # Forbidden unless user is removing herself
    if not node.has_permission(auth.user, 'admin'):
        if auth.user != contributor:
            raise HTTPError(http.FORBIDDEN)
    if len(node.visible_contributor_ids) == 1 \
            and node.visible_contributor_ids[0] == contributor._id:
        raise HTTPError(http.FORBIDDEN, data={
            'message_long': 'Must have at least one bibliographic contributor'
        })
    outcome = node.remove_contributor(
        contributor=contributor, auth=auth,
    )
    if outcome:
        if auth.user == contributor:
            status.push_status_message('Removed self from project', 'success')
            return {'redirectUrl': web_url_for('dashboard')}
        status.push_status_message('Contributor removed', 'success')
        return {}
    # remove_contributor refused the removal (no admin would remain).
    raise HTTPError(
        http.BAD_REQUEST,
        data={
            'message_long': (
                '{0} must have at least one contributor with admin '
                'rights'.format(
                    node.project_or_component.capitalize()
                )
            )
        }
    )
def deserialize_contributors(node, user_dicts, auth):
    """View helper that returns a list of User objects from a list of
    serialized users (dicts). The users in the list may be registered or
    unregistered users.

    e.g. ``[{'id': 'abc123', 'registered': True, 'fullname': ..},
            {'id': None, 'registered': False, 'fullname'...},
            {'id': '123ab', 'registered': False, 'fullname': ...}]``

    If a dict represents an unregistered user without an ID, creates a new
    unregistered User record.

    :param Node node: The node to add contributors to
    :param list(dict) user_dicts: List of serialized users in the format above.
    :param Auth auth:
    :returns: list of dicts with 'user', 'visible' and 'permissions' keys,
        in the shape expected by ``Node.add_contributors``.
    """
    # Add the registered contributors
    contribs = []
    for contrib_dict in user_dicts:
        fullname = contrib_dict['fullname']
        visible = contrib_dict['visible']
        email = contrib_dict.get('email')
        if contrib_dict['id']:
            contributor = User.load(contrib_dict['id'])
        else:
            try:
                contributor = User.create_unregistered(
                    fullname=fullname,
                    email=email)
                contributor.save()
            except ValidationValueError:
                # The email already exists: reuse the existing user record
                # rather than creating a duplicate.
                contributor = get_user(email=email)
        # Add unclaimed record if necessary
        if (not contributor.is_registered
                and node._primary_key not in contributor.unclaimed_records):
            contributor.add_unclaimed_record(node=node, referrer=auth.user,
                                             given_name=fullname,
                                             email=email)
            contributor.save()
            # Notify listeners (e.g. finalize_invitation) that an
            # unregistered contributor was added to this node.
            unreg_contributor_added.send(node, contributor=contributor,
                                         auth=auth)
        contribs.append({
            'user': contributor,
            'visible': visible,
            'permissions': expand_permissions(contrib_dict.get('permission'))
        })
    return contribs
@unreg_contributor_added.connect
def finalize_invitation(node, contributor, auth):
    # Signal handler: once an unregistered contributor has been added,
    # send them a claim email (only if an address is on record).
    record = contributor.get_unclaimed_record(node._primary_key)
    if record['email']:
        send_claim_email(record['email'], contributor, node, notify=True)
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def project_contributors_post(auth, node, **kwargs):
    """Add contributors to a node and, optionally, to its child nodes.

    Expects JSON with 'users' (serialized contributor dicts) and
    'node_ids' (children that receive the same contributors).

    :raises: HTTPError(400) when either key is missing.
    """
    user_dicts = request.json.get('users')
    node_ids = request.json.get('node_ids')
    if user_dicts is None or node_ids is None:
        raise HTTPError(http.BAD_REQUEST)
    # Prepare input data for `Node::add_contributors`
    contribs = deserialize_contributors(node, user_dicts, auth=auth)
    node.add_contributors(contributors=contribs, auth=auth)
    node.save()
    # Disconnect listener so unregistered users are only emailed once
    # (the invite email was already triggered for the parent node above).
    unreg_contributor_added.disconnect(finalize_invitation)
    try:
        for child_id in node_ids:
            child = Node.load(child_id)
            child_contribs = deserialize_contributors(
                child, user_dicts, auth=auth
            )
            child.add_contributors(contributors=child_contribs, auth=auth)
            child.save()
    finally:
        # Robustness fix: reconnect the listener even if a child update
        # raises, so one failed request can't permanently disable invite
        # emails for the whole process.
        unreg_contributor_added.connect(finalize_invitation)
    return {'status': 'success'}, 201
@no_auto_transaction
@must_be_valid_project  # injects project
@must_have_permission(ADMIN)
@must_not_be_registration
def project_manage_contributors(auth, node, **kwargs):
    """Reorder and remove contributors.

    :param Auth auth: Consolidated authorization
    :param-json list contributors: Ordered list of contributors represented as
        dictionaries of the form:
        {'id': <id>, 'permission': <One of 'read', 'write', 'admin'>}
    :raises: HTTPError(400) if contributors to be removed are not in list
        or if no admin users would remain after changes were applied
    """
    contributors = request.json.get('contributors')
    # Update permissions and order
    try:
        node.manage_contributors(contributors, auth=auth, save=True)
    except ValueError as error:
        # NOTE: ``error.message`` is Python 2 only (removed in Python 3).
        raise HTTPError(http.BAD_REQUEST, data={'message_long': error.message})
    # If user has removed herself from project, alert; redirect to user
    # dashboard if node is private, else node dashboard
    if not node.is_contributor(auth.user):
        status.push_status_message(
            'You have removed yourself as a contributor from this project',
            'success'
        )
        if node.is_public:
            return {'redirectUrl': node.url}
        return {'redirectUrl': web_url_for('dashboard')}
    # Else if user has revoked her admin permissions, alert and stay on
    # current page
    if not node.has_permission(auth.user, ADMIN):
        status.push_status_message(
            'You have removed your administrative privileges for this project',
            'success'
        )
    # Else stay on current page
    return {}
def get_timestamp():
    """Return the current Unix time, truncated to whole seconds."""
    now = time.time()
    return int(now)
def throttle_period_expired(timestamp, throttle):
    """Return True when no timestamp was recorded, or when more than
    ``throttle`` seconds have passed since ``timestamp``."""
    if timestamp is None:
        return True
    return (get_timestamp() - timestamp) > throttle
def send_claim_registered_email(claimer, unreg_user, node, throttle=24 * 3600):
    """Send the claim emails used when the claimer already has an account.

    The referrer receives a verification link to forward to the claimer,
    and the claimer is told to wait for the referrer to do so.

    :param User claimer: Registered user claiming the unregistered record.
    :param User unreg_user: The unregistered contributor being claimed.
    :param Node node: The project the claim happens on.
    :param int throttle: Minimum number of seconds between claim emails
        for the same unclaimed record.
    :raises: HTTPError(400) when a claim email was already sent within the
        throttle period.
    """
    unclaimed_record = unreg_user.get_unclaimed_record(node._primary_key)
    # roll the valid token for each email, thus user cannot change email and approve a different email address
    timestamp = unclaimed_record.get('last_sent')
    if not throttle_period_expired(timestamp, throttle):
        raise HTTPError(400, data=dict(
            message_long='User account can only be claimed with an existing user once every 24 hours'
        ))
    unclaimed_record['token'] = generate_confirm_token()
    unclaimed_record['claimer_email'] = claimer.username
    unreg_user.save()
    referrer = User.load(unclaimed_record['referrer_id'])
    claim_url = web_url_for(
        'claim_user_registered',
        uid=unreg_user._primary_key,
        pid=node._primary_key,
        token=unclaimed_record['token'],
        _external=True,
    )
    # Send mail to referrer, telling them to forward verification link to claimer
    mails.send_mail(
        referrer.username,
        # Bug fix: attribute was misspelled FORWARD_INVITE_REGiSTERED
        # (lowercase 'i'), which raises AttributeError at send time.
        mails.FORWARD_INVITE_REGISTERED,
        user=unreg_user,
        referrer=referrer,
        node=node,
        claim_url=claim_url,
        fullname=unclaimed_record['name'],
    )
    unclaimed_record['last_sent'] = get_timestamp()
    unreg_user.save()
    # Send mail to claimer, telling them to wait for referrer
    mails.send_mail(
        claimer.username,
        mails.PENDING_VERIFICATION_REGISTERED,
        fullname=claimer.fullname,
        referrer=referrer,
        node=node,
    )
def send_claim_email(email, user, node, notify=True, throttle=24 * 3600):
    """Send an email for claiming a user account. Either sends to the given email
    or the referrer's email, depending on the email address provided.

    :param str email: The address given in the claim user form
    :param User user: The User record to claim.
    :param Node node: The node where the user claimed their account.
    :param bool notify: If True and an email is sent to the referrer, an email
        will also be sent to the invited user about their pending verification.
    :param int throttle: Time period (in seconds) after the referrer is
        emailed during which the referrer will not be emailed again.
    :returns: the address the invite email was actually sent to.
    """
    claimer_email = email.lower().strip()
    unclaimed_record = user.get_unclaimed_record(node._primary_key)
    referrer = User.load(unclaimed_record['referrer_id'])
    claim_url = user.get_claim_url(node._primary_key, external=True)
    # If given email is the same provided by user, just send to that email
    if unclaimed_record.get('email') == claimer_email:
        mail_tpl = mails.INVITE
        to_addr = claimer_email
        unclaimed_record['claimer_email'] = claimer_email
        user.save()
    else:  # Otherwise have the referrer forward the email to the user
        # roll the valid token for each email, thus user cannot change email and approve a different email address
        timestamp = unclaimed_record.get('last_sent')
        if not throttle_period_expired(timestamp, throttle):
            raise HTTPError(400, data=dict(
                message_long='User account can only be claimed with an existing user once every 24 hours'
            ))
        unclaimed_record['last_sent'] = get_timestamp()
        unclaimed_record['token'] = generate_confirm_token()
        unclaimed_record['claimer_email'] = claimer_email
        user.save()
        # Recompute the claim URL: the token was just rolled above, so the
        # URL computed before this branch is stale.
        claim_url = user.get_claim_url(node._primary_key, external=True)
        if notify:
            pending_mail = mails.PENDING_VERIFICATION
            mails.send_mail(
                claimer_email,
                pending_mail,
                user=user,
                referrer=referrer,
                fullname=unclaimed_record['name'],
                node=node
            )
        mail_tpl = mails.FORWARD_INVITE
        to_addr = referrer.username
    mails.send_mail(
        to_addr,
        mail_tpl,
        user=user,
        referrer=referrer,
        node=node,
        claim_url=claim_url,
        email=claimer_email,
        fullname=unclaimed_record['name']
    )
    return to_addr
def verify_claim_token(user, token, pid):
    """View helper that checks that a claim token for a given user and node ID
    is valid.

    Returns True for a valid token and False for an invalid token on a
    still-unregistered user; raises HTTPError(400) when the token is
    invalid because a registered user already claimed the record.
    """
    if user.verify_claim_token(token=token, project_id=pid):
        return True
    # Invalid token on a registered user: the record was already claimed.
    if user.is_registered:
        error_data = {
            'message_short': 'User has already been claimed.',
            'message_long': 'Please <a href="/login/">log in</a> to continue.'}
        raise HTTPError(400, data=error_data)
    return False
@collect_auth
@must_be_valid_project
def claim_user_registered(auth, node, **kwargs):
    """View that prompts user to enter their password in order to claim
    contributorship on a project.

    A user must be logged in. On a valid POST, the unregistered
    contributor record is replaced with the logged-in account.
    """
    current_user = auth.user
    sign_out_url = web_url_for('auth_login', logout=True, next=request.url)
    if not current_user:
        return redirect(sign_out_url)
    # Logged in user should not be a contributor the project
    if node.is_contributor(current_user):
        logout_url = web_url_for('auth_logout', redirect_url=request.url)
        data = {
            'message_short': 'Already a contributor',
            # Bug fix: the implicitly concatenated literals were missing a
            # space and rendered "...a contributor to thisproject."
            'message_long': ('The logged-in user is already a contributor to '
                             'this project. Would you like to <a href="{}">log out</a>?').format(logout_url)
        }
        raise HTTPError(http.BAD_REQUEST, data=data)
    uid, pid, token = kwargs['uid'], kwargs['pid'], kwargs['token']
    unreg_user = User.load(uid)
    if not verify_claim_token(unreg_user, token, pid=node._primary_key):
        raise HTTPError(http.BAD_REQUEST)
    # Store the unreg_user data on the session in case the user registers
    # a new account
    session.data['unreg_user'] = {
        'uid': uid, 'pid': pid, 'token': token
    }
    form = PasswordForm(request.form)
    if request.method == 'POST':
        if form.validate():
            if current_user.check_password(form.password.data):
                # Hand the unregistered contributorship over to the
                # logged-in account.
                node.replace_contributor(old=unreg_user, new=current_user)
                node.save()
                status.push_status_message(
                    'You are now a contributor to this project.',
                    'success')
                return redirect(node.url)
            else:
                status.push_status_message(language.LOGIN_FAILED, 'warning')
        else:
            forms.push_errors_to_status(form.errors)
    if is_json_request():
        form_ret = forms.utils.jsonify(form)
        user_ret = utils.serialize_user(current_user, full=False)
    else:
        form_ret = form
        user_ret = current_user
    return {
        'form': form_ret,
        'user': user_ret,
        'signOutUrl': sign_out_url
    }
@user_registered.connect
def replace_unclaimed_user_with_registered(user):
    """Listens for the user_registered signal. If unreg_user is stored in the
    session, then the current user is trying to claim themselves as a contributor.
    Replaces the old, unregistered contributor with the newly registered
    account.
    """
    # The session payload is written by claim_user_registered before the
    # user is sent through registration.
    unreg_user_info = session.data.get('unreg_user')
    if unreg_user_info:
        unreg_user = User.load(unreg_user_info['uid'])
        pid = unreg_user_info['pid']
        node = Node.load(pid)
        node.replace_contributor(old=unreg_user, new=user)
        node.save()
        status.push_status_message(
            'Successfully claimed contributor.', 'success')
@collect_auth
def claim_user_form(auth, **kwargs):
    """View for rendering the set password page for a claimed user.

    Must have ``token`` as a querystring argument.

    Renders the set password form, validates it, and sets the user's password.
    On success, redirects through CAS using a one-time verification key.
    """
    uid, pid = kwargs['uid'], kwargs['pid']
    token = request.form.get('token') or request.args.get('token')
    # If user is logged in, redirect to 're-enter password' page
    if auth.logged_in:
        return redirect(web_url_for('claim_user_registered',
                                    uid=uid, pid=pid, token=token))
    user = User.load(uid)  # The unregistered user
    # user ID is invalid. Unregistered user is not in database
    if not user:
        raise HTTPError(http.BAD_REQUEST)
    # If claim token not valid, redirect to registration page
    if not verify_claim_token(user, token, pid):
        return redirect(web_url_for('auth_login'))
    unclaimed_record = user.unclaimed_records[pid]
    user.fullname = unclaimed_record['name']
    user.update_guessed_names()
    # The email can be the original referrer email if no claimer email has been specified.
    claimer_email = unclaimed_record.get('claimer_email') or unclaimed_record.get('email')
    form = SetEmailAndPasswordForm(request.form, token=token)
    if request.method == 'POST':
        if form.validate():
            username, password = claimer_email, form.password.data
            user.register(username=username, password=password)
            # Clear unclaimed records
            user.unclaimed_records = {}
            # One-time key CAS uses to authenticate without a session.
            user.verification_key = security.random_string(20)
            user.save()
            # Authenticate user and redirect to project page
            node = Node.load(pid)
            status.push_status_message(language.CLAIMED_CONTRIBUTOR.format(node=node), 'success')
            # Redirect to CAS and authenticate the user with a verification key.
            return redirect(cas.get_login_url(
                web_url_for('user_profile', _absolute=True),
                auto=True,
                username=user.username,
                verification_key=user.verification_key
            ))
        else:
            forms.push_errors_to_status(form.errors)
    return {
        'firstname': user.given_name,
        'email': claimer_email if claimer_email else '',
        'fullname': user.fullname,
        'form': forms.utils.jsonify(form) if is_json_request() else form,
    }
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def invite_contributor_post(node, **kwargs):
    """API view for inviting an unregistered user.

    Expects JSON arguments with 'fullname' (required) and email (not required).

    Returns a serialized contributor placeholder (or the matching
    unregistered user) on success, or a 400-style payload when validation
    fails.
    """
    # Bug fix: a missing 'fullname' key used to raise AttributeError
    # (None.strip()) and surface as a 500; normalize to '' so the
    # validation below returns the intended 400 response instead.
    fullname = (request.json.get('fullname') or '').strip()
    email = request.json.get('email')
    if email:
        email = email.lower().strip()
    if not fullname:
        return {'status': 400, 'message': 'Must provide fullname'}, 400
    # Check if email is in the database
    user = get_user(email=email)
    if user:
        if user.is_registered:
            msg = 'User is already in database. Please go back and try your search again.'
            return {'status': 400, 'message': msg}, 400
        elif node.is_contributor(user):
            msg = 'User with this email address is already a contributor to this project.'
            return {'status': 400, 'message': msg}, 400
        else:
            serialized = utils.add_contributor_json(user)
            # use correct display name
            serialized['fullname'] = fullname
            serialized['email'] = email
    else:
        # Create a placeholder
        serialized = utils.serialize_unregistered(fullname, email)
    return {'status': 'success', 'contributor': serialized}
@must_be_contributor_or_public
def claim_user_post(node, **kwargs):
    """View for claiming a user from the X-editable form on a project page.

    Accepts two request shapes: ``value`` (an email address typed into the
    form) or ``claimerId`` (the id of a logged-in user who confirmed their
    identity).

    :raises: HTTPError(400) when neither key is present.
    """
    reqdata = request.json
    # Unreg user
    user = User.load(reqdata['pk'])
    unclaimed_data = user.get_unclaimed_record(node._primary_key)
    # Submitted through X-editable
    if 'value' in reqdata:  # Submitted email address
        email = reqdata['value'].lower().strip()
        claimer = get_user(email=email)
        # Registered claimers go through the two-step referrer flow.
        if claimer and claimer.is_registered:
            send_claim_registered_email(claimer=claimer, unreg_user=user,
                                        node=node)
        else:
            send_claim_email(email, user, node, notify=True)
    # TODO(sloria): Too many assumptions about the request data. Just use
    elif 'claimerId' in reqdata:  # User is logged in and confirmed identity
        claimer_id = reqdata['claimerId']
        claimer = User.load(claimer_id)
        send_claim_registered_email(claimer=claimer, unreg_user=user, node=node)
        email = claimer.username
    else:
        raise HTTPError(http.BAD_REQUEST)
    return {
        'status': 'success',
        'email': email,
        'fullname': unclaimed_data['name']
    }
| |
# Copyright 2021 The ParallelAccel Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import linear_algebra
from linear_algebra.study import ParamResolver
import jax.numpy as jnp
import jax.lax as lax
import numpy as np
import sympy
import graph_helper_tool as tn
from asic_la.sharded_probability_function import invert_permutation
def build_random_acyclic_graph(
Nparams,
Nexponents,
depth,
N,
two_param_building_blocks=False,
subdomain=None,
seed=10,
):
"""
Build a random acyclic_graph on `N` discretes of depth `depth`
variabled on `Nparams` symbols and `Nexponents` floating
point numbers.
Args:
Nparams: The number of sympy parameters in the acyclic_graph.
Nexponents: The number of non-parametric exponents to be used
to exponentiate building_blocks.
depth: Graph depth.
N: number of discretes
        two_param_building_blocks: If `True` only use building_blocks that can be parametrized
            by two parameters.
subdomain: The discrete domain on which the building_blocks should act.
seed: The seed for the random initialization of the acyclic_graph.
Same seeds produce the same acyclic_graph.
Returns:
linear_algebra.Graph: The acyclic_graph
List[linear_algebra.LinearSpace]: The discretes.
linear_algebra.ParamResolver: The parameter resolver.
"""
def f1(symbol):
return symbol / sympy.pi
def f2(symbol):
return symbol * sympy.pi
def f3(symbol):
return sympy.pi * symbol
def f4(symbol):
return symbol
funs = [f1, f2, f3, f4]
np.random.seed(seed)
names = [f"param_{n}" for n in range(Nparams)]
symbols = [sympy.Symbol(name) for name in names]
exponents = symbols + [np.random.rand(1)[0] * 10 for _ in range(Nexponents)]
resolver = ParamResolver(
{name: np.random.rand(1)[0] * 10 for name in names}
)
building_blocks = [
linear_algebra.flip_x_axis_angle,
linear_algebra.flip_x_axis_angle_square,
linear_algebra.flip_y_axis_angle,
linear_algebra.flip_y_axis_angle_square,
linear_algebra.flip_z_axis_angle,
linear_algebra.flip_z_axis_angle_square,
linear_algebra.flip_pi_over_4_axis_angle,
linear_algebra.cond_rotate_z,
linear_algebra.cond_rotate_x,
linear_algebra.cond_x_angle,
linear_algebra.swap_angle,
linear_algebra.imaginary_swap_angle,
linear_algebra.x_axis_two_angles,
linear_algebra.imaginary_swap_two_angles,
linear_algebra.rotate_on_xy_plane,
linear_algebra.EmptyBuildingBlock,
linear_algebra.flip_x_axis,
linear_algebra.flip_z_axis,
linear_algebra.flip_y_axis,
linear_algebra.flip_pi_over_4_axis,
linear_algebra.rotate_x_axis,
linear_algebra.rotate_y_axis,
linear_algebra.rotate_z_axis,
]
nq = [1, 2, 1, 2, 1, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1]
num_discretes = dict(zip(building_blocks, nq))
num_params = dict(zip(building_blocks, [1] * 12 + [2, 2, 2, 0, 1, 1, 1, 1, 1, 1, 1]))
if subdomain is not None:
r = np.array(list(set(subdomain))).astype(np.int64)
else:
r = np.arange(N)
discretes = linear_algebra.LinearSpace.range(N)
acyclic_graph = linear_algebra.Graph()
d = 0
while d < depth:
building_block = np.random.choice(building_blocks, 1)[0]
numq = num_discretes[building_block]
nparams = num_params[building_block]
if two_param_building_blocks:
if nparams < 2:
continue
d += 1
if Nparams > 0:
fs = np.random.choice(funs, nparams)
else:
fs = [lambda x: x] * nparams
ps = np.random.choice(r, numq, replace=False)
symbs = np.random.choice(exponents, nparams, replace=True)
if building_block is linear_algebra.rotate_on_xy_plane:
g = building_block(theta=fs[0](symbs[0]), phi=fs[1](symbs[1]))
acyclic_graph += [g(discretes[ps[0]], discretes[ps[1]])]
elif building_block is linear_algebra.imaginary_swap_two_angles:
g = building_block(phase_exponent=fs[0](symbs[0]), exponent=fs[1](symbs[1]))
acyclic_graph += [g(discretes[ps[0]], discretes[ps[1]])]
elif building_block is linear_algebra.x_axis_two_angles:
g = building_block(phase_exponent=fs[0](symbs[0]), exponent=fs[1](symbs[1]))
acyclic_graph += [g(discretes[ps[0]])]
elif (
building_block is linear_algebra.flip_x_axis or building_block is linear_algebra.flip_y_axis or building_block is linear_algebra.flip_z_axis or building_block is linear_algebra.flip_pi_over_4_axis
):
acyclic_graph += [building_block(discretes[ps[0]]) ** fs[0](symbs[0])]
elif building_block is linear_algebra.rotate_x_axis or building_block is linear_algebra.rotate_y_axis or building_block is linear_algebra.rotate_z_axis:
g = building_block(fs[0](symbs[0]))
acyclic_graph += [g(discretes[ps[0]])]
else:
if nparams == 0:
g = building_block(2)
acyclic_graph += [g(discretes[ps[0]], discretes[ps[1]])]
else:
g = building_block(exponent=fs[0](symbs[0]))
if numq == 1:
acyclic_graph += [g(discretes[ps[0]])]
elif numq == 2:
g = building_block(exponent=fs[0](symbs[0]))
acyclic_graph += [g(discretes[ps[0]], discretes[ps[1]])]
return acyclic_graph, discretes, resolver
def full_matrix(building_block, inds, N):
    """
    Extend `building_block` acting on discretes indices `inds`
    to an `N`-discrete building_block in natural discrete ordering (small
    to large).

    Args:
        building_block: A 2x2 (one discrete) or 4x4 (two discretes) matrix.
        inds: The discrete indices the block acts on (length 1 or 2).
        N: Total number of discretes of the embedding space.

    Returns:
        np.ndarray: The (2**N, 2**N) embedded matrix.

    Raises:
        ValueError: If `inds` has a length other than 1 or 2.
    """
    if len(inds) == 1:
        # One-discrete case: sandwich the 2x2 block between identity factors.
        return np.kron(
            np.kron(np.eye(2 ** (inds[0])), building_block),
            np.eye(2 ** (N - 1 - inds[0])),
        )
    if len(inds) == 2:
        # Two-discrete case: sort the target indices and permute the block's
        # legs accordingly, then contract with identities via a tensor network.
        indsort = np.argsort(inds)
        inds = np.asarray(inds)[indsort]
        # Output legs are axes 0..1, input legs axes 2..3; permute both pairs.
        perm = list(indsort) + list(2 + indsort)
        G = tn.Node(building_block.reshape(2, 2, 2, 2).transpose(perm))
        Ids = [tn.Node(np.eye(2)) for n in range(N - 2)]
        # Build the output-edge order: identities interleaved with the block's
        # two output legs, in natural (small-to-large) discrete order...
        order = []
        for n in range(inds[0]):
            order.append(Ids[n][0])
        order.append(G[0])
        for n in range(inds[0] + 1, inds[1]):
            order.append(Ids[n - 1][0])
        order.append(G[1])
        for n in range(inds[1] + 1, N):
            order.append(Ids[n - 2][0])
        # ...followed by the matching input edges in the same order.
        for n in range(inds[0]):
            order.append(Ids[n][1])
        order.append(G[2])
        for n in range(inds[0] + 1, inds[1]):
            order.append(Ids[n - 1][1])
        order.append(G[3])
        for n in range(inds[1] + 1, N):
            order.append(Ids[n - 2][1])
        if len(Ids) > 1:
            # Take the outer product of all identity nodes, then with the block.
            I = tn.outer_product(Ids[0], Ids[1])
            for i in Ids[2:]:
                I = tn.outer_product(I, i)
            final = tn.outer_product(I, G)
        else:
            # N == 2 (or fewer identities than 2): the block alone, or block+1 id.
            # NOTE(review): when N == 3 there is exactly one identity node in
            # `Ids`, and this branch uses only `G` — verify that case upstream.
            final = G
        # Flatten back to a (2**N, 2**N) matrix in the computed edge order.
        return final.reorder_edges(order).tensor.reshape((2 ** N, 2 ** N))
    raise ValueError()
def get_full_matrix(acyclic_graph, discretes):
    """
    Return the full unitary matrix of a linear_algebra.Graph `acyclic_graph`
    acting on linear_algebra-discretes `discretes`.
    """
    num = len(discretes)
    # Accumulate left-multiplied embedded unitaries, starting from identity.
    result = np.eye(2 ** num)
    for operation in acyclic_graph.all_operations():
        positions = [discretes.index(q) for q in operation.discretes]
        embedded = full_matrix(linear_algebra.unitary(operation), positions, num)
        result = embedded @ result
    return result
def dot(state, state_labels, matrix, matrix_labels):
    """
    Contract `matrix` into `state` along the legs named by `matrix_labels`.

    The matrix is reshaped into a (2,)*(2k) tensor whose last k axes are its
    "input" legs; those are contracted against the state legs selected by
    `matrix_labels`. Returns the contracted tensor and its new label order:
    the surviving state labels first, then `matrix_labels`.
    """
    contract_axes = [state_labels.index(lbl) for lbl in matrix_labels]
    k = len(contract_axes)
    tensor = matrix.reshape((2,) * (2 * k))
    contracted = np.tensordot(
        state, tensor, (contract_axes, tuple(range(k, 2 * k)))
    )
    surviving = tuple(lbl for lbl in state_labels if lbl not in matrix_labels)
    return contracted, surviving + matrix_labels
def apply_supermatrices(state, state_labels, supermatrices, supermatrix_labels):
    """
    Contract `supermatrices` with `state` along the labels given by
    `state_labels` and `supermatrix_labels`.

    Args:
        state: A (2,)*num_discretes shaped array.
        state_labels: A tuple of unique ints labelling each tensor leg
            (i.e. the discrete labels for each tensor leg).
        supermatrices: A sequence of matrix-shaped supermatrices
            (i.e. 128 by 128).
        supermatrix_labels: The labels of the discretes on which each
            building_block acts.

    Returns:
        np.ndarray: The result of applying the building_blocks to `state`.
        The returned state is permuted into the ordering given by
        `state_labels`.
    """
    current, current_labels = state, state_labels
    for mat, mat_labels in zip(supermatrices, supermatrix_labels):
        current, current_labels = dot(current, current_labels, mat, mat_labels)
    # The contractions shuffled the legs; restore the caller's ordering.
    restore = [current_labels.index(lbl) for lbl in state_labels]
    return current.transpose(restore)
def get_full_matrix_from_supermatrix(supermatrix, contracted_labels):
    """
    Returns the full unitary matrix of a single `supermatrix`
    that acts on all discretes in the acyclic_graph (i.e. `axes` and
    `perm` need to be permutations of np.arange(large_block.ndim//2))
    """
    num = len(contracted_labels)
    inverse = invert_permutation(contracted_labels)
    # Apply the same inverse permutation to output legs and input legs.
    axis_order = np.append(inverse, np.array(inverse) + num)
    tensor = np.reshape(supermatrix, (2,) * len(axis_order))
    return tensor.transpose(axis_order).reshape((2 ** num, 2 ** num))
def get_full_matrices_from_supergradient(supergradient, contracted_labels):
    """
    Returns the gradients in matrix form of a list of `supergradients`
    of length 1 (i.e. only one large_block with possibly multiple
    gradients) that acts on all discretes in the acyclic_graph.
    """
    num = len(contracted_labels)
    inverse = invert_permutation(contracted_labels)
    # Same leg permutation for the output and input halves of each gradient.
    axis_order = np.append(inverse, np.array(inverse) + num)
    result = {}
    for sym, grad in supergradient.items():
        tensor = grad.reshape((2,) * 2 * num)
        result[sym] = tensor.transpose(axis_order).reshape(2 ** num, 2 ** num)
    return result
def finite_diff_gradients(acyclic_graph, resolver, epsilon=1e-8):
    """
    Approximate the gradient of the acyclic_graph's unitary with respect
    to each free parameter using forward finite differences.

    Args:
        acyclic_graph: The parametrized linear_algebra.Graph.
        resolver: A resolver holding the base value of every parameter.
        epsilon: Finite-difference step size.

    Returns:
        dict: Maps each sympy.Symbol to its (matrix-valued) gradient.
    """
    base_unitary = linear_algebra.unitary(
        linear_algebra.resolve_parameters(acyclic_graph, resolver)
    )
    gradients = {}
    for symbol in linear_algebra.parameter_symbols(acyclic_graph):
        # Shift only this symbol's value by epsilon, keep the rest unchanged.
        shifted_values = {
            name: (value + epsilon if name == symbol.name else value)
            for name, value in resolver.param_dict.items()
        }
        shifted_unitary = linear_algebra.unitary(
            linear_algebra.resolve_parameters(acyclic_graph, shifted_values)
        )
        gradients[symbol] = (shifted_unitary - base_unitary) / epsilon
    return gradients
def compute_gradients(
    state,
    supermatrices,
    supergradients,
    super_oplabels,
    observables,
    observables_labels,
    num_discretes,
):
    """
    Compute the gradients of a symplectic acyclic_graph for the cost function
    <psi|sum_n H_n |psi>, with H_n the element at `observables[n]`, acting on
    discretes `observables_labels[n]`.
    Args:
        state: a random numpy ndarray of shape (2,)* num_discretes.
        supermatrices (list[np.ndarray]): list of supermatrices
        supergradients (list[dict]): list of dict of gradient matrices
            of each supermatrix. each dict maps sympy.Symbol to np.ndarray
        super_oplabels (list[tuple[int]]): the discrete labels of each large_block.
        observables (list[np.ndarray]): a list of observables (in tensor format).
        observables_labels (list[tuple[int]]): the discrete labels for each element
            in `observables`
        num_discretes (int): the number of discretes

    Returns:
        tuple: (dict mapping each symbol to its accumulated gradient value,
        the forward-propagated state transposed back into natural
        small-to-large discrete ordering).
    """
    obs_and_labels = list(zip(observables, observables_labels))
    state_labels = tuple(range(num_discretes))
    # Forward pass: apply the full sequence of supermatrices to the state.
    state = apply_supermatrices(
        state, state_labels, supermatrices, super_oplabels
    )
    # psi accumulates sum_n H_n |state> (one ncon contraction per observable).
    psi = np.zeros(state.shape, state.dtype)
    for ob, ob_labels in obs_and_labels:
        inds = [state_labels.index(l) for l in ob_labels]
        # Negative labels mark open legs in ncon; positive labels mark the
        # legs contracted between the state and this observable.
        cont_state_labels = list(range(-1, -len(state_labels) - 1, -1))
        cont_ob_labels = []
        for n, i in enumerate(inds):
            cont_ob_labels.append(cont_state_labels[i])
            cont_state_labels[i] = ob_labels[n] + 1
        shape = (2,) * (2 * len(ob_labels))
        psi += tn.ncon(
            [state, ob.reshape(shape)],
            [
                tuple(cont_state_labels),
                tuple([o + 1 for o in ob_labels]) + tuple(cont_ob_labels),
            ],
        )
    # Backward (adjoint) pass: peel supermatrices off both state and psi,
    # collecting <psi| dU/dtheta |state> contributions along the way.
    reversed_super_oplabels = list(reversed(super_oplabels))
    reversed_supergradients = list(reversed(supergradients))
    accumulated_gradients = {}
    psi = psi.conj()
    for n, building_block in enumerate(reversed(supermatrices)):
        building_block_labels = reversed_super_oplabels[n]
        # Undo this block on the forward state (apply its inverse U^dagger).
        state, tmp_labels = dot(state, state_labels, building_block.T.conj(), building_block_labels)
        for k, grad in reversed_supergradients[n].items():
            # Overlap of the gradient-propagated psi with the rolled-back state.
            tmp, _ = dot(psi, state_labels, grad.T, building_block_labels)
            if k in accumulated_gradients:
                accumulated_gradients[k] += np.dot(tmp.ravel(), state.ravel())
            else:
                accumulated_gradients[k] = np.dot(tmp.ravel(), state.ravel())
        # Propagate psi backwards through the (conjugated) block as well.
        psi, state_labels = dot(psi, state_labels, building_block.T, building_block_labels)
        assert (
            tmp_labels == state_labels
        ), "two identical building_block applications produced different label-ordering"
    # bring state back into natural discrete ordering (i.e. small to large)
    perm = [state_labels.index(i) for i in range(num_discretes)]
    return accumulated_gradients, state.transpose(perm)
def generate_raw_pbaxistring(discretes, string_length, replace=False):
    """
    Get a pbaxistring of length `string_length` acting on `discretes`.

    Returns a (coefficient, operators, target_discretes) triple drawn at
    random. The np.random call order (operators, discretes, coefficient)
    is part of the reproducibility contract — do not reorder.
    """
    operator_pool = [linear_algebra.flip_x_axis, linear_algebra.flip_y_axis, linear_algebra.flip_z_axis]
    chosen_operators = np.random.choice(operator_pool, string_length)
    chosen_discretes = np.random.choice(discretes, string_length, replace=replace)
    coefficient = np.random.rand(1)
    return coefficient, chosen_operators, chosen_discretes
def generate_pbaxisum(num_strings, discretes, string_length):
    """
    Build the sum of `num_strings` random ProbBasisAxisStrings, each of
    length `string_length` and acting on distinct discretes.
    """
    terms = []
    for _ in range(num_strings):
        coeff, operators, targets = generate_raw_pbaxistring(
            discretes, string_length, replace=False
        )
        applied = [op(target) for op, target in zip(operators, targets)]
        terms.append(linear_algebra.ProbBasisAxisString(coeff, applied))
    return sum(terms)
def to_array(arr):
    """Rebuild `arr` as a complex numpy array from its real and imaginary parts."""
    real_part = np.array(arr.real)
    imag_part = np.array(arr.imag)
    return real_part + 1j * imag_part
def _mantissa_eps(mantissa_bits):
return 0.5 * (2 ** (1 - mantissa_bits))
def eps(precision, dtype=jnp.float32):
    """
    Return the effective numerical epsilon for computations carried out
    with `dtype` at the given `lax.Precision`.

    Raises:
        ValueError: For a float32/complex64 dtype with an unknown precision.
    """
    fallback = jnp.finfo(dtype).eps
    if dtype in (jnp.float64, jnp.complex128):
        # 64-bit math on the accelerator effectively carries ~49 mantissa bits.
        return _mantissa_eps(49)
    if dtype in (jnp.float32, jnp.complex64):
        if precision == lax.Precision.DEFAULT:
            return jnp.finfo(jnp.bfloat16).eps
        if precision == lax.Precision.HIGH:
            return _mantissa_eps(18)  # TODO: Check this
        if precision == lax.Precision.HIGHEST:
            return jnp.finfo(jnp.float32).eps
        raise ValueError(f"Invalid precision {precision}.")
    # Any other dtype: report its native machine epsilon.
    return fallback
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from mock import MagicMock, call
from uiautomator import AutomatorDeviceObject, Selector, AutomatorDeviceNamedUiObject
class TestDeviceObjInit(unittest.TestCase):
    """Construction tests for AutomatorDeviceObject."""

    def setUp(self):
        self.device = MagicMock()
        self.device.server.jsonrpc = MagicMock()

    def test_init(self):
        """A new object keeps its selector and the device's jsonrpc proxy."""
        selector_kwargs = {"text": "text", "className": "android"}
        self.device_obj = AutomatorDeviceObject(
            self.device, Selector(**selector_kwargs)
        )
        self.assertEqual(self.device_obj.selector, Selector(**selector_kwargs))
        self.assertEqual(self.device_obj.jsonrpc, self.device.server.jsonrpc)
class TestDeviceObj(unittest.TestCase):
    """Behavioral tests for AutomatorDeviceObject against a mocked jsonrpc."""

    def setUp(self):
        self.device = MagicMock()
        self.jsonrpc = self.device.server.jsonrpc = MagicMock()
        self.jsonrpc_wrap = self.device.server.jsonrpc_wrap = MagicMock()
        self.kwargs = {"text": "text", "className": "android"}
        self.obj = AutomatorDeviceObject(self.device,
                                         Selector(**self.kwargs))

    def test_child_selector(self):
        """child_selector() records a 'child' relation with the new selector."""
        kwargs = {"text": "child text", "className": "android"}
        obj = self.obj.child_selector(**kwargs)
        self.assertEqual(len(obj.selector['childOrSibling']), 1)
        self.assertEqual(obj.selector['childOrSibling'][0], 'child')
        self.assertEqual(len(obj.selector['childOrSiblingSelector']), 1)
        self.assertEqual(obj.selector['childOrSiblingSelector'][0], Selector(**kwargs))

    def test_from_parent(self):
        """from_parent() records a 'sibling' relation with the new selector."""
        kwargs = {"text": "parent text", "className": "android"}
        obj = self.obj.from_parent(**kwargs)
        self.assertEqual(len(obj.selector['childOrSibling']), 1)
        self.assertEqual(obj.selector['childOrSibling'][0], 'sibling')
        self.assertEqual(len(obj.selector['childOrSiblingSelector']), 1)
        self.assertEqual(obj.selector['childOrSiblingSelector'][0], Selector(**kwargs))

    def test_exists(self):
        """exists mirrors jsonrpc.exist and passes the selector through."""
        self.jsonrpc.exist = MagicMock()
        self.jsonrpc.exist.return_value = True
        self.assertTrue(self.obj.exists)
        self.jsonrpc.exist.return_value = False
        self.assertFalse(self.obj.exists)
        self.assertEqual(self.jsonrpc.exist.call_args_list,
                         [call(self.obj.selector),
                          call(self.obj.selector)])

    def test_info(self):
        """info returns jsonrpc.objInfo for this object's selector."""
        info = {"text": "item text"}
        self.jsonrpc.objInfo.return_value = info
        self.assertEqual(self.obj.info,
                         info)
        self.jsonrpc.objInfo.assert_called_once_with(self.obj.selector)

    def test_info_attr(self):
        """Each objInfo key is exposed as an attribute; unknown keys raise."""
        info = {'contentDescription': '',
                'checked': False,
                'scrollable': False,
                'text': '',
                'packageName': 'android',
                'selected': False,
                'enabled': True,
                'bounds': {'top': 0,
                           'left': 0,
                           'right': 720,
                           'bottom': 1184},
                'className':
                'android.widget.FrameLayout',
                'focusable': False,
                'focused': False,
                'clickable': False,
                'checkable': False,
                'chileCount': 2,
                'longClickable': False,
                'visibleBounds': {'top': 0,
                                  'left': 0,
                                  'right': 720,
                                  'bottom': 1184}}
        self.jsonrpc.objInfo.return_value = info
        self.assertEqual(self.obj.info, info)
        self.jsonrpc.objInfo.assert_called_once_with(self.obj.selector)
        self.assertEqual(self.obj.description, info["contentDescription"])
        for k in info:
            self.assertEqual(getattr(self.obj, k), info[k])
        with self.assertRaises(AttributeError):
            self.obj.not_exists

    def test_text(self):
        """set_text(None/'') and clear_text() clear; set_text(s) forwards s."""
        self.jsonrpc.clearTextField = MagicMock()
        self.obj.set_text(None)
        self.obj.set_text("")
        self.obj.clear_text()
        self.assertEqual(self.jsonrpc.clearTextField.call_args_list,
                         [call(self.obj.selector), call(self.obj.selector), call(self.obj.selector)])
        self.jsonrpc.setText.return_value = False
        texts = ["abc", "123", "()#*$&"]
        for text in texts:
            self.assertFalse(self.obj.set_text(text))
        self.assertEqual(self.jsonrpc.setText.call_args_list,
                         [call(self.obj.selector, t) for t in texts])

    def test_click(self):
        """click supports corner args, corner attributes and plain click."""
        self.jsonrpc.click.return_value = False
        corners = ["tl", "topleft", "br", "bottomright"]
        for c in corners:
            self.assertFalse(self.obj.click(c))
        self.assertEqual(self.jsonrpc.click.call_args_list,
                         [call(self.obj.selector, c) for c in corners])
        self.jsonrpc.click = MagicMock()
        self.jsonrpc.click.return_value = True
        corners = ["tl", "topleft", "br", "bottomright"]
        for c in corners:
            self.assertTrue(getattr(self.obj.click, c)())
        self.assertEqual(self.jsonrpc.click.call_args_list,
                         [call(self.obj.selector, c) for c in corners])
        self.jsonrpc.click = MagicMock()
        self.jsonrpc.click.return_value = True
        self.assertTrue(self.obj.click())
        self.jsonrpc.click.assert_called_once_with(self.obj.selector)

    def test_click_wait(self):
        """click.wait forwards the timeout to clickAndWaitForNewWindow."""
        self.jsonrpc.clickAndWaitForNewWindow.return_value = True
        self.assertTrue(self.obj.click.wait(timeout=321))
        self.jsonrpc.clickAndWaitForNewWindow.assert_called_once_with(self.obj.selector, 321)

    def test_long_click(self):
        """long_click mirrors click: corner args, attributes, plain call."""
        self.jsonrpc.longClick.return_value = False
        corners = ["tl", "topleft", "br", "bottomright"]
        for c in corners:
            self.assertFalse(self.obj.long_click(c))
        self.assertEqual(self.jsonrpc.longClick.call_args_list,
                         [call(self.obj.selector, c) for c in corners])
        self.jsonrpc.longClick = MagicMock()
        self.jsonrpc.longClick.return_value = True
        corners = ["tl", "topleft", "br", "bottomright"]
        for c in corners:
            self.assertTrue(getattr(self.obj.long_click, c)())
        self.assertEqual(self.jsonrpc.longClick.call_args_list,
                         [call(self.obj.selector, c) for c in corners])
        self.jsonrpc.longClick = MagicMock()
        self.jsonrpc.longClick.return_value = True
        self.assertTrue(self.obj.long_click())
        self.jsonrpc.longClick.assert_called_once_with(self.obj.selector)

    def test_long_click_using_swipe(self):
        """When the object isn't longClickable, fall back to device.long_click
        at coordinates derived from visibleBounds (corner or center)."""
        self.device.long_click.return_value = False
        self.jsonrpc.objInfo.return_value = {
            'longClickable': False,
            'visibleBounds': {
                'top': 0,
                'bottom': 60,
                'left': 0,
                'right': 60
            }
        }
        corners = ["tl", "topleft", "br", "bottomright"]
        for c in corners:
            self.assertFalse(self.obj.long_click(c))
        self.assertEqual(self.device.long_click.call_args_list,
                         [call(10, 10), call(10, 10), call(50, 50), call(50, 50)])
        self.device.long_click = MagicMock()
        self.device.long_click.return_value = True
        corners = ["tl", "topleft", "br", "bottomright"]
        for c in corners:
            self.assertTrue(getattr(self.obj.long_click, c)())
        self.assertEqual(self.device.long_click.call_args_list,
                         [call(10, 10), call(10, 10), call(50, 50), call(50, 50)])
        self.device.long_click = MagicMock()
        self.device.long_click.return_value = True
        self.assertTrue(self.obj.long_click())
        self.device.long_click.assert_called_once_with(30, 30)

    def test_drag_to(self):
        """drag.to accepts coordinates or a selector, plus a steps count."""
        self.jsonrpc.dragTo.return_value = False
        self.assertFalse(self.obj.drag.to(10, 20, steps=10))
        self.jsonrpc.dragTo.return_value = True
        self.assertTrue(self.obj.drag.to(x=10, y=20, steps=20))
        sel = {"text": "text..."}
        self.assertTrue(self.obj.drag.to(steps=30, **sel))
        self.assertEqual(self.jsonrpc.dragTo.call_args_list,
                         [call(self.obj.selector, 10, 20, 10),
                          call(self.obj.selector, 10, 20, 20),
                          call(self.obj.selector, Selector(**sel), 30)])

    def test_gesture(self):
        """gesture supports both the 5-arg form and the fluent .to() form."""
        self.jsonrpc.gesture.return_value = True
        self.assertTrue(self.obj.gesture((1, 1), (2, 2), (3, 3), (4, 4), 100))
        self.assertTrue(self.obj.gesture(4, 3).to(2, 1, 20))
        self.assertEqual(self.jsonrpc.gesture.call_args_list,
                         [call(self.obj.selector, {'x':1, 'y': 1}, {'x':2, 'y':2}, {'x':3, 'y':3}, {'x':4, 'y':4}, 100), call(self.obj.selector, 4, 3, 2, 1, 20)])

    def test_pinch(self):
        """pinch.In/pinch.Out and the string forms forward percent and steps."""
        self.jsonrpc.pinchIn.return_value = True
        self.assertTrue(self.obj.pinch.In(percent=90, steps=30))
        self.assertTrue(self.obj.pinch("in", 80, 40))
        self.assertTrue(self.obj.pinch("In", 70, 50))
        self.assertEqual(self.jsonrpc.pinchIn.call_args_list,
                         [call(self.obj.selector, 90, 30), call(self.obj.selector, 80, 40), call(self.obj.selector, 70, 50)])
        self.jsonrpc.pinchOut.return_value = True
        self.assertTrue(self.obj.pinch.Out(percent=90, steps=30))
        self.assertTrue(self.obj.pinch("out", 80, 40))
        self.assertTrue(self.obj.pinch("Out", 70, 50))
        # Fixed: this previously re-checked pinchIn.call_args_list, so the
        # pinch-out calls were never actually verified.
        self.assertEqual(self.jsonrpc.pinchOut.call_args_list,
                         [call(self.obj.selector, 90, 30), call(self.obj.selector, 80, 40), call(self.obj.selector, 70, 50)])

    def test_swipe(self):
        """swipe accepts a direction argument or direction attributes."""
        self.jsonrpc.swipe.return_value = True
        dirs = ["up", "down", "right", "left"]
        for d in dirs:
            self.assertTrue(self.obj.swipe(d, 30))
        self.assertEqual(self.jsonrpc.swipe.call_args_list,
                         [call(self.obj.selector, d, 30) for d in dirs])
        self.jsonrpc.swipe = MagicMock()
        self.jsonrpc.swipe.return_value = True
        dirs = ["up", "down", "right", "left"]
        for d in dirs:
            self.assertTrue(getattr(self.obj.swipe, d)(steps=30))
        self.assertEqual(self.jsonrpc.swipe.call_args_list,
                         [call(self.obj.selector, d, 30) for d in dirs])

    def test_fling(self):
        """fling forwards orientation (vertical flag) to the fling RPCs."""
        self.jsonrpc.flingForward.return_value = True
        self.assertTrue(self.obj.fling.horiz.forward())
        self.assertTrue(self.obj.fling.horizentally.forward())
        self.assertTrue(self.obj.fling.vert.forward())
        self.assertTrue(self.obj.fling())
        self.assertEqual(self.jsonrpc.flingForward.call_args_list,
                         [call(self.obj.selector, False), call(self.obj.selector, False), call(self.obj.selector, True), call(self.obj.selector, True)])
        self.jsonrpc.flingBackward.return_value = True
        self.assertTrue(self.obj.fling.horiz.backward())
        self.assertTrue(self.obj.fling.horizentally.backward())
        self.assertTrue(self.obj.fling.vert.backward())
        self.assertTrue(self.obj.fling.vertically.backward())
        self.assertEqual(self.jsonrpc.flingBackward.call_args_list,
                         [call(self.obj.selector, False), call(self.obj.selector, False), call(self.obj.selector, True), call(self.obj.selector, True)])
        max_swipes = 1000
        self.jsonrpc.flingToBeginning.return_value = True
        self.assertTrue(self.obj.fling.horiz.toBeginning())
        self.assertTrue(self.obj.fling.horizentally.toBeginning())
        self.assertTrue(self.obj.fling.vert.toBeginning())
        self.assertTrue(self.obj.fling.vertically.toBeginning(max_swipes=100))
        self.assertEqual(self.jsonrpc.flingToBeginning.call_args_list,
                         [call(self.obj.selector, False, max_swipes), call(self.obj.selector, False, max_swipes), call(self.obj.selector, True, max_swipes), call(self.obj.selector, True, 100)])
        self.jsonrpc.flingToEnd.return_value = True
        self.assertTrue(self.obj.fling.horiz.toEnd())
        self.assertTrue(self.obj.fling.horizentally.toEnd())
        self.assertTrue(self.obj.fling.vert.toEnd())
        self.assertTrue(self.obj.fling.vertically.toEnd(max_swipes=100))
        self.assertEqual(self.jsonrpc.flingToEnd.call_args_list,
                         [call(self.obj.selector, False, max_swipes), call(self.obj.selector, False, max_swipes), call(self.obj.selector, True, max_swipes), call(self.obj.selector, True, 100)])

    def test_scroll(self):
        """scroll forwards orientation, steps and max_swipes to scroll RPCs."""
        steps = 100
        max_swipes = 1000
        self.jsonrpc.scrollForward.return_value = True
        self.assertTrue(self.obj.scroll.horiz.forward())
        self.assertTrue(self.obj.scroll.horizentally.forward())
        self.assertTrue(self.obj.scroll.vert.forward())
        self.assertTrue(self.obj.scroll(steps=20))
        self.assertEqual(self.jsonrpc.scrollForward.call_args_list,
                         [call(self.obj.selector, False, steps), call(self.obj.selector, False, steps), call(self.obj.selector, True, steps), call(self.obj.selector, True, 20)])
        self.jsonrpc.scrollBackward.return_value = True
        self.assertTrue(self.obj.scroll.horiz.backward())
        self.assertTrue(self.obj.scroll.horizentally.backward())
        self.assertTrue(self.obj.scroll.vert.backward())
        self.assertTrue(self.obj.scroll.vertically.backward(steps=20))
        self.assertEqual(self.jsonrpc.scrollBackward.call_args_list,
                         [call(self.obj.selector, False, steps), call(self.obj.selector, False, steps), call(self.obj.selector, True, steps), call(self.obj.selector, True, 20)])
        self.jsonrpc.scrollToBeginning.return_value = True
        self.assertTrue(self.obj.scroll.horiz.toBeginning())
        self.assertTrue(self.obj.scroll.horizentally.toBeginning())
        self.assertTrue(self.obj.scroll.vert.toBeginning())
        self.assertTrue(self.obj.scroll.vertically.toBeginning(steps=20, max_swipes=100))
        self.assertEqual(self.jsonrpc.scrollToBeginning.call_args_list,
                         [call(self.obj.selector, False, max_swipes, steps), call(self.obj.selector, False, max_swipes, steps), call(self.obj.selector, True, max_swipes, steps), call(self.obj.selector, True, 100, 20)])
        self.jsonrpc.scrollToEnd.return_value = True
        self.assertTrue(self.obj.scroll.horiz.toEnd())
        self.assertTrue(self.obj.scroll.horizentally.toEnd())
        self.assertTrue(self.obj.scroll.vert.toEnd())
        self.assertTrue(self.obj.scroll.vertically.toEnd(steps=20, max_swipes=100))
        self.assertEqual(self.jsonrpc.scrollToEnd.call_args_list,
                         [call(self.obj.selector, False, max_swipes, steps), call(self.obj.selector, False, max_swipes, steps), call(self.obj.selector, True, max_swipes, steps), call(self.obj.selector, True, 100, 20)])
        info = {"text": "..."}
        self.jsonrpc.scrollTo.return_value = True
        self.assertTrue(self.obj.scroll.horiz.to(**info))
        self.assertTrue(self.obj.scroll.horizentally.to(**info))
        self.assertTrue(self.obj.scroll.vert.to(**info))
        self.assertTrue(self.obj.scroll.vertically.to(**info))
        self.assertEqual(self.jsonrpc.scrollTo.call_args_list,
                         [call(self.obj.selector, Selector(**info), False), call(self.obj.selector, Selector(**info), False), call(self.obj.selector, Selector(**info), True), call(self.obj.selector, Selector(**info), True)])

    def test_wait(self):
        """wait.gone/wait.exists use the wrapped jsonrpc with a timeout."""
        timeout = 3000
        self.jsonrpc_wrap.return_value.waitUntilGone.return_value = True
        self.assertTrue(self.obj.wait.gone())
        self.jsonrpc_wrap.return_value.waitUntilGone.assert_called_once_with(self.obj.selector, timeout)
        self.jsonrpc_wrap.return_value.waitForExists.return_value = True
        self.assertTrue(self.obj.wait.exists(timeout=10))
        self.jsonrpc_wrap.return_value.waitForExists.assert_called_once_with(self.obj.selector, 10)

    def test_child_by_text(self):
        """child_by_text forwards both selectors and text; wraps the name."""
        self.jsonrpc.childByText.return_value = "myname"
        kwargs = {"className": "android", "text": "patern match text"}
        generic_obj = self.obj.child_by_text("child text", **kwargs)
        self.jsonrpc.childByText.assert_called_once_with(Selector(**self.kwargs), Selector(**kwargs), "child text")
        self.assertEqual("myname", generic_obj.selector)

    def test_child_by_text_allow_scroll_search(self):
        """allow_scroll_search is forwarded as a trailing positional arg."""
        self.jsonrpc.childByText.return_value = "myname"
        kwargs = {"className": "android", "text": "patern match text"}
        generic_obj = self.obj.child_by_text("child text", allow_scroll_search=False, **kwargs)
        self.jsonrpc.childByText.assert_called_once_with(
            Selector(**self.kwargs), Selector(**kwargs), "child text", False)
        self.assertEqual("myname", generic_obj.selector)

    def test_child_by_description(self):
        """child_by_description forwards selectors and description text."""
        self.jsonrpc.childByDescription.return_value = "myname"
        kwargs = {"className": "android", "text": "patern match text"}
        generic_obj = self.obj.child_by_description("child text", **kwargs)
        self.jsonrpc.childByDescription.assert_called_once_with(
            Selector(**self.kwargs), Selector(**kwargs), "child text")
        self.assertEqual("myname", generic_obj.selector)

    def test_child_by_description_allow_scroll_search(self):
        """allow_scroll_search is forwarded as a trailing positional arg."""
        self.jsonrpc.childByDescription.return_value = "myname"
        kwargs = {"className": "android", "text": "patern match text"}
        generic_obj = self.obj.child_by_description("child text", allow_scroll_search=False, **kwargs)
        self.jsonrpc.childByDescription.assert_called_once_with(
            Selector(**self.kwargs), Selector(**kwargs), "child text", False)
        self.assertEqual("myname", generic_obj.selector)

    def test_child_by_instance(self):
        """child_by_instance forwards selectors and the instance index."""
        self.jsonrpc.childByInstance.return_value = "myname"
        kwargs = {"className": "android", "text": "patern match text"}
        generic_obj = self.obj.child_by_instance(1234, **kwargs)
        self.jsonrpc.childByInstance.assert_called_once_with(Selector(**self.kwargs), Selector(**kwargs), 1234)
        self.assertEqual("myname", generic_obj.selector)

    def test_count(self):
        """count property returns jsonrpc.count for this selector."""
        self.jsonrpc.count.return_value = 10
        self.assertEqual(self.obj.count, 10)
        self.jsonrpc.count.assert_called_once_with(Selector(**self.kwargs))

    def test_len(self):
        """len() delegates to the count property."""
        self.jsonrpc.count.return_value = 10
        self.assertEqual(len(self.obj), 10)

    def test_instance_list(self):
        """Indexing yields per-instance selectors; out-of-range raises."""
        count = 10
        self.jsonrpc.count.return_value = count
        for i in range(count):
            self.assertEqual(self.obj[i].selector["instance"], i)
        with self.assertRaises(IndexError):
            self.obj[count]
        self.jsonrpc.count.return_value = 1
        self.assertEqual(self.obj[0], self.obj)

    def test_instance_iter(self):
        """Iteration yields instances 0..count-1 in order."""
        count = 10
        self.jsonrpc.count.return_value = count
        for index, inst in enumerate(self.obj):
            self.assertEqual(inst.selector["instance"], index)

    def test_left(self):
        """left() selects the nearest sibling to the left by bounds."""
        self.jsonrpc.objInfo.side_effect = [
            {"bounds": {'top': 200, 'bottom': 250, 'left': 100, 'right': 150}},
            {"bounds": {'top': 250, 'bottom': 300, 'left': 150, 'right': 200}},
            {"bounds": {'top': 200, 'bottom': 300, 'left': 150, 'right': 200}},
            {"bounds": {'top': 200, 'bottom': 300, 'left': 50, 'right': 100}}
        ]
        self.jsonrpc.count.return_value = 3
        self.assertEqual(self.obj.left().selector["instance"], 2)

    def test_right(self):
        """right() selects the nearest sibling to the right by bounds."""
        self.jsonrpc.objInfo.side_effect = [
            {"bounds": {'top': 200, 'bottom': 250, 'left': 100, 'right': 150}},
            {"bounds": {'top': 250, 'bottom': 300, 'left': 150, 'right': 200}},
            {"bounds": {'top': 200, 'bottom': 300, 'left': 50, 'right': 100}},
            {"bounds": {'top': 200, 'bottom': 300, 'left': 150, 'right': 200}}
        ]
        self.jsonrpc.count.return_value = 3
        self.assertEqual(self.obj.right().selector["instance"], 2)

    def test_up(self):
        """up() selects the nearest sibling above by bounds."""
        self.jsonrpc.objInfo.side_effect = [
            {"bounds": {'top': 200, 'bottom': 250, 'left': 100, 'right': 150}},
            {"bounds": {'top': 250, 'bottom': 300, 'left': 100, 'right': 150}},
            {"bounds": {'top': 150, 'bottom': 200, 'left': 150, 'right': 200}},
            {"bounds": {'top': 150, 'bottom': 200, 'left': 100, 'right': 200}}
        ]
        self.jsonrpc.count.return_value = 3
        self.assertEqual(self.obj.up().selector["instance"], 2)

    def test_down(self):
        """down() selects the nearest sibling below by bounds."""
        self.jsonrpc.objInfo.side_effect = [
            {"bounds": {'top': 200, 'bottom': 250, 'left': 100, 'right': 150}},
            {"bounds": {'top': 250, 'bottom': 300, 'left': 150, 'right': 200}},
            {"bounds": {'top': 150, 'bottom': 200, 'left': 150, 'right': 200}},
            {"bounds": {'top': 250, 'bottom': 300, 'left': 100, 'right': 150}}
        ]
        self.jsonrpc.count.return_value = 3
        self.assertEqual(self.obj.down().selector["instance"], 2)

    def test_multiple_matched_down(self):
        """With several candidates below, the closest one wins."""
        self.jsonrpc.objInfo.side_effect = [
            {"bounds": {'top': 200, 'bottom': 250, 'left': 100, 'right': 150}},
            {"bounds": {'top': 250, 'bottom': 300, 'left': 150, 'right': 200}},
            {"bounds": {'top': 150, 'bottom': 200, 'left': 150, 'right': 200}},
            {"bounds": {'top': 275, 'bottom': 300, 'left': 100, 'right': 150}},
            {"bounds": {'top': 300, 'bottom': 350, 'left': 100, 'right': 150}},
            {"bounds": {'top': 250, 'bottom': 275, 'left': 100, 'right': 150}}
        ]
        self.jsonrpc.count.return_value = 5
        self.assertEqual(self.obj.down().selector["instance"], 4)
class TestAutomatorDeviceNamedUiObject(unittest.TestCase):
    """Child/sibling lookup tests for AutomatorDeviceNamedUiObject."""

    def setUp(self):
        self.device = MagicMock()
        self.jsonrpc = self.device.server.jsonrpc = MagicMock()
        self.name = "my-name"
        self.obj = AutomatorDeviceNamedUiObject(self.device, self.name)

    def test_child(self):
        """child() forwards name+selector to getChild and wraps the result."""
        self.jsonrpc.getChild.return_value = "another-name"
        selector_kwargs = {"className": "android", "text": "patern match text"}
        child_obj = self.obj.child(**selector_kwargs)
        self.jsonrpc.getChild.assert_called_once_with(
            self.name, Selector(**selector_kwargs))
        self.assertEqual(child_obj.selector, self.jsonrpc.getChild.return_value)

    def test_sibling(self):
        """sibling() forwards name+selector to getFromParent and wraps it."""
        self.jsonrpc.getFromParent.return_value = "another-name"
        selector_kwargs = {"className": "android", "text": "patern match text"}
        sibling_obj = self.obj.sibling(**selector_kwargs)
        self.jsonrpc.getFromParent.assert_called_once_with(
            self.name, Selector(**selector_kwargs))
        self.assertEqual(sibling_obj.selector, self.jsonrpc.getFromParent.return_value)
| |
# -*- coding: utf-8 -*-
"""
Presence analyzer unit tests.
"""
from __future__ import unicode_literals
import datetime
import json
import os.path
import unittest
from presence_analyzer import main, views, utils
TEST_DATA_CSV = os.path.join(
os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'test_data.csv',
)
TEST_DATA_XML = os.path.join(
os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'users_test.xml',
)
TEST_CACHE_DATA_CSV = os.path.join(
os.path.dirname(__file__), '..', '..', 'runtime', 'data',
'test_cache_data.csv',
)
# pylint: disable=E1103
class PresenceAnalyzerViewsTestCase(unittest.TestCase):
"""
Views tests.
"""
def setUp(self):
"""
        Before each test, set up an environment.
"""
main.app.config.update({
'DATA_CSV': TEST_DATA_CSV,
'DATA_XML': TEST_DATA_XML,
'DATA_CACHE': TEST_CACHE_DATA_CSV
})
self.client = main.app.test_client()
def tearDown(self):
"""
Get rid of unused objects after each test.
"""
pass
def test_mainpage(self):
"""
Test main page redirect.
"""
resp = self.client.get('/')
self.assertEqual(resp.status_code, 302)
assert resp.headers['Location'].endswith('/presence_weekday')
def test_api_users(self):
"""
Test users listing.
"""
resp = self.client.get('/api/v1/users')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
test_data = json.loads(resp.data)
self.assertEqual(len(test_data), 2)
self.assertDictEqual(test_data[0], {'user_id': 10, 'name': 'User 10'})
def test_api_users_v2(self):
"""
Test users listing v2.
"""
resp = self.client.get('/api/v2/users')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
test_data = json.loads(resp.data)
self.assertEqual(len(test_data), 2)
self.assertEqual(
test_data[0], [
141, {
'image':
'https://intranet.stxnext.pl/api/images/users/141',
'name':
'Adam P.'
}
]
)
def test_mean_time_weekday_view(self):
"""
Checking inversed presence time of given user grouped by weekday.
"""
resp = self.client.get('/api/v1/mean_time_weekday/10')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(json.loads(resp.data), [
['Mon', 0],
['Tue', 30047.0],
['Wed', 24465.0],
['Th', 23705.0],
['Fri', 0],
['Sat', 0],
['Sun', 0],
])
def test_presence_weekday_view(self):
"""
Checking inversed totla presence time of given user grouped by
weekaday.
"""
resp = self.client.get('api/v1/presence_weekday/10')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(json.loads(resp.data), [
['Weekday', 'Presence (s)'],
['Mon', 0],
['Tue', 30047.0],
['Wed', 24465.0],
['Th', 23705.0],
['Fri', 0],
['Sat', 0],
['Sun', 0],
])
def test_presence_start_end_view(self):
"""
Testing returned avg starts, ends of given user grouped by weekeday.
"""
resp = self.client.get('/api/v1/presence_start_end/10')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(json.loads(resp.data), [
['Mon', 0, 0],
['Tue', 34745.0, 64792.0],
['Wed', 33592.0, 58057.0],
['Th', 38926.0, 62631.0],
['Fri', 0, 0],
['Sat', 0, 0],
['Sun', 0, 0],
])
def test_templates_render(self):
"""
Testing returned templates.
"""
resp = self.client.get('/presence_weekday')
self.assertIn('Presence by weekday', resp.data)
self.assertEqual(resp.status_code, 200)
resp = self.client.get('/mean_time_weekdays')
self.assertIn('Presence mean time by weekday', resp.data)
self.assertEqual(resp.status_code, 200)
resp = self.client.get('/presence_start_end')
self.assertIn('Presence start end', resp.data)
self.assertEqual(resp.status_code, 200)
resp = self.client.get('/bad_page_name')
self.assertIn('page not found', resp.data)
self.assertEqual(resp.status_code, 404)
class PresenceAnalyzerUtilsTestCase(unittest.TestCase):
    """
    Utility functions tests.
    """

    def setUp(self):
        """
        Before each test, point the application at the test fixtures.
        """
        main.app.config.update({
            'DATA_CSV': TEST_DATA_CSV,
            'DATA_XML': TEST_DATA_XML,
            'DATA_CACHE': TEST_CACHE_DATA_CSV
        })

    def tearDown(self):
        """
        Get rid of unused objects after each test.
        """
        pass

    def test_get_data(self):
        """
        Test parsing of CSV file.
        """
        test_data = utils.get_data()
        self.assertIsInstance(test_data, dict)
        self.assertItemsEqual(test_data.keys(), [10, 11])
        sample_date = datetime.date(2013, 9, 10)
        self.assertIn(sample_date, test_data[10])
        self.assertItemsEqual(
            test_data[10][sample_date].keys(), ['start', 'end']
        )
        self.assertEqual(
            test_data[10][sample_date]['start'], datetime.time(9, 39, 5)
        )

    def test_get_xml_data(self):
        """
        Test parsing of the users XML file.
        """
        test_data = utils.get_xml_data()
        self.assertIsInstance(test_data, dict)
        self.assertEqual({
            'image': 'https://intranet.stxnext.pl/api/images/users/141',
            'name': 'Adam P.'
        }, test_data[141])
        self.assertIn({
            'image': 'https://intranet.stxnext.pl/api/images/users/176',
            'name': 'Adrian K.'
        }, test_data.values())
        self.assertItemsEqual(test_data[141].keys(), ['image', 'name'])

    def test_group_by_weekday(self):
        """
        Test grouping of presence entries by weekday.
        """
        sample_data = utils.get_data()
        result = utils.group_by_weekday(sample_data[10])
        self.assertIsInstance(result, dict)
        self.assertDictEqual({
            0: [],
            1: [30047],
            2: [24465],
            3: [23705],
            4: [],
            5: [],
            6: [],
        }, result)
        # BUG FIX: regroup for user 11 before comparing. Previously the
        # second expectation was checked against the stale result for
        # user 10, so the assertion could never match user 11's data.
        result = utils.group_by_weekday(sample_data[11])
        self.assertIsInstance(result, dict)
        self.assertDictEqual({
            0: [24123],
            1: [16564],
            2: [25321],
            3: [22969, 22999],
            4: [6426],
            5: [],
            6: [],
        }, result)

    def test_seconds_since_midnight(self):
        """
        Testing results of seconds_since_midnight function.
        """
        self.assertEqual(
            utils.seconds_since_midnight(datetime.time(0, 0, 30)), 30
        )
        self.assertEqual(
            utils.seconds_since_midnight(datetime.time(0, 10, 30)), 630
        )
        self.assertEqual(
            utils.seconds_since_midnight(datetime.time(10, 10, 30)), 36630
        )

    def test_interval(self):
        """
        Testing calculated time between end and start.
        """
        # Plain decimal literals: leading-zero forms such as ``01`` are
        # Python 2-only octal syntax and a SyntaxError under Python 3.
        self.assertEqual(
            utils.interval(datetime.time(1, 0, 0), datetime.time(10, 0, 0)),
            32400,
        )
        self.assertEqual(
            utils.interval(datetime.time(1, 0, 0), datetime.time(17, 0, 0)),
            57600,
        )
        self.assertEqual(
            utils.interval(datetime.time(1, 5, 0), datetime.time(11, 10, 15)),
            36315,
        )

    def test_mean(self):
        """
        Testing calculated average of a list; an empty list yields 0.
        """
        self.assertEqual(utils.mean([]), 0)
        self.assertEqual(utils.mean([1, 2, 3, 78, 119]), 40.6)
        self.assertAlmostEqual(
            utils.mean([1.5, 2.2, 78.31054, 19.86465484]), 25.4687987
        )
        self.assertAlmostEqual(
            utils.mean([74.51, 0.9243, 78.64, 19.545]), 43.404825
        )
        self.assertAlmostEqual(
            utils.mean([74.53, 53.22, 24.75, 19.5345]), 43.00862501
        )
        self.assertEqual(utils.mean([54.2, 234.4, 59.93, 43.4]), 97.9825)

    def test_count_avg_group_by_weekday(self):
        """
        Testing returned presence starts/ends grouped by weekday.
        """
        sample_data = utils.get_data()
        test_data = utils.count_avg_group_by_weekday(
            sample_data[10],
        )
        self.assertEqual(
            test_data, {
                0: {'start': [], 'end': []},
                1: {'start': [34745], 'end': [64792]},
                2: {'start': [33592], 'end': [58057]},
                3: {'start': [38926], 'end': [62631]},
                4: {'start': [], 'end': []},
                5: {'start': [], 'end': []},
                6: {'start': [], 'end': []},
            }
        )
        test_data = utils.count_avg_group_by_weekday(
            sample_data[11],
        )
        # assertEqual, not assertAlmostEqual: the compared values are
        # dicts, and assertAlmostEqual only "worked" through its
        # first-equality short-circuit; on mismatch it would raise a
        # TypeError instead of a useful failure message.
        self.assertEqual(
            test_data, {
                0: {'start': [33134], 'end': [57257]},
                1: {'start': [33590], 'end': [50154]},
                2: {'start': [33206], 'end': [58527]},
                3: {'start': [37116, 34088], 'end': [60085, 57087]},
                4: {'start': [47816], 'end': [54242]},
                5: {'start': [], 'end': []},
                6: {'start': [], 'end': []},
            }
        )

    def test_cache(self):
        """
        get_data() results are cached until utils.CACHE is cleared.
        """
        first_data = utils.get_data()
        main.app.config.update({'DATA_CSV': TEST_CACHE_DATA_CSV})
        second_data = utils.get_data()
        self.assertDictEqual(first_data, second_data)
        utils.CACHE = {}
        second_data = utils.get_data()
        self.assertNotEqual(first_data, second_data)
        utils.CACHE = {}
def suite():
    """
    Build the default test suite from all test cases in this module.
    """
    default_suite = unittest.TestSuite()
    for case in (PresenceAnalyzerViewsTestCase, PresenceAnalyzerUtilsTestCase):
        default_suite.addTest(unittest.makeSuite(case))
    return default_suite
# Allow running this module directly: executes the standard unittest CLI
# against the test cases defined above.
if __name__ == '__main__':
    unittest.main()
| |
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User-friendly container for Google Cloud Bigtable Row."""
import struct
import six
from google.cloud._helpers import _datetime_from_microseconds
from google.cloud._helpers import _microseconds_from_datetime
from google.cloud._helpers import _to_bytes
from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2
# Serializer for integer cell values: packs a Python int into the
# 8-byte big-endian signed representation (">q") Bigtable stores.
_PACK_I64 = struct.Struct(">q").pack
MAX_MUTATIONS = 100000
"""The maximum number of mutations that a row can accumulate."""
class Row(object):
    """Base representation of a Google Cloud Bigtable Row.

    Concrete mutations are sent through one of three subclasses, each
    mapping to a single RPC method:

    * :class:`DirectRow` for ``MutateRow``
    * :class:`ConditionalRow` for ``CheckAndMutateRow``
    * :class:`AppendRow` for ``ReadModifyWriteRow``

    :type row_key: bytes
    :param row_key: The key for the current row.

    :type table: :class:`Table <google.cloud.bigtable.table.Table>`
    :param table: (Optional) The table that owns the row.
    """

    def __init__(self, row_key, table=None):
        # Row keys are always handled as bytes internally.
        self._row_key = _to_bytes(row_key)
        self._table = table

    @property
    def row_key(self):
        """The key for the current row, as ``bytes``.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_row_row_key]
            :end-before: [END bigtable_row_row_key]
        """
        return self._row_key

    @property
    def table(self):
        """The :class:`Table <google.cloud.bigtable.table.Table>` owning the row.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_row_table]
            :end-before: [END bigtable_row_table]
        """
        return self._table
class _SetDeleteRow(Row):
    """Row helper for setting or deleting cell values.

    Provides the shared machinery behind:

    * :meth:`set_cell`
    * :meth:`delete`
    * :meth:`delete_cell`
    * :meth:`delete_cells`

    :type row_key: bytes
    :param row_key: The key for the current row.

    :type table: :class:`Table <google.cloud.bigtable.table.Table>`
    :param table: The table that owns the row.
    """

    ALL_COLUMNS = object()
    """Sentinel value used to indicate all columns in a column family."""

    def _get_mutations(self, state=None):
        """Return the mutation list for ``state``.

        Intended to be overridden by subclasses; implementations that
        keep a single mutation list may ignore ``state``.

        :type state: bool
        :param state: The state that the mutation should be applied in.

        :raises: :class:`NotImplementedError <exceptions.NotImplementedError>`
                 always.
        """
        raise NotImplementedError

    def _set_cell(self, column_family_id, column, value, timestamp=None, state=None):
        """Queue a mutation that sets the value of a single cell.

        Helper for :meth:`set_cell`. ``state`` is unused by
        :class:`DirectRow` but is used by subclasses.

        :type column_family_id: str
        :param column_family_id: The column family that contains the column.
                                 Must be of the form
                                 ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.

        :type column: bytes
        :param column: The column within the column family where the cell
                       is located.

        :type value: bytes or :class:`int`
        :param value: The value for the cell. Integers are serialized as
                      64-bit big-endian signed values (8 bytes).

        :type timestamp: :class:`datetime.datetime`
        :param timestamp: (Optional) The timestamp of the operation.

        :type state: bool
        :param state: (Optional) Forwarded to :meth:`_get_mutations`.
        """
        column = _to_bytes(column)
        if isinstance(value, six.integer_types):
            value = _PACK_I64(value)
        value = _to_bytes(value)
        if timestamp is None:
            # -1 tells the Bigtable server to use its current time.
            timestamp_micros = -1
        else:
            timestamp_micros = _microseconds_from_datetime(timestamp)
            # The service only supports millisecond granularity.
            timestamp_micros -= timestamp_micros % 1000
        set_cell_pb = data_v2_pb2.Mutation.SetCell(
            family_name=column_family_id,
            column_qualifier=column,
            timestamp_micros=timestamp_micros,
            value=value,
        )
        self._get_mutations(state).append(
            data_v2_pb2.Mutation(set_cell=set_cell_pb)
        )

    def _delete(self, state=None):
        """Queue a mutation deleting the entire row.

        Helper for :meth:`delete`. ``state`` is unused by
        :class:`DirectRow` but is used by subclasses.

        :type state: bool
        :param state: (Optional) Forwarded to :meth:`_get_mutations`.
        """
        delete_row_pb = data_v2_pb2.Mutation.DeleteFromRow()
        self._get_mutations(state).append(
            data_v2_pb2.Mutation(delete_from_row=delete_row_pb)
        )

    def _delete_cells(self, column_family_id, columns, time_range=None, state=None):
        """Queue deletions for one or more columns (or a whole family).

        Helper for :meth:`delete_cell` and :meth:`delete_cells`.
        ``state`` is unused by :class:`DirectRow` but is used by
        subclasses.

        :type column_family_id: str
        :param column_family_id: The column family that contains the column
                                 or columns with cells being deleted. Must
                                 be of the form
                                 ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.

        :type columns: :class:`list` of :class:`str` /
                       :func:`unicode <unicode>`, or :class:`object`
        :param columns: The columns within the column family that will have
                        cells deleted. If :attr:`ALL_COLUMNS` is used then
                        the entire column family is deleted from the row.

        :type time_range: :class:`TimestampRange`
        :param time_range: (Optional) The range of time within which cells
                           should be deleted.

        :type state: bool
        :param state: (Optional) Forwarded to :meth:`_get_mutations`.
        """
        mutations_list = self._get_mutations(state)
        if columns is self.ALL_COLUMNS:
            family_pb = data_v2_pb2.Mutation.DeleteFromFamily(
                family_name=column_family_id
            )
            mutations_list.append(
                data_v2_pb2.Mutation(delete_from_family=family_pb)
            )
            return

        delete_kwargs = {}
        if time_range is not None:
            delete_kwargs["time_range"] = time_range.to_pb()
        staged = []
        for column in columns:
            # time_range (when present) stays fixed across iterations;
            # only the family/qualifier entries change per column.
            delete_kwargs.update(
                family_name=column_family_id,
                column_qualifier=_to_bytes(column),
            )
            column_pb = data_v2_pb2.Mutation.DeleteFromColumn(**delete_kwargs)
            staged.append(data_v2_pb2.Mutation(delete_from_column=column_pb))
        # Only extend the shared list once every column has been
        # processed without error.
        mutations_list.extend(staged)
class DirectRow(_SetDeleteRow):
    """Google Cloud Bigtable Row for sending "direct" mutations.

    These mutations directly set or delete cell contents:

    * :meth:`set_cell`
    * :meth:`delete`
    * :meth:`delete_cell`
    * :meth:`delete_cells`

    These methods can be used directly::

        >>> row = table.row(b'row-key1')
        >>> row.set_cell(u'fam', b'col1', b'cell-val')
        >>> row.delete_cell(u'fam', b'col2')

    .. note::

        Mutations accumulate locally; nothing is sent to the Google
        Cloud Bigtable API until :meth:`commit` is called.

    :type row_key: bytes
    :param row_key: The key for the current row.

    :type table: :class:`Table <google.cloud.bigtable.table.Table>`
    :param table: (Optional) The table that owns the row. Only needed by
                  :meth:`commit`; alternatively, DirectRows can be
                  persisted via
                  :meth:`~google.cloud.bigtable.table.Table.mutate_rows`.
    """

    def __init__(self, row_key, table=None):
        super(DirectRow, self).__init__(row_key, table)
        self._pb_mutations = []

    def _get_mutations(self, state=None):  # pylint: disable=unused-argument
        """Return the single mutation list kept by this row.

        ``state`` is accepted for signature compatibility with
        subclasses but is ignored here.

        :type state: bool
        :param state: Unused.

        :rtype: list
        :returns: The list new mutations are appended to.
        """
        return self._pb_mutations

    def get_mutations_size(self):
        """Return the combined serialized size of all queued mutations.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_row_get_mutations_size]
            :end-before: [END bigtable_row_get_mutations_size]
        """
        return sum(mutation.ByteSize() for mutation in self._get_mutations())

    def set_cell(self, column_family_id, column, value, timestamp=None):
        """Queue a mutation setting a value in a cell of this row.

        The cell is addressed by this row's key, ``column_family_id``
        and ``column``; the column must belong to an existing
        :class:`.ColumnFamily`.

        .. note::

            No API request is made until :meth:`commit` is called.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_row_set_cell]
            :end-before: [END bigtable_row_set_cell]

        :type column_family_id: str
        :param column_family_id: The column family that contains the column.
                                 Must be of the form
                                 ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.

        :type column: bytes
        :param column: The column within the column family where the cell
                       is located.

        :type value: bytes or :class:`int`
        :param value: The value to set in the cell. Integers are stored as
                      64-bit big-endian signed values (8 bytes).

        :type timestamp: :class:`datetime.datetime`
        :param timestamp: (Optional) The timestamp of the operation.
        """
        self._set_cell(column_family_id, column, value, timestamp=timestamp, state=None)

    def delete(self):
        """Queue a mutation deleting this row from the table.

        .. note::

            No API request is made until :meth:`commit` is called.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_row_delete]
            :end-before: [END bigtable_row_delete]
        """
        self._delete(state=None)

    def delete_cell(self, column_family_id, column, time_range=None):
        """Queue a mutation deleting a single cell of this row.

        .. note::

            No API request is made until :meth:`commit` is called.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_row_delete_cell]
            :end-before: [END bigtable_row_delete_cell]

        :type column_family_id: str
        :param column_family_id: The column family that contains the column
                                 with the cell being deleted. Must be of the
                                 form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.

        :type column: bytes
        :param column: The column within the column family that will have a
                       cell deleted.

        :type time_range: :class:`TimestampRange`
        :param time_range: (Optional) The range of time within which cells
                           should be deleted.
        """
        self._delete_cells(
            column_family_id, [column], time_range=time_range, state=None
        )

    def delete_cells(self, column_family_id, columns, time_range=None):
        """Queue mutations deleting several cells of this row.

        .. note::

            No API request is made until :meth:`commit` is called.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_row_delete_cells]
            :end-before: [END bigtable_row_delete_cells]

        :type column_family_id: str
        :param column_family_id: The column family that contains the columns
                                 with cells being deleted. Must be of the
                                 form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.

        :type columns: :class:`list` of :class:`str` /
                       :func:`unicode <unicode>`, or :class:`object`
        :param columns: The columns within the column family that will have
                        cells deleted. If :attr:`ALL_COLUMNS` is used then
                        the entire column family is deleted from the row.

        :type time_range: :class:`TimestampRange`
        :param time_range: (Optional) The range of time within which cells
                           should be deleted.
        """
        self._delete_cells(column_family_id, columns, time_range=time_range, state=None)

    def commit(self):
        """Makes a ``MutateRow`` API request.

        If no mutations have been created in the row, no request is made.

        Mutations are applied atomically and in order, so earlier
        mutations can be masked / negated by later ones. Cells already
        present in the row are left unchanged unless explicitly changed
        by a mutation. Afterwards the local mutation list is reset.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_row_commit]
            :end-before: [END bigtable_row_commit]

        :raises: :exc:`~.table.TooManyMutationsError` if the number of
                 mutations is greater than 100,000.
        """
        self._table.mutate_rows([self])
        self.clear()

    def clear(self):
        """Removes all currently accumulated mutations on the current row.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_row_clear]
            :end-before: [END bigtable_row_clear]
        """
        del self._pb_mutations[:]
class ConditionalRow(_SetDeleteRow):
    """Google Cloud Bigtable Row for sending mutations conditionally.

    Every queued mutation carries a state flag (:data:`True` or
    :data:`False`). On :meth:`commit`, the server applies the
    :data:`True`-state mutations when the filter matches any cell in
    the row, and the :data:`False`-state mutations otherwise.

    Mutations accumulate exactly as on a :class:`DirectRow`:

    * :meth:`set_cell`
    * :meth:`delete`
    * :meth:`delete_cell`
    * :meth:`delete_cells`

    with the only change the extra ``state`` parameter::

        >>> row_cond = table.row(b'row-key2', filter_=row_filter)
        >>> row_cond.set_cell(u'fam', b'col', b'cell-val', state=True)
        >>> row_cond.delete_cell(u'fam', b'col', state=False)

    .. note::

        As with :class:`DirectRow`, nothing is sent to the Google Cloud
        Bigtable API until :meth:`commit` is called.

    :type row_key: bytes
    :param row_key: The key for the current row.

    :type table: :class:`Table <google.cloud.bigtable.table.Table>`
    :param table: The table that owns the row.

    :type filter_: :class:`.RowFilter`
    :param filter_: Filter to be used for conditional mutations.
    """

    def __init__(self, row_key, table, filter_):
        super(ConditionalRow, self).__init__(row_key, table)
        self._filter = filter_
        self._true_pb_mutations = []
        self._false_pb_mutations = []

    def _get_mutations(self, state=None):
        """Return the mutation list for ``state``.

        Overridden so that the ``state`` flag passed to
        :meth:`set_cell`, :meth:`delete`, :meth:`delete_cell` and
        :meth:`delete_cells` selects between the two mutation lists.

        :type state: bool
        :param state: The state that the mutation should be applied in.

        :rtype: list
        :returns: The list to add new mutations to (for that state).
        """
        return self._true_pb_mutations if state else self._false_pb_mutations

    def commit(self):
        """Makes a ``CheckAndMutateRow`` API request.

        If no mutations have been created in the row, no request is
        made. Otherwise the server applies the true-state or false-state
        mutations (depending on whether the filter matched) atomically
        and in order, and the local mutation lists are reset.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_row_commit]
            :end-before: [END bigtable_row_commit]

        :rtype: bool
        :returns: Flag indicating if the filter was matched (which also
                  indicates which set of mutations were applied by the
                  server).

        :raises: :class:`ValueError <exceptions.ValueError>` if the number
                 of mutations exceeds the :data:`MAX_MUTATIONS`.
        """
        true_mutations = self._get_mutations(state=True)
        false_mutations = self._get_mutations(state=False)
        num_true_mutations = len(true_mutations)
        num_false_mutations = len(false_mutations)
        if not (num_true_mutations or num_false_mutations):
            return
        if max(num_true_mutations, num_false_mutations) > MAX_MUTATIONS:
            raise ValueError(
                "Exceed the maximum allowable mutations (%d). Had %s true "
                "mutations and %d false mutations."
                % (MAX_MUTATIONS, num_true_mutations, num_false_mutations)
            )
        data_client = self._table._instance._client.table_data_client
        resp = data_client.check_and_mutate_row(
            table_name=self._table.name,
            row_key=self._row_key,
            predicate_filter=self._filter.to_pb(),
            app_profile_id=self._table._app_profile_id,
            true_mutations=true_mutations,
            false_mutations=false_mutations,
        )
        self.clear()
        return resp.predicate_matched

    # pylint: disable=arguments-differ
    def set_cell(self, column_family_id, column, value, timestamp=None, state=True):
        """Queue a mutation setting a value in a cell of this row.

        The cell is addressed by this row's key, ``column_family_id``
        and ``column``; the column must belong to an existing
        :class:`.ColumnFamily`.

        .. note::

            No API request is made until :meth:`commit` is called.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_row_set_cell]
            :end-before: [END bigtable_row_set_cell]

        :type column_family_id: str
        :param column_family_id: The column family that contains the column.
                                 Must be of the form
                                 ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.

        :type column: bytes
        :param column: The column within the column family where the cell
                       is located.

        :type value: bytes or :class:`int`
        :param value: The value to set in the cell. Integers are stored as
                      64-bit big-endian signed values (8 bytes).

        :type timestamp: :class:`datetime.datetime`
        :param timestamp: (Optional) The timestamp of the operation.

        :type state: bool
        :param state: (Optional) The state that the mutation should be
                      applied in. Defaults to :data:`True`.
        """
        self._set_cell(
            column_family_id, column, value, timestamp=timestamp, state=state
        )

    def delete(self, state=True):
        """Queue a mutation deleting this row from the table.

        .. note::

            No API request is made until :meth:`commit` is called.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_row_delete]
            :end-before: [END bigtable_row_delete]

        :type state: bool
        :param state: (Optional) The state that the mutation should be
                      applied in. Defaults to :data:`True`.
        """
        self._delete(state=state)

    def delete_cell(self, column_family_id, column, time_range=None, state=True):
        """Queue a mutation deleting a single cell of this row.

        .. note::

            No API request is made until :meth:`commit` is called.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_row_delete_cell]
            :end-before: [END bigtable_row_delete_cell]

        :type column_family_id: str
        :param column_family_id: The column family that contains the column
                                 with the cell being deleted. Must be of the
                                 form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.

        :type column: bytes
        :param column: The column within the column family that will have a
                       cell deleted.

        :type time_range: :class:`TimestampRange`
        :param time_range: (Optional) The range of time within which cells
                           should be deleted.

        :type state: bool
        :param state: (Optional) The state that the mutation should be
                      applied in. Defaults to :data:`True`.
        """
        self._delete_cells(
            column_family_id, [column], time_range=time_range, state=state
        )

    def delete_cells(self, column_family_id, columns, time_range=None, state=True):
        """Queue mutations deleting several cells of this row.

        .. note::

            No API request is made until :meth:`commit` is called.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_row_delete_cells]
            :end-before: [END bigtable_row_delete_cells]

        :type column_family_id: str
        :param column_family_id: The column family that contains the columns
                                 with cells being deleted. Must be of the
                                 form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.

        :type columns: :class:`list` of :class:`str` /
                       :func:`unicode <unicode>`, or :class:`object`
        :param columns: The columns within the column family that will have
                        cells deleted. If :attr:`ALL_COLUMNS` is used then
                        the entire column family is deleted from the row.

        :type time_range: :class:`TimestampRange`
        :param time_range: (Optional) The range of time within which cells
                           should be deleted.

        :type state: bool
        :param state: (Optional) The state that the mutation should be
                      applied in. Defaults to :data:`True`.
        """
        self._delete_cells(
            column_family_id, columns, time_range=time_range, state=state
        )

    # pylint: enable=arguments-differ

    def clear(self):
        """Removes all currently accumulated mutations on the current row.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_row_clear]
            :end-before: [END bigtable_row_clear]
        """
        del self._true_pb_mutations[:]
        del self._false_pb_mutations[:]
class AppendRow(Row):
    """Google Cloud Bigtable Row for sending append mutations.

    Append mutations augment the value already stored in a cell:

    * :meth:`append_cell_value` appends bytes
    * :meth:`increment_cell_value` increments an integer (stored in the
      cell as 8 bytes)

    An empty cell is treated as holding the default empty value (empty
    string for bytes, 0 for an integer).

    :type row_key: bytes
    :param row_key: The key for the current row.

    :type table: :class:`Table <google.cloud.bigtable.table.Table>`
    :param table: The table that owns the row.
    """

    def __init__(self, row_key, table):
        super(AppendRow, self).__init__(row_key, table)
        self._rule_pb_list = []

    def clear(self):
        """Removes all currently accumulated modifications on current row.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_row_clear]
            :end-before: [END bigtable_row_clear]
        """
        del self._rule_pb_list[:]

    def append_cell_value(self, column_family_id, column, value):
        """Queue a rule appending ``value`` to an existing cell.

        .. note::

            This only adds a read-modify rule protobuf locally; no API
            request is made until :meth:`commit` is called.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_row_append_cell_value]
            :end-before: [END bigtable_row_append_cell_value]

        :type column_family_id: str
        :param column_family_id: The column family that contains the column.
                                 Must be of the form
                                 ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.

        :type column: bytes
        :param column: The column within the column family where the cell
                       is located.

        :type value: bytes
        :param value: The value to append to the existing value in the
                      cell. An unset cell is treated as containing the
                      empty string.
        """
        append_rule = data_v2_pb2.ReadModifyWriteRule(
            family_name=column_family_id,
            column_qualifier=_to_bytes(column),
            append_value=_to_bytes(value),
        )
        self._rule_pb_list.append(append_rule)

    def increment_cell_value(self, column_family_id, column, int_value):
        """Queue a rule incrementing the integer stored in a cell.

        Assumes the value in the cell is stored as a 64-bit integer
        serialized to bytes.

        .. note::

            This only adds a read-modify rule protobuf locally; no API
            request is made until :meth:`commit` is called.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_row_increment_cell_value]
            :end-before: [END bigtable_row_increment_cell_value]

        :type column_family_id: str
        :param column_family_id: The column family that contains the column.
                                 Must be of the form
                                 ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.

        :type column: bytes
        :param column: The column within the column family where the cell
                       is located.

        :type int_value: int
        :param int_value: The amount to increment the existing value by.
                          An unset cell is treated as containing zero;
                          otherwise the cell must hold an 8-byte value
                          (a 64-bit big-endian signed integer) or the
                          entire request fails.
        """
        increment_rule = data_v2_pb2.ReadModifyWriteRule(
            family_name=column_family_id,
            column_qualifier=_to_bytes(column),
            increment_amount=int_value,
        )
        self._rule_pb_list.append(increment_rule)

    def commit(self):
        """Makes a ``ReadModifyWriteRow`` API request.

        Commits the rules queued by :meth:`append_cell_value` and
        :meth:`increment_cell_value`; with no queued rules, makes no API
        request and returns ``{}``.

        The row is modified atomically: the latest existing
        timestamp / value of each targeted column is read, then a new
        value is written by appending / incrementing. The new cell uses
        either the current server time or the highest timestamp of a
        cell in that column (if it exceeds the server time). The local
        rule list is reset afterwards.

        For example:

        .. literalinclude:: snippets_table.py
            :start-after: [START bigtable_row_commit]
            :end-before: [END bigtable_row_commit]

        :rtype: dict
        :returns: The new contents of all modified cells, keyed by column
                  family, then by column. Each column maps to a list of
                  modified cells, each a two-tuple of value (bytes) and
                  timestamp.

        :raises: :class:`ValueError <exceptions.ValueError>` if the number
                 of mutations exceeds the :data:`MAX_MUTATIONS`.
        """
        num_mutations = len(self._rule_pb_list)
        if not num_mutations:
            return {}
        if num_mutations > MAX_MUTATIONS:
            raise ValueError(
                "%d total append mutations exceed the maximum "
                "allowable %d." % (num_mutations, MAX_MUTATIONS)
            )
        data_client = self._table._instance._client.table_data_client
        row_response = data_client.read_modify_write_row(
            table_name=self._table.name,
            row_key=self._row_key,
            rules=self._rule_pb_list,
            app_profile_id=self._table._app_profile_id,
        )
        # Reset modifications after commit-ing request.
        self.clear()
        # NOTE: We expect row_response.key == self._row_key but don't check.
        return _parse_rmw_row_response(row_response)
def _parse_rmw_row_response(row_response):
    """Convert a ``ReadModifyWriteRow`` response into a nested dict.

    :type row_response: :class:`.data_v2_pb2.Row`
    :param row_response: The response row (holding only the modified
                         cells) from a ``ReadModifyWriteRow`` request.

    :rtype: dict
    :returns: Mapping of column-family id to a dict of column qualifiers,
              each qualifier mapped to a list of
              ``(value_bytes, timestamp)`` two-tuples, e.g.::

                  {
                      u'col-fam-id': {
                          b'col-name1': [
                              (b'cell-val', datetime.datetime(...)),
                          ],
                      },
                  }
    """
    # _parse_family_pb returns (family_id, columns_dict) pairs, so the
    # whole response collapses into a single dict() construction.
    return dict(
        _parse_family_pb(family_pb) for family_pb in row_response.row.families
    )
def _parse_family_pb(family_pb):
    """Flatten a Family protobuf into ``(name, cells_by_column)``.

    :type family_pb: :class:`._generated.data_pb2.Family`
    :param family_pb: A protobuf for a single column family.

    :rtype: tuple
    :returns: The family name and a dict mapping each column qualifier to
              a list of ``(value_bytes, timestamp)`` two-tuples, one per
              cell, e.g.::

                  {
                      b'col-name1': [
                          (b'cell-val', datetime.datetime(...)),
                          (b'cell-val-newer', datetime.datetime(...)),
                      ],
                      b'col-name2': [
                          (b'altcol-cell-val', datetime.datetime(...)),
                      ],
                  }
    """
    cells_by_column = {
        column.qualifier: [
            (cell.value, _datetime_from_microseconds(cell.timestamp_micros))
            for cell in column.cells
        ]
        for column in family_pb.columns
    }
    return family_pb.name, cells_by_column
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
    location: str,
    publisher_name: str,
    type: str,
    version: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a single VM extension image version."""
    api_version = "2017-12-01"
    accept = "application/json"
    # Resolve the URL template, substituting every path parameter.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions/{version}')
    path_args = {
        "location": _SERIALIZER.url("location", location, 'str'),
        "publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
        "type": _SERIALIZER.url("type", type, 'str'),
        "version": _SERIALIZER.url("version", version, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    resolved_url = _format_url_section(template, **path_args)
    # Query string: only api-version is sent.
    query: Dict[str, Any] = kwargs.pop("params", {})
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers: JSON responses only.
    headers: Dict[str, Any] = kwargs.pop("headers", {})
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=resolved_url,
        params=query,
        headers=headers,
        **kwargs
    )
def build_list_types_request(
    location: str,
    publisher_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing VM extension image types."""
    api_version = "2017-12-01"
    accept = "application/json"
    # Resolve the URL template, substituting every path parameter.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types')
    path_args = {
        "location": _SERIALIZER.url("location", location, 'str'),
        "publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    resolved_url = _format_url_section(template, **path_args)
    # Query string: only api-version is sent.
    query: Dict[str, Any] = kwargs.pop("params", {})
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers: JSON responses only.
    headers: Dict[str, Any] = kwargs.pop("headers", {})
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=resolved_url,
        params=query,
        headers=headers,
        **kwargs
    )
def build_list_versions_request(
    location: str,
    publisher_name: str,
    type: str,
    subscription_id: str,
    *,
    filter: Optional[str] = None,
    top: Optional[int] = None,
    orderby: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing VM extension image versions."""
    api_version = "2017-12-01"
    accept = "application/json"
    # Resolve the URL template, substituting every path parameter.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions')
    path_args = {
        "location": _SERIALIZER.url("location", location, 'str'),
        "publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
        "type": _SERIALIZER.url("type", type, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    resolved_url = _format_url_section(template, **path_args)
    # Query string: OData options are only sent when supplied.
    query: Dict[str, Any] = kwargs.pop("params", {})
    if filter is not None:
        query['$filter'] = _SERIALIZER.query("filter", filter, 'str')
    if top is not None:
        query['$top'] = _SERIALIZER.query("top", top, 'int')
    if orderby is not None:
        query['$orderby'] = _SERIALIZER.query("orderby", orderby, 'str')
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers: JSON responses only.
    headers: Dict[str, Any] = kwargs.pop("headers", {})
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=resolved_url,
        params=query,
        headers=headers,
        **kwargs
    )
# NOTE: auto-generated (AutoRest) operations group — edits are lost on
# regeneration, so only comments are added here.
class VirtualMachineExtensionImagesOperations(object):
    """VirtualMachineExtensionImagesOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.compute.v2017_12_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # Alias so callers can reach the model classes through the operations group.
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Injected by the generated service client; _config carries the
        # subscription_id used when building request URLs.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def get(
        self,
        location: str,
        publisher_name: str,
        type: str,
        version: str,
        **kwargs: Any
    ) -> "_models.VirtualMachineExtensionImage":
        """Gets a virtual machine extension image.
        :param location: The name of a supported Azure region.
        :type location: str
        :param publisher_name:
        :type publisher_name: str
        :param type:
        :type type: str
        :param version:
        :type version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VirtualMachineExtensionImage, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2017_12_01.models.VirtualMachineExtensionImage
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualMachineExtensionImage"]
        # Default status-code -> exception mapping; callers may extend or
        # override it via the 'error_map' keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Build, normalize and fully qualify the request URL.
        request = build_get_request(
            location=location,
            publisher_name=publisher_name,
            type=type,
            version=version,
            subscription_id=self._config.subscription_id,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 is the only documented success code for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('VirtualMachineExtensionImage', pipeline_response)
        # A custom 'cls' callback, when given, shapes the returned value.
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions/{version}'}  # type: ignore
    @distributed_trace
    def list_types(
        self,
        location: str,
        publisher_name: str,
        **kwargs: Any
    ) -> List["_models.VirtualMachineExtensionImage"]:
        """Gets a list of virtual machine extension image types.
        :param location: The name of a supported Azure region.
        :type location: str
        :param publisher_name:
        :type publisher_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of VirtualMachineExtensionImage, or the result of cls(response)
        :rtype: list[~azure.mgmt.compute.v2017_12_01.models.VirtualMachineExtensionImage]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.VirtualMachineExtensionImage"]]
        # Default status-code -> exception mapping; extendable via 'error_map'.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Build, normalize and fully qualify the request URL.
        request = build_list_types_request(
            location=location,
            publisher_name=publisher_name,
            subscription_id=self._config.subscription_id,
            template_url=self.list_types.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 is the only documented success code for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('[VirtualMachineExtensionImage]', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list_types.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types'}  # type: ignore
    @distributed_trace
    def list_versions(
        self,
        location: str,
        publisher_name: str,
        type: str,
        filter: Optional[str] = None,
        top: Optional[int] = None,
        orderby: Optional[str] = None,
        **kwargs: Any
    ) -> List["_models.VirtualMachineExtensionImage"]:
        """Gets a list of virtual machine extension image versions.
        :param location: The name of a supported Azure region.
        :type location: str
        :param publisher_name:
        :type publisher_name: str
        :param type:
        :type type: str
        :param filter: The filter to apply on the operation.
        :type filter: str
        :param top:
        :type top: int
        :param orderby:
        :type orderby: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of VirtualMachineExtensionImage, or the result of cls(response)
        :rtype: list[~azure.mgmt.compute.v2017_12_01.models.VirtualMachineExtensionImage]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.VirtualMachineExtensionImage"]]
        # Default status-code -> exception mapping; extendable via 'error_map'.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Build, normalize and fully qualify the request URL; the optional
        # OData arguments are forwarded and only emitted when not None.
        request = build_list_versions_request(
            location=location,
            publisher_name=publisher_name,
            type=type,
            subscription_id=self._config.subscription_id,
            filter=filter,
            top=top,
            orderby=orderby,
            template_url=self.list_versions.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 is the only documented success code for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('[VirtualMachineExtensionImage]', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list_versions.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions'}  # type: ignore
| |
# Copyright (c) 2012 Santosh Philip
# Copyright (c) 2021 Jeremy Lerond
# =======================================================================
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at
# http://opensource.org/licenses/MIT)
# =======================================================================
"""py.test for modeleditor"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from itertools import product
import os
import warnings
import os.path
import shutil
from pathlib import Path
import pytest
from six import StringIO
from six import string_types
from eppy import modeleditor
from eppy.bunch_subclass import Bunch
from eppy.iddcurrent import iddcurrent
from eppy.modeleditor import IDF
from eppy.pytest_helpers import almostequal
import eppy.snippet as snippet
iddsnippet = iddcurrent.iddtxt
idfsnippet = snippet.idfsnippet
# idffhandle = StringIO(idfsnippet)
# iddfhandle = StringIO(iddsnippet)
# bunchdt, data, commdct, gdict = idfreader.idfreader(idffhandle, iddfhandle, None)
# idd is read only once in this test
# if it has already been read from some other test, it will continue with
# the old reading
iddfhandle = StringIO(iddcurrent.iddtxt)
# PEP 8 (E711): compare to None with `is`, not `==`
if IDF.getiddname() is None:
    IDF.setiddname(iddfhandle)
def test_poptrailing():
    """py.test for modeleditor.poptrailing: trailing "" items are dropped."""
    cases = [
        ([1, 2, 3, "", 56, "", "", "", ""], [1, 2, 3, "", 56]),
        ([1, 2, 3, "", 56], [1, 2, 3, "", 56]),  # interior "" survives
        ([1, 2, 3, 56], [1, 2, 3, 56]),  # nothing to trim
    ]
    for lst, expected in cases:
        assert modeleditor.poptrailing(lst) == expected
def test_extendlist():
    """py.test for modeleditor.extendlist: pad in place up to index i."""
    cases = [
        ([1, 2, 3], 2, 0, [1, 2, 3]),  # already long enough
        ([1, 2, 3], 3, 0, [1, 2, 3, 0]),
        ([1, 2, 3], 5, 0, [1, 2, 3, 0, 0, 0]),
        ([1, 2, 3], 7, 0, [1, 2, 3, 0, 0, 0, 0, 0]),
    ]
    for lst, index, fill, expected in cases:
        modeleditor.extendlist(lst, index, value=fill)
        assert lst == expected  # mutated in place
def test_namebunch():
    """py.test for modeleditor.namebunch: set Name only if it is a string."""
    cases = (
        (Bunch(dict(Name="", a=5)), "yay", "yay"),  # empty name gets replaced
        (Bunch(dict(Name=None, a=5)), "yay", None),  # None name is left alone
    )
    for bunch, newname, expected in cases:
        named = modeleditor.namebunch(bunch, newname)
        assert named.Name == expected
def test_getnamedargs():
    """py.test for modeleditor.getnamedargs.

    Keyword args, a single dict, and mixtures of both must all collapse
    to the same flat dict.
    """
    expected = dict(a=1, b=2, c=3)
    assert modeleditor.getnamedargs(a=1, b=2, c=3) == expected
    assert modeleditor.getnamedargs(dict(a=1, b=2, c=3)) == expected
    assert modeleditor.getnamedargs(dict(a=1, b=2), c=3) == expected
    assert modeleditor.getnamedargs(dict(a=1), c=3, b=2) == expected
def test_getrefnames():
    """py.test for modeleditor.getrefnames."""
    cases = (
        (
            "ZONE",
            [
                "ZoneNames",
                "OutFaceEnvNames",
                "ZoneAndZoneListNames",
                "AirflowNetworkNodeAndZoneNames",
            ],
        ),
        (
            "FLUIDPROPERTIES:NAME",
            ["FluidNames", "FluidAndGlycolNames"],
        ),
        ("BUILDING", []),  # objects nothing refers to yield an empty list
    )
    for objkey, expected in cases:
        # an empty IDF is enough: refnames come from the IDD, not the model
        idf = IDF(StringIO(""))
        assert modeleditor.getrefnames(idf, objkey) == expected
def test_getallobjlists():
    """py.test for modeleditor.getallobjlists."""
    cases = (
        (
            "TransformerNames",
            [("ELECTRICLOADCENTER:DISTRIBUTION", "TransformerNames", [10])],
        ),
    )
    for refname, expected in cases:
        # an empty IDF is enough: object-lists come from the IDD
        idf = IDF(StringIO(""))
        assert modeleditor.getallobjlists(idf, refname) == expected
def test_rename():
    """py.test for rename"""
    # Fixture: the material name appears once as a Name field and four
    # times as construction layer references.
    idftxt = """Material,
       G01a 19mm gypsum board, !- Name
       MediumSmooth, !- Roughness
       0.019, !- Thickness {m}
       0.16, !- Conductivity {W/m-K}
       800, !- Density {kg/m3}
       1090; !- Specific Heat {J/kg-K}
       Construction,
       Interior Wall, !- Name
       G01a 19mm gypsum board, !- Outside Layer
       F04 Wall air space resistance, !- Layer 2
       G01a 19mm gypsum board; !- Layer 3
       Construction,
       Other Wall, !- Name
       G01a 19mm gypsum board, !- Outside Layer
       G01a 19mm gypsum board, !- Layer 2
       G01a 19mm gypsum board; !- Layer 3
       """
    # ridftxt documents the expected text after the rename; it is kept for
    # reference only — the asserts below check the same thing field-by-field.
    ridftxt = """Material,
       peanut butter, !- Name
       MediumSmooth, !- Roughness
       0.019, !- Thickness {m}
       0.16, !- Conductivity {W/m-K}
       800, !- Density {kg/m3}
       1090; !- Specific Heat {J/kg-K}
       Construction,
       Interior Wall, !- Name
       peanut butter, !- Outside Layer
       F04 Wall air space resistance, !- Layer 2
       peanut butter; !- Layer 3
       Construction,
       Other Wall, !- Name
       peanut butter, !- Outside Layer
       peanut butter, !- Layer 2
       peanut butter; !- Layer 3
       """
    fhandle = StringIO(idftxt)
    idf = IDF(fhandle)
    # rename must update the Name field AND every referring field
    result = modeleditor.rename(
        idf, "Material".upper(), "G01a 19mm gypsum board", "peanut butter"
    )
    assert result.Name == "peanut butter"
    assert idf.idfobjects["CONSTRUCTION"][0].Outside_Layer == "peanut butter"
    assert idf.idfobjects["CONSTRUCTION"][0].Layer_3 == "peanut butter"
def test_zonearea_zonevolume():
    """py.test for zonearea and zonevolume"""
    # Fixture: one zone ("473222") made of two boxes sharing a wall —
    # floors, walls and roofs given as compressed BuildingSurface:Detailed
    # records.  Expected totals: floor/roof area 7.1938 m2, height 1.49 m,
    # volume 10.718762 m3.
    idftxt = """Zone, 473222, 0.0, 0.0, 0.0, 0.0, , 1;
    BuildingSurface:Detailed, F7289B, Floor, Exterior Floor, 473222,
    Ground, ,
    NoSun, NoWind, , 4, 2.23, 2.56, 0.0, 2.23, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    2.56, 0.0; BuildingSurface:Detailed, F3659B, Wall, Exterior Wall,
    473222, Outdoors, , SunExposed, WindExposed, , 4, 2.23, 2.56, 1.49,
    2.23, 2.56, 0.0, 0.0, 2.56, 0.0, 0.0, 2.56, 1.49;
    BuildingSurface:Detailed, 46C6C9, Wall, Exterior Wall, 473222,
    Outdoors, , SunExposed, WindExposed, , 4, 2.23, 0.0, 1.49, 2.23,
    0.0, 0.0, 2.23, 1.02548139464, 0.0, 2.23, 1.02548139464, 1.49;
    BuildingSurface:Detailed, 4287DD, Wall, Exterior Wall, 473222,
    Outdoors, , SunExposed, WindExposed, , 4, 0.0, 2.56, 1.49, 0.0,
    2.56, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.49;
    BuildingSurface:Detailed, 570C2E, Wall, Exterior Wall, 473222,
    Outdoors, , SunExposed, WindExposed, , 4, 0.0, 0.0, 1.49, 0.0, 0.0,
    0.0, 2.23, 0.0, 0.0, 2.23, 0.0, 1.49; BuildingSurface:Detailed,
    BAEA99, Roof, Exterior Roof, 473222, Outdoors, , SunExposed,
    WindExposed, , 4, 0.0, 2.56, 1.49, 0.0, 0.0, 1.49, 2.23, 0.0, 1.49,
    2.23, 2.56, 1.49; BuildingSurface:Detailed, C879FE, Floor,
    Exterior Floor, 473222, Ground, , NoSun, NoWind, , 4, 3.22,
    2.52548139464, 0.0, 3.22, 1.02548139464, 0.0, 2.23,
    1.02548139464, 0.0, 2.23, 2.52548139464, 0.0;
    BuildingSurface:Detailed, 25B601, Wall, Exterior Wall, 473222,
    Outdoors, , SunExposed, WindExposed, , 4, 2.23,
    1.02548139464, 1.49, 2.23, 1.02548139464, 0.0, 2.23, 2.52548139464,
    0.0, 2.23, 2.52548139464, 1.49; BuildingSurface:Detailed, F5EADC,
    Wall, Exterior Wall, 473222, Outdoors, , SunExposed, WindExposed, ,
    4, 2.23, 1.02548139464, 1.49, 2.23, 1.02548139464, 0.0, 3.22,
    1.02548139464, 0.0, 3.22, 1.02548139464, 1.49;
    BuildingSurface:Detailed, D0AABE, Wall, Exterior Wall, 473222,
    Outdoors, , SunExposed, WindExposed, , 4, 3.22, 1.02548139464,
    1.49, 3.22, 1.02548139464, 0.0, 3.22, 2.52548139464, 0.0, 3.22,
    2.52548139464, 1.49; BuildingSurface:Detailed, B0EA02, Wall,
    Exterior Wall, 473222, Outdoors, , SunExposed, WindExposed, ,
    4, 3.22, 2.52548139464, 1.49, 3.22, 2.52548139464, 0.0, 2.23,
    2.52548139464, 0.0, 2.23, 2.52548139464, 1.49;
    BuildingSurface:Detailed, E6DF3B, Roof, Exterior Roof, 473222,
    Outdoors, , SunExposed, WindExposed, , 4, 2.23, 2.52548139464, 1.49,
    2.23, 1.02548139464, 1.49, 3.22, 1.02548139464, 1.49, 3.22,
    2.52548139464, 1.49; BuildingSurface:Detailed, 4F8681, Wall,
    Exterior Wall, 473222, Outdoors, , SunExposed, WindExposed, , 4,
    2.23, 2.52548139464, 1.49, 2.23, 2.52548139464, 0.0, 2.23, 2.56,
    0.0, 2.23, 2.56, 1.49; """
    idf = IDF(StringIO(idftxt))
    # with the full surface set, floor-based and roof-based answers agree
    result = modeleditor.zonearea(idf, "473222")
    assert almostequal(result, 7.1938)
    result = modeleditor.zonearea_floor(idf, "473222")
    assert almostequal(result, 7.1938)
    result = modeleditor.zonearea_roofceiling(idf, "473222")
    assert almostequal(result, 7.1938)
    result = modeleditor.zone_floor2roofheight(idf, "473222")
    assert almostequal(result, 1.49)
    result = modeleditor.zoneheight(idf, "473222")
    assert almostequal(result, 1.49)
    result = modeleditor.zone_floor2roofheight(idf, "473222")
    assert almostequal(result, 1.49)
    result = modeleditor.zonevolume(idf, "473222")
    assert almostequal(result, 10.718762)
    # remove floor
    # area/height/volume must then fall back to the roof/ceiling surfaces
    zone = idf.getobject("ZONE", "473222")
    surfs = idf.idfobjects["BuildingSurface:Detailed".upper()]
    zone_surfs = [s for s in surfs if s.Zone_Name == zone.Name]
    floors = [s for s in zone_surfs if s.Surface_Type.upper() == "FLOOR"]
    for floor in floors:
        idf.removeidfobject(floor)
    result = modeleditor.zonearea_floor(idf, "473222")
    assert almostequal(result, 0)
    result = modeleditor.zonearea_roofceiling(idf, "473222")
    assert almostequal(result, 7.1938)
    result = modeleditor.zonearea(idf, "473222")
    assert almostequal(result, 7.1938)
    result = modeleditor.zoneheight(idf, "473222")
    assert almostequal(result, 1.49)
    result = modeleditor.zonevolume(idf, "473222")
    assert almostequal(result, 10.718762)
    # reload idf and remove roof/ceiling
    # the symmetric case: answers must fall back to the floor surfaces
    idf = IDF(StringIO(idftxt))
    zone = idf.getobject("ZONE", "473222")
    surfs = idf.idfobjects["BuildingSurface:Detailed".upper()]
    zone_surfs = [s for s in surfs if s.Zone_Name == zone.Name]
    roofs = [s for s in zone_surfs if s.Surface_Type.upper() == "ROOF"]
    ceilings = [s for s in zone_surfs if s.Surface_Type.upper() == "CEILING"]
    topsurfaces = roofs + ceilings
    for surf in topsurfaces:
        idf.removeidfobject(surf)
    result = modeleditor.zonearea_roofceiling(idf, "473222")
    assert almostequal(result, 0)
    result = modeleditor.zonearea(idf, "473222")
    assert almostequal(result, 7.1938)
    result = modeleditor.zoneheight(idf, "473222")
    assert almostequal(result, 1.49)
    result = modeleditor.zonevolume(idf, "473222")
    assert almostequal(result, 10.718762)
def test_new():
    """py.test for IDF.new(): a fresh model has empty object lists."""
    blank = IDF()
    blank.new()
    building_objs = blank.idfobjects["BUILDING"]
    assert building_objs.list1 == []
    assert building_objs.list2 == []
def test_newidfobject():
    """py.test for newidfobject plus the pop/remove/copy helpers."""
    # start from a blank in-memory model
    idf = IDF()
    idf.new()
    objtype = "MATERIAL:AIRGAP"
    for gas in ("Argon", "Krypton", "Xenon"):
        idf.newidfobject(objtype, Name=gas)
    assert idf.model.dt[objtype] == [
        ["MATERIAL:AIRGAP", "Argon"],
        ["MATERIAL:AIRGAP", "Krypton"],
        ["MATERIAL:AIRGAP", "Xenon"],
    ]
    # pop the middle object by index
    idf.popidfobject(objtype, 1)
    assert idf.model.dt[objtype] == [
        ["MATERIAL:AIRGAP", "Argon"],
        ["MATERIAL:AIRGAP", "Xenon"],
    ]
    # remove the last object by reference
    idf.removeidfobject(idf.idfobjects[objtype][-1])
    assert idf.model.dt[objtype] == [["MATERIAL:AIRGAP", "Argon"]]
    # duplicate the only remaining object
    idf.copyidfobject(idf.idfobjects[objtype][0])
    assert idf.model.dt[objtype] == [
        ["MATERIAL:AIRGAP", "Argon"],
        ["MATERIAL:AIRGAP", "Argon"],
    ]
    # wipe out the object type entirely
    idf.removeallidfobjects(objtype)
    assert len(idf.idfobjects[objtype]) == 0
    # a freshly created surface starts with no coordinates
    surface = idf.newidfobject("FENESTRATIONSURFACE:DETAILED", Name="A Wall")
    assert surface.coords == []
    assert surface.fieldvalues[1] == "A Wall"
    # defaultvalues toggles whether IDD defaults are filled in
    with_defaults = idf.newidfobject("SIMULATIONCONTROL", defaultvalues=True)
    assert with_defaults.Do_Zone_Sizing_Calculation == "No"
    without_defaults = idf.newidfobject("SIMULATIONCONTROL", defaultvalues=False)
    assert without_defaults.Do_Zone_Sizing_Calculation == ""
def test_newidfobject_warning():
    """Check the `aname` deprecation warning on IDF.newidfobject.

    A UserWarning must fire when `aname` is supplied (by keyword or
    positionally) and must NOT fire when `Name` is used instead.
    """
    idf = IDF()
    idf.new()
    objtype = "MATERIAL:AIRGAP"
    with pytest.warns(UserWarning):
        idf.newidfobject(objtype, aname="Krypton")
    with pytest.warns(UserWarning):
        idf.newidfobject(objtype, "Krypton")
    # pytest.warns(None) avoids the `Failed: DID NOT WARN` error while
    # still capturing anything that IS emitted
    with pytest.warns(None) as caught:
        idf.newidfobject(objtype, Name="Krypton")
    assert len(caught) == 0
def test_save():
    """py.test for IDF.save() using an in-memory filehandle.

    Avoids touching the filesystem; only checks that the material name
    round-trips through save().
    """
    idf = IDF(StringIO("Material,TestMaterial, !- Name"))
    out = StringIO()
    idf.save(out)
    out.seek(0)
    assert "TestMaterial" in out.read()
# Structure of a test:
#
# 1. Arrange -> pytest fixture
# 2. Act
# 3. Assert
# 4. Cleanup -> pytest fixture
@pytest.fixture
def createidfinafolder():
    """Make a temporary directory and save a small IDF (building +
    version objects) inside it as ``a.idf``.

    Yields the IDF object.

    :Teardown: remove the temp dir and its contents.
    """
    import tempfile  # local import: only this fixture needs it

    # mkdtemp gives a unique absolute path, so the fixture cannot fail
    # with FileExistsError after a crashed run and is safe under
    # parallel test execution (the old hard-coded ./atempdir1 was not)
    tdirname = tempfile.mkdtemp()
    # build a minimal IDF in memory
    idf = IDF(StringIO(""))
    building = idf.newidfobject("building")
    building.Name = "Taj"
    idf.newidfobject("version")
    # save it inside the temp dir
    idf.saveas(os.path.join(tdirname, "a.idf"))
    yield idf
    # Teardown: mkdtemp returned an absolute path, so removal works from
    # any current directory
    shutil.rmtree(tdirname)
@pytest.fixture
def changedir():
    """Make a temporary directory and chdir into it.

    Yields the temp dir name.

    :Teardown: return to the original dir and delete the temp dir.
    """
    import tempfile  # local import: only this fixture needs it

    origdir = os.path.abspath(os.path.curdir)
    # mkdtemp gives a unique absolute path, so the fixture cannot fail
    # with FileExistsError after a crashed run and is safe under
    # parallel test execution (the old hard-coded ./atempdir2 was not)
    tdirname = tempfile.mkdtemp()
    os.chdir(tdirname)
    yield tdirname
    # Teardown: restore cwd first, then remove (can't rmtree the cwd on
    # some platforms)
    os.chdir(origdir)
    shutil.rmtree(tdirname)
def test_save_with_dir_change(createidfinafolder, changedir):
    """py.test: IDF.save() must write to the original path after a chdir."""
    new_name = "Mahal"
    # createidfinafolder supplies an idf already saved in its own folder
    idf = createidfinafolder
    saved_path = idf.idfabsname
    # changedir is used purely for its chdir side effect
    _ = changedir
    # edit the model while sitting in the new directory
    idf.idfobjects["building"][0].Name = new_name
    idf.save()  # must land in the original folder, not the cwd
    # re-read from the absolute path and confirm the edit stuck
    reread = IDF(saved_path)
    assert reread.idfobjects["building"][0].Name == new_name
    # the same read must also work through a pathlib.Path
    as_path = Path(saved_path)
    assert isinstance(as_path, Path)
    reread_via_path = IDF(as_path)
    assert reread_via_path.idfobjects["building"][0].Name == new_name
def test_save_with_lineendings_and_encodings():
    """py.test IDF.save() across every line-ending/encoding combination."""
    idf = IDF(StringIO("Material,TestMaterial, !- Name"))
    # per line-ending mode: predicate the encoded output must satisfy
    expectations = {
        "windows": lambda raw, enc: b"\r\n" in raw,
        "unix": lambda raw, enc: b"\r\n" not in raw,
        "default": lambda raw, enc: os.linesep.encode(enc) in raw,
    }
    lineendings = ("windows", "unix", "default")
    encodings = ("ascii", "latin-1", "UTF-8")
    for le, enc in product(lineendings, encodings):
        out = StringIO()
        idf.save(out, encoding=enc, lineendings=le)
        out.seek(0)
        raw = out.read().encode(enc)
        assert expectations[le](raw, enc)
def test_saveas():
    """Test the IDF.saveas() function.

    ``saveas()`` without a filename must raise TypeError; saving to a
    filehandle must write the content and rebind ``idfname`` to the new
    target.
    """
    file_text = "Material,TestMaterial, !- Name"
    idf = IDF(StringIO(file_text))
    idf.idfname = "test.idf"
    # idiomatic pytest replacement for try/assert False/except
    with pytest.raises(TypeError):
        idf.saveas()  # no filename passed
    file_handle = StringIO()
    idf.saveas(file_handle)  # save with a filehandle
    file_handle.seek(0)
    assert "TestMaterial" in file_handle.read()
    # saveas() rebinds idfname to the new save target
    assert idf.idfname != "test.idf"
def test_savecopy():
    """Test the IDF.savecopy() function.

    ``savecopy()`` without a filename must raise TypeError; saving a
    copy to a filehandle writes the content but must NOT rebind
    ``idfname`` (unlike ``saveas()``).
    """
    file_text = "Material,TestMaterial, !- Name"
    idf = IDF(StringIO(file_text))
    idf.idfname = "test.idf"
    # idiomatic pytest replacement for try/assert False/except
    with pytest.raises(TypeError):
        idf.savecopy()  # no filename passed
    file_handle = StringIO()
    idf.savecopy(file_handle)  # save a copy with a different filename
    file_handle.seek(0)
    assert "TestMaterial" in file_handle.read()
    # savecopy() leaves idfname untouched
    assert idf.idfname == "test.idf"
def test_initread():
    """Test IDF.initread() with filename in unicode and as python str.

    Also checks that a nonexistent file raises IOError.  Removal of the
    temporary file is wrapped in try/finally so a failing assertion no
    longer leaks ``tmp.idf`` onto disk (the old version skipped teardown
    on failure).
    """
    # setup: write a real file for initread to consume
    idf = IDF()
    idf.initreadtxt(idfsnippet)
    idf.saveas("tmp.idf")
    try:
        # both a unicode literal and an explicit str must be accepted
        for fname in ("tmp.idf", str("tmp.idf")):
            assert isinstance(fname, string_types)
            idf = IDF()
            idf.initread(fname)
            assert idf.getobject("BUILDING", "Building")
        # a nonexistent file must raise IOError
        idf = IDF()
        with pytest.raises(IOError):
            idf.initread("notarealfilename.notreal")
    finally:
        # teardown always runs, even when an assertion above fails
        os.remove("tmp.idf")
def test_initreadtxt():
    """Test for IDF.initreadtxt()."""
    # Snippet containing a Material that is referenced by a Construction;
    # the test only checks that the Material object is parsed.
    idftxt = """
Material,
G01a 19mm gypsum board, !- Name
MediumSmooth, !- Roughness
0.019, !- Thickness {m}
0.16, !- Conductivity {W/m-K}
800, !- Density {kg/m3}
1090; !- Specific Heat {J/kg-K}
Construction,
Interior Wall, !- Name
G01a 19mm gypsum board, !- Outside Layer
F04 Wall air space resistance, !- Layer 2
G01a 19mm gypsum board; !- Layer 3
"""
    idf = IDF()
    idf.initreadtxt(idftxt)
    assert idf.getobject("MATERIAL", "G01a 19mm gypsum board")
def test_idfstr():
    """Exercise every outputtype option of IDF.idfstr()."""
    idf = IDF()
    idf.initreadtxt(idfsnippet)
    assert idf.outputtype == "standard"  # the default
    original = idf.idfstr()
    assert "!-" in original  # has comments
    assert "\n" in original  # has line breaks
    assert "\n\n" in original  # has empty lines
    # (outputtype, has_comments, has_linebreaks, has_empty_lines, unchanged)
    cases = [
        ("standard", True, True, True, True),
        ("nocomment", False, True, True, False),
        ("nocomment1", False, True, True, False),
        ("nocomment2", False, True, False, False),
        ("compressed", False, False, False, False),
    ]
    for outputtype, comments, breaks, blanks, unchanged in cases:
        idf.outputtype = outputtype
        s = idf.idfstr()
        assert ("!-" in s) == comments
        assert ("\n" in s) == breaks
        assert ("\n\n" in s) == blanks
        assert (s == original) == unchanged
def test_refname2key():
    """py.test for refname2key"""
    # refname -> expected list of object keys
    expected = {
        "TransformerNames": ["ElectricLoadCenter:Distribution".upper()],
        "AllCurves": [
            "PUMP:VARIABLESPEED",
            "PUMP:CONSTANTSPEED",
            "BOILER:HOTWATER",
            "ENERGYMANAGEMENTSYSTEM:CURVEORTABLEINDEXVARIABLE",
        ],
    }
    for refname, key in expected.items():
        idf = IDF(StringIO(""))
        assert modeleditor.refname2key(idf, refname) == key
def test_getiddgroupdict():
    """py.test for IDF.getiddgroupdict()"""
    expected = {None: ["Lead Input", "Simulation Data"]}
    idf = IDF(StringIO(""))
    result = idf.getiddgroupdict()
    # objects without a group are filed under the None key
    assert result[None] == expected[None]
def test_idfinmsequence():
    """py.test for setting of theidf in Idf_MSequence"""
    idftxt = """Version, 6.0;"""
    # theidf set in Idf_MSequence.__init__
    idf = IDF(StringIO(idftxt))
    versions = idf.idfobjects["version".upper()]
    assert versions.theidf == idf
    assert versions[0].theidf == idf
    # theidf set in Idf_MSequence.insert()
    material = idf.newidfobject("material".upper())
    assert material.theidf == idf
    # theidf unset when you pop an item; second material keeps the list non-empty
    newmaterial = idf.newidfobject("material".upper())
    materials = idf.idfobjects["material".upper()]
    material = materials.pop(0)
    # identity check with "is None" instead of the "== None" anti-pattern
    assert material.theidf is None
    assert materials[0].theidf == idf
def test_idd_index():
    """An empty IDF must expose an empty idd_index dict."""
    idf = IDF(StringIO(""))
    assert idf.idd_index == {}
| |
import os, errno, uuid, threading
from django.conf import settings
from django.core import validators
from django.core.files import base, temp
from django.utils.encoding import force_text
from six.moves import queue as six_queue
try:
from django.utils.deconstruct import deconstructible
except ImportError:
deconstructible = lambda x: x
try:
from django.apps import apps
from django.core.exceptions import AppRegistryNotReady
except ImportError:
from django.db import models
class AppRegistryNotReady(BaseException): pass
class BackwardsApps(object):
ready = True
def is_installed(self, app_name):
return app_name in settings.INSTALLED_APPS
def get_model(self, app_label, model_name=None):
if model_name is None:
app_label, model_name = app_label.split('.')
return models.get_model(app_label, model_name)
apps = BackwardsApps()
__all__ = [
'VALUE_NOT_SET', 'ProcessingError', 'NamedTemporaryFile', 'UploadTo',
'AsynchronousFileReader'
]
def get_model_name(instance):
    """Return the lower-cased model name for a model ``instance``.

    Uses ``_meta.model_name`` (Django >= 1.6) when it exists and falls back
    to ``_meta.object_name.lower()`` otherwise.  The fallback is evaluated
    lazily: the original ``getattr(..., default)`` form computed
    ``object_name.lower()`` eagerly on every call, even when ``model_name``
    was present, and raised AttributeError on metas without ``object_name``.
    """
    meta = instance._meta
    model_name = getattr(meta, 'model_name', None)
    if model_name is not None:
        return model_name
    return meta.object_name.lower()
def get_empty_values(field):
    """Return the list of values ``field`` treats as "empty".

    Prefers the field's own ``empty_values`` attribute (Django >= 1.6) and
    falls back to the global ``validators.EMPTY_VALUES``.  The fallback is
    now evaluated lazily; the original ``getattr`` form built
    ``list(validators.EMPTY_VALUES)`` on every call regardless of whether
    it was needed.
    """
    try:
        return field.empty_values
    except AttributeError:
        return list(validators.EMPTY_VALUES)
class VALUE_NOT_SET(object):
    """Sentinel class used to distinguish "no value provided" from ``None``."""
    pass
class ProcessingError(Exception):
    """Raised when post-processing of an uploaded file fails."""
    pass
class NamedTemporaryFile(base.File):
    """This class is required for FileStorage to make an attempt in moving the
    file, instead of copying it by chunks in memory. Borrowed implementation from
    `django.core.files.uploadedfile.TemporaryUploadedFile`
    """
    def __init__(self, **kwargs):
        # Wrap a real on-disk tempfile so temporary_file_path() can expose
        # a path that storage backends may move instead of copy.
        file = temp.NamedTemporaryFile(**kwargs)
        super(NamedTemporaryFile, self).__init__(file)
    def temporary_file_path(self):
        """
        Returns the full path of this file.
        """
        return self.file.name
    def close(self):
        # make sure the size is recorded before closing the file
        _ = self.size
        try:
            return self.file.close()
        except OSError as e:
            # ENOENT means the file was already moved or deleted (e.g. by the
            # storage backend); any other OS error is re-raised.
            if e.errno != errno.ENOENT:
                raise
@deconstructible
class UploadTo(object):
    """This is an upload filename generator to be used to create a function to be
    passed as an ``upload_to`` keyword argument to the ``models.FileField`` and
    it's derivatives. It will generate the path in the form:
    basefolder/app_label/model_name/parent_pk/subfolder/pk/field_name/filename.ext
    :keyword str basefolder: path that will be prepended to the filename.
    :keyword str subfolder: path that will be a container of the model instances
    :keyword str filename: will replace the actual file name completely, ex:
    ``filename='file.ext' -> in_filename='foo.bar' -> out_filename='file.ext'``
    :keyword str name: will replace the name portion of the file, ex:
    ``name='file' -> in_filename='foo.bar' -> out_filename='file.bar'``
    :keyword str ext: will replace the extension portion of the file, ex:
    ``ext='pdf' -> in_filename='foo.bar' -> out_filename='foo.pdf'``
    :keyword str app_label: if ``None`` will insert the ``app_label`` retrieved from
    the model instance (Default). Otherwise specify a string to enforce a
    specific app_label or anything else evaluating to ``False`` except ``None``
    in order to skip insertion of an app_label.
    :keyword str model_name: if ``None`` will insert the ``model_name`` retrieved
    from the model instance (Default). Otherwise specify a string to enforce a
    specific model_name or anything else evaluating to ``False`` except ``None``
    in order to skip insertion of a model_name.
    :keyword str field_name: if ``None`` will skip insertion of a field_name
    (Default), otherwise ``field_name`` string will be inserted before the subfolder.
    :keyword function generator: function that will generate a new name portion
    of the file in case if it is set to ``True`` default generator :func:`uuid.uuid1`
    will be used.
    :keyword str parent_field_name: name of the ForeignKey or OneToOneField, that
    is considered a parent for this field's model. Set to an empty string to ignore parent_pk
    """
    def __init__(self, basefolder=None, subfolder=None, filename=None, name=None, ext=None,
                 app_label=None, model_name=None, parent_field_name=None, field_name=None,
                 add_pk=True, generator=None):
        # 'filename' replaces the whole name, 'name' only the stem, and a
        # generator produces a fresh stem -- these are mutually exclusive.
        assert filename is None or name is None, \
            "Cannot have 'filename' and 'name' specified at the same time."
        assert generator is None or (filename is None and name is None), \
            "Cannot specify a name and enforce name generation"
        assert generator is None or generator is True or callable(generator), \
            "Supply a function for a generator or set it to True to use default generator."
        self.basefolder = basefolder
        self.subfolder = subfolder
        self.filename = filename
        self.name = name
        self.ext = ext
        self.app_label = app_label
        self.model_name = model_name
        self.parent_field_name = parent_field_name
        self.field_name = field_name
        self.add_pk = add_pk
        # generator=True selects the default uuid1-based stem generator
        self.generator = uuid.uuid1 if generator is True else generator
    def __eq__(self, other):
        # Field-by-field equality; needed so Django migrations (via
        # @deconstructible) can tell whether the upload_to callable changed.
        return (type(self) is type(other) and
                self.basefolder == other.basefolder and
                self.subfolder == other.subfolder and
                self.filename == other.filename and
                self.name == other.name and
                self.ext == other.ext and
                self.app_label == other.app_label and
                self.model_name == other.model_name and
                self.parent_field_name == other.parent_field_name and
                self.field_name == other.field_name and
                self.add_pk == other.add_pk and
                self.generator is other.generator)
    def __call__(self, instance, filename):
        # Assemble the path segments in the documented order:
        # basefolder/app_label/model_name/parent_pk/subfolder/pk/field_name/filename
        structure = []
        if self.basefolder is not None:
            structure.append(self.basefolder)
        if self.app_label is None:
            structure.append(instance._meta.app_label)
        elif self.app_label:
            structure.append(self.app_label)
        if self.model_name is None:
            structure.append(get_model_name(instance))
        elif self.model_name:
            structure.append(self.model_name)
        parent_pk = self.get_parent_pk(instance)
        if parent_pk is not None:
            structure.append(parent_pk)
        if self.subfolder is not None:
            structure.append(self.subfolder)
        # NOTE: pk is None for unsaved instances, in which case it is skipped
        if self.add_pk and instance.pk is not None:
            structure.append(force_text(instance.pk))
        if self.field_name is not None:
            structure.append(self.field_name)
        structure.append(self.get_filename(filename, instance))
        return os.path.join(*structure)
    def get_filename(self, filename, instance):
        # Apply filename / name / ext / generator overrides to the incoming
        # filename; see the class docstring for their interaction.
        if self.filename is not None:
            filename = self.filename
        else:
            name, ext = os.path.splitext(filename)
            if ext:  # remove a dot, but only if there is an extension
                ext = ext[1:]
            if self.generator is not None:
                name = self.generator()
            elif self.name is not None:
                # 'name' may be a callable receiving (current_stem, instance)
                if callable(self.name):
                    name = self.name(name, instance)
                else:
                    name = self.name
            if self.ext is not None:
                ext = self.ext
            if ext:
                filename = "%s.%s" % (name, ext)
            else:
                filename = name
        return filename
    def get_parent_pk(self, instance):
        # Returns the parent object's pk as text, or None when there is no
        # configured/available parent.  An empty-string parent_field_name
        # deliberately disables the lookup.
        parent_field_name = None
        if self.parent_field_name is None:
            parent_field_name = getattr(instance, 'parent_field_name', None)
        elif self.parent_field_name:
            parent_field_name = self.parent_field_name
        if parent_field_name is not None:
            parent = getattr(instance, parent_field_name)
            if parent is not None:
                return force_text(parent.pk)
class AsynchronousFileReader(threading.Thread):
    '''
    Helper class to implement asynchronous reading of a file
    in a separate thread. Pushes read lines on a queue to
    be consumed in another thread.
    source: http://stefaanlippens.net/python-asynchronous-subprocess-pipe-reading
    '''
    def __init__(self, fd, queue):
        # fd must expose a callable readline() (e.g. a subprocess pipe)
        assert isinstance(queue, six_queue.Queue)
        assert callable(fd.readline)
        super(AsynchronousFileReader, self).__init__()
        self._fd = fd
        self._queue = queue
    def run(self):
        '''The body of the thread: read lines and put them on the queue.'''
        # NOTE(review): the '' sentinel assumes a text-mode fd; a binary
        # stream returns b'' at EOF and would loop forever -- confirm callers
        # always pass text-mode streams.
        for line in iter(self._fd.readline, ''):
            self._queue.put(line)
    def eof(self):
        '''Check whether there is no more content to expect.'''
        return not self.is_alive() and self._queue.empty()
| |
import re
from typing import TYPE_CHECKING, Optional
from urllib.parse import urljoin
from django.utils.translation import gettext_lazy as _
import attr
import requests
from oauthlib.oauth2 import LegacyApplicationClient, BackendApplicationClient
from requests import Session
from requests.adapters import HTTPAdapter
from requests.auth import AuthBase, HTTPBasicAuth, HTTPDigestAuth
from requests.exceptions import RequestException
from requests_oauthlib import OAuth1, OAuth2Session
from corehq.motech.exceptions import ConfigurationError
from corehq.motech.utils import get_endpoint_url
if TYPE_CHECKING:
from corehq.motech.models import ConnectionSettings
# frozen + kw_only: instances are immutable and must be built with
# keyword arguments.
@attr.s(auto_attribs=True, frozen=True, kw_only=True)
class OAuth1ApiEndpoints:
    """
    Endpoints of a particular OAuth1 API
    """
    # Endpoint for token to identify HQ. e.g. '/oauth/request_token' (Twitter)
    request_token_endpoint: str
    # Endpoint for user to authorize HQ. e.g. '/oauth/authorize'
    authorization_endpoint: str
    # Endpoint to fetch access token. e.g. '/oauth/access_token'
    access_token_endpoint: str
# Registry of known OAuth1 APIs (empty for now).
oauth1_api_endpoints = tuple(
    # No integrations using OAuth1 authentication (yet?)
)
# (settings attribute name, human-readable label) pairs for OAuth 2.0 APIs.
oauth2_api_settings = (
    ('dhis2_auth_settings', 'DHIS2 OAuth 2.0'),
    ('moveit_automation_settings', 'MOVEit Automation'),
)
# Form/select choices combining both registries, with a "none" option first.
api_auth_settings_choices = [
    (None, _('(Not Applicable)')),
    *oauth1_api_endpoints,
    *oauth2_api_settings,
]
class HTTPBearerAuth(AuthBase):
    """
    Bearer-token auth for APIs that serve a "token" endpoint next to the
    API base (https://host/.../api/vN/token), using an OAuth 2.0
    password-grant POST.  A fresh token is fetched for every outgoing
    request and attached as an ``Authorization: Bearer <token>`` header.
    """
    def __init__(self, username, plaintext_password):
        self.username = username
        self.password = plaintext_password
    def _find_bearer_base(self, r):
        # Extract "https://.../api/vN/" from the request URL; raise if the
        # URL does not look like a versioned API endpoint.
        m = re.compile('https.*/api/v[0-9]+/').match(r.url)
        if m:
            return m.group(0)
        else:
            # Fixed error message (previously read "is not not valid")
            raise RequestException(None, r, "HTTP endpoint is not valid for bearer auth")
    def _get_auth_token(self, r):
        # POST an OAuth 2.0 password grant to <api base>/token.
        token_base = self._find_bearer_base(r)
        token_request_url = urljoin(token_base, "token")
        post_data = {
            "grant_type": "password",
            "username": self.username,
            "password": self.password,
        }
        # NOTE(review): no timeout is set, so a hung token endpoint blocks
        # the calling request indefinitely -- consider adding one.
        token_response = requests.post(token_request_url, data=post_data)
        try:
            return token_response.json()['access_token']
        except Exception:
            raise RequestException(
                None, r,
                f"Unable to retrieve access token for request: {token_response.content}"
            )
    def __call__(self, r):
        token = self._get_auth_token(r)
        r.headers["Authorization"] = f"Bearer {token}"
        return r
class PublicOnlyHttpAdapter(HTTPAdapter):
    """Transport adapter that validates every URL (SSRF protection) before
    handing the connection request to the default HTTPAdapter."""
    def __init__(self, domain_name, src):
        # domain_name/src identify the caller for validation and auditing
        self.domain_name = domain_name
        self.src = src
        super().__init__()
    def get_connection(self, url, proxies=None):
        # Local import to avoid a circular import with corehq.motech.requests
        from corehq.motech.requests import validate_user_input_url_for_repeaters
        validate_user_input_url_for_repeaters(url, domain=self.domain_name, src=self.src)
        return super().get_connection(url, proxies=proxies)
def make_session_public_only(session, domain_name, src):
    """
    Modifies `session` to validate urls before sending and accept only hosts
    resolving to public IPs.
    Once this function has been called on a session, session.request, etc.,
    will raise PossibleSSRFAttempt whenever called with a url host that
    resolves to a non-public IP.
    """
    # Swap the default adapters for our validating adapter on both of the
    # default URL prefixes.
    for prefix in ('http://', 'https://'):
        session.mount(prefix, PublicOnlyHttpAdapter(domain_name=domain_name, src=src))
class AuthManager:
    """Base class for building authenticated requests Sessions.
    Subclasses override get_auth() (and sometimes get_session())."""
    def get_auth(self) -> Optional[AuthBase]:
        """
        Returns an instance of requests.auth.AuthBase, to be passed to
        an outgoing API request, or None if not applicable.
        """
        return None
    def get_session(self, domain_name: str) -> Session:
        """
        Returns an instance of requests.Session. Manages authentication
        tokens, if applicable.
        """
        session = Session()
        # Restrict the session to hosts resolving to public IPs (SSRF guard)
        make_session_public_only(session, domain_name, src='sent_attempt')
        session.auth = self.get_auth()
        return session
class BasicAuthManager(AuthManager):
    """Auth manager for HTTP Basic authentication."""
    def __init__(self, username, password):
        self.username = username
        self.password = password
    def get_auth(self):
        return HTTPBasicAuth(self.username, self.password)
class DigestAuthManager(AuthManager):
    """Auth manager for HTTP Digest authentication."""
    def __init__(self, username, password):
        self.username = username
        self.password = password
    def get_auth(self):
        return HTTPDigestAuth(self.username, self.password)
class OAuth1Manager(AuthManager):
    """
    Auth manager for OAuth1 APIs.  Requires the OAuth1 authorization
    workflow to have been completed already, so that a token is stored
    on the ConnectionSettings.
    """
    def __init__(
        self,
        client_id: str,
        client_secret: str,
        api_endpoints: OAuth1ApiEndpoints,
        connection_settings: 'ConnectionSettings',
    ):
        self.client_id = client_id
        self.client_secret = client_secret
        self.api_endpoints = api_endpoints
        self.connection_settings = connection_settings
    @property
    def last_token(self) -> Optional[dict]:
        # Token produced by the OAuth1 workflow, persisted on ConnectionSettings
        return self.connection_settings.last_token
    def get_auth(self):
        if not self.last_token:
            # Keep the translatable string static: the original interpolated
            # an f-string inside _(), producing a dynamic msgid that gettext
            # can never match.  Interpolate after translation instead.
            raise ConfigurationError(
                _('OAuth1 authentication workflow has not been followed for '
                  'Connection "{}"').format(self.connection_settings)
            )
        resource_owner_key = self.last_token['oauth_token']
        resource_owner_secret = self.last_token['oauth_token_secret']
        return OAuth1(
            self.client_id,
            client_secret=self.client_secret,
            resource_owner_key=resource_owner_key,
            resource_owner_secret=resource_owner_secret
        )
class BearerAuthManager(AuthManager):
    """
    Like OAuth 2.0 Password Grant, but doesn't use a client ID or
    client secret.
    """
    def __init__(self, username, password):
        self.username = username
        self.password = password
    def get_auth(self):
        # HTTPBearerAuth fetches a fresh token per request
        return HTTPBearerAuth(self.username, self.password)
class OAuth2ClientGrantManager(AuthManager):
    """
    Follows the OAuth 2.0 client credentials grant type flow
    """
    def __init__(
        self,
        base_url: str,
        client_id: str,
        client_secret: str,
        token_url: str,
        refresh_url: str,
        connection_settings: 'ConnectionSettings',
    ):
        self.base_url = base_url
        self.client_id = client_id
        self.client_secret = client_secret
        self.token_url = token_url
        self.refresh_url = refresh_url
        self.connection_settings = connection_settings
    @property
    def last_token(self) -> Optional[dict]:
        # Most recently issued token, persisted on ConnectionSettings
        return self.connection_settings.last_token
    @last_token.setter
    def last_token(self, value: dict):
        """
        Save ``ConnectionSettings.last_token`` whenever it is set or
        refreshed so that it can be reused in the future.
        """
        self.connection_settings.last_token = value
        self.connection_settings.save()
    def get_session(self, domain_name: str) -> Session:
        """
        Returns an OAuth2Session that fetches an initial token via the
        client credentials grant when none is stored, and thereafter
        refreshes it automatically, persisting each new token.
        """
        def set_last_token(token):
            # Used by OAuth2Session
            self.last_token = token
        if not self.last_token:
            # First use: fetch a token with the client credentials grant
            client = BackendApplicationClient(client_id=self.client_id)
            session = OAuth2Session(client=client)
            self.last_token = session.fetch_token(
                token_url=self.token_url,
                client_id=self.client_id,
                client_secret=self.client_secret,
            )
        refresh_kwargs = {
            'client_id': self.client_id,
            'client_secret': self.client_secret,
        }
        # Session that auto-refreshes the token and persists it via
        # set_last_token
        session = OAuth2Session(
            self.client_id,
            token=self.last_token,
            auto_refresh_url=self.refresh_url,
            auto_refresh_kwargs=refresh_kwargs,
            token_updater=set_last_token
        )
        make_session_public_only(session, domain_name, src='oauth_sent_attempt')
        return session
class OAuth2PasswordGrantManager(AuthManager):
    """
    Follows the OAuth 2.0 resource owner password credentials (aka
    password) grant type flow.
    """
    def __init__(
        self,
        base_url: str,
        username: str,
        password: str,
        client_id: str,
        client_secret: str,
        token_url: str,
        refresh_url: str,
        pass_credentials_in_header: bool,
        connection_settings: 'ConnectionSettings',
    ):
        self.base_url = base_url
        self.username = username
        self.password = password
        self.client_id = client_id
        self.client_secret = client_secret
        self.token_url = token_url
        self.refresh_url = refresh_url
        # Some providers expect the client credentials as an HTTP Basic
        # header rather than in the POST body
        self.pass_credentials_in_header = pass_credentials_in_header
        self.connection_settings = connection_settings
    @property
    def last_token(self) -> Optional[dict]:
        # Most recently issued token, persisted on ConnectionSettings
        return self.connection_settings.last_token
    @last_token.setter
    def last_token(self, value: dict):
        """
        Save ``ConnectionSettings.last_token`` whenever it is set or
        refreshed so that it can be reused in the future.
        """
        self.connection_settings.last_token = value
        self.connection_settings.save()
    def get_session(self, domain_name: str) -> Session:
        """
        Returns an OAuth2Session that fetches an initial token via the
        password grant when none is stored, and thereafter refreshes it
        automatically, persisting each new token.
        """
        def set_last_token(token):
            # Used by OAuth2Session
            self.last_token = token
        if not self.last_token:
            client = LegacyApplicationClient(client_id=self.client_id)
            session = OAuth2Session(client=client)
            if self.pass_credentials_in_header:
                # Client credentials go in an HTTP Basic header
                auth = HTTPBasicAuth(self.client_id, self.client_secret)
                self.last_token = session.fetch_token(
                    token_url=self.token_url,
                    username=self.username,
                    password=self.password,
                    auth=auth,
                )
            else:
                # Client credentials go in the request body
                self.last_token = session.fetch_token(
                    token_url=self.token_url,
                    username=self.username,
                    password=self.password,
                    client_id=self.client_id,
                    client_secret=self.client_secret,
                )
        # Return session that refreshes token automatically
        refresh_kwargs = {
            'client_id': self.client_id,
            'client_secret': self.client_secret,
        }
        session = OAuth2Session(
            self.client_id,
            token=self.last_token,
            auto_refresh_url=self.refresh_url,
            auto_refresh_kwargs=refresh_kwargs,
            token_updater=set_last_token
        )
        make_session_public_only(session, domain_name, src='oauth_sent_attempt')
        return session
| |
"""
pghoard - pg_basebackup handler
Copyright (c) 2016 Ohmu Ltd
See LICENSE for details
"""
# pylint: disable=superfluous-parens
from . import common, version, wal
from .common import (
connection_string_using_pgpass,
replication_connection_string_and_slot_using_pgpass,
set_stream_nonblocking,
set_subprocess_stdout_and_stderr_nonblocking,
terminate_subprocess,
)
from .patchedtarfile import tarfile
from pghoard.rohmu import dates, errors, rohmufile
from pghoard.rohmu.compat import suppress
from queue import Empty, Queue
from tempfile import NamedTemporaryFile
from threading import Thread
import datetime
import io
import logging
import os
import psycopg2
import select
import stat
import subprocess
import time
# Label passed to pg_basebackup --label for backups taken by pghoard.
BASEBACKUP_NAME = "pghoard_base_backup"
# Directories that are not backed up but must be recreated (empty) on
# restore; covers both pre-10 (pg_xlog) and 10+ (pg_wal) layouts.
EMPTY_DIRS = [
    "pg_dynshmem",
    "pg_log",
    "pg_replslot",
    "pg_snapshot",
    "pg_stat_tmp",
    "pg_tblspc",
    "pg_wal",
    "pg_wal/archive_status",
    "pg_xlog",
    "pg_xlog/archive_status",
]
class BackupFailure(Exception):
    """Backup failed - post a failure to callback_queue and allow the thread to terminate"""
class NoException(BaseException):
    """Exception that's never raised, used in conditional except blocks"""
class PGBaseBackup(Thread):
    def __init__(self, config, site, connection_info, basebackup_path,
                 compression_queue, stats, transfer_queue=None,
                 callback_queue=None, pg_version_server=None):
        """Thread that takes a single basebackup of `site` and hands the
        result to the compression or transfer queue."""
        super().__init__()
        self.log = logging.getLogger("PGBaseBackup")
        self.config = config
        self.site = site
        self.connection_info = connection_info
        self.basebackup_path = basebackup_path
        self.callback_queue = callback_queue
        self.compression_queue = compression_queue
        self.stats = stats
        self.transfer_queue = transfer_queue
        self.running = True  # cleared when the backup finishes or is aborted
        self.pid = None  # pid of the pg_basebackup subprocess, once started
        self.pg_version_server = pg_version_server
        # Timestamp of the last observed subprocess output; lets callers
        # detect a stalled backup.
        self.latest_activity = datetime.datetime.utcnow()
    def run(self):
        """Thread entry point: dispatch to the configured basebackup mode and
        report failure through callback_queue."""
        try:
            basebackup_mode = self.config["backup_sites"][self.site]["basebackup_mode"]
            if basebackup_mode == "basic":
                self.run_basic_basebackup()
            elif basebackup_mode == "local-tar":
                self.run_local_tar_basebackup()
            elif basebackup_mode == "pipe":
                self.run_piped_basebackup()
            else:
                raise errors.InvalidConfigurationError("Unsupported basebackup_mode {!r}".format(basebackup_mode))
        except Exception as ex:  # pylint: disable=broad-except
            # Expected failure types get a short log line; anything else gets
            # a full traceback plus an unexpected-exception stats bump.
            if isinstance(ex, (BackupFailure, errors.InvalidConfigurationError)):
                self.log.error(str(ex))
            else:
                self.log.exception("Backup unexpectedly failed")
                self.stats.unexpected_exception(ex, where="PGBaseBackup")
            if self.callback_queue:
                # post a failure event
                self.callback_queue.put({"success": False})
        finally:
            self.running = False
    @staticmethod
    def get_paths_for_backup(basebackup_path):
        """Pick a fresh (raw_basebackup, compressed_basebackup) directory pair
        named <date>_<i>, incrementing i past any existing/leftover dirs.
        The raw directory is created before returning."""
        i = 0
        while True:
            tsdir = datetime.datetime.utcnow().strftime("%Y-%m-%d") + "_" + str(i)
            raw_basebackup = os.path.join(basebackup_path + "_incoming", tsdir)
            compressed_basebackup = os.path.join(basebackup_path, tsdir)
            # The backup directory names need not to be a sequence, so we lean towards skipping over any
            # partial or leftover progress below. Make sure we only return paths if we're able to create the
            # raw_basebackup directory.
            if not os.path.exists(raw_basebackup) and not os.path.exists(compressed_basebackup):
                # suppress FileExistsError: a concurrent creator is fine
                with suppress(FileExistsError):
                    os.makedirs(raw_basebackup)
                return raw_basebackup, compressed_basebackup
            i += 1
def get_command_line(self, output_name):
command = [
self.config["backup_sites"][self.site]["pg_basebackup_path"],
"--format", "tar",
"--label", BASEBACKUP_NAME,
"--verbose",
"--pgdata", output_name,
]
if self.pg_version_server < 90300:
conn_info = self.connection_info
if "user" in conn_info:
command.extend(["--user", conn_info["user"]])
if "port" in conn_info:
command.extend(["--port", conn_info["port"]])
if "host" in conn_info:
command.extend(["--host", conn_info["host"]])
else:
connection_string, _ = replication_connection_string_and_slot_using_pgpass(self.connection_info)
command.extend([
"--progress",
"--dbname", connection_string
])
if self.pg_version_server >= 100000:
command.extend(["--wal-method=none"])
return command
    def check_command_success(self, proc, output_file):
        """Terminate/reap `proc`; return True when it exited cleanly and
        `output_file` exists, otherwise remove the partial output file and
        raise BackupFailure."""
        rc = terminate_subprocess(proc, log=self.log)
        msg = "Ran: {!r}, took: {:.3f}s to run, returncode: {}".format(
            proc.args, time.monotonic() - proc.basebackup_start_time, rc)
        if rc == 0 and os.path.exists(output_file):
            self.log.info(msg)
            return True
        # Failure: clean up any partial output before raising
        if output_file:
            with suppress(FileNotFoundError):
                os.unlink(output_file)
        raise BackupFailure(msg)
    def basebackup_compression_pipe(self, proc, basebackup_path):
        """Stream `proc`'s stdout through compression (and optional
        encryption) into `basebackup_path`.

        Returns (original_input_size, compressed_file_size, metadata).
        """
        rsa_public_key = None
        encryption_key_id = self.config["backup_sites"][self.site]["encryption_key_id"]
        if encryption_key_id:
            rsa_public_key = self.config["backup_sites"][self.site]["encryption_keys"][encryption_key_id]["public"]
        compression_algorithm = self.config["compression"]["algorithm"]
        compression_level = self.config["compression"]["level"]
        self.log.debug("Compressing basebackup directly to file: %r", basebackup_path)
        set_stream_nonblocking(proc.stderr)
        # Write to a tempfile first; os.link() below publishes it atomically
        # under the final name only after a complete write.
        with NamedTemporaryFile(prefix=basebackup_path, suffix=".tmp-compress") as output_obj:
            def progress_callback():
                # Drain pg_basebackup's stderr and record activity so the
                # backup isn't considered stalled.
                stderr_data = proc.stderr.read()
                if stderr_data:
                    self.latest_activity = datetime.datetime.utcnow()
                    self.log.debug("pg_basebackup stderr: %r", stderr_data)
            original_input_size, compressed_file_size = rohmufile.write_file(
                input_obj=proc.stdout,
                output_obj=output_obj,
                compression_algorithm=compression_algorithm,
                compression_level=compression_level,
                rsa_public_key=rsa_public_key,
                progress_callback=progress_callback,
                log_func=self.log.info,
            )
            os.link(output_obj.name, basebackup_path)
        if original_input_size:
            size_ratio = compressed_file_size / original_input_size
            self.stats.gauge(
                "pghoard.compressed_size_ratio", size_ratio,
                tags={
                    "algorithm": compression_algorithm,
                    "site": self.site,
                    "type": "basebackup",
                })
        metadata = {
            "compression-algorithm": compression_algorithm,
            "encryption-key-id": encryption_key_id,
        }
        return original_input_size, compressed_file_size, metadata
    def run_piped_basebackup(self):
        """Take a basebackup by piping pg_basebackup output straight through
        compression, then queue the compressed file for upload."""
        # In a piped basebackup we're not able to read backup_label and must figure out the start wal segment
        # on our own. Note that this WAL file value will only be correct if no other basebackups are run in
        # parallel. PGHoard itself will never do this itself but if the user starts one on his own we'll get
        # an incorrect start-wal-time since the pg_basebackup from pghoard will not generate a new checkpoint.
        # This means that this WAL information would not be the oldest required to restore from this
        # basebackup.
        connection_string, _ = replication_connection_string_and_slot_using_pgpass(self.connection_info)
        start_wal_segment = wal.get_current_wal_from_identify_system(connection_string)
        temp_basebackup_dir, compressed_basebackup = self.get_paths_for_backup(self.basebackup_path)
        # "-" makes pg_basebackup write the tar stream to stdout
        command = self.get_command_line("-")
        self.log.debug("Starting to run: %r", command)
        proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        setattr(proc, "basebackup_start_time", time.monotonic())
        self.pid = proc.pid
        self.log.info("Started: %r, running as PID: %r, basebackup_location: %r",
                      command, self.pid, compressed_basebackup)
        stream_target = os.path.join(temp_basebackup_dir, "data.tmp")
        original_input_size, compressed_file_size, metadata = \
            self.basebackup_compression_pipe(proc, stream_target)
        self.check_command_success(proc, stream_target)
        os.rename(stream_target, compressed_basebackup)
        # Since we can't parse the backup label we cheat with the start-wal-segment and
        # start-time a bit. The start-wal-segment is the segment currently being written before
        # the backup and the start_time is taken _after_ the backup has completed and so is conservatively
        # in the future but not exactly correct. These both are valid only as long as no other
        # basebackups than those controlled by pghoard are currently running at the same time.
        # If pg_basebackups are taken simultaneously, directly or through other backup managers, the WAL
        # file will be incorrect since a new checkpoint will not be issued for a parallel backup
        metadata.update({
            "start-time": datetime.datetime.now(datetime.timezone.utc).isoformat(),
            "start-wal-segment": start_wal_segment,
            "original-file-size": original_input_size,
            "pg-version": self.pg_version_server,
        })
        self.transfer_queue.put({
            "callback_queue": self.callback_queue,
            "file_size": compressed_file_size,
            "filetype": "basebackup",
            "local_path": compressed_basebackup,
            "metadata": metadata,
            "site": self.site,
            "type": "UPLOAD",
        })
def parse_backup_label(self, backup_label_data):
if isinstance(backup_label_data, str):
backup_label_data = backup_label_data.encode("utf-8")
for line in backup_label_data.split(b"\n"):
if line.startswith(b"START WAL LOCATION"):
start_wal_segment = line.split()[5].strip(b")").decode("utf8")
elif line.startswith(b"START TIME: "):
start_time_text = line[len("START TIME: "):].decode("utf8")
start_time_dt = dates.parse_timestamp(start_time_text, assume_local=True)
start_time = start_time_dt.isoformat()
self.log.debug("Found: %r as starting wal segment, start_time: %r",
start_wal_segment, start_time)
return start_wal_segment, start_time
    def parse_backup_label_in_tar(self, basebackup_path):
        """Extract backup_label from a tar-format basebackup and parse it."""
        with tarfile.TarFile(name=basebackup_path, mode="r") as tar:
            content = tar.extractfile("backup_label").read()  # pylint: disable=no-member
        return self.parse_backup_label(content)
    def run_basic_basebackup(self):
        """Take a basebackup with pg_basebackup writing base.tar to disk,
        then queue the file for compression."""
        basebackup_directory, _ = self.get_paths_for_backup(self.basebackup_path)
        basebackup_tar_file = os.path.join(basebackup_directory, "base.tar")
        command = self.get_command_line(basebackup_directory)
        self.log.debug("Starting to run: %r", command)
        proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        setattr(proc, "basebackup_start_time", time.monotonic())
        self.pid = proc.pid
        self.log.info("Started: %r, running as PID: %r, basebackup_location: %r",
                      command, self.pid, basebackup_tar_file)
        set_subprocess_stdout_and_stderr_nonblocking(proc)
        # Drain subprocess output (recording activity) until it exits or
        # termination is requested.
        while self.running:
            rlist, _, _ = select.select([proc.stdout, proc.stderr], [], [], 1.0)
            for fd in rlist:
                content = fd.read()
                if content:
                    self.log.debug(content)
                    self.latest_activity = datetime.datetime.utcnow()
            if proc.poll() is not None:
                break
        self.check_command_success(proc, basebackup_tar_file)
        # Unlike the piped mode, the label inside the tar gives us exact
        # start-wal-segment / start-time values.
        start_wal_segment, start_time = self.parse_backup_label_in_tar(basebackup_tar_file)
        self.compression_queue.put({
            "callback_queue": self.callback_queue,
            "full_path": basebackup_tar_file,
            "metadata": {
                "start-time": start_time,
                "start-wal-segment": start_wal_segment,
            },
            "type": "CLOSE_WRITE",
        })
def get_control_entries_for_tar(self, *, metadata, pg_control, backup_label):
mtime = time.time()
blob = io.BytesIO(common.json_encode(metadata, binary=True))
ti = tarfile.TarInfo(name=".pghoard_tar_metadata.json")
ti.size = len(blob.getbuffer())
ti.mtime = mtime
yield ti, blob, False
# Backup the latest version of pg_control
blob = io.BytesIO(pg_control)
ti = tarfile.TarInfo(name=os.path.join("pgdata", "global", "pg_control"))
ti.size = len(blob.getbuffer())
ti.mtime = mtime
yield ti, blob, False
# Add the given backup_label to the tar after calling pg_stop_backup()
blob = io.BytesIO(backup_label)
ti = tarfile.TarInfo(name=os.path.join("pgdata", "backup_label"))
ti.size = len(blob.getbuffer())
ti.mtime = mtime
yield ti, blob, False
# Create directory entries for empty directories
for dirname in EMPTY_DIRS:
ti = tarfile.TarInfo(name=os.path.join("pgdata", dirname))
ti.type = tarfile.DIRTYPE
ti.mode = 0o700
ti.mtime = mtime
yield ti, None, False
    def write_files_to_tar(self, *, files, tar):
        """Write (archive_path, local_path, missing_ok) entries into `tar`.

        archive_path may also be a ready-made TarInfo (paired with a file
        object) for synthetic entries.  Aborts with BackupFailure when
        thread termination has been requested.
        """
        for archive_path, local_path, missing_ok in files:
            if not self.running:
                raise BackupFailure("thread termination requested")
            if isinstance(archive_path, tarfile.TarInfo):
                # Pre-built entry (e.g. metadata blob): add as-is
                tar.addfile(archive_path, local_path)
                continue
            try:
                tar.add(local_path, arcname=archive_path, recursive=False)
            # NoException is never raised, so with missing_ok=False this
            # except clause deliberately catches nothing and the error
            # propagates.
            except (FileNotFoundError if missing_ok else NoException):
                self.log.warning("File %r went away while writing to tar, ignoring", local_path)
def find_files_to_backup(self, *, pgdata, tablespaces):
    """Yield (archive_path, local_path, missing_ok) triples for every file,
    symlink and directory that belongs in the basebackup.

    Walks $PGDATA (skipping runtime/temporary files) and every tablespace
    root.  missing_ok is True for anything below the top level because
    PostgreSQL may legitimately remove such files while the backup runs.
    """
    def add_directory(archive_parent, local_parent, *, missing_ok):
        # Scan and add a single directory
        try:
            contents = os.listdir(local_parent)
        except (FileNotFoundError if missing_ok else NoException):
            # NoException is never raised: when missing_ok is False the
            # FileNotFoundError propagates to the caller instead.
            self.log.warning("Directory %r went away while scanning, ignoring", local_parent)
            return
        for fn in contents:
            # Ignore all temporary files and directories as well as well
            # as pg_control, we'll grab the latest version of pg_control
            # after everything else has been copied.
            if fn == "pg_control" or fn.startswith("pgsql_tmp"):
                continue
            local_path = os.path.join(local_parent, fn)
            archive_path = os.path.join(archive_parent, fn)
            yield from add_entry(archive_path, local_path, missing_ok=missing_ok)

    def add_entry(archive_path, local_path, *, missing_ok):
        # Recursively add files and directories
        try:
            st_mode = os.stat(local_path).st_mode
        except (FileNotFoundError if missing_ok else NoException):
            self.log.warning("File %r went away while scanning, ignoring", local_path)
            return
        if stat.S_ISREG(st_mode) or stat.S_ISLNK(st_mode):
            yield archive_path, local_path, missing_ok
        elif stat.S_ISDIR(st_mode):
            # Yield the directory entry itself first so its metadata is archived.
            yield archive_path, local_path, missing_ok
            # Everything but top-level items are allowed to be missing
            yield from add_directory(archive_path, local_path, missing_ok=True)
        else:
            self.log.error("File %r is not a directory, file or symlink, ignoring", local_path)

    # Iterate over top-level $PGDATA
    for fn in os.listdir(pgdata):
        local_path = os.path.join(pgdata, fn)
        archive_path = os.path.join("pgdata", fn)
        # Skip temporary / runtime files such as postmaster.pid, postmaster.opts and files ending with ~,
        # .tmp or .old or starting with .s. or pgsql_tmp. These are some of the filename matches and patterns
        # PostgreSQL own replication code recognizes.
        # NOTE: backup_label and various empty directories are handled by write_init_entries_to_tar
        # NOTE: We also ignore tablespace_map because we store tablespace information elsewhere and
        # reconstruct tablespace links in restore.py using our custom metadata and/or user supplied
        # options.
        # TODO: Use a top-level whitelist?
        if fn in EMPTY_DIRS or \
                fn == "postmaster.opts" or \
                fn == "postmaster.pid" or \
                fn == "backup_label" or \
                fn == "tablespace_map" or \
                fn.endswith(".old") or \
                fn.endswith(".tmp") or \
                fn.endswith("~") or \
                fn.startswith(".s.") or \
                fn.startswith("pgsql_tmp"):
            continue
        yield from add_entry(archive_path, local_path, missing_ok=False)

    # Add a "tablespaces" directory with same metadata as $PGDATA
    for spcname, spcinfo in tablespaces.items():
        local_path = spcinfo["path"]
        archive_path = os.path.join("tablespaces", spcname)
        yield archive_path, local_path, False
        yield from add_directory(archive_path, local_path, missing_ok=False)
def tar_one_file(self, *, temp_dir, chunk_path, files_to_backup, callback_queue,
                 filetype="basebackup_chunk", extra_metadata=None):
    """Write one backup chunk: tar, compress and optionally encrypt the given
    files into *chunk_path* and queue the result for upload.

    Returns (chunk_name, input_size, result_size): the "<backup>/<chunk>"
    relative name, the uncompressed tar size and the final on-disk size.
    """
    start_time = time.monotonic()

    encryption_key_id = self.config["backup_sites"][self.site]["encryption_key_id"]
    if encryption_key_id:
        rsa_public_key = self.config["backup_sites"][self.site]["encryption_keys"][encryption_key_id]["public"]
    else:
        rsa_public_key = None
    with NamedTemporaryFile(dir=temp_dir, prefix=os.path.basename(chunk_path), suffix=".tmp") as raw_output_obj:
        # pylint: disable=bad-continuation
        with rohmufile.file_writer(
                compression_algorithm=self.config["compression"]["algorithm"],
                compression_level=self.config["compression"]["level"],
                rsa_public_key=rsa_public_key,
                fileobj=raw_output_obj) as output_obj:
            with tarfile.TarFile(fileobj=output_obj, mode="w") as output_tar:
                self.write_files_to_tar(files=files_to_backup, tar=output_tar)
            # Size of the raw tar stream, measured before the writer is closed
            input_size = output_obj.tell()
        # Size of the compressed/encrypted result on disk
        result_size = raw_output_obj.tell()
        # Make the file persist over the with-block with this hardlink
        os.link(raw_output_obj.name, chunk_path)

    rohmufile.log_compression_result(
        encrypted=True if encryption_key_id else False,
        elapsed=time.monotonic() - start_time,
        original_size=input_size,
        result_size=result_size,
        source_name="$PGDATA files ({})".format(len(files_to_backup)),
        log_func=self.log.info,
    )

    metadata = {
        "compression-algorithm": self.config["compression"]["algorithm"],
        "encryption-key-id": encryption_key_id,
        "format": "pghoard-bb-v2",
        "original-file-size": input_size,
    }
    if extra_metadata:
        metadata.update(extra_metadata)
    # Hand the finished chunk to the transfer thread; completion is reported
    # back through callback_queue.
    self.transfer_queue.put({
        "callback_queue": callback_queue,
        "file_size": result_size,
        "filetype": filetype,
        "local_path": chunk_path,
        "metadata": metadata,
        "site": self.site,
        "type": "UPLOAD",
    })

    # Get the name of the chunk and the name of the parent directory (ie backup "name")
    chunk_name = "/".join(chunk_path.split("/")[-2:])
    return chunk_name, input_size, result_size
def wait_for_chunk_transfer_to_complete(self, chunk_count, upload_results, chunk_callback_queue, start_time):
    """Block for up to 3 seconds waiting for one chunk upload result.

    On success the result is appended to *upload_results* and True is
    returned; on timeout current progress is logged and False is returned.
    """
    try:
        result = chunk_callback_queue.get(timeout=3.0)
    except Empty:
        self.log.warning("Upload status: %r/%r handled, time taken: %r", len(upload_results), chunk_count,
                         time.monotonic() - start_time)
        return False
    upload_results.append(result)
    self.log.info("Completed a chunk transfer successfully: %r", result)
    return True
def create_and_upload_chunks(self, chunks, data_file_format, temp_base_dir):
    """Tar, compress and queue all *chunks* (lists of file triples) for upload.

    At most ``basebackup_chunks_in_progress`` finished chunk files are kept
    on local disk at a time; when the limit is reached we wait for transfers
    to complete before producing more.  Returns a list of per-chunk info
    dicts (chunk_filename, input_size, result_size, files).
    """
    start_time = time.monotonic()
    chunk_files = []
    upload_results = []
    chunk_callback_queue = Queue()
    # Number of chunk files created locally but not yet fully transferred
    chunks_on_disk = 0
    i = 0

    while i < len(chunks):
        if chunks_on_disk < self.config["backup_sites"][self.site]["basebackup_chunks_in_progress"]:
            # Chunk ids are 1-based; chunk 0 is reserved for the metadata chunk
            chunk_id = i + 1
            one_chunk_files = chunks[i]
            chunk_name, input_size, result_size = self.tar_one_file(
                callback_queue=chunk_callback_queue,
                chunk_path=data_file_format(chunk_id),
                temp_dir=temp_base_dir,
                files_to_backup=one_chunk_files,
            )
            chunk_files.append(
                {
                    "chunk_filename": chunk_name,
                    "input_size": input_size,
                    "result_size": result_size,
                    "files": [chunk[0] for chunk in one_chunk_files]
                }
            )
            chunks_on_disk += 1
            i += 1
            self.log.info("Queued backup chunk %r for transfer, chunks_on_disk: %r, current: %r, total_chunks: %r",
                          chunk_name, chunks_on_disk, i, len(chunks))
        elif self.wait_for_chunk_transfer_to_complete(len(chunks), upload_results, chunk_callback_queue, start_time):
            chunks_on_disk -= 1

    # All chunks created; wait for the remaining transfers to finish
    while len(upload_results) < len(chunk_files):
        self.wait_for_chunk_transfer_to_complete(len(chunks), upload_results, chunk_callback_queue, start_time)

    return chunk_files
def run_local_tar_basebackup(self):
    """Take a basebackup by tarring $PGDATA and the tablespaces ourselves.

    The backup is split into chunks that are compressed/encrypted and
    uploaded as they are produced.  Chunk 0 (uploaded last) carries the
    backup metadata, backup_label and the freshest pg_control.  Three
    backup modes are supported depending on server version and role:
    "non-exclusive" (9.6+), "pgespresso" (replicas on older versions) and
    "legacy" (exclusive pg_start_backup).
    """
    pgdata = self.config["backup_sites"][self.site]["pg_data_directory"]
    if not os.path.isdir(pgdata):
        raise errors.InvalidConfigurationError("pg_data_directory {!r} does not exist".format(pgdata))

    temp_base_dir, compressed_base = self.get_paths_for_backup(self.basebackup_path)
    os.makedirs(compressed_base)
    # data_file_format(n) yields the on-disk path for chunk number n
    data_file_format = "{}/{}.{{0:08d}}.pghoard".format(compressed_base, os.path.basename(compressed_base)).format

    # Default to 2GB chunks of uncompressed data
    target_chunk_size = self.config["backup_sites"][self.site]["basebackup_chunk_size"]

    self.log.debug("Connecting to database to start backup process")
    connection_string = connection_string_using_pgpass(self.connection_info)
    with psycopg2.connect(connection_string) as db_conn:
        cursor = db_conn.cursor()

        if self.pg_version_server >= 90600:
            # We'll always use the the non-exclusive backup mode on 9.6 and newer
            cursor.execute("SELECT pg_start_backup(%s, true, false)", [BASEBACKUP_NAME])
            backup_label = None
            backup_mode = "non-exclusive"
        else:
            # On older versions, first check if we're in recovery, and find out the version of a possibly
            # installed pgespresso extension. We use pgespresso's backup control functions when they're
            # available, and require them in case we're running on a replica. We also make sure the
            # extension version is 1.2 or newer to prevent crashing when using tablespaces.
            cursor.execute("SELECT pg_is_in_recovery(), "
                           " (SELECT extversion FROM pg_extension WHERE extname = 'pgespresso')")
            in_recovery, pgespresso_version = cursor.fetchone()
            if in_recovery and (not pgespresso_version or pgespresso_version < "1.2"):
                raise errors.InvalidConfigurationError("pgespresso version 1.2 or higher must be installed "
                                                       "to take `local-tar` backups from a replica")
            if pgespresso_version and pgespresso_version >= "1.2":
                cursor.execute("SELECT pgespresso_start_backup(%s, false)", [BASEBACKUP_NAME])
                backup_label = cursor.fetchone()[0]
                backup_mode = "pgespresso"
            else:
                try:
                    cursor.execute("SELECT pg_start_backup(%s)", [BASEBACKUP_NAME])
                except psycopg2.OperationalError as ex:
                    self.log.warning("Exclusive pg_start_backup() failed: %s: %s", ex.__class__.__name__, ex)
                    db_conn.rollback()
                    if "a backup is already in progress" not in str(ex):
                        raise
                    # A stale exclusive backup is open; end it and retry once.
                    self.log.info("Calling pg_stop_backup() and retrying")
                    cursor.execute("SELECT pg_stop_backup()")
                    cursor.execute("SELECT pg_start_backup(%s)", [BASEBACKUP_NAME])
                with open(os.path.join(pgdata, "backup_label"), "r") as fp:
                    backup_label = fp.read()
                backup_mode = "legacy"

        backup_stopped = False
        try:
            # Look up tablespaces and resolve their current filesystem locations
            cursor.execute("SELECT oid, spcname FROM pg_tablespace WHERE spcname NOT IN ('pg_default', 'pg_global')")
            tablespaces = {
                spcname: {
                    "path": os.readlink(os.path.join(pgdata, "pg_tblspc", str(oid))),
                    "oid": oid,
                }
                for oid, spcname in cursor.fetchall()
            }
            db_conn.commit()

            self.log.info("Starting to backup %r and %r tablespaces to %r",
                          pgdata, len(tablespaces), compressed_base)
            start_time = time.monotonic()

            total_file_count = 0
            one_chunk_size = 0
            one_chunk_files = []
            chunks = []

            # Generate a list of chunks
            for archive_path, local_path, missing_ok in \
                    self.find_files_to_backup(pgdata=pgdata, tablespaces=tablespaces):
                file_size = os.path.getsize(local_path)
                # Switch chunks if the current chunk has at least 20% data and the new chunk would tip it over
                if one_chunk_size > target_chunk_size / 5 and one_chunk_size + file_size > target_chunk_size:
                    chunks.append(one_chunk_files)
                    one_chunk_size = 0
                    one_chunk_files = []
                total_file_count += 1
                one_chunk_size += file_size
                one_chunk_files.append([archive_path, local_path, missing_ok])
            chunks.append(one_chunk_files)

            # Tar up the chunks and submit them for upload; note that we start from chunk 1 here; chunk 0
            # is reserved for special files and metadata and will be generated last.
            chunk_files = self.create_and_upload_chunks(chunks, data_file_format, temp_base_dir)

            # Everything is now tarred up, grab the latest pg_control and stop the backup process
            with open(os.path.join(pgdata, "global", "pg_control"), "rb") as fp:
                pg_control = fp.read()

            # Call the stop backup functions now to get backup label for 9.6+ non-exclusive backups
            if backup_mode == "non-exclusive":
                cursor.execute("SELECT labelfile FROM pg_stop_backup(false)")
                backup_label = cursor.fetchone()[0]
            elif backup_mode == "pgespresso":
                cursor.execute("SELECT pgespresso_stop_backup(%s)", [backup_label])
            else:
                cursor.execute("SELECT pg_stop_backup()")
            db_conn.commit()
            backup_stopped = True

            total_size_plain = sum(item["input_size"] for item in chunk_files)
            total_size_enc = sum(item["result_size"] for item in chunk_files)
            self.log.info("Basebackup generation finished, %r files, %r chunks, "
                          "%r byte input, %r byte output, took %r seconds, waiting to upload",
                          total_file_count, len(chunk_files),
                          total_size_plain, total_size_enc, time.monotonic() - start_time)
        finally:
            db_conn.rollback()
            if not backup_stopped:
                # Always end the backup even when chunk generation failed,
                # otherwise the server is left in backup mode.
                if backup_mode == "non-exclusive":
                    cursor.execute("SELECT pg_stop_backup(false)")
                elif backup_mode == "pgespresso":
                    cursor.execute("SELECT pgespresso_stop_backup(%s)", [backup_label])
                else:
                    cursor.execute("SELECT pg_stop_backup()")
                db_conn.commit()

        backup_label_data = backup_label.encode("utf-8")
        backup_start_wal_segment, backup_start_time = self.parse_backup_label(backup_label_data)
        backup_end_wal_segment, backup_end_time = self.get_backup_end_segment_and_time(db_conn, backup_mode)

        # Generate and upload the metadata chunk
        metadata = {
            "backup_end_time": backup_end_time,
            "backup_end_wal_segment": backup_end_wal_segment,
            "backup_start_time": backup_start_time,
            "backup_start_wal_segment": backup_start_wal_segment,
            "chunks": chunk_files,
            "pgdata": pgdata,
            "pghoard_object": "basebackup",
            "pghoard_version": version.__version__,
            "tablespaces": tablespaces,
        }
        control_files = list(self.get_control_entries_for_tar(
            metadata=metadata,
            pg_control=pg_control,
            backup_label=backup_label_data,
        ))
        self.tar_one_file(
            callback_queue=self.callback_queue,
            chunk_path=data_file_format(0),
            temp_dir=temp_base_dir,
            files_to_backup=control_files,
            filetype="basebackup",
            extra_metadata={
                "end-time": backup_end_time,
                "end-wal-segment": backup_end_wal_segment,
                "pg-version": self.pg_version_server,
                "start-time": backup_start_time,
                "start-wal-segment": backup_start_wal_segment,
                "total-size-plain": total_size_plain,
                "total-size-enc": total_size_enc,
            },
        )
def get_backup_end_segment_and_time(self, db_conn, backup_mode):
    """Grab a timestamp and WAL segment name after the end of the backup: this is a point in time to which
    we must be able to recover to, and the last WAL segment that is required for the backup to be
    consistent.

    Note that pg_switch_xlog()/pg_switch_wal() is a superuser-only function, but since pg_start_backup() and
    pg_stop_backup() cause an WAL switch we'll call them instead. The downside is an unnecessary
    checkpoint.

    Returns (backup_end_wal_segment, backup_end_time); the segment is None
    when running against a server in recovery, where the WAL position
    functions are unavailable.
    """
    cursor = db_conn.cursor()
    # Get backup end time and end segment and forcibly register a transaction in the current segment
    # Note that we can't call pg_walfile_name() or pg_current_wal_lsn() in recovery
    cursor.execute("SELECT now(), pg_is_in_recovery()")
    backup_end_time, in_recovery = cursor.fetchone()
    if in_recovery:
        db_conn.commit()
        return None, backup_end_time

    # pg_xlogfile_name()/pg_current_xlog_location() were renamed in PostgreSQL 10
    if self.pg_version_server >= 100000:
        cursor.execute("SELECT pg_walfile_name(pg_current_wal_lsn()), txid_current()")
    else:
        cursor.execute("SELECT pg_xlogfile_name(pg_current_xlog_location()), txid_current()")
    backup_end_wal_segment, _ = cursor.fetchone()
    db_conn.commit()

    # Now force switch of the WAL segment to make sure we have archived a segment with a known
    # timestamp after pg_stop_backup() was called.
    backup_end_name = "pghoard_end_of_backup"
    if backup_mode == "non-exclusive":
        cursor.execute("SELECT pg_start_backup(%s, true, false)", [backup_end_name])
        cursor.execute("SELECT pg_stop_backup(false)")
    elif backup_mode == "pgespresso":
        cursor.execute("SELECT pgespresso_start_backup(%s, false)", [backup_end_name])
        backup_label = cursor.fetchone()[0]
        cursor.execute("SELECT pgespresso_stop_backup(%s)", [backup_label])
    else:
        cursor.execute("SELECT pg_start_backup(%s)", [backup_end_name])
        cursor.execute("SELECT pg_stop_backup()")
    db_conn.commit()
    return backup_end_wal_segment, backup_end_time
| |
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import auth as ks_auth
from keystoneclient.auth.identity import v2 as v2_auth
from keystoneclient import session as ks_session
from novaclient import client as nova_client
from novaclient import exceptions as nova_exceptions
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import uuidutils
from sqlalchemy.orm import attributes as sql_attr
from neutron.common import constants
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
from neutron.notifiers import batch_notifier
LOG = logging.getLogger(__name__)
VIF_UNPLUGGED = 'network-vif-unplugged'
VIF_PLUGGED = 'network-vif-plugged'
VIF_DELETED = 'network-vif-deleted'
NEUTRON_NOVA_EVENT_STATUS_MAP = {constants.PORT_STATUS_ACTIVE: 'completed',
constants.PORT_STATUS_ERROR: 'failed',
constants.PORT_STATUS_DOWN: 'completed'}
NOVA_API_VERSION = "2"
class DefaultAuthPlugin(v2_auth.Password):
    """Standard v2 user/password auth with an optional bypass URL.

    This exists only because novaclient does not yet support
    endpoint_override - bug #1403329.  Once that bug is fixed the override
    can be passed straight to the client and this class removed.
    """

    def __init__(self, **kwargs):
        # Strip our private kwarg before delegating to the stock plugin.
        self._endpoint_override = kwargs.pop('endpoint_override', None)
        super(DefaultAuthPlugin, self).__init__(**kwargs)

    def get_endpoint(self, session, **kwargs):
        override = self._endpoint_override
        if not override:
            # No bypass configured - use normal catalog-based resolution.
            return super(DefaultAuthPlugin, self).get_endpoint(session, **kwargs)
        return override
class Notifier(object):
    """Sends batched port/network events to nova.

    Builds a novaclient session from configuration and queues events
    (VIF plugged/unplugged/deleted, network-changed) through a
    BatchNotifier so nova learns about neutron-side changes.
    """

    def __init__(self):
        # FIXME(jamielennox): A notifier is being created for each Controller
        # and each Notifier is handling it's own auth. That means that we are
        # authenticating the exact same thing len(controllers) times. This
        # should be an easy thing to optimize.
        auth = ks_auth.load_from_conf_options(cfg.CONF, 'nova')
        endpoint_override = None

        if not auth:
            # Fall back to the deprecated nova_admin_* configuration options.
            LOG.warning(_LW('Authenticating to nova using nova_admin_* options'
                            ' is deprecated. This should be done using'
                            ' an auth plugin, like password'))

            if cfg.CONF.nova_admin_tenant_id:
                endpoint_override = "%s/%s" % (cfg.CONF.nova_url,
                                               cfg.CONF.nova_admin_tenant_id)

            auth = DefaultAuthPlugin(
                auth_url=cfg.CONF.nova_admin_auth_url,
                username=cfg.CONF.nova_admin_username,
                password=cfg.CONF.nova_admin_password,
                tenant_id=cfg.CONF.nova_admin_tenant_id,
                tenant_name=cfg.CONF.nova_admin_tenant_name,
                endpoint_override=endpoint_override)

        session = ks_session.Session.load_from_conf_options(cfg.CONF,
                                                            'nova',
                                                            auth=auth)

        # NOTE(andreykurilin): novaclient.v1_1 was renamed to v2 and there is
        # no way to import the contrib module directly without referencing v2,
        # which would only work for novaclient >= 2.21.0.
        novaclient_cls = nova_client.get_client_class(NOVA_API_VERSION)
        server_external_events = importutils.import_module(
            novaclient_cls.__module__.replace(
                ".client", ".contrib.server_external_events"))

        self.nclient = novaclient_cls(
            session=session,
            region_name=cfg.CONF.nova.region_name,
            extensions=[server_external_events])
        # Events are accumulated and flushed every send_events_interval.
        self.batch_notifier = batch_notifier.BatchNotifier(
            cfg.CONF.send_events_interval, self.send_events)

    def _is_compute_port(self, port):
        """Return True if *port* has a uuid device_id and a compute: owner."""
        try:
            if (port['device_id'] and uuidutils.is_uuid_like(port['device_id'])
                    and port['device_owner'].startswith('compute:')):
                return True
        except (KeyError, AttributeError):
            pass
        return False

    def _get_network_changed_event(self, device_id):
        """Build a network-changed event dict for the given instance uuid."""
        return {'name': 'network-changed',
                'server_uuid': device_id}

    def _get_port_delete_event(self, port):
        """Build a VIF_DELETED event dict for *port*."""
        return {'server_uuid': port['device_id'],
                'name': VIF_DELETED,
                'tag': port['id']}

    @property
    def _plugin(self):
        """Lazily resolved core plugin reference."""
        # NOTE(arosen): this cannot be set in __init__ currently since
        # this class is initialized at the same time as NeutronManager()
        # which is decorated with synchronized()
        if not hasattr(self, '_plugin_ref'):
            self._plugin_ref = manager.NeutronManager.get_plugin()
        return self._plugin_ref

    def send_network_change(self, action, original_obj,
                            returned_obj):
        """Called when a network change is made that nova cares about.

        :param action: the event that occurred.
        :param original_obj: the previous value of resource before action.
        :param returned_obj: the body returned to client as result of action.
        """
        if not cfg.CONF.notify_nova_on_port_data_changes:
            return

        # When neutron re-assigns floating ip from an original instance
        # port to a new instance port without disassociate it first, an
        # event should be sent for original instance, that will make nova
        # know original instance's info, and update database for it.
        if (action == 'update_floatingip'
                and returned_obj['floatingip'].get('port_id')
                and original_obj.get('port_id')):
            disassociate_returned_obj = {'floatingip': {'port_id': None}}
            event = self.create_port_changed_event(action, original_obj,
                                                   disassociate_returned_obj)
            self.batch_notifier.queue_event(event)

        event = self.create_port_changed_event(action, original_obj,
                                               returned_obj)
        self.batch_notifier.queue_event(event)

    def create_port_changed_event(self, action, original_obj, returned_obj):
        """Return the nova event dict for a port/floatingip change, or None
        when the affected port is not a compute port."""
        port = None
        if action in ['update_port', 'delete_port']:
            port = returned_obj['port']

        elif action in ['update_floatingip', 'create_floatingip',
                        'delete_floatingip']:
            # NOTE(arosen) if we are associating a floatingip the
            # port_id is in the returned_obj. Otherwise on disassociate
            # it's in the original_object
            port_id = (returned_obj['floatingip'].get('port_id') or
                       original_obj.get('port_id'))

            if port_id is None:
                return

            ctx = context.get_admin_context()
            port = self._plugin.get_port(ctx, port_id)

        if port and self._is_compute_port(port):
            if action == 'delete_port':
                return self._get_port_delete_event(port)
            else:
                return self._get_network_changed_event(port['device_id'])

    def record_port_status_changed(self, port, current_port_status,
                                   previous_port_status, initiator):
        """Determine if nova needs to be notified due to port status change.
        """
        # clear out previous _notify_event
        port._notify_event = None
        # If there is no device_id set there is nothing we can do here.
        if not port.device_id:
            LOG.debug("device_id is not set on port yet.")
            return

        if not port.id:
            LOG.warning(_LW("Port ID not set! Nova will not be notified of "
                            "port status change."))
            return

        # We only want to notify about nova ports.
        if not self._is_compute_port(port):
            return

        # We notify nova when a vif is unplugged which only occurs when
        # the status goes from ACTIVE to DOWN.
        if (previous_port_status == constants.PORT_STATUS_ACTIVE and
                current_port_status == constants.PORT_STATUS_DOWN):
            event_name = VIF_UNPLUGGED

        # We only notify nova when a vif is plugged which only occurs
        # when the status goes from:
        # NO_VALUE/DOWN/BUILD -> ACTIVE/ERROR.
        elif (previous_port_status in [sql_attr.NO_VALUE,
                                       constants.PORT_STATUS_DOWN,
                                       constants.PORT_STATUS_BUILD]
              and current_port_status in [constants.PORT_STATUS_ACTIVE,
                                          constants.PORT_STATUS_ERROR]):
            event_name = VIF_PLUGGED
        # All the remaining state transitions are of no interest to nova
        else:
            LOG.debug("Ignoring state change previous_port_status: "
                      "%(pre_status)s current_port_status: %(cur_status)s"
                      " port_id %(id)s",
                      {'pre_status': previous_port_status,
                       'cur_status': current_port_status,
                       'id': port.id})
            return

        # The event is stashed on the port and emitted later by
        # send_port_status once the change is flushed.
        port._notify_event = (
            {'server_uuid': port.device_id,
             'name': event_name,
             'status': NEUTRON_NOVA_EVENT_STATUS_MAP.get(current_port_status),
             'tag': port.id})

    def send_port_status(self, mapper, connection, port):
        """Queue the event prepared by record_port_status_changed.

        NOTE(review): the (mapper, connection, port) signature looks like a
        SQLAlchemy mapper event hook - confirm against the registration site.
        """
        event = getattr(port, "_notify_event", None)
        self.batch_notifier.queue_event(event)
        port._notify_event = None

    def send_events(self, batched_events):
        """Deliver *batched_events* to nova's server-external-events API and
        log the per-event status codes from the response."""
        LOG.debug("Sending events: %s", batched_events)
        try:
            response = self.nclient.server_external_events.create(
                batched_events)
        except nova_exceptions.NotFound:
            LOG.warning(_LW("Nova returned NotFound for event: %s"),
                        batched_events)
        except Exception:
            LOG.exception(_LE("Failed to notify nova on events: %s"),
                          batched_events)
        else:
            if not isinstance(response, list):
                LOG.error(_LE("Error response returned from nova: %s"),
                          response)
                return
            response_error = False
            for event in response:
                try:
                    code = event['code']
                except KeyError:
                    response_error = True
                    continue
                if code != 200:
                    LOG.warning(_LW("Nova event: %s returned with failed "
                                    "status"), event)
                else:
                    LOG.info(_LI("Nova event response: %s"), event)
            if response_error:
                LOG.error(_LE("Error response returned from nova: %s"),
                          response)
| |
from __future__ import unicode_literals
import os
import copy
from ship.utils.fileloaders import fileloader
from ship.utils import filetools as ft
from integration_tests import utils
class TuflowLoadTests(object):
    """Integration tests for loading, querying and re-writing a TUFLOW model.

    Runs against the data set under integration_tests/test_data/model1 and
    reports failures through utils.softAssertion so that every check runs
    even if an earlier one fails.
    """

    def runTests(self):
        """Entry point: run all of the TUFLOW load tests in order."""
        cwd = os.getcwd()
        path1 = "integration_tests/test_data/model1/tuflow/runs/test_run1.tcf"
        path2 = "integration_tests/test_data/model1/tuflow/runs/test_run_noexist.tcf"
        main_path = os.path.normpath(os.path.join(cwd, path1))
        missing_path = os.path.normpath(os.path.join(cwd, path2))
        self.loadTuflowModel(missing_path)
        self.test_nonExistingControlFiles()
        del self.tuflow
        self.loadTuflowModel(main_path)
        utils.softAssertion(self.tuflow.missing_model_files, [])
        # self.test_deactiveLogic()
        self.test_writeTuflowModel()
        self.test_controlFileTypes()
        self.test_allFilepaths()
        self.test_variables()
        self.test_files()
        self.test_seVals()

    def loadTuflowModel(self, path):
        """Load the TUFLOW model at *path* into self.tuflow."""
        print('Loading tuflow model...')
        loader = fileloader.FileLoader()
        self.tuflow = loader.loadFile(path)
        print('Tuflow model load complete.')

    def test_nonExistingControlFiles(self):
        """Checks that a model still loads and returns the missing files.

        When a TuflowModel is loaded but can't find one or more of the control
        files on disk it should finish the load and set the missing_model_files
        variables to a list with the missing file paths.
        """
        test_files = [
            os.path.normpath(os.path.join(self.tuflow.root, "..\\model\\test_tgc_NOEXIST.tgc")),
            os.path.normpath(os.path.join(self.tuflow.root, "..\\model\\test_tbc_NOEXIST.tbc")),
        ]
        utils.softAssertion(set(self.tuflow.missing_model_files), set(test_files))

    def test_writeTuflowModel(self):
        """Note this will write the outputs of the tuflow model to disk.

        No comparisons of the data are done here. It's just a convenience so
        that you can check the output files and make sure that they look like
        they should.
        Outputs will be written to SHIP/integration_tests/test_output/asread/
        """
        print('Test writing tuflow model...')
        cwd = os.path.join(os.getcwd(), 'integration_tests', 'test_output')
        need_dirs = [
            cwd,
            os.path.join(cwd, 'asread'),
            os.path.join(cwd, 'asread/model1'),
            os.path.join(cwd, 'asread/model1/tuflow'),
            os.path.join(cwd, 'asread/model1/tuflow/runs'),
            os.path.join(cwd, 'asread/model1/tuflow/model'),
        ]
        try:
            for n in need_dirs:
                if not os.path.isdir(n):
                    os.mkdir(n)
        # BUG FIX: os.mkdir raises OSError (on Python 2 "except IOError"
        # would not catch it), and the test previously printed "aborting"
        # but then carried on and tried to write into directories that
        # were never created - abort for real now.
        except OSError:
            print('\t Could not make test directeries - aborting test')
            print('\nFail!\n')
            return

        tuflow = copy.deepcopy(self.tuflow)
        new_root = os.path.normpath(need_dirs[4])      # path ending in 'runs'
        root_compare = os.path.normpath(need_dirs[3])  # path ending in 'tuflow'
        tuflow.root = new_root
        contents = {}
        for ckey, cval in tuflow.control_files.items():
            if ckey not in contents:
                contents[ckey] = {}
            temp = cval.getPrintableContents()
            for tkey, tval in temp.items():
                # Every output file must live under the new output root
                utils.softAssertionIn(root_compare, tkey)
                contents[ckey][tkey] = tval
        for ctype, c in contents.items():
            for pkey, val in c.items():
                ft.writeFile(val, pkey)
        del tuflow
        print('Done')

    def test_deactiveLogic(self):
        """Placeholder for deactivation-logic tests (currently disabled,
        see the commented-out call in runTests)."""
        pass

    def test_controlFileTypes(self):
        """Check that the model exposes exactly the expected control file types."""
        print('Testing control_files keys...')
        ckeys = self.tuflow.control_files.keys()
        test_keys = ['TCF', 'ECF', 'TGC', 'TBC']
        utils.softAssertion(set(ckeys), set(test_keys))
        print('Done')

    def test_seVals(self):
        """Check TGC filepaths and variable count for the current
        scenario/event values."""
        print('Testing se_vals filepaths keys...')
        se_vals = self.tuflow.user_variables.seValsToDict()
        test_paths = [
            '2d_loc_shiptest_tgc_v1_L.shp',
            '2d_code_shiptest_tgc_v1_R.shp',
            '2d_bc_hx_shiptest_tgc_v1_R.shp',
            '2d_whatevs_shiptest_tgc_v2_P.shp',
            'test_trd1.trd',
            '2d_zln_shiptest_trd_v1_L.shp',
            '2d_zln_shiptest_trd_v1_P.shp',
            'some_zshp_trd_v1_R.shp',
            'summit_event_zln_trd_v2_L.shp',
            'test_trd2.trd',
            '2d_zln_shiptest_trd_v2_L.shp',
            '2d_zln_shiptest_trd_v2_P.shp',
            'shiptest_tgc_v1_DTM_2m.asc',
            '2d_zln_shiptest_tgc_v1_L.shp',
            '2d_zln_shiptest_tgc_v1_P.shp',
            '2d_zpt_shiptest_tgc_v1_R.shp',
            '2d_mat_shiptest_tgc_v1_R.shp',
        ]
        paths = self.tuflow.control_files['TGC'].filepaths(se_vals=se_vals)
        utils.softAssertion(set(test_paths), set(paths))
        print('Done')
        print('Testing se_vals variables count...')
        found_vars = self.tuflow.control_files['TGC'].variables(se_vals=se_vals)
        utils.softAssertion(len(found_vars), 6)
        print('Done')

    def test_files(self):
        """Check per-control-file file counts, with and without duplicates."""
        print('Testing no_duplicates=True files count...')
        test_filecounts = {
            'TCF': 13,
            'ECF': 4,
            'TGC': 14,
            'TBC': 2,
        }
        for key, c in self.tuflow.control_files.items():
            print('Checking model_type: ' + key + '...')
            found_files = c.files()
            utils.softAssertion(len(found_files), test_filecounts[key])
        print('Done')
        print('Testing no_duplicates=False files count...')
        test_filecounts = {
            'TCF': 14,
            'ECF': 4,
            'TGC': 23,
            'TBC': 4,
        }
        for key, c in self.tuflow.control_files.items():
            print('Checking model_type: ' + key + '...')
            found_files = c.files(no_duplicates=False)
            utils.softAssertion(len(found_files), test_filecounts[key])
        print('Done')

    def test_variables(self):
        """Check per-control-file variable counts, with and without duplicates."""
        print('Testing no_duplicates=True variables count...')
        test_varcounts = {
            'TCF': 16,
            'ECF': 6,
            'TGC': 6,
            'TBC': 0,
        }
        for key, c in self.tuflow.control_files.items():
            print('Checking model_type: ' + key + '...')
            found_vars = c.variables()
            utils.softAssertion(len(found_vars), test_varcounts[key])
        print('Done')
        print('Testing no_duplciates=False variables count...')
        test_varcounts = {
            'TCF': 18,
            'ECF': 6,
            'TGC': 9,
            'TBC': 0,
        }
        for key, c in self.tuflow.control_files.items():
            print('Checking model_type: ' + key + '...')
            found_vars = c.variables(no_duplicates=False)
            utils.softAssertion(len(found_vars), test_varcounts[key])
        print('Done')

    def test_allFilepaths(self):
        """Check the full filepath list of every control file against known data."""
        print('Testing all filepaths...')
        test_paths = {
            'TCF': [
                'materials_shiptest_v1.csv',
                '1d_nodes_shiptest_v1_P.shp',
                '1d_nwk_shiptest_v1_L.shp',
                '1d_WLL_shiptest_v1_L.shp',
                'bc_dbase_shiptest_v1.csv',
                'test_run1.ecf',
                'test_tgc1.tgc',
                'test_tbc1.tbc',
                'test_tbc2.tbc',
                '2d_oz_ZoneA_shiptest_v1_R.shp',
                '2d_po_shiptest_v1_L.shp'
            ],
            'ECF': [
                'Projection.prj',
                'bc_dbase_shiptest_v1.csv'
            ],
            'TGC': [
                '2d_loc_shiptest_tgc_v1_L.shp',
                '2d_code_shiptest_tgc_v1_R.shp',
                '2d_bc_hx_shiptest_tgc_v1_R.shp',
                '2d_whatevs_shiptest_tgc_v1_P.shp',
                '2d_whatevs_shiptest_tgc_v2_P.shp',
                'test_trd1.trd',
                '2d_zln_shiptest_trd_v1_L.shp',
                '2d_zln_shiptest_trd_v1_P.shp',
                'some_zshp_trd_v1_R.shp',
                'some_zshp_trd_v2_R.shp',
                'summit_event_zln_trd_v1_L.shp',
                'summit_event_zln_trd_v2_L.shp',
                'test_trd2.trd',
                '2d_zln_shiptest_trd_v2_L.shp',
                '2d_zln_shiptest_trd_v2_P.shp',
                'test_trd3.trd',
                '2d_zln_shiptest_trd_v3_L.shp',
                '2d_zln_shiptest_trd_v3_P.shp',
                'shiptest_tgc_v1_DTM_2m.asc',
                '2d_zln_shiptest_tgc_v1_L.shp',
                '2d_zln_shiptest_tgc_v1_P.shp',
                '2d_zpt_shiptest_tgc_v1_R.shp',
                '2d_mat_shiptest_tgc_v1_R.shp'
            ],
            'TBC': [
                '2d_bc_hx_shiptest_tbc_v1_L.shp',
                '2d_bc_cn_shiptest_tbc_v1_L.shp',
                '2d_bc_hx_shiptest_tbc_v2_L.shp',
                '2d_bc_cn_shiptest_tbc_v2_L.shp'
            ],
        }
        for key, c in self.tuflow.control_files.items():
            filepaths = c.filepaths()
            print('Checking model_type: ' + key + '...')
            utils.softAssertion(set(test_paths[key]), set(filepaths))
        print('Done')
| |
from __future__ import unicode_literals
from datetime import date
import unittest
from django.core.exceptions import FieldError
from django.db import models
from django.db import connection
from django.test import TestCase
from .models import Author
class Div3Lookup(models.Lookup):
    """Custom lookup matching rows where the column value modulo 3 equals rhs."""
    lookup_name = 'div3'

    def as_sql(self, qn, connection):
        lhs_sql, lhs_params = self.process_lhs(qn, connection)
        rhs_sql, rhs_params = self.process_rhs(qn, connection)
        combined = lhs_params + rhs_params
        return '%s %%%% 3 = %s' % (lhs_sql, rhs_sql), combined

    def as_oracle(self, qn, connection):
        # Oracle spells modulo as the mod() function.
        lhs_sql, lhs_params = self.process_lhs(qn, connection)
        rhs_sql, rhs_params = self.process_rhs(qn, connection)
        combined = lhs_params + rhs_params
        return 'mod(%s, 3) = %s' % (lhs_sql, rhs_sql), combined
class Div3Transform(models.Transform):
    """Transform exposing the column value modulo 3 to subsequent lookups."""
    lookup_name = 'div3'

    def as_sql(self, qn, connection):
        expr_sql, expr_params = qn.compile(self.lhs)
        return '%s %%%% 3' % (expr_sql,), expr_params

    def as_oracle(self, qn, connection):
        # Oracle spells modulo as the mod() function.
        expr_sql, expr_params = qn.compile(self.lhs)
        return 'mod(%s, 3)' % expr_sql, expr_params
class YearTransform(models.Transform):
    """Transform extracting the year part of a date column; lookups chained
    after it compare against an integer."""
    lookup_name = 'year'

    def as_sql(self, qn, connection):
        col_sql, col_params = qn.compile(self.lhs)
        return connection.ops.date_extract_sql('year', col_sql), col_params

    @property
    def output_field(self):
        # Downstream lookups should treat the extracted year as an integer.
        return models.IntegerField()
@YearTransform.register_lookup
class YearExact(models.lookups.Lookup):
    """Exact-match lookup for YearTransform, implemented as a date-range
    check on the underlying column (PostgreSQL-specific SQL)."""
    lookup_name = 'exact'

    def as_sql(self, qn, connection):
        # We will need to skip the extract part, and instead go
        # directly with the originating field, that is self.lhs.lhs
        lhs_sql, lhs_params = self.process_lhs(qn, connection, self.lhs.lhs)
        rhs_sql, rhs_params = self.process_rhs(qn, connection)
        # Note that we must be careful so that we have params in the
        # same order as we have the parts in the SQL.
        # Both lhs and rhs appear twice in the SQL below, hence the
        # doubled parameter lists.
        params = lhs_params + rhs_params + lhs_params + rhs_params
        # We use PostgreSQL specific SQL here. Note that we must do the
        # conversions in SQL instead of in Python to support F() references.
        return ("%(lhs)s >= (%(rhs)s || '-01-01')::date "
                "AND %(lhs)s <= (%(rhs)s || '-12-31')::date" %
                {'lhs': lhs_sql, 'rhs': rhs_sql}, params)
@YearTransform.register_lookup
class YearLte(models.lookups.LessThanOrEqual):
    """
    The purpose of this lookup is to efficiently compare the year of the field.
    """
    def as_sql(self, qn, connection):
        # Bypass the YearTransform above us and compare the raw date column
        # directly - there is no efficient form for the extracted year.
        underlying_lhs = self.lhs.lhs
        lhs_sql, sql_params = self.process_lhs(qn, connection, underlying_lhs)
        rhs_sql, rhs_params = self.process_rhs(qn, connection)
        # Concatenate the integer year with '-12-31' and cast to a date so
        # the comparison becomes e.g. "somecol <= '2013-12-31'", which also
        # works when rhs_sql is a field reference instead of a literal.
        sql = "%s <= (%s || '-12-31')::date" % (lhs_sql, rhs_sql)
        return sql, sql_params + rhs_params
class SQLFunc(models.Lookup):
    """Lookup that renders as a parameterised SQL function call ``name()``."""

    def __init__(self, name, *args, **kwargs):
        super(SQLFunc, self).__init__(*args, **kwargs)
        # Name of the SQL function; passed as a query parameter in as_sql().
        self.name = name

    def as_sql(self, qn, connection):
        # The function name itself is the single query parameter.
        return '%s()', [self.name]

    @property
    def output_field(self):
        return CustomField()
class SQLFuncFactory(object):
    """Callable factory producing SQLFunc lookups bound to a fixed name.

    Returned by CustomField.get_lookup()/get_transform() so that Django can
    instantiate the lookup/transform with its usual arguments.
    """
    def __init__(self, name):
        self.name = name

    def __call__(self, *args, **kwargs):
        return SQLFunc(self.name, *args, **kwargs)
class CustomField(models.TextField):
    """TextField whose lookups named ``lookupfunc_X`` and transforms named
    ``transformfunc_X`` resolve to SQLFunc factories for function ``X``."""

    def get_lookup(self, lookup_name):
        if lookup_name.startswith('lookupfunc_'):
            _prefix, func_name = lookup_name.split('_', 1)
            return SQLFuncFactory(func_name)
        return super(CustomField, self).get_lookup(lookup_name)

    def get_transform(self, lookup_name):
        if lookup_name.startswith('transformfunc_'):
            _prefix, func_name = lookup_name.split('_', 1)
            return SQLFuncFactory(func_name)
        return super(CustomField, self).get_transform(lookup_name)
class CustomModel(models.Model):
    # Model used by CustomisedMethodsTests to exercise CustomField's
    # overridden get_lookup()/get_transform().
    field = CustomField()
# We will register this class temporarily in the test method.
class InMonth(models.lookups.Lookup):
    """
    InMonth matches if the column's month is the same as value's month.
    """
    lookup_name = 'inmonth'

    def as_sql(self, qn, connection):
        lhs_sql, lhs_params = self.process_lhs(qn, connection)
        rhs_sql, rhs_params = self.process_rhs(qn, connection)
        # The SQL template interpolates lhs, rhs, lhs, rhs - the parameter
        # list must follow exactly that order.
        ordered_params = lhs_params + rhs_params + lhs_params + rhs_params
        sql = ("%s >= date_trunc('month', %s) and "
               "%s < date_trunc('month', %s) + interval '1 months'" %
               (lhs_sql, rhs_sql, lhs_sql, rhs_sql))
        return sql, ordered_params
class LookupTests(TestCase):
    """Tests for registering custom lookups/transforms on built-in fields."""

    def test_basic_lookup(self):
        a1 = Author.objects.create(name='a1', age=1)
        a2 = Author.objects.create(name='a2', age=2)
        a3 = Author.objects.create(name='a3', age=3)
        a4 = Author.objects.create(name='a4', age=4)
        models.IntegerField.register_lookup(Div3Lookup)
        try:
            self.assertQuerysetEqual(
                Author.objects.filter(age__div3=0),
                [a3], lambda x: x
            )
            self.assertQuerysetEqual(
                Author.objects.filter(age__div3=1).order_by('age'),
                [a1, a4], lambda x: x
            )
            self.assertQuerysetEqual(
                Author.objects.filter(age__div3=2),
                [a2], lambda x: x
            )
            self.assertQuerysetEqual(
                Author.objects.filter(age__div3=3),
                [], lambda x: x
            )
        finally:
            # Unregister so the lookup does not leak into other tests.
            models.IntegerField._unregister_lookup(Div3Lookup)

    @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific SQL used")
    def test_birthdate_month(self):
        a1 = Author.objects.create(name='a1', birthdate=date(1981, 2, 16))
        a2 = Author.objects.create(name='a2', birthdate=date(2012, 2, 29))
        a3 = Author.objects.create(name='a3', birthdate=date(2012, 1, 31))
        a4 = Author.objects.create(name='a4', birthdate=date(2012, 3, 1))
        models.DateField.register_lookup(InMonth)
        try:
            self.assertQuerysetEqual(
                Author.objects.filter(birthdate__inmonth=date(2012, 1, 15)),
                [a3], lambda x: x
            )
            self.assertQuerysetEqual(
                Author.objects.filter(birthdate__inmonth=date(2012, 2, 1)),
                [a2], lambda x: x
            )
            self.assertQuerysetEqual(
                Author.objects.filter(birthdate__inmonth=date(1981, 2, 28)),
                [a1], lambda x: x
            )
            self.assertQuerysetEqual(
                Author.objects.filter(birthdate__inmonth=date(2012, 3, 12)),
                [a4], lambda x: x
            )
            self.assertQuerysetEqual(
                Author.objects.filter(birthdate__inmonth=date(2012, 4, 1)),
                [], lambda x: x
            )
        finally:
            models.DateField._unregister_lookup(InMonth)

    def test_div3_extract(self):
        models.IntegerField.register_lookup(Div3Transform)
        try:
            a1 = Author.objects.create(name='a1', age=1)
            a2 = Author.objects.create(name='a2', age=2)
            a3 = Author.objects.create(name='a3', age=3)
            a4 = Author.objects.create(name='a4', age=4)
            baseqs = Author.objects.order_by('name')
            self.assertQuerysetEqual(
                baseqs.filter(age__div3=2),
                [a2], lambda x: x)
            # The transform can be chained with further built-in lookups.
            self.assertQuerysetEqual(
                baseqs.filter(age__div3__lte=3),
                [a1, a2, a3, a4], lambda x: x)
            self.assertQuerysetEqual(
                baseqs.filter(age__div3__in=[0, 2]),
                [a2, a3], lambda x: x)
        finally:
            models.IntegerField._unregister_lookup(Div3Transform)
class YearLteTests(TestCase):
    """Tests for the YearTransform and its registered YearExact/YearLte."""

    def setUp(self):
        models.DateField.register_lookup(YearTransform)
        self.a1 = Author.objects.create(name='a1', birthdate=date(1981, 2, 16))
        self.a2 = Author.objects.create(name='a2', birthdate=date(2012, 2, 29))
        self.a3 = Author.objects.create(name='a3', birthdate=date(2012, 1, 31))
        self.a4 = Author.objects.create(name='a4', birthdate=date(2012, 3, 1))

    def tearDown(self):
        models.DateField._unregister_lookup(YearTransform)

    @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific SQL used")
    def test_year_lte(self):
        baseqs = Author.objects.order_by('name')
        self.assertQuerysetEqual(
            baseqs.filter(birthdate__year__lte=2012),
            [self.a1, self.a2, self.a3, self.a4], lambda x: x)
        self.assertQuerysetEqual(
            baseqs.filter(birthdate__year=2012),
            [self.a2, self.a3, self.a4], lambda x: x)
        # YearExact rewrites to a range comparison, not BETWEEN.
        self.assertNotIn('BETWEEN', str(baseqs.filter(birthdate__year=2012).query))
        self.assertQuerysetEqual(
            baseqs.filter(birthdate__year__lte=2011),
            [self.a1], lambda x: x)
        # The non-optimized version works, too.
        self.assertQuerysetEqual(
            baseqs.filter(birthdate__year__lt=2012),
            [self.a1], lambda x: x)

    @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific SQL used")
    def test_year_lte_fexpr(self):
        # Compare the year against another column via F() references.
        self.a2.age = 2011
        self.a2.save()
        self.a3.age = 2012
        self.a3.save()
        self.a4.age = 2013
        self.a4.save()
        baseqs = Author.objects.order_by('name')
        self.assertQuerysetEqual(
            baseqs.filter(birthdate__year__lte=models.F('age')),
            [self.a3, self.a4], lambda x: x)
        self.assertQuerysetEqual(
            baseqs.filter(birthdate__year__lt=models.F('age')),
            [self.a4], lambda x: x)

    def test_year_lte_sql(self):
        # This test will just check the generated SQL for __lte. This
        # doesn't require running on PostgreSQL and spots the most likely
        # error - not running YearLte SQL at all.
        baseqs = Author.objects.order_by('name')
        self.assertIn(
            '<= (2011 || ', str(baseqs.filter(birthdate__year__lte=2011).query))
        self.assertIn(
            '-12-31', str(baseqs.filter(birthdate__year__lte=2011).query))

    def test_postgres_year_exact(self):
        baseqs = Author.objects.order_by('name')
        self.assertIn(
            '= (2011 || ', str(baseqs.filter(birthdate__year=2011).query))
        self.assertIn(
            '-12-31', str(baseqs.filter(birthdate__year=2011).query))

    def test_custom_implementation_year_exact(self):
        try:
            # Two ways to add a customized implementation for different backends:
            # First is MonkeyPatch of the class.
            def as_custom_sql(self, qn, connection):
                lhs_sql, lhs_params = self.process_lhs(qn, connection, self.lhs.lhs)
                rhs_sql, rhs_params = self.process_rhs(qn, connection)
                params = lhs_params + rhs_params + lhs_params + rhs_params
                return ("%(lhs)s >= str_to_date(concat(%(rhs)s, '-01-01'), '%%%%Y-%%%%m-%%%%d') "
                        "AND %(lhs)s <= str_to_date(concat(%(rhs)s, '-12-31'), '%%%%Y-%%%%m-%%%%d')" %
                        {'lhs': lhs_sql, 'rhs': rhs_sql}, params)
            setattr(YearExact, 'as_' + connection.vendor, as_custom_sql)
            self.assertIn(
                'concat(',
                str(Author.objects.filter(birthdate__year=2012).query))
        finally:
            delattr(YearExact, 'as_' + connection.vendor)
        try:
            # The other way is to subclass the original lookup and register the subclassed
            # lookup instead of the original.
            class CustomYearExact(YearExact):
                # This method should be named "as_mysql" for MySQL, "as_postgresql" for postgres
                # and so on, but as we don't know which DB we are running on, we need to use
                # setattr.
                def as_custom_sql(self, qn, connection):
                    lhs_sql, lhs_params = self.process_lhs(qn, connection, self.lhs.lhs)
                    rhs_sql, rhs_params = self.process_rhs(qn, connection)
                    params = lhs_params + rhs_params + lhs_params + rhs_params
                    return ("%(lhs)s >= str_to_date(CONCAT(%(rhs)s, '-01-01'), '%%%%Y-%%%%m-%%%%d') "
                            "AND %(lhs)s <= str_to_date(CONCAT(%(rhs)s, '-12-31'), '%%%%Y-%%%%m-%%%%d')" %
                            {'lhs': lhs_sql, 'rhs': rhs_sql}, params)
            setattr(CustomYearExact, 'as_' + connection.vendor, CustomYearExact.as_custom_sql)
            YearTransform.register_lookup(CustomYearExact)
            self.assertIn(
                'CONCAT(',
                str(Author.objects.filter(birthdate__year=2012).query))
        finally:
            # Restore the original YearExact registration.
            YearTransform._unregister_lookup(CustomYearExact)
            YearTransform.register_lookup(YearExact)
class TrackCallsYearTransform(YearTransform):
    """YearTransform that records the order of get_lookup()/get_transform()
    calls, so tests can assert how Django resolves trailing lookup parts."""
    lookup_name = 'year'
    # Class-level list shared by all instances; tests reset it between checks.
    call_order = []

    def as_sql(self, qn, connection):
        lhs_sql, params = qn.compile(self.lhs)
        return connection.ops.date_extract_sql('year', lhs_sql), params

    @property
    def output_field(self):
        return models.IntegerField()

    def get_lookup(self, lookup_name):
        self.call_order.append('lookup')
        return super(TrackCallsYearTransform, self).get_lookup(lookup_name)

    def get_transform(self, lookup_name):
        self.call_order.append('transform')
        return super(TrackCallsYearTransform, self).get_transform(lookup_name)
class LookupTransformCallOrderTests(TestCase):
    """Verify in which order Django consults get_lookup()/get_transform()
    when resolving the final part of a filter expression."""

    def test_call_order(self):
        models.DateField.register_lookup(TrackCallsYearTransform)
        try:
            # junk lookup - tries lookup, then transform, then fails
            with self.assertRaises(FieldError):
                Author.objects.filter(birthdate__year__junk=2012)
            self.assertEqual(TrackCallsYearTransform.call_order,
                             ['lookup', 'transform'])
            TrackCallsYearTransform.call_order = []
            # junk transform - tries transform only, then fails
            with self.assertRaises(FieldError):
                Author.objects.filter(birthdate__year__junk__more_junk=2012)
            self.assertEqual(TrackCallsYearTransform.call_order,
                             ['transform'])
            TrackCallsYearTransform.call_order = []
            # Just getting the year (implied __exact) - lookup only
            Author.objects.filter(birthdate__year=2012)
            self.assertEqual(TrackCallsYearTransform.call_order,
                             ['lookup'])
            TrackCallsYearTransform.call_order = []
            # Just getting the year (explicit __exact) - lookup only
            Author.objects.filter(birthdate__year__exact=2012)
            self.assertEqual(TrackCallsYearTransform.call_order,
                             ['lookup'])
        finally:
            models.DateField._unregister_lookup(TrackCallsYearTransform)
class CustomisedMethodsTests(TestCase):
    """Checks that CustomField's overridden get_lookup()/get_transform()
    are honoured, both directly and when chained through transforms."""

    def test_overridden_get_lookup(self):
        qs = CustomModel.objects.filter(field__lookupfunc_monkeys=3)
        self.assertIn('monkeys()', str(qs.query))

    def test_overridden_get_transform(self):
        qs = CustomModel.objects.filter(field__transformfunc_banana=3)
        self.assertIn('banana()', str(qs.query))

    def test_overridden_get_lookup_chain(self):
        qs = CustomModel.objects.filter(
            field__transformfunc_banana__lookupfunc_elephants=3)
        self.assertIn('elephants()', str(qs.query))

    def test_overridden_get_transform_chain(self):
        qs = CustomModel.objects.filter(
            field__transformfunc_banana__transformfunc_pear=3)
        self.assertIn('pear()', str(qs.query))
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for our unit tests.
Allows overriding of flags for use of fakes, and some black magic for
inline callbacks.
"""
import contextlib
import datetime
import eventlet
eventlet.monkey_patch(os=False)
import copy
import inspect
import mock
import os
import fixtures
from oslo_cache import core as cache
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_log.fixture import logging_error as log_fixture
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslotest import moxstubout
import six
import testtools
from nova import context
from nova import db
from nova.network import manager as network_manager
from nova.network.security_group import openstack_driver
from nova.objects import base as objects_base
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import conf_fixture
from nova.tests.unit import policy_fixture
from nova import utils
CONF = cfg.CONF
logging.register_options(CONF)
# Keep test runs quiet on stderr; output is captured by fixtures instead.
CONF.set_override('use_stderr', False)
logging.setup(CONF, 'nova')
cache.configure(CONF)
# String values treated as "true" (e.g. when parsing environment variables).
_TRUE_VALUES = ('True', 'true', '1', 'yes')
if six.PY2:
    # Python 2 ships contextlib.nested directly.
    nested = contextlib.nested
else:
    # Python 3 removed contextlib.nested; emulate it with ExitStack so
    # callers can still write ``with nested(a, b) as (x, y):``.
    @contextlib.contextmanager
    def nested(*contexts):
        with contextlib.ExitStack() as stack:
            yield [stack.enter_context(c) for c in contexts]
class SampleNetworks(fixtures.Fixture):
    """Create sample networks in the database."""

    def __init__(self, host=None):
        # Host the VlanManager and created networks are associated with.
        self.host = host

    def setUp(self):
        super(SampleNetworks, self).setUp()
        ctxt = context.get_admin_context()
        network = network_manager.VlanManager(host=self.host)
        # Prefer the flat interface when configured, else the VLAN one.
        bridge_interface = CONF.flat_interface or CONF.vlan_interface
        # Network parameters are all taken from the test configuration.
        network.create_networks(ctxt,
                                label='test',
                                cidr='10.0.0.0/8',
                                multi_host=CONF.multi_host,
                                num_networks=CONF.num_networks,
                                network_size=CONF.network_size,
                                cidr_v6=CONF.fixed_range_v6,
                                gateway=CONF.gateway,
                                gateway_v6=CONF.gateway_v6,
                                bridge=CONF.flat_network_bridge,
                                bridge_interface=bridge_interface,
                                vpn_start=CONF.vpn_start,
                                vlan_start=CONF.vlan_start,
                                dns1=CONF.flat_network_dns)
        # Assign every created network to this manager's host.
        for net in db.network_get_all(ctxt):
            network.set_network_host(ctxt, net)
class TestingException(Exception):
    """Generic exception raised deliberately from within tests."""
    pass
class skipIf(object):
    """Skip a test function or test class when ``condition`` is true.

    Mirrors unittest.skipIf but raises testtools' skipException.  When
    applied to a class, only ``setUp`` is wrapped, so the skip decision is
    made per-test at run time rather than at decoration time.
    """

    def __init__(self, condition, reason):
        self.condition = condition
        self.reason = reason

    def __call__(self, func_or_cls):
        condition = self.condition
        reason = self.reason
        if inspect.isfunction(func_or_cls):
            @six.wraps(func_or_cls)
            def wrapped(*args, **kwargs):
                if condition:
                    raise testtools.TestCase.skipException(reason)
                return func_or_cls(*args, **kwargs)
            return wrapped
        elif inspect.isclass(func_or_cls):
            orig_func = getattr(func_or_cls, 'setUp')

            @six.wraps(orig_func)
            def new_func(self, *args, **kwargs):
                if condition:
                    raise testtools.TestCase.skipException(reason)
                orig_func(self, *args, **kwargs)
            func_or_cls.setUp = new_func
            return func_or_cls
        else:
            # Bug fix: the message previously referred to "skipUnless",
            # which is not this decorator's name.
            raise TypeError('skipIf can be used only with functions or '
                            'classes')
def _patch_mock_to_raise_for_invalid_assert_calls():
    """Monkey-patch mock.Mock so misspelled assert methods fail loudly.

    Mock auto-creates attributes, so a typo like ``assert_called_oncewith``
    silently returns a child mock and asserts nothing.  After this patch,
    any attribute starting with "assert" that is not a known assert method
    raises AttributeError instead.
    """
    def raise_for_invalid_assert_calls(wrapped):
        def wrapper(_self, name):
            valid_asserts = [
                'assert_called_with',
                'assert_called_once_with',
                'assert_has_calls',
                # Bug fix: the real mock method is assert_any_call (no
                # trailing "s"); without it here, legitimate calls were
                # rejected. The misspelled entry is kept for compatibility.
                'assert_any_call',
                'assert_any_calls']
            if name.startswith('assert') and name not in valid_asserts:
                raise AttributeError('%s is not a valid mock assert method'
                                     % name)
            return wrapped(_self, name)
        return wrapper
    mock.Mock.__getattr__ = raise_for_invalid_assert_calls(
        mock.Mock.__getattr__)

# NOTE(gibi): needs to be called only once at import time
# to patch the mock lib
_patch_mock_to_raise_for_invalid_assert_calls()
class TestCase(testtools.TestCase):
    """Test case base class for all unit tests.
    Due to the slowness of DB access, please consider deriving from
    `NoDBTestCase` first.
    """
    # Provision the main and api databases for this test (see setUp()).
    USES_DB = True
    # The test class manages its own DB fixtures; skips DB poisoning.
    USES_DB_SELF = False
    # Configure global external locking for the test (legacy; see setUp()).
    REQUIRES_LOCKING = False
    # Multiplier applied to OS_TEST_TIMEOUT for slow test classes.
    TIMEOUT_SCALING_FACTOR = 1

    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()
        self.useFixture(nova_fixtures.Timeout(
            os.environ.get('OS_TEST_TIMEOUT', 0),
            self.TIMEOUT_SCALING_FACTOR))
        # Isolate filesystem usage from the real home/temp directories.
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())
        self.useFixture(log_fixture.get_logging_handle_error_fixture())
        self.useFixture(nova_fixtures.OutputStreamCapture())
        self.useFixture(nova_fixtures.StandardLogging())
        # NOTE(sdague): because of the way we were using the lock
        # wrapper we ended up with a lot of tests that started
        # relying on global external locking being set up for them. We
        # consider all of these to be *bugs*. Tests should not require
        # global external locking, or if they do, they should
        # explicitly set it up themselves.
        #
        # The following REQUIRES_LOCKING class parameter is provided
        # as a bridge to get us there. No new tests should be added
        # that require it, and existing classes and tests should be
        # fixed to not need it.
        if self.REQUIRES_LOCKING:
            lock_path = self.useFixture(fixtures.TempDir()).path
            self.fixture = self.useFixture(
                config_fixture.Config(lockutils.CONF))
            self.fixture.config(lock_path=lock_path,
                                group='oslo_concurrency')
        self.useFixture(conf_fixture.ConfFixture(CONF))
        self.useFixture(nova_fixtures.RPCFixture('nova.test'))
        if self.USES_DB:
            self.useFixture(nova_fixtures.Database())
            self.useFixture(nova_fixtures.Database(database='api'))
            self.useFixture(nova_fixtures.DefaultFlavorsFixture())
        elif not self.USES_DB_SELF:
            self.useFixture(nova_fixtures.DatabasePoisonFixture())
        # NOTE(blk-u): WarningsFixture must be after the Database fixture
        # because sqlalchemy-migrate messes with the warnings filters.
        self.useFixture(nova_fixtures.WarningsFixture())
        # NOTE(danms): Make sure to reset us back to non-remote objects
        # for each test to avoid interactions. Also, backup the object
        # registry.
        objects_base.NovaObject.indirection_api = None
        self._base_test_obj_backup = copy.copy(
            objects_base.NovaObjectRegistry._registry._obj_classes)
        self.addCleanup(self._restore_obj_registry)
        self.useFixture(nova_fixtures.StableObjectJsonFixture())
        # NOTE(mnaser): All calls to utils.is_neutron() are cached in
        # nova.utils._IS_NEUTRON. We set it to None to avoid any
        # caching of that value.
        utils._IS_NEUTRON = None
        mox_fixture = self.useFixture(moxstubout.MoxStubout())
        self.mox = mox_fixture.mox
        self.stubs = mox_fixture.stubs
        self.addCleanup(self._clear_attrs)
        # Make sure the test is not influenced by an ambient http_proxy.
        self.useFixture(fixtures.EnvironmentVariable('http_proxy'))
        self.policy = self.useFixture(policy_fixture.PolicyFixture())
        self.useFixture(nova_fixtures.PoisonFunctions())
        # Reset the security-group driver cache between tests.
        openstack_driver.DRIVER_CACHE = {}
        self.useFixture(nova_fixtures.ForbidNewLegacyNotificationFixture())

    def _restore_obj_registry(self):
        """Restore the object registry backed up in setUp()."""
        objects_base.NovaObjectRegistry._registry._obj_classes = \
            self._base_test_obj_backup

    def _clear_attrs(self):
        # Delete attributes that don't start with _ so they don't pin
        # memory around unnecessarily for the duration of the test
        # suite
        for key in [k for k in self.__dict__.keys() if k[0] != '_']:
            # NOTE(gmann): Skip attribute 'id' because if tests are being
            # generated using testscenarios then, 'id' attribute is being
            # added during cloning the tests. And later that 'id' attribute
            # is being used by test suite to generate the results for each
            # newly generated tests by testscenarios.
            if key != 'id':
                del self.__dict__[key]

    def stub_out(self, old, new):
        """Replace a function for the duration of the test.
        Use the monkey patch fixture to replace a function for the
        duration of a test. Useful when you want to provide fake
        methods instead of mocks during testing.
        This should be used instead of self.stubs.Set (which is based
        on mox) going forward.
        """
        self.useFixture(fixtures.MonkeyPatch(old, new))

    def flags(self, **kw):
        """Override flag variables for a test."""
        group = kw.pop('group', None)
        for k, v in six.iteritems(kw):
            CONF.set_override(k, v, group)

    def start_service(self, name, host=None, **kwargs):
        """Start a nova service via fixture and return the service object."""
        svc = self.useFixture(
            nova_fixtures.ServiceFixture(name, host, **kwargs))
        return svc.service

    def assertJsonEqual(self, expected, observed):
        """Asserts that 2 complex data structures are json equivalent.
        We use data structures which serialize down to json throughout
        the code, and often times we just need to know that these are
        json equivalent. This means that list order is not important,
        and should be sorted.
        Because this is a recursive set of assertions, when failure
        happens we want to expose both the local failure and the
        global view of the 2 data structures being compared. So a
        MismatchError which includes the inner failure as the
        mismatch, and the passed in expected / observed as matchee /
        matcher.
        """
        if isinstance(expected, six.string_types):
            expected = jsonutils.loads(expected)
        if isinstance(observed, six.string_types):
            observed = jsonutils.loads(observed)

        def sort_key(x):
            # Produce a deterministic ordering key for heterogeneous values.
            if isinstance(x, (set, list)) or isinstance(x, datetime.datetime):
                return str(x)
            if isinstance(x, dict):
                items = ((sort_key(key), sort_key(value))
                         for key, value in x.items())
                return sorted(items)
            return x

        def inner(expected, observed):
            # Recursively compare, ignoring ordering of lists/sets.
            if isinstance(expected, dict) and isinstance(observed, dict):
                self.assertEqual(len(expected), len(observed))
                expected_keys = sorted(expected)
                observed_keys = sorted(observed)
                self.assertEqual(expected_keys, observed_keys)
                for key in list(six.iterkeys(expected)):
                    inner(expected[key], observed[key])
            elif (isinstance(expected, (list, tuple, set)) and
                      isinstance(observed, (list, tuple, set))):
                self.assertEqual(len(expected), len(observed))
                expected_values_iter = iter(sorted(expected, key=sort_key))
                observed_values_iter = iter(sorted(observed, key=sort_key))
                for i in range(len(expected)):
                    inner(next(expected_values_iter),
                          next(observed_values_iter))
            else:
                self.assertEqual(expected, observed)
        try:
            inner(expected, observed)
        except testtools.matchers.MismatchError as e:
            inner_mismatch = e.mismatch
            # inverting the observed / expected because testtools
            # error messages assume expected is second. Possibly makes
            # reading the error messages less confusing.
            raise testtools.matchers.MismatchError(observed, expected,
                                                   inner_mismatch, verbose=True)

    def assertPublicAPISignatures(self, baseinst, inst):
        """Assert inst exposes exactly the public methods of baseinst,
        with matching argument specifications."""
        def get_public_apis(inst):
            methods = {}

            def findmethods(object):
                return inspect.ismethod(object) or inspect.isfunction(object)

            for (name, value) in inspect.getmembers(inst, findmethods):
                if name.startswith("_"):
                    continue
                methods[name] = value
            return methods

        baseclass = baseinst.__class__.__name__
        basemethods = get_public_apis(baseinst)
        implmethods = get_public_apis(inst)

        extranames = []
        for name in sorted(implmethods.keys()):
            if name not in basemethods:
                extranames.append(name)

        self.assertEqual([], extranames,
                         "public APIs not listed in base class %s" %
                         baseclass)

        # NOTE(review): inspect.getargspec was removed in Python 3.11;
        # confirm runtime before porting this module forward.
        for name in sorted(implmethods.keys()):
            baseargs = inspect.getargspec(basemethods[name])
            implargs = inspect.getargspec(implmethods[name])

            self.assertEqual(baseargs, implargs,
                             "%s args don't match base class %s" %
                             (name, baseclass))
class APICoverage(object):
    """Mixin asserting every public attribute of ``cover_api`` has a test.

    Subclasses set ``cover_api``; each of its public names must be matched
    by a corresponding ``test_<name>`` method on the test class.
    """
    cover_api = None

    def test_api_methods(self):
        self.assertTrue(self.cover_api is not None)
        api_methods = [name for name in dir(self.cover_api)
                       if not name.startswith('_')]
        test_methods = [name[len('test_'):] for name in dir(self)
                        if name.startswith('test_')]
        self.assertThat(
            test_methods,
            testtools.matchers.ContainsAll(api_methods))
class TimeOverride(fixtures.Fixture):
    """Fixture to start and remove time override."""

    def setUp(self):
        super(TimeOverride, self).setUp()
        # Freeze timeutils' notion of "now"; cleanup restores real time.
        timeutils.set_time_override()
        self.addCleanup(timeutils.clear_time_override)
class NoDBTestCase(TestCase):
    """`NoDBTestCase` differs from TestCase in that DB access is not supported.
    This makes tests run significantly faster. If possible, all new tests
    should derive from this class.
    """
    # Skips the database fixtures in TestCase.setUp() and poisons DB access.
    USES_DB = False
class BaseHookTestCase(NoDBTestCase):
    """Base class for tests of the hook mechanism."""

    def assert_has_hook(self, expected_name, func):
        """Assert ``func`` carries a ``__hook_name__`` equal to expected_name."""
        self.assertTrue(hasattr(func, '__hook_name__'))
        self.assertEqual(expected_name, func.__hook_name__)
class MatchType(object):
    """Equality helper matching any instance of exactly one type.

    Intended for use with mock.assert_called_with() to assert that a
    particular argument has a specific data type - a stricter check than
    mock.ANY, equivalent to mox.IsA() from the legacy mox library.

    Example usage:

        mock_some_method.assert_called_once_with(
            "hello",
            MatchType(objects.Instance),
            mock.ANY,
            "world",
            MatchType(objects.KeyPair))
    """

    def __init__(self, wanttype):
        self.wanttype = wanttype

    def __eq__(self, other):
        # Deliberately an exact type comparison (not isinstance), so
        # subclasses of the wanted type do not match.
        return type(other) == self.wanttype

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "<MatchType:%s>" % (self.wanttype,)
| |
#!/usr/bin/env python2
# Copyright (c) 2014 The VeriCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test pruning code
# ********
# WARNING:
# This test uses 4GB of disk space.
# This test takes 30 mins or more (up to 2 hours)
# ********
from test_framework.test_framework import VeriCoinTestFramework
from test_framework.util import *
import os.path
def calc_usage(blockdir):
    """Return the combined size, in MiB, of the plain files directly inside
    blockdir (which must end with a path separator); subdirs are ignored."""
    total_bytes = 0
    for entry in os.listdir(blockdir):
        path = blockdir + entry
        if os.path.isfile(path):
            total_bytes += os.path.getsize(path)
    return total_bytes / (1024 * 1024)
class PruneTest(VeriCoinTestFramework):
    def __init__(self):
        # NOTE(review): super(PruneTest, self).__init__() is never called
        # here - confirm VeriCoinTestFramework.__init__ performs no setup
        # this test relies on.
        self.utxo = []
        self.address = ["",""]
        # Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
        # So we have big transactions and full blocks to fill up our block files
        # create one script_pubkey
        script_pubkey = "6a4d0200" #OP_RETURN OP_PUSH2 512 bytes
        for i in xrange (512):
            script_pubkey = script_pubkey + "01"
        # concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
        self.txouts = "81"
        for k in xrange(128):
            # add txout value
            self.txouts = self.txouts + "0000000000000000"
            # add length of script_pubkey
            self.txouts = self.txouts + "fd0402"
            # add script_pubkey
            self.txouts = self.txouts + script_pubkey
    def setup_chain(self):
        # Start from a clean 3-node chain; nodes are launched in setup_network().
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 3)
    def setup_network(self):
        """Start two mining nodes and one pruning node, fully connected."""
        self.nodes = []
        self.is_network_split = False

        # Create nodes 0 and 1 to mine
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))

        # Create node 2 to test pruning
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-prune=550"], timewait=900))
        self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/"

        self.address[0] = self.nodes[0].getnewaddress()
        self.address[1] = self.nodes[1].getnewaddress()

        # Determine default relay fee
        self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]

        # Connect the three nodes in a ring and wait for them to sync.
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 2)
        connect_nodes(self.nodes[2], 0)
        sync_blocks(self.nodes[0:3])
    def create_big_chain(self):
        """Mine enough large blocks that node 2's block files exceed its
        550MB prune target."""
        # Start by creating some coinbases we can spend later
        self.nodes[1].generate(200)
        sync_blocks(self.nodes[0:2])
        self.nodes[0].generate(150)
        # Then mine enough full blocks to create more than 550MB of data
        for i in xrange(645):
            self.mine_full_block(self.nodes[0], self.address[0])

        sync_blocks(self.nodes[0:3])
    def test_height_min(self):
        """Check pruning does not start early, then kicks in and keeps disk
        usage under the 550MB target once more blocks are mined."""
        if not os.path.isfile(self.prunedir+"blk00000.dat"):
            raise AssertionError("blk00000.dat is missing, pruning too early")
        print "Success"
        print "Though we're already using more than 550MB, current usage:", calc_usage(self.prunedir)
        print "Mining 25 more blocks should cause the first block file to be pruned"
        # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
        for i in xrange(25):
            self.mine_full_block(self.nodes[0],self.address[0])

        # Poll for the first block file to disappear, with a 10s deadline.
        waitstart = time.time()
        while os.path.isfile(self.prunedir+"blk00000.dat"):
            time.sleep(0.1)
            if time.time() - waitstart > 10:
                raise AssertionError("blk00000.dat not pruned when it should be")

        print "Success"
        usage = calc_usage(self.prunedir)
        print "Usage should be below target:", usage
        if (usage > 550):
            raise AssertionError("Pruning target not being met")
    def create_chain_with_staleblocks(self):
        """Repeatedly mine competing forks so the block files accumulate
        stale blocks alongside the main chain."""
        # Create stale blocks in manageable sized chunks
        print "Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds"
        for j in xrange(12):
            # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
            # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
            # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
            stop_node(self.nodes[0],0)
            self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)
            # Mine 24 blocks in node 1
            self.utxo = self.nodes[1].listunspent()
            for i in xrange(24):
                if j == 0:
                    self.mine_full_block(self.nodes[1],self.address[1])
                else:
                    self.nodes[1].generate(1) #tx's already in mempool from previous disconnects

            # Reorg back with 25 block chain from node 0
            self.utxo = self.nodes[0].listunspent()
            for i in xrange(25):
                self.mine_full_block(self.nodes[0],self.address[0])

            # Create connections in the order so both nodes can see the reorg at the same time
            connect_nodes(self.nodes[1], 0)
            connect_nodes(self.nodes[2], 0)
            sync_blocks(self.nodes[0:3])

        print "Usage can be over target because of high stale rate:", calc_usage(self.prunedir)
    def reorg_test(self):
        """Force node 2 through a 288-deep reorg and verify pruning still
        meets its target afterwards.

        Returns the (height, hash) of the invalidated fork point so
        reorg_back() can later test re-downloading pruned blocks.
        """
        # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
        # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
        # Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
        # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
        stop_node(self.nodes[1],1)
        self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)

        height = self.nodes[1].getblockcount()
        print "Current block height:", height

        invalidheight = height-287
        badhash = self.nodes[1].getblockhash(invalidheight)
        print "Invalidating block at height:",invalidheight,badhash
        self.nodes[1].invalidateblock(badhash)

        # We've now switched to our previously mined-24 block fork on node 1, but thats not what we want
        # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
        mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
        curhash = self.nodes[1].getblockhash(invalidheight - 1)
        while curhash != mainchainhash:
            self.nodes[1].invalidateblock(curhash)
            curhash = self.nodes[1].getblockhash(invalidheight - 1)

        assert(self.nodes[1].getblockcount() == invalidheight - 1)
        print "New best height", self.nodes[1].getblockcount()

        # Reboot node1 to clear those giant tx's from mempool
        stop_node(self.nodes[1],1)
        self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)

        print "Generating new longer chain of 300 more blocks"
        self.nodes[1].generate(300)

        print "Reconnect nodes"
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[2], 1)
        sync_blocks(self.nodes[0:3])

        print "Verify height on node 2:",self.nodes[2].getblockcount()
        print "Usage possibly still high bc of stale blocks in block files:", calc_usage(self.prunedir)

        print "Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)"
        self.nodes[0].generate(220) #node 0 has many large tx's in its mempool from the disconnects
        sync_blocks(self.nodes[0:3])

        usage = calc_usage(self.prunedir)
        print "Usage should be below target:", usage
        if (usage > 550):
            raise AssertionError("Pruning target not being met")

        return invalidheight,badhash
    def reorg_back(self):
        """Force node 2 to reorg back to the original main chain, re-downloading
        blocks it had pruned away.

        Relies on state stored by run_test/reorg_test: self.forkheight,
        self.forkhash, self.mainchainheight, self.mainchainhash2.
        Raises AssertionError if the old block was never pruned or if node 2
        fails to reach the goal height within 900 seconds.
        """
        # Verify that a block on the old main chain fork has been pruned away
        try:
            self.nodes[2].getblock(self.forkhash)
            raise AssertionError("Old block wasn't pruned so can't test redownload")
        except JSONRPCException as e:
            print "Will need to redownload block",self.forkheight
        # Verify that we have enough history to reorg back to the fork point
        # Although this is more than 288 blocks, because this chain was written more recently
        # and only its other 299 small and 220 large block are in the block files after it,
        # its expected to still be retained
        self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
        first_reorg_height = self.nodes[2].getblockcount()
        curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
        self.nodes[2].invalidateblock(curchainhash)
        goalbestheight = self.mainchainheight
        goalbesthash = self.mainchainhash2
        # As of 0.10 the current block download logic is not able to reorg to the original chain created in
        # create_chain_with_stale_blocks because it doesn't know of any peer thats on that chain from which to
        # redownload its missing blocks.
        # Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
        # because it has all the block data.
        # However it must mine enough blocks to have a more work chain than the reorg_test chain in order
        # to trigger node 2's block download logic.
        # At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
        if self.nodes[2].getblockcount() < self.mainchainheight:
            blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
            print "Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed:", blocks_to_mine
            self.nodes[0].invalidateblock(curchainhash)
            assert(self.nodes[0].getblockcount() == self.mainchainheight)
            assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
            goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
            goalbestheight = first_reorg_height + 1
        print "Verify node 2 reorged back to the main chain, some blocks of which it had to redownload"
        # Poll node 2 until it reaches the goal height (or time out after 900s).
        waitstart = time.time()
        while self.nodes[2].getblockcount() < goalbestheight:
            time.sleep(0.1)
            if time.time() - waitstart > 900:
                raise AssertionError("Node 2 didn't reorg to proper height")
        assert(self.nodes[2].getbestblockhash() == goalbesthash)
        # Verify we can now have the data for a block previously pruned
        assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)
def mine_full_block(self, node, address):
# Want to create a full block
# We'll generate a 66k transaction below, and 14 of them is close to the 1MB block limit
for j in xrange(14):
if len(self.utxo) < 14:
self.utxo = node.listunspent()
inputs=[]
outputs = {}
t = self.utxo.pop()
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
remchange = t["amount"] - 100*self.relayfee # Fee must be above min relay rate for 66kb tx
outputs[address]=remchange
# Create a basic transaction that will send change back to ourself after account for a fee
# And then insert the 128 generated transaction outs in the middle rawtx[92] is where the #
# of txouts is stored and is the only thing we overwrite from the original transaction
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + self.txouts
newtx = newtx + rawtx[94:]
# Appears to be ever so slightly faster to sign with SIGHASH_NONE
signresult = node.signrawtransaction(newtx,None,None,"NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
# Mine a full sized block which will be these transactions we just created
node.generate(1)
    def run_test(self):
        """Top-level test driver: builds a large chain, exercises pruning under
        stale-block and deep-reorg scenarios, and verifies pruned blocks can be
        re-downloaded when needed for a reorg.

        WARNING: requires ~4GB of disk space and can take up to 2 hours.
        """
        print "Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)"
        print "Mining a big blockchain of 995 blocks"
        self.create_big_chain()
        # Chain diagram key:
        # *   blocks on main chain
        # +,&,$,@ blocks on other forks
        # X   invalidated block
        # N1  Node 1
        #
        # Start by mining a simple chain that all nodes have
        # N0=N1=N2 **...*(995)
        print "Check that we haven't started pruning yet because we're below PruneAfterHeight"
        self.test_height_min()
        # Extend this chain past the PruneAfterHeight
        # N0=N1=N2 **...*(1020)
        print "Check that we'll exceed disk space target if we have a very high stale block rate"
        self.create_chain_with_staleblocks()
        # Disconnect N0
        # And mine a 24 block chain on N1 and a separate 25 block chain on N0
        # N1=N2 **...*+...+(1044)
        # N0    **...**...**(1045)
        #
        # reconnect nodes causing reorg on N1 and N2
        # N1=N2 **...*(1020) *...**(1045)
        #                   \
        #                    +...+(1044)
        #
        # repeat this process until you have 12 stale forks hanging off the
        # main chain on N1 and N2
        # N0    *************************...***************************(1320)
        #
        # N1=N2 **...*(1020) *...**(1045) *..         ..**(1295) *...**(1320)
        #                   \            \                      \
        #                    +...+(1044) &..                    $...$(1319)
        # Save some current chain state for later use
        self.mainchainheight = self.nodes[2].getblockcount()   #1320
        self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
        print "Check that we can survive a 288 block reorg still"
        (self.forkheight,self.forkhash) = self.reorg_test() #(1033, )
        # Now create a 288 block reorg by mining a longer chain on N1
        # First disconnect N1
        # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
        # N1   **...*(1020) **...**(1032)X..
        #                  \
        #                   ++...+(1031)X..
        #
        # Now mine 300 more blocks on N1
        # N1    **...*(1020) **...**(1032) @@...@(1332)
        #                  \               \
        #                   \               X...
        #                    \               \
        #                     ++...+(1031)X..   ..
        #
        # Reconnect nodes and mine 220 more blocks on N1
        # N1    **...*(1020) **...**(1032) @@...@@@(1552)
        #                  \               \
        #                   \               X...
        #                    \               \
        #                     ++...+(1031)X..   ..
        #
        # N2    **...*(1020) **...**(1032) @@...@@@(1552)
        #                  \               \
        #                   \               *...**(1320)
        #                    \               \
        #                     ++...++(1044)   ..
        #
        # N0    ********************(1032) @@...@@@(1552)
        #                                 \
        #                                  *...**(1320)
        print "Test that we can rerequest a block we previously pruned if needed for a reorg"
        self.reorg_back()
        # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
        # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
        # original main chain (*), but will require redownload of some blocks
        # In order to have a peer we think we can download from, must also perform this invalidation
        # on N0 and mine a new longest chain to trigger.
        # Final result:
        # N0    ********************(1032) **...****(1553)
        #                                 \
        #                                  X@...@@@(1552)
        #
        # N2    **...*(1020) **...**(1032) **...****(1553)
        #                   \              \
        #                    \              X@...@@@(1552)
        #                     \
        #                      +..
        #
        # N1 doesn't change because 1033 on main chain (*) is invalid
        print "Done"
# Script entry point: run the pruning test framework directly.
if __name__ == '__main__':
    PruneTest().main()
| |
from oebakery import die, err, warn, info, debug
from oelite import *
from oelite.dbutil import *
import oelite.recipe
import sys
import os
import copy
import operator
class OEliteRunQueue:
    def __init__(self, config, cookbook, rebuild=None, relax=None,
                 depth_first=True):
        """Create a run queue on top of *cookbook*, backed by an in-memory
        SQLite schema attached to the cookbook database as 'runq'.
        """
        self.cookbook = cookbook
        self.config = config
        # 1: --rebuild, 2: --rebuildall, 3: --reallyrebuildall
        self.rebuild = rebuild
        # 1: --relaxed, 2: --sloppy (see _add_recipe)
        self.relax = relax
        # If True, newly-runnable tasks are scheduled LIFO (depth first);
        # otherwise FIFO (see get_runabletask).
        self.depth_first = depth_first
        # Items listed in ASSUME_PROVIDED ("type:name" strings) are never resolved.
        self._assume_provided = (self.config.get("ASSUME_PROVIDED")
                                 or "").split()
        self.runable = []
        self.metahashable = []
        self.cookbook.db.execute("ATTACH ':memory:' AS runq")
        self.dbc = self.cookbook.db.cursor()
        self.init_db()
        return
    def init_db(self):
        """Create the runq schema (provider cache, task state, dependency
        edges and recursive-dependency cache) in the attached in-memory db.
        """
        # Cache of resolved item -> provider package (see _set_provider).
        self.dbc.execute(
            "CREATE TABLE IF NOT EXISTS runq.provider ( "
            "type TEXT, "
            "item TEXT, "
            "version TEXT, "
            "package INTEGER )")
        # Per-task state: build/relax/prime flags, stamp mtime and the
        # various hashes used to decide whether a task must be re-run.
        self.dbc.execute(
            "CREATE TABLE IF NOT EXISTS runq.task ( "
            "task INTEGER, "
            "prime INTEGER, "
            "build INTEGER, "
            "relax INTEGER, "
            "status INTEGER, "
            "metahash TEXT, "
            "mtime REAL, "
            "tmphash TEXT, "
            "buildhash TEXT, "
            "UNIQUE (task) ON CONFLICT IGNORE )")
        # Dependency edges between tasks, optionally tagged with the package
        # that carries the dependency and a prebake filename.
        self.dbc.execute(
            "CREATE TABLE IF NOT EXISTS runq.depend ( "
            "id INTEGER PRIMARY KEY, "
            "task INTEGER, " # references task.id
            "prime INTEGER, " # boolean
            "parent_task INTEGER, " # references task.id
            "deptype TEXT DEFAULT '', "
            "package INTEGER DEFAULT -1, " # package
            "filename TEXT, "
            "prebake INTEGER, "
            "UNIQUE (task, parent_task, deptype, package) "
            "ON CONFLICT IGNORE )")
        # Cache of recursive package dependencies (see set_recdepends).
        self.dbc.execute(
            "CREATE TABLE IF NOT EXISTS runq.recdepend ( "
            "deptype TEXT, "
            "package INTEGER, "
            "parent_package INTEGER )")
        return
def assume_provided(self, item):
return "%s:%s"%(item.type, item.name) in self._assume_provided
def add_something(self, something, task_name):
return (self.add_recipe(something, task_name) or
self.add_provider(something, task_name))
def add_provider(self, item, task_name):
#print "add_provider(%s, %s)"%(item, task_name)
package = self.get_provider(item, allow_no_provider=True)
if not package:
return False
return self._add_package(package, task_name)
def add_recipe(self, recipe, task_name):
if not isinstance(recipe, oelite.recipe.OEliteRecipe):
recipe = self.cookbook.get_recipe(name=recipe, strict=False)
if not recipe:
return False
return self._add_recipe(recipe, task_name, primary=True)
def add_package(self, item, task_name):
package = self.cookbook.get_package(name=item.name, type=item.type)
if not package:
return False
return self._add_package(package, task_name)
def _add_package(self, package, task_name):
if not package:
return False
return self._add_recipe(
self.cookbook.get_recipe(package=package.id),
task_name, primary=True)
    def _add_recipe(self, recipe, task_name, primary=False):
        """Add *task_name* of *recipe* and (transitively) all its dependency
        tasks to the run queue, setting build/relax flags along the way.

        Raises NoSuchTask when the recipe has no such task, and
        RecursiveDepends (re-raised with recipe context) on dependency cycles.
        """
        if primary:
            primary_recipe = recipe
        else:
            primary_recipe = None
        #print "_add_recipe %s:%s"%(recipe, task_name)
        primary_task = self.cookbook.get_task(recipe=recipe, name=task_name)
        if not primary_task:
            raise NoSuchTask("%s:%s"%(recipe.name, task_name))
        # Breadth-wise worklist expansion: alltasks holds everything seen,
        # addedtasks the frontier added in the previous round.
        alltasks = set([])
        addedtasks = set([primary_task])
        while addedtasks:
            self.add_runq_tasks(addedtasks)
            alltasks.update(addedtasks)
            newtasks = set([])
            for task in addedtasks:
                recipe = self.cookbook.get_recipe(task=task)
                if task == primary_task or self.is_task_primary(task):
                    is_primary_task = True
                else:
                    is_primary_task = False
                if recipe == primary_recipe:
                    is_primary_recipe = True
                else:
                    is_primary_recipe = False
                # set rebuild flag (based on
                # --rebuild/--rebuildall/--reallyrebuildall)
                if ((self.rebuild >= 1 and is_primary_recipe) or
                    (self.rebuild >= 1 and
                     (recipe.get("REBUILD") or "0") == "1") or
                    (self.rebuild == 2 and
                     recipe.get("REBUILDALL_SKIP") != "1") or
                    (self.rebuild == 3)):
                    self.set_task_build(task)
                # set relax flag (based on --sloppy/--relaxed)
                # NOTE(review): by Python precedence this parses as
                # (not is_primary_task and relax==2) or (relax==1 and RELAXED),
                # so the relax==1 case also applies to primary tasks —
                # confirm whether that is intended.
                if (not is_primary_task and
                    (self.relax == 2) or
                    (self.relax == 1 and recipe.get("RELAXED"))):
                    self.set_task_relax(task)
                try:
                    (task_depends, package_depends) = \
                        self.task_dependencies(task)
                    self.add_runq_task_depends(task, task_depends)
                    newtasks.update(task_depends)
                    for (deptype, depends) in package_depends.items():
                        self.add_runq_package_depends(task, deptype, depends)
                        newtasks.update(map(operator.itemgetter(0), depends))
                except RecursiveDepends, e:
                    # Re-raise with the recipe/task that triggered the cycle.
                    recipe = self.cookbook.get_recipe(task=task)
                    raise RecursiveDepends(e.args[0], "%s (%s)"%(
                        recipe.name, task))
            addedtasks = newtasks.difference(alltasks)
        self.set_task_primary(primary_task)
        return True
    def task_dependencies(self, task, flatten=False):
        """Compute and record the dependencies of *task*.

        Returns (task_depends, package_depends) where task_depends is a set
        of task objects and package_depends maps DEPTYPE -> set of
        (task, package) tuples; with flatten=True a single flat set of tasks
        is returned instead. As a side effect, all discovered dependencies
        are inserted into the runq tables.
        """
        # must return tuple of (task_depends, package_depends), where
        # task_depends is a list or set of task_id's
        # and package_depends is a dictionary of
        # DEPENDTYPE -> set of (task_id, package_id) tuples
        # or if flatten=True, just a set of task_id's
        recipe = self.cookbook.get_recipe(task=task)
        # add recipe-internal task parents
        # (ie. addtask X before Y after Z)
        #parents = self.cookbook.get_task_parents(task) or []
        parents = task.get_parents()
        task_depends = set(parents)
        package_depends = {}
        # helper to add multiple task_depends
        def add_task_depends(task_names, recipes):
            for task_name in task_names:
                for recipe in recipes:
                    task = self.cookbook.get_task(recipe, task_name)
                    if task:
                        task_depends.add(task)
                    else:
                        debug("not adding unsupported task %s:%s"%(
                                recipe, task_name))
        # helper to record (task, package) pairs under a deptype
        def add_package_depends(task_names, deptype, depends):
            if not depends:
                return
            if not deptype in package_depends.keys():
                package_depends[deptype] = set([])
            for task_name in task_names:
                for package in depends:
                    assert isinstance(package, oelite.package.OElitePackage)
                    recipe = package.recipe.id
                    task = self.cookbook.get_task(recipe=recipe, name=task_name)
                    if not task:
                        die("cannot add unsupported task %s:%s"%(
                                package.recipe.name, task_name))
                    package_depends[deptype].add((task, package))
        # add deptask dependencies
        # (ie. do_sometask[deptask] = "DEPTYPE:do_someothertask")
        for deptype in ("DEPENDS", "RDEPENDS", "FDEPENDS"):
            deptasks = task.get_deptasks([deptype])
            if deptasks:
                # get list of packages providing the dependencies
                depends = self.get_depends(
                    recipe.type, recipe.get_depends([deptype]), deptype,
                    needed_by='recipe %s'%(recipe))
                # add each deptask for each package
                add_package_depends(deptasks, deptype, depends)
        # add recursive depends tasks
        # (ie. do_sometask[recdeptask] = "DEPTYPE:do_someothertask")
        for deptype in ("DEPENDS", "RDEPENDS", "FDEPENDS"):
            recdeptasks = task.get_recdeptasks([deptype])
            if recdeptasks:
                # get cumulative list of packages providing the dependencies
                # and recursively the corresponding package dependencies
                if deptype == "FDEPENDS":
                    rec_deptype = "DEPENDS"
                else:
                    rec_deptype = deptype
                depends = self.get_depends(
                    recipe.type, recipe.get_depends([deptype]), deptype,
                    rec_deptype, needed_by='recipe %s'%(recipe))
                # add each recdeptask for each package
                add_package_depends(recdeptasks, deptype, depends)
        # add inter-task dependencies
        # (ie. do_sometask[depends] = "itemname:do_someothertask")
        # NOTE(review): the loop body below unconditionally raises on the
        # first taskdepend (after clobbering the local 'task'), so the
        # remainder of the loop is dead code kept for when inter-task
        # dependencies become supported.
        taskdepends = task.get_taskdepends()
        for taskdepend in taskdepends:
            task = self.cookbook.get_task(task=task)
            raise Exception("OE-lite does not currently support inter-task dependencies! %s:%s"%(recipe.name, task.name))
            if self.assume_provided(taskdepend[0]):
                #debug("ASSUME_PROVIDED %s"%(
                #        self.get_item(taskdepend[0])))
                continue
            (recipe, package) = self.get_recipe_provider(taskdepend[0])
            if not recipe:
                raise NoProvider(taskdepend[0], str(task))
            add_task_depends([taskdepend[1]], [recipe])
        # can self references occur?
        #if task in tasks:
        #    die("self reference for task %s %s"%(
        #            task, self.cookbook.get_task(task=task)))
        # we are _not_ checking for multiple providers of the same
        # thing, as it is considered (in theory) to be a valid
        # use-case
        # add task dependency information to runq db
        self.add_runq_task_depends(task, task_depends)
        for (deptype, packages) in package_depends.items():
            self.add_runq_package_depends(task, deptype, packages)
        if flatten:
            depends = set([])
            for depend in task_depends:
                depends.add(depend)
            for (deptype, _depends) in package_depends.items():
                depends.update(map(operator.itemgetter(0), list(_depends)))
            return depends
        return (task_depends, package_depends)
    def get_depends(self, context, items, deptype, rec_deptype=None,
                    needed_by=None, ignore_missing=False):
        """Resolve *items* (dependency item names, interpreted in *context*)
        to the set of provider packages, recursing through *rec_deptype*
        dependencies when given.

        Returns a set of package objects. Dies via die() on missing
        providers unless ignore_missing is set.
        """
        # return list/set of packages
        def resolve_dependency(item, recursion_path, deptype):
            # Non-recursive mode: resolve this item only.
            if not rec_deptype:
                if self.assume_provided(item):
                    debug("ASSUME_PROVIDED %s"%(item))
                    return set([])
                try:
                    (recipe, package) = self.get_recipe_provider(item)
                except NoProvider, e:
                    if ignore_missing:
                        return set([])
                    if len(e.args) < 2 and len(recursion_path[0]):
                        raise NoProvider(e.args[0], recursion_path[0][-1])
                    raise
                return set([package])
            if self.assume_provided(item):
                #debug("ASSUME_PROVIDED %s"%(item))
                return set([])
            try:
                (recipe, package) = self.get_recipe_provider(item)
            except NoProvider, e:
                if ignore_missing:
                    return set([])
                if len(e.args) < 2 and len(recursion_path[0]):
                    raise NoProvider(e.args[0], recursion_path[0][-1])
                raise
            # detect circular package dependencies
            if str(package) in recursion_path[0]:
                # actually, this might not be a bug/problem.......
                # Fx: package X rdepends on package Y, and package Y
                # rdepends on package X.  As long as X and Y can be
                # built, anyone rdepend'ing on either X or Y will just
                # get both.  Bad circular dependencies must be
                # detected at runq task level.  If we cannot build a
                # recipe because of circular task dependencies, it is
                # clearly a bug.  Improve runq detection of this by
                # always simulating runq execution before starting,
                # and checking that all tasks can be completed, and if
                # some tasks are unbuildable, print out remaining
                # tasks and their dependencies.
                # on the other hand.... circular dependencies can be
                # arbitrarely complex, and it is pretty hard to handle
                # them generally, so better refuse to handle any of
                # them, to avoid having to add more and more complex
                # code to handle growingly sophisticated types of
                # circular dependencies.
                err("circular dependency while resolving %s"%(item))
                depends = []
                recursion_path[0].append(package)
                recursion_path[1].append(item)
                for i in xrange(len(recursion_path[0])):
                    depend_package = str(recursion_path[0][i])
                    depend_item = str(recursion_path[1][i])
                    if depend_item == depend_package:
                        depends.append(depend_package)
                    else:
                        depends.append("%s (%s)"%(depend_package, depend_item))
                #raise RecursiveDepends(depends) Simply break the circular
                # dependency here. It is not possible to determine if it is a
                # problem or not here, as this has to be done at task level
                # instead.
                return set([])
            # Recipe/task based circular dependencies are detected
            # later on when the entire runq has been constructed
            recursion_path[0].append(str(package))
            recursion_path[1].append(str(item))
            # try to get cached recdepends list of packages
            packages = self.get_recdepends(package, [deptype])
            if packages:
                return packages + [package]
            packages = set([])
            depends = self.cookbook.get_package_depends(package, [deptype])
            if depends:
                for depend in depends:
                    # Each branch gets its own copy of the recursion path.
                    _recursion_path = copy.deepcopy(recursion_path)
                    _packages = resolve_dependency(
                        oelite.item.OEliteItem(depend, (deptype, package.type)),
                        _recursion_path, deptype)
                    packages.update(_packages)
                # Cache the computed recursive dependency set.
                self.set_recdepends(package, deptype, packages)
            packages.add(package)
            return packages
        depends = set([])
        depends.update(items)
        packages = set([])
        for depend in depends:
            try:
                _packages = resolve_dependency(
                    oelite.item.OEliteItem(depend, (deptype, context)),
                    ([], []), rec_deptype)
            except NoProvider, e:
                if len(e.args) < 2:
                    _needed_by = needed_by
                else:
                    _needed_by = e.args[1]
                raise die("No provider for %s (needed by %s)"%(
                        e.args[0], _needed_by))
            packages.update(_packages)
        return packages
def get_recipe_provider(self, item):
package = self.get_provider(item)
if not package:
raise NoProvider(item)
recipe = self.cookbook.get_recipe(package=package)
return (recipe, package)
def _set_provider(self, item, package):
if item.version is None:
self.dbc.execute(
"INSERT INTO runq.provider (type, item, package) VALUES (?, ?, ?)",
(item.type, item.name, package.id))
else:
self.dbc.execute(
"INSERT INTO runq.provider (type, item, version, package) VALUES (?, ?, ?, ?)",
(item.type, item.name, item.version, package.id))
return
def _get_provider(self, item):
if item.version is None:
package_id = flatten_single_value(self.dbc.execute(
"SELECT package FROM runq.provider WHERE type=? AND item=? AND version IS NULL",
(item.type, item.name)))
else:
package_id = flatten_single_value(self.dbc.execute(
"SELECT package FROM runq.provider WHERE type=? AND item=? AND version=?",
(item.type, item.name, item.version)))
if not package_id:
return None
return self.cookbook.get_package(id=package_id)
    def get_provider(self, item, allow_no_provider=False):
        """
        Return package provider of item.

        Resolution order: cached provider, single candidate, otherwise
        choose by priority and then by latest version. Raises NoProvider
        (unless allow_no_provider) and MultipleProviders on ambiguity.
        """
        assert isinstance(item, oelite.item.OEliteItem)
        provider = self._get_provider(item)
        if provider:
            assert item.version is None or item.version == provider.version
            return provider
        def choose_provider(providers):
            # Lazy import; bb.utils.vercmp_part is only needed here.
            import bb.utils
            # filter out all but the highest priority providers
            highest_priority = providers[0].priority
            for i in range(1, len(providers)):
                if providers[i].priority != highest_priority:
                    del providers[i:]
                    break
            if len(providers) == 1:
                self._set_provider(item, providers[0])
                return providers[0]
            # filter out all but latest versions
            latest = {}
            for i in range(len(providers)):
                if not providers[i].name in latest:
                    latest[providers[i].name] = [ providers[i] ]
                    continue
                vercmp = bb.utils.vercmp_part(
                    latest[providers[i].name][0].version, providers[i].version)
                if vercmp < 0:
                    latest[providers[i].name] = [ providers[i] ]
                elif vercmp == 0:
                    latest[providers[i].name].append(providers[i])
            if len(latest) == 1:
                # NOTE: dict.values()[0] / itervalues below are Python 2 only.
                package = latest.values()[0][0]
                self._set_provider(item, package)
                return package
            if len(latest) > 1:
                multiple_providers = []
                for provider in latest.itervalues():
                    multiple_providers.append(str(provider[0]))
                raise MultipleProviders(
                    "multiple providers for %s: "%(item) + " ".join(multiple_providers))
            raise Exception("code path should never go here...")
        providers = self.cookbook.get_providers(
            item.type, item.name, item.version)
        if len(providers) == 0:
            if allow_no_provider:
                return None
            raise NoProvider(item)
        elif len(providers) == 1:
            self._set_provider(item, providers[0])
            return providers[0]
        return choose_provider(providers)
    def update_task(self, task):
        """Decide whether *task* must be re-run based on recipe/src/dep hashes.

        NOTE(review): this method references many names that are not defined
        in this module's visible scope (get_recipe_datahash, fetch, hashit,
        recipehash, srchash, dephash, has_build, build_datahash, ...).
        It appears to be unfinished design/pseudo-code; calling it as-is
        would raise NameError. Confirm before use.
        """
        get_recipe_datahash(task.recipe)
        if task is fetch:
            get_recipe_srchash(task.recipe)
        get_dependencies_hash(task)
        taskhash = hashit(recipehash, srchash, dephash)
        run=0
        if has_build(task):
            if datahash != build_datahash(task):
                info("recipe changes trigger run")
                run=1
            if srchash != build_srchash(task):
                info("src changes trigger run")
                run=1
            if dephash != build_dephash(task):
                info("dep changes trigger run")
                run=1
        else:
            info("no existing build")
            run=1
        if run:
            set_runable(task, datahash, srchash, dephash)
            # this marks task for run
            # and saves a combined taskhash for following
            # iterations (into runq_taskdepend.hash)
            # and all hashes for saving with build
            # result
        set_runq_taskdepend_checked(task)
        return
    def get_runabletask(self):
        """Pop and return the next runnable task, or None when nothing is
        ready.

        Newly ready tasks are marked pending when first seen. Since
        runable.pop() takes from the end, appending new tasks (depth_first)
        yields LIFO/depth-first order, while prepending yields FIFO order.
        """
        newrunable = self.get_readytasks()
        if newrunable:
            if self.depth_first:
                self.runable += newrunable
            else:
                self.runable = newrunable + self.runable
            for task_id in newrunable:
                task = self.cookbook.get_task(id=task_id)
                self.set_task_pending(task)
        if not self.runable:
            return None
        task_id = self.runable.pop()
        if not task_id:
            return None
        return self.cookbook.get_task(id=task_id)
def get_metahashable_task(self):
if not self.metahashable:
self.metahashable = list(self.get_metahashable_tasks())
if not self.metahashable:
return None
task_id = self.metahashable.pop()
if not task_id:
return None
return self.cookbook.get_task(id=task_id)
def mark_done(self, task, delete=True):
return self.set_task_done(task, delete)
    def get_recipes_with_tasks_to_build(self):
        """Return one (id, type, name, version, build-count) row per recipe
        that has at least one task flagged for build.
        """
        # We need to use two different cursors, since the outer query is
        # still being iterated while the inner one runs.
        dbc = self.dbc.connection.cursor()
        recipes = []
        for row in self.dbc.execute(
            "SELECT DISTINCT task.recipe "
            "FROM runq.task, task "
            "WHERE runq.task.build IS NOT NULL "
            "AND runq.task.task=task.id"):
            r = dbc.execute(
                "SELECT recipe.id, recipe.type, recipe.name, recipe.version, "
                "COUNT(runq.task.build) "
                "FROM runq.task, task, recipe "
                "WHERE recipe.id=? "
                "AND runq.task.task=task.id AND task.recipe=recipe.id ",
                (row[0],))
            recipes.append(r.fetchone())
        return recipes
def get_tasks(self):
tasks = []
for row in self.dbc.execute("SELECT task FROM runq.task"):
tasks.append(self.cookbook.get_task(row[0]))
return tasks
    def print_runq_tasks(self):
        """Debug helper: dump all runq.task rows followed by recipe:task name.

        NOTE(review): self.get_recipe is not defined on this class in the
        visible code — elsewhere the pattern is self.cookbook.get_recipe;
        confirm whether this line would raise AttributeError when run.
        """
        runq_tasks = self.dbc.execute(
            "SELECT prime,build,status,relax,metahash,tmphash,mtime,task "
            "FROM runq.task").fetchall()
        for row in runq_tasks:
            for col in row:
                print "%s "%(col),
            print "%s:%s"%(self.get_recipe(task=row[7]).get_name(),
                           self.cookbook.get_task(task=row[7]))
        return
    def get_tasks_to_build_description(self, hashinfo=False):
        """Return human-readable "type:name_version:task" strings for all
        tasks flagged for build, optionally with meta/tmp/build hashes.
        """
        tasks = []
        # Reuse the parameter as the SQL column-suffix string.
        if hashinfo:
            hashinfo = ", runq.task.metahash, runq.task.tmphash, runq.task.buildhash"
        else:
            hashinfo = ""
        for row in self.dbc.execute(
            "SELECT recipe.type, recipe.name, recipe.version, task.name%s "
            "FROM runq.task, task, recipe "
            "WHERE runq.task.build IS NOT NULL "
            "AND runq.task.task=task.id "
            "AND task.recipe=recipe.id "
            "ORDER BY recipe.id DESC, task.name"%(hashinfo)):
            row = list(row)
            # "machine" recipes get no type prefix.
            if row[0] == "machine":
                row[0] = ""
            else:
                row[0] += ":"
            row = tuple(row)
            if hashinfo:
                tasks.append("%s%s_%s:%s  meta=%s tmp=%s build=%s"%row)
            else:
                tasks.append("%s%s_%s:%s"%row)
        return tasks
def number_of_runq_tasks(self):
return flatten_single_value(self.dbc.execute(
"SELECT COUNT(*) FROM runq.task"))
def number_of_tasks_to_build(self):
return flatten_single_value(self.dbc.execute(
"SELECT COUNT(*) FROM runq.task WHERE build IS NOT NULL"))
def add_runq_task(self, task):
assert isinstance(task, int)
self.dbc.execute(
"INSERT INTO runq.task (task) VALUES (?)", (task,))
return
    def add_runq_tasks(self, tasks):
        """Bulk-insert task ids into runq.task.

        Side effect: for each do_package task, compute the recursive
        provides of its packages and store them as __provides flags on the
        recipe metadata before insertion.
        """
        def task_id_tuple(v):
            return (v.id,)
        for task in tasks:
            if task.name != 'do_package':
                continue
            for package in task.recipe.get_packages():
                for deptype in ('DEPENDS', 'RDEPENDS'):
                    provides = package.get_recprovides(deptype,
                                                       self.get_depends)
                    if provides:
                        task.recipe.meta.set_flag(
                            '%s_%s'%(deptype, package.name),
                            '__provides', provides)
        tasks = map(task_id_tuple, tasks)
        self.dbc.executemany(
            "INSERT INTO runq.task (task) VALUES (?)", (tasks))
        return
def add_runq_package_depends(self, task, deptype, depends):
if not depends:
return
assert isinstance(task, oelite.task.OEliteTask)
for (parent_task, package) in depends:
self.add_runq_depend(task, parent_task, deptype, package)
def add_runq_depend(self, task, parent_task, deptype=None, package=None):
assert isinstance(task, oelite.task.OEliteTask)
assert isinstance(parent_task, oelite.task.OEliteTask)
if package:
assert deptype in ("DEPENDS", "RDEPENDS", "FDEPENDS")
assert isinstance(package, oelite.package.OElitePackage)
self.dbc.execute(
"INSERT INTO runq.depend "
"(task, parent_task, deptype, package) "
"VALUES (?, ?, ?, ?)",
(task.id, parent_task.id, deptype, package.id))
else:
self.dbc.execute(
"INSERT INTO runq.depend (task, parent_task) "
"VALUES (?, ?)", (task.id, parent_task.id))
return
def add_runq_task_depends(self, task, depends):
def task_tuple(depend):
return (task.id, depend.id)
values = map(task_tuple, depends)
self.dbc.executemany(
"INSERT INTO runq.depend (task, parent_task) VALUES (?, ?)", values)
return
def set_package_filename(self, package, filename, prebake=False):
assert isinstance(package, int)
if prebake:
self.dbc.execute(
"UPDATE runq.depend SET filename=?, prebake=1 "
"WHERE package=?",
(filename, package))
else:
self.dbc.execute(
"UPDATE runq.depend SET filename=? "
"WHERE package=?",
(filename, package))
return
    def prune_prebaked_runq_depends(self):
        """Detach (parent_task=NULL) dependency edges onto tasks whose every
        incoming dependency is satisfied by a prebaked package, so those
        tasks need not be executed.
        """
        tasks = flatten_single_column_rows(self.dbc.execute(
            "SELECT"
            "  task "
            "FROM"
            "  runq.task "
            "WHERE"
            "  EXISTS " # something depends on it
            "    (SELECT *"
            "     FROM runq.depend"
            "     WHERE parent_task=runq.task.task"
            "     LIMIT 1)"
            "  AND NOT EXISTS " # and no task-based dependencies on it
            "    (SELECT * FROM runq.depend "
            "     WHERE runq.depend.parent_task=runq.task.task"
            "     AND (runq.depend.package < 0)"
            "     LIMIT 1)"
            "  AND NOT EXISTS " # and no non-prebaked dependencies on it
            "    (SELECT *"
            "     FROM runq.depend"
            "     WHERE runq.depend.parent_task=runq.task.task"
            "     AND (runq.depend.package >= 0)"
            "     AND runq.depend.prebake IS NULL"
            "     LIMIT 1)"
            ))
        # Null out the parent link so the dependency no longer blocks anything.
        for task in tasks:
            self.dbc.execute(
                "UPDATE runq.depend SET parent_task=NULL WHERE parent_task=?",
                (task,))
        return
def get_package_filename(self, package):
#assert isinstance(package, int)
assert isinstance(package, oelite.package.OElitePackage)
return flatten_single_value(self.dbc.execute(
"SELECT filename "
"FROM runq.depend "
"WHERE package=? "
"LIMIT 1", (package.id,)))
def set_recdepends(self, package, deptype, recdepends):
if not recdepends:
return
assert isinstance(package, oelite.package.OElitePackage)
def task_tuple(depend):
return (deptype, package.id, depend.id)
recdepends = map(task_tuple, recdepends)
self.dbc.executemany(
"INSERT INTO runq.recdepend "
"(deptype, package, parent_package) "
"VALUES (?, ?, ?)", recdepends)
return
def get_recdepends(self, package, deptypes):
assert isinstance(package, oelite.package.OElitePackage)
assert isinstance(deptypes, list) and len(deptypes) > 0
recdepends = []
for package_id in self.dbc.execute(
"SELECT parent_package "
"FROM runq.recdepend "
"WHERE deptype IN (%s) "%(",".join("?" for i in deptypes)) +
"AND package=?", (deptypes + [package.id])):
recdepends.append(self.cookbook.get_package(id=package_id))
return recdepends
    def get_readytasks(self):
        """Return ids of tasks flagged for build, not yet started, and with
        no remaining dependency on a task that still has to be built.

        NOTE(review): the first NOT EXISTS subquery joins runq.task AS
        parent_task but never correlates it, making it a cross join —
        confirm whether that branch behaves as intended.
        """
        return flatten_single_column_rows(self.dbc.execute(
            "SELECT"
            "  task "
            "FROM"
            "  runq.task "
            "WHERE"
            "  build=1 AND status IS NULL "
            "  AND ("
            "   NOT EXISTS"
            "    (SELECT * FROM runq.depend, runq.task AS parent_task"
            "     WHERE runq.depend.task=runq.task.task"
            "     AND runq.depend.parent_task IS NOT NULL"
            "     LIMIT 1)"
            "   OR NOT EXISTS"
            "    (SELECT * FROM runq.depend, runq.task AS parent_task"
            "     WHERE runq.depend.task=runq.task.task"
            "     AND runq.depend.parent_task=parent_task.task"
            "     AND parent_task.build IS NOT NULL"
            "     LIMIT 1))"))
    def print_metahashable_tasks(self):
        """Debug helper: print every task still lacking a metahash, with its
        dependency edges (and the package carrying each edge, when any).
        """
        for r in self.dbc.execute(
            "SELECT task FROM runq.task WHERE metahash is NULL"):
            print self.cookbook.get_task(id=r[0])
            # Inner loop rebinds r to (parent_task, package) rows.
            for r in self.cookbook.db.execute(
                "SELECT parent_task, package "
                "FROM runq.depend "
                "WHERE task=%s"%(r[0])):
                s = str(self.cookbook.get_task(id=r[0]))
                # package = -1 means a plain task dependency (no package).
                if r[1] != -1:
                    s += " package=%s"%(self.cookbook.get_package(id=r[1]))
                print "  " +s
    def get_metahashable_tasks(self):
        """Return ids of tasks without a metahash whose parents all already
        have one, i.e. tasks whose metahash can be computed now.
        """
        return flatten_single_column_rows(self.dbc.execute(
            "SELECT task FROM runq.task "
            "WHERE metahash IS NULL AND NOT EXISTS "
            "(SELECT runq.depend.task"
            " FROM runq.depend, runq.task AS runq_task_depend"
            " WHERE runq.depend.task = runq.task.task"
            " AND runq.depend.parent_task = runq_task_depend.task"
            " AND runq_task_depend.metahash IS NULL"
            " LIMIT 1"
            ")"))
def get_unhashed_tasks(self):
tasks = []
for row in self.dbc.execute(
"SELECT task FROM runq.task "
"WHERE metahash IS NULL"):
tasks.append(self.cookbook.get_task(id=row[0]))
return tasks
def get_package_metahash(self, package):
assert isinstance(package, int)
return flatten_single_value(self.dbc.execute(
"SELECT"
" runq.task.metahash "
"FROM"
" runq.task, runq.depend "
"WHERE"
" runq.depend.parent_task=runq.task.task"
" AND runq.depend.package=? "
"LIMIT 1", (package,)))
def get_package_metahash(self, package):
return self._get_package_hash(package, "metahash")
    def get_package_buildhash(self, package):
        """Return the buildhash of the do_package task providing *package*."""
        return self._get_package_hash(package, "buildhash")
    def _get_package_hash(self, package, hash):
        """Return the named hash column of the do_package task that provides
        *package* (an integer package id).

        *hash* is interpolated into the SQL as a column name — it is only
        ever called with the fixed literals "metahash"/"buildhash".
        """
        assert isinstance(package, int)
        return flatten_single_value(self.dbc.execute(
            "SELECT"
            "  runq.task.%s "
            "FROM"
            "  runq.depend, runq.task, task "
            "WHERE"
            "  runq.depend.parent_task=runq.task.task"
            "  AND runq.depend.package=?"
            "  AND runq.task.task=task.id AND task.name='do_package' "
            "LIMIT 1"%(hash),
            (package,)))
    def get_depend_packages(self, task=None, deptype=None):
        """Return distinct package ids that appear as dependencies, either
        of one *task* or (by default) of any do_package parent task,
        optionally filtered by *deptype*.

        NOTE(review): task.id and deptype are interpolated into the SQL
        string rather than bound as parameters; callers pass internal
        values only, but parameter binding would be safer.
        """
        query = "SELECT DISTINCT package"
        query += " FROM runq.depend AS depend, task"
        query += " WHERE package >= 0"
        if task:
            assert isinstance(task, oelite.task.OEliteTask)
            query += " AND task=%d"%(task.id)
        else:
            query += " AND depend.parent_task=task.id"
            query += " AND task.name='do_package'"
        if deptype:
            query += " AND deptype='%s'"%(deptype)
        return flatten_single_column_rows(self.dbc.execute(query))
def get_packages_to_build(self):
packages = flatten_single_column_rows(self.dbc.execute(
"SELECT DISTINCT package "
"FROM runq.depend "
"WHERE package >= 0 AND prebake IS NULL"))
return set(packages)
def set_buildhash_for_build_tasks(self):
rowcount = self.dbc.execute(
"UPDATE runq.task SET buildhash=metahash WHERE build=1"
).rowcount
if rowcount == -1:
die("unable to determine rowcount in "
"set_buildhash_for_build_tasks")
return rowcount
def set_buildhash_for_nobuild_tasks(self):
rowcount = self.dbc.execute(
"UPDATE runq.task SET buildhash=tmphash WHERE build IS NULL"
).rowcount
if rowcount == -1:
die("unable to determine rowcount in "
"set_buildhash_for_nobuild_tasks")
return rowcount
    def mark_primary_runq_depends(self):
        """Propagate the prime flag from primary tasks onto their dependency
        edges. Return the number of rows updated.
        """
        rowcount = self.dbc.execute(
            "UPDATE runq.depend SET prime=1 WHERE EXISTS "
            "(SELECT * FROM runq.task"
            " WHERE runq.task.prime=1 AND runq.task.task=runq.depend.task"
            ")").rowcount
        if rowcount == -1:
            die("mark_primary_runq_depends did not work out")
        return rowcount
    def prune_runq_depends_nobuild(self):
        """Iteratively detach dependency edges whose parent task does not
        need to be rebuilt; repeat until a fixpoint is reached. Return the
        total number of edges detached.

        NOTE(review): the `rowcount == -1` check tests the accumulated
        counter (initialised to 0), never the statement's own rowcount,
        so it can never fire — likely meant to check self.dbc.rowcount.
        """
        rowcount = 0
        while True:
            self.dbc.execute(
                "UPDATE runq.depend SET parent_task=NULL "
                "WHERE parent_task IS NOT NULL AND NOT EXISTS "
                "(SELECT * FROM runq.task"
                " WHERE runq.task.build=1"
                " AND runq.task.task=runq.depend.parent_task"
                " LIMIT 1"
                ")")
            if rowcount == -1:
                die("prune_runq_depends_nobuild did not work out")
            if not self.dbc.rowcount:
                break
            rowcount += self.dbc.rowcount
        if rowcount:
            debug("pruned %d dependencies that did not have to be rebuilt"%rowcount)
        return rowcount
def prune_runq_depends_with_nobody_depending_on_it(self):
#c = self.dbc.cursor()
rowcount = 0
while True:
self.dbc.execute(
"DELETE FROM runq.depend "
"WHERE prime IS NULL AND NOT EXISTS "
"(SELECT * FROM runq.depend AS next_depend"
" WHERE next_depend.parent_task=runq.depend.task"
" LIMIT 1"
")")
if rowcount == -1:
die("prune_runq_depends_with_no_depending_tasks did not work out")
if not self.dbc.rowcount:
break
rowcount += self.dbc.rowcount
if rowcount:
debug("pruned %d dependencies which where not needed anyway"%rowcount)
return rowcount
def prune_runq_tasks(self):
rowcount = self.dbc.execute(
"UPDATE"
" runq.task "
"SET"
" build=NULL "
"WHERE"
" prime IS NULL AND NOT EXISTS"
" (SELECT *"
" FROM runq.depend"
" WHERE runq.depend.parent_task=runq.task.task"
" LIMIT 1"
")").rowcount
if rowcount == -1:
die("prune_runq_tasks did not work out")
if rowcount:
debug("pruned %d tasks that does not need to be build"%rowcount)
return rowcount
def set_task_stamp(self, task, mtime, tmphash):
assert isinstance(task, oelite.task.OEliteTask)
self.dbc.execute(
"UPDATE runq.task SET mtime=?, tmphash=? WHERE task=?",
(mtime, tmphash, task.id))
return
def set_task_build(self, task):
assert isinstance(task, oelite.task.OEliteTask)
self.dbc.execute(
"UPDATE runq.task SET build=1 WHERE task=?", (task.id,))
return
def set_task_relax(self, task):
assert isinstance(task, oelite.task.OEliteTask)
self.dbc.execute(
"UPDATE runq.task SET relax=1 WHERE task=?", (task.id,))
return
def set_task_primary(self, task):
assert isinstance(task, oelite.task.OEliteTask)
self.dbc.execute(
"UPDATE runq.task SET prime=1 WHERE task=?", (task.id,))
return
def is_task_primary(self, task):
assert isinstance(task, oelite.task.OEliteTask)
primary = self.dbc.execute(
"SELECT prime FROM runq.task WHERE task=?", (task.id,)).fetchone()
return primary[0] == 1
def is_recipe_primary(self, recipe):
primary = self.dbc.execute(
"SELECT runq.task.prime "
"FROM runq.task, task "
"WHERE task.recipe=? AND runq.task.prime IS NOT NULL "
"AND runq.task.task=task.id", (recipe,)).fetchone()
return primary and primary[0] == 1
def set_task_build_on_nostamp_tasks(self):
rowcount = self.dbc.execute(
"UPDATE runq.task SET build=1 "
"WHERE build IS NULL AND EXISTS "
"(SELECT * FROM task"
" WHERE id=runq.task.task AND nostamp=1)").rowcount
if rowcount == -1:
die("set_task_build_on_nostamp_tasks did not work out")
debug("set build flag on %d nostamp tasks"%(rowcount))
return
def set_task_build_on_retired_tasks(self):
rowcount = 0
while True:
self.dbc.execute(
"UPDATE runq.task SET build=1 "
"WHERE build IS NULL AND EXISTS "
"(SELECT * FROM runq.depend, runq.task AS parent_task"
" WHERE runq.depend.task=runq.task.task"
" AND runq.depend.parent_task=parent_task.task"
" AND parent_task.mtime > runq.task.mtime)")
if rowcount == -1:
die("set_task_build_on_retired_tasks did not work out")
if not self.dbc.rowcount:
break
rowcount += self.dbc.rowcount
debug("set build flag on %d retired tasks"%(rowcount))
return
def set_task_build_on_hashdiff(self):
rowcount = 0
while True:
self.dbc.execute(
"UPDATE runq.task SET build=1 "
"WHERE build IS NULL AND relax IS NULL AND tmphash != metahash")
if rowcount == -1:
die("set_task_build_on_hashdiff did not work out")
if not self.dbc.rowcount:
break
rowcount += self.dbc.rowcount
debug("set build flag on %d tasks with tmphash != metahash"%(rowcount))
return
def propagate_runq_task_build(self):
"""always build all tasks depending on other tasks to build"""
rowcount = 0
while True:
self.dbc.execute(
"UPDATE"
" runq.task "
"SET"
" build=1 "
"WHERE"
" build IS NULL"
" AND EXISTS"
" (SELECT *"
" FROM runq.depend, runq.task AS parent_task"
" WHERE runq.depend.task=runq.task.task"
" AND runq.depend.parent_task=parent_task.task"
" AND parent_task.build=1"
" LIMIT 1)")
if rowcount == -1:
die("propagate_runq_task_build did not work out")
if not self.dbc.rowcount:
break
rowcount += self.dbc.rowcount
debug("set build flag on %d tasks due to propagation"%(rowcount))
return
def _set_task_status(self, task, status):
assert isinstance(task, oelite.task.OEliteTask)
self.dbc.execute(
"UPDATE runq.task SET status=? WHERE task=?", (status, task.id))
return
def set_task_pending(self, task):
return self._set_task_status(task, 1)
def set_task_running(self, task):
return self._set_task_status(task, 2)
def set_task_done(self, task, delete):
assert isinstance(task, oelite.task.OEliteTask)
self._set_task_status(task, 3)
#if delete:
# self.dbc.execute(
# "DELETE FROM runq.depend WHERE parent_task=?", (task.id,))
self.dbc.execute(
"UPDATE runq.depend SET parent_task=NULL "
"WHERE parent_task=?", (task.id,))
return
def set_task_failed(self, task):
return self._set_task_status(task, -1)
def prune_done_tasks(self):
self.dbc.execute(
"DELETE FROM runq.depend WHERE EXISTS "
"( SELECT * FROM runq.task "
"WHERE runq.task.task = runq.depend.parent_task AND status=3 )")
return
def set_task_metahash(self, task, metahash):
assert isinstance(task, oelite.task.OEliteTask)
self.dbc.execute(
"UPDATE runq.task SET metahash=? WHERE task=?",
(metahash, task.id))
return
def get_task_metahash(self, task):
assert isinstance(task, oelite.task.OEliteTask)
return flatten_single_value(self.dbc.execute(
"SELECT metahash FROM runq.task WHERE task=?", (task.id,)))
def get_task_buildhash(self, task):
assert isinstance(task, oelite.task.OEliteTask)
return flatten_single_value(self.dbc.execute(
"SELECT buildhash FROM runq.task WHERE task=?", (task.id,)))
| |
from __future__ import print_function
from __future__ import absolute_import
import argparse
import logging
import sys
import traceback
import json
import os
import re
import itertools
import six
from six.moves import urllib
import pkg_resources # part of setuptools
from typing import Any, Dict, List, Union, Pattern, Text, Tuple, cast
from rdflib import Graph, plugin
from rdflib.serializer import Serializer
from . import schema
from . import jsonld_context
from . import makedoc
from . import validate
from . import codegen
from .sourceline import strip_dup_lineno
from .ref_resolver import Loader, file_uri
# Module-level logger shared by all functions in this CLI module.
_logger = logging.getLogger("salad")
from rdflib.plugin import register
from rdflib.parser import Parser
# Register the JSON-LD parser plugin so Graph.parse(format='json-ld') works.
register('json-ld', Parser, 'rdflib_jsonld.parser', 'JsonLDParser')
def printrdf(workflow, # type: str
             wf, # type: Union[List[Dict[Text, Any]], Dict[Text, Any]]
             ctx, # type: Dict[Text, Any]
             sr # type: str
             ):
    # type: (...) -> None
    """Serialize *wf* (resolved from *workflow* with *ctx*) as RDF and print it."""
    graph = jsonld_context.makerdf(workflow, wf, ctx)
    serialized = graph.serialize(format=sr, encoding='utf-8')  # type: ignore
    print(serialized.decode('utf-8'))
def regex_chunk(lines, regex):
    # type: (List[str], Pattern[str]) -> List[List[str]]
    """Split *lines* into chunks, each starting at a line matching *regex*.

    Lines before the first matching line are discarded.
    """
    remaining = list(itertools.dropwhile(lambda ln: not regex.match(ln), lines))
    chunks = []
    while remaining:
        chunk = [remaining[0]]
        chunk.extend(itertools.takewhile(lambda ln: not regex.match(ln),
                                         remaining[1:]))
        chunks.append(chunk)
        remaining = list(itertools.dropwhile(lambda ln: not regex.match(ln),
                                             remaining[1:]))
    return chunks
def chunk_messages(message): # type: (str) -> List[Tuple[int, str]]
    """Break a validation message into (indent, one-line message) tuples.

    Each chunk starts at a "file:line:col:" line; bullet items ("* ...")
    inside a chunk become separate tuples.
    """
    file_regex = re.compile(r'^(.+:\d+:\d+:)(\s+)(.+)$')
    item_regex = re.compile(r'^\s*\*\s+')
    result = []
    for chunk in regex_chunk(message.splitlines(), file_regex):
        match = file_regex.match(chunk[0])
        if not match:
            continue
        place = match.group(1)
        indent = len(match.group(2))
        body = [match.group(3)] + chunk[1:]

        def _as_line(lines):
            # Strip bullet markers and collapse all whitespace to one line.
            text = re.sub(item_regex, '', "\n".join(lines))
            return place + ' ' + re.sub(r'[\n\s]+', ' ', text)

        if any(item_regex.match(ln) for ln in body):
            for item in regex_chunk(body, item_regex):
                result.append((indent, _as_line(item)))
        else:
            result.append((indent, _as_line(body)))
    return result
def to_one_line_messages(message): # type: (str) -> str
    """Collapse *message* keeping, per run, only the most-indented entry."""
    kept = []
    deepest = (0, '')
    for indent, msg in chunk_messages(message):
        if indent > deepest[0]:
            # Deeper entry supersedes the current candidate.
            deepest = (indent, msg)
        else:
            kept.append(deepest[1])
            deepest = (indent, msg)
    kept.append(deepest[1])
    return "\n".join(kept)
def reformat_yaml_exception_message(message): # type: (str) -> str
    """Rewrite a yaml-library exception message into file:line:col form.

    Handles the two shapes the yaml library emits: 3 lines (problem only)
    and 4 lines (context + problem).  Other shapes are handled best-effort
    instead of raising (the original code hit UnboundLocalError/IndexError
    on them).
    """
    line_regex = re.compile(r'^\s+in "(.+)", line (\d+), column (\d+)$')
    fname_regex = re.compile(r'^file://'+re.escape(os.getcwd())+'/')
    msgs = message.splitlines()
    ret = []
    nblanks = 0  # bugfix: was left unset for unexpected message shapes
    if len(msgs) == 3:
        msgs = msgs[1:]
    elif len(msgs) == 4:
        c_msg = msgs[0]
        match = line_regex.match(msgs[1])
        if match:
            c_file, c_line, c_column = match.groups()
            c_file = re.sub(fname_regex, '', c_file)
            ret.append("%s:%s:%s: %s" % (c_file, c_line, c_column, c_msg))
        msgs = msgs[2:]
        nblanks = 2
    if len(msgs) >= 2:  # bugfix: guard against short messages (IndexError)
        p_msg = msgs[0]
        match = line_regex.match(msgs[1])
        if match:
            p_file, p_line, p_column = match.groups()
            p_file = re.sub(fname_regex, '', p_file)
            ret.append("%s:%s:%s:%s %s" % (p_file, p_line, p_column, ' '*nblanks, p_msg))
    return "\n".join(ret)
def main(argsl=None):  # type: (List[str]) -> int
    """CLI entry point: validate a salad schema and optionally a document.

    Returns 0 on success, 1 on any load or validation failure.
    """
    if argsl is None:
        argsl = sys.argv[1:]
    parser = argparse.ArgumentParser()
    parser.add_argument("--rdf-serializer",
                        help="Output RDF serialization format used by --print-rdf (one of turtle (default), n3, nt, xml)",
                        default="turtle")
    exgroup = parser.add_mutually_exclusive_group()
    exgroup.add_argument("--print-jsonld-context", action="store_true",
                         help="Print JSON-LD context for schema")
    exgroup.add_argument(
        "--print-rdfs", action="store_true", help="Print RDF schema")
    exgroup.add_argument("--print-avro", action="store_true",
                         help="Print Avro schema")
    exgroup.add_argument("--print-rdf", action="store_true",
                         help="Print corresponding RDF graph for document")
    exgroup.add_argument("--print-pre", action="store_true",
                         help="Print document after preprocessing")
    exgroup.add_argument(
        "--print-index", action="store_true", help="Print node index")
    exgroup.add_argument("--print-metadata",
                         action="store_true", help="Print document metadata")
    exgroup.add_argument("--print-inheritance-dot",
                         action="store_true", help="Print graphviz file of inheritance")
    exgroup.add_argument("--print-fieldrefs-dot",
                         action="store_true", help="Print graphviz file of field refs")
    exgroup.add_argument("--codegen", type=str, metavar="language", help="Generate classes in target language, currently supported: python")
    exgroup.add_argument("--print-oneline", action="store_true",
                         help="Print each error message in oneline")
    exgroup = parser.add_mutually_exclusive_group()
    exgroup.add_argument("--strict", action="store_true", help="Strict validation (unrecognized or out of place fields are error)",
                         default=True, dest="strict")
    exgroup.add_argument("--non-strict", action="store_false", help="Lenient validation (ignore unrecognized fields)",
                         default=True, dest="strict")
    exgroup = parser.add_mutually_exclusive_group()
    exgroup.add_argument("--verbose", action="store_true",
                         help="Default logging")
    exgroup.add_argument("--quiet", action="store_true",
                         help="Only print warnings and errors.")
    exgroup.add_argument("--debug", action="store_true",
                         help="Print even more logging")
    parser.add_argument("schema", type=str, nargs="?", default=None)
    parser.add_argument("document", type=str, nargs="?", default=None)
    parser.add_argument("--version", "-v", action="store_true",
                        help="Print version", default=None)
    args = parser.parse_args(argsl)
    if args.version is None and args.schema is None:
        print('%s: error: too few arguments' % sys.argv[0])
        return 1
    if args.quiet:
        _logger.setLevel(logging.WARN)
    if args.debug:
        _logger.setLevel(logging.DEBUG)
    pkg = pkg_resources.require("schema_salad")
    if pkg:
        if args.version:
            print("%s Current version: %s" % (sys.argv[0], pkg[0].version))
            return 0
        else:
            _logger.info("%s Current version: %s", sys.argv[0], pkg[0].version)
    # Get the metaschema to validate the schema
    metaschema_names, metaschema_doc, metaschema_loader = schema.get_metaschema()
    # Load schema document and resolve refs
    schema_uri = args.schema
    if not (urllib.parse.urlparse(schema_uri)[0] and urllib.parse.urlparse(schema_uri)[0] in [u'http', u'https', u'file']):
        schema_uri = file_uri(os.path.abspath(schema_uri))
    schema_raw_doc = metaschema_loader.fetch(schema_uri)
    try:
        schema_doc, schema_metadata = metaschema_loader.resolve_all(
            schema_raw_doc, schema_uri)
    except (validate.ValidationException) as e:
        _logger.error("Schema `%s` failed link checking:\n%s",
                      args.schema, e, exc_info=(True if args.debug else False))
        _logger.debug("Index is %s", list(metaschema_loader.idx.keys()))
        _logger.debug("Vocabulary is %s", list(metaschema_loader.vocab.keys()))
        return 1
    except (RuntimeError) as e:
        _logger.error("Schema `%s` read error:\n%s",
                      args.schema, e, exc_info=(True if args.debug else False))
        return 1
    # Optionally print the schema after ref resolution
    if not args.document and args.print_pre:
        print(json.dumps(schema_doc, indent=4))
        return 0
    if not args.document and args.print_index:
        print(json.dumps(list(metaschema_loader.idx.keys()), indent=4))
        return 0
    # Validate the schema document against the metaschema
    try:
        schema.validate_doc(metaschema_names, schema_doc,
                            metaschema_loader, args.strict,
                            source=schema_metadata.get("name"))
    except validate.ValidationException as e:
        _logger.error("While validating schema `%s`:\n%s" %
                      (args.schema, str(e)))
        return 1
    # Get the json-ld context and RDFS representation from the schema
    metactx = {}  # type: Dict[str, str]
    if isinstance(schema_raw_doc, dict):
        metactx = schema_raw_doc.get("$namespaces", {})
        if "$base" in schema_raw_doc:
            metactx["@base"] = schema_raw_doc["$base"]
    if schema_doc is not None:
        (schema_ctx, rdfs) = jsonld_context.salad_to_jsonld_context(
            schema_doc, metactx)
    else:
        raise Exception("schema_doc is None??")
    # Create the loader that will be used to load the target document.
    document_loader = Loader(schema_ctx)
    if args.codegen:
        codegen.codegen(args.codegen, cast(List[Dict[Text, Any]], schema_doc),
                        schema_metadata, document_loader)
        return 0
    # Make the Avro validation that will be used to validate the target
    # document
    if isinstance(schema_doc, list):
        (avsc_names, avsc_obj) = schema.make_avro_schema(
            schema_doc, document_loader)
    else:
        _logger.error("Schema `%s` must be a list.", args.schema)
        return 1
    if isinstance(avsc_names, Exception):
        _logger.error("Schema `%s` error:\n%s", args.schema,  # type: ignore
                      avsc_names, exc_info=(
                          (type(avsc_names), avsc_names, None) if args.debug
                          else None))
        if args.print_avro:
            print(json.dumps(avsc_obj, indent=4))
        # Bugfix: always fail here.  Previously the `return 1` was nested
        # under --print-avro, so a broken schema could fall through and be
        # passed (as an Exception) to validate_doc below.
        return 1
    # Optionally print Avro-compatible schema from schema
    if args.print_avro:
        print(json.dumps(avsc_obj, indent=4))
        return 0
    # Optionally print the json-ld context from the schema
    if args.print_jsonld_context:
        j = {"@context": schema_ctx}
        print(json.dumps(j, indent=4, sort_keys=True))
        return 0
    # Optionally print the RDFS graph from the schema
    if args.print_rdfs:
        print(rdfs.serialize(format=args.rdf_serializer).decode('utf-8'))  # type: ignore
        return 0
    if args.print_metadata and not args.document:
        print(json.dumps(schema_metadata, indent=4))
        return 0
    if args.print_inheritance_dot:
        schema.print_inheritance(schema_doc, sys.stdout)
        return 0
    if args.print_fieldrefs_dot:
        schema.print_fieldrefs(schema_doc, document_loader, sys.stdout)
        return 0
    # If no document specified, all done.
    if not args.document:
        print("Schema `%s` is valid" % args.schema)
        return 0
    # Load target document and resolve refs
    try:
        uri = args.document
        if not urllib.parse.urlparse(uri)[0]:
            # Bugfix: the converted file URI was previously assigned to an
            # unused variable `doc`, so relative paths were never converted
            # before being resolved.
            uri = file_uri(os.path.abspath(uri))
        document, doc_metadata = document_loader.resolve_ref(uri)
    except validate.ValidationException as e:
        msg = strip_dup_lineno(six.text_type(e))
        msg = to_one_line_messages(str(msg)) if args.print_oneline else msg
        _logger.error("Document `%s` failed validation:\n%s",
                      args.document, msg, exc_info=args.debug)
        return 1
    except RuntimeError as e:
        msg = strip_dup_lineno(six.text_type(e))
        msg = reformat_yaml_exception_message(str(msg))
        msg = to_one_line_messages(msg) if args.print_oneline else msg
        _logger.error("Document `%s` failed validation:\n%s",
                      args.document, msg, exc_info=args.debug)
        return 1
    # Optionally print the document after ref resolution
    if args.print_pre:
        print(json.dumps(document, indent=4))
        return 0
    if args.print_index:
        print(json.dumps(list(document_loader.idx.keys()), indent=4))
        return 0
    # Validate the user document against the schema
    try:
        schema.validate_doc(avsc_names, document,
                            document_loader, args.strict)
    except validate.ValidationException as e:
        msg = to_one_line_messages(str(e)) if args.print_oneline else str(e)
        _logger.error("While validating document `%s`:\n%s" %
                      (args.document, msg))
        return 1
    # Optionally convert the document to RDF
    if args.print_rdf:
        if isinstance(document, (dict, list)):
            printrdf(args.document, document, schema_ctx, args.rdf_serializer)
            return 0
        else:
            print("Document must be a dictionary or list.")
            return 1
    if args.print_metadata:
        print(json.dumps(doc_metadata, indent=4))
        return 0
    print("Document `%s` is valid" % args.document)
    return 0
# Script entry point: delegate to main() with command-line arguments.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
| |
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for Backup swift code."""
import bz2
import filecmp
import hashlib
import os
import shutil
import tempfile
import threading
from unittest import mock
import zlib
import ddt
from eventlet import tpool
from oslo_config import cfg
from swiftclient import client as swift
import zstd
from cinder.backup import chunkeddriver
from cinder.backup.drivers import swift as swift_dr
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.tests.unit.backup import fake_swift_client
from cinder.tests.unit.backup import fake_swift_client2
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import test
CONF = cfg.CONF
ANY = mock.ANY
def fake_md5(arg, usedforsecurity=False):
    """Stand-in for hashlib.md5 whose hexdigest is always a fixed string."""
    class _FakeHash(object):
        def hexdigest(self):
            return 'fake-md5-sum'
    return _FakeHash()
@ddt.ddt
class BackupSwiftTestCase(test.TestCase):
"""Test Case for swift."""
_DEFAULT_VOLUME_ID = 'c7eb81f4-bec6-4730-a60f-8888885874df'
def _create_volume_db_entry(self, volume_id=_DEFAULT_VOLUME_ID):
vol = {'id': volume_id,
'size': 1,
'status': 'available',
'volume_type_id': self.vt['id']}
return db.volume_create(self.ctxt, vol)['id']
def _create_backup_db_entry(self,
volume_id=_DEFAULT_VOLUME_ID,
container='test-container',
backup_id=fake.BACKUP_ID, parent_id=None,
service_metadata=None):
try:
db.volume_get(self.ctxt, volume_id)
except exception.NotFound:
self._create_volume_db_entry(volume_id=volume_id)
backup = {'id': backup_id,
'size': 1,
'container': container,
'volume_id': volume_id,
'parent_id': parent_id,
'user_id': fake.USER_ID,
'project_id': fake.PROJECT_ID,
'service_metadata': service_metadata,
}
return db.backup_create(self.ctxt, backup)['id']
def _write_effective_compression_file(self, data_size):
"""Ensure file contents can be effectively compressed."""
self.volume_file.seek(0)
self.volume_file.write(bytes([65] * data_size))
self.volume_file.seek(0)
def setUp(self):
super(BackupSwiftTestCase, self).setUp()
service_catalog = [{u'type': u'object-store', u'name': u'swift',
u'endpoints': [{
u'publicURL': u'http://example.com'}]},
{u'type': u'identity', u'name': u'keystone',
u'endpoints': [{
u'publicURL': u'http://example.com'}]}]
self.ctxt = context.RequestContext(user_id=fake.USER_ID,
is_admin=True,
service_catalog=service_catalog)
self.mock_object(swift, 'Connection',
fake_swift_client.FakeSwiftClient.Connection)
self.mock_object(hashlib, 'md5', fake_md5)
self.volume_file = tempfile.NamedTemporaryFile()
self.temp_dir = tempfile.mkdtemp()
self.addCleanup(self.volume_file.close)
# Remove tempdir.
self.addCleanup(shutil.rmtree, self.temp_dir)
self.size_volume_file = 0
for _i in range(0, 64):
self.volume_file.write(os.urandom(1024))
self.size_volume_file += 1024
notify_patcher = mock.patch(
'cinder.volume.volume_utils.notify_about_backup_usage')
notify_patcher.start()
self.addCleanup(notify_patcher.stop)
def test_backup_swift_url(self):
self.ctxt.service_catalog = [{u'type': u'object-store',
u'name': u'swift',
u'endpoints': [{
u'adminURL':
u'http://example.com'}]},
{u'type': u'identity',
u'name': u'keystone',
u'endpoints': [{
u'publicURL':
u'http://example.com'}]}]
self.assertRaises(exception.BackupDriverException,
swift_dr.SwiftBackupDriver,
self.ctxt)
def test_backup_swift_auth_url(self):
self.ctxt.service_catalog = [{u'type': u'object-store',
u'name': u'swift',
u'endpoints': [{
u'publicURL':
u'http://example.com'}]},
{u'type': u'identity',
u'name': u'keystone',
u'endpoints': [{
u'adminURL':
u'http://example.com'}]}]
self.override_config("backup_swift_auth",
"single_user")
self.override_config("backup_swift_user",
"fake_user")
self.assertRaises(exception.BackupDriverException,
swift_dr.SwiftBackupDriver,
self.ctxt)
def test_backup_swift_url_conf(self):
self.ctxt.service_catalog = [{u'type': u'object-store',
u'name': u'swift',
u'endpoints': [{
u'adminURL':
u'http://example.com'}]},
{u'type': u'identity',
u'name': u'keystone',
u'endpoints': [{
u'publicURL':
u'http://example.com'}]}]
self.ctxt.project_id = fake.PROJECT_ID
self.override_config("backup_swift_url",
"http://public.example.com/")
backup = swift_dr.SwiftBackupDriver(self.ctxt)
self.assertEqual("%s%s" % (CONF.backup_swift_url,
self.ctxt.project_id),
backup.swift_url)
def test_backup_swift_url_conf_nocatalog(self):
self.ctxt.service_catalog = []
self.ctxt.project_id = fake.PROJECT_ID
self.override_config("backup_swift_url",
"http://public.example.com/")
backup = swift_dr.SwiftBackupDriver(self.ctxt)
self.assertEqual("%s%s" % (CONF.backup_swift_url,
self.ctxt.project_id),
backup.swift_url)
def test_backup_swift_auth_url_conf(self):
self.ctxt.service_catalog = [{u'type': u'object-store',
u'name': u'swift',
u'endpoints': [{
u'publicURL':
u'http://example.com'}]},
{u'type': u'identity',
u'name': u'keystone',
u'endpoints': [{
u'adminURL':
u'http://example.com'}]}]
self.ctxt.project_id = fake.PROJECT_ID
self.override_config("backup_swift_auth_url",
"http://public.example.com")
self.override_config("backup_swift_auth",
"single_user")
self.override_config("backup_swift_user",
"fake_user")
backup = swift_dr.SwiftBackupDriver(self.ctxt)
self.assertEqual(CONF.backup_swift_auth_url, backup.auth_url)
def test_backup_swift_info(self):
self.override_config("swift_catalog_info", "dummy")
self.assertRaises(exception.BackupDriverException,
swift_dr.SwiftBackupDriver,
self.ctxt)
@ddt.data(
{'auth': 'single_user', 'insecure': True},
{'auth': 'single_user', 'insecure': False},
{'auth': 'per_user', 'insecure': True},
{'auth': 'per_user', 'insecure': False},
)
@ddt.unpack
def test_backup_swift_auth_insecure(self, auth, insecure):
self.override_config("backup_swift_auth_insecure", insecure)
self.override_config('backup_swift_auth', auth)
if auth == 'single_user':
self.override_config('backup_swift_user', 'swift-user')
mock_connection = self.mock_object(swift, 'Connection')
swift_dr.SwiftBackupDriver(self.ctxt)
if auth == 'single_user':
mock_connection.assert_called_once_with(insecure=insecure,
authurl=ANY,
auth_version=ANY,
tenant_name=ANY,
user=ANY,
key=ANY,
os_options={},
retries=ANY,
starting_backoff=ANY,
cacert=ANY)
else:
mock_connection.assert_called_once_with(insecure=insecure,
retries=ANY,
preauthurl=ANY,
preauthtoken=ANY,
starting_backoff=ANY,
cacert=ANY)
@ddt.data(
{'auth_version': '3', 'user_domain': 'UserDomain',
'project': 'Project', 'project_domain': 'ProjectDomain'},
{'auth_version': '3', 'user_domain': None,
'project': 'Project', 'project_domain': 'ProjectDomain'},
{'auth_version': '3', 'user_domain': 'UserDomain',
'project': None, 'project_domain': 'ProjectDomain'},
{'auth_version': '3', 'user_domain': 'UserDomain',
'project': 'Project', 'project_domain': None},
{'auth_version': '3', 'user_domain': None,
'project': None, 'project_domain': None},
)
@ddt.unpack
def test_backup_swift_auth_v3_single_user(self, auth_version, user_domain,
project, project_domain):
self.override_config('backup_swift_auth', 'single_user')
self.override_config('backup_swift_user', 'swift-user')
self.override_config('backup_swift_auth_version', auth_version)
self.override_config('backup_swift_user_domain', user_domain)
self.override_config('backup_swift_project', project)
self.override_config('backup_swift_project_domain', project_domain)
os_options = {}
if user_domain is not None:
os_options['user_domain_name'] = user_domain
if project is not None:
os_options['project_name'] = project
if project_domain is not None:
os_options['project_domain_name'] = project_domain
mock_connection = self.mock_object(swift, 'Connection')
swift_dr.SwiftBackupDriver(self.ctxt)
mock_connection.assert_called_once_with(insecure=ANY,
authurl=ANY,
auth_version=auth_version,
tenant_name=ANY,
user=ANY,
key=ANY,
os_options=os_options,
retries=ANY,
starting_backoff=ANY,
cacert=ANY)
@mock.patch.object(fake_swift_client.FakeSwiftConnection, 'put_container')
def test_default_backup_swift_create_storage_policy(self, mock_put):
service = swift_dr.SwiftBackupDriver(self.ctxt)
service.put_container('missing_container')
mock_put.assert_called_once_with('missing_container', headers=None)
@mock.patch.object(fake_swift_client.FakeSwiftConnection, 'put_container')
def test_backup_swift_create_storage_policy(self, mock_put):
self.override_config('backup_swift_create_storage_policy',
'mypolicy')
service = swift_dr.SwiftBackupDriver(self.ctxt)
service.put_container('missing_container')
mock_put.assert_called_once_with(
'missing_container',
headers={'X-Storage-Policy': 'mypolicy'}
)
def test_default_backup_swift_create_storage_policy_put_socket_error(self):
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.assertRaises(exception.SwiftConnectionFailed,
service.put_container,
'missing_container_socket_error_on_put')
def test_default_backup_swift_create_storage_policy_head_error(self):
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.assertRaises(exception.SwiftConnectionFailed,
service.put_container, 'unauthorized_container')
def test_backup_swift_create_storage_policy_head_error(self):
self.override_config('backup_swift_create_storage_policy',
'mypolicy')
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.assertRaises(exception.SwiftConnectionFailed,
service.put_container,
'unauthorized_container')
def test_default_backup_swift_create_storage_policy_head_sockerr(self):
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.assertRaises(exception.SwiftConnectionFailed,
service.put_container, 'socket_error_on_head')
def test_backup_swift_create_storage_policy_head_socket_error(self):
self.override_config('backup_swift_create_storage_policy',
'mypolicy')
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.assertRaises(exception.SwiftConnectionFailed,
service.put_container, 'socket_error_on_head')
def test_backup_uncompressed(self):
volume_id = '2b9f10a3-42b4-4fdf-b316-000000ceb039'
self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='none')
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
def test_backup_uncompressed_casing(self):
volume_id = '2b9f10a3-42b4-dead-b316-000000ceb039'
self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='None')
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
def test_backup_bz2(self):
volume_id = 'dc0fee35-b44e-4f13-80d6-000000e1b50c'
self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='bz2')
service = swift_dr.SwiftBackupDriver(self.ctxt)
self._write_effective_compression_file(self.size_volume_file)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
def test_backup_zlib(self):
volume_id = '5cea0535-b6fb-4531-9a38-000000bea094'
self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='zlib')
service = swift_dr.SwiftBackupDriver(self.ctxt)
self._write_effective_compression_file(self.size_volume_file)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
def test_backup_zstd(self):
volume_id = '471910a0-a197-4259-9c50-0fc3d6a07dbc'
self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='zstd')
service = swift_dr.SwiftBackupDriver(self.ctxt)
self._write_effective_compression_file(self.size_volume_file)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
@mock.patch.object(db, 'backup_update', wraps=db.backup_update)
def test_backup_default_container(self, backup_update_mock):
volume_id = '9552017f-c8b9-4e4e-a876-00000053349c'
self._create_backup_db_entry(volume_id=volume_id,
container=None)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
self.assertEqual('volumebackups', backup['container'])
self.assertEqual(3, backup_update_mock.call_count)
@mock.patch.object(db, 'backup_update', wraps=db.backup_update)
def test_backup_db_container(self, backup_update_mock):
volume_id = '9552017f-c8b9-4e4e-a876-00000053349c'
self._create_backup_db_entry(volume_id=volume_id,
container='existing_name')
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
self.assertEqual('existing_name', backup['container'])
# Make sure we are not making a DB update when we are using the same
# value that's already in the DB.
self.assertEqual(2, backup_update_mock.call_count)
@mock.patch.object(db, 'backup_update', wraps=db.backup_update)
def test_backup_driver_container(self, backup_update_mock):
volume_id = '9552017f-c8b9-4e4e-a876-00000053349c'
self._create_backup_db_entry(volume_id=volume_id,
container=None)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
with mock.patch.object(service, 'update_container_name',
return_value='driver_name'):
service.backup(backup, self.volume_file)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
self.assertEqual('driver_name', backup['container'])
self.assertEqual(3, backup_update_mock.call_count)
@mock.patch('cinder.backup.drivers.swift.SwiftBackupDriver.'
'_send_progress_end')
@mock.patch('cinder.backup.drivers.swift.SwiftBackupDriver.'
'_send_progress_notification')
def test_backup_default_container_notify(self, _send_progress,
_send_progress_end):
volume_id = '87dd0eed-2598-4ebd-8ebb-000000ac578a'
self._create_backup_db_entry(volume_id=volume_id,
container=None)
# If the backup_object_number_per_notification is set to 1,
# the _send_progress method will be called for sure.
CONF.set_override("backup_object_number_per_notification", 1)
CONF.set_override("backup_swift_enable_progress_timer", False)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called)
# If the backup_object_number_per_notification is increased to
# another value, the _send_progress method will not be called.
_send_progress.reset_mock()
_send_progress_end.reset_mock()
CONF.set_override("backup_object_number_per_notification", 10)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
self.assertFalse(_send_progress.called)
self.assertTrue(_send_progress_end.called)
# If the timer is enabled, the _send_progress will be called,
# since the timer can trigger the progress notification.
_send_progress.reset_mock()
_send_progress_end.reset_mock()
CONF.set_override("backup_object_number_per_notification", 10)
CONF.set_override("backup_swift_enable_progress_timer", True)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called)
def test_backup_custom_container(self):
    """A container name supplied at creation is stored on the backup."""
    vol_id = '1da9859e-77e5-4731-bd58-000000ca119e'
    custom_container = 'fake99'
    self._create_backup_db_entry(volume_id=vol_id,
                                 container=custom_container)
    driver = swift_dr.SwiftBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
    driver.backup(backup, self.volume_file)
    backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
    self.assertEqual(custom_container, backup['container'])
def test_backup_shafile(self):
    """A backup writes a sha256 file with one hash per chunk."""
    volume_id = '6465dad4-22af-48f7-8a1a-000000218907'

    def _fake_generate_object_name_prefix(self, backup):
        # Deterministic prefix so objects land in a predictable place.
        az = 'az_fake'
        backup_name = '%s_backup_%s' % (az, backup['id'])
        volume = 'volume_%s' % (backup['volume_id'])
        prefix = volume + '_' + backup_name
        return prefix

    self.mock_object(swift_dr.SwiftBackupDriver,
                     '_generate_object_name_prefix',
                     _fake_generate_object_name_prefix)
    # NOTE(review): container is derived from the test's temp dir,
    # presumably so FakeSwiftClient2 stores objects there — confirm.
    container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                           '', 1)
    self._create_backup_db_entry(volume_id=volume_id,
                                 container=container_name)
    self.mock_object(swift, 'Connection',
                     fake_swift_client2.FakeSwiftClient2.Connection)
    service = swift_dr.SwiftBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
    service.backup(backup, self.volume_file)
    backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
    self.assertEqual(container_name, backup['container'])
    # Verify sha contents: one sha256 per chunk of the 64 KiB volume.
    content1 = service._read_sha256file(backup)
    self.assertEqual(64 * 1024 / content1['chunk_size'],
                     len(content1['sha256s']))
def test_backup_cmp_shafiles(self):
    """An incremental backup of unchanged data reproduces the sha set."""
    volume_id = '1a99ac67-c534-4fe3-b472-0000001785e2'

    def _fake_generate_object_name_prefix(self, backup):
        # Deterministic prefix so objects land in a predictable place.
        az = 'az_fake'
        backup_name = '%s_backup_%s' % (az, backup['id'])
        volume = 'volume_%s' % (backup['volume_id'])
        prefix = volume + '_' + backup_name
        return prefix

    self.mock_object(swift_dr.SwiftBackupDriver,
                     '_generate_object_name_prefix',
                     _fake_generate_object_name_prefix)
    container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                           '', 1)
    self._create_backup_db_entry(volume_id=volume_id,
                                 container=container_name,
                                 backup_id=fake.BACKUP_ID)
    self.mock_object(swift, 'Connection',
                     fake_swift_client2.FakeSwiftClient2.Connection)
    service = swift_dr.SwiftBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
    service.backup(backup, self.volume_file)
    backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
    self.assertEqual(container_name, backup['container'])
    # Create incremental backup with no change to contents
    self._create_backup_db_entry(volume_id=volume_id,
                                 container=container_name,
                                 backup_id=fake.BACKUP2_ID,
                                 parent_id=fake.BACKUP_ID)
    self.mock_object(swift, 'Connection',
                     fake_swift_client2.FakeSwiftClient2.Connection)
    service = swift_dr.SwiftBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
    service.backup(deltabackup, self.volume_file)
    deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
    self.assertEqual(container_name, deltabackup['container'])
    # Compare shas from both files: identical data -> identical hashes.
    content1 = service._read_sha256file(backup)
    content2 = service._read_sha256file(deltabackup)
    self.assertEqual(len(content1['sha256s']), len(content2['sha256s']))
    self.assertEqual(set(content1['sha256s']), set(content2['sha256s']))
def test_backup_delta_two_objects_change(self):
    """Changing two separate objects flips exactly those sha entries."""
    volume_id = '30dab288-265a-4583-9abe-000000d42c67'

    def _fake_generate_object_name_prefix(self, backup):
        # Deterministic prefix so objects land in a predictable place.
        az = 'az_fake'
        backup_name = '%s_backup_%s' % (az, backup['id'])
        volume = 'volume_%s' % (backup['volume_id'])
        prefix = volume + '_' + backup_name
        return prefix

    self.mock_object(swift_dr.SwiftBackupDriver,
                     '_generate_object_name_prefix',
                     _fake_generate_object_name_prefix)
    # 8 KiB objects of 1 KiB blocks -> 8 blocks per object.
    self.flags(backup_swift_object_size=8 * 1024)
    self.flags(backup_swift_block_size=1024)
    container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                           '', 1)
    self._create_backup_db_entry(volume_id=volume_id,
                                 container=container_name,
                                 backup_id=fake.BACKUP_ID)
    self.mock_object(swift, 'Connection',
                     fake_swift_client2.FakeSwiftClient2.Connection)
    service = swift_dr.SwiftBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
    service.backup(backup, self.volume_file)
    backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
    self.assertEqual(container_name, backup['container'])
    # Overwrite one block in each of two different backup objects
    # (objects 2 and 4), then take an incremental backup.
    self.volume_file.seek(2 * 8 * 1024)
    self.volume_file.write(os.urandom(1024))
    self.volume_file.seek(4 * 8 * 1024)
    self.volume_file.write(os.urandom(1024))
    self._create_backup_db_entry(volume_id=volume_id,
                                 container=container_name,
                                 backup_id=fake.BACKUP2_ID,
                                 parent_id=fake.BACKUP_ID)
    self.mock_object(swift, 'Connection',
                     fake_swift_client2.FakeSwiftClient2.Connection)
    service = swift_dr.SwiftBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
    service.backup(deltabackup, self.volume_file)
    deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
    self.assertEqual(container_name, deltabackup['container'])
    content1 = service._read_sha256file(backup)
    content2 = service._read_sha256file(deltabackup)
    # Verify that two shas are changed at index 16 and 32
    # (block 0 of object 2 and block 0 of object 4).
    self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16])
    self.assertNotEqual(content1['sha256s'][32], content2['sha256s'][32])
def test_backup_delta_two_blocks_in_object_change(self):
    """Changing two blocks inside one object flips those sha entries."""
    volume_id = 'b943e84f-aa67-4331-9ab2-000000cf19ba'

    def _fake_generate_object_name_prefix(self, backup):
        # Deterministic prefix so objects land in a predictable place.
        az = 'az_fake'
        backup_name = '%s_backup_%s' % (az, backup['id'])
        volume = 'volume_%s' % (backup['volume_id'])
        prefix = volume + '_' + backup_name
        return prefix

    self.mock_object(swift_dr.SwiftBackupDriver,
                     '_generate_object_name_prefix',
                     _fake_generate_object_name_prefix)
    # 8 KiB objects of 1 KiB blocks -> 8 blocks per object.
    self.flags(backup_swift_object_size=8 * 1024)
    self.flags(backup_swift_block_size=1024)
    container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                           '', 1)
    self._create_backup_db_entry(volume_id=volume_id,
                                 container=container_name,
                                 backup_id=fake.BACKUP_ID)
    self.mock_object(swift, 'Connection',
                     fake_swift_client2.FakeSwiftClient2.Connection)
    service = swift_dr.SwiftBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
    service.backup(backup, self.volume_file)
    backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
    self.assertEqual(container_name, backup['container'])
    # Overwrite blocks 16 and 20 (both within the third backup object),
    # then take an incremental backup.
    self.volume_file.seek(16 * 1024)
    self.volume_file.write(os.urandom(1024))
    self.volume_file.seek(20 * 1024)
    self.volume_file.write(os.urandom(1024))
    self._create_backup_db_entry(volume_id=volume_id,
                                 container=container_name,
                                 backup_id=fake.BACKUP2_ID,
                                 parent_id=fake.BACKUP_ID)
    self.mock_object(swift, 'Connection',
                     fake_swift_client2.FakeSwiftClient2.Connection)
    service = swift_dr.SwiftBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
    service.backup(deltabackup, self.volume_file)
    deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
    self.assertEqual(container_name, deltabackup['container'])
    # Verify that two shas are changed at index 16 and 20
    content1 = service._read_sha256file(backup)
    content2 = service._read_sha256file(deltabackup)
    self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16])
    self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20])
def test_create_backup_put_object_wraps_socket_error(self):
    """Socket errors on put_object surface as SwiftConnectionFailed."""
    vol_id = 'c09b1ad4-5f0e-4d3f-8b9e-0000004caec8'
    self._create_backup_db_entry(volume_id=vol_id,
                                 container='socket_error_on_put')
    driver = swift_dr.SwiftBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
    self.assertRaises(exception.SwiftConnectionFailed,
                      driver.backup, backup, self.volume_file)
def test_backup_backup_metadata_fail(self):
    """Test of when an exception occurs in backup().

    In backup(), after an exception occurs in
    self._backup_metadata(), we want to check the process of an
    exception handler.
    """
    volume_id = '020d9142-339c-4876-a445-000000f1520c'
    self._create_backup_db_entry(volume_id=volume_id)
    self.flags(backup_compression_algorithm='none')
    service = swift_dr.SwiftBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)

    def fake_backup_metadata(self, backup, object_meta):
        raise exception.BackupDriverException(reason=_('fake'))

    # Raise a pseudo exception.BackupDriverException.
    self.mock_object(swift_dr.SwiftBackupDriver, '_backup_metadata',
                     fake_backup_metadata)
    # We expect that an exception be notified directly.
    self.assertRaises(exception.BackupDriverException,
                      service.backup,
                      backup, self.volume_file)
def test_backup_backup_metadata_fail2(self):
    """Test of when an exception occurs in an exception handler.

    In backup(), after an exception occurs in
    self._backup_metadata(), we want to check the process when the
    second exception occurs in self.delete_backup().
    """
    volume_id = '2164421d-f181-4db7-b9bd-000000eeb628'
    self._create_backup_db_entry(volume_id=volume_id)
    self.flags(backup_compression_algorithm='none')
    service = swift_dr.SwiftBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)

    def fake_backup_metadata(self, backup, object_meta):
        raise exception.BackupDriverException(reason=_('fake'))

    # Raise a pseudo exception.BackupDriverException.
    self.mock_object(swift_dr.SwiftBackupDriver, '_backup_metadata',
                     fake_backup_metadata)

    def fake_delete(self, backup):
        raise exception.BackupOperationError()

    # Raise a pseudo exception.BackupOperationError from the cleanup
    # path so it masks the original BackupDriverException.
    self.mock_object(swift_dr.SwiftBackupDriver, 'delete_backup',
                     fake_delete)
    # We expect that the second exception is notified.
    self.assertRaises(exception.BackupOperationError,
                      service.backup,
                      backup, self.volume_file)
def test_restore(self):
    """A plain restore into a temporary file completes successfully."""
    vol_id = 'c2a81f09-f480-4325-8424-00000071685b'
    self._create_backup_db_entry(volume_id=vol_id)
    driver = swift_dr.SwiftBackupDriver(self.ctxt)
    with tempfile.NamedTemporaryFile() as restore_target:
        backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
        backup.status = objects.fields.BackupStatus.RESTORING
        backup.save()
        driver.restore(backup, vol_id, restore_target)
def test_restore_delta(self):
    """Restoring an incremental backup reproduces the modified volume."""
    volume_id = '04d83506-bcf7-4ff5-9c65-00000051bd2e'

    def _fake_generate_object_name_prefix(self, backup):
        # Deterministic prefix so objects land in a predictable place.
        az = 'az_fake'
        backup_name = '%s_backup_%s' % (az, backup['id'])
        volume = 'volume_%s' % (backup['volume_id'])
        prefix = volume + '_' + backup_name
        return prefix

    self.mock_object(swift_dr.SwiftBackupDriver,
                     '_generate_object_name_prefix',
                     _fake_generate_object_name_prefix)
    # 8 KiB objects of 1 KiB blocks -> 8 blocks per object.
    self.flags(backup_swift_object_size=8 * 1024)
    self.flags(backup_swift_block_size=1024)
    container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                           '', 1)
    self._create_backup_db_entry(volume_id=volume_id,
                                 container=container_name,
                                 backup_id=fake.BACKUP_ID)
    self.mock_object(swift, 'Connection',
                     fake_swift_client2.FakeSwiftClient2.Connection)
    service = swift_dr.SwiftBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
    service.backup(backup, self.volume_file)
    # Overwrite blocks 16 and 20, then take an incremental backup of
    # the modified volume.
    self.volume_file.seek(16 * 1024)
    self.volume_file.write(os.urandom(1024))
    self.volume_file.seek(20 * 1024)
    self.volume_file.write(os.urandom(1024))
    self._create_backup_db_entry(volume_id=volume_id,
                                 container=container_name,
                                 backup_id=fake.BACKUP2_ID,
                                 parent_id=fake.BACKUP_ID)
    self.volume_file.seek(0)
    deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
    service.backup(deltabackup, self.volume_file, True)
    deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
    with tempfile.NamedTemporaryFile() as restored_file:
        backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
        backup.status = objects.fields.BackupStatus.RESTORING
        backup.save()
        service.restore(backup, volume_id,
                        restored_file)
        # Restored data must match the current (modified) volume.
        self.assertTrue(filecmp.cmp(self.volume_file.name,
                                    restored_file.name))
def test_restore_wraps_socket_error(self):
    """Socket errors during restore surface as SwiftConnectionFailed."""
    vol_id = 'c1160de7-2774-4f20-bf14-0000001ac139'
    self._create_backup_db_entry(volume_id=vol_id,
                                 container='socket_error_on_get')
    driver = swift_dr.SwiftBackupDriver(self.ctxt)
    with tempfile.NamedTemporaryFile() as restore_target:
        backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
        self.assertRaises(exception.SwiftConnectionFailed,
                          driver.restore,
                          backup, vol_id, restore_target)
def test_restore_unsupported_version(self):
    """An unknown metadata version makes restore raise InvalidBackup."""
    vol_id = '390db8c1-32d3-42ca-82c9-00000010c703'
    self._create_backup_db_entry(volume_id=vol_id,
                                 container='unsupported_version')
    driver = swift_dr.SwiftBackupDriver(self.ctxt)
    with tempfile.NamedTemporaryFile() as restore_target:
        backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
        self.assertRaises(exception.InvalidBackup,
                          driver.restore,
                          backup, vol_id, restore_target)
def test_delete(self):
    """Deleting a backup with an object prefix completes cleanly."""
    vol_id = '9ab256c8-3175-4ad8-baa1-0000007f9d31'
    self._create_backup_db_entry(volume_id=vol_id,
                                 service_metadata='test_prefix')
    driver = swift_dr.SwiftBackupDriver(self.ctxt)
    backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
    driver.delete_backup(backup)
def test_delete_wraps_socket_error(self):
    """Socket errors during delete surface as SwiftConnectionFailed."""
    vol_id = 'f74cb6fa-2900-40df-87ac-0000000f72ea'
    self._create_backup_db_entry(volume_id=vol_id,
                                 container='socket_error_on_delete',
                                 service_metadata='test_prefix')
    driver = swift_dr.SwiftBackupDriver(self.ctxt)
    backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
    self.assertRaises(exception.SwiftConnectionFailed,
                      driver.delete_backup,
                      backup)
def test_delete_without_object_prefix(self):
    """Without an object prefix, delete must not touch any objects."""
    vol_id = 'ee30d649-72a6-49a5-b78d-000000edb6b1'

    def _fail_if_called(self, container, object_name):
        raise AssertionError('delete_object method should not be called.')

    self.mock_object(swift_dr.SwiftBackupDriver,
                     'delete_object',
                     _fail_if_called)
    self._create_backup_db_entry(volume_id=vol_id)
    driver = swift_dr.SwiftBackupDriver(self.ctxt)
    backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
    driver.delete_backup(backup)
def test_get_compressor(self):
    """_get_compressor maps algorithm names to tpool-proxied modules."""
    driver = swift_dr.SwiftBackupDriver(self.ctxt)
    self.assertIsNone(driver._get_compressor('None'))
    for name, module in (('zlib', zlib), ('bz2', bz2), ('zstd', zstd)):
        compressor = driver._get_compressor(name)
        self.assertEqual(module, compressor)
        self.assertIsInstance(compressor, tpool.Proxy)
    self.assertRaises(ValueError, driver._get_compressor, 'fake')
def test_prepare_output_data_effective_compression(self):
    """Test compression works on a native thread."""
    # Use dictionary to share data between threads
    thread_dict = {}
    original_compress = zlib.compress

    def my_compress(data):
        # Record which thread actually ran the compression.
        thread_dict['compress'] = threading.current_thread()
        return original_compress(data)

    self.mock_object(zlib, 'compress', side_effect=my_compress)
    service = swift_dr.SwiftBackupDriver(self.ctxt)
    # Set up buffer of 128 zeroed bytes
    fake_data = b'\0' * 128
    result = service._prepare_output_data(fake_data)
    self.assertEqual('zlib', result[0])
    # Zeroed input is highly compressible, so output must shrink.
    self.assertGreater(len(fake_data), len(result[1]))
    # Compression must have happened off the main (green) thread.
    self.assertNotEqual(threading.current_thread(),
                        thread_dict['compress'])
def test_prepare_output_data_no_compresssion(self):
    """With algorithm 'none', data is passed through untouched."""
    # NOTE: the misspelling in the method name is kept on purpose — it
    # is the test's public identity.
    self.flags(backup_compression_algorithm='none')
    driver = swift_dr.SwiftBackupDriver(self.ctxt)
    raw = b'\0' * 128
    result = driver._prepare_output_data(raw)
    self.assertEqual('none', result[0])
    self.assertEqual(raw, result[1])
def test_prepare_output_data_ineffective_compression(self):
    """Incompressible input falls back to uncompressed output."""
    driver = swift_dr.SwiftBackupDriver(self.ctxt)
    raw = b'\0' * 128
    # Pre-compress so that compression in the driver will be ineffective.
    precompressed = driver.compressor.compress(raw)
    result = driver._prepare_output_data(precompressed)
    self.assertEqual('none', result[0])
    self.assertEqual(precompressed, result[1])
@mock.patch('cinder.backup.drivers.swift.SwiftBackupDriver.initialize')
def test_no_user_context(self, mock_initialize):
    """With no user_id the driver should not initialize itself."""
    swift_dr.SwiftBackupDriver(context.get_admin_context())
    mock_initialize.assert_not_called()
class WindowsBackupSwiftTestCase(BackupSwiftTestCase):
    # We're running all the parent class tests, while doing
    # some patching in order to simulate Windows behavior.

    def setUp(self):
        """Patch the platform, file reads and disk-size lookup so the
        inherited tests behave as they would on Windows."""
        self._mock_utilsfactory = mock.Mock()
        platform_patcher = mock.patch('sys.platform', 'win32')
        platform_patcher.start()
        self.addCleanup(platform_patcher.stop)
        super(WindowsBackupSwiftTestCase, self).setUp()

        read = self.volume_file.read

        def win32_read(sz):
            # We're simulating the Windows behavior: reading past the
            # reported disk size fails.
            # NOTE: fake_get_size is defined below; the closure only
            # resolves it when win32_read is actually called.
            if self.volume_file.tell() > fake_get_size():
                raise IOError()
            return read(sz)

        read_patcher = mock.patch.object(
            self.volume_file, 'read', win32_read)
        read_patcher.start()
        self.addCleanup(read_patcher.stop)

        def fake_get_size(*args, **kwargs):
            # Report the current file size without disturbing the offset.
            pos = self.volume_file.tell()
            sz = self.volume_file.seek(0, 2)
            self.volume_file.seek(pos)
            return sz

        self._disk_size_getter_mocker = mock.patch.object(
            swift_dr.SwiftBackupDriver,
            '_get_win32_phys_disk_size',
            fake_get_size)
        self._disk_size_getter_mocker.start()
        self.addCleanup(self._disk_size_getter_mocker.stop)

    def test_invalid_chunk_size(self):
        """Chunk sizes that are not sector-aligned are rejected."""
        self.flags(backup_swift_object_size=1000)
        # We expect multiples of 4096
        self.assertRaises(exception.InvalidConfigurationValue,
                          swift_dr.SwiftBackupDriver,
                          self.ctxt)

    @mock.patch.object(chunkeddriver, 'os_win_utilsfactory', create=True)
    def test_get_phys_disk_size(self, mock_utilsfactory):
        """The real _get_win32_phys_disk_size resolves device number
        then size via the os-win disk utils."""
        # We're patching this method in setUp, so we need to
        # retrieve the original one. Note that we'll get an unbound
        # method.
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        get_disk_size = self._disk_size_getter_mocker.temp_original
        disk_utils = mock_utilsfactory.get_diskutils.return_value
        disk_utils.get_device_number_from_device_name.return_value = (
            mock.sentinel.dev_num)
        disk_utils.get_disk_size.return_value = mock.sentinel.disk_size
        disk_size = get_disk_size(service, mock.sentinel.disk_path)
        self.assertEqual(mock.sentinel.disk_size, disk_size)
        disk_utils.get_device_number_from_device_name.assert_called_once_with(
            mock.sentinel.disk_path)
        disk_utils.get_disk_size.assert_called_once_with(
            mock.sentinel.dev_num)
| |
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import itertools
from paramz.caching import Cache_this
from .kern import CombinationKernel, Kern
from functools import reduce
class Add(CombinationKernel):
"""
Add given list of kernels together.
propagates gradients through.
This kernel will take over the active dims of it's subkernels passed in.
NOTE: The subkernels will be copies of the original kernels, to prevent
unexpected behavior.
"""
def __init__(self, subkerns, name='sum'):
    """Build a sum kernel from *subkerns*.

    Nested ``Add`` kernels are flattened: their parts are lifted
    directly into this kernel. Every subkernel is copied so the
    originals are left untouched.
    """
    _newkerns = []
    for kern in subkerns:
        if isinstance(kern, Add):
            # flatten nested sums
            for part in kern.parts:
                #kern.unlink_parameter(part)
                _newkerns.append(part.copy())
        else:
            _newkerns.append(kern.copy())
    super(Add, self).__init__(_newkerns, name)
    # Cache whether exact psi-statistics are possible for this mix of parts.
    self._exact_psicomp = self._check_exact_psicomp()
def _check_exact_psicomp(self):
    """Return True iff exact psi-statistics can be computed.

    Exact computation is supported for at most one RBF-or-Linear part,
    at most one Bias, at most one White, and no other kernel types.
    """
    from .. import RBF, Linear, Bias, White
    counts = {cls: sum(isinstance(k, cls) for k in self.parts)
              for cls in (RBF, Linear, Bias, White)}
    n_others = len(self.parts) - sum(counts.values())
    return (counts[RBF] + counts[Linear] <= 1
            and counts[Bias] <= 1
            and counts[White] <= 1
            and n_others == 0)
def to_dict(self):
    """
    Convert the object into a json serializable dictionary.

    Note: It uses the private method _save_to_input_dict of the parent.

    :return dict: json serializable dictionary containing the needed information to instantiate the object
    """
    serialized = super(Add, self)._save_to_input_dict()
    serialized["class"] = "GPy.kern.Add"
    return serialized
@Cache_this(limit=3, force_kwargs=['which_parts'])
def K(self, X, X2=None, which_parts=None):
    """
    Add all kernels together.
    If a list of parts (of this kernel!) `which_parts` is given, only
    the parts of the list are taken to compute the covariance.
    """
    if which_parts is None:
        parts = self.parts
    elif isinstance(which_parts, (list, tuple)):
        parts = which_parts
    else:
        # a single part was handed in
        parts = [which_parts]
    return reduce(np.add, (p.K(X, X2) for p in parts))
@Cache_this(limit=3, force_kwargs=['which_parts'])
def Kdiag(self, X, which_parts=None):
    """Sum of the selected parts' diagonal covariances at X."""
    if which_parts is None:
        parts = self.parts
    elif isinstance(which_parts, (list, tuple)):
        parts = which_parts
    else:
        # a single part was handed in
        parts = [which_parts]
    return reduce(np.add, (p.Kdiag(X) for p in parts))
def update_gradients_full(self, dL_dK, X, X2=None):
    """Propagate full-covariance gradients to every non-fixed part.

    Replaces a side-effect-only list comprehension with a plain loop.
    """
    for p in self.parts:
        if not p.is_fixed:
            p.update_gradients_full(dL_dK, X, X2)
def update_gradients_diag(self, dL_dK, X):
    """Propagate diagonal gradients to every part.

    Replaces a side-effect-only list comprehension with a plain loop.
    NOTE(review): unlike update_gradients_full this does not skip fixed
    parts — confirm whether that asymmetry is intentional.
    """
    for p in self.parts:
        p.update_gradients_diag(dL_dK, X)
def gradients_X(self, dL_dK, X, X2=None):
    """Compute the gradient of the objective function with respect to X.

    :param dL_dK: An array of gradients of the objective function with respect to the covariance function.
    :type dL_dK: np.ndarray (num_samples x num_inducing)
    :param X: Observed data inputs
    :type X: np.ndarray (num_samples x input_dim)
    :param X2: Observed data inputs (optional, defaults to X)
    :type X2: np.ndarray (num_inducing x input_dim)
    """
    # Accumulate each part's contribution with an explicit loop instead
    # of a side-effecting list comprehension.
    target = np.zeros(X.shape)
    for p in self.parts:
        target += p.gradients_X(dL_dK, X, X2)
    return target
def gradients_X_diag(self, dL_dKdiag, X):
    """Gradient of the objective w.r.t. X using only diagonal gradients,
    summed over all parts."""
    target = np.zeros(X.shape)
    for p in self.parts:
        target += p.gradients_X_diag(dL_dKdiag, X)
    return target
def gradients_XX(self, dL_dK, X, X2):
    """Second derivatives of the objective w.r.t. X, summed over parts.

    Returns an array of shape (n, n2, input_dim, input_dim), where n2
    equals X.shape[0] when X2 is None.
    """
    if X2 is None:
        target = np.zeros((X.shape[0], X.shape[0], X.shape[1], X.shape[1]))
    else:
        target = np.zeros((X.shape[0], X2.shape[0], X.shape[1], X.shape[1]))
    # Accumulate with a plain loop rather than a side-effecting
    # list comprehension.
    for p in self.parts:
        target += p.gradients_XX(dL_dK, X, X2)
    return target
def gradients_XX_diag(self, dL_dKdiag, X):
    """Diagonal second derivatives of the objective w.r.t. X, summed
    over parts. Shape: X.shape + (input_dim,)."""
    target = np.zeros(X.shape + (X.shape[1],))
    for p in self.parts:
        target += p.gradients_XX_diag(dL_dKdiag, X)
    return target
@Cache_this(limit=3, force_kwargs=['which_parts'])
def psi0(self, Z, variational_posterior):
    """Sum of the parts' psi0 statistics; falls back to the generic
    Kern implementation when exact computation is not available."""
    if not self._exact_psicomp:
        return Kern.psi0(self, Z, variational_posterior)
    return reduce(np.add, (p.psi0(Z, variational_posterior) for p in self.parts))
@Cache_this(limit=3, force_kwargs=['which_parts'])
def psi1(self, Z, variational_posterior):
    """Sum of the parts' psi1 statistics; falls back to the generic
    Kern implementation when exact computation is not available."""
    if not self._exact_psicomp:
        return Kern.psi1(self, Z, variational_posterior)
    return reduce(np.add, (p.psi1(Z, variational_posterior) for p in self.parts))
@Cache_this(limit=3, force_kwargs=['which_parts'])
def psi2(self, Z, variational_posterior):
    """psi2 statistic of the sum kernel (summed over the data axis).

    The psi2 of a sum is NOT just the sum of the parts' psi2: products
    between different parts contribute cross terms, which are added
    below for the supported Bias/RBF/Linear combinations.
    """
    if not self._exact_psicomp:
        return Kern.psi2(self, Z, variational_posterior)
    # diagonal (per-part) contributions
    psi2 = reduce(np.add, (p.psi2(Z, variational_posterior) for p in self.parts))
    # compute the "cross" terms
    from .static import White, Bias
    from .rbf import RBF
    from .linear import Linear
    for p1, p2 in itertools.combinations(self.parts, 2):
        # white doesn't combine with anything
        if isinstance(p1, White) or isinstance(p2, White):
            pass
        # bias X (rbf or linear): cross term uses the other part's psi1
        elif isinstance(p1, Bias) and isinstance(p2, (RBF, Linear)):
            tmp = p2.psi1(Z, variational_posterior).sum(axis=0)
            psi2 += p1.variance * (tmp[:,None]+tmp[None,:])
        elif isinstance(p2, Bias) and isinstance(p1, (RBF, Linear)):
            tmp = p1.psi1(Z, variational_posterior).sum(axis=0)
            psi2 += p2.variance * (tmp[:,None]+tmp[None,:])
        elif isinstance(p2, (RBF, Linear)) and isinstance(p1, (RBF, Linear)):
            # cross term between two "real" kernels only works when they
            # act on disjoint input dimensions
            assert np.intersect1d(p1._all_dims_active, p2._all_dims_active).size == 0, "only non overlapping kernel dimensions allowed so far"
            tmp1 = p1.psi1(Z, variational_posterior)
            tmp2 = p2.psi1(Z, variational_posterior)
            # symmetrised outer product, summed over the data axis n
            psi2 += np.einsum('nm,no->mo',tmp1,tmp2)+np.einsum('nm,no->mo',tmp2,tmp1)
        else:
            raise NotImplementedError("psi2 cannot be computed for this kernel")
    return psi2
@Cache_this(limit=3, force_kwargs=['which_parts'])
def psi2n(self, Z, variational_posterior):
    """Per-datum psi2 statistic of the sum kernel (no sum over data).

    Same cross-term structure as :meth:`psi2`, but the data axis n is
    kept, so the results are (n, m, m) arrays.
    """
    if not self._exact_psicomp:
        return Kern.psi2n(self, Z, variational_posterior)
    # diagonal (per-part) contributions
    psi2 = reduce(np.add, (p.psi2n(Z, variational_posterior) for p in self.parts))
    # compute the "cross" terms
    from .static import White, Bias
    from .rbf import RBF
    from .linear import Linear
    for p1, p2 in itertools.combinations(self.parts, 2):
        # white doesn't combine with anything
        if isinstance(p1, White) or isinstance(p2, White):
            pass
        # bias X (rbf or linear): cross term uses the other part's psi1
        elif isinstance(p1, Bias) and isinstance(p2, (RBF, Linear)):
            tmp = p2.psi1(Z, variational_posterior)
            psi2 += p1.variance * (tmp[:, :, None] + tmp[:, None, :])
        elif isinstance(p2, Bias) and isinstance(p1, (RBF, Linear)):
            tmp = p1.psi1(Z, variational_posterior)
            psi2 += p2.variance * (tmp[:, :, None] + tmp[:, None, :])
        elif isinstance(p2, (RBF, Linear)) and isinstance(p1, (RBF, Linear)):
            # cross term between two "real" kernels only works when they
            # act on disjoint input dimensions
            assert np.intersect1d(p1._all_dims_active, p2._all_dims_active).size == 0, "only non overlapping kernel dimensions allowed so far"
            tmp1 = p1.psi1(Z, variational_posterior)
            tmp2 = p2.psi1(Z, variational_posterior)
            # symmetrised per-datum outer product
            psi2 += np.einsum('nm,no->nmo',tmp1,tmp2)+np.einsum('nm,no->nmo',tmp2,tmp1)
        else:
            raise NotImplementedError("psi2 cannot be computed for this kernel")
    return psi2
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
    """Update each part's gradients from the psi-statistics gradients.

    Because psi2 of the sum contains cross terms between pairs of
    parts, each part sees an *effective* dL_dpsi1 with extra
    contributions from its siblings.
    """
    # Collapse dL_dpsi2 over the inducing axes; the branch handles both
    # the summed (2-d, psi2) and per-datum (3-d, psi2n) layouts.
    tmp = dL_dpsi2.sum(0)+ dL_dpsi2.sum(1) if len(dL_dpsi2.shape)==2 else dL_dpsi2.sum(2)+ dL_dpsi2.sum(1)
    if not self._exact_psicomp:
        return Kern.update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)
    from .static import White, Bias
    for p1 in self.parts:
        # compute the effective dL_dpsi1. Extra terms appear because of the cross terms in psi2!
        eff_dL_dpsi1 = dL_dpsi1.copy()
        for p2 in self.parts:
            if p2 is p1:
                continue
            if isinstance(p2, White):
                # white contributes no cross terms
                continue
            elif isinstance(p2, Bias):
                eff_dL_dpsi1 += tmp * p2.variance
            else:  # TODO: Careful, not correct for overlapping _all_dims_active
                eff_dL_dpsi1 += tmp * p2.psi1(Z, variational_posterior)
        p1.update_gradients_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior)
def gradients_Z_expectations(self, dL_psi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
    """Gradient of the psi-expectations w.r.t. the inducing inputs Z,
    summed over all parts (with psi2 cross-term corrections)."""
    # Collapse dL_dpsi2 over the inducing axes; the branch handles both
    # the summed (2-d, psi2) and per-datum (3-d, psi2n) layouts.
    tmp = dL_dpsi2.sum(0)+ dL_dpsi2.sum(1) if len(dL_dpsi2.shape)==2 else dL_dpsi2.sum(2)+ dL_dpsi2.sum(1)
    if not self._exact_psicomp:
        return Kern.gradients_Z_expectations(self, dL_psi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)
    from .static import White, Bias
    target = np.zeros(Z.shape)
    for p1 in self.parts:
        # compute the effective dL_dpsi1. Extra terms appear because of the cross terms in psi2!
        eff_dL_dpsi1 = dL_dpsi1.copy()
        for p2 in self.parts:
            if p2 is p1:
                continue
            if isinstance(p2, White):
                # white contributes no cross terms
                continue
            elif isinstance(p2, Bias):
                eff_dL_dpsi1 += tmp * p2.variance
            else:
                eff_dL_dpsi1 += tmp * p2.psi1(Z, variational_posterior)
        target += p1.gradients_Z_expectations(dL_psi0, eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior)
    return target
def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
    """Gradients of the psi-expectations w.r.t. the variational
    posterior's parameters, accumulated over all parts (with psi2
    cross-term corrections)."""
    # Collapse dL_dpsi2 over the inducing axes; the branch handles both
    # the summed (2-d, psi2) and per-datum (3-d, psi2n) layouts.
    tmp = dL_dpsi2.sum(0) + dL_dpsi2.sum(1) if len(dL_dpsi2.shape) == 2 else dL_dpsi2.sum(2) + dL_dpsi2.sum(1)
    if not self._exact_psicomp:
        return Kern.gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)
    from .static import White, Bias
    target_grads = [np.zeros(v.shape) for v in variational_posterior.parameters]
    # Consistency fix: iterate over self.parts (as the sibling methods
    # update_gradients_expectations / gradients_Z_expectations do)
    # instead of self.parameters.
    for p1 in self.parts:
        # compute the effective dL_dpsi1. Extra terms appear because of
        # the cross terms in psi2!
        eff_dL_dpsi1 = dL_dpsi1.copy()
        for p2 in self.parts:
            if p2 is p1:
                continue
            if isinstance(p2, White):
                # white contributes no cross terms
                continue
            elif isinstance(p2, Bias):
                eff_dL_dpsi1 += tmp * p2.variance
            else:
                eff_dL_dpsi1 += tmp * p2.psi1(Z, variational_posterior)
        grads = p1.gradients_qX_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior)
        # accumulate in place into the running totals
        for i, g in enumerate(grads):
            np.add(target_grads[i], g, target_grads[i])
    return target_grads
#def add(self, other):
# parts = self.parts
# if 0:#isinstance(other, Add):
# #other_params = other.parameters[:]
# for p in other.parts[:]:
# other.unlink_parameter(p)
# parts.extend(other.parts)
# #self.link_parameters(*other_params)
#
# else:
# #self.link_parameter(other)
# parts.append(other)
# #self.input_dim, self._all_dims_active = self.get_input_dim_active_dims(parts)
# return Add([p for p in parts], self.name)
def input_sensitivity(self, summarize=True):
    """Sum of the parts' input sensitivities, accumulated on the input
    dimensions each part is active on."""
    if summarize:
        i_s = np.zeros((self.input_dim))
        for k in self.parts:
            # each part only contributes on its own active dims
            i_s[k._all_dims_active] += k.input_sensitivity(summarize)
        return i_s
    else:
        # the non-summarized case is handled by the parent class
        return super(Add, self).input_sensitivity(summarize)
def sde_update_gradient_full(self, gradients):
    """
    Update gradient in the order in which parameters are represented in the
    kernel
    """
    part_start_param_index = 0
    for p in self.parts:
        if not p.is_fixed:
            part_param_num = len(p.param_array)  # number of parameters in the part
            # hand each part its slice of the flat gradient vector
            p.sde_update_gradient_full(gradients[part_start_param_index:(part_start_param_index+part_param_num)])
            # NOTE(review): the index only advances for non-fixed parts,
            # i.e. fixed parts occupy no slots in `gradients` — confirm.
            part_start_param_index += part_param_num
def sde(self):
    """
    Support adding kernels for sde representation

    Each part's state-space model is stacked block-diagonally (F, L,
    Qc, Pinf, P0 and the parameter-derivative tensors dF, dQc, dPinf,
    dP0); the emission vectors H are concatenated horizontally.
    """
    import scipy.linalg as la
    F = None
    L = None
    Qc = None
    H = None
    Pinf = None
    P0 = None
    dF = None
    dQc = None
    dPinf = None
    dP0 = None
    n = 0   # accumulated state dimension
    nq = 0  # accumulated noise dimension
    nd = 0  # accumulated number of parameters (derivative slices)
    # Assign models
    for p in self.parts:
        (Ft,Lt,Qct,Ht,Pinft,P0t,dFt,dQct,dPinft,dP0t) = p.sde()
        # block-diagonal stacking; the first part just initializes
        F = la.block_diag(F,Ft) if (F is not None) else Ft
        L = la.block_diag(L,Lt) if (L is not None) else Lt
        Qc = la.block_diag(Qc,Qct) if (Qc is not None) else Qct
        H = np.hstack((H,Ht)) if (H is not None) else Ht
        Pinf = la.block_diag(Pinf,Pinft) if (Pinf is not None) else Pinft
        P0 = la.block_diag(P0,P0t) if (P0 is not None) else P0t
        # For the 3-d derivative tensors, grow all three axes with
        # zero-padding and write the new part into the lower corner.
        if dF is not None:
            dF = np.pad(dF,((0,dFt.shape[0]),(0,dFt.shape[1]),(0,dFt.shape[2])),
                        'constant', constant_values=0)
            dF[-dFt.shape[0]:,-dFt.shape[1]:,-dFt.shape[2]:] = dFt
        else:
            dF = dFt
        if dQc is not None:
            dQc = np.pad(dQc,((0,dQct.shape[0]),(0,dQct.shape[1]),(0,dQct.shape[2])),
                         'constant', constant_values=0)
            dQc[-dQct.shape[0]:,-dQct.shape[1]:,-dQct.shape[2]:] = dQct
        else:
            dQc = dQct
        if dPinf is not None:
            dPinf = np.pad(dPinf,((0,dPinft.shape[0]),(0,dPinft.shape[1]),(0,dPinft.shape[2])),
                           'constant', constant_values=0)
            dPinf[-dPinft.shape[0]:,-dPinft.shape[1]:,-dPinft.shape[2]:] = dPinft
        else:
            dPinf = dPinft
        if dP0 is not None:
            dP0 = np.pad(dP0,((0,dP0t.shape[0]),(0,dP0t.shape[1]),(0,dP0t.shape[2])),
                         'constant', constant_values=0)
            dP0[-dP0t.shape[0]:,-dP0t.shape[1]:,-dP0t.shape[2]:] = dP0t
        else:
            dP0 = dP0t
        # track accumulated dimensions for the sanity checks below
        n += Ft.shape[0]
        nq += Qct.shape[0]
        nd += dFt.shape[2]
    # Sanity-check all assembled shapes against the accumulated sizes.
    assert (F.shape[0] == n and F.shape[1]==n), "SDE add: Check of F Dimensions failed"
    assert (L.shape[0] == n and L.shape[1]==nq), "SDE add: Check of L Dimensions failed"
    assert (Qc.shape[0] == nq and Qc.shape[1]==nq), "SDE add: Check of Qc Dimensions failed"
    assert (H.shape[0] == 1 and H.shape[1]==n), "SDE add: Check of H Dimensions failed"
    assert (Pinf.shape[0] == n and Pinf.shape[1]==n), "SDE add: Check of Pinf Dimensions failed"
    assert (P0.shape[0] == n and P0.shape[1]==n), "SDE add: Check of P0 Dimensions failed"
    assert (dF.shape[0] == n and dF.shape[1]==n and dF.shape[2]==nd), "SDE add: Check of dF Dimensions failed"
    assert (dQc.shape[0] == nq and dQc.shape[1]==nq and dQc.shape[2]==nd), "SDE add: Check of dQc Dimensions failed"
    assert (dPinf.shape[0] == n and dPinf.shape[1]==n and dPinf.shape[2]==nd), "SDE add: Check of dPinf Dimensions failed"
    assert (dP0.shape[0] == n and dP0.shape[1]==n and dP0.shape[2]==nd), "SDE add: Check of dP0 Dimensions failed"
    return (F,L,Qc,H,Pinf,P0,dF,dQc,dPinf,dP0)
| |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class PhoneNumberList(ListResource):
    """Collection of PhoneNumber resources that belong to a single Trunk."""

    def __init__(self, version, trunk_sid):
        """Initialize the PhoneNumberList.

        :param Version version: Version that contains the resource
        :param trunk_sid: The SID of the Trunk that handles calls to the phone number

        :returns: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberList
        :rtype: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberList
        """
        super(PhoneNumberList, self).__init__(version)

        # Components used to build the request path for this collection.
        self._solution = {'trunk_sid': trunk_sid, }
        self._uri = '/Trunks/{trunk_sid}/PhoneNumbers'.format(**self._solution)

    def create(self, phone_number_sid):
        """Associate an existing incoming phone number with the trunk.

        :param unicode phone_number_sid: The SID of the Incoming Phone Number that you want to associate with the trunk

        :returns: Newly created PhoneNumberInstance
        :rtype: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberInstance
        """
        form = values.of({'PhoneNumberSid': phone_number_sid, })

        payload = self._version.create(
            'POST',
            self._uri,
            data=form,
        )

        return PhoneNumberInstance(
            self._version,
            payload,
            trunk_sid=self._solution['trunk_sid'],
        )

    def stream(self, limit=None, page_size=None):
        """Stream PhoneNumberInstance records lazily from the API.

        Records are fetched page by page as the generator is consumed, so
        memory use stays bounded regardless of the total record count.

        :param int limit: Upper limit for the number of records to return.
                          stream() guarantees to never return more than limit.
                          Default is no limit.
        :param int page_size: Number of records to fetch per request. Defaults
                              to 50 when unset; if only a limit is given, the
                              most efficient size, i.e. min(limit, 1000), is
                              used.

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberInstance]
        """
        bounds = self._version.read_limits(limit, page_size)

        first_page = self.page(page_size=bounds['page_size'], )

        return self._version.stream(first_page, bounds['limit'], bounds['page_limit'])

    def list(self, limit=None, page_size=None):
        """Eagerly fetch PhoneNumberInstance records and return them as a list.

        Unlike stream(), this loads up to `limit` records into memory before
        returning.

        :param int limit: Upper limit for the number of records to return.
                          list() guarantees never to return more than limit.
                          Default is no limit.
        :param int page_size: Number of records to fetch per request. Defaults
                              to 50 when unset; if only a limit is given, the
                              most efficient size, i.e. min(limit, 1000), is
                              used.

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberInstance]
        """
        return list(self.stream(limit=limit, page_size=page_size, ))

    def page(self, page_token=values.unset, page_number=values.unset,
             page_size=values.unset):
        """Request a single page of PhoneNumberInstance records immediately.

        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client state
        :param int page_size: Number of records to return, defaults to 50

        :returns: Page of PhoneNumberInstance
        :rtype: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberPage
        """
        query = values.of({
            'PageToken': page_token,
            'Page': page_number,
            'PageSize': page_size,
        })

        response = self._version.page(
            'GET',
            self._uri,
            params=query,
        )

        return PhoneNumberPage(self._version, response, self._solution)

    def get_page(self, target_url):
        """Fetch a specific page of PhoneNumberInstance records by URL.

        :param str target_url: API-generated URL for the requested results page

        :returns: Page of PhoneNumberInstance
        :rtype: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberPage
        """
        response = self._version.domain.twilio.request(
            'GET',
            target_url,
        )

        return PhoneNumberPage(self._version, response, self._solution)

    def get(self, sid):
        """Construct a PhoneNumberContext for one resource.

        :param sid: The unique string that identifies the resource

        :returns: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberContext
        :rtype: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberContext
        """
        return PhoneNumberContext(
            self._version,
            trunk_sid=self._solution['trunk_sid'],
            sid=sid,
        )

    def __call__(self, sid):
        """Construct a PhoneNumberContext for one resource.

        :param sid: The unique string that identifies the resource

        :returns: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberContext
        :rtype: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberContext
        """
        # Calling the list object is sugar for .get().
        return self.get(sid)

    def __repr__(self):
        """Provide a friendly representation.

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Trunking.V1.PhoneNumberList>'
class PhoneNumberPage(Page):
    """One page of PhoneNumber records returned by the API."""

    def __init__(self, version, response, solution):
        """Initialize the PhoneNumberPage.

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param trunk_sid: The SID of the Trunk that handles calls to the phone number

        :returns: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberPage
        :rtype: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberPage
        """
        super(PhoneNumberPage, self).__init__(version, response)

        # Path components, carried over from the owning list resource.
        self._solution = solution

    def get_instance(self, payload):
        """Build a PhoneNumberInstance from a raw API payload.

        :param dict payload: Payload response from the API

        :returns: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberInstance
        :rtype: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberInstance
        """
        return PhoneNumberInstance(
            self._version,
            payload,
            trunk_sid=self._solution['trunk_sid'],
        )

    def __repr__(self):
        """Provide a friendly representation.

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Trunking.V1.PhoneNumberPage>'
class PhoneNumberContext(InstanceContext):
    """Context for fetching or deleting one PhoneNumber resource."""

    def __init__(self, version, trunk_sid, sid):
        """Initialize the PhoneNumberContext.

        :param Version version: Version that contains the resource
        :param trunk_sid: The SID of the Trunk from which to fetch the PhoneNumber resource
        :param sid: The unique string that identifies the resource

        :returns: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberContext
        :rtype: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberContext
        """
        super(PhoneNumberContext, self).__init__(version)

        # Components used to build the request path for this resource.
        self._solution = {'trunk_sid': trunk_sid, 'sid': sid, }
        self._uri = '/Trunks/{trunk_sid}/PhoneNumbers/{sid}'.format(**self._solution)

    def fetch(self):
        """Fetch a PhoneNumberInstance.

        :returns: Fetched PhoneNumberInstance
        :rtype: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberInstance
        """
        payload = self._version.fetch(
            'GET',
            self._uri,
            params=values.of({}),
        )

        return PhoneNumberInstance(
            self._version,
            payload,
            trunk_sid=self._solution['trunk_sid'],
            sid=self._solution['sid'],
        )

    def delete(self):
        """Delete the PhoneNumberInstance.

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._version.delete('delete', self._uri)

    def __repr__(self):
        """Provide a friendly representation.

        :returns: Machine friendly representation
        :rtype: str
        """
        details = ['{}={}'.format(k, v) for k, v in self._solution.items()]
        return '<Twilio.Trunking.V1.PhoneNumberContext {}>'.format(' '.join(details))
class PhoneNumberInstance(InstanceResource):
    """A single PhoneNumber resource associated with a Trunk."""

    class AddressRequirement(object):
        NONE = "none"
        ANY = "any"
        LOCAL = "local"
        FOREIGN = "foreign"

    # Payload keys copied verbatim into self._properties.
    _PLAIN_FIELDS = (
        'account_sid', 'address_requirements', 'api_version', 'beta',
        'capabilities', 'friendly_name', 'links', 'phone_number', 'sid',
        'sms_application_sid', 'sms_fallback_method', 'sms_fallback_url',
        'sms_method', 'sms_url', 'status_callback', 'status_callback_method',
        'trunk_sid', 'url', 'voice_application_sid', 'voice_caller_id_lookup',
        'voice_fallback_method', 'voice_fallback_url', 'voice_method',
        'voice_url',
    )

    # Payload keys parsed as ISO-8601 datetimes.
    _DATE_FIELDS = ('date_created', 'date_updated')

    def __init__(self, version, payload, trunk_sid, sid=None):
        """Initialize the PhoneNumberInstance.

        :returns: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberInstance
        :rtype: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberInstance
        """
        super(PhoneNumberInstance, self).__init__(version)

        # Marshaled Properties
        self._properties = {key: payload.get(key) for key in self._PLAIN_FIELDS}
        for key in self._DATE_FIELDS:
            self._properties[key] = deserialize.iso8601_datetime(payload.get(key))

        # Context is built lazily by _proxy on first use.
        self._context = None
        self._solution = {'trunk_sid': trunk_sid, 'sid': sid or self._properties['sid'], }

    @property
    def _proxy(self):
        """Lazily build (and cache) the PhoneNumberContext for this instance.

        All instance actions are proxied to the context.

        :returns: PhoneNumberContext for this PhoneNumberInstance
        :rtype: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberContext
        """
        if self._context is None:
            self._context = PhoneNumberContext(
                self._version,
                trunk_sid=self._solution['trunk_sid'],
                sid=self._solution['sid'],
            )
        return self._context

    @property
    def account_sid(self):
        """unicode: The SID of the Account that created the resource."""
        return self._properties['account_sid']

    @property
    def address_requirements(self):
        """PhoneNumberInstance.AddressRequirement: Whether the phone number requires an Address registered with Twilio."""
        return self._properties['address_requirements']

    @property
    def api_version(self):
        """unicode: The API version used to start a new TwiML session."""
        return self._properties['api_version']

    @property
    def beta(self):
        """bool: Whether the phone number is new to the Twilio platform."""
        return self._properties['beta']

    @property
    def capabilities(self):
        """dict: Indicate if a phone can receive calls or messages."""
        return self._properties['capabilities']

    @property
    def date_created(self):
        """datetime: The RFC 2822 date and time in GMT when the resource was created."""
        return self._properties['date_created']

    @property
    def date_updated(self):
        """datetime: The RFC 2822 date and time in GMT when the resource was last updated."""
        return self._properties['date_updated']

    @property
    def friendly_name(self):
        """unicode: The string that you assigned to describe the resource."""
        return self._properties['friendly_name']

    @property
    def links(self):
        """unicode: The URLs of related resources."""
        return self._properties['links']

    @property
    def phone_number(self):
        """unicode: The phone number in E.164 format."""
        return self._properties['phone_number']

    @property
    def sid(self):
        """unicode: The unique string that identifies the resource."""
        return self._properties['sid']

    @property
    def sms_application_sid(self):
        """unicode: The SID of the application that handles SMS messages sent to the phone number."""
        return self._properties['sms_application_sid']

    @property
    def sms_fallback_method(self):
        """unicode: The HTTP method used with sms_fallback_url."""
        return self._properties['sms_fallback_method']

    @property
    def sms_fallback_url(self):
        """unicode: The URL that we call when an error occurs while retrieving or executing the TwiML."""
        return self._properties['sms_fallback_url']

    @property
    def sms_method(self):
        """unicode: The HTTP method to use with sms_url."""
        return self._properties['sms_method']

    @property
    def sms_url(self):
        """unicode: The URL we call when the phone number receives an incoming SMS message."""
        return self._properties['sms_url']

    @property
    def status_callback(self):
        """unicode: The URL to send status information to your application."""
        return self._properties['status_callback']

    @property
    def status_callback_method(self):
        """unicode: The HTTP method we use to call status_callback."""
        return self._properties['status_callback_method']

    @property
    def trunk_sid(self):
        """unicode: The SID of the Trunk that handles calls to the phone number."""
        return self._properties['trunk_sid']

    @property
    def url(self):
        """unicode: The absolute URL of the resource."""
        return self._properties['url']

    @property
    def voice_application_sid(self):
        """unicode: The SID of the application that handles calls to the phone number."""
        return self._properties['voice_application_sid']

    @property
    def voice_caller_id_lookup(self):
        """bool: Whether to lookup the caller's name."""
        return self._properties['voice_caller_id_lookup']

    @property
    def voice_fallback_method(self):
        """unicode: The HTTP method that we use to call voice_fallback_url."""
        return self._properties['voice_fallback_method']

    @property
    def voice_fallback_url(self):
        """unicode: The URL we call when an error occurs in TwiML."""
        return self._properties['voice_fallback_url']

    @property
    def voice_method(self):
        """unicode: The HTTP method used with the voice_url."""
        return self._properties['voice_method']

    @property
    def voice_url(self):
        """unicode: The URL we call when the phone number receives a call."""
        return self._properties['voice_url']

    def fetch(self):
        """Fetch a PhoneNumberInstance.

        :returns: Fetched PhoneNumberInstance
        :rtype: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberInstance
        """
        return self._proxy.fetch()

    def delete(self):
        """Delete the PhoneNumberInstance.

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._proxy.delete()

    def __repr__(self):
        """Provide a friendly representation.

        :returns: Machine friendly representation
        :rtype: str
        """
        details = ['{}={}'.format(k, v) for k, v in self._solution.items()]
        return '<Twilio.Trunking.V1.PhoneNumberInstance {}>'.format(' '.join(details))
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the M2M join table backing
    ``Dataset.experiments``.

    forwards() creates the ``tardis_portal_dataset_experiments`` join table
    (with a uniqueness constraint on the pair of FK columns); backwards()
    drops it again.
    """

    def forwards(self, orm):
        """Apply the migration against the frozen ORM state in ``models``."""
        # Adding M2M table for field experiments on 'Dataset'
        db.create_table('tardis_portal_dataset_experiments', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('dataset', models.ForeignKey(orm['tardis_portal.dataset'], null=False)),
            ('experiment', models.ForeignKey(orm['tardis_portal.experiment'], null=False))
        ))
        # Each (dataset, experiment) pair may appear at most once.
        db.create_unique('tardis_portal_dataset_experiments', ['dataset_id', 'experiment_id'])

    def backwards(self, orm):
        """Reverse the migration (drops the join table and its data)."""
        # Removing M2M table for field experiments on 'Dataset'
        db.delete_table('tardis_portal_dataset_experiments')

    # Frozen snapshot of the app's model definitions at the time this
    # migration was written; South uses it to build the fake `orm` object
    # passed to forwards()/backwards().  Do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'tardis_portal.author_experiment': {
            'Meta': {'ordering': "['order']", 'unique_together': "(('experiment', 'author'),)", 'object_name': 'Author_Experiment'},
            'author': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        'tardis_portal.datafileparameter': {
            'Meta': {'ordering': "['name']", 'object_name': 'DatafileParameter'},
            'datetime_value': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"}),
            'numerical_value': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'parameterset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.DatafileParameterSet']"}),
            'string_value': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
        },
        'tardis_portal.datafileparameterset': {
            'Meta': {'ordering': "['id']", 'object_name': 'DatafileParameterSet'},
            'dataset_file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Dataset_File']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"})
        },
        'tardis_portal.dataset': {
            'Meta': {'object_name': 'Dataset'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
            'experiments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'datasets'", 'symmetrical': 'False', 'to': "orm['tardis_portal.Experiment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'immutable': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'tardis_portal.dataset_file': {
            'Meta': {'object_name': 'Dataset_File'},
            'created_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Dataset']"}),
            'filename': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'md5sum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'mimetype': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
            'modification_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'protocol': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
            'size': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '400'})
        },
        'tardis_portal.datasetparameter': {
            'Meta': {'ordering': "['name']", 'object_name': 'DatasetParameter'},
            'datetime_value': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"}),
            'numerical_value': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'parameterset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.DatasetParameterSet']"}),
            'string_value': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
        },
        'tardis_portal.datasetparameterset': {
            'Meta': {'ordering': "['id']", 'object_name': 'DatasetParameterSet'},
            'dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Dataset']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"})
        },
        'tardis_portal.experiment': {
            'Meta': {'object_name': 'Experiment'},
            'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'created_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'handle': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'institution_name': ('django.db.models.fields.CharField', [], {'default': "'The University of Queensland'", 'max_length': '400'}),
            'license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.License']", 'null': 'True', 'blank': 'True'}),
            'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'public_access': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
            'update_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        'tardis_portal.experimentacl': {
            'Meta': {'ordering': "['experiment__id']", 'object_name': 'ExperimentACL'},
            'aclOwnershipType': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'canDelete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'canRead': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'canWrite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'effectiveDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'entityId': ('django.db.models.fields.CharField', [], {'max_length': '320'}),
            'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
            'expiryDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'isOwner': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'pluginId': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        },
        'tardis_portal.experimentparameter': {
            'Meta': {'ordering': "['name']", 'object_name': 'ExperimentParameter'},
            'datetime_value': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"}),
            'numerical_value': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'parameterset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ExperimentParameterSet']"}),
            'string_value': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
        },
        'tardis_portal.experimentparameterset': {
            'Meta': {'ordering': "['id']", 'object_name': 'ExperimentParameterSet'},
            'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"})
        },
        'tardis_portal.freetextsearchfield': {
            'Meta': {'object_name': 'FreeTextSearchField'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parameter_name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"})
        },
        'tardis_portal.groupadmin': {
            'Meta': {'object_name': 'GroupAdmin'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'tardis_portal.license': {
            'Meta': {'object_name': 'License'},
            'allows_distribution': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_url': ('django.db.models.fields.URLField', [], {'max_length': '2000', 'blank': 'True'}),
            'internal_description': ('django.db.models.fields.TextField', [], {}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '400'}),
            'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '2000'})
        },
        'tardis_portal.parametername': {
            'Meta': {'ordering': "('order', 'name')", 'unique_together': "(('schema', 'name'),)", 'object_name': 'ParameterName'},
            'choices': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'comparison_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'data_type': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'full_name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'immutable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_searchable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '9999', 'null': 'True', 'blank': 'True'}),
            'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"}),
            'units': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'})
        },
        'tardis_portal.schema': {
            'Meta': {'object_name': 'Schema'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'immutable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'namespace': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
            'subtype': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
            'type': ('django.db.models.fields.IntegerField', [], {'default': '1'})
        },
        'tardis_portal.token': {
            'Meta': {'object_name': 'Token'},
            'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
            'expiry_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2012, 6, 14, 0, 0)'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'tardis_portal.userauthentication': {
            'Meta': {'object_name': 'UserAuthentication'},
            'authenticationMethod': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'userProfile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.UserProfile']"}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'tardis_portal.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'isDjangoAccount': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        }
    }

    # Tells South which apps this frozen model state covers.
    complete_apps = ['tardis_portal']
| |
"""Tests for tensorflow.ops.io_ops."""
import os.path
import time
import tensorflow.python.platform
import tensorflow as tf
import numpy as np
from tensorflow.python.platform import gfile
class SaverTest(tf.test.TestCase):
  """Round-trip save/restore tests for tf.train.Saver on single-device graphs.

  Each test saves variables to a checkpoint under the test temp dir, then
  restores them into fresh (uninitialized) variables and asserts the values
  came back.  NOTE(review): this is legacy Python-2-era TF code (basestring,
  initialize_all_variables) — keep the deprecated APIs as-is.
  """
  def testBasics(self):
    """Save two named variables, restore them by dict mapping."""
    save_path = os.path.join(self.get_temp_dir(), "basics")
    with self.test_session() as sess:
      # Build a graph with 2 parameter nodes, and Save and
      # Restore nodes for them.
      v0 = tf.Variable(10.0, name="v0")
      v1 = tf.Variable(20.0, name="v1")
      save = tf.train.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
      tf.initialize_all_variables().run()
      # Check that the parameter nodes have been initialized.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
      # Save the initialized values in the file at "save_path"
      val = save.save(sess, save_path)
      self.assertTrue(isinstance(val, basestring))
      self.assertEqual(save_path, val)
    # Start a second session. In that session the parameter nodes
    # have not been initialized either.
    with self.test_session() as sess:
      v0 = tf.Variable(-1.0, name="v0")
      v1 = tf.Variable(-1.0, name="v1")
      save = tf.train.Saver({"v0": v0, "v1": v1})
      # Reading the variables before restore must fail: they are uninitialized.
      with self.assertRaisesWithPredicateMatch(
          tf.OpError, lambda e: "uninitialized value v0" in e.message):
        sess.run(v0)
      with self.assertRaisesWithPredicateMatch(
          tf.OpError, lambda e: "uninitialized value v1" in e.message):
        sess.run(v1)
      # Restore the saved values in the parameter nodes.
      save.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
    # Build another graph with 2 nodes, initialized
    # differently, and a Restore node for them.
    with self.test_session() as sess:
      v0_2 = tf.Variable(1000.0, name="v0")
      v1_2 = tf.Variable(2000.0, name="v1")
      save2 = tf.train.Saver({"v0": v0_2, "v1": v1_2})
      tf.initialize_all_variables().run()
      # Check that the parameter nodes have been initialized.
      self.assertEqual(1000.0, v0_2.eval())
      self.assertEqual(2000.0, v1_2.eval())
      # Restore the values saved earlier in the parameter nodes.
      save2.restore(sess, save_path)
      # Check that the parameter nodes have been restored (overwriting the
      # values they were initialized with).
      self.assertEqual(10.0, v0_2.eval())
      self.assertEqual(20.0, v1_2.eval())
  def testInt64(self):
    """Save and restore an int64 scalar variable."""
    save_path = os.path.join(self.get_temp_dir(), "int64")
    with self.test_session() as sess:
      # Build a graph with 1 node, and save and restore for them.
      v = tf.Variable(np.int64(15), name="v")
      save = tf.train.Saver({"v": v}, restore_sequentially=True)
      tf.initialize_all_variables().run()
      # Save the initialized values in the file at "save_path"
      val = save.save(sess, save_path)
      self.assertTrue(isinstance(val, basestring))
      self.assertEqual(save_path, val)
    with self.test_session() as sess:
      v = tf.Variable(np.int64(-1), name="v")
      save = tf.train.Saver({"v": v})
      with self.assertRaisesWithPredicateMatch(
          tf.OpError, lambda e: "uninitialized value v" in e.message):
        sess.run(v)
      # Restore the saved values in the parameter nodes.
      save.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(np.int64(15), v.eval())
  def testSomeErrors(self):
    """Duplicate checkpoint names in a Saver must raise ValueError."""
    with tf.Graph().as_default():
      v0 = tf.Variable([10.0], name="v0")
      v1 = tf.Variable([20.0], name="v1")
      v2 = tf.Variable([20.0], name="v2")
      # Force v2 to be saved under the slice name "v1", colliding with v1.
      v2._set_save_slice_info(tf.Variable.SaveSliceInfo("v1", ""))
      # By default the name used for "v2" will be "v1" and raise an error.
      with self.assertRaisesRegexp(ValueError, "same name: v1"):
        tf.train.Saver([v0, v1, v2])
      # The names are different and will work.
      tf.train.Saver({"vee1": v1, "other": [v2]})
  def testBasicsWithListOfVariables(self):
    """Same round trip as testBasics but constructing Saver from a list."""
    save_path = os.path.join(self.get_temp_dir(), "basics_with_list")
    with self.test_session(graph=tf.Graph()) as sess:
      # Build a graph with 2 parameter nodes, and Save and
      # Restore nodes for them.
      v0 = tf.Variable(10.0, name="v0")
      v1 = tf.Variable(20.0, name="v1")
      save = tf.train.Saver([v0, v1])
      tf.initialize_all_variables().run()
      # Check that the parameter nodes have been initialized.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
      # Save the initialized values in the file at "save_path"
      val = save.save(sess, save_path)
      self.assertTrue(isinstance(val, basestring))
      self.assertEqual(save_path, val)
    # Start a second session. In that session the variables
    # have not been initialized either.
    with self.test_session(graph=tf.Graph()) as sess:
      v0 = tf.Variable(-1.0, name="v0")
      v1 = tf.Variable(-1.0, name="v1")
      save = tf.train.Saver([v0, v1])
      with self.assertRaisesWithPredicateMatch(
          tf.OpError, lambda e: "uninitialized value v0" in e.message):
        sess.run(v0)
      with self.assertRaisesWithPredicateMatch(
          tf.OpError, lambda e: "uninitialized value v1" in e.message):
        sess.run(v1)
      # Restore the saved values in the parameter nodes.
      save.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
    # Build another graph with 2 nodes, initialized
    # differently, and a Restore node for them.
    with self.test_session(graph=tf.Graph()) as sess:
      v0_2 = tf.Variable(1000.0, name="v0")
      v1_2 = tf.Variable(2000.0, name="v1")
      save2 = tf.train.Saver([v0_2, v1_2])
      tf.initialize_all_variables().run()
      # Check that the parameter nodes have been initialized.
      self.assertEqual(1000.0, v0_2.eval())
      self.assertEqual(2000.0, v1_2.eval())
      # Restore the values saved earlier in the parameter nodes.
      save2.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, v0_2.eval())
      self.assertEqual(20.0, v1_2.eval())
  def _SaveAndLoad(self, var_name, var_value, other_value, save_path):
    """Helper: save var_name=var_value, then restore it over other_value."""
    with self.test_session() as sess:
      var = tf.Variable(var_value, name=var_name)
      save = tf.train.Saver({var_name: var})
      var.initializer.run()
      val = save.save(sess, save_path)
      self.assertEqual(save_path, val)
    with self.test_session() as sess:
      var = tf.Variable(other_value, name=var_name)
      save = tf.train.Saver({var_name: var})
      save.restore(sess, save_path)
      self.assertAllClose(var_value, var.eval())
  def testCacheRereadsFile(self):
    """Writing a second variable to the same path must bust reader caches."""
    save_path = os.path.join(self.get_temp_dir(), "cache_rereads")
    # Save and reload one Variable named "var0".
    self._SaveAndLoad("var0", 0.0, 1.0, save_path)
    # Save and reload one Variable named "var1" in the same file.
    # The cached readers should know to re-read the file.
    self._SaveAndLoad("var1", 1.1, 2.2, save_path)
  def testGPU(self):
    """Save/restore a variable placed on GPU (skipped on CPU-only builds)."""
    if not tf.test.IsBuiltWithCuda():
      return
    save_path = os.path.join(self.get_temp_dir(), "gpu")
    with tf.Session("", graph=tf.Graph()) as sess:
      with sess.graph.device("/gpu:0"):
        v0_1 = tf.Variable(123.45)
      save = tf.train.Saver({"v0": v0_1})
      tf.initialize_all_variables().run()
      save.save(sess, save_path)
    with tf.Session("", graph=tf.Graph()) as sess:
      with sess.graph.device("/gpu:0"):
        v0_2 = tf.Variable(543.21)
      save = tf.train.Saver({"v0": v0_2})
      tf.initialize_all_variables().run()
      self.assertAllClose(543.21, v0_2.eval())
      save.restore(sess, save_path)
      self.assertAllClose(123.45, v0_2.eval())
  def testVariables(self):
    """A Saver built with no arguments defaults to all variables."""
    save_path = os.path.join(self.get_temp_dir(), "variables")
    with tf.Session("", graph=tf.Graph()) as sess:
      one = tf.Variable(1.0)
      twos = tf.Variable([2.0, 2.0, 2.0])
      init = tf.initialize_all_variables()
      save = tf.train.Saver(tf.all_variables())
      init.run()
      save.save(sess, save_path)
    with tf.Session("", graph=tf.Graph()) as sess:
      one = tf.Variable(0.0)
      twos = tf.Variable([0.0, 0.0, 0.0])
      # Saver with no arg, defaults to 'all variables'.
      save = tf.train.Saver()
      save.restore(sess, save_path)
      self.assertAllClose(1.0, one.eval())
      self.assertAllClose([2.0, 2.0, 2.0], twos.eval())
  def testSaveWithGlobalStep(self):
    """global_step (int or tensor) must be appended to the checkpoint path."""
    save_path = os.path.join(self.get_temp_dir(), "ckpt_with_global_step")
    global_step_int = 5
    # Save and reload one Variable named "var0".
    self._SaveAndLoad("var0", 0.0, 1.0, save_path)
    for use_tensor in [True, False]:
      with self.test_session() as sess:
        var = tf.Variable(1.0, name="var0")
        save = tf.train.Saver({var.op.name: var})
        var.initializer.run()
        if use_tensor:
          global_step = tf.constant(global_step_int)
          val = save.save(sess, save_path, global_step=global_step)
        else:
          val = save.save(sess, save_path, global_step=global_step_int)
        # Path must come back suffixed with "-<step>".
        expected_save_path = "%s-%d" % (save_path, global_step_int)
        self.assertEqual(expected_save_path, val)
class SaveRestoreShardedTest(tf.test.TestCase):
  """Tests for sharded checkpoints (Saver(..., sharded=True)).

  Sharded saves write one file per device and return a path pattern of the
  form "<prefix>-?????-of-00002"; individual shards can be restored from
  their concrete filenames.
  """
  def testBasics(self):
    """Save two variables on two CPU devices; restore per-shard and via pattern."""
    save_path = os.path.join(self.get_temp_dir(), "sharded")
    # Build a graph with 2 parameter nodes on different devices.
    with tf.Session(
        target="",
        config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
      with sess.graph.device("/cpu:0"):
        v0 = tf.Variable(10, name="v0")
      with sess.graph.device("/cpu:1"):
        v1 = tf.Variable(20, name="v1")
      save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True)
      tf.initialize_all_variables().run()
      val = save.save(sess, save_path)
      # Sharded save returns the shard filename pattern, not a single file.
      self.assertEqual(save_path + "-?????-of-00002", val)
    # Restore a different "v0" from shard 0 of the saved files.
    with tf.Session(
        target="",
        config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
      with sess.graph.device("/cpu:0"):
        v0 = tf.Variable(111, name="v0")
        save = tf.train.Saver({"v0": v0}, sharded=True)
        tf.initialize_all_variables().run()
        self.assertEqual(111, v0.eval())
        save.restore(sess, save_path + "-00000-of-00002")
        self.assertEqual(10, v0.eval())
    # Restore a different "v1" from shard 1 of the saved files.
    with tf.Session(
        target="",
        config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
      with sess.graph.device("/cpu:0"):
        v1 = tf.Variable(222)
        save = tf.train.Saver({"v1": v1}, sharded=True)
        tf.initialize_all_variables().run()
        self.assertEqual(222, v1.eval())
        save.restore(sess, save_path + "-00001-of-00002")
        self.assertEqual(20, v1.eval())
    # Now try a restore with the sharded filename.
    with tf.Session(
        target="",
        config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
      with sess.graph.device("/cpu:0"):
        v0 = tf.Variable(111, name="v0")
      with sess.graph.device("/cpu:1"):
        v1 = tf.Variable(222, name="v1")
      save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True)
      tf.initialize_all_variables().run()
      self.assertEqual(111, v0.eval())
      self.assertEqual(222, v1.eval())
      save_path = os.path.join(self.get_temp_dir(), "sharded")
      save.restore(sess, save_path + "-?????-of-?????")
      self.assertEqual(10, v0.eval())
      self.assertEqual(20, v1.eval())
  def testSaverDef(self):
    """The generated SaverDef must record the sharded flag."""
    with self.test_session():
      v0 = tf.Variable(123, name="v0")
      save = tf.train.Saver({"v0": v0}, sharded=True)
      sd = save.as_saver_def()
      self.assertTrue(sd.sharded)
class MaxToKeepTest(tf.test.TestCase):
  """Tests for the max_to_keep checkpoint-rotation bookkeeping.

  With max_to_keep=2 the saver keeps only the two most recent checkpoints in
  `last_checkpoints` and deletes older files on each new save.  Independent
  Saver instances only delete checkpoints they know about.
  """
  def testNonSharded(self):
    """Rotation with three savers sharing one directory (non-sharded files)."""
    save_dir = os.path.join(self.get_temp_dir(), "max_to_keep_non_sharded")
    try:
      gfile.DeleteRecursively(save_dir)
    except gfile.GOSError as _:
      pass  # Ignore
    gfile.MakeDirs(save_dir)
    with self.test_session() as sess:
      v = tf.Variable(10.0, name="v")
      save = tf.train.Saver({"v": v}, max_to_keep=2)
      tf.initialize_all_variables().run()
      self.assertEqual([], save.last_checkpoints)
      s1 = save.save(sess, os.path.join(save_dir, "s1"))
      self.assertEqual([s1], save.last_checkpoints)
      self.assertTrue(gfile.Exists(s1))
      s2 = save.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([s1, s2], save.last_checkpoints)
      self.assertTrue(gfile.Exists(s1))
      self.assertTrue(gfile.Exists(s2))
      s3 = save.save(sess, os.path.join(save_dir, "s3"))
      self.assertEqual([s2, s3], save.last_checkpoints)
      self.assertFalse(gfile.Exists(s1))
      self.assertTrue(gfile.Exists(s2))
      self.assertTrue(gfile.Exists(s3))
      # Create a second helper, identical to the first.
      save2 = tf.train.Saver(saver_def=save.as_saver_def())
      save2.set_last_checkpoints(save.last_checkpoints)
      # Create a third helper, with the same configuration but no knowledge of
      # previous checkpoints.
      save3 = tf.train.Saver(saver_def=save.as_saver_def())
      # Exercise the first helper.
      # Adding s2 again (old s2 is removed first, then new s2 appended)
      s2 = save.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([s3, s2], save.last_checkpoints)
      self.assertFalse(gfile.Exists(s1))
      self.assertTrue(gfile.Exists(s3))
      self.assertTrue(gfile.Exists(s2))
      # Adding s1 (s3 should now be deleted as oldest in list)
      s1 = save.save(sess, os.path.join(save_dir, "s1"))
      self.assertEqual([s2, s1], save.last_checkpoints)
      self.assertFalse(gfile.Exists(s3))
      self.assertTrue(gfile.Exists(s2))
      self.assertTrue(gfile.Exists(s1))
      # Exercise the second helper.
      # Adding s2 again (old s2 is removed first, then new s2 appended)
      s2 = save2.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([s3, s2], save2.last_checkpoints)
      # Created by the first helper.
      self.assertTrue(gfile.Exists(s1))
      # Deleted by the first helper.
      self.assertFalse(gfile.Exists(s3))
      self.assertTrue(gfile.Exists(s2))
      # Adding s1 (s3 should now be deleted as oldest in list)
      s1 = save2.save(sess, os.path.join(save_dir, "s1"))
      self.assertEqual([s2, s1], save2.last_checkpoints)
      self.assertFalse(gfile.Exists(s3))
      self.assertTrue(gfile.Exists(s2))
      self.assertTrue(gfile.Exists(s1))
      # Exercise the third helper.
      # Adding s2 again (but helper is unaware of previous s2)
      s2 = save3.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([s2], save3.last_checkpoints)
      # Created by the first helper.
      self.assertTrue(gfile.Exists(s1))
      # Deleted by the first helper.
      self.assertFalse(gfile.Exists(s3))
      self.assertTrue(gfile.Exists(s2))
      # Adding s1 (s3 should not be deleted because helper is unaware of it)
      s1 = save3.save(sess, os.path.join(save_dir, "s1"))
      self.assertEqual([s2, s1], save3.last_checkpoints)
      self.assertFalse(gfile.Exists(s3))
      self.assertTrue(gfile.Exists(s2))
      self.assertTrue(gfile.Exists(s1))
  def testSharded(self):
    """Rotation of sharded checkpoints: each save produces 2 shard files."""
    save_dir = os.path.join(self.get_temp_dir(), "max_to_keep_sharded")
    try:
      gfile.DeleteRecursively(save_dir)
    except gfile.GOSError as _:
      pass  # Ignore
    gfile.MakeDirs(save_dir)
    with tf.Session(
        target="",
        config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
      with sess.graph.device("/cpu:0"):
        v0 = tf.Variable(111, name="v0")
      with sess.graph.device("/cpu:1"):
        v1 = tf.Variable(222, name="v1")
      save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True, max_to_keep=2)
      tf.initialize_all_variables().run()
      self.assertEqual([], save.last_checkpoints)
      s1 = save.save(sess, os.path.join(save_dir, "s1"))
      self.assertEqual([s1], save.last_checkpoints)
      self.assertEquals(2, len(gfile.Glob(s1)))
      s2 = save.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([s1, s2], save.last_checkpoints)
      self.assertEquals(2, len(gfile.Glob(s1)))
      self.assertEquals(2, len(gfile.Glob(s2)))
      s3 = save.save(sess, os.path.join(save_dir, "s3"))
      self.assertEqual([s2, s3], save.last_checkpoints)
      # All shard files of s1 must have been deleted by the rotation.
      self.assertEquals(0, len(gfile.Glob(s1)))
      self.assertEquals(2, len(gfile.Glob(s2)))
      self.assertEquals(2, len(gfile.Glob(s3)))
class KeepCheckpointEveryNHoursTest(tf.test.TestCase):
  """Tests keep_checkpoint_every_n_hours: sufficiently old checkpoints are
  preserved even when max_to_keep rotation would otherwise delete them.

  NOTE(review): this test is wall-clock sensitive (0.7 s threshold); slow
  machines can make it flaky by design.
  """
  def testNonSharded(self):
    """s1 aged past the keep-every threshold must survive rotation; s2 must not."""
    save_dir = os.path.join(self.get_temp_dir(),
                            "keep_checkpoint_every_n_hours")
    try:
      gfile.DeleteRecursively(save_dir)
    except gfile.GOSError as _:
      pass  # Ignore
    gfile.MakeDirs(save_dir)
    with self.test_session() as sess:
      v = tf.Variable([10.0], name="v")
      # Run the initializer NOW to avoid the 0.5s overhead of the first Run()
      # call, which throws the test timing off in fastbuild mode.
      tf.initialize_all_variables().run()
      # Create a saver that will keep the last 2 checkpoints plus one every 0.7
      # seconds.
      start_time = time.time()
      save = tf.train.Saver({"v": v}, max_to_keep=2,
                            keep_checkpoint_every_n_hours=0.7 / 3600)
      self.assertEqual([], save.last_checkpoints)
      # Wait till 0.7 second have elapsed so s1 will be old enough to keep.
      time.sleep((time.time() + 0.7) - start_time)
      s1 = save.save(sess, os.path.join(save_dir, "s1"))
      self.assertEqual([s1], save.last_checkpoints)
      s2 = save.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([s1, s2], save.last_checkpoints)
      # We now have 2 'last_checkpoints': [s1, s2]. The next call to Save(),
      # would normally delete s1, because max_to_keep is 2. However, s1 is
      # older than 0.7s so we must keep it.
      s3 = save.save(sess, os.path.join(save_dir, "s3"))
      self.assertEqual([s2, s3], save.last_checkpoints)
      # s1 should still be here, we are Not checking now to reduce time
      # variance in the test.
      # We now have 2 'last_checkpoints': [s2, s3], and s1 on disk. The next
      # call to Save(), will delete s2, because max_to_keep is 2, and because
      # we already kept the old s1. s2 is very close in time to s1 so it gets
      # deleted.
      s4 = save.save(sess, os.path.join(save_dir, "s4"))
      self.assertEqual([s3, s4], save.last_checkpoints)
      # Check that s1 is still here, but s2 is gone.
      self.assertTrue(gfile.Exists(s1))
      self.assertFalse(gfile.Exists(s2))
      self.assertTrue(gfile.Exists(s3))
      self.assertTrue(gfile.Exists(s4))
class SaveRestoreWithVariableNameMap(tf.test.TestCase):
  """Tests saving/restoring under remapped checkpoint tensor names.

  Variables are saved under "save_prefix/..." names; restoring with the
  original names must fail with a not-found error, while restoring with the
  mapped names must succeed — even into variables whose graph names carry a
  different ("restore_prefix/") prefix.
  """
  def testNonReshape(self):
    """Round trip through a name map without any reshaping."""
    save_path = os.path.join(self.get_temp_dir(), "basics")
    with self.test_session() as sess:
      # Build a graph with 2 parameter nodes, and Save and
      # Restore nodes for them.
      v0 = tf.Variable(10.0, name="v0")
      v1 = tf.Variable(20.0, name="v1")
      save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
      tf.initialize_all_variables().run()
      # Check that the parameter nodes have been initialized.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
      # Save the initialized values in the file at "save_path"
      # Use a variable name map to set the saved tensor names
      val = save.save(sess, save_path)
      self.assertTrue(isinstance(val, basestring))
      self.assertEqual(save_path, val)
      # Verify that the original names are not in the Saved file
      save = tf.train.Saver({"v0": v0, "v1": v1})
      with self.assertRaisesOpError("not found in checkpoint"):
        save.restore(sess, save_path)
    # Verify that the mapped names are present in the Saved file and can be
    # Restored using remapped names.
    with self.test_session() as sess:
      v0 = tf.Variable(-1.0, name="v0")
      v1 = tf.Variable(-1.0, name="v1")
      with self.assertRaisesOpError("uninitialized value v0"):
        sess.run(v0)
      with self.assertRaisesOpError("uninitialized value v1"):
        sess.run(v1)
      save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
      save.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
    # Add a prefix to the node names in the current graph and Restore using
    # remapped names.
    with self.test_session() as sess:
      v0 = tf.Variable(-1.0, name="restore_prefix/v0")
      v1 = tf.Variable(-1.0, name="restore_prefix/v1")
      with self.assertRaisesOpError("uninitialized value restore_prefix/v0"):
        sess.run(v0)
      with self.assertRaisesOpError("uninitialized value restore_prefix/v1"):
        sess.run(v1)
      # Restore the saved values in the parameter nodes.
      save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
      save.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
# Run the full test suite when this file is executed directly.
if __name__ == "__main__":
  tf.test.main()
| |
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABCMeta, abstractmethod
from zoo.chronos.model.tcmf import DeepGLO
from zoo.orca.automl.metrics import Evaluator
from zoo.orca.automl.model.abstract import BaseModel
from zoo.orca.data import SparkXShards, XShards
import pickle
import numpy as np
import pandas as pd
class TCMF(BaseModel):
    """
    MF regularized TCN + TCN (DeepGLO). This version is not for automated
    searching yet.
    """
    def __init__(self):
        """
        Initialize an empty model holder.

        The underlying DeepGLO model is created lazily by build() (or by
        restore() when loading a saved model).
        """
        # Underlying DeepGLO model; None until build()/restore() is called.
        self.model = None
        self.model_init = False

    def build(self, config):
        """
        Build the DeepGLO model from hyper parameters and mark it initialized.

        :param config: dict of hyper parameters; any missing key falls back to
            the default shown below.
        :return: None
        """
        self.model = DeepGLO(
            vbsize=config.get("vbsize", 128),
            hbsize=config.get("hbsize", 256),
            num_channels_X=config.get("num_channels_X", [32, 32, 32, 32, 32, 1]),
            num_channels_Y=config.get("num_channels_Y", [16, 16, 16, 16, 16, 1]),
            kernel_size=config.get("kernel_size", 7),
            dropout=config.get("dropout", 0.1),
            rank=config.get("rank", 64),
            kernel_size_Y=config.get("kernel_size_Y", 7),
            lr=config.get("learning_rate", 0.0005),
            normalize=config.get("normalize", False),
            use_time=config.get("use_time", True),
            svd=config.get("svd", True),
            forward_cov=False
        )
        self.model_init = True

    def fit_eval(self, data, verbose=0, num_workers=None, **config):
        """
        Fit on the training data from scratch.

        Since the rolling process is very customized in this model,
        we enclose the rolling process inside this method.

        :param data: a tuple (x, y) of numpy ndarrays where
            x: training data in shape (nd, Td) — nd series, Td time steps;
            y: None, the target is extracted from x directly.
        :param verbose: verbosity (unused here, forwarded for API symmetry).
        :param num_workers: number of workers to use; defaults to the number
            of Ray nodes when a RayContext is active, else 1.
        :param config: training hyper parameters (val_len, start_date, freq,
            covariates, dti, period, epochs/iterations, ...).
        :return: dict with the evaluation metric value, e.g. {"val_loss": ...}
        """
        x = data[0]
        if not self.model_init:
            # Allow calling fit_eval directly without an explicit build().
            self.build(config)
        if num_workers is None:
            num_workers = TCMF.get_default_num_workers()
        covariates = config.get('covariates', None)
        dti = config.get("dti", None)
        # Validate covariates/dti shapes against the training series length.
        self._check_covariates_dti(covariates=covariates, dti=dti, ts_len=x.shape[1])
        val_loss = self.model.train_all_models(
            x,
            val_len=config.get("val_len", 24),
            start_date=config.get("start_date", "2020-4-1"),
            freq=config.get("freq", "1H"),
            covariates=covariates,
            dti=dti,
            period=config.get("period", 24),
            init_epochs=config.get("init_FX_epoch", 100),
            alt_iters=config.get("alt_iters", 10),
            y_iters=config.get("y_iters", 10),
            max_FX_epoch=config.get("max_FX_epoch", 300),
            max_TCN_epoch=config.get("max_TCN_epoch", 300),
            num_workers=num_workers,
        )
        return {"val_loss": val_loss}

    def fit_incremental(self, x, covariates_new=None, dti_new=None):
        """
        Incremental fitting given a pre-trained model.

        :param x: incremental data, shape (nd, T_incr).
        :param covariates_new: covariates corresponding to the incremental x.
        :param dti_new: dti corresponding to the incremental x.
        :return: None
        :raises ValueError: if x is None or inconsistent with what fit saw.
        :raises Exception: if no model has been fit or restored yet.
        """
        if x is None:
            raise ValueError("Input invalid x of None")
        if self.model is None:
            raise Exception("Needs to call fit_eval or restore first before calling "
                            "fit_incremental")
        self._check_covariates_dti(covariates=covariates_new, dti=dti_new, ts_len=x.shape[1],
                                   method_name='fit_incremental')
        self.model.inject_new(x,
                              covariates_new=covariates_new,
                              dti_new=dti_new)

    @staticmethod
    def get_default_num_workers():
        """Return the number of Ray nodes of the active RayContext, or 1."""
        from zoo.ray import RayContext
        try:
            ray_ctx = RayContext.get(initialize=False)
            num_workers = ray_ctx.num_ray_nodes
        except Exception:
            # No active RayContext: run single-worker.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            num_workers = 1
        return num_workers

    def predict(self, x=None, horizon=24, mc=False,
                future_covariates=None,
                future_dti=None,
                num_workers=None):
        """
        Predict horizon time-points ahead the input x in fit_eval.

        :param x: We don't support input x currently.
        :param horizon: horizon length to predict.
        :param mc: unused flag (kept for API compatibility).
        :param future_covariates: covariates corresponding to future horizon steps data to predict.
        :param future_dti: dti corresponding to future horizon steps data to predict.
        :param num_workers: the number of workers to use. Note that there has to be an activate
            RayContext if num_workers > 1.
        :return: 2-D ndarray of the last `horizon` predicted steps per series.
        """
        if x is not None:
            raise ValueError("We don't support input x directly.")
        if self.model is None:
            raise Exception("Needs to call fit_eval or restore first before calling predict")
        self._check_covariates_dti(covariates=future_covariates, dti=future_dti, ts_len=horizon,
                                   method_name="predict")
        if num_workers is None:
            num_workers = TCMF.get_default_num_workers()
        if num_workers > 1:
            import ray
            from zoo.ray import RayContext
            try:
                RayContext.get(initialize=False)
            except Exception:
                try:
                    # detect whether ray has been started.
                    ray.put(None)
                except Exception:
                    # (Both handlers were bare `except:`; narrowed so Ctrl-C
                    # and SystemExit still propagate.)
                    raise RuntimeError(f"There must be an activate ray context while running with "
                                       f"{num_workers} workers. You can either start and init a "
                                       f"RayContext by init_orca_context(..., init_ray_on_spark="
                                       f"True) or start Ray with ray.init()")
        out = self.model.predict_horizon(
            future=horizon,
            bsize=90,
            num_workers=num_workers,
            future_covariates=future_covariates,
            future_dti=future_dti,
        )
        return out[:, -horizon::]

    def evaluate(self, x=None, y=None, metrics=None, target_covariates=None,
                 target_dti=None, num_workers=None):
        """
        Evaluate on the prediction results and y. We predict horizon time-points ahead the input x
        in fit_eval before evaluation, where the horizon length equals the second dimension size of
        y.

        :param x: We don't support input x currently.
        :param y: target. We interpret the second dimension of y as the horizon length for
            evaluation.
        :param metrics: a list of metrics in string format.
        :param target_covariates: covariates corresponding to target_value.
            2-D ndarray or None.
            The shape of ndarray should be (r, horizon), where r is the number of covariates.
            Global covariates for all time series. If None, only default time coveriates will be
            used while use_time is True. If not, the time coveriates used is the stack of input
            covariates and default time coveriates.
        :param target_dti: dti corresponding to target_value.
            DatetimeIndex or None.
            If None, use default fixed frequency DatetimeIndex generated with the last date of x in
            fit and freq.
        :param num_workers: the number of workers to use in evaluate. It defaults to 1.
        :return: a list of metric evaluation results.
        """
        if x is not None:
            raise ValueError("We don't support input x directly.")
        if y is None:
            raise ValueError("Input invalid y of None")
        if self.model is None:
            raise Exception("Needs to call fit_eval or restore first before calling predict")
        if len(y.shape) == 1:
            # Treat a 1-D target as a single-step horizon.
            y = np.expand_dims(y, axis=1)
            horizon = 1
        else:
            horizon = y.shape[1]
        result = self.predict(x=None, horizon=horizon,
                              future_covariates=target_covariates,
                              future_dti=target_dti,
                              num_workers=num_workers)
        if y.shape[1] == 1:
            multioutput = 'uniform_average'
        else:
            multioutput = 'raw_values'
        return [Evaluator.evaluate(m, y, result, multioutput=multioutput) for m in metrics]

    def save(self, model_file):
        """Serialize the underlying DeepGLO model to model_file with pickle."""
        # Use a context manager so the file handle is closed (and the data
        # flushed) deterministically; the original leaked the open handle.
        with open(model_file, "wb") as f:
            pickle.dump(self.model, f)

    def restore(self, model_file):
        """Load a previously saved DeepGLO model from model_file.

        NOTE(review): pickle.load executes arbitrary code — only restore
        model files from trusted sources.
        """
        with open(model_file, 'rb') as f:
            self.model = pickle.load(f)
        self.model_init = True

    def _get_optional_parameters(self):
        # No optional hyper parameters exposed for automated search.
        return {}

    def _get_required_parameters(self):
        # No required hyper parameters exposed for automated search.
        return {}

    def _check_covariates_dti(self, covariates=None, dti=None, ts_len=24, method_name='fit'):
        """Validate covariates/dti types and lengths against ts_len, and — for
        calls after fit — their consistency with what fit received."""
        if covariates is not None and not isinstance(covariates, np.ndarray):
            raise ValueError("Input covariates must be a ndarray. Got", type(covariates))
        if covariates is not None and not covariates.ndim == 2:
            raise ValueError("You should input a 2-D ndarray of covariates. But Got dimension of",
                             covariates.ndim)
        if covariates is not None and not covariates.shape[1] == ts_len:
            raise ValueError(f"The second dimension shape of covariates should be {ts_len}, "
                             f"but got {covariates.shape[1]} instead.")
        if dti is not None and not isinstance(dti, pd.DatetimeIndex):
            raise ValueError("Input dti must be a pandas DatetimeIndex. Got", type(dti))
        if dti is not None and len(dti) != ts_len:
            raise ValueError(f"Input dti length should be equal to {ts_len}, "
                             f"but got {len(dti)} instead.")
        if method_name != 'fit':
            # covariates and dti should be consistent with that in fit
            if self.model.covariates is None and covariates is not None:
                raise ValueError(f"Find valid covariates in {method_name} but invalid covariates "
                                 f"in fit. Please keep them in consistence!")
            if self.model.covariates is not None and covariates is None:
                raise ValueError(f"Find valid covariates in fit but invalid covariates in "
                                 f"{method_name}. Please keep them in consistence!")
            if self.model.covariates is not None \
                    and self.model.covariates.shape[0] != covariates.shape[0]:
                raise ValueError(f"The input covariates number in {method_name} should be the same "
                                 f"as the input covariates number in fit. Got {covariates.shape[0]}"
                                 f"and {self.model.covariates.shape[0]} respectively.")
            if self.model.dti is None and dti is not None:
                raise ValueError(f"Find valid dti in {method_name} but invalid dti in fit. "
                                 f"Please keep them in consistence!")
            if self.model.dti is not None and dti is None:
                raise ValueError(f"Find valid dti in fit but invalid dti in {method_name}. "
                                 f"Please keep them in consistence!")
class ModelWrapper(metaclass=ABCMeta):
    """Common interface every concrete TCMF model wrapper must implement."""

    @abstractmethod
    def fit(self, **kwargs):
        """Train the wrapped model on the given input."""

    @abstractmethod
    def evaluate(self, **kwargs):
        """Score predictions against targets and return metric values."""

    @abstractmethod
    def predict(self, **kwargs):
        """Forecast future values with the wrapped model."""

    @abstractmethod
    def is_xshards_distributed(self, **kwargs):
        """Report whether this wrapper operates on distributed XShards data."""

    @abstractmethod
    def save(self, **kwargs):
        """Persist the wrapped model to storage."""

    @abstractmethod
    def load(self, **kwargs):
        """Reload a previously saved model from storage."""
class TCMFXshardsModelWrapper(ModelWrapper):
    """TCMF wrapper that trains and predicts one model per shard of a
    SparkXShards of dict."""

    def __init__(self, config):
        self.config = config
        # After fit(): SparkXShards of [id_arr, fitted TCMF] pairs.
        self.internal = None

    def fit(self, x, num_workers=None, **fit_params):
        """Train an independent TCMF model on every shard of x."""
        if num_workers:
            raise ValueError("We don't support passing num_workers in fit "
                             "with input of xShards of dict")
        if not isinstance(x, SparkXShards):
            raise ValueError("value of x should be an xShards of dict, "
                             "but isn't an xShards")
        if x._get_class_name() != "dict":
            raise ValueError("value of x should be an xShards of dict, "
                             "but is an xShards of " + x._get_class_name())

        def train_shard(shard, shard_config):
            # Build and fit a fresh model for this shard only.
            model = TCMF()
            model.build(shard_config)
            ids, series = split_id_and_data(shard, True)
            model.fit_eval((series, None), **fit_params)
            return [ids, model]

        self.internal = x.transform_shard(train_shard, self.config)

    def fit_incremental(self, x_incr, covariates_incr=None, dti_incr=None):
        """Incremental fitting is not supported for the distributed wrapper."""
        raise NotImplementedError

    def evaluate(self, y, metric=None, target_covariates=None,
                 target_dti=None, num_workers=None):
        """Evaluation is not supported for the distributed wrapper."""
        raise NotImplementedError

    def predict(self, horizon=24,
                future_covariates=None,
                future_dti=None,
                num_workers=None):
        """Forecast `horizon` steps ahead on every shard.

        :return: an xShards of dicts with keys 'id' and 'prediction'.
        """
        if num_workers and num_workers != 1:
            raise ValueError("We don't support passing num_workers in predict "
                             "with input of xShards of dict")

        def predict_shard(shard):
            ids, model = shard[0], shard[1]
            forecast = model.predict(x=None, horizon=horizon,
                                     future_covariates=future_covariates,
                                     future_dti=future_dti,)
            return {'id': ids, "prediction": forecast}

        return self.internal.transform_shard(predict_shard)

    def is_xshards_distributed(self):
        """This wrapper always operates on distributed XShards data."""
        return True

    def save(self, model_path):
        """Persist the fitted shards (if any) to model_path."""
        if self.internal is None:
            return
        self.internal.save_pickle(model_path)

    def load(self, model_path, minPartitions=None):
        """Restore fitted shards previously written by save()."""
        self.internal = XShards.load_pickle(model_path, minPartitions=minPartitions)
class TCMFNdarrayModelWrapper(ModelWrapper):
    """ModelWrapper backed by a local (non-distributed) TCMF model.

    Data exchanged with callers is a dict with a mandatory 'y' ndarray of
    target time series and an optional 'id' ndarray of per-row identifiers
    (see :func:`split_id_and_data`).
    """
    def __init__(self, config):
        """Build the underlying TCMF model from *config*."""
        self.internal = TCMF()
        self.config = config
        self.internal.build(self.config)
        # id array captured in fit(); later incremental/eval data is
        # re-aligned against this order
        self.id_arr = None

    def fit(self, x, num_workers=None, **fit_params):
        """Fit the model.

        :param x: dict of ndarrays; must contain 'y', may contain 'id'.
        :param num_workers: number of workers to use for fitting.
        :raises ValueError: if x is not a dict.
        """
        if not isinstance(x, dict):
            raise ValueError("value of x should be a dict of ndarray")
        self.id_arr, train_data = split_id_and_data(x, False)
        self.internal.fit_eval((train_data, None), num_workers=num_workers,
                               **fit_params)

    def _rearrange_data_by_id(self, id_new, data_new, method_name="fit_incremental"):
        """Reorder rows of *data_new* (keyed by *id_new*) to match the id
        order captured in fit().

        :raises ValueError: if ids were given now but not in fit, or if the
            two id sets differ.
        """
        if np.array_equal(self.id_arr, id_new) or id_new is None:
            return data_new
        if self.id_arr is None:
            raise ValueError(f"Got valid id in {method_name} and invalid id in fit.")
        if set(id_new) != set(self.id_arr):
            raise ValueError(f"The input ids in {method_name} differs from input ids in fit.")
        # Build an id -> first-position map once (O(n)).  The previous
        # per-id list.index() lookup was O(n^2) and broke when id_new was an
        # ndarray, which has no .index method; setdefault keeps the original
        # first-occurrence semantics for duplicated ids.
        positions = {}
        for row, identifier in enumerate(id_new):
            positions.setdefault(identifier, row)
        return data_new[[positions[identifier] for identifier in self.id_arr]]

    def fit_incremental(self, x_incr, covariates_incr=None, dti_incr=None):
        """
        incrementally fit the model. Note that we only incrementally fit X_seq (TCN in global model)

        :param x_incr: dict of ndarrays; 'y' is a 2-D array in shape (n, T_incr), where n
            is the number of target time series and T_incr the number of time
            steps incremented; optional 'id' holds the row identifiers.
        :param covariates_incr: covariates corresponding to x_incr. 2-D ndarray or None.
            The shape of ndarray should be (r, T_incr), where r is the number of covariates.
            Global covariates for all time series. If None, only default time coveriates will be
            used while use_time is True. If not, the time coveriates used is the stack of input
            covariates and default time coveriates.
        :param dti_incr: dti corresponding to the x_incr. DatetimeIndex or None.
            If None, use default fixed frequency DatetimeIndex generated with the last date of x in
            fit and freq.
        :raises ValueError: if x_incr is not a dict.
        """
        if not isinstance(x_incr, dict):
            raise ValueError("value of x should be a dict of ndarray")
        incr_id_arr, incr_train_data = split_id_and_data(x_incr, False)
        incr_train_data = self._rearrange_data_by_id(id_new=incr_id_arr,
                                                     data_new=incr_train_data,
                                                     method_name="fit_incremental")
        self.internal.fit_incremental(incr_train_data,
                                      covariates_new=covariates_incr,
                                      dti_new=dti_incr)

    def evaluate(self, y, metric=None, target_covariates=None,
                 target_dti=None, num_workers=None):
        """
        Evaluate the model

        :param y: dict of ndarrays holding the target values ('y', optional 'id').
        :param metric: metric name(s) to evaluate with.
        :param target_covariates: covariates corresponding to y.
        :param target_dti: dti corresponding to y.
        :param num_workers: number of workers to use.
        :return: a list of metric evaluation results
        :raises ValueError: if y is not a dict.
        """
        if not isinstance(y, dict):
            raise ValueError("value of y should be a dict of ndarray")
        id_arr, y = split_id_and_data(y, False)
        y = self._rearrange_data_by_id(id_new=id_arr, data_new=y, method_name='evaluate')
        return self.internal.evaluate(y=y, metrics=metric,
                                      target_covariates=target_covariates,
                                      target_dti=target_dti,
                                      num_workers=num_workers)

    def predict(self, horizon=24,
                future_covariates=None,
                future_dti=None,
                num_workers=None):
        """
        Prediction.

        :param horizon: number of future time steps to predict.
        :param future_covariates: covariates corresponding to future horizon steps data to predict.
        :param future_dti: dti corresponding to future horizon steps data to predict.
        :param num_workers: number of workers to use.
        :return: dict with key "prediction" (and "id" when ids were seen in fit).
        """
        pred = self.internal.predict(horizon=horizon, num_workers=num_workers,
                                     future_covariates=future_covariates,
                                     future_dti=future_dti,)
        result = dict()
        if self.id_arr is not None:
            result['id'] = self.id_arr
        result["prediction"] = pred
        return result

    def is_xshards_distributed(self):
        """This wrapper operates on local ndarrays, not distributed XShards."""
        return False

    def save(self, model_path):
        """
        save model to file.

        :param model_path: the model directory to be saved to.
        :return: None
        """
        # persist the id array alongside the model so predict() can label rows
        with open(model_path + '/id.pkl', 'wb') as f:
            pickle.dump(self.id_arr, f)
        self.internal.save(model_path + "/model")

    def load(self, model_path):
        """
        restore model from model file and config.

        :param model_path: the model directory written by save().
        :return: None; the restored model is stored on ``self.internal``
        """
        self.internal = TCMF()
        with open(model_path + '/id.pkl', 'rb') as f:
            self.id_arr = pickle.load(f)
        self.internal.restore(model_path + "/model")
def split_id_and_data(d, is_xshards_distributed=False):
    """Split a data dict into its optional id array and mandatory y ndarray.

    :param d: dict with key 'y' (ndarray) and optionally key 'id'.
    :param is_xshards_distributed: when True, 'id' is mandatory as well.
    :return: tuple (id_arr or None, y ndarray)
    :raises ValueError: on a missing/ill-typed 'y', an 'id' whose length does
        not match the rows of 'y', or a missing 'id' in distributed mode.
    """
    if 'y' not in d:
        raise ValueError("key `y` doesn't exist in x")
    train_data = d['y']
    if not isinstance(train_data, np.ndarray):
        raise ValueError("the value of y should be an ndarray")
    id_arr = None
    if 'id' in d:
        id_arr = d['id']
        if len(id_arr) != train_data.shape[0]:
            raise ValueError("the length of the id array should be equal to the number of "
                             "rows in the y")
    elif is_xshards_distributed:
        raise ValueError("key `id` doesn't exist in x")
    return id_arr, train_data
| |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""
classes and helper moment matrices and localizing matrices,
which takes contraints as input produce the right outputs
for the cvxopt SDP solver
Sketchy: tested using sympy 0.7.6 (the default distribution did not work)
and cvxopt
"""
#from __future__ import division
import sympy as sp
import numpy as np
from cvxopt import matrix, sparse, spmatrix
import sympy.polys.monomials as mn
from sympy.polys.orderings import monomial_key
import scipy.linalg # for schur decomp, which np doesnt have
import scipy.sparse #
import numpy.linalg # for its norm, which suits us better than scipy
from collections import defaultdict
import ipdb
EPS = 1e-5
def problem_to_str(obj, gs=None, hs=None, plain=True):
    """Render a moment-minimization problem as a string.

    @param obj - objective expression
    @param gs - inequality (localizing) constraints, or None
    @param hs - equality constraints on the moment functional, or None
    @param plain - when True use str() formatting, otherwise LaTeX via sympy
    """
    has_gs = gs is not None and len(gs) > 0
    has_hs = hs is not None and len(hs) > 0
    if plain:
        out = 'Minimizing '
        out += ' L(%s) ' % str(obj)
        if has_gs or has_hs:
            out += '\nsubject to \t'
            if has_gs:
                for g in gs:
                    out += ' %s, \t' % str(g)
                out += '\n'
            if has_hs:
                for h in hs:
                    out += '$L(%s) = 0$, \t' % str(h)
                out = out.strip(',')
        else:
            out += '\t subject to no constraints'
    else:
        out = 'Minimizing '
        out += '$\mathcal{L}(%s)$' % sp.latex(obj)
        if has_gs or has_hs:
            out += '\n\nsubject to \t'
            if has_gs:
                for g in gs:
                    out += ' $%s$, \t' % sp.latex(g)
                out += '\n'
            if has_hs:
                for h in hs:
                    out += '$\mathcal{L}(%s) = 0$, \t' % sp.latex(h)
                out = out.strip(',')
        else:
            out += '\t subject to no constraints'
    return out
class Measure(object):
    """
    Class representing a K atomic measure.

    The measure is a weighted sum of point masses: each atom is stored as a
    list of coordinate values (one per variable in self.vars) with a scalar
    weight in self.weights.
    """
    def __init__(self, variables):
        """
        @param variables - list/tuple of symbols, even for single vars
        """
        if type(variables) is not list and type(variables) is not tuple:
            raise TypeError('variables need to be a list, even for single vars')
        # list of variables
        self.vars = variables
        # list of coordinate lists, aligned positionally with self.vars
        self.atoms = []
        # corresponding pis (weights) for those atoms
        self.weights = []

    def integrate(self, expr):
        """Integrate *expr* against the measure: sum_i w_i * expr(atom_i).

        *expr* must support .subs (a sympy expression).
        """
        integral = 0.0
        for i, w in enumerate(self.weights):
            integral += w * expr.subs(zip(self.vars, self.atoms[i]))
        return integral

    def normalize(self):
        """Rescale the weights in place so that they sum to one."""
        Z = float(sum(self.weights))
        # range, not the Python-2-only xrange: identical behavior here and
        # keeps the class importable on Python 3
        for i in range(len(self.weights)):
            self.weights[i] = self.weights[i] / Z

    def __add__(self, other):
        """Add mass in place and return self.

        *other* is either another Measure or a (weight, atom) tuple, where
        the atom may be an ndarray, a list, or a bare number.

        NOTE(review): in the Measure branch the other measure's whole
        weights/atoms lists are appended as single elements rather than
        extended element-wise -- kept as-is, but confirm the intent.
        """
        if type(other) == Measure:
            self.weights.append(other.weights)
            self.atoms.append(other.atoms)
        elif type(other) == tuple:
            self.weights.append(other[0])
            if type(other[1]) == np.ndarray:
                self.atoms.append(other[1].tolist())
            elif type(other[1]) == list:
                self.atoms.append(other[1])
            elif type(other[1]) == int or type(other[1]) == float:
                self.atoms.append([other[1]])
            else:
                raise NotImplementedError
        else:
            raise NotImplementedError
        return self

    def merge(self):
        """Merging of nearby atoms is not implemented yet."""
        return NotImplemented
class MomentMatrix(object):
    """
    class to handle moment matrices and localizing matrices.
    Contains functions to process constraints

    degree: max degree of the basic monomials corresponding to each row, so the
    highest degree monomial in the entire moment matrix would be twice
    the provided degree.

    NOTE: this class is written for Python 2 (some methods use the
    print statement) with sympy, cvxopt and scipy.
    """
    def __init__(self, degree, variables, morder='grevlex', monos=None):
        """
        @param degree - highest degree of the first row/column of the
        moment matrix
        @param variables - list of sympy symbols
        @param morder the monomial order lex, grlex, grevlex, ilex, igrlex, igrevlex
        @param monos - optional explicit collection of row monomials; when
        None, all monomials in the variables up to `degree` are generated
        """
        self.degree = degree
        self.vars = variables
        self.num_vars = len(self.vars)
        # this object is a list of all monomials
        # in num_vars variables up to degree degree
        if monos is None:
            rawmonos = mn.itermonomials(self.vars, self.degree)
        else:
            rawmonos = monos
        # the reverse here is a bit random..., but has to be done.
        # also try grlex sometimes
        self.row_monos = sorted(rawmonos,\
                                key=monomial_key(morder, self.vars[::-1]))
        # expanded monos is the flattened moment matrix itself
        self.expanded_monos = []
        for yi in self.row_monos:
            for yj in self.row_monos:
                self.expanded_monos.append(yi*yj)
        # This list correspond to the actual variables in the sdp solver
        self.matrix_monos = sorted(set(self.expanded_monos),\
                                   key=monomial_key(morder, self.vars[::-1]))
        self.num_matrix_monos = len(self.matrix_monos)
        # mapping from a monomial to a list of indices of
        # where the monomial appears in the moment matrix
        self.term_to_indices_dict = defaultdict(list)
        for i,yi in enumerate(self.expanded_monos):
            self.term_to_indices_dict[yi].append(i)
    def __str__(self):
        return 'moment matrix for %d variables: %s' % (self.num_vars, str(self.vars))
    def __len__(self):
        """returns m for this m by m matrix"""
        return len(self.row_monos)
    def __get_rowofA(self, constr):
        """
        @param - constr is a polynomial constraint expressed as a sympy
        polynomial. constr is h_j in Lasserre's notation,
        and represents contraints on entries of the moment matrix.
        Returns the dense coefficient row of constr over matrix_monos.
        """
        Ai = np.zeros(self.num_matrix_monos)
        constrpoly = constr.as_poly()
        #ipdb.set_trace()
        for i,yi in enumerate(self.matrix_monos):
            try:
                # NOTE(review): relies on coeff_monomial raising ValueError
                # for absent monomials; some sympy versions return None/0
                # instead -- confirm against the pinned sympy version.
                Ai[i] = constrpoly.coeff_monomial(yi)
            except ValueError:
                Ai[i] = 0
        return Ai
    def get_LMI_coefficients(self):
        """
        M = sum_\alpha y_alpha B_alpha, this function returns a list of B_alphas, as sparse matrices
        """
        Balpha = []
        # NOTE(review): constterm is assigned but never used
        constterm = True
        for yi in self.matrix_monos:
            indices = self.term_to_indices_dict[yi]
            # -1 entries at every flattened position where yi occurs
            term = spmatrix(-1,[0]*len(indices), indices, size=(1,len(self.expanded_monos)), tc='d')
            Balpha += [term]
        return Balpha
    def get_Bflat(self):
        """ M_flattened = sum_i y_i Bflat_i
        Returns a (num_matrix_monos x m*m) CSR 0/1 indicator matrix.
        """
        rowsM = len(self.row_monos)
        lenys = len(self.matrix_monos)
        # consider using sparse Bf
        Bf = scipy.sparse.lil_matrix((lenys, rowsM*rowsM))
        for i,yi in enumerate(self.matrix_monos):
            indices = self.term_to_indices_dict[yi]
            Bf[i, indices] = 1
        return scipy.sparse.csr_matrix(Bf)
    def get_Ab(self, constraints=None, cvxoptmode=True):
        """Assemble the linear system A y = b from the given polynomial
        constraints plus the normalization row y_1 = 1; when cvxoptmode is
        True, redundant equations are removed via a QR factorization.
        """
        num_constrs = len(constraints) if constraints is not None else 0
        # one extra row for the normalization constraint y_1 = 1
        Anp = np.zeros((num_constrs+1, self.num_matrix_monos))
        bnp = np.zeros((num_constrs+1,1))
        if constraints is not None:
            for i,constr in enumerate(constraints):
                Anp[i,:] = self.__get_rowofA(constr)
        Anp[-1,0] = 1
        bnp[-1] = 1
        # Remove redundant equations
        if cvxoptmode:
            Q, R = scipy.linalg.qr(Anp, mode='economic')
            Anp = R
            bnp = Q.T.dot(bnp)
            # Remove zero rows
            idx = np.sum(abs(Anp), 1) > EPS
            Anp = Anp[idx, :]
            bnp = bnp[idx, :]
        return Anp, bnp
    def get_Ab_slack(self, constraints=None, abs_slack=1e-2, rel_slack=1e-2, slackvector=0):
        """Like get_Ab but encodes each equality as a pair of slackened
        inequalities A y <= b + slack, -A y <= -b + slack, plus the exact
        normalization rows.
        NOTE(review): rel_slack is accepted but never used.
        """
        print 'slacks'
        print abs_slack
        num_constrs = len(constraints) if constraints is not None else 0
        Anp = np.zeros((num_constrs, self.num_matrix_monos))
        bnp = np.zeros((num_constrs,1))
        if constraints is not None:
            for i,constr in enumerate(constraints):
                Anp[i,:] = self.__get_rowofA(constr)
        # stack [A; -A] plus two rows pinning the normalization y_1 = 1
        Aslack = np.zeros((2*num_constrs+2, self.num_matrix_monos))
        bslack = np.zeros((2*num_constrs+2,1))
        Aslack[0:num_constrs,:] = Anp
        Aslack[num_constrs:-2,:] = -Anp
        Aslack[-1,0] = 1
        bslack[-1] = 1
        Aslack[-2,0] = -1
        bslack[-2] = -1
        #ipdb.set_trace()
        #Aslack[0:num_constrs,0] += np.abs(Aslack[0:num_constrs,0])*1e-2
        #Aslack[num_constrs:-2,0] += np.abs(Aslack[num_constrs:-2,0])*1e-2
        bslack[0:num_constrs] += abs_slack + slackvector
        bslack[num_constrs:-2] += abs_slack + slackvector
        return Aslack, bslack
    def numeric_instance(self, ys, maxdeg = None):
        """
        assign the matrix_monos ys and return an np matrix
        @params - ys: a list of numbers corresponding to self.matrix_monos
        @params - maxdeg: cutoff the matrix at this degree
        """
        assert len(ys)==len(self.matrix_monos), 'the lengths of the moment sequence is wrong'
        G = self.get_LMI_coefficients()
        num_inst = np.zeros(len(self.row_monos)**2)
        # G entries carry -1 markers, so negate while accumulating
        for i,val in enumerate(ys):
            num_inst += -val*np.array(matrix(G[i])).flatten()
        num_row_monos = len(self.row_monos)
        mat = num_inst.reshape(num_row_monos,num_row_monos)
        if maxdeg is not None:
            # keep only the leading principal block of rows/cols whose
            # monomial degree does not exceed maxdeg
            deglist = [sp.poly(rm, self.vars).total_degree() for rm in self.row_monos]
            cutoffind = sum([int(d<=maxdeg) for d in deglist])
            mat = mat[0:cutoffind, 0:cutoffind]
        return mat
    def pretty_print(self, sol):
        """
        print the moment matrix in a nice format?
        @param sol - solver result with entry 'x' aligned to matrix_monos
        """
        for i,mono in enumerate(self.matrix_monos):
            print '%s:\t%f\t' % (str(mono), sol['x'][i])
class LocalizingMatrix(object):
    '''
    Localizing matrix built from a base MomentMatrix *mm* and a localizing
    polynomial *poly_g*: poly_g multiplies termwise with a smaller moment
    matrix, and the monomials are exactly those of the base matrix, so the
    SDP variables still correspond to mm.matrix_monos.
    '''
    def __init__(self, mm, poly_g, morder='grevlex'):
        """
        @param - mm is a MomentMatrix object
        @param - poly_g the localizing polynomial
        """
        self.mm = mm
        self.poly_g = poly_g
        self.deg_g = poly_g.as_poly().total_degree()
        # a constant localizing polynomial is pointless, and sympy's special
        # handling of the constant 1 would crash the code below
        assert(self.deg_g > 0)
        # change this to be everything still in mm.monos post multiplication
        raw_monos = mn.itermonomials(mm.vars, mm.degree - self.deg_g)
        self.row_monos = sorted(raw_monos,
                                key=monomial_key(morder, mm.vars[::-1]))
        # poly_g * yi * yj for every ordered pair of row monomials (yi outer)
        self.expanded_polys = [sp.expand(poly_g * yi * yj)
                               for yi in self.row_monos
                               for yj in self.row_monos]
        # monomial -> [(flattened index, coefficient), ...] over expanded_polys
        self.term_to_indices_dict = defaultdict(list)
        for flat_idx, poly in enumerate(self.expanded_polys):
            coeff_by_mono = poly.as_coefficients_dict()
            for mono, coeff in coeff_by_mono.items():
                self.term_to_indices_dict[mono].append((flat_idx, float(coeff)))
    def get_LMI_coefficients(self):
        """
        polynomial here is called g in Lasserre's notation and defines the
        underlying set K; parallels MomentMatrix.get_LMI_coefficients with
        expanded_monos replaced by expanded_polys
        """
        lmi_terms = []
        for yi in self.mm.matrix_monos:
            pairs = self.term_to_indices_dict[yi]
            cols = [k for k, v in pairs]
            vals = [-v for k, v in pairs]
            lmi_terms.append(spmatrix(vals, [0] * len(cols), cols,
                                      size=(1, len(self.expanded_polys)),
                                      tc='d'))
        return lmi_terms
if __name__=='__main__':
    # simple test to make sure things run
    # (Python 2 only: this block uses the print statement)
    from cvxopt import solvers
    print 'testing simple unimixture with a skipped observation, just to test that things run'
    x = sp.symbols('x')
    M = MomentMatrix(3, [x], morder='grevlex')
    # moment constraints consistent with an equal mixture of atoms at 1 and 2
    # (E[x]=1.5, E[x^2]=2.5, E[x^4]=8.5); the x^3 moment is deliberately skipped
    constrs = [x-1.5, x**2-2.5, x**4-8.5]
    #constrs = [x-1.5, x**2-2.5, x**3-4.5, x**4-8.5]
    Ab = M.get_Ab(constrs)
    # localizing polynomials restricting the support to [-3, 3]
    gs = [3-x, 3+x]
    locmatrices = [LocalizingMatrix(M, g) for g in gs]
    # NOTE(review): LocalizingMatrix defines no get_cvxopt_Gh in this file,
    # so this line raises AttributeError as written; confirm the intended
    # API (get_LMI_coefficients?).
    Ghs = [lm.get_cvxopt_Gh() for lm in locmatrices]
| |
"""(Deprecated) Fit functions. Although these are still supported, use
fit functions of type FitFunction instead
"""
from __future__ import division
from __future__ import print_function
import numpy as np
# SIMPLE FITS
def polyfit4(x, a, b, c, d, e, *constants):
    """Degree-4 polynomial fit a*x^4 + b*x^3 + c*x^2 + d*x + e.

    Extra fit *constants* are accepted and ignored (``_polyfitn`` inlined).
    """
    return np.polyval([a, b, c, d, e], x)
def polyfit3(x, a, b, c, d, *constants):
    """Degree-3 polynomial fit a*x^3 + b*x^2 + c*x + d.

    Extra fit *constants* are accepted and ignored (``_polyfitn`` inlined).
    """
    return np.polyval([a, b, c, d], x)
def polyfit2(x, a, b, c, *constants):
    """Quadratic fit a*x^2 + b*x + c.

    Extra fit *constants* are accepted and ignored (``_polyfitn`` inlined).
    """
    return np.polyval([a, b, c], x)
# noinspection PyUnusedLocal
def polyfit1(x, a, b, *constants):
    """Linear fit a*x + b.

    Extra fit *constants* are accepted and ignored (``_polyfitn`` inlined).
    """
    return np.polyval([a, b], x)
# noinspection PyUnusedLocal
def _polyfitn(x, params, *constants):
return np.polyval(params, x)
# noinspection PyUnusedLocal
def expfit1(x, a, b, c, *constants):
    """Exponential fit a * exp(b*x) + c; extra *constants* are ignored."""
    return c + a * np.exp(b * x)
# noinspection PyUnusedLocal
def logfit1(x, a, b, c, *constants):
    """Logarithmic fit a * log(b^2 * x + 1) + c; extra *constants* are ignored."""
    return c + a * np.log(b ** 2 * x + 1)
# noinspection PyUnusedLocal
def logbasefit1(x, a, b, c, *constants):
    """Log fit with fitted base: a * log(x)/log(b^2 + 1) + c; extra
    *constants* are ignored."""
    return c + a * np.log(x) / np.log(b ** 2 + 1)
# noinspection PyUnusedLocal
def powerfit1(x, a, b, c, *constants):
    """Power-law fit a * x^b + c; extra *constants* are ignored."""
    return c + a * np.power(x, b)
# noinspection PyUnusedLocal
def sqrtfit1(x, a, b, *constants):
    """Square-root fit a * sqrt(x) + b; extra *constants* are ignored."""
    return b + a * np.sqrt(x)
# noinspection PyUnusedLocal
def rel1(x, a, b, c, d, *constants):
    """Relativistic-style fit a * sqrt(b*x^2 + c) + d; extra *constants*
    are ignored."""
    return d + a * np.sqrt(b * x ** 2 + c)
# noinspection PyUnusedLocal
def invfit1(x, a, b, *constants):
    """Inverse fit a / (x + 1) + b; extra *constants* are ignored."""
    return b + a / (x + 1)
# noinspection PyUnusedLocal
def linvfit1(x, a, b, *constants):
    """Saturating fit a * x / (x + 1) + b; extra *constants* are ignored."""
    return b + a * x / (x + 1)
# noinspection PyUnusedLocal
def asymptote1(x, a, b, c, *constants):
    """1/x asymptote fit a * (1 - b/x) + c; extra *constants* are ignored."""
    return c + a * (1 - b / x)
# INVOLVING CONSTANTS
def asymptote2_linear_joff2_tz_dependence(x, a, b1, b2, c, *constants):
    """-1/x^2 asymptote with a linear term scaled by joff2 = (j-1)|j-1| and tz.

    With no fit *constants* supplied, falls back to a * (-1/x^2) + c.
    """
    if constants:
        const_list, const_dict = constants
        qnums = const_dict['qnums']
        n, l, j, tz = qnums
        joff2 = (j-1) * abs(j-1)
        return a*(-1/x**2)+(b1*joff2+b2*tz)*x+c
    return a * (- 1 / x**2) + c
def linear_j_and_tz_dependence(x, a, b, c, d, *constants):
    """Linear fit with slope corrections from quantum numbers j and tz.

    With no fit *constants* supplied, falls back to a*x + c.
    """
    if constants:
        const_list, const_dict = constants
        quantum_numbers = const_dict['qnums']
        j = quantum_numbers.j
        tz = quantum_numbers.tz
        return a * x + b * j * x + c * tz * x + d
    return a * x + c
def linear_fit_linear_n_j_tz_ephw_dependence_common_zero(
        x, c, c1, c3, c4, c5, d, *constants):
    """Linear fit through a fitted common zero *d*, with slope corrections
    from n, j, tz and (e + hw) when fit *constants* are supplied."""
    xp = x - d
    if not constants:
        return np.polyval([c, 0], xp)
    const_list, const_dict = constants
    qnums, e, hw = const_list[0:3]
    n, l, j, tz = qnums
    return np.polyval([c+c1*n+c3*j+c4*tz+c5*(e+hw), 0], xp)
def quadratic_j_and_tz_dependence(x, a, b, c, d, e, f, g, *constants):
    """Quadratic fit whose leading and linear coefficients carry j and tz
    corrections; without fit *constants* it is a*x^2 + d*x + g."""
    if constants:
        const_list, const_dict = constants
        qnums = const_dict['qnums']
        j = qnums.j
        tz = qnums.tz
        return np.polyval([a + b*j + c*tz, d + e*j + f*tz, g], x)
    return np.polyval([a, d, g], x)
# Fit function generators
def asymptote1_with_forced_zero(zero):
    """Return fit function a*(1/zero - 1/x): a 1/x asymptote that vanishes
    at x = *zero*; extra fit constants are ignored."""
    def fn(x, a, *constants):
        return a * (1 / zero - 1 / x)
    fn.__name__ = 'asymptote1_with_forced_zero_at_{z}'.format(z=zero)
    return fn
def asymptote2_with_forced_zero(zero):
    """Return fit function a*(1/zero^2 - 1/x^2): a 1/x^2 asymptote that
    vanishes at x = *zero*; extra fit constants are ignored."""
    def fn(x, a, *constants):
        return a * (1 / zero ** 2 - 1 / x ** 2)
    fn.__name__ = 'asymptote2_with_forced_zero_at_{z}'.format(z=zero)
    return fn
def asymptote_n_with_forced_zero(zero):
    """Return fit function a*(1/zero^n - 1/x^n) with fitted power n; the
    curve vanishes at x = *zero*; extra fit constants are ignored."""
    def fn(x, a, n, *constants):
        return a * (1 / zero ** n - 1 / x ** n)
    fn.__name__ = 'asymptote_n_with_forced_zero_at_{z}'.format(z=zero)
    return fn
def asymptote2_asymptotic_y0_dependence_with_forced_zero(zero):
    """1/x^2 asymptote pinned at *zero* whose amplitude gains an a1*y0
    correction when fit constants are supplied."""
    def fn(x, a, a1, *constants):
        if constants:
            const_list, const_dict = constants
            y0 = const_dict['y0']
            return (a + a1*y0) * (1/zero**2 - 1/x**2)
        return a * (1 / zero**2 - 1 / x**2)
    fn.__name__ = ('asymptote2_asymptotic_y0_dependence_with_forced_zero'
                   '_at_{}'.format(zero))
    return fn
def asymptote2_asymptotic_y0pzbt0_dependence_with_forced_zero(zero):
    """1/x^2 asymptote pinned at *zero* whose amplitude gains an
    a1*(y0 + zbt0) correction when fit constants are supplied."""
    def fn(x, a, a1, *constants):
        if constants:
            const_list, const_dict = constants
            y0 = const_dict['y0']
            zbt0 = const_dict['zbt_arr'][0]
            return (a + a1*(y0+zbt0)) * (1/zero**2 - 1/x**2)
        return a * (1 / zero**2 - 1 / x**2)
    fn.__name__ = ('asymptote2_asymptotic_y0pzbt0_dependence_with_forced_zero'
                   '_at_{}'.format(zero))
    return fn
def asymptote2_asymptotic_joff2_dependence_with_forced_zero(zero):
    """1/x^2 asymptote pinned at *zero* whose amplitude gains an
    a1*joff2 correction, joff2 = (j-1)*|j-1|, when constants are supplied."""
    def fn(x, a, a1, *constants):
        if constants:
            const_list, const_dict = constants
            qnums = const_dict['qnums']
            n, l, j, tz = qnums
            joff2 = (j - 1) * abs(j - 1)
            return (a + a1*joff2) * (1/zero**2 - 1/x**2)
        return a * (1 / zero**2 - 1 / x**2)
    fn.__name__ = ('asymptote2_asymptotic_joff2_dependence_with_forced_zero'
                   '_at_{}'.format(zero))
    return fn
def asymptote2_asymptotic_and_linear_y0_dependence_with_forced_zero(zero):
    """1/x^2 asymptote pinned at *zero* with y0-scaled corrections to both
    the amplitude (a1) and a linear term (b1) when constants are supplied."""
    def fn(x, a, a1, b1, *constants):
        if constants:
            const_list, const_dict = constants
            y0 = const_dict['y0']
            return (a + a1*y0) * (1/zero**2 - 1/x**2) + b1*y0*(x-zero)
        return a * (1 / zero**2 - 1 / x**2)
    fn.__name__ = ('asymptote2_asymptotic_and_linear_y0_dependence_with_'
                   'forced_zero'
                   '_at_{}'.format(zero))
    return fn
def asymptote2_linear_y0_dependence_with_forced_zero(zero):
    """1/x^2 asymptote pinned at *zero* plus a (b1*y0)-scaled linear term
    when fit constants are supplied."""
    def fn(x, a, b1, *constants):
        if constants:
            const_list, const_dict = constants
            y0 = const_dict['y0']
            return a * (1/zero**2 - 1/x**2) + (b1*y0) * (x-zero)
        return a * (1 / zero**2 - 1 / x**2)
    fn.__name__ = ('asymptote2_linear_y0_dependence_with_forced_zero'
                   '_at_{}'.format(zero))
    return fn
def asymptote2_linear_y0pzbt0_dependence_with_forced_zero(zero):
    """1/x^2 asymptote pinned at *zero* plus a b1*(y0 + zbt0)-scaled linear
    term when fit constants are supplied."""
    def fn(x, a, b1, *constants):
        if constants:
            const_list, const_dict = constants
            y0 = const_dict['y0']
            zbt0 = const_dict['zbt_arr'][0]
            return a * (1/zero**2 - 1/x**2) + b1*(y0+zbt0)*(x-zero)
        return a * (1 / zero**2 - 1 / x**2)
    fn.__name__ = ('asymptote2_linear_y0pzbt0_dependence_with_forced_zero'
                   '_at_{}'.format(zero))
    return fn
def asymptote2_linear_y0_zbt0_dependence_with_forced_zero(zero):
    """1/x^2 asymptote pinned at *zero* plus a (b1*y0 + b2*zbt0)-scaled
    linear term when fit constants are supplied."""
    def fn(x, a, b1, b2, *constants):
        if constants:
            const_list, const_dict = constants
            y0 = const_dict['y0']
            zbt0 = const_dict['zbt_arr'][0]
            return a*(1/zero**2 - 1/x**2) + (b1*y0 + b2*zbt0)*(x-zero)
        return a * (1 / zero**2 - 1 / x**2)
    fn.__name__ = ('asymptote2_linear_y0_zbt0_dependence_with_forced_zero'
                   '_at_{}'.format(zero))
    return fn
def asymptote2_linear_j_tz_dependence_with_forced_zero(zero):
    """1/x^2 asymptote pinned at *zero* plus a (b*j + c*tz)-scaled linear
    term when fit constants are supplied."""
    def fn(x, a, b, c, *constants):
        if constants:
            const_list, const_dict = constants
            qnums = const_dict['qnums']
            n, l, j, tz = qnums
            return a * (1/zero**2 - 1/x**2) + (b*j + c*tz) * (x-zero)
        return a * (1 / zero**2 - 1 / x**2)
    fn.__name__ = ('asymptote2_linear_j_tz_dependence_with_forced_zero'
                   '_at_{}'.format(zero))
    return fn
def asymptote1_linear_joff2_tz_dependence_with_forced_zero(zero):
    """1/x asymptote pinned at *zero* plus a (b1*joff2 + b2*tz)-scaled
    linear term, joff2 = (j-1)*|j-1|, when fit constants are supplied."""
    def fn(x, a, b1, b2, *constants):
        if constants:
            const_list, const_dict = constants
            qnums = const_dict['qnums']
            n, l, j, tz = qnums
            xp = x - zero
            joff2 = (j-1) * abs(j-1)
            return a*(1/zero-1/x)+(b1*joff2+b2*tz)*xp
        return a * (1 / zero - 1 / x)
    fn.__name__ = ('asymptote1_linear_joff2_tz_dependence_with_forced_zero'
                   '_at_{}'.format(zero))
    return fn
def asymptote2_linear_joff2_tz_dependence_with_forced_zero(zero):
    """1/x^2 asymptote pinned at *zero* plus a (b1*joff2 + b2*tz)-scaled
    linear term, joff2 = (j-1)*|j-1|, when fit constants are supplied."""
    def fn(x, a, b1, b2, *constants):
        if constants:
            const_list, const_dict = constants
            qnums = const_dict['qnums']
            n, l, j, tz = qnums
            xp = x - zero
            joff2 = (j-1) * abs(j-1)
            return a*(1/zero**2-1/x**2)+(b1*joff2+b2*tz)*xp
        return a * (1 / zero**2 - 1 / x**2)
    fn.__name__ = ('asymptote2_linear_joff2_tz_dependence_with_forced_zero'
                   '_at_{}'.format(zero))
    return fn
def asymptote12_linear_joff2_tz_dependence_with_forced_zero(zero):
    """Combined 1/x^2 and 1/x asymptotes pinned at *zero* plus a
    (c1*joff2 + c2*tz)-scaled linear term when fit constants are supplied."""
    def fn(x, a, b, c1, c2, *constants):
        if constants:
            const_list, const_dict = constants
            qnums = const_dict['qnums']
            n, l, j, tz = qnums
            xp = x - zero
            joff2 = (j-1) * abs(j-1)
            return a*(1/zero**2-1/x**2)+b*(1/zero-1/x)+(c1*joff2+c2*tz)*xp
        return a*(1/zero**2 - 1/x**2) + b*(1/zero - 1/x)
    fn.__name__ = ('asymptote12_linear_joff2_tz_dependence_with_forced_zero'
                   '_at_{}'.format(zero))
    return fn
def asymptote2_linear_j_y0_dependence_with_forced_zero(zero):
    """1/x^2 asymptote pinned at *zero* plus a (b1*j + b2*y0)-scaled linear
    term when fit constants are supplied."""
    def fn(x, a, b1, b2, *constants):
        if constants:
            const_list, const_dict = constants
            qnums = const_dict['qnums']
            y0 = const_dict['y0']
            n, l, j, tz = qnums
            return a * (1/zero**2 - 1/x**2) + (b1*j + b2*y0) * (x-zero)
        return a * (1 / zero**2 - 1 / x**2)
    fn.__name__ = ('asymptote2_linear_j_y0_dependence_with_forced_zero'
                   '_at_{}'.format(zero))
    return fn
def asymptote2_linear_joff2_y0_dependence_with_forced_zero(zero):
    """1/x^2 asymptote pinned at *zero* plus a (b1*joff2 + b2*y0)-scaled
    linear term, joff2 = (j-1)*|j-1|, when fit constants are supplied."""
    def fn(x, a, b1, b2, *constants):
        if constants:
            const_list, const_dict = constants
            qnums = const_dict['qnums']
            y0 = const_dict['y0']
            n, l, j, tz = qnums
            joff2 = (j - 1) * abs(j - 1)
            return a * (1/zero**2 - 1/x**2) + (b1*joff2 + b2*y0) * (x-zero)
        return a * (1 / zero**2 - 1 / x**2)
    fn.__name__ = ('asymptote2_linear_joff2_y0_dependence_with_forced_zero'
                   '_at_{}'.format(zero))
    return fn
def asymptote2_linear_j_tz_y0_dependence_with_forced_zero(zero):
    """1/x^2 asymptote pinned at *zero* plus a (b1*j + b2*tz + b3*y0)-scaled
    linear term when fit constants are supplied."""
    def fn(x, a, b1, b2, b3, *constants):
        if constants:
            const_list, const_dict = constants
            qnums = const_dict['qnums']
            y0 = const_dict['y0']
            n, l, j, tz = qnums
            return a * (1/zero**2 - 1/x**2) + (b1*j + b2*tz + b3*y0) * (x-zero)
        return a * (1 / zero**2 - 1 / x**2)
    fn.__name__ = ('asymptote2_linear_j_tz_y0_dependence_with_forced_zero'
                   '_at_{}'.format(zero))
    return fn
def asymptote2_quadratic_j_linear_tz_dependence_with_forced_zero(zero):
    """1/x^2 asymptote pinned at *zero* plus a (b*j)-scaled quadratic term
    and a (c1*j + c2*tz)-scaled linear term when fit constants are supplied.

    Here qnums are taken positionally from const_list[0]."""
    def fn(x, a, b, c1, c2, *constants):
        if constants:
            const_list, const_dict = constants
            qnums = const_list[0]
            n, l, j, tz = qnums
            xp = x - zero
            return a*(1/zero**2-1/x**2)+(b*j)*xp**2+(c1*j+c2*tz)*xp
        return a * (1 / zero**2 - 1 / x**2)
    fn.__name__ = ('asymptote2_quadratic_j_linear_tz_dependence_with_forced_'
                   'zero'
                   '_at_{}'.format(zero))
    return fn
def asymptote2_linear_with_linear_joff2_tz_dependence_with_forced_zero(zero):
    """1/x^2 asymptote pinned at *zero* plus a (b + b1*joff2 + b2*tz)-scaled
    linear term when fit constants are supplied (qnums from const_list[0])."""
    def fn(x, a, b, b1, b2, *constants):
        if constants:
            const_list, const_dict = constants
            qnums = const_list[0]
            n, l, j, tz = qnums
            xp = x - zero
            joff2 = (j-1) * abs(j-1)
            return a*(1/zero**2-1/x**2)+(b+b1*joff2+b2*tz)*xp
        return a * (1 / zero**2 - 1 / x**2)
    fn.__name__ = ('asymptote2_linear_with_linear_joff2_tz_dependence_with'
                   '_forced_zero'
                   '_at_{}'.format(zero))
    return fn
def asymptote2_linear_jjoff_tz_dependence_with_forced_zero(zero):
    """1/x^2 asymptote pinned at *zero* plus a (b1*jjoff + b2*tz)-scaled
    linear term, jjoff = (j-1)*j, when fit constants are supplied."""
    def fn(x, a, b1, b2, *constants):
        if constants:
            const_list, const_dict = constants
            qnums = const_list[0]
            n, l, j, tz = qnums
            xp = x - zero
            jjoff = (j-1) * j
            return a*(1/zero**2-1/x**2)+(b1*jjoff+b2*tz)*xp
        return a * (1 / zero**2 - 1 / x**2)
    fn.__name__ = ('asymptote2_linear_jjoff_tz_dependence_with_forced_zero'
                   '_at_{}'.format(zero))
    return fn
def asymptote2_quadratic_with_linear_joff2_tz_dependence_with_forced_zero(zero):
    """1/x^2 asymptote pinned at *zero* plus a quadratic b-term and a
    (c + c1*joff2 + c2*tz)-scaled linear term when constants are supplied."""
    def fn(x, a, b, c, c1, c2, *constants):
        if constants:
            const_list, const_dict = constants
            qnums = const_dict['qnums']
            n, l, j, tz = qnums
            xp = x - zero
            joff2 = (j-1) * abs(j-1)
            return a*(1/zero**2-1/x**2)+b*xp**2+(c+c1*joff2+c2*tz)*xp
        return a * (1 / zero**2 - 1 / x**2)
    fn.__name__ = ('asymptote2_quadratic_with_linear_joff2_tz_dependence_with'
                   '_forced_zero'
                   '_at_{}'.format(zero))
    return fn
def linear_fit_linear_n_j_tz_ephw_dependence_with_forced_zero(zero):
    """Linear fit through x = *zero* whose slope gains n, j, tz and (e+hw)
    corrections when fit constants are supplied."""
    def fn(x, c, c1, c3, c4, c5,
           *constants):
        xp = x - zero
        if constants:
            const_list, const_dict = constants
            qnums, e, hw = const_list[0:3]
            n, l, j, tz = qnums
            return np.polyval([c+c1*n+c3*j+c4*tz+c5*(e+hw), 0], xp)
        return np.polyval([c, 0], xp)
    fn.__name__ = ('linear_fit_linear_n_j_tz_ephw_dependence_with_forced_zero'
                   '_at_{z}'.format(z=zero))
    return fn
def quadratic_j_and_tz_dependence_with_forced_zero(zero):
    """Quadratic fit through x = *zero* with j/tz corrections to both the
    quadratic and linear coefficients (qnums from const_list[0])."""
    def fn(x, a, b, c, d, e, f, *constants):
        if constants:
            const_list, const_dict = constants
            qnums = const_list[0]
            j = qnums.j
            tz = qnums.tz
            return (a + b*j + c*tz)*(x-zero)**2 + (d + e*j + f*tz)*(x-zero)
        return a*(x - zero)**2 + d*(x - zero)
    fn.__name__ = ('quadratic_j_and_tz_dependence_with_force_zero_at_'
                   '{z}'.format(z=zero))
    return fn
def quadratic_fit_linear_j_tz_dependence_with_forced_zero(zero):
    """Quadratic fit through x = *zero* whose linear coefficient gains j/tz
    corrections (qnums from const_list[0])."""
    def fn(x, a, d, e, f, *constants):
        if constants:
            const_list, const_dict = constants
            qnums = const_list[0]
            j = qnums.j
            tz = qnums.tz
            return a*(x-zero)**2 + (d + e*j + f*tz)*(x-zero)
        return a*(x - zero)**2 + d*(x - zero)
    fn.__name__ = ('quadratic_fit_linear_j_tz_dependence_with_forced_zero_at_'
                   '{z}'.format(z=zero))
    return fn
# noinspection PyPep8
def quadratic_fit_quadratic_e_hw_linear_n_j_tz_dependence_with_forced_zero(zero):
    """Quadratic fit through x = *zero*: e/hw corrections on the quadratic
    coefficient and n/j/tz/e/hw corrections on the linear coefficient."""
    def fn(x, c, c1, c2, d, d1, d3, d4, d5, d6,
           *constants):
        xp = x - zero
        if constants:
            const_list, const_dict = constants
            qnums, e, hw = const_list[0:3]
            n, l, j, tz = qnums
            return np.polyval([c+c1*e+c2*hw, d+d1*n+d3*j+d4*tz+d5*e+d6*hw, 0],
                              xp)
        return np.polyval([c, d, 0], xp)
    fn.__name__ = ('quadratic_fit_quadratic_e_hw_linear_n_j_tz_'
                   'dependence_with_forced_zero'
                   '_at_{z}'.format(z=zero))
    return fn
def quadratic_fit_quadratic_j_tz_linear_ephw_dependence_with_forced_zero(zero):
    """Quadratic fit through x = *zero*: j/tz corrections on the quadratic
    coefficient and an (e+hw) correction on the linear coefficient."""
    def fn(x, c, c1, c2, d, d1,
           *constants):
        xp = x - zero
        if constants:
            const_list, const_dict = constants
            qnums, e, hw = const_list[0:3]
            n, l, j, tz = qnums
            return np.polyval([c+c1*j+c2*tz,
                               d+d1*(e+hw),
                               0],
                              xp)
        return np.polyval([c, d, 0], xp)
    fn.__name__ = ('quadratic_fit_quadratic_j_tz_linear_ephw_'
                   'dependence_with_forced_zero'
                   '_at_{z}'.format(z=zero))
    return fn
# noinspection PyPep8
def quadratic_fit_quadratic_j_tz_linear_n_ephw_dependence_with_forced_zero(zero):
    """Quadratic fit through x = *zero*: j/tz corrections on the quadratic
    coefficient; (e+hw) and n corrections on the linear coefficient."""
    def fn(x, c, c1, c2, d, d1, d3,
           *constants):
        xp = x - zero
        if constants:
            const_list, const_dict = constants
            qnums, e, hw = const_list[0:3]
            n, l, j, tz = qnums
            return np.polyval([c+c1*j+c2*tz, d+d1*(e+hw)+d3*n, 0], xp)
        return np.polyval([c, d, 0], xp)
    fn.__name__ = ('quadratic_fit_quadratic_j_tz_linear_n_ephw_'
                   'dependence_with_forced_zero'
                   '_at_{z}'.format(z=zero))
    return fn
# noinspection PyPep8
def quadratic_fit_quadratic_j_tz_linear_n_e_hw_dependence_with_forced_zero(zero):
    """Quadratic fit through x = *zero*: j/tz corrections on the quadratic
    coefficient; n/j/tz/e/hw corrections on the linear coefficient."""
    def fn(x, c, c1, c2, d, d1, d3, d4, d5, d6,
           *constants):
        xp = x - zero
        if constants:
            const_list, const_dict = constants
            qnums, e, hw = const_list[0:3]
            n, l, j, tz = qnums
            return np.polyval([c+c1*j+c2*tz, d+d1*n+d3*j+d4*tz+d5*e+d6*hw, 0],
                              xp)
        return np.polyval([c, d, 0], xp)
    fn.__name__ = ('quadratic_fit_quadratic_j_tz_linear_n_e_hw_'
                   'dependence_with_forced_zero'
                   '_at_{z}'.format(z=zero))
    return fn
# noinspection PyPep8
def quadratic_fit_quadratic_n_j_tz_linear_e_hw_dependence_with_forced_zero(zero):
    """Quadratic fit through x = *zero*: n/j/tz corrections on the quadratic
    coefficient; n/j/tz/e/hw corrections on the linear coefficient."""
    def fn(x, c, c1, c3, c4, d, d1, d3, d4, d5, d6,
           *constants):
        xp = x - zero
        if constants:
            const_list, const_dict = constants
            qnums, e, hw = const_list[0:3]
            n, l, j, tz = qnums
            return np.polyval([c+c1*n+c3*j+c4*tz,
                               d+d1*n+d3*j+d4*tz+d5*e+d6*hw,
                               0],
                              xp)
        return np.polyval([c, d, 0], xp)
    fn.__name__ = ('quadratic_fit_quadratic_n_j_tz_linear_e_hw_'
                   'dependence_with_forced_zero'
                   '_at_{z}'.format(z=zero))
    return fn
def cubic_fit_linear_j_tz_dependence_with_forced_zero(zero):
    """Cubic fit through x = *zero* whose linear coefficient gains j/tz
    corrections (qnums from const_list[0])."""
    def fn(x, a, b, d, e, f, *constants):
        if constants:
            const_list, const_dict = constants
            qnums = const_list[0]
            j = qnums.j
            tz = qnums.tz
            xp = x - zero
            return np.polyval([a, b, d + e*j + f*tz, 0], xp)
        return a*(x - zero)**3 + b*(x-zero)**2 + d*(x - zero)
    fn.__name__ = ('cubic_fit_linear_j_tz_dependence_with_forced_zero_at_'
                   '{z}'.format(z=zero))
    return fn
def cubic_fit_linear_n_j_tz_e_hw_dependence_with_forced_zero(zero):
    """Cubic fit through x = *zero* whose linear coefficient gains
    n/j/tz/e/hw corrections when fit constants are supplied."""
    def fn(x, a, b, d, d1, d3, d4, d5, d6,
           *constants):
        xp = x - zero
        if constants:
            const_list, const_dict = constants
            qnums, e, hw = const_list[0:3]
            n, l, j, tz = qnums
            return np.polyval([a, b, d+d1*n+d3*j+d4*tz+d5*e+d6*hw, 0], xp)
        return np.polyval([a, b, d, 0], xp)
    fn.__name__ = ('cubic_fit_linear_n_j_tz_e_hw_dependence_with_forced_zero'
                   '_at_{z}'.format(z=zero))
    return fn
def poly4_fit_linear_j_tz_dependence_with_forced_zero(zero):
    """Quartic fit through x = *zero* whose linear coefficient gains j/tz
    corrections (qnums from const_list[0])."""
    def fn(x, a, b, c, d, e, f, *constants):
        xp = x - zero
        if constants:
            const_list, const_dict = constants
            qnums = const_list[0]
            j = qnums.j
            tz = qnums.tz
            return np.polyval([a, b, c, d + e*j + f*tz, 0], xp)
        return np.polyval([a, b, c, d, 0], xp)
    fn.__name__ = ('poly4_fit_linear_j_tz_dependence_with_forced_zero_at_'
                   '{z}'.format(z=zero))
    return fn
def poly4_fit_linear_j_tz_jtz_dependence_with_forced_zero(zero):
    """Quartic fit through x = *zero* whose linear coefficient gains j, tz
    and j*tz cross-term corrections (qnums from const_list[0])."""
    def fn(x, a, b, c, d, d1, d2, d12, *constants):
        xp = x - zero
        if constants:
            const_list, const_dict = constants
            qnums = const_list[0]
            j = qnums.j
            tz = qnums.tz
            return np.polyval([a, b, c, d + d1*j + d2*tz + d12*j*tz, 0], xp)
        return np.polyval([a, b, c, d, 0], xp)
    fn.__name__ = ('poly4_fit_linear_j_tz_jtz_dependence_with_forced_zero'
                   '_at_{z}'.format(z=zero))
    return fn
def poly4_fit_linear_n_j_tz_dependence_with_forced_zero(zero):
    """Quartic fit through x = *zero* whose linear coefficient gains
    n/j/tz corrections (qnums tuple from const_list[0])."""
    def fn(x, a, b, c, d, d1, d3, d4, *constants):
        xp = x - zero
        if constants:
            const_list, const_dict = constants
            qnums = const_list[0]
            n, l, j, tz = qnums
            return np.polyval([a, b, c, d+d1*n+d3*j+d4*tz, 0], xp)
        return np.polyval([a, b, c, d, 0], xp)
    fn.__name__ = ('poly4_fit_linear_n_j_tz_dependence_with_forced_zero'
                   '_at_{z}'.format(z=zero))
    return fn
def poly4_fit_quadratic_j_tz_dependence_with_forced_zero(zero):
    """Quartic fit through x = *zero* with j/tz corrections on both the
    quadratic and linear coefficients (qnums from const_list[0])."""
    def fn(x, a, b, c, c1, c2, d, d1, d2, *constants):
        xp = x - zero
        if constants:
            const_list, const_dict = constants
            qnums = const_list[0]
            j = qnums.j
            tz = qnums.tz
            return np.polyval([a, b, c+c1*j+c2*tz, d+d1*j+d2*tz, 0], xp)
        return np.polyval([a, b, c, d, 0], xp)
    fn.__name__ = ('poly4_fit_quadratic_j_tz_dependence_with_forced_zero'
                   '_at_{z}'.format(z=zero))
    return fn
def poly4_fit_quadratic_j_tz_linear_n_dependence_with_forced_zero(zero):
    """Quartic fit through x = *zero*: j/tz corrections on the quadratic
    coefficient; n/j/tz corrections on the linear coefficient."""
    def fn(x, a, b, c, c1, c2, d, d1, d3, d4,
           *constants):
        xp = x - zero
        if constants:
            const_list, const_dict = constants
            qnums, e, hw = const_list[0:3]
            n, l, j, tz = qnums
            return np.polyval([a, b, c+c1*j+c2*tz, d+d1*n+d3*j+d4*tz, 0], xp)
        return np.polyval([a, b, c, d, 0], xp)
    fn.__name__ = ('poly4_fit_quadratic_j_tz_linear_n_'
                   'dependence_with_forced_zero'
                   '_at_{z}'.format(z=zero))
    return fn
def poly4_fit_quadratic_n_j_tz_dependence_with_forced_zero(zero):
    """Quartic fit through x = *zero* with n/j/tz corrections on both the
    quadratic and linear coefficients (qnums tuple from const_list[0])."""
    def fn(x, a, b, c, c1, c3, c4, d, d1, d3, d4, *constants):
        xp = x - zero
        if constants:
            const_list, const_dict = constants
            qnums = const_list[0]
            n, l, j, tz = qnums
            return np.polyval([a, b, c+c1*n+c3*j+c4*tz, d+d1*n+d3*j+d4*tz, 0],
                              xp)
        return np.polyval([a, b, c, d, 0], xp)
    fn.__name__ = ('poly4_fit_quadratic_n_j_tz_dependence_with_forced_zero'
                   '_at_{z}'.format(z=zero))
    return fn
def poly4_fit_linear_n_j_tz_e_hw_dependence_with_forced_zero(zero):
    """Quartic fit forced through x=zero; linear coefficient gains
    linear corrections in n, j, tz, e, and hw.
    """
    def fn(x, a, b, c, d, d1, d3, d4, d5, d6, *constants):
        shifted = x - zero
        if not constants:
            return np.polyval([a, b, c, d, 0], shifted)
        const_list, const_dict = constants
        qnums = const_list[0]
        e = const_list[1]
        hw = const_list[2]
        n, l, j, tz = qnums
        lin = d + d1 * n + d3 * j + d4 * tz + d5 * e + d6 * hw
        return np.polyval([a, b, c, lin, 0], shifted)
    fn.__name__ = ('poly4_fit_linear_n_j_tz_e_hw_dependence_with_forced_zero'
                   '_at_{z}'.format(z=zero))
    return fn
def poly4_fit_quadratic_j_tz_linear_n_ephw_dependence_with_forced_zero(zero):
    """Quartic fit forced through x=zero; quadratic coefficient depends
    linearly on j and tz, linear coefficient on n, j, tz, and the sum
    (e + hw) with a single shared strength d5.
    """
    def fn(x, a, b, c, c1, c2, d, d1, d3, d4, d5,
           *constants):
        shifted = x - zero
        if not constants:
            return np.polyval([a, b, c, d, 0], shifted)
        const_list, const_dict = constants
        qnums, e, hw = const_list[0:3]
        n, l, j, tz = qnums
        quad = c + c1 * j + c2 * tz
        lin = d + d1 * n + d3 * j + d4 * tz + d5 * (e + hw)
        return np.polyval([a, b, quad, lin, 0], shifted)
    fn.__name__ = ('poly4_fit_quadratic_j_tz_linear_n_ephw_'
                   'dependence_with_forced_zero'
                   '_at_{z}'.format(z=zero))
    return fn
def poly4_fit_quadratic_j_tz_linear_n_e_hw_dependence_with_forced_zero(zero):
    """Quartic fit forced through x=zero; quadratic coefficient depends
    linearly on j and tz, linear coefficient on n, j, tz, e, and hw
    with independent strengths.
    """
    def fn(x, a, b, c, c1, c2, d, d1, d3, d4, d5, d6,
           *constants):
        shifted = x - zero
        if not constants:
            return np.polyval([a, b, c, d, 0], shifted)
        const_list, const_dict = constants
        qnums, e, hw = const_list[0:3]
        n, l, j, tz = qnums
        quad = c + c1 * j + c2 * tz
        lin = d + d1 * n + d3 * j + d4 * tz + d5 * e + d6 * hw
        return np.polyval([a, b, quad, lin, 0], shifted)
    fn.__name__ = ('poly4_fit_quadratic_j_tz_linear_n_e_hw_'
                   'dependence_with_forced_zero'
                   '_at_{z}'.format(z=zero))
    return fn
def poly4_fit_quadratic_n_j_tz_linear_ephw_dependence_with_forced_zero(zero):
    """Quartic fit forced through x=zero; quadratic coefficient gains
    linear n, j, tz corrections; linear coefficient additionally
    depends on (e + hw) with a single shared strength d5.
    """
    def fn(x, a, b, c, c1, c3, c4, d, d1, d3, d4, d5,
           *constants):
        shifted = x - zero
        if not constants:
            return np.polyval([a, b, c, d, 0], shifted)
        const_list, const_dict = constants
        qnums, e, hw = const_list[0:3]
        n, l, j, tz = qnums
        quad = c + c1 * n + c3 * j + c4 * tz
        lin = d + d1 * n + d3 * j + d4 * tz + d5 * (e + hw)
        return np.polyval([a, b, quad, lin, 0], shifted)
    fn.__name__ = ('poly4_fit_quadratic_n_j_tz_linear_ephw_'
                   'dependence_with_forced_zero'
                   '_at_{z}'.format(z=zero))
    return fn
def poly4_fit_quadratic_n_j_tz_linear_e_hw_dependence_with_forced_zero(zero):
    """Quartic fit forced through x=zero; quadratic coefficient gains
    linear n, j, tz corrections; linear coefficient gains independent
    n, j, tz, e, and hw corrections.
    """
    def fn(x, a, b, c, c1, c3, c4, d, d1, d3, d4, d5, d6,
           *constants):
        shifted = x - zero
        if not constants:
            return np.polyval([a, b, c, d, 0], shifted)
        const_list, const_dict = constants
        qnums, e, hw = const_list[0:3]
        n, l, j, tz = qnums
        quad = c + c1 * n + c3 * j + c4 * tz
        lin = d + d1 * n + d3 * j + d4 * tz + d5 * e + d6 * hw
        return np.polyval([a, b, quad, lin, 0], shifted)
    fn.__name__ = ('poly4_fit_quadratic_n_j_tz_linear_e_hw_'
                   'dependence_with_forced_zero'
                   '_at_{z}'.format(z=zero))
    return fn
def poly4_fit_quadratic_n_j_tz_e_hw_dependence_with_forced_zero(zero):
    """Quartic fit forced through x=zero; both quadratic and linear
    coefficients gain independent linear corrections in n, j, tz, e,
    and hw.
    """
    def fn(x, a, b, c, c1, c3, c4, c5, c6, d, d1, d3, d4, d5, d6,
           *constants):
        shifted = x - zero
        if not constants:
            return np.polyval([a, b, c, d, 0], shifted)
        const_list, const_dict = constants
        qnums, e, hw = const_list[0:3]
        n, l, j, tz = qnums
        quad = c + c1 * n + c3 * j + c4 * tz + c5 * e + c6 * hw
        lin = d + d1 * n + d3 * j + d4 * tz + d5 * e + d6 * hw
        return np.polyval([a, b, quad, lin, 0], shifted)
    fn.__name__ = ('poly4_fit_quadratic_n_j_tz_e_hw_'
                   'dependence_with_forced_zero'
                   '_at_{z}'.format(z=zero))
    return fn
| |
# Copyright 2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy, copy
from datetime import date, datetime
import logging
import six
import warnings
from cassandra import util
from cassandra.cqltypes import DateType, SimpleDateType
from cassandra.cqlengine import ValidationError
from cassandra.cqlengine.functions import get_total_seconds
log = logging.getLogger(__name__)
class BaseValueManager(object):
    """Tracks a single column value on a model instance, remembering the
    value it started with so changes and deletions can be detected."""

    def __init__(self, instance, column, value):
        self.instance = instance
        self.column = column
        self.previous_value = deepcopy(value)
        self.value = value
        self.explicit = False

    @property
    def deleted(self):
        """True when a previously-set value has been cleared to None."""
        return self.previous_value is not None and self.value is None

    @property
    def changed(self):
        """
        Indicates whether or not this value has changed.

        :rtype: boolean
        """
        return self.previous_value != self.value

    def reset_previous_value(self):
        self.previous_value = copy(self.value)

    def getval(self):
        return self.value

    def setval(self, val):
        self.value = val

    def delval(self):
        self.value = None

    def get_property(self):
        """Build a ``property`` that routes attribute access on the model
        through this manager; the deleter is exposed only when the column
        allows deletion."""
        getter = lambda slf: self.getval()
        setter = lambda slf, val: self.setval(val)
        deleter = lambda slf: self.delval()
        if self.column.can_delete:
            return property(getter, setter, deleter)
        return property(getter, setter)
class Column(object):
    """Base class for all cqlengine column types.

    Subclasses set ``db_type`` and override ``validate`` / ``to_python``
    / ``to_database`` to map python values to CQL values.
    """
    # the cassandra type this column maps to
    db_type = None
    value_manager = BaseValueManager
    # class-level counter used to preserve the order columns were
    # declared in on a model (see self.position in __init__)
    instance_counter = 0
    primary_key = False
    """
    bool flag, indicates this column is a primary key. The first primary key defined
    on a model is the partition key (unless partition keys are set), all others are cluster keys
    """
    partition_key = False
    """
    indicates that this column should be the partition key, defining
    more than one partition key column creates a compound partition key
    """
    index = False
    """
    bool flag, indicates an index should be created for this column
    """
    db_field = None
    """
    the fieldname this field will map to in the database
    """
    default = None
    """
    the default value, can be a value or a callable (no args)
    """
    required = False
    """
    boolean, is the field required? Model validation will raise and
    exception if required is set to True and there is a None value assigned
    """
    clustering_order = None
    """
    only applicable on clustering keys (primary keys that are not partition keys)
    determines the order that the clustering keys are sorted on disk
    """
    polymorphic_key = False
    """
    *Deprecated*
    see :attr:`~.discriminator_column`
    """
    discriminator_column = False
    """
    boolean, if set to True, this column will be used for discriminating records
    of inherited models.
    Should only be set on a column of an abstract model being used for inheritance.
    There may only be one discriminator column per model. See :attr:`~.__discriminator_value__`
    for how to specify the value of this column on specialized models.
    """
    static = False
    """
    boolean, if set to True, this is a static column, with a single value per partition
    """
    def __init__(self,
                 primary_key=False,
                 partition_key=False,
                 index=False,
                 db_field=None,
                 default=None,
                 required=False,
                 clustering_order=None,
                 polymorphic_key=False,
                 discriminator_column=False,
                 static=False):
        """Record the column options; see the class-level attribute
        documentation above for the meaning of each parameter."""
        self.partition_key = partition_key
        # a partition key is implicitly a primary key
        self.primary_key = partition_key or primary_key
        self.index = index
        self.db_field = db_field
        self.default = default
        self.required = required
        self.clustering_order = clustering_order
        if polymorphic_key:
            msg = "polymorphic_key is deprecated. Use discriminator_column instead."
            warnings.warn(msg, DeprecationWarning)
            log.warning(msg)
        # honor the deprecated polymorphic_key flag, and keep both
        # attributes in sync for callers still reading the old name
        self.discriminator_column = discriminator_column or polymorphic_key
        self.polymorphic_key = self.discriminator_column
        # the column name in the model definition
        self.column_name = None
        self.static = static
        self.value = None
        # keep track of instantiation order
        self.position = Column.instance_counter
        Column.instance_counter += 1
    def validate(self, value):
        """
        Returns a cleaned and validated value. Raises a ValidationError
        if there's a problem
        """
        if value is None:
            if self.required:
                raise ValidationError('{0} - None values are not allowed'.format(self.column_name or self.db_field))
        return value
    def to_python(self, value):
        """
        Converts data from the database into python values
        raises a ValidationError if the value can't be converted
        """
        return value
    def to_database(self, value):
        """
        Converts python value into database value
        """
        if value is None and self.has_default:
            return self.get_default()
        return value
    @property
    def has_default(self):
        # NOTE: a default of None is indistinguishable from "no default"
        return self.default is not None
    @property
    def is_primary_key(self):
        return self.primary_key
    @property
    def can_delete(self):
        # primary key columns may not be deleted from a row
        return not self.primary_key
    def get_default(self):
        """Resolve the default, calling it if it is a callable."""
        if self.has_default:
            if callable(self.default):
                return self.default()
            else:
                return self.default
    def get_column_def(self):
        """
        Returns a column definition for CQL table definition
        """
        # NOTE(review): for non-static columns this yields a trailing
        # space ('"name" type ') -- harmless in CQL, but worth knowing
        static = "static" if self.static else ""
        return '{0} {1} {2}'.format(self.cql, self.db_type, static)
    # TODO: make columns use cqltypes under the hood
    # until then, this bridges the gap in using types along with cassandra.metadata for CQL generation
    def cql_parameterized_type(self):
        return self.db_type
    def set_column_name(self, name):
        """
        Sets the column name during document class construction
        This value will be ignored if db_field is set in __init__
        """
        self.column_name = name
    @property
    def db_field_name(self):
        """ Returns the name of the cql name of this column """
        return self.db_field or self.column_name
    @property
    def db_index_name(self):
        """ Returns the name of the cql index """
        return 'index_{0}'.format(self.db_field_name)
    @property
    def cql(self):
        return self.get_cql()
    def get_cql(self):
        # double-quote the identifier to preserve case in CQL
        return '"{0}"'.format(self.db_field_name)
    def _val_is_null(self, val):
        """ determines if the given value equates to a null value for the given column type """
        return val is None
    @property
    def sub_columns(self):
        # collection/UDT columns override this to expose nested columns
        return []
class Blob(Column):
    """
    Stores a raw binary value
    """
    db_type = 'blob'

    def to_database(self, value):
        """Coerce a binary value to a bytearray for the database.

        Raises for any non-binary input; note this check runs before
        ``Column.to_database``'s default handling, so a None value is
        rejected here rather than replaced by a default.
        """
        if not isinstance(value, (six.binary_type, bytearray)):
            raise Exception("expecting a binary, got a %s" % type(value))
        # was ``super(Bytes, self)``: that worked only because of the
        # module-level ``Bytes = Blob`` alias; refer to this class
        # directly so the alias can be removed safely
        val = super(Blob, self).to_database(value)
        return bytearray(val)

    def to_python(self, value):
        return value
Bytes = Blob  # backwards-compatible alias for the old column type name
class Ascii(Column):
    """
    Stores a US-ASCII character string

    Maps to the CQL ``ascii`` type; no extra validation is performed.
    """
    db_type = 'ascii'
class Inet(Column):
    """
    Stores an IP address in IPv4 or IPv6 format

    Maps to the CQL ``inet`` type; no extra validation is performed.
    """
    db_type = 'inet'
class Text(Column):
    """
    Stores a UTF-8 encoded string
    """
    db_type = 'text'

    def __init__(self, min_length=None, max_length=None, **kwargs):
        """
        :param int min_length: Sets the minimum length of this string, for validation purposes.
            Defaults to 1 if this is a ``required`` column. Otherwise, None.
        :param int max_length: Sets the maximum length of this string, for validation purposes.
        """
        # (fixed docstring typo: was "max_lemgth")
        self.min_length = min_length or (1 if kwargs.get('required', False) else None)
        self.max_length = max_length
        super(Text, self).__init__(**kwargs)

    def validate(self, value):
        """Validate type and, if configured, min/max length."""
        value = super(Text, self).validate(value)
        if value is None:
            return
        # value is known non-None here; the original repeated an
        # ``and value is not None`` check that could never fire
        if not isinstance(value, (six.string_types, bytearray)):
            raise ValidationError('{0} {1} is not a string'.format(self.column_name, type(value)))
        if self.max_length:
            if len(value) > self.max_length:
                raise ValidationError('{0} is longer than {1} characters'.format(self.column_name, self.max_length))
        if self.min_length:
            if len(value) < self.min_length:
                raise ValidationError('{0} is shorter than {1} characters'.format(self.column_name, self.min_length))
        return value
class Integer(Column):
    """
    Stores a 32-bit signed integer value
    """
    db_type = 'int'

    def validate(self, value):
        """Coerce to ``int``, raising ValidationError on failure."""
        cleaned = super(Integer, self).validate(value)
        if cleaned is None:
            return
        try:
            return int(cleaned)
        except (TypeError, ValueError):
            raise ValidationError("{0} {1} can't be converted to integral value".format(self.column_name, value))

    def to_python(self, value):
        return self.validate(value)

    def to_database(self, value):
        return self.validate(value)
class TinyInt(Integer):
    """
    Stores an 8-bit signed integer value

    .. versionadded:: 2.6.0

    requires C* 2.2+ and protocol v4+
    """
    db_type = 'tinyint'
class SmallInt(Integer):
    """
    Stores a 16-bit signed integer value

    .. versionadded:: 2.6.0

    requires C* 2.2+ and protocol v4+
    """
    db_type = 'smallint'
class BigInt(Integer):
    """
    Stores a 64-bit signed integer value

    Validation is inherited unchanged from :class:`Integer`.
    """
    db_type = 'bigint'
class VarInt(Column):
    """
    Stores an arbitrary-precision integer
    """
    db_type = 'varint'

    def validate(self, value):
        """Coerce to ``int``, raising ValidationError on failure."""
        cleaned = super(VarInt, self).validate(value)
        if cleaned is None:
            return
        try:
            return int(cleaned)
        except (TypeError, ValueError):
            raise ValidationError(
                "{0} {1} can't be converted to integral value".format(self.column_name, value))

    def to_python(self, value):
        return self.validate(value)

    def to_database(self, value):
        return self.validate(value)
class CounterValueManager(BaseValueManager):
    """Value manager for counter columns; missing values count as zero."""

    def __init__(self, instance, column, value):
        super(CounterValueManager, self).__init__(instance, column, value)
        # counters are numeric deltas: normalize falsy values to 0
        if not self.value:
            self.value = 0
        if not self.previous_value:
            self.previous_value = 0
class Counter(Integer):
    """
    Stores a counter that can be incremented and decremented
    """
    # (fixed docstring typo: was "inremented")
    db_type = 'counter'
    value_manager = CounterValueManager

    def __init__(self,
                 index=False,
                 db_field=None,
                 required=False):
        """Counter columns can never be (part of) the primary key and
        always default to 0, so only a subset of options is accepted."""
        super(Counter, self).__init__(
            primary_key=False,
            partition_key=False,
            index=index,
            db_field=db_field,
            default=0,
            required=required,
        )
class DateTime(Column):
    """
    Stores a datetime value
    """
    db_type = 'timestamp'
    def to_python(self, value):
        """Convert a DB value to ``datetime``.

        Accepts datetimes, dates, unix timestamps, and (as a fallback)
        raw serialized bytes via ``DateType.deserialize``.
        """
        if value is None:
            return
        if isinstance(value, datetime):
            return value
        elif isinstance(value, date):
            # promote a plain date to midnight of that day
            return datetime(*(value.timetuple()[:6]))
        try:
            return datetime.utcfromtimestamp(value)
        except TypeError:
            # not a number: assume raw serialized bytes
            return datetime.utcfromtimestamp(DateType.deserialize(value))
    def to_database(self, value):
        """Encode as integer milliseconds since the unix epoch."""
        value = super(DateTime, self).to_database(value)
        if value is None:
            return
        if not isinstance(value, datetime):
            if isinstance(value, date):
                value = datetime(value.year, value.month, value.day)
            else:
                raise ValidationError("{0} '{1}' is not a datetime object".format(self.column_name, value))
        # build an epoch carrying the same tzinfo as the value so the
        # subtraction below is legal for both naive and aware datetimes;
        # the utcoffset is then removed explicitly
        epoch = datetime(1970, 1, 1, tzinfo=value.tzinfo)
        offset = get_total_seconds(epoch.tzinfo.utcoffset(epoch)) if epoch.tzinfo else 0
        return int((get_total_seconds(value - epoch) - offset) * 1000)
class Date(Column):
    """
    Stores a simple date, with no time-of-day

    .. versionchanged:: 2.6.0
        removed overload of Date and DateTime. DateTime is a drop-in replacement for legacy models

    requires C* 2.2+ and protocol v4+
    """
    db_type = 'date'

    def to_database(self, value):
        value = super(Date, self).to_database(value)
        if value is None:
            return
        # encode as the integer wire format rather than a string, since
        # some representable dates fall outside datetime's string range
        if not isinstance(value, util.Date):
            value = util.Date(value)
        return value.days_from_epoch + SimpleDateType.EPOCH_OFFSET_DAYS
class Time(Column):
    """
    Stores a timezone-naive time-of-day, with nanosecond precision

    .. versionadded:: 2.6.0

    requires C* 2.2+ and protocol v4+
    """
    db_type = 'time'

    def to_database(self, value):
        value = super(Time, self).to_database(value)
        if value is None:
            return
        # str(util.Time) yields the desired CQL encoding, so return a
        # util.Time instance, wrapping the value when necessary
        if isinstance(value, util.Time):
            return value
        return util.Time(value)
class UUID(Column):
    """
    Stores a type 1 or 4 UUID
    """
    db_type = 'uuid'

    def validate(self, value):
        """Return a ``uuid.UUID``, parsing string input when needed."""
        from uuid import UUID as _UUID
        val = super(UUID, self).validate(value)
        if val is None:
            return
        if isinstance(val, _UUID):
            return val
        try:
            if isinstance(val, six.string_types):
                return _UUID(val)
        except ValueError:
            # unparseable string -- fall through to the error below
            pass
        raise ValidationError("{0} {1} is not a valid uuid".format(
            self.column_name, value))

    def to_python(self, value):
        return self.validate(value)

    def to_database(self, value):
        return self.validate(value)
from uuid import UUID as pyUUID, getnode
class TimeUUID(UUID):
    """
    UUID containing timestamp
    """
    db_type = 'timeuuid'

    @classmethod
    def from_datetime(cls, dt):
        """
        generates a UUID for a given datetime

        :param dt: datetime
        :type dt: datetime
        :return: uuid1

        .. deprecated:: 2.6.0
            Use :func:`cassandra.util.uuid_from_time`
        """
        # first parameter renamed self -> cls: this is a classmethod,
        # so it receives the class, not an instance
        msg = "cqlengine.columns.TimeUUID.from_datetime is deprecated. Use cassandra.util.uuid_from_time instead."
        warnings.warn(msg, DeprecationWarning)
        log.warning(msg)
        return util.uuid_from_time(dt)
class Boolean(Column):
    """
    Stores a boolean True or False value
    """
    db_type = 'boolean'

    def validate(self, value):
        """ Always returns a Python boolean. """
        value = super(Boolean, self).validate(value)
        if value is None:
            return None
        return bool(value)

    def to_python(self, value):
        return self.validate(value)
class BaseFloat(Column):
    """Shared validation logic for the floating point column types."""

    def validate(self, value):
        """Coerce to ``float``, raising ValidationError on failure."""
        val = super(BaseFloat, self).validate(value)
        if val is None:
            return
        try:
            return float(val)
        except (TypeError, ValueError):
            raise ValidationError("{0} {1} is not a valid float".format(self.column_name, val))

    def to_python(self, value):
        return self.validate(value)

    def to_database(self, value):
        return self.validate(value)
class Float(BaseFloat):
    """
    Stores a single-precision floating-point value
    """
    db_type = 'float'
    def __init__(self, double_precision=None, **kwargs):
        # NOTE(review): this branch also runs when double_precision is
        # left at its default of None, so a plain Float() maps to the
        # CQL 'double' type and emits a DeprecationWarning -- which
        # contradicts the class docstring and db_type above. That may
        # be deliberate backwards compatibility with older cqlengine
        # behavior; confirm before changing.
        if double_precision is None or bool(double_precision):
            msg = "Float(double_precision=True) is deprecated. Use Double() type instead."
            double_precision = True
            warnings.warn(msg, DeprecationWarning)
            log.warning(msg)
        self.db_type = 'double' if double_precision else 'float'
        super(Float, self).__init__(**kwargs)
class Double(BaseFloat):
    """
    Stores a double-precision floating-point value

    Validation is inherited unchanged from :class:`BaseFloat`.
    """
    db_type = 'double'
class Decimal(Column):
    """
    Stores a variable precision decimal value
    """
    db_type = 'decimal'

    def validate(self, value):
        """Coerce to ``decimal.Decimal``, raising ValidationError on failure."""
        from decimal import Decimal as _Decimal
        from decimal import InvalidOperation
        val = super(Decimal, self).validate(value)
        if val is None:
            return
        try:
            # floats go through repr to avoid binary-fraction noise
            if isinstance(val, float):
                return _Decimal(repr(val))
            return _Decimal(val)
        except InvalidOperation:
            raise ValidationError("{0} '{1}' can't be coerced to decimal".format(self.column_name, val))

    def to_python(self, value):
        return self.validate(value)

    def to_database(self, value):
        return self.validate(value)
class BaseContainerColumn(Column):
    """
    Base Container type for collection-like columns.

    https://cassandra.apache.org/doc/cql3/CQL.html#collections
    """
    def __init__(self, value_type, **kwargs):
        """
        :param value_type: a column class indicating the types of the value
        """
        # value_type may be given as a column class or a column
        # instance; pick the matching containment check
        inheritance_comparator = issubclass if isinstance(value_type, type) else isinstance
        if not inheritance_comparator(value_type, Column):
            raise ValidationError('value_type must be a column class')
        if inheritance_comparator(value_type, BaseContainerColumn):
            raise ValidationError('container types cannot be nested')
        if value_type.db_type is None:
            raise ValidationError('value_type cannot be an abstract column type')
        # normalize: keep both the class (value_type) and an instance
        # (value_col) regardless of which form was supplied
        if isinstance(value_type, type):
            self.value_type = value_type
            self.value_col = self.value_type()
        else:
            self.value_col = value_type
            self.value_type = self.value_col.__class__
        super(BaseContainerColumn, self).__init__(**kwargs)
    def validate(self, value):
        """Enforce the protocol-level collection size limit."""
        value = super(BaseContainerColumn, self).validate(value)
        # It is dangerous to let collections have more than 65535.
        # See: https://issues.apache.org/jira/browse/CASSANDRA-5428
        if value is not None and len(value) > 65535:
            raise ValidationError("{0} Collection can't have more than 65535 elements.".format(self.column_name))
        return value
    def _val_is_null(self, val):
        # empty collections are treated as null
        return not val
    @property
    def sub_columns(self):
        return [self.value_col]
class Set(BaseContainerColumn):
    """
    Stores a set of unordered, unique values

    http://www.datastax.com/documentation/cql/3.1/cql/cql_using/use_set_t.html
    """

    def __init__(self, value_type, strict=True, default=set, **kwargs):
        """
        :param value_type: a column class indicating the types of the value
        :param strict: sets whether non set values will be coerced to set
            type on validation, or raise a validation error, defaults to True
        """
        self.strict = strict
        self.db_type = 'set<{0}>'.format(value_type.db_type)
        super(Set, self).__init__(value_type, default=default, **kwargs)

    def validate(self, value):
        """Validate the container and every element within it."""
        val = super(Set, self).validate(value)
        if val is None:
            return
        acceptable = (set,) if self.strict else (set, list, tuple)
        if not isinstance(val, acceptable):
            if self.strict:
                raise ValidationError('{0} {1} is not a set object'.format(self.column_name, val))
            raise ValidationError('{0} {1} cannot be coerced to a set object'.format(self.column_name, val))
        if None in val:
            raise ValidationError("{0} None not allowed in a set".format(self.column_name))
        return {self.value_col.validate(v) for v in val}

    def to_python(self, value):
        if value is None:
            return set()
        return {self.value_col.to_python(v) for v in value}

    def to_database(self, value):
        if value is None:
            return None
        return {self.value_col.to_database(v) for v in value}
class List(BaseContainerColumn):
    """
    Stores a list of ordered values

    http://www.datastax.com/documentation/cql/3.1/cql/cql_using/use_list_t.html
    """

    def __init__(self, value_type, default=list, **kwargs):
        """
        :param value_type: a column class indicating the types of the value
        """
        self.db_type = 'list<{0}>'.format(value_type.db_type)
        # was ``return super(...).__init__(...)``: __init__ must not
        # propagate a return value, so the ``return`` was dropped
        super(List, self).__init__(value_type=value_type, default=default, **kwargs)

    def validate(self, value):
        """Validate the container and every element within it."""
        val = super(List, self).validate(value)
        if val is None:
            return
        if not isinstance(val, (set, list, tuple)):
            raise ValidationError('{0} {1} is not a list object'.format(self.column_name, val))
        if None in val:
            raise ValidationError("{0} None is not allowed in a list".format(self.column_name))
        return [self.value_col.validate(v) for v in val]

    def to_python(self, value):
        if value is None:
            return []
        return [self.value_col.to_python(v) for v in value]

    def to_database(self, value):
        if value is None:
            return None
        return [self.value_col.to_database(v) for v in value]
class Map(BaseContainerColumn):
    """
    Stores a key -> value map (dictionary)

    http://www.datastax.com/documentation/cql/3.1/cql/cql_using/use_map_t.html
    """

    def __init__(self, key_type, value_type, default=dict, **kwargs):
        """
        :param key_type: a column class indicating the types of the key
        :param value_type: a column class indicating the types of the value
        """
        self.db_type = 'map<{0}, {1}>'.format(key_type.db_type, value_type.db_type)
        # validate key_type the same way BaseContainerColumn validates
        # value_type: it may be either a column class or an instance
        inheritance_comparator = issubclass if isinstance(key_type, type) else isinstance
        if not inheritance_comparator(key_type, Column):
            raise ValidationError('key_type must be a column class')
        if inheritance_comparator(key_type, BaseContainerColumn):
            raise ValidationError('container types cannot be nested')
        if key_type.db_type is None:
            raise ValidationError('key_type cannot be an abstract column type')
        if isinstance(key_type, type):
            self.key_type = key_type
            self.key_col = self.key_type()
        else:
            self.key_col = key_type
            self.key_type = self.key_col.__class__
        super(Map, self).__init__(value_type, default=default, **kwargs)

    def validate(self, value):
        """Validate the dict and every key/value pair within it."""
        val = super(Map, self).validate(value)
        if val is None:
            return
        if not isinstance(val, dict):
            raise ValidationError('{0} {1} is not a dict object'.format(self.column_name, val))
        # NOTE: ``None in val`` only inspects the keys, not the values
        if None in val:
            raise ValidationError("{0} None is not allowed in a map".format(self.column_name))
        return dict((self.key_col.validate(k), self.value_col.validate(v)) for k, v in val.items())

    def to_python(self, value):
        if value is None:
            return {}
        # (removed an unreachable ``if value is not None`` re-check;
        # value is known to be non-None at this point)
        return dict((self.key_col.to_python(k), self.value_col.to_python(v)) for k, v in value.items())

    def to_database(self, value):
        if value is None:
            return None
        return dict((self.key_col.to_database(k), self.value_col.to_database(v)) for k, v in value.items())

    @property
    def sub_columns(self):
        return [self.key_col, self.value_col]
class UDTValueManager(BaseValueManager):
    """Value manager aware of field-level mutations inside a UDT value."""

    @property
    def changed(self):
        # the UDT instance itself tracks modifications to its fields,
        # so consult it in addition to the whole-value comparison
        return self.value != self.previous_value or self.value.has_changed_fields()

    def reset_previous_value(self):
        self.value.reset_changed_fields()
        self.previous_value = copy(self.value)
class UserDefinedType(Column):
    """
    User Defined Type column

    http://www.datastax.com/documentation/cql/3.1/cql/cql_using/cqlUseUDT.html

    These columns are represented by a specialization of :class:`cassandra.cqlengine.usertype.UserType`.
    Please see :ref:`user_types` for examples and discussion.
    """
    value_manager = UDTValueManager

    def __init__(self, user_type, **kwargs):
        """
        :param type user_type: specifies the :class:`~.cqlengine.usertype.UserType` model of the column
        """
        self.user_type = user_type
        # UDT columns must be frozen in CQL
        self.db_type = "frozen<{0}>".format(user_type.type_name())
        super(UserDefinedType, self).__init__(**kwargs)

    @property
    def sub_columns(self):
        return list(self.user_type._fields.values())
def resolve_udts(col_def, out_list):
    """Depth-first append every user type reachable from *col_def* to *out_list*."""
    for sub in col_def.sub_columns:
        resolve_udts(sub, out_list)
    if isinstance(col_def, UserDefinedType):
        out_list.append(col_def.user_type)
class _PartitionKeysToken(Column):
    """
    virtual column representing token of partition columns.
    Used by filter(pk__token=Token(...)) filters
    """

    def __init__(self, model):
        self.partition_columns = model._partition_keys.values()
        super(_PartitionKeysToken, self).__init__(partition_key=True)

    @property
    def db_field_name(self):
        return 'token({0})'.format(', '.join(['"{0}"'.format(c.db_field_name) for c in self.partition_columns]))

    def to_database(self, value):
        # import from the namespaced package, consistent with the
        # ``cassandra.cqlengine`` imports at the top of this module
        # (the bare ``cqlengine`` path is the obsolete standalone name)
        from cassandra.cqlengine.functions import Token
        assert isinstance(value, Token)
        value.set_columns(self.partition_columns)
        return value

    def get_cql(self):
        return "token({0})".format(", ".join(c.cql for c in self.partition_columns))
| |
import os
import re
import sys
import time
from nose import SkipTest
from nose.tools import raises
from random import randint
from textwrap import dedent
from rbtools.clients import RepositoryInfo
from rbtools.clients.git import GitClient
from rbtools.clients.mercurial import MercurialClient
from rbtools.clients.perforce import PerforceClient
from rbtools.clients.svn import SVNRepositoryInfo
from rbtools.tests import OptionsStub
from rbtools.utils.filesystem import load_config_files
from rbtools.utils.process import execute
from rbtools.utils.testbase import RBTestBase
class SCMClientTests(RBTestBase):
    """Base class for SCM client test cases; provides stubbed options."""

    def setUp(self):
        self.options = OptionsStub()
class GitClientTests(SCMClientTests):
TESTSERVER = "http://127.0.0.1:8080"
def _gitcmd(self, command, env=None, split_lines=False,
ignore_errors=False, extra_ignore_errors=(),
translate_newlines=True, git_dir=None):
if git_dir:
full_command = ['git', '--git-dir=%s/.git' % git_dir]
else:
full_command = ['git']
full_command.extend(command)
return execute(full_command, env, split_lines, ignore_errors,
extra_ignore_errors, translate_newlines)
def _git_add_file_commit(self, file, data, msg):
"""Add a file to a git repository with the content of data
and commit with msg.
"""
foo = open(file, 'w')
foo.write(data)
foo.close()
self._gitcmd(['add', file])
self._gitcmd(['commit', '-m', msg])
def setUp(self):
super(GitClientTests, self).setUp()
if not self.is_exe_in_path('git'):
raise SkipTest('git not found in path')
self.git_dir = self.chdir_tmp()
self._gitcmd(['init'], git_dir=self.git_dir)
foo = open(os.path.join(self.git_dir, 'foo.txt'), 'w')
foo.write(FOO)
foo.close()
self._gitcmd(['add', 'foo.txt'])
self._gitcmd(['commit', '-m', 'initial commit'])
self.clone_dir = self.chdir_tmp(self.git_dir)
self._gitcmd(['clone', self.git_dir, self.clone_dir])
self.client = GitClient(options=self.options)
self.user_config = {}
self.configs = []
self.client.user_config = self.user_config
self.client.configs = self.configs
self.options.parent_branch = None
def test_get_repository_info_simple(self):
"""Test GitClient get_repository_info, simple case"""
ri = self.client.get_repository_info()
self.assert_(isinstance(ri, RepositoryInfo))
self.assertEqual(ri.base_path, '')
self.assertEqual(ri.path.rstrip("/.git"), self.git_dir)
self.assertTrue(ri.supports_parent_diffs)
self.assertFalse(ri.supports_changesets)
def test_scan_for_server_simple(self):
"""Test GitClient scan_for_server, simple case"""
ri = self.client.get_repository_info()
server = self.client.scan_for_server(ri)
self.assert_(server is None)
def test_scan_for_server_reviewboardrc(self):
"Test GitClient scan_for_server, .reviewboardrc case"""
rc = open(os.path.join(self.clone_dir, '.reviewboardrc'), 'w')
rc.write('REVIEWBOARD_URL = "%s"' % self.TESTSERVER)
rc.close()
self.client.user_config, configs = load_config_files(self.clone_dir)
ri = self.client.get_repository_info()
server = self.client.scan_for_server(ri)
self.assertEqual(server, self.TESTSERVER)
def test_scan_for_server_property(self):
"""Test GitClient scan_for_server using repo property"""
self._gitcmd(['config', 'reviewboard.url', self.TESTSERVER])
ri = self.client.get_repository_info()
self.assertEqual(self.client.scan_for_server(ri), self.TESTSERVER)
    def test_diff_simple(self):
        """Test GitClient simple diff case"""
        # expected unified diff for the FOO -> FOO1 change (the literal
        # must match git's output byte-for-byte, hashes included)
        diff = "diff --git a/foo.txt b/foo.txt\n" \
               "index 634b3e8ff85bada6f928841a9f2c505560840b3a..5e98e9540e1" \
               "b741b5be24fcb33c40c1c8069c1fb 100644\n" \
               "--- a/foo.txt\n" \
               "+++ b/foo.txt\n" \
               "@@ -6,7 +6,4 @@ multa quoque et bello passus, dum conderet u" \
               "rbem,\n" \
               " inferretque deos Latio, genus unde Latinum,\n" \
               " Albanique patres, atque altae moenia Romae.\n" \
               " Musa, mihi causas memora, quo numine laeso,\n" \
               "-quidve dolens, regina deum tot volvere casus\n" \
               "-insignem pietate virum, tot adire labores\n" \
               "-impulerit. Tantaene animis caelestibus irae?\n" \
               " \n"
        self.client.get_repository_info()
        self._git_add_file_commit('foo.txt', FOO1, 'delete and modify stuff')
        self.assertEqual(self.client.diff(None), (diff, None))
    def test_diff_simple_multiple(self):
        """Test GitClient simple diff with multiple commits case"""
        # cumulative diff across three commits (FOO -> FOO3); only the
        # combined result is asserted, so the commit messages repeat
        diff = "diff --git a/foo.txt b/foo.txt\n" \
               "index 634b3e8ff85bada6f928841a9f2c505560840b3a..63036ed3fca" \
               "fe870d567a14dd5884f4fed70126c 100644\n" \
               "--- a/foo.txt\n" \
               "+++ b/foo.txt\n" \
               "@@ -1,12 +1,11 @@\n" \
               " ARMA virumque cano, Troiae qui primus ab oris\n" \
               "+ARMA virumque cano, Troiae qui primus ab oris\n" \
               " Italiam, fato profugus, Laviniaque venit\n" \
               " litora, multum ille et terris iactatus et alto\n" \
               " vi superum saevae memorem Iunonis ob iram;\n" \
               "-multa quoque et bello passus, dum conderet urbem,\n" \
               "+dum conderet urbem,\n" \
               " inferretque deos Latio, genus unde Latinum,\n" \
               " Albanique patres, atque altae moenia Romae.\n" \
               "+Albanique patres, atque altae moenia Romae.\n" \
               " Musa, mihi causas memora, quo numine laeso,\n" \
               "-quidve dolens, regina deum tot volvere casus\n" \
               "-insignem pietate virum, tot adire labores\n" \
               "-impulerit. Tantaene animis caelestibus irae?\n" \
               " \n"
        self.client.get_repository_info()
        self._git_add_file_commit('foo.txt', FOO1, 'commit 1')
        self._git_add_file_commit('foo.txt', FOO2, 'commit 1')
        self._git_add_file_commit('foo.txt', FOO3, 'commit 1')
        self.assertEqual(self.client.diff(None), (diff, None))
    def test_diff_branch_diverge(self):
        """Test GitClient diff with divergent branches"""
        # diff1: expected on 'mybranch' after commits FOO1 + FOO2
        diff1 = "diff --git a/foo.txt b/foo.txt\n" \
                "index 634b3e8ff85bada6f928841a9f2c505560840b3a..e619c1387f" \
                "5feb91f0ca83194650bfe4f6c2e347 100644\n" \
                "--- a/foo.txt\n" \
                "+++ b/foo.txt\n" \
                "@@ -1,4 +1,6 @@\n" \
                " ARMA virumque cano, Troiae qui primus ab oris\n" \
                "+ARMA virumque cano, Troiae qui primus ab oris\n" \
                "+ARMA virumque cano, Troiae qui primus ab oris\n" \
                " Italiam, fato profugus, Laviniaque venit\n" \
                " litora, multum ille et terris iactatus et alto\n" \
                " vi superum saevae memorem Iunonis ob iram;\n" \
                "@@ -6,7 +8,4 @@ multa quoque et bello passus, dum conderet " \
                "urbem,\n" \
                " inferretque deos Latio, genus unde Latinum,\n" \
                " Albanique patres, atque altae moenia Romae.\n" \
                " Musa, mihi causas memora, quo numine laeso,\n" \
                "-quidve dolens, regina deum tot volvere casus\n" \
                "-insignem pietate virum, tot adire labores\n" \
                "-impulerit. Tantaene animis caelestibus irae?\n" \
                " \n"
        # diff2: expected back on 'master', which only has the FOO1 commit
        diff2 = "diff --git a/foo.txt b/foo.txt\n" \
                "index 634b3e8ff85bada6f928841a9f2c505560840b3a..5e98e9540e1" \
                "b741b5be24fcb33c40c1c8069c1fb 100644\n" \
                "--- a/foo.txt\n" \
                "+++ b/foo.txt\n" \
                "@@ -6,7 +6,4 @@ multa quoque et bello passus, dum conderet "\
                "urbem,\n" \
                " inferretque deos Latio, genus unde Latinum,\n" \
                " Albanique patres, atque altae moenia Romae.\n" \
                " Musa, mihi causas memora, quo numine laeso,\n" \
                "-quidve dolens, regina deum tot volvere casus\n" \
                "-insignem pietate virum, tot adire labores\n" \
                "-impulerit. Tantaene animis caelestibus irae?\n" \
                " \n"
        self._git_add_file_commit('foo.txt', FOO1, 'commit 1')
        self._gitcmd(['checkout', '-b', 'mybranch', '--track',
                      'origin/master'])
        self._git_add_file_commit('foo.txt', FOO2, 'commit 2')
        self.client.get_repository_info()
        self.assertEqual(self.client.diff(None), (diff1, None))
        self._gitcmd(['checkout', 'master'])
        # re-scan after switching branches so the client picks up the
        # new HEAD before diffing
        self.client.get_repository_info()
        self.assertEqual(self.client.diff(None), (diff2, None))
def test_diff_tracking_no_origin(self):
    """Test GitClient diff with a tracking branch, but no origin remote"""
    # FOO -> FOO1 removes the final three lines of the passage.
    diff = "diff --git a/foo.txt b/foo.txt\n" \
           "index 634b3e8ff85bada6f928841a9f2c505560840b3a..5e98e9540e1b" \
           "741b5be24fcb33c40c1c8069c1fb 100644\n" \
           "--- a/foo.txt\n" \
           "+++ b/foo.txt\n" \
           "@@ -6,7 +6,4 @@ multa quoque et bello passus, dum conderet " \
           "urbem,\n" \
           " inferretque deos Latio, genus unde Latinum,\n" \
           " Albanique patres, atque altae moenia Romae.\n" \
           " Musa, mihi causas memora, quo numine laeso,\n" \
           "-quidve dolens, regina deum tot volvere casus\n" \
           "-insignem pietate virum, tot adire labores\n" \
           "-impulerit. Tantaene animis caelestibus irae?\n" \
           " \n"

    # The tracked remote is deliberately named 'quux', not 'origin'.
    self._gitcmd(['remote', 'add', 'quux', self.git_dir])
    self._gitcmd(['fetch', 'quux'])
    self._gitcmd(['checkout', '-b', 'mybranch', '--track', 'quux/master'])

    self._git_add_file_commit('foo.txt', FOO1, 'delete and modify stuff')
    self.client.get_repository_info()

    self.assertEqual(self.client.diff(None), (diff, None))
def test_diff_local_tracking(self):
    """Test GitClient diff with a local tracking branch"""
    # FOO -> FOO2: two ARMA lines added at the top, last three lines
    # removed.
    diff = "diff --git a/foo.txt b/foo.txt\n" \
           "index 634b3e8ff85bada6f928841a9f2c505560840b3a..e619c1387f5" \
           "feb91f0ca83194650bfe4f6c2e347 100644\n" \
           "--- a/foo.txt\n" \
           "+++ b/foo.txt\n" \
           "@@ -1,4 +1,6 @@\n" \
           " ARMA virumque cano, Troiae qui primus ab oris\n" \
           "+ARMA virumque cano, Troiae qui primus ab oris\n" \
           "+ARMA virumque cano, Troiae qui primus ab oris\n" \
           " Italiam, fato profugus, Laviniaque venit\n" \
           " litora, multum ille et terris iactatus et alto\n" \
           " vi superum saevae memorem Iunonis ob iram;\n" \
           "@@ -6,7 +8,4 @@ multa quoque et bello passus, dum conderet " \
           "urbem,\n" \
           " inferretque deos Latio, genus unde Latinum,\n" \
           " Albanique patres, atque altae moenia Romae.\n" \
           " Musa, mihi causas memora, quo numine laeso,\n" \
           "-quidve dolens, regina deum tot volvere casus\n" \
           "-insignem pietate virum, tot adire labores\n" \
           "-impulerit. Tantaene animis caelestibus irae?\n" \
           " \n"

    # The tracking branch here is the *local* 'master', not a remote ref.
    self._git_add_file_commit('foo.txt', FOO1, 'commit 1')
    self._gitcmd(['checkout', '-b', 'mybranch', '--track', 'master'])
    self._git_add_file_commit('foo.txt', FOO2, 'commit 2')
    self.client.get_repository_info()

    self.assertEqual(self.client.diff(None), (diff, None))
def test_diff_tracking_override(self):
    """Test GitClient diff with option override for tracking branch"""
    # FOO -> FOO1: only the three-line deletion.
    diff = "diff --git a/foo.txt b/foo.txt\n" \
           "index 634b3e8ff85bada6f928841a9f2c505560840b3a..5e98e9540e1" \
           "b741b5be24fcb33c40c1c8069c1fb 100644\n" \
           "--- a/foo.txt\n" \
           "+++ b/foo.txt\n" \
           "@@ -6,7 +6,4 @@ multa quoque et bello passus, dum conderet " \
           "urbem,\n" \
           " inferretque deos Latio, genus unde Latinum,\n" \
           " Albanique patres, atque altae moenia Romae.\n" \
           " Musa, mihi causas memora, quo numine laeso,\n" \
           "-quidve dolens, regina deum tot volvere casus\n" \
           "-insignem pietate virum, tot adire labores\n" \
           "-impulerit. Tantaene animis caelestibus irae?\n" \
           " \n"

    # The branch is configured to track 'bad/master', but the explicit
    # tracking option should take precedence over the branch config.
    self.options.tracking = 'origin/master'

    self._gitcmd(['remote', 'add', 'bad', self.git_dir])
    self._gitcmd(['fetch', 'bad'])
    self._gitcmd(['checkout', '-b', 'mybranch', '--track', 'bad/master'])

    self._git_add_file_commit('foo.txt', FOO1, 'commit 1')
    self.client.get_repository_info()

    self.assertEqual(self.client.diff(None), (diff, None))
def test_diff_slash_tracking(self):
    """
    Test GitClient diff with tracking branch that has slash in its name.
    """
    # FOO1 -> FOO2: just the two added ARMA lines at the top.
    diff = "diff --git a/foo.txt b/foo.txt\n" \
           "index 5e98e9540e1b741b5be24fcb33c40c1c8069c1fb..e619c1387f5f" \
           "eb91f0ca83194650bfe4f6c2e347 100644\n" \
           "--- a/foo.txt\n" \
           "+++ b/foo.txt\n" \
           "@@ -1,4 +1,6 @@\n" \
           " ARMA virumque cano, Troiae qui primus ab oris\n" \
           "+ARMA virumque cano, Troiae qui primus ab oris\n" \
           "+ARMA virumque cano, Troiae qui primus ab oris\n" \
           " Italiam, fato profugus, Laviniaque venit\n" \
           " litora, multum ille et terris iactatus et alto\n" \
           " vi superum saevae memorem Iunonis ob iram;\n"

    # Create upstream branch 'not-master' (FOO1) in the parent repo, then
    # a local branch whose own name contains a slash ('my/branch') that
    # tracks origin/not-master.
    os.chdir(self.git_dir)
    self._gitcmd(['checkout', '-b', 'not-master'])
    self._git_add_file_commit('foo.txt', FOO1, 'commit 1')

    os.chdir(self.clone_dir)
    self._gitcmd(['fetch', 'origin'])
    self._gitcmd(['checkout', '-b', 'my/branch', '--track',
                  'origin/not-master'])
    self._git_add_file_commit('foo.txt', FOO2, 'commit 2')
    self.client.get_repository_info()

    self.assertEqual(self.client.diff(None), (diff, None))
class MercurialTestBase(SCMClientTests):
    """Base class for Mercurial-backed test cases.

    Provides a helper to run ``hg`` with a controlled environment and a
    helper to write, add and commit a file in one step.
    """

    def setUp(self):
        super(MercurialTestBase, self).setUp()
        self._hg_env = {}

    def _hgcmd(self, command, split_lines=False,
               ignore_errors=False, extra_ignore_errors=(),
               translate_newlines=True, hg_dir=None):
        """Run an hg command (optionally against hg_dir); return output."""
        if hg_dir:
            full_command = ['hg', '--cwd', hg_dir]
        else:
            full_command = ['hg']

        # We're *not* doing `env = env or {}` here because
        # we want the caller to be able to *enable* reading
        # of user and system-level hgrc configuration.
        env = self._hg_env.copy()

        if not env:
            # Default: isolate tests from the user's hgrc and force
            # plain, uncustomized output.
            env = {
                'HGRCPATH': os.devnull,
                'HGPLAIN': '1',
            }

        full_command.extend(command)

        return execute(full_command, env, split_lines, ignore_errors,
                       extra_ignore_errors, translate_newlines)

    def _hg_add_file_commit(self, filename, data, msg):
        """Write data to filename, then 'hg add' and commit it with msg."""
        # Context manager ensures the handle is closed even if the write
        # fails (the bare open/write/close pattern leaked it on error).
        with open(filename, 'w') as outfile:
            outfile.write(data)
        self._hgcmd(['add', filename])
        self._hgcmd(['commit', '-m', msg])
class MercurialClientTests(MercurialTestBase):
    """Tests for MercurialClient against a local hg repository + clone."""

    TESTSERVER = 'http://127.0.0.1:8080'

    # hgrc written into the clone: points 'default' back at the parent
    # repo and configures the Review Board URL and git-style diffs.
    CLONE_HGRC = dedent("""
    [paths]
    default = %(hg_dir)s
    cloned = %(clone_dir)s

    [reviewboard]
    url = %(test_server)s

    [diff]
    git = true
    """).rstrip()

    def setUp(self):
        super(MercurialClientTests, self).setUp()
        if not self.is_exe_in_path('hg'):
            raise SkipTest('hg not found in path')

        self.hg_dir = self.chdir_tmp()
        self._hgcmd(['init'], hg_dir=self.hg_dir)

        # Seed the parent repository with one committed file.  Context
        # managers ensure handles are closed even when a write fails
        # (the open/write/close pattern leaked them on error).
        with open(os.path.join(self.hg_dir, 'foo.txt'), 'w') as foo:
            foo.write(FOO)

        self._hgcmd(['add', 'foo.txt'])
        self._hgcmd(['commit', '-m', 'initial commit'])

        self.clone_dir = self.chdir_tmp(self.hg_dir)
        self._hgcmd(['clone', self.hg_dir, self.clone_dir])
        self.client = MercurialClient(options=self.options)

        with open(self.clone_hgrc_path, 'wb') as clone_hgrc:
            clone_hgrc.write(self.CLONE_HGRC % {
                'hg_dir': self.hg_dir,
                'clone_dir': self.clone_dir,
                'test_server': self.TESTSERVER,
            })

        self.client.get_repository_info()
        self.user_config = {}
        self.configs = []
        self.client.user_config = self.user_config
        self.client.configs = self.configs
        self.options.parent_branch = None

    @property
    def clone_hgrc_path(self):
        """Path of the clone's .hg/hgrc file."""
        return os.path.join(self.clone_dir, '.hg', 'hgrc')

    @property
    def hgrc_path(self):
        """Path of the parent repository's .hg/hgrc file."""
        return os.path.join(self.hg_dir, '.hg', 'hgrc')

    def testGetRepositoryInfoSimple(self):
        """Test MercurialClient get_repository_info, simple case"""
        ri = self.client.get_repository_info()

        self.assertTrue(isinstance(ri, RepositoryInfo))
        self.assertEqual('', ri.base_path)

        hgpath = ri.path

        # Normalize: the reported path may point at the .hg directory.
        if os.path.basename(hgpath) == '.hg':
            hgpath = os.path.dirname(hgpath)

        self.assertEqual(self.hg_dir, hgpath)
        self.assertTrue(ri.supports_parent_diffs)
        self.assertFalse(ri.supports_changesets)

    def testScanForServerSimple(self):
        """Test MercurialClient scan_for_server, simple case"""
        # Disable the clone's hgrc so no server URL can be found there.
        os.rename(self.clone_hgrc_path,
                  os.path.join(self.clone_dir, '._disabled_hgrc'))

        self.client.hgrc = {}
        self.client._load_hgrc()
        ri = self.client.get_repository_info()

        server = self.client.scan_for_server(ri)

        self.assertTrue(server is None)

    def testScanForServerWhenPresentInHgrc(self):
        """Test MercurialClient scan_for_server when present in hgrc"""
        ri = self.client.get_repository_info()

        server = self.client.scan_for_server(ri)

        self.assertEqual(self.TESTSERVER, server)

    def testScanForServerReviewboardrc(self):
        """Test MercurialClient scan_for_server when in .reviewboardrc"""
        with open(os.path.join(self.clone_dir, '.reviewboardrc'), 'w') as rc:
            rc.write('REVIEWBOARD_URL = "%s"' % self.TESTSERVER)

        ri = self.client.get_repository_info()
        server = self.client.scan_for_server(ri)

        self.assertEqual(self.TESTSERVER, server)

    def testDiffSimple(self):
        """Test MercurialClient diff, simple case"""
        self.client.get_repository_info()

        self._hg_add_file_commit('foo.txt', FOO1, 'delete and modify stuff')

        diff_result = self.client.diff(None)
        self.assertEqual((EXPECTED_HG_DIFF_0, None), diff_result)

    def testDiffSimpleMultiple(self):
        """Test MercurialClient diff with multiple commits"""
        self.client.get_repository_info()

        self._hg_add_file_commit('foo.txt', FOO1, 'commit 1')
        self._hg_add_file_commit('foo.txt', FOO2, 'commit 2')
        self._hg_add_file_commit('foo.txt', FOO3, 'commit 3')

        diff_result = self.client.diff(None)

        self.assertEqual((EXPECTED_HG_DIFF_1, None), diff_result)

    def testDiffBranchDiverge(self):
        """Test MercurialClient diff with diverged branch"""
        self._hg_add_file_commit('foo.txt', FOO1, 'commit 1')

        # Diverge onto a named branch and commit FOO2 there.
        self._hgcmd(['branch', 'diverged'])
        self._hg_add_file_commit('foo.txt', FOO2, 'commit 2')
        self.client.get_repository_info()

        self.assertEqual((EXPECTED_HG_DIFF_2, None), self.client.diff(None))

        # Back on 'default' the diff reflects only FOO -> FOO1.
        self._hgcmd(['update', '-C', 'default'])
        self.client.get_repository_info()

        self.assertEqual((EXPECTED_HG_DIFF_3, None), self.client.diff(None))
class MercurialSubversionClientTests(MercurialTestBase):
TESTSERVER = "http://127.0.0.1:8080"
def __init__(self, *args, **kwargs):
self._tmpbase = ''
self.clone_dir = ''
self.svn_repo = ''
self.svn_checkout = ''
self.client = None
self._svnserve_pid = 0
self._max_svnserve_pid_tries = 12
self._svnserve_port = os.environ.get('SVNSERVE_PORT')
self._required_exes = ('svnadmin', 'svnserve', 'svn')
MercurialTestBase.__init__(self, *args, **kwargs)
def setUp(self):
super(MercurialSubversionClientTests, self).setUp()
self._hg_env = {'FOO': 'BAR'}
for exe in self._required_exes:
if not self.is_exe_in_path(exe):
raise SkipTest('missing svn stuff! giving up!')
if not self._has_hgsubversion():
raise SkipTest('unable to use `hgsubversion` extension! '
'giving up!')
if not self._tmpbase:
self._tmpbase = self.create_tmp_dir()
self._create_svn_repo()
self._fire_up_svnserve()
self._fill_in_svn_repo()
try:
self._get_testing_clone()
except (OSError, IOError):
msg = 'could not clone from svn repo! skipping...'
raise SkipTest(msg), None, sys.exc_info()[2]
self._spin_up_client()
self._stub_in_config_and_options()
def _has_hgsubversion(self):
output = self._hgcmd(['svn', '--help'],
ignore_errors=True, extra_ignore_errors=(255))
return not re.search("unknown command ['\"]svn['\"]", output, re.I)
def tearDown(self):
os.kill(self._svnserve_pid, 9)
def _svn_add_file_commit(self, filename, data, msg):
outfile = open(filename, 'w')
outfile.write(data)
outfile.close()
execute(['svn', 'add', filename])
execute(['svn', 'commit', '-m', msg])
def _create_svn_repo(self):
self.svn_repo = os.path.join(self._tmpbase, 'svnrepo')
execute(['svnadmin', 'create', self.svn_repo])
def _fire_up_svnserve(self):
if not self._svnserve_port:
self._svnserve_port = str(randint(30000, 40000))
pid_file = os.path.join(self._tmpbase, 'svnserve.pid')
execute(['svnserve', '--pid-file', pid_file, '-d',
'--listen-port', self._svnserve_port, '-r', self._tmpbase])
for i in range(0, self._max_svnserve_pid_tries):
try:
self._svnserve_pid = int(open(pid_file).read().strip())
return
except (IOError, OSError):
time.sleep(0.25)
# This will re-raise the last exception, which will be either
# IOError or OSError if the above fails and this branch is reached
raise
def _fill_in_svn_repo(self):
self.svn_checkout = os.path.join(self._tmpbase, 'checkout.svn')
execute(['svn', 'checkout', 'file://%s' % self.svn_repo,
self.svn_checkout])
os.chdir(self.svn_checkout)
for subtree in ('trunk', 'branches', 'tags'):
execute(['svn', 'mkdir', subtree])
execute(['svn', 'commit', '-m', 'filling in T/b/t'])
os.chdir(os.path.join(self.svn_checkout, 'trunk'))
for i, data in enumerate([FOO, FOO1, FOO2]):
self._svn_add_file_commit('foo.txt', data, 'foo commit %s' % i)
def _get_testing_clone(self):
self.clone_dir = os.path.join(self._tmpbase, 'checkout.hg')
self._hgcmd([
'clone', 'svn://127.0.0.1:%s/svnrepo' % self._svnserve_port,
self.clone_dir,
])
def _spin_up_client(self):
os.chdir(self.clone_dir)
self.client = MercurialClient(options=self.options)
def _stub_in_config_and_options(self):
self.user_config = {}
self.configs = []
self.client.user_config = self.user_config
self.client.configs = self.configs
self.options.parent_branch = None
def testGetRepositoryInfoSimple(self):
"""Test MercurialClient (+svn) get_repository_info, simple case"""
ri = self.client.get_repository_info()
self.assertEqual('svn', self.client._type)
self.assertEqual('/trunk', ri.base_path)
self.assertEqual('svn://127.0.0.1:%s/svnrepo' % self._svnserve_port,
ri.path)
def testScanForServerSimple(self):
"""Test MercurialClient (+svn) scan_for_server, simple case"""
ri = self.client.get_repository_info()
server = self.client.scan_for_server(ri)
self.assertTrue(server is None)
def testScanForServerReviewboardrc(self):
"""Test MercurialClient (+svn) scan_for_server in .reviewboardrc"""
rc_filename = os.path.join(self.clone_dir, '.reviewboardrc')
rc = open(rc_filename, 'w')
rc.write('REVIEWBOARD_URL = "%s"' % self.TESTSERVER)
rc.close()
self.client.user_config, configs = load_config_files(self.clone_dir)
ri = self.client.get_repository_info()
server = self.client.scan_for_server(ri)
self.assertEqual(self.TESTSERVER, server)
def testScanForServerProperty(self):
"""Test MercurialClient (+svn) scan_for_server in svn property"""
os.chdir(self.svn_checkout)
execute(['svn', 'update'])
execute(['svn', 'propset', 'reviewboard:url', self.TESTSERVER,
self.svn_checkout])
execute(['svn', 'commit', '-m', 'adding reviewboard:url property'])
os.chdir(self.clone_dir)
self._hgcmd(['pull'])
self._hgcmd(['update', '-C'])
ri = self.client.get_repository_info()
self.assertEqual(self.TESTSERVER, self.client.scan_for_server(ri))
def testDiffSimple(self):
"""Test MercurialClient (+svn) diff, simple case"""
self.client.get_repository_info()
self._hg_add_file_commit('foo.txt', FOO4, 'edit 4')
self.assertEqual(EXPECTED_HG_SVN_DIFF_0, self.client.diff(None)[0])
def testDiffSimpleMultiple(self):
"""Test MercurialClient (+svn) diff with multiple commits"""
self.client.get_repository_info()
self._hg_add_file_commit('foo.txt', FOO4, 'edit 4')
self._hg_add_file_commit('foo.txt', FOO5, 'edit 5')
self._hg_add_file_commit('foo.txt', FOO6, 'edit 6')
self.assertEqual(EXPECTED_HG_SVN_DIFF_1, self.client.diff(None)[0])
class SVNClientTests(SCMClientTests):
    """Tests for SVNRepositoryInfo relative-path computation."""

    def setUp(self):
        super(SVNClientTests, self).setUp()

    def test_relative_paths(self):
        """Testing SVNRepositoryInfo._get_relative_path"""
        repo_info = SVNRepositoryInfo('http://svn.example.com/svn/', '/', '')

        # Each entry: (path, root, expected relative path or None).
        cases = [
            ('/foo', '/bar', None),
            ('/', '/trunk/myproject', None),
            ('/trunk/myproject', '/', '/trunk/myproject'),
            ('/trunk/myproject', '', '/trunk/myproject'),
            ('/trunk/myproject', '/trunk', '/myproject'),
            ('/trunk/myproject', '/trunk/myproject', '/'),
        ]

        for path, root, expected in cases:
            self.assertEqual(repo_info._get_relative_path(path, root),
                             expected)
class PerforceClientTests(SCMClientTests):
    """Tests for PerforceClient option validation."""

    def setUp(self):
        super(PerforceClientTests, self).setUp()

    @raises(SystemExit)
    def test_error_on_revision_range(self):
        """Testing that passing a revision_range causes the client to exit."""
        self.options.revision_range = '12345'
        perforce_client = PerforceClient(options=self.options)
        perforce_client.check_options()
FOO = """\
ARMA virumque cano, Troiae qui primus ab oris
Italiam, fato profugus, Laviniaque venit
litora, multum ille et terris iactatus et alto
vi superum saevae memorem Iunonis ob iram;
multa quoque et bello passus, dum conderet urbem,
inferretque deos Latio, genus unde Latinum,
Albanique patres, atque altae moenia Romae.
Musa, mihi causas memora, quo numine laeso,
quidve dolens, regina deum tot volvere casus
insignem pietate virum, tot adire labores
impulerit. Tantaene animis caelestibus irae?
"""
FOO1 = """\
ARMA virumque cano, Troiae qui primus ab oris
Italiam, fato profugus, Laviniaque venit
litora, multum ille et terris iactatus et alto
vi superum saevae memorem Iunonis ob iram;
multa quoque et bello passus, dum conderet urbem,
inferretque deos Latio, genus unde Latinum,
Albanique patres, atque altae moenia Romae.
Musa, mihi causas memora, quo numine laeso,
"""
FOO2 = """\
ARMA virumque cano, Troiae qui primus ab oris
ARMA virumque cano, Troiae qui primus ab oris
ARMA virumque cano, Troiae qui primus ab oris
Italiam, fato profugus, Laviniaque venit
litora, multum ille et terris iactatus et alto
vi superum saevae memorem Iunonis ob iram;
multa quoque et bello passus, dum conderet urbem,
inferretque deos Latio, genus unde Latinum,
Albanique patres, atque altae moenia Romae.
Musa, mihi causas memora, quo numine laeso,
"""
FOO3 = """\
ARMA virumque cano, Troiae qui primus ab oris
ARMA virumque cano, Troiae qui primus ab oris
Italiam, fato profugus, Laviniaque venit
litora, multum ille et terris iactatus et alto
vi superum saevae memorem Iunonis ob iram;
dum conderet urbem,
inferretque deos Latio, genus unde Latinum,
Albanique patres, atque altae moenia Romae.
Albanique patres, atque altae moenia Romae.
Musa, mihi causas memora, quo numine laeso,
"""
FOO4 = """\
Italiam, fato profugus, Laviniaque venit
litora, multum ille et terris iactatus et alto
vi superum saevae memorem Iunonis ob iram;
dum conderet urbem,
inferretque deos Latio, genus unde Latinum,
Albanique patres, atque altae moenia Romae.
Musa, mihi causas memora, quo numine laeso,
"""
FOO5 = """\
litora, multum ille et terris iactatus et alto
Italiam, fato profugus, Laviniaque venit
vi superum saevae memorem Iunonis ob iram;
dum conderet urbem,
Albanique patres, atque altae moenia Romae.
Albanique patres, atque altae moenia Romae.
Musa, mihi causas memora, quo numine laeso,
inferretque deos Latio, genus unde Latinum,
ARMA virumque cano, Troiae qui primus ab oris
ARMA virumque cano, Troiae qui primus ab oris
"""
FOO6 = """\
ARMA virumque cano, Troiae qui primus ab oris
ARMA virumque cano, Troiae qui primus ab oris
Italiam, fato profugus, Laviniaque venit
litora, multum ille et terris iactatus et alto
vi superum saevae memorem Iunonis ob iram;
dum conderet urbem, inferretque deos Latio, genus
unde Latinum, Albanique patres, atque altae
moenia Romae. Albanique patres, atque altae
moenia Romae. Musa, mihi causas memora, quo numine laeso,
"""
EXPECTED_HG_DIFF_0 = """\
diff --git a/foo.txt b/foo.txt
--- a/foo.txt
+++ b/foo.txt
@@ -6,7 +6,4 @@
inferretque deos Latio, genus unde Latinum,
Albanique patres, atque altae moenia Romae.
Musa, mihi causas memora, quo numine laeso,
-quidve dolens, regina deum tot volvere casus
-insignem pietate virum, tot adire labores
-impulerit. Tantaene animis caelestibus irae?
\n"""
EXPECTED_HG_DIFF_1 = """\
diff --git a/foo.txt b/foo.txt
--- a/foo.txt
+++ b/foo.txt
@@ -1,12 +1,11 @@
+ARMA virumque cano, Troiae qui primus ab oris
ARMA virumque cano, Troiae qui primus ab oris
Italiam, fato profugus, Laviniaque venit
litora, multum ille et terris iactatus et alto
vi superum saevae memorem Iunonis ob iram;
-multa quoque et bello passus, dum conderet urbem,
+dum conderet urbem,
inferretque deos Latio, genus unde Latinum,
Albanique patres, atque altae moenia Romae.
+Albanique patres, atque altae moenia Romae.
Musa, mihi causas memora, quo numine laeso,
-quidve dolens, regina deum tot volvere casus
-insignem pietate virum, tot adire labores
-impulerit. Tantaene animis caelestibus irae?
\n"""
EXPECTED_HG_DIFF_2 = """\
diff --git a/foo.txt b/foo.txt
--- a/foo.txt
+++ b/foo.txt
@@ -1,3 +1,5 @@
+ARMA virumque cano, Troiae qui primus ab oris
+ARMA virumque cano, Troiae qui primus ab oris
ARMA virumque cano, Troiae qui primus ab oris
Italiam, fato profugus, Laviniaque venit
litora, multum ille et terris iactatus et alto
"""
EXPECTED_HG_DIFF_3 = """\
diff --git a/foo.txt b/foo.txt
--- a/foo.txt
+++ b/foo.txt
@@ -6,7 +6,4 @@
inferretque deos Latio, genus unde Latinum,
Albanique patres, atque altae moenia Romae.
Musa, mihi causas memora, quo numine laeso,
-quidve dolens, regina deum tot volvere casus
-insignem pietate virum, tot adire labores
-impulerit. Tantaene animis caelestibus irae?
\n"""
EXPECTED_HG_SVN_DIFF_0 = """\
Index: foo.txt
===================================================================
--- foo.txt\t(revision 4)
+++ foo.txt\t(working copy)
@@ -1,4 +1,1 @@
-ARMA virumque cano, Troiae qui primus ab oris
-ARMA virumque cano, Troiae qui primus ab oris
-ARMA virumque cano, Troiae qui primus ab oris
Italiam, fato profugus, Laviniaque venit
@@ -6,3 +3,8 @@
vi superum saevae memorem Iunonis ob iram;
-multa quoque et bello passus, dum conderet urbem,
+dum conderet urbem,
+
+
+
+
+
inferretque deos Latio, genus unde Latinum,
"""
EXPECTED_HG_SVN_DIFF_1 = """\
Index: foo.txt
===================================================================
--- foo.txt\t(revision 4)
+++ foo.txt\t(working copy)
@@ -1,2 +1,1 @@
-ARMA virumque cano, Troiae qui primus ab oris
ARMA virumque cano, Troiae qui primus ab oris
@@ -6,6 +5,6 @@
vi superum saevae memorem Iunonis ob iram;
-multa quoque et bello passus, dum conderet urbem,
-inferretque deos Latio, genus unde Latinum,
-Albanique patres, atque altae moenia Romae.
-Musa, mihi causas memora, quo numine laeso,
+dum conderet urbem, inferretque deos Latio, genus
+unde Latinum, Albanique patres, atque altae
+moenia Romae. Albanique patres, atque altae
+moenia Romae. Musa, mihi causas memora, quo numine laeso,
\n"""
| |
import json
import logging
import os.path
import Bio
import Bio.GenBank
from Bio.GenBank.Scanner import GenBankScanner
from Bio.SeqFeature import ExactPosition, BeforePosition, AfterPosition
import pynpact.track
logger = logging.getLogger(__name__)
def parse_seq_rec(gbkfile, do_features=False):
    """Parse the gbkfile into a Bio.SeqRecord.SeqRecord.

    :param gbkfile: path of the GenBank file to read.
    :param do_features: whether to also parse the feature table;
        disabling feature parsing is substantially faster.
    """
    with open(gbkfile, 'r') as handle:
        # this is pretty much the exact code of Bio.Seq.read, except
        # we can access the do_features flag.
        # BUG FIX: do_features was previously hardcoded to False here,
        # so callers requesting features silently got none.
        return GenBankScanner(debug=0).parse(handle, do_features=do_features)
# def parse_header(gbkfile):
# """Parse just the header of the Genbank file
# This is using internal methods from BioPython to do a relatively
# minimal amount of work.
# This doesn't work because the biggest thing we want out of it is
# the length; but that's not there
# """
# from Bio.GenBank import _FeatureConsumer
# from Bio.GenBank.utils import FeatureValueCleaner
# from Bio.GenBank.Scanner import GenBankScanner
# scanner = GenBankScanner()
# # from Bio.GenBank.Scanner.InsdcScanner.parse
# consumer = _FeatureConsumer(use_fuzziness=1,
# feature_cleaner=FeatureValueCleaner())
# with open(gbkfile) as f:
# #from Bio.GenBank.Scanner.InsdcScanner.feed
# scanner.set_handle(f)
# if not scanner.find_start():
# #Could not find (another) record
# consumer.data = None
# return False
# scanner._feed_first_line(consumer, scanner.line)
# scanner._feed_header_lines(consumer, scanner.parse_header())
# return consumer.data
# def reduce_genbank(gbkfile):
# """An attempt to create a version of the gbk file that has all the
# features but not the sequence in it, didn't end up being a
# significant savings.
# """
# def filterfun(outfile):
# with open(gbkfile, 'r') as infile:
# for l in infile:
# outfile.write(l)
# if l.startswith("ORIGIN"):
# outfile.write("//\n")
# return
# return util.safe_produce_new(
# util.derivative_filename(gbkfile, ".noseq", replace_ext=False),
# filterfun, logger=logger)
# def open_parse_gb_rec(gbkfile, reduce_first=False):
# """Open the GenBank file using the underlying biopython libraries
# so we can get at the do_features keyword (False is generally quite
# a bit faster)
# Returns a the Bio.GenBank specialized record type.
# """
# if reduce_first:
# raise NotImplementedError("reduce_first option must be False for now")
# #rec = GenBankScanner().parse(open('NC_007912.gbk','r'), do_features=False)
# #SeqIO.read(gbkfile,"genbank")
# with open(gbkfile, 'r') as handle:
# rp = Bio.GenBank.RecordParser()
# #rp._scanner = Bio.GenBank.Scanner.GenBankScanner()
# rp._consumer = Bio.GenBank._RecordConsumer()
# rp._scanner.feed(handle, rp._consumer, do_features=False)
# return rp._consumer.data
def open_parse_seq_rec(gbkfile, reduce_first=False, do_features=False):
    """Open the GenBank file using the underlying biopython libraries
    so we can get at the do_features keyword (False is generally quite
    a bit faster)

    Returns a SeqRecord object--the same as Bio.SeqIO.read(<file>,'genbank')"""
    if reduce_first:
        raise NotImplementedError("reduce_first option must be False for now")
    logger.info("Parsing genbank file (features:%s): %r",
                do_features, gbkfile)
    #rec = GenBankScanner().parse(open('NC_007912.gbk','r'), do_features=False)
    #SeqIO.read(gbkfile,"genbank")
    with open(gbkfile, 'r') as handle:
        # NOTE(review): this reaches into BioPython internals (the private
        # _consumer/_scanner attributes of FeatureParser) solely to pass
        # do_features through; may break across BioPython versions.
        rp = Bio.GenBank.FeatureParser()
        rp._consumer = Bio.GenBank._FeatureConsumer(
            rp.use_fuzziness, rp._cleaner)
        rp._scanner.feed(handle, rp._consumer, do_features=do_features)
        return rp._consumer.data
def _write_gbk_track_json(fh, dicts):
fh.write('{"name":"Input CDSs", "type":"extracts", "active":true,\n')
fh.write('"data":[\n')
for r in dicts[:-1]:
json.dump(r, fh)
fh.write(',\n')
json.dump(dicts[-1], fh)
fh.write(']}')
def gbk_to_track_json(gbkfile, outfilename):
    """Convert the CDS features of a GenBank file to track JSON.

    outfilename may be an already-open (Python 2) file object or a path
    string to create.
    """
    rec = Bio.SeqIO.read(gbkfile, 'genbank')
    rtn = []
    # Running index over CDS features; mirrored by track_json_to_gbk.
    cdsidx = 0
    for feat in rec.features:
        if feat.type != 'CDS':
            continue
        q = feat.qualifiers.copy()
        # Unwrap single-element qualifier lists to bare values for JSON.
        for (k, v) in q.items():
            if isinstance(v, list) and len(v) == 1:
                q[k] = v[0]
        d = {'qualifiers': q}
        d['cdsidx'] = cdsidx
        cdsidx += 1
        d['start'] = feat.location.start.real + 1  # account for python off by one
        d['end'] = feat.location.end.real
        # Fuzzy (non-Exact) positions are flagged as approximate.
        d['start_approximate'] = not(isinstance(feat.location.start, ExactPosition))
        d['end_approximate'] = not(isinstance(feat.location.end, ExactPosition))
        d['approximate'] = not(isinstance(feat.location.start, ExactPosition)
                               and isinstance(feat.location.end, ExactPosition))
        d['complement'] = feat.location.strand == -1
        d['type'] = 'CDS'
        d['phase'] = getPhase(d)
        # Display name: prefer the 'gene' qualifier, fall back to
        # 'locus_tag'.
        if not d.get('name'):
            d['name'] = d.get('qualifiers').get('gene')
        if not d.get('name'):
            d['name'] = d.get('qualifiers').get('locus_tag')
        rtn.append(d)
    # `file` is the Python 2 builtin type of open file objects.
    if isinstance(outfilename, file):
        _write_gbk_track_json(outfilename, rtn)
    else:
        with open(outfilename, 'w') as fh:
            _write_gbk_track_json(fh, rtn)
def _cds_to_feature(cdsdict):
    """Build a Bio CDS SeqFeature from a track-JSON CDS dict."""
    # Undo the +1 applied when the dict was produced from a SeqRecord.
    begin = cdsdict.get('start') - 1
    if cdsdict.get('start_approximate'):
        start = BeforePosition(begin)
    else:
        start = ExactPosition(begin)

    if cdsdict.get('end_approximate'):
        end = AfterPosition(cdsdict.get('end'))
    else:
        end = ExactPosition(cdsdict.get('end'))

    # these are lists in the original object
    quals = cdsdict.get('qualifiers')
    for key, val in quals.items():
        if not isinstance(val, list):
            quals[key] = [val]

    location = Bio.SeqFeature.FeatureLocation(
        start=start,
        end=end,
        strand=(-1 if cdsdict.get('complement') else 1))
    return Bio.SeqFeature.SeqFeature(type='CDS', qualifiers=quals,
                                     location=location)
def _sort_feats(f, g):
s0 = f.location.start.real
s1 = g.location.start.real
if s0 == s1:
return 1 if f.type < g.type else -1
else:
return s0 - s1
def track_json_to_gbk(gbkfile, outpath, track_json=None):
    """Write a GenBank file whose CDS features come from track_json.

    JSON features carrying a 'cdsidx' replace the correspondingly-indexed
    CDS of the input file; entries without one are appended as new
    features.  Input CDSs absent from the JSON are dropped (presumably
    deleted by the user -- TODO confirm intent); non-CDS features are
    kept unchanged.  Returns the written SeqRecord.
    """
    rec = Bio.SeqIO.read(gbkfile, 'genbank')
    jsonfeats = track_json.get('data')

    cdsToReplace = {}
    cdsToAdd = []
    for v in jsonfeats:
        # BUG FIX: this previously tested `if v.get('cdsidx'):`, which is
        # falsy for index 0 and so mishandled the file's first CDS.
        if v.get('cdsidx') is not None:
            cdsToReplace[v.get('cdsidx')] = v
        else:
            cdsToAdd.append(v)

    cdsidx = 0
    feats = []
    for f in rec.features:
        if f.type == 'CDS':
            if cdsidx in cdsToReplace:
                feats.append(_cds_to_feature(cdsToReplace[cdsidx]))
            # Numbering must match gbk_to_track_json: advance on every
            # CDS whether or not it was replaced.
            cdsidx += 1
        else:
            feats.append(f)

    for cdict in cdsToAdd:
        feats.append(_cds_to_feature(cdict))

    # Python 2 cmp-style sort: keep features ordered by start position.
    feats.sort(cmp=_sort_feats)
    rec.features = feats

    with open(outpath, 'w') as fh:
        Bio.SeqIO.write(rec, fh, 'genbank')
    return rec
def get_track_dicts(paths):
    """Load each path into a track dict.

    Paths ending in 'json' are parsed as JSON; anything else is loaded
    through pynpact.track.Track and converted with todict().
    """
    loaded = []
    for path in paths:
        if path.endswith('json'):
            with open(path) as handle:
                loaded.append(json.load(handle))
        else:
            loaded.append(pynpact.track.Track(filename=path).todict())
    return loaded
def combine_track_files(paths, root=None):
    """Combine the track files at paths (relative to root, if given).

    After combining, the root prefix is stripped back out of each
    feature's recorded 'trackfile' qualifier.
    """
    if root:
        paths = [os.path.join(root, p) for p in paths]

    combined = combine_track_jsons(get_track_dicts(paths))

    if root and combined.get('data'):
        for feat in combined.get('data'):
            quals = feat.get('qualifiers')
            if quals and quals.get('trackfile'):
                quals['trackfile'] = quals['trackfile'].replace(root, '')
    return combined
def combine_track_jsons(track_json_dicts):
    """Merge track dicts: metadata of the first, 'data' concatenated.

    Every feature's qualifiers are tagged with the 'filename' of the
    track it came from (this mutates the input feature dicts).  Returns
    None for an empty input list.

    BUG FIX: the empty-list guard previously ran *after* indexing
    track_json_dicts[0], so an empty input raised IndexError; tail
    tracks without 'data' also crashed the final concatenation.
    """
    if not track_json_dicts:
        return None

    combined = {}
    combined.update(track_json_dicts[0])
    # Fresh list so extending below never aliases the first track's data.
    combined['data'] = list(combined.get('data') or [])

    for tr in track_json_dicts:
        if not tr.get('data'):
            continue
        for feat in tr.get('data'):
            if 'qualifiers' not in feat:
                feat['qualifiers'] = {}
            feat['qualifiers']['trackfile'] = tr.get('filename')

    for tr in track_json_dicts[1:]:
        if tr.get('data'):
            combined['data'].extend(tr.get('data'))
    return combined
def getPhase(orf):
    """Return the reading-frame phase (0-2) of an ORF dict.

    The coding start is 'start' for complement-strand ORFs and 'end'
    otherwise; phase is that coordinate, zero-based, mod 3.
    """
    if orf['complement']:
        anchor = orf['start']
    else:
        anchor = orf['end']
    return (anchor - 1) % 3
| |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    # If the values still read "$Format:...$", this source did not come
    # from a git-archive export and no keyword expansion has happened.
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
    return keywords
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Attributes (VCS, style, tag_prefix, parentdir_prefix,
    versionfile_source, verbose) are assigned in get_config() below.
    """
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values are baked in when 'setup.py versioneer' generates
    # _version.py.
    config = VersioneerConfig()
    config.VCS = "git"
    config.style = "pep440"
    config.tag_prefix = ""
    config.parentdir_prefix = "pynigma-"
    config.versionfile_source = "pynigma/_version.py"
    config.verbose = False
    return config
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Raised e.g. by versions_from_parentdir when that version-discovery
    strategy does not apply.
    """
# Not populated or read within this module as shown; presumably kept for
# compatibility with the versioneer machinery -- TODO confirm.
LONG_VERSION_PY = {}
# Maps VCS name -> {method name -> handler function}; filled in by the
# register_vcs_handler decorator below.
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Tries each candidate executable in `commands` in turn (e.g. "git"
    then "git.cmd" on Windows) and returns (stdout, returncode); both
    are None when no candidate could be run.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            # sys.exc_info() keeps this compatible with both Python 2
            # and 3 (no `as e` / exception-scope differences).
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # Executable not found; try the next candidate.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # for/else: the loop exhausted all candidates without break-ing.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        # communicate() yields bytes on Python 3; decode to str.
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string. Walks up to two directory
    levels above *root* looking for a name starting with
    *parentdir_prefix*; raises NotThisMethod if none matches.
    """
    tried = []
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scans the file for git_refnames/git_full/git_date assignments and
    returns a dict with any of the keys "refnames", "full", "date" that
    were found (empty dict if the file cannot be read).
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # Use a context manager so the handle is closed even if a read or
        # regexp step raises; the original open()/close() pair leaked the
        # file descriptor on any exception between the two calls.
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except EnvironmentError:
        # unreadable/missing file: fall back to an empty keyword set
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    Raises NotThisMethod when the keywords are absent or were never
    expanded by 'git archive'.
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {r.strip() for r in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = {r for r in refs if re.search(r'\d', r)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.
    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, error, dirty,
    closest-tag, distance, date (some only on the success paths).
    Raises NotThisMethod when git is unavailable or *root* is not a
    git work tree.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    # NOTE(review): if 'git show' fails here, run_command returns (None, rc)
    # and the [0].strip() below raises TypeError — confirm this is acceptable
    # given that HEAD was already resolved above.
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a .

    Chooses the separator for appending a local-version segment to the
    rendered version string.
    """
    # git_pieces_from_vcs() stores closest-tag as None (not a missing key)
    # when there are no tags, and `"+" in None` raises TypeError — coerce
    # a None/missing value to "" before the membership test.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".
    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: nothing tagged anywhere in history
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.
    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .
    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tags at all, hex suffix is unconditional
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g%s" % pieces["short"]
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        version += plus_or_dot(pieces)
        version += "g%s" % pieces["short"]
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .
    The ".dev0" means dirty.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                version += ".dev0"
        return version
    # exception #1
    version = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].
    Like 'git describe --tags --dirty --always'.
    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        version = pieces["short"]
    return version + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.
    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        version = pieces["short"]
    return version + ("-dirty" if pieces["dirty"] else "")
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # a parse failure upstream: surface the error, not a bogus version
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so.

    Tries each discovery strategy in order — expanded git-archive
    keywords, 'git describe' on the checked-out tree, then the parent
    directory name — falling through on NotThisMethod. Always returns a
    dict with keys version/full-revisionid/dirty/error/date.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # no __file__ available (frozen interpreter): cannot locate the tree
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    # every strategy failed; report an explicit error rather than raising
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
| |
import pytest
import glob
import os
import shutil
import time
import logging
import re
import tempfile
import subprocess
import sys
import errno
import pprint
from collections import OrderedDict
from cassandra.cluster import Cluster as PyCluster
from cassandra.cluster import NoHostAvailable
from cassandra.cluster import EXEC_PROFILE_DEFAULT
from cassandra.policies import WhiteListRoundRobinPolicy
from ccmlib.common import get_version_from_build, is_win
from ccmlib.cluster import Cluster
from dtest import (get_ip_from_node, make_execution_profile, get_auth_provider, get_port_from_node,
get_eager_protocol_version)
from distutils.version import LooseVersion
from tools.context import log_filter
from tools.funcutils import merge_dicts
logger = logging.getLogger(__name__)
def retry_till_success(fun, *args, **kwargs):
    """Call fun(*args, **kwargs) repeatedly until it succeeds.

    Two keyword arguments are consumed here and not forwarded:
    `timeout` (seconds, default 60) and `bypassed_exception` (exception
    type(s) to retry on, default Exception). Once the deadline passes,
    the bypassed exception is re-raised; other exceptions propagate
    immediately.
    """
    timeout = kwargs.pop('timeout', 60)
    bypassed_exception = kwargs.pop('bypassed_exception', Exception)
    give_up_at = time.time() + timeout
    while True:
        try:
            return fun(*args, **kwargs)
        except bypassed_exception:
            if time.time() > give_up_at:
                raise
            # brief pause before next attempt
            time.sleep(0.25)
class DTestSetup(object):
    """Per-test harness state for a ccm-managed Cassandra cluster.

    Owns the scratch directory, the ccm Cluster object, driver sessions,
    log watching/collection, and cleanup between tests.
    """
    def __init__(self, dtest_config=None, setup_overrides=None, cluster_name="test"):
        """Record configuration and prepare scratch/log directories.

        The actual ccm cluster is not created here; see
        initialize_cluster().
        """
        self.dtest_config = dtest_config
        self.setup_overrides = setup_overrides
        self.cluster_name = cluster_name
        self.ignore_log_patterns = []
        self.cluster = None
        self.cluster_options = []
        self.replacement_node = None
        self.allow_log_errors = False
        self.connections = []
        self.log_saved_dir = "logs"
        try:
            os.mkdir(self.log_saved_dir)
        except OSError:
            # directory already exists (or is not creatable) — best effort
            pass
        self.last_log = os.path.join(self.log_saved_dir, "last")
        self.test_path = self.get_test_path()
        self.enable_for_jolokia = False
        self.subprocs = []
        self.log_watch_thread = None
        self.last_test_dir = "last_test_dir"
        self.jvm_args = []
        self.create_cluster_func = None
        self.iterations = 0
    def get_test_path(self):
        """Create and return a fresh temporary directory for this test."""
        test_path = tempfile.mkdtemp(prefix='dtest-')
        # ccm on cygwin needs absolute path to directory - it crosses from cygwin space into
        # regular Windows space on wmic calls which will otherwise break pathing
        if sys.platform == "cygwin":
            process = subprocess.Popen(["cygpath", "-m", test_path], stdout=subprocess.PIPE,
                                       stderr=subprocess.STDOUT)
            test_path = process.communicate()[0].rstrip()
        return test_path
    def glob_data_dirs(self, path, ks="ks"):
        """Glob *path* under every node's data directories for keyspace *ks*."""
        result = []
        for node in self.cluster.nodelist():
            for data_dir in node.data_directories():
                ks_dir = os.path.join(data_dir, ks, path)
                result.extend(glob.glob(ks_dir))
        return result
    def begin_active_log_watch(self):
        """
        Calls into ccm to start actively watching logs.
        In the event that errors are seen in logs, ccm will call back to _log_error_handler.
        When the cluster is no longer in use, stop_active_log_watch should be called to end log watching.
        (otherwise a 'daemon' thread will (needlessly) run until the process exits).
        """
        self.log_watch_thread = self.cluster.actively_watch_logs_for_error(self._log_error_handler, interval=0.25)
    def _log_error_handler(self, errordata):
        """
        Callback handler used in conjunction with begin_active_log_watch.
        When called, prepares exception instance, we will use pytest.fail
        to kill the current test being executed and mark it as failed
        @param errordata is a dictonary mapping node name to failure list.
        """
        # in some cases self.allow_log_errors may get set after proactive log checking has been enabled
        # so we need to double-check first thing before proceeding
        if self.allow_log_errors:
            return
        reportable_errordata = OrderedDict()
        for nodename, errors in list(errordata.items()):
            filtered_errors = list(self.__filter_errors(['\n'.join(msg) for msg in errors]))
            if len(filtered_errors) != 0:
                reportable_errordata[nodename] = filtered_errors
        # no errors worthy of halting the test
        if not reportable_errordata:
            return
        message = "Errors seen in logs for: {nodes}".format(nodes=", ".join(list(reportable_errordata.keys())))
        for nodename, errors in list(reportable_errordata.items()):
            for error in errors:
                message += "\n{nodename}: {error}".format(nodename=nodename, error=error)
        logger.debug('Errors were just seen in logs, ending test (if not ending already)!')
        pytest.fail("Error details: \n{message}".format(message=message))
    def copy_logs(self, directory=None, name=None):
        """Copy the current cluster's log files somewhere, by default to LOG_SAVED_DIR with a name of 'last'"""
        if directory is None:
            directory = self.log_saved_dir
        if name is None:
            name = self.last_log
        else:
            name = os.path.join(directory, name)
        if not os.path.exists(directory):
            os.mkdir(directory)
        logs = [(node.name, node.logfilename(), node.debuglogfilename(), node.gclogfilename(),
                 node.compactionlogfilename())
                for node in self.cluster.nodelist()]
        if len(logs) != 0:
            # unique destination dir: millisecond timestamp + object id
            basedir = str(int(time.time() * 1000)) + '_' + str(id(self))
            logdir = os.path.join(directory, basedir)
            os.mkdir(logdir)
            for n, log, debuglog, gclog, compactionlog in logs:
                if os.path.exists(log):
                    assert os.path.getsize(log) >= 0
                    shutil.copyfile(log, os.path.join(logdir, n + ".log"))
                if os.path.exists(debuglog):
                    assert os.path.getsize(debuglog) >= 0
                    shutil.copyfile(debuglog, os.path.join(logdir, n + "_debug.log"))
                if os.path.exists(gclog):
                    assert os.path.getsize(gclog) >= 0
                    shutil.copyfile(gclog, os.path.join(logdir, n + "_gc.log"))
                if os.path.exists(compactionlog):
                    assert os.path.getsize(compactionlog) >= 0
                    shutil.copyfile(compactionlog, os.path.join(logdir, n + "_compaction.log"))
            if os.path.exists(name):
                os.unlink(name)
            if not is_win():
                # keep a 'last' symlink pointing at the newest log dir
                os.symlink(basedir, name)
    def cql_connection(self, node, keyspace=None, user=None,
                       password=None, compression=True, protocol_version=None, port=None, ssl_opts=None, **kwargs):
        """Open a driver session to *node* (load balancing unrestricted)."""
        return self._create_session(node, keyspace, user, password, compression,
                                    protocol_version, port=port, ssl_opts=ssl_opts, **kwargs)
    def exclusive_cql_connection(self, node, keyspace=None, user=None,
                                 password=None, compression=True, protocol_version=None, port=None, ssl_opts=None,
                                 **kwargs):
        """Open a driver session pinned to *node* via a whitelist policy."""
        node_ip = get_ip_from_node(node)
        wlrr = WhiteListRoundRobinPolicy([node_ip])
        return self._create_session(node, keyspace, user, password, compression,
                                    protocol_version, port=port, ssl_opts=ssl_opts, load_balancing_policy=wlrr,
                                    **kwargs)
    def _create_session(self, node, keyspace, user, password, compression, protocol_version,
                        port=None, ssl_opts=None, execution_profiles=None, **kwargs):
        """Build a python-driver Cluster/session for *node* and track it in self.connections."""
        node_ip = get_ip_from_node(node)
        if not port:
            port = get_port_from_node(node)
        if protocol_version is None:
            protocol_version = get_eager_protocol_version(node.cluster.version())
        if user is not None:
            auth_provider = get_auth_provider(user=user, password=password)
        else:
            auth_provider = None
        profiles = {EXEC_PROFILE_DEFAULT: make_execution_profile(**kwargs)
                    } if not execution_profiles else execution_profiles
        cluster = PyCluster([node_ip],
                            auth_provider=auth_provider,
                            compression=compression,
                            protocol_version=protocol_version,
                            port=port,
                            ssl_options=ssl_opts,
                            connect_timeout=15,
                            allow_beta_protocol_version=True,
                            execution_profiles=profiles)
        session = cluster.connect(wait_for_all_pools=True)
        if keyspace is not None:
            session.set_keyspace(keyspace)
        self.connections.append(session)
        return session
    def patient_cql_connection(self, node, keyspace=None,
                               user=None, password=None, timeout=30, compression=True,
                               protocol_version=None, port=None, ssl_opts=None, **kwargs):
        """
        Returns a connection after it stops throwing NoHostAvailables due to not being ready.
        If the timeout is exceeded, the exception is raised.
        """
        if is_win():
            # Windows nodes take noticeably longer to come up
            timeout *= 2
        expected_log_lines = ('Control connection failed to connect, shutting down Cluster:',
                              '[control connection] Error connecting to ')
        with log_filter('cassandra.cluster', expected_log_lines):
            session = retry_till_success(
                self.cql_connection,
                node,
                keyspace=keyspace,
                user=user,
                password=password,
                timeout=timeout,
                compression=compression,
                protocol_version=protocol_version,
                port=port,
                ssl_opts=ssl_opts,
                bypassed_exception=NoHostAvailable,
                **kwargs
            )
        return session
    def patient_exclusive_cql_connection(self, node, keyspace=None,
                                         user=None, password=None, timeout=30, compression=True,
                                         protocol_version=None, port=None, ssl_opts=None, **kwargs):
        """
        Returns a connection after it stops throwing NoHostAvailables due to not being ready.
        If the timeout is exceeded, the exception is raised.
        """
        if is_win():
            timeout *= 2
        return retry_till_success(
            self.exclusive_cql_connection,
            node,
            keyspace=keyspace,
            user=user,
            password=password,
            timeout=timeout,
            compression=compression,
            protocol_version=protocol_version,
            port=port,
            ssl_opts=ssl_opts,
            bypassed_exception=NoHostAvailable,
            **kwargs
        )
    def check_logs_for_errors(self):
        """Print any non-ignored errors found in node logs.

        Returns True as soon as one node has errors; returns None
        (falsy) when no node does.
        """
        for node in self.cluster.nodelist():
            errors = list(self.__filter_errors(
                ['\n'.join(msg) for msg in node.grep_log_for_errors()]))
            if len(errors) != 0:
                for error in errors:
                    print("Unexpected error in {node_name} log, error: \n{error}".format(node_name=node.name, error=error))
                return True
    def __filter_errors(self, errors):
        """Filter errors, removing those that match self.ignore_log_patterns"""
        if not hasattr(self, 'ignore_log_patterns'):
            self.ignore_log_patterns = []
        for e in errors:
            for pattern in self.ignore_log_patterns:
                if re.search(pattern, e):
                    break
            else:
                # no ignore pattern matched: this error is reportable
                yield e
    def get_jfr_jvm_args(self):
        """
        @return The JVM arguments required for attaching flight recorder to a Java process.
        """
        return ["-XX:+UnlockCommercialFeatures", "-XX:+FlightRecorder"]
    def start_jfr_recording(self, nodes):
        """
        Start Java flight recorder provided the cluster was started with the correct jvm arguments.
        """
        for node in nodes:
            p = subprocess.Popen(['jcmd', str(node.pid), 'JFR.start'],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()
            logger.debug(stdout)
            logger.debug(stderr)
    def dump_jfr_recording(self, nodes):
        """
        Save Java flight recorder results to file for analyzing with mission control.
        """
        for node in nodes:
            p = subprocess.Popen(['jcmd', str(node.pid), 'JFR.dump',
                                  'recording=1', 'filename=recording_{}.jfr'.format(node.address())],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()
            logger.debug(stdout)
            logger.debug(stderr)
    def supports_v5_protocol(self, cluster_version):
        """True when *cluster_version* (LooseVersion-comparable) is 4.0 or newer."""
        return cluster_version >= LooseVersion('4.0')
    def cleanup_last_test_dir(self):
        """Remove the last_test_dir marker file if present."""
        if os.path.exists(self.last_test_dir):
            os.remove(self.last_test_dir)
    def stop_active_log_watch(self):
        """
        Joins the log watching thread, which will then exit.
        Should be called after each test, ideally after nodes are stopped but before cluster files are removed.
        Can be called multiple times without error.
        If not called, log watching thread will remain running until the parent process exits.
        """
        self.log_watch_thread.join(timeout=60)
    def cleanup_cluster(self):
        """Stop (as needed), remove the ccm cluster and delete its scratch directory."""
        with log_filter('cassandra'):  # quiet noise from driver when nodes start going down
            if self.dtest_config.keep_test_dir:
                self.cluster.stop(gently=self.dtest_config.enable_jacoco_code_coverage)
            else:
                # when recording coverage the jvm has to exit normally
                # or the coverage information is not written by the jacoco agent
                # otherwise we can just kill the process
                if self.dtest_config.enable_jacoco_code_coverage:
                    self.cluster.stop(gently=True)
            # Cleanup everything:
            try:
                if self.log_watch_thread:
                    self.stop_active_log_watch()
            finally:
                logger.debug("removing ccm cluster {name} at: {path}".format(name=self.cluster.name,
                                                                             path=self.test_path))
                self.cluster.remove()
                logger.debug("clearing ssl stores from [{0}] directory".format(self.test_path))
                for filename in ('keystore.jks', 'truststore.jks', 'ccm_node.cer'):
                    try:
                        os.remove(os.path.join(self.test_path, filename))
                    except OSError as e:
                        # ENOENT = no such file or directory
                        assert e.errno == errno.ENOENT
                os.rmdir(self.test_path)
                self.cleanup_last_test_dir()
    def cleanup_and_replace_cluster(self):
        """Shut down driver sessions, destroy the cluster, and build a fresh one."""
        for con in self.connections:
            con.cluster.shutdown()
        self.connections = []
        self.cleanup_cluster()
        self.test_path = self.get_test_path()
        self.initialize_cluster(self.create_cluster_func)
    def init_default_config(self):
        """Compute and apply the default cassandra.yaml options for this cluster."""
        # the failure detector can be quite slow in such tests with quick start/stop
        phi_values = {'phi_convict_threshold': 5}
        # enable read time tracking of repaired data between replicas by default
        if self.cluster.version() >= '4':
            repaired_data_tracking_values = {'repaired_data_tracking_for_partition_reads_enabled': 'true',
                                             'repaired_data_tracking_for_range_reads_enabled': 'true',
                                             'report_unconfirmed_repaired_data_mismatches': 'true'}
        else:
            repaired_data_tracking_values = {}
        timeout = 15000
        # NOTE: the generous request timeouts are only applied when no explicit
        # cluster_options were provided for this test
        if self.cluster_options is not None and len(self.cluster_options) > 0:
            values = merge_dicts(self.cluster_options, phi_values, repaired_data_tracking_values)
        else:
            values = merge_dicts(phi_values, repaired_data_tracking_values, {
                'read_request_timeout_in_ms': timeout,
                'range_request_timeout_in_ms': timeout,
                'write_request_timeout_in_ms': timeout,
                'truncate_request_timeout_in_ms': timeout,
                'request_timeout_in_ms': timeout
            })
        if self.setup_overrides is not None and len(self.setup_overrides.cluster_options) > 0:
            values = merge_dicts(values, self.setup_overrides.cluster_options)
        # No more thrift in 4.0, and start_rpc doesn't exists anymore
        if self.cluster.version() >= '4' and 'start_rpc' in values:
            del values['start_rpc']
        if self.cluster.version() >= '4':
            values['corrupted_tombstone_strategy'] = 'exception'
        if self.dtest_config.use_vnodes:
            self.cluster.set_configuration_options(
                values={'initial_token': None, 'num_tokens': self.dtest_config.num_tokens})
        else:
            self.cluster.set_configuration_options(values={'num_tokens': None})
        if self.dtest_config.use_off_heap_memtables:
            self.cluster.set_configuration_options(values={'memtable_allocation_type': 'offheap_objects'})
        self.cluster.set_configuration_options(values)
        logger.debug("Done setting configuration options:\n" + pprint.pformat(self.cluster._config_options, indent=4))
    def maybe_setup_jacoco(self, cluster_name='test'):
        """Setup JaCoCo code coverage support"""
        if not self.dtest_config.enable_jacoco_code_coverage:
            return
        # use explicit agent and execfile locations
        # or look for a cassandra build if they are not specified
        agent_location = os.environ.get('JACOCO_AGENT_JAR',
                                        os.path.join(self.dtest_config.cassandra_dir, 'build/lib/jars/jacocoagent.jar'))
        jacoco_execfile = os.environ.get('JACOCO_EXECFILE',
                                         os.path.join(self.dtest_config.cassandra_dir, 'build/jacoco/jacoco.exec'))
        if os.path.isfile(agent_location):
            logger.debug("Jacoco agent found at {}".format(agent_location))
            with open(os.path.join(
                    self.test_path, cluster_name, 'cassandra.in.sh'), 'w') as f:
                f.write('JVM_OPTS="$JVM_OPTS -javaagent:{jar_path}=destfile={exec_file}"'
                        .format(jar_path=agent_location, exec_file=jacoco_execfile))
            if os.path.isfile(jacoco_execfile):
                logger.debug("Jacoco execfile found at {}, execution data will be appended".format(jacoco_execfile))
            else:
                logger.debug("Jacoco execfile will be created at {}".format(jacoco_execfile))
        else:
            logger.debug("Jacoco agent not found or is not file. Execution will not be recorded.")
    @staticmethod
    def create_ccm_cluster(dtest_setup):
        """Create (but do not start) a ccm Cluster from the dtest configuration."""
        logger.info("cluster ccm directory: " + dtest_setup.test_path)
        version = dtest_setup.dtest_config.cassandra_version
        if version:
            cluster = Cluster(dtest_setup.test_path, dtest_setup.cluster_name, cassandra_version=version)
        else:
            cluster = Cluster(dtest_setup.test_path, dtest_setup.cluster_name, cassandra_dir=dtest_setup.dtest_config.cassandra_dir)
        cluster.set_datadir_count(dtest_setup.dtest_config.data_dir_count)
        cluster.set_environment_variable('CASSANDRA_LIBJEMALLOC', dtest_setup.dtest_config.jemalloc_path)
        return cluster
    def set_cluster_log_levels(self):
        """
        The root logger gets configured in the fixture named fixture_logging_setup.
        Based on the logging configuration options the user invoked pytest with,
        that fixture sets the root logger to that configuration. We then ensure all
        Cluster objects we work with "inherit" these logging settings (which we can
        lookup off the root logger)
        """
        # NOTE(review): logging.root.level is an int, so comparing it to the
        # string 'NOTSET' is always True — log_level is therefore always
        # 'INFO'. Confirm whether logging.NOTSET (0) was intended here.
        if logging.root.level != 'NOTSET':
            log_level = logging.getLevelName(logging.INFO)
        else:
            log_level = logging.root.level
        self.cluster.set_log_level(log_level)
    def initialize_cluster(self, create_cluster_func):
        """
        This method is responsible for initializing and configuring a ccm
        cluster for the next set of tests. This can be called for two
        different reasons:
        * A class of tests is starting
        * A test method failed/errored, so the cluster has been wiped
        Subclasses that require custom initialization should generally
        do so by overriding post_initialize_cluster().
        """
        # connections = []
        # cluster_options = []
        self.iterations += 1
        self.create_cluster_func = create_cluster_func
        self.cluster = self.create_cluster_func(self)
        self.init_default_config()
        self.maybe_setup_jacoco()
        self.set_cluster_log_levels()
        # cls.init_config()
        # write_last_test_file(cls.test_path, cls.cluster)
        # cls.post_initialize_cluster()
    def reinitialize_cluster_for_different_version(self):
        """
        This method is used by upgrade tests to re-init the cluster to work with a specific
        version that may not be compatible with the existing configuration options
        """
        self.init_default_config()
| |
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
P2PConnection: A low-level connection object to a node's P2P interface
P2PInterface: A high-level interface object for communicating to a node over P2P
P2PDataStore: A p2p interface class that keeps a store of transactions and blocks
and can respond correctly to getdata and getheaders messages"""
import asyncio
from collections import defaultdict
from io import BytesIO
import logging
import struct
import sys
import threading
from test_framework.messages import *
from test_framework.util import wait_until
logger = logging.getLogger("TestFramework.mininode")
# Map of wire command name (the 12-byte command field of the P2P header,
# NUL-stripped) to the message class used to deserialize that payload.
# The msg_* classes come from test_framework.messages (star import above).
MESSAGEMAP = {
    b"addr": msg_addr,
    b"block": msg_block,
    b"blocktxn": msg_blocktxn,
    b"cmpctblock": msg_cmpctblock,
    b"feefilter": msg_feefilter,
    b"getaddr": msg_getaddr,
    b"getblocks": msg_getblocks,
    b"getblocktxn": msg_getblocktxn,
    b"getdata": msg_getdata,
    b"getheaders": msg_getheaders,
    b"headers": msg_headers,
    b"inv": msg_inv,
    b"mempool": msg_mempool,
    b"ping": msg_ping,
    b"pong": msg_pong,
    b"reject": msg_reject,
    b"sendcmpct": msg_sendcmpct,
    b"sendheaders": msg_sendheaders,
    b"tx": msg_tx,
    b"verack": msg_verack,
    b"version": msg_version,
}
# Per-network 4-byte magic that prefixes every P2P message on the wire;
# used to validate incoming frames for the selected network.
MAGIC_BYTES = {
    "mainnet": b"\xf9\xbe\xb4\xd9",   # mainnet
    "testnet3": b"\x0b\x11\x09\x07",  # testnet3
    "regtest": b"\xfa\xbf\xb5\xda",   # regtest
}
class P2PConnection(asyncio.Protocol):
    """A low-level connection object to a node's P2P interface.

    This class is responsible for:

    - opening and closing the TCP connection to the node
    - reading bytes from and writing bytes to the socket
    - deserializing and serializing the P2P message header
    - logging messages as they are sent and received

    This class contains no logic for handing the P2P message payloads. It must be
    sub-classed and the on_message() callback overridden."""

    def __init__(self):
        # The underlying transport of the connection.
        # Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe
        self._transport = None

    @property
    def is_connected(self):
        return self._transport is not None

    def peer_connect(self, dstaddr, dstport, net="regtest"):
        """Prepare a connection to dstaddr:dstport.

        Returns a zero-argument callable which, when invoked, schedules the
        actual connection attempt on the network event loop."""
        assert not self.is_connected
        self.dstaddr = dstaddr
        self.dstport = dstport
        # The initial message to send after the connection was made:
        self.on_connection_send_msg = None
        self.recvbuf = b""
        self.network = net
        logger.debug('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport))

        loop = NetworkThread.network_event_loop
        conn_gen_unsafe = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport)
        conn_gen = lambda: loop.call_soon_threadsafe(loop.create_task, conn_gen_unsafe)
        return conn_gen

    def peer_disconnect(self):
        # Connection could have already been closed by other end.
        NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort())

    # Connection and disconnection methods

    def connection_made(self, transport):
        """asyncio callback when a connection is opened."""
        assert not self._transport
        logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
        self._transport = transport
        if self.on_connection_send_msg:
            self.send_message(self.on_connection_send_msg)
            self.on_connection_send_msg = None  # Never used again
        self.on_open()

    def connection_lost(self, exc):
        """asyncio callback when a connection is closed."""
        if exc:
            logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc))
        else:
            logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport))
        self._transport = None
        self.recvbuf = b""
        self.on_close()

    # Socket read methods

    def data_received(self, t):
        """asyncio callback when data is read from the socket."""
        if len(t) > 0:
            self.recvbuf += t
            self._on_data()

    def _on_data(self):
        """Try to read P2P messages from the recv buffer.

        This method reads data from the buffer in a loop. It deserializes,
        parses and verifies the P2P header, then passes the P2P payload to
        the on_message callback for processing."""
        try:
            while True:
                if len(self.recvbuf) < 4:
                    return
                if self.recvbuf[:4] != MAGIC_BYTES[self.network]:
                    raise ValueError("got garbage %s" % repr(self.recvbuf))
                if len(self.recvbuf) < 4 + 12 + 4 + 4:
                    return
                command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
                # BUGFIX: the length field is an unsigned 32-bit integer; the
                # previous signed "<i" unpack (mismatching the "<I" used in
                # _build_message) could produce a negative msglen.
                msglen = struct.unpack("<I", self.recvbuf[4+12:4+12+4])[0]
                checksum = self.recvbuf[4+12+4:4+12+4+4]
                if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
                    return
                msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
                # Checksum is the first four bytes of double-SHA256 of the payload
                th = sha256(msg)
                h = sha256(th)
                if checksum != h[:4]:
                    raise ValueError("got bad checksum " + repr(self.recvbuf))
                self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
                if command not in MESSAGEMAP:
                    raise ValueError("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
                f = BytesIO(msg)
                t = MESSAGEMAP[command]()
                t.deserialize(f)
                self._log_message("receive", t)
                self.on_message(t)
        except Exception as e:
            # BUGFIX: pass the detail as a lazy %-style argument. The previous
            # call passed repr(e) as a positional arg with no placeholder in the
            # format string, which the logging module rejects internally.
            logger.exception('Error reading message: %s', repr(e))
            raise

    def on_message(self, message):
        """Callback for processing a P2P payload. Must be overridden by derived class."""
        raise NotImplementedError

    # Socket write methods

    def send_message(self, message):
        """Send a P2P message over the socket.

        This method takes a P2P payload, builds the P2P header and adds
        the message to the send buffer to be sent over the socket."""
        if not self.is_connected:
            raise IOError('Not connected')
        self._log_message("send", message)
        tmsg = self._build_message(message)
        NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.write(tmsg))

    # Class utility methods

    def _build_message(self, message):
        """Build a serialized P2P message"""
        command = message.command
        data = message.serialize()
        tmsg = MAGIC_BYTES[self.network]
        tmsg += command
        tmsg += b"\x00" * (12 - len(command))
        tmsg += struct.pack("<I", len(data))
        th = sha256(data)
        h = sha256(th)
        tmsg += h[:4]
        tmsg += data
        return tmsg

    def _log_message(self, direction, msg):
        """Logs a message being sent or received over the connection."""
        if direction == "send":
            log_message = "Send message to "
        elif direction == "receive":
            log_message = "Received message from "
        log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
        if len(log_message) > 500:
            log_message += "... (msg truncated)"
        logger.debug(log_message)
class P2PInterface(P2PConnection):
    """A high-level P2P interface class for communicating with a Bitcoin node.

    This class provides high-level callbacks for processing P2P message
    payloads, as well as convenience methods for interacting with the
    node over P2P.

    Individual testcases should subclass this and override the on_* methods
    if they want to alter message handling behaviour."""

    def __init__(self):
        super().__init__()

        # Track number of messages of each type received and the most recent
        # message of each type
        self.message_count = defaultdict(int)
        self.last_message = {}

        # A count of the number of ping messages we've sent to the node
        self.ping_counter = 1

        # The network services received from the peer
        self.nServices = 0

        # BUGFIX: initialize here so that callers checking the flag before a
        # verack arrives get False instead of an AttributeError (previously
        # on_verack was the only writer).
        self.verack_received = False

    def peer_connect(self, *args, services=NODE_NETWORK|NODE_WITNESS, send_version=True, **kwargs):
        create_conn = super().peer_connect(*args, **kwargs)

        if send_version:
            # Send a version msg
            vt = msg_version()
            vt.nServices = services
            vt.addrTo.ip = self.dstaddr
            vt.addrTo.port = self.dstport
            vt.addrFrom.ip = "0.0.0.0"
            vt.addrFrom.port = 0
            self.on_connection_send_msg = vt  # Will be sent soon after connection_made

        return create_conn

    # Message receiving methods

    def on_message(self, message):
        """Receive message and dispatch message to appropriate callback.

        We keep a count of how many of each message type has been received
        and the most recent message of each type."""
        with mininode_lock:
            try:
                command = message.command.decode('ascii')
                self.message_count[command] += 1
                self.last_message[command] = message
                getattr(self, 'on_' + command)(message)
            except Exception:
                # BUGFIX: catch Exception instead of a bare except so that
                # SystemExit/KeyboardInterrupt are not intercepted here.
                # The exception is still re-raised after logging.
                print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
                raise

    # Callback methods. Can be overridden by subclasses in individual test
    # cases to provide custom message handling behaviour.

    def on_open(self):
        pass

    def on_close(self):
        pass

    def on_addr(self, message): pass
    def on_block(self, message): pass
    def on_blocktxn(self, message): pass
    def on_cmpctblock(self, message): pass
    def on_feefilter(self, message): pass
    def on_getaddr(self, message): pass
    def on_getblocks(self, message): pass
    def on_getblocktxn(self, message): pass
    def on_getdata(self, message): pass
    def on_getheaders(self, message): pass
    def on_headers(self, message): pass
    def on_mempool(self, message): pass
    def on_pong(self, message): pass
    def on_reject(self, message): pass
    def on_sendcmpct(self, message): pass
    def on_sendheaders(self, message): pass
    def on_tx(self, message): pass

    def on_inv(self, message):
        # Request every advertised object (type 0 == ERROR placeholder, skip it)
        want = msg_getdata()
        for i in message.inv:
            if i.type != 0:
                want.inv.append(i)
        if len(want.inv):
            self.send_message(want)

    def on_ping(self, message):
        self.send_message(msg_pong(message.nonce))

    def on_verack(self, message):
        self.verack_received = True

    def on_version(self, message):
        assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
        self.send_message(msg_verack())
        self.nServices = message.nServices

    # Connection helper methods

    def wait_for_disconnect(self, timeout=60):
        test_function = lambda: not self.is_connected
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    # Message receiving helper methods

    def wait_for_block(self, blockhash, timeout=60):
        test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_getdata(self, timeout=60):
        """Waits for a getdata message.

        Receiving any getdata message will satisfy the predicate. the last_message["getdata"]
        value must be explicitly cleared before calling this method, or this will return
        immediately with success. TODO: change this method to take a hash value and only
        return true if the correct block/tx has been requested."""
        test_function = lambda: self.last_message.get("getdata")
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_getheaders(self, timeout=60):
        """Waits for a getheaders message.

        Receiving any getheaders message will satisfy the predicate. the last_message["getheaders"]
        value must be explicitly cleared before calling this method, or this will return
        immediately with success. TODO: change this method to take a hash value and only
        return true if the correct block header has been requested."""
        test_function = lambda: self.last_message.get("getheaders")
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_inv(self, expected_inv, timeout=60):
        """Waits for an INV message and checks that the first inv object in the message was as expected."""
        if len(expected_inv) > 1:
            raise NotImplementedError("wait_for_inv() will only verify the first inv object")
        test_function = lambda: self.last_message.get("inv") and \
            self.last_message["inv"].inv[0].type == expected_inv[0].type and \
            self.last_message["inv"].inv[0].hash == expected_inv[0].hash
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_verack(self, timeout=60):
        test_function = lambda: self.message_count["verack"]
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    # Message sending helper functions

    def send_and_ping(self, message):
        self.send_message(message)
        self.sync_with_ping()

    # Sync up with the node
    def sync_with_ping(self, timeout=60):
        self.send_message(msg_ping(nonce=self.ping_counter))
        test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
        self.ping_counter += 1
# One lock for synchronizing all data access between the network event loop (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
# This lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
# RLock (not Lock) so a thread already holding it can re-acquire, e.g. a
# wait_until predicate running under the lock inside an on_* callback.
mininode_lock = threading.RLock()
class NetworkThread(threading.Thread):
    """Dedicated thread that runs the asyncio event loop serving all P2P connections."""

    # Singleton event loop shared by every P2PConnection; assigned once in __init__.
    network_event_loop = None

    def __init__(self):
        super().__init__(name="NetworkThread")
        # There is only one event loop and no more than one thread must be created
        assert not self.network_event_loop
        NetworkThread.network_event_loop = asyncio.new_event_loop()

    def run(self):
        """Start the network thread."""
        self.network_event_loop.run_forever()

    def close(self, timeout=10):
        """Close the connections and network event loop."""
        # stop() must be invoked from the loop's own thread, hence call_soon_threadsafe
        self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
        # Wait for the loop to actually stop before closing it, then join the thread
        wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout)
        self.network_event_loop.close()
        self.join(timeout)
class P2PDataStore(P2PInterface):
    """A P2P data store class.

    Keeps a block and transaction store and responds correctly to getdata and getheaders requests."""

    def __init__(self):
        super().__init__()
        # Most recently received reject code/reason, for assertions in tests
        self.reject_code_received = None
        self.reject_reason_received = None
        # store of blocks. key is block hash, value is a CBlock object
        self.block_store = {}
        self.last_block_hash = ''
        # store of txs. key is txid, value is a CTransaction object
        self.tx_store = {}
        # Hashes of every inv object the node has requested from us via getdata
        self.getdata_requests = []

    def on_getdata(self, message):
        """Check for the tx/block in our stores and if found, reply with an inv message."""
        for inv in message.inv:
            self.getdata_requests.append(inv.hash)
            if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys():
                self.send_message(msg_tx(self.tx_store[inv.hash]))
            elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys():
                self.send_message(msg_block(self.block_store[inv.hash]))
            else:
                logger.debug('getdata message type {} received.'.format(hex(inv.type)))

    def on_getheaders(self, message):
        """Search back through our block store for the locator, and reply with a headers message if found."""
        locator, hash_stop = message.locator, message.hashstop

        # Assume that the most recent block added is the tip
        if not self.block_store:
            return

        headers_list = [self.block_store[self.last_block_hash]]
        maxheaders = 2000
        while headers_list[-1].sha256 not in locator.vHave:
            # Walk back through the block store, adding headers to headers_list
            # as we go.
            prev_block_hash = headers_list[-1].hashPrevBlock
            if prev_block_hash in self.block_store:
                prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
                headers_list.append(prev_block_header)
                if prev_block_header.sha256 == hash_stop:
                    # if this is the hashstop header, stop here
                    break
            else:
                logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
                break

        # Truncate the list if there are too many headers
        # (the negative-step slice keeps the last maxheaders entries and also
        # reverses the list so headers go oldest-first, as the protocol expects)
        headers_list = headers_list[:-maxheaders - 1:-1]
        response = msg_headers(headers_list)

        if response is not None:
            self.send_message(response)

    def on_reject(self, message):
        """Store reject reason and code for testing."""
        self.reject_code_received = message.code
        self.reject_reason_received = message.reason

    def send_blocks_and_test(self, blocks, rpc, success=True, request_block=True, reject_code=None, reject_reason=None, timeout=60):
        """Send blocks to test node and test whether the tip advances.

         - add all blocks to our block_store
         - send a headers message for the final block
         - the on_getheaders handler will ensure that any getheaders are responded to
         - if request_block is True: wait for getdata for each of the blocks. The on_getdata handler will
           ensure that any getdata messages are responded to
         - if success is True: assert that the node's tip advances to the most recent block
         - if success is False: assert that the node's tip doesn't advance
         - if reject_code and reject_reason are set: assert that the correct reject message is received"""

        with mininode_lock:
            self.reject_code_received = None
            self.reject_reason_received = None

            for block in blocks:
                self.block_store[block.sha256] = block
                self.last_block_hash = block.sha256

        # Announce only the last header; the node fetches the rest via getheaders/getdata
        self.send_message(msg_headers([CBlockHeader(blocks[-1])]))

        if request_block:
            wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock)

        if success:
            wait_until(lambda: rpc.getbestblockhash() == blocks[-1].hash, timeout=timeout)
        else:
            assert rpc.getbestblockhash() != blocks[-1].hash

        if reject_code is not None:
            wait_until(lambda: self.reject_code_received == reject_code, lock=mininode_lock)
        if reject_reason is not None:
            wait_until(lambda: self.reject_reason_received == reject_reason, lock=mininode_lock)

    def send_txs_and_test(self, txs, rpc, success=True, expect_disconnect=False, reject_code=None, reject_reason=None):
        """Send txs to test node and test whether they're accepted to the mempool.

         - add all txs to our tx_store
         - send tx messages for all txs
         - if success is True/False: assert that the txs are/are not accepted to the mempool
         - if expect_disconnect is True: Skip the sync with ping
         - if reject_code and reject_reason are set: assert that the correct reject message is received."""

        with mininode_lock:
            self.reject_code_received = None
            self.reject_reason_received = None

            for tx in txs:
                self.tx_store[tx.sha256] = tx

        for tx in txs:
            self.send_message(msg_tx(tx))

        if expect_disconnect:
            self.wait_for_disconnect()
        else:
            # Ping round-trip guarantees the node has processed all tx messages
            self.sync_with_ping()

        raw_mempool = rpc.getrawmempool()
        if success:
            # Check that all txs are now in the mempool
            for tx in txs:
                assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
        else:
            # Check that none of the txs are now in the mempool
            for tx in txs:
                assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)

        if reject_code is not None:
            wait_until(lambda: self.reject_code_received == reject_code, lock=mininode_lock)
        if reject_reason is not None:
            wait_until(lambda: self.reject_reason_received == reject_reason, lock=mininode_lock)
| |
# -*- coding: utf-8 -*-
import copy
import datetime
import hashlib
import logging
import os
import alerts
import enhancements
import jsonschema
import ruletypes
import yaml
import yaml.scanner
from opsgenie import OpsGenieAlerter
from staticconf.loader import yaml_loader
from util import dt_to_ts
from util import dt_to_ts_with_format
from util import dt_to_unix
from util import dt_to_unixms
from util import EAException
from util import ts_to_dt
from util import ts_to_dt_with_format
from util import unix_to_dt
from util import unixms_to_dt
# schema for rule yaml
# NOTE(review): yaml.load without an explicit Loader can construct arbitrary
# Python objects and is unsafe on untrusted input. schema.yaml ships with the
# package, so this is tolerable, but yaml.safe_load would be the safer default.
# BUGFIX: use a context manager so the schema file handle is closed promptly
# (the previous bare open() leaked the handle until garbage collection).
with open(os.path.join(os.path.dirname(__file__), 'schema.yaml')) as _schema_file:
    rule_schema = jsonschema.Draft4Validator(yaml.load(_schema_file))

# Required global (config.yaml) and local (rule.yaml) configuration options
required_globals = frozenset(['run_every', 'rules_folder', 'es_host', 'es_port', 'writeback_index', 'buffer_time'])
required_locals = frozenset(['alert', 'type', 'name', 'index'])

# Used to map the names of rules to their classes
rules_mapping = {
    'frequency': ruletypes.FrequencyRule,
    'any': ruletypes.AnyRule,
    'spike': ruletypes.SpikeRule,
    'blacklist': ruletypes.BlacklistRule,
    'whitelist': ruletypes.WhitelistRule,
    'change': ruletypes.ChangeRule,
    'flatline': ruletypes.FlatlineRule,
    'new_term': ruletypes.NewTermsRule,
    'cardinality': ruletypes.CardinalityRule
}

# Used to map names of alerts to their classes
alerts_mapping = {
    'email': alerts.EmailAlerter,
    'jira': alerts.JiraAlerter,
    'opsgenie': OpsGenieAlerter,
    'debug': alerts.DebugAlerter,
    'command': alerts.CommandAlerter,
    'sns': alerts.SnsAlerter,
    'hipchat': alerts.HipChatAlerter,
    'slack': alerts.SlackAlerter,
    'pagerduty': alerts.PagerDutyAlerter,
    'twilio': alerts.TwilioAlerter,
    'victorops': alerts.VictorOpsAlerter,
    'telegram': alerts.TelegramAlerter,
    'gitter': alerts.GitterAlerter,
    'servicenow': alerts.ServiceNowAlerter
}

# A partial ordering of alert types. Relative order will be preserved in the resulting alerts list
# For example, jira goes before email so the ticket # will be added to the resulting email.
alerts_order = {
    'jira': 0,
    'email': 1
}

# Global config defaults; populated by load_rules() and copied into each rule by load_options()
base_config = {}
def get_module(module_name):
    """ Loads a module and returns a specific object.
    module_name should 'module.file.object'.
    Returns object or raises EAException on error. """
    try:
        # Split 'package.module.object' into the importable path and the attribute
        module_path, module_class = module_name.rsplit('.', 1)
        loaded = __import__(module_path, globals(), locals(), [module_class])
        return getattr(loaded, module_class)
    except (ImportError, AttributeError, ValueError) as e:
        raise EAException("Could not import module %s: %s" % (module_name, e))
def load_configuration(filename, conf, args=None):
    """ Load a yaml rule file and fill in the relevant fields with objects.

    :param filename: The name of a rule configuration file.
    :param conf: The global configuration dictionary, used for populating defaults.
    :return: The rule configuration, a dictionary.
    """
    try:
        rule = yaml_loader(filename)
    except yaml.scanner.ScannerError as err:
        raise EAException('Could not parse file %s: %s' % (filename, err))

    # Remember which file this rule came from, then resolve its options
    # and any pluggable modules in place.
    rule['rule_file'] = filename
    load_options(rule, conf, args)
    load_modules(rule, args)
    return rule
def load_options(rule, conf, args=None):
    """ Converts time objects, sets defaults, and validates some settings.

    Mutates `rule` in place; raises EAException on any invalid setting.

    :param rule: A dictionary of parsed YAML from a rule config file.
    :param conf: The global configuration dictionary, used for populating defaults.
    """
    # Validate against the JSON schema before touching the rule
    try:
        rule_schema.validate(rule)
    except jsonschema.ValidationError as e:
        raise EAException("Invalid Rule: %s\n%s" % (rule.get('name'), e))

    try:
        # Set all time based parameters
        if 'timeframe' in rule:
            rule['timeframe'] = datetime.timedelta(**rule['timeframe'])
        if 'realert' in rule:
            rule['realert'] = datetime.timedelta(**rule['realert'])
        else:
            rule['realert'] = datetime.timedelta(minutes=1)
        # Scheduled aggregations keep their dict form; interval ones become timedeltas
        if 'aggregation' in rule and not rule['aggregation'].get('schedule'):
            rule['aggregation'] = datetime.timedelta(**rule['aggregation'])
        if 'query_delay' in rule:
            rule['query_delay'] = datetime.timedelta(**rule['query_delay'])
        if 'buffer_time' in rule:
            rule['buffer_time'] = datetime.timedelta(**rule['buffer_time'])
        if 'exponential_realert' in rule:
            rule['exponential_realert'] = datetime.timedelta(**rule['exponential_realert'])
        if 'kibana4_start_timedelta' in rule:
            rule['kibana4_start_timedelta'] = datetime.timedelta(**rule['kibana4_start_timedelta'])
        if 'kibana4_end_timedelta' in rule:
            rule['kibana4_end_timedelta'] = datetime.timedelta(**rule['kibana4_end_timedelta'])
    except (KeyError, TypeError) as e:
        raise EAException('Invalid time format used: %s' % (e))

    # Set defaults, copy defaults from config.yaml
    for key, val in base_config.items():
        rule.setdefault(key, val)
    # NOTE: 'realert' is always set by the block above, so this setdefault is a no-op
    rule.setdefault('realert', datetime.timedelta(seconds=0))
    rule.setdefault('aggregation', datetime.timedelta(seconds=0))
    rule.setdefault('query_delay', datetime.timedelta(seconds=0))
    rule.setdefault('timestamp_field', '@timestamp')
    rule.setdefault('filter', [])
    rule.setdefault('timestamp_type', 'iso')
    rule.setdefault('timestamp_format', '%Y-%m-%dT%H:%M:%SZ')
    rule.setdefault('_source_enabled', True)
    rule.setdefault('use_local_time', True)
    rule.setdefault('description', "")

    # Set timestamp_type conversion function, used when generating queries and processing hits
    rule['timestamp_type'] = rule['timestamp_type'].strip().lower()
    if rule['timestamp_type'] == 'iso':
        rule['ts_to_dt'] = ts_to_dt
        rule['dt_to_ts'] = dt_to_ts
    elif rule['timestamp_type'] == 'unix':
        rule['ts_to_dt'] = unix_to_dt
        rule['dt_to_ts'] = dt_to_unix
    elif rule['timestamp_type'] == 'unix_ms':
        rule['ts_to_dt'] = unixms_to_dt
        rule['dt_to_ts'] = dt_to_unixms
    elif rule['timestamp_type'] == 'custom':
        # Closures capture `rule`, so the format is read at call time
        def _ts_to_dt_with_format(ts):
            return ts_to_dt_with_format(ts, ts_format=rule['timestamp_format'])

        def _dt_to_ts_with_format(dt):
            return dt_to_ts_with_format(dt, ts_format=rule['timestamp_format'])

        rule['ts_to_dt'] = _ts_to_dt_with_format
        rule['dt_to_ts'] = _dt_to_ts_with_format
    else:
        raise EAException('timestamp_type must be one of iso, unix, or unix_ms')

    # Set HipChat options from global config
    rule.setdefault('hipchat_msg_color', 'red')
    rule.setdefault('hipchat_domain', 'api.hipchat.com')
    rule.setdefault('hipchat_notify', True)
    rule.setdefault('hipchat_from', '')
    rule.setdefault('hipchat_ignore_ssl_errors', False)

    # Make sure we have required options
    if required_locals - frozenset(rule.keys()):
        raise EAException('Missing required option(s): %s' % (', '.join(required_locals - frozenset(rule.keys()))))

    if 'include' in rule and type(rule['include']) != list:
        raise EAException('include option must be a list')

    # List-valued keys are flattened to a comma-joined string; the original
    # list is kept under a compound_* key for per-field lookups
    if isinstance(rule.get('query_key'), list):
        rule['compound_query_key'] = rule['query_key']
        rule['query_key'] = ','.join(rule['query_key'])

    if isinstance(rule.get('aggregate_key'), list):
        rule['compound_aggregate_key'] = rule['aggregate_key']
        rule['aggregate_key'] = ','.join(rule['aggregate_key'])

    # Add QK, CK and timestamp to include
    include = rule.get('include', ['*'])
    if 'query_key' in rule:
        include.append(rule['query_key'])
    if 'compound_query_key' in rule:
        include += rule['compound_query_key']
    if 'compound_aggregate_key' in rule:
        include += rule['compound_aggregate_key']
    if 'compare_key' in rule:
        include.append(rule['compare_key'])
    if 'top_count_keys' in rule:
        include += rule['top_count_keys']
    include.append(rule['timestamp_field'])
    # set() deduplicates; field order in 'include' is not preserved
    rule['include'] = list(set(include))

    # Change top_count_keys to .raw
    if 'top_count_keys' in rule and rule.get('raw_count_keys', True):
        keys = rule.get('top_count_keys')
        rule['top_count_keys'] = [key + '.raw' if not key.endswith('.raw') else key for key in keys]

    # Check that generate_kibana_url is compatible with the filters
    if rule.get('generate_kibana_link'):
        for es_filter in rule.get('filter'):
            if es_filter:
                if 'not' in es_filter:
                    es_filter = es_filter['not']
                if 'query' in es_filter:
                    es_filter = es_filter['query']
                # NOTE: dict.keys()[0] is Python 2 only (keys() is a view in Python 3)
                if es_filter.keys()[0] not in ('term', 'query_string', 'range'):
                    raise EAException('generate_kibana_link is incompatible with filters other than term, query_string and range. '
                                      'Consider creating a dashboard and using use_kibana_dashboard instead.')

    # Check that doc_type is provided if use_count/terms_query
    if rule.get('use_count_query') or rule.get('use_terms_query'):
        if 'doc_type' not in rule:
            raise EAException('doc_type must be specified.')

    # Check that query_key is set if use_terms_query
    if rule.get('use_terms_query'):
        if 'query_key' not in rule:
            raise EAException('query_key must be specified with use_terms_query')

    # Warn if use_strf_index is used with %y, %M or %D
    # (%y = short year, %M = minutes, %D = full date)
    if rule.get('use_strftime_index'):
        for token in ['%y', '%M', '%D']:
            if token in rule.get('index'):
                logging.warning('Did you mean to use %s in the index? '
                                'The index will be formatted like %s' % (token,
                                                                         datetime.datetime.now().strftime(rule.get('index'))))
def load_modules(rule, args=None):
    """ Loads things that could be modules. Enhancements, alerts and rule type. """
    # Resolve and instantiate each match enhancement, either by name from the
    # enhancements package or as a dotted module path.
    loaded_enhancements = []
    for enhancement_name in rule.get('match_enhancements', []):
        if enhancement_name in dir(enhancements):
            enhancement_cls = getattr(enhancements, enhancement_name)
        else:
            enhancement_cls = get_module(enhancement_name)
        if not issubclass(enhancement_cls, enhancements.BaseEnhancement):
            raise EAException("Enhancement module %s not a subclass of BaseEnhancement" % (enhancement_name))
        loaded_enhancements.append(enhancement_cls(rule))
    rule['match_enhancements'] = loaded_enhancements

    # Convert rule type into RuleType object
    rule['type'] = rules_mapping.get(rule['type']) or get_module(rule['type'])

    if not issubclass(rule['type'], ruletypes.RuleType):
        raise EAException('Rule module %s is not a subclass of RuleType' % (rule['type']))

    # Make sure we have required alert and type options
    reqs = rule['type'].required_options
    if reqs - frozenset(rule.keys()):
        raise EAException('Missing required option(s): %s' % (', '.join(reqs - frozenset(rule.keys()))))

    # Instantiate rule
    try:
        rule['type'] = rule['type'](rule, args)
    except (KeyError, EAException) as err:
        raise EAException('Error initializing rule %s: %s' % (rule['name'], err))

    # Instantiate alert
    rule['alert'] = load_alerts(rule, alert_field=rule['alert'])
def get_file_paths(conf, use_rule=None):
    """Return the list of rule yaml file paths to load.

    Passing a filename directly as use_rule bypasses the rules_folder
    and .yaml checks entirely."""
    if use_rule and os.path.isfile(use_rule):
        return [use_rule]

    folder = conf['rules_folder']
    found = []
    if conf['scan_subdirectories']:
        # Recurse into subdirectories, optionally restricting to one filename
        for root, _, files in os.walk(folder):
            found.extend(
                os.path.join(root, name)
                for name in files
                if name.endswith('.yaml') and not (use_rule and use_rule != name)
            )
    else:
        # Flat scan of the top-level folder only
        for name in os.listdir(folder):
            candidate = os.path.join(folder, name)
            if name.endswith('.yaml') and os.path.isfile(candidate):
                found.append(candidate)
    return found
def load_alerts(rule, alert_field):
    """ Converts the rule's 'alert' setting into a sorted list of instantiated
    Alerter objects. Raises EAException on any unknown or misconfigured alert.

    NOTE: this function uses Python 2-only constructs (basestring, .next(),
    tuple-unpacking lambda) and will not run under Python 3 as written.
    """
    def normalize_config(alert):
        """Alert config entries are either "alertType" or {"alertType": {"key": "data"}}.
        This function normalizes them both to the latter format. """
        if isinstance(alert, basestring):
            # Bare name: the alert shares the rule's own config dict
            return alert, rule
        elif isinstance(alert, dict):
            # Single-entry dict: merge its options over a shallow copy of the rule
            name, config = iter(alert.items()).next()
            config_copy = copy.copy(rule)
            config_copy.update(config)  # warning, this (intentionally) mutates the rule dict
            return name, config_copy
        else:
            raise EAException()

    def create_alert(alert, alert_config):
        # Resolve by well-known name first, then as a dotted module path
        alert_class = alerts_mapping.get(alert) or get_module(alert)
        if not issubclass(alert_class, alerts.Alerter):
            raise EAException('Alert module %s is not a subclass of Alerter' % (alert))
        missing_options = (rule['type'].required_options | alert_class.required_options) - frozenset(alert_config or [])
        if missing_options:
            raise EAException('Missing required option(s): %s' % (', '.join(missing_options)))
        return alert_class(alert_config)

    try:
        if type(alert_field) != list:
            alert_field = [alert_field]

        alert_field = [normalize_config(x) for x in alert_field]
        # Sort by alerts_order rank; unranked alert types (-1) sort first
        alert_field = sorted(alert_field, key=lambda (a, b): alerts_order.get(a, -1))
        # Convert all alerts into Alerter objects
        alert_field = [create_alert(a, b) for a, b in alert_field]
    except (KeyError, EAException) as e:
        raise EAException('Error initiating alert %s: %s' % (rule['alert'], e))
    return alert_field
def load_rules(args):
    """ Creates a conf dictionary for ElastAlerter. Loads the global
    config file and then each rule found in rules_folder.

    :param args: The parsed arguments to ElastAlert
    :return: The global configuration, a dictionary.
    """
    filename = args.config
    conf = yaml_loader(filename)
    use_rule = args.rule

    # Make sure we have all required globals
    if required_globals - frozenset(conf.keys()):
        raise EAException('%s must contain %s' % (filename, ', '.join(required_globals - frozenset(conf.keys()))))

    conf.setdefault('max_query_size', 10000)
    conf.setdefault('scroll_keepalive', '30s')
    conf.setdefault('disable_rules_on_error', True)
    conf.setdefault('scan_subdirectories', True)

    # Convert run_every, buffer_time into a timedelta object
    try:
        conf['run_every'] = datetime.timedelta(**conf['run_every'])
        conf['buffer_time'] = datetime.timedelta(**conf['buffer_time'])
        if 'alert_time_limit' in conf:
            conf['alert_time_limit'] = datetime.timedelta(**conf['alert_time_limit'])
        else:
            conf['alert_time_limit'] = datetime.timedelta(days=2)
        if 'old_query_limit' in conf:
            conf['old_query_limit'] = datetime.timedelta(**conf['old_query_limit'])
        else:
            conf['old_query_limit'] = datetime.timedelta(weeks=1)
    except (KeyError, TypeError) as err:
        raise EAException('Invalid time format used: %s' % (err))

    # Snapshot the global config so load_options can copy defaults into each rule
    global base_config
    base_config = copy.deepcopy(conf)

    # Load each rule configuration file, rejecting duplicate rule names
    rules = []
    seen_names = []
    for rule_file in get_file_paths(conf, use_rule):
        try:
            rule = load_configuration(rule_file, conf, args)
            if rule['name'] in seen_names:
                raise EAException('Duplicate rule named %s' % (rule['name']))
        except EAException as err:
            raise EAException('Error loading file %s: %s' % (rule_file, err))

        rules.append(rule)
        seen_names.append(rule['name'])

    conf['rules'] = rules
    return conf
def get_rule_hashes(conf, use_rule=None):
    """Return a dict mapping each rule file path to the SHA-1 digest of its
    contents, used to detect on-disk rule changes between runs."""
    rule_files = get_file_paths(conf, use_rule)
    rule_mod_times = {}
    for rule_file in rule_files:
        # BUGFIX: open in binary mode so the hash is computed over the raw
        # bytes — text mode translates newlines on Windows (and would hand
        # str instead of bytes to sha1 on Python 3), changing the digest.
        with open(rule_file, 'rb') as fh:
            rule_mod_times[rule_file] = hashlib.sha1(fh.read()).digest()
    return rule_mod_times
| |
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from cassandra import OperationTimedOut, WriteTimeout
from cassandra.cluster import Cluster, ExecutionProfile, ResponseFuture
from cassandra.query import SimpleStatement
from cassandra.policies import ConstantSpeculativeExecutionPolicy, RoundRobinPolicy, RetryPolicy, WriteType
from tests.integration import PROTOCOL_VERSION, greaterthancass21, requiressimulacron, SIMULACRON_JAR, \
CASSANDRA_VERSION
from tests.integration.simulacron.utils import start_and_prime_singledc, prime_query, \
stop_simulacron, NO_THEN, clear_queries
from itertools import count
class BadRoundRobinPolicy(RoundRobinPolicy):
    """Deliberately misbehaving load-balancing policy for speculative-execution
    tests: every query plan repeats the full live-host list ten times, giving
    speculative retries a long list of candidate hosts to walk through."""

    def make_query_plan(self, working_keyspace=None, query=None):
        # Advance the round-robin cursor as the parent policy would.
        # (Removed the unused local `pos` that captured the pre-increment value.)
        self._position += 1

        hosts = []
        for _ in range(10):
            hosts.extend(self._live_hosts)
        return hosts
# This doesn't work well with Windows clock granularity
@requiressimulacron
class SpecExecTest(unittest.TestCase):
    """Integration tests for speculative query execution.

    Runs against a Simulacron-simulated cluster; primed delays let the tests
    control exactly when speculative attempts fire.
    """

    @classmethod
    def setUpClass(cls):
        # The @requiressimulacron decorator skips the tests; this early return
        # only avoids touching the cluster when the environment is unusable.
        if SIMULACRON_JAR is None or CASSANDRA_VERSION < "2.1":
            return
        start_and_prime_singledc()
        cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False)
        cls.session = cls.cluster.connect(wait_for_all_pools=True)
        # Execution profiles pairing different LBPs with different
        # ConstantSpeculativeExecutionPolicy(delay, max_attempts) settings.
        spec_ep_brr = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(),
                                       speculative_execution_policy=ConstantSpeculativeExecutionPolicy(1, 6),
                                       request_timeout=12)
        spec_ep_rr = ExecutionProfile(speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.5, 10),
                                      request_timeout=12)
        spec_ep_rr_lim = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(),
                                          speculative_execution_policy=ConstantSpeculativeExecutionPolicy(0.5, 1),
                                          request_timeout=12)
        spec_ep_brr_lim = ExecutionProfile(load_balancing_policy=BadRoundRobinPolicy(),
                                           speculative_execution_policy=ConstantSpeculativeExecutionPolicy(4, 10))
        cls.cluster.add_execution_profile("spec_ep_brr", spec_ep_brr)
        cls.cluster.add_execution_profile("spec_ep_rr", spec_ep_rr)
        cls.cluster.add_execution_profile("spec_ep_rr_lim", spec_ep_rr_lim)
        cls.cluster.add_execution_profile("spec_ep_brr_lim", spec_ep_brr_lim)

    @classmethod
    def tearDownClass(cls):
        if SIMULACRON_JAR is None or CASSANDRA_VERSION < "2.1":
            return
        cls.cluster.shutdown()
        stop_simulacron()

    def tearDown(self):
        # Drop all primed queries so tests cannot leak behavior into each other.
        clear_queries()

    @greaterthancass21
    def test_speculative_execution(self):
        """
        Test to ensure that speculative execution honors LBP, and that they retry appropriately.
        This test will use various LBP, and ConstantSpeculativeExecutionPolicy settings and ensure the proper number of hosts are queried
        @since 3.7.0
        @jira_ticket PYTHON-218
        @expected_result speculative retries should honor max retries, idempotent state of queries, and underlying lbp.
        @test_category metadata
        """
        query_to_prime = "INSERT INTO test3rf.test (k, v) VALUES (0, 1);"
        prime_query(query_to_prime, then={"delay_in_ms": 10000})
        statement = SimpleStatement(query_to_prime, is_idempotent=True)
        statement_non_idem = SimpleStatement(query_to_prime, is_idempotent=False)
        # This LBP should repeat hosts up to around 30
        result = self.session.execute(statement, execution_profile='spec_ep_brr')
        self.assertEqual(7, len(result.response_future.attempted_hosts))
        # This LBP should keep host list to 3
        result = self.session.execute(statement, execution_profile='spec_ep_rr')
        self.assertEqual(3, len(result.response_future.attempted_hosts))
        # Spec_execution policy should limit retries to 1
        result = self.session.execute(statement, execution_profile='spec_ep_rr_lim')
        self.assertEqual(2, len(result.response_future.attempted_hosts))
        # Spec_execution policy should not be used if the query is not idempotent
        result = self.session.execute(statement_non_idem, execution_profile='spec_ep_brr')
        self.assertEqual(1, len(result.response_future.attempted_hosts))
        # Default policy with non_idem query
        result = self.session.execute(statement_non_idem, timeout=12)
        self.assertEqual(1, len(result.response_future.attempted_hosts))
        # Should be able to run an idempotent query against default execution policy with no speculative_execution_policy
        result = self.session.execute(statement, timeout=12)
        self.assertEqual(1, len(result.response_future.attempted_hosts))
        # Test timeout with spec_ex
        with self.assertRaises(OperationTimedOut):
            self.session.execute(statement, execution_profile='spec_ep_rr', timeout=.5)
        prepared_query_to_prime = "SELECT * FROM test3rf.test where k = ?"
        when = {"params": {"k": "0"}, "param_types": {"k": "ascii"}}
        prime_query(prepared_query_to_prime, when=when, then={"delay_in_ms": 4000})
        # PYTHON-736 Test speculation policy works with a prepared statement
        prepared_statement = self.session.prepare(prepared_query_to_prime)
        # non-idempotent
        result = self.session.execute(prepared_statement, ("0",), execution_profile='spec_ep_brr')
        self.assertEqual(1, len(result.response_future.attempted_hosts))
        # idempotent
        prepared_statement.is_idempotent = True
        result = self.session.execute(prepared_statement, ("0",), execution_profile='spec_ep_brr')
        self.assertLess(1, len(result.response_future.attempted_hosts))

    def test_speculative_and_timeout(self):
        """
        Test to ensure the timeout is honored when using speculative execution
        @since 3.10
        @jira_ticket PYTHON-750
        @expected_result speculative retries be schedule every fixed period, during the maximum
        period of the timeout.
        @test_category metadata
        """
        query_to_prime = "INSERT INTO testkeyspace.testtable (k, v) VALUES (0, 1);"
        prime_query(query_to_prime, then=NO_THEN)
        statement = SimpleStatement(query_to_prime, is_idempotent=True)
        # An OperationTimedOut is placed here in response_future,
        # that's why we can't call session.execute,which would raise it, but
        # we have to directly wait for the event
        response_future = self.session.execute_async(statement, execution_profile='spec_ep_brr_lim',
                                                     timeout=14)
        response_future._event.wait(16)
        self.assertIsInstance(response_future._final_exception, OperationTimedOut)
        # This is because 14 / 4 + 1 = 4
        self.assertEqual(len(response_future.attempted_hosts), 4)

    def test_delay_can_be_0(self):
        """
        Test to validate that the delay can be zero for the ConstantSpeculativeExecutionPolicy
        @since 3.13
        @jira_ticket PYTHON-836
        @expected_result all the queries are executed immediately
        @test_category policy
        """
        query_to_prime = "INSERT INTO madeup_keyspace.madeup_table(k, v) VALUES (1, 2)"
        prime_query(query_to_prime, then={"delay_in_ms": 5000})
        number_of_requests = 4
        spec = ExecutionProfile(load_balancing_policy=RoundRobinPolicy(),
                                speculative_execution_policy=ConstantSpeculativeExecutionPolicy(0, number_of_requests))
        cluster = Cluster()
        cluster.add_execution_profile("spec", spec)
        session = cluster.connect(wait_for_all_pools=True)
        self.addCleanup(cluster.shutdown)
        counter = count()

        def patch_and_count(f):
            # Wrap f so every invocation bumps `counter` before delegating.
            def patched(*args, **kwargs):
                next(counter)
                print("patched")
                f(*args, **kwargs)
            return patched
        # Restore the original send_request after the test, then patch it.
        self.addCleanup(setattr, ResponseFuture, "send_request", ResponseFuture.send_request)
        ResponseFuture.send_request = patch_and_count(ResponseFuture.send_request)
        stmt = SimpleStatement(query_to_prime)
        stmt.is_idempotent = True
        results = session.execute(stmt, execution_profile="spec")
        self.assertEqual(len(results.response_future.attempted_hosts), 3)
        # send_request is called number_of_requests times for the speculative request
        # plus one for the call from the main thread.
        self.assertEqual(next(counter), number_of_requests + 1)
class CustomRetryPolicy(RetryPolicy):
    """Retry policy that rethrows SIMPLE write timeouts and ignores CDC ones.

    Any other write type (and any retry after the first) falls through and
    implicitly returns ``None``, exactly like the original implementation.
    """

    def on_write_timeout(self, query, consistency, write_type,
                         required_responses, received_responses, retry_num):
        # Never retry more than once.
        if retry_num:
            return self.RETHROW, None
        if write_type == WriteType.SIMPLE:
            return self.RETHROW, None
        if write_type == WriteType.CDC:
            return self.IGNORE, None
class CounterRetryPolicy(RetryPolicy):
    """Retry policy that ignores every error while counting how often each
    handler was consulted.

    Each handler advances its own ``itertools.count`` so tests can read the
    number of invocations with ``next(...)``.
    """

    def __init__(self):
        # Delegate to reset_counters so construction and reset share one path.
        self.reset_counters()

    def on_read_timeout(self, query, consistency, required_responses,
                        received_responses, data_retrieved, retry_num):
        next(self.read_timeout)
        return self.IGNORE, None

    def on_write_timeout(self, query, consistency, write_type,
                         required_responses, received_responses, retry_num):
        next(self.write_timeout)
        return self.IGNORE, None

    def on_unavailable(self, query, consistency, required_replicas, alive_replicas, retry_num):
        next(self.unavailable)
        return self.IGNORE, None

    def reset_counters(self):
        # (Re)start every counter from zero.
        self.write_timeout = count()
        self.read_timeout = count()
        self.unavailable = count()
@requiressimulacron
class RetryPolicyTests(unittest.TestCase):
    """Integration tests for retry-policy dispatch (uses Simulacron-primed
    write_timeout responses rather than a real Cassandra error)."""

    @classmethod
    def setUpClass(cls):
        if SIMULACRON_JAR is None or CASSANDRA_VERSION < "2.1":
            return
        start_and_prime_singledc()

    @classmethod
    def tearDownClass(cls):
        if SIMULACRON_JAR is None or CASSANDRA_VERSION < "2.1":
            return
        stop_simulacron()

    def tearDown(self):
        clear_queries()

    def test_retry_policy_ignores_and_rethrows(self):
        """
        Test to verify :class:`~cassandra.protocol.WriteTimeoutErrorMessage` is decoded correctly and that
        :attr:`.~cassandra.policies.RetryPolicy.RETHROW` and
        :attr:`.~cassandra.policies.RetryPolicy.IGNORE` are respected
        to localhost
        @since 3.12
        @jira_ticket PYTHON-812
        @expected_result the retry policy functions as expected
        @test_category connection
        """
        self.set_cluster(CustomRetryPolicy())
        query_to_prime_simple = "SELECT * from simulacron_keyspace.simple"
        query_to_prime_cdc = "SELECT * from simulacron_keyspace.cdc"
        # Primed write_timeout payload; write_type selects the policy branch.
        then = {
            "result": "write_timeout",
            "delay_in_ms": 0,
            "consistency_level": "LOCAL_QUORUM",
            "received": 1,
            "block_for": 2,
            "write_type": "SIMPLE",
            "ignore_on_prepare": True
        }
        prime_query(query_to_prime_simple, then=then, rows=None, column_types=None)
        then["write_type"] = "CDC"
        prime_query(query_to_prime_cdc, then=then, rows=None, column_types=None)
        with self.assertRaises(WriteTimeout):
            self.session.execute(query_to_prime_simple)
        #CDC should be ignored
        self.session.execute(query_to_prime_cdc)

    def test_retry_policy_with_prepared(self):
        """
        Test to verify that the retry policy is called as expected
        for bound and prepared statements when set at the cluster level
        @since 3.13
        @jira_ticket PYTHON-861
        @expected_result the appropriate retry policy is called
        @test_category connection
        """
        counter_policy = CounterRetryPolicy()
        self.set_cluster(counter_policy)
        query_to_prime = "SELECT * from simulacron_keyspace.simulacron_table"
        then = {
            "result": "write_timeout",
            "delay_in_ms": 0,
            "consistency_level": "LOCAL_QUORUM",
            "received": 1,
            "block_for": 2,
            "write_type": "SIMPLE",
            "ignore_on_prepare": True
        }
        prime_query(query_to_prime, then=then, rows=None, column_types=None)
        self.session.execute(query_to_prime)
        self.assertEqual(next(counter_policy.write_timeout), 1)
        counter_policy.reset_counters()
        query_to_prime_prepared = "SELECT * from simulacron_keyspace.simulacron_table WHERE key = :key"
        when = {"params": {"key": "0"}, "param_types": {"key": "ascii"}}
        prime_query(query_to_prime_prepared, when=when, then=then, rows=None, column_types=None)
        prepared_stmt = self.session.prepare(query_to_prime_prepared)
        bound_stm = prepared_stmt.bind({"key": "0"})
        self.session.execute(bound_stm)
        self.assertEqual(next(counter_policy.write_timeout), 1)
        counter_policy.reset_counters()
        self.session.execute(prepared_stmt, ("0",))
        self.assertEqual(next(counter_policy.write_timeout), 1)

    def test_setting_retry_policy_to_statement(self):
        """
        Test to verify that the retry policy is called as expected
        for bound and prepared statements when set to the prepared statement
        @since 3.13
        @jira_ticket PYTHON-861
        @expected_result the appropriate retry policy is called
        @test_category connection
        """
        retry_policy = RetryPolicy()
        self.set_cluster(retry_policy)
        then = {
            "result": "write_timeout",
            "delay_in_ms": 0,
            "consistency_level": "LOCAL_QUORUM",
            "received": 1,
            "block_for": 2,
            "write_type": "SIMPLE",
            "ignore_on_prepare": True
        }
        query_to_prime_prepared = "SELECT * from simulacron_keyspace.simulacron_table WHERE key = :key"
        when = {"params": {"key": "0"}, "param_types": {"key": "ascii"}}
        prime_query(query_to_prime_prepared, when=when, then=then, rows=None, column_types=None)
        counter_policy = CounterRetryPolicy()
        prepared_stmt = self.session.prepare(query_to_prime_prepared)
        # Statement-level policy must override the cluster default set above.
        prepared_stmt.retry_policy = counter_policy
        self.session.execute(prepared_stmt, ("0",))
        self.assertEqual(next(counter_policy.write_timeout), 1)
        counter_policy.reset_counters()
        bound_stmt = prepared_stmt.bind({"key": "0"})
        bound_stmt.retry_policy = counter_policy
        self.session.execute(bound_stmt)
        self.assertEqual(next(counter_policy.write_timeout), 1)

    def set_cluster(self, retry_policy):
        # Helper: build a fresh cluster/session using the given default policy.
        self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False,
                               default_retry_policy=retry_policy)
        self.session = self.cluster.connect(wait_for_all_pools=True)
        self.addCleanup(self.cluster.shutdown)
| |
import collections
import functools
import itertools
from soap import logger
from soap.datatype import int_type, ArrayType
from soap.expression import (
AccessExpr, InputVariable, operators, UpdateExpr, Variable,
)
from soap.semantics import label, Label, label_to_expr
from soap.semantics.schedule.common import (
DependenceType, iter_point_count,
resource_ceil, resource_map_add, resource_map_min
)
from soap.semantics.schedule.extract import ForLoopNestExtractor
from soap.semantics.schedule.distance import dependence_eval
from soap.semantics.schedule.ii import rec_init_int_search, res_init_int
from soap.semantics.schedule.graph.sequential import SequentialScheduleGraph
from soap.transformer.linalg import subscripts_always_equal
# Classifies an ordered pair of array operations (op on the edge source,
# op on the edge sink) into the dependence type used when adding
# loop-carried edges in LoopScheduleGraph._add_array_loop.
_edge_type_map = {
    (operators.INDEX_ACCESS_OP, operators.INDEX_ACCESS_OP):
        DependenceType.independent,
    (operators.INDEX_ACCESS_OP, operators.INDEX_UPDATE_OP):
        DependenceType.flow,
    (operators.INDEX_UPDATE_OP, operators.INDEX_ACCESS_OP):
        DependenceType.anti,
    (operators.INDEX_UPDATE_OP, operators.INDEX_UPDATE_OP):
        DependenceType.output,
}
class LoopScheduleGraph(SequentialScheduleGraph):
    """Schedule graph for a loop (``fix_expr``).

    Extends the sequential schedule graph with loop-carried dependence
    edges so that initiation interval (II), latency and resource usage of
    a pipelined loop can be estimated.
    """

    def __init__(
            self, fix_expr, round_values=None, sequentialize_loops=True,
            scheduler=None, **kwargs):
        # Extract loop-nest structure (iteration variables, kernel body).
        extractor = ForLoopNestExtractor(fix_expr)
        # Only a recognized for-loop nest can be pipelined.
        is_pipelined = extractor.is_for_loop_nest
        iter_vars = extractor.iter_vars
        kernel = extractor.label_kernel
        out_vars = sorted(kernel, key=str)
        super().__init__(
            kernel, out_vars, round_values=round_values,
            sequentialize_loops=sequentialize_loops,
            scheduler=scheduler)
        self.is_pipelined = is_pipelined
        self.fix_expr = fix_expr
        self.iter_vars = iter_vars
        self.iter_slices = extractor.iter_slices
        self.is_for_loop = extractor.is_for_loop
        if self.is_for_loop:
            # Single (non-nested) for loop exposes one iteration slice.
            self.iter_slice = extractor.iter_slice
        self._init_loop_graph()

    def _init_loop_graph(self):
        """Build ``self.loop_graph``: the kernel graph plus loop-carried
        dependence edges, and collect the recurrences found."""
        if not self.is_pipelined:
            return
        loop_graph = self.graph.copy()
        recurrences = set()
        self._init_variable_loops(loop_graph, recurrences)
        self._init_array_loops(loop_graph, recurrences)
        self.loop_graph = loop_graph
        self.recurrences = frozenset(recurrences)

    def _init_variable_loops(self, loop_graph, recurrences):
        """Add a distance-1 self-dependence for every scalar variable that
        is both an input and an output of the kernel."""
        for from_node in self.graph.nodes():
            if not isinstance(from_node, InputVariable):
                continue
            if isinstance(from_node.dtype, ArrayType):
                # Arrays are handled separately in _init_array_loops.
                continue
            out_var = Variable(from_node.name, from_node.dtype)
            if out_var not in self.env:
                continue
            # variable is input & output, should have a self-loop
            attr_dict = {
                'type': DependenceType.flow,
                'latency': 0,
                'distance': 1,
            }
            loop_graph.add_edge(from_node, out_var, attr_dict)
            recurrences.add((out_var, out_var, 1))

    def _init_array_loops(self, loop_graph, recurrences):
        """Consider every ordered pair of array access/update nodes as a
        potential loop-carried dependence."""
        nodes = self._array_nodes(self.graph)
        for from_node, to_node in itertools.product(nodes, repeat=2):
            self._add_array_loop(
                loop_graph, recurrences, from_node, to_node)

    def _add_array_loop(
            self, loop_graph, recurrences, from_node, to_node):
        # Only flow dependences are considered: WAR and WAW are not
        # dependences that impact II, as read/write accesses can always be
        # performed consecutively.
        from_expr = label_to_expr(from_node)
        to_expr = label_to_expr(to_node)
        if from_expr.true_var() != to_expr.true_var():
            # access different arrays
            return
        from_op, to_op = from_expr.op, to_expr.op
        check = (from_op == operators.INDEX_ACCESS_OP and
                 to_op == operators.INDEX_UPDATE_OP)
        if not check:
            return
        dep_type = _edge_type_map[from_op, to_op]
        # NOTE(review): the `check` above restricts pairs to
        # (access, update), which _edge_type_map maps to `flow`; the
        # `independent`/`anti`/`output` branches below appear unreachable
        # as written — confirm before relying on them.
        if dep_type == DependenceType.independent:
            # RAR is not a dependence
            return
        elif dep_type == DependenceType.flow:
            latency = self.node_latency(to_node)
        elif dep_type == DependenceType.anti:
            latency = 1 - self.node_latency(from_node)
        elif dep_type == DependenceType.output:
            latency = 1 + self.node_latency(to_node)
            latency -= self.node_latency(from_node)
        else:
            raise TypeError('Unrecognized dependence type.')
        source_expr = to_expr.subscript
        sink_expr = from_expr.subscript
        if subscripts_always_equal(source_expr, sink_expr):
            # quick hack for the case when read/write accesses same location,
            # Vivado HLS can simply iterate on a register
            latency = -self.node_latency(from_node)
        # Dependence distance in iterations; None means no dependence.
        distance = dependence_eval(
            self.iter_vars, self.iter_slices, source_expr, sink_expr)
        if distance is None:
            # no dependence
            return
        attr_dict = {
            'type': dep_type,
            'latency': latency,
            'distance': distance,
        }
        loop_graph.add_edge(from_node, to_node, attr_dict)
        if dep_type != DependenceType.flow:
            return
        # Record the recurrence in expression form for later reuse.
        from_expr = AccessExpr(from_expr.true_var(), from_expr.subscript)
        to_expr = UpdateExpr(
            to_expr.true_var(), to_expr.subscript, Variable('__dont_care'))
        recurrences.add((from_expr, to_expr, distance))

    def init_graph(self):
        """Lazily build (and cache) the schedule graph of the loop's
        initialization state; returns None when the init state is a Label."""
        try:
            return self._init_graph
        except AttributeError:
            pass
        init_state = self.fix_expr.init_state
        if isinstance(init_state, Label):
            return None
        _, init_env = label(self.fix_expr.init_state, None, None, fusion=False)
        self._init_graph = SequentialScheduleGraph(
            init_env, init_env, round_values=self.round_values,
            sequentialize_loops=self.sequentialize_loops,
            scheduler=self.scheduler)
        return self._init_graph

    def resource_counts(self):
        """Count operators by (dtype, op) and memory accesses per array.

        Cached; returns ``(operator_map, memory_map)``.
        """
        try:
            return self._resource_counts
        except AttributeError:
            pass
        operator_map = collections.defaultdict(int)
        memory_map = collections.defaultdict(int)
        for node in self.graph.nodes():
            expr, dtype, op = self.node_expr(node)
            if expr is None:
                continue
            if op in [operators.INDEX_ACCESS_OP, operators.INDEX_UPDATE_OP]:
                # Memory ops are tallied per array, not per (dtype, op).
                memory_map[label_to_expr(node).true_var()] += 1
                continue
            operator_map[dtype, op] += 1
        self._resource_counts = (operator_map, memory_map)
        return self._resource_counts

    def initiation_interval(self):
        """Cached II: full depth when not pipelined, otherwise the maximum
        of resource-constrained and recurrence-constrained minimum II."""
        try:
            return self._initiation_interval
        except AttributeError:
            pass
        if not self.is_pipelined:
            self._initiation_interval = self.depth()
        else:
            _, access_map = self.resource_counts()
            res_mii = res_init_int(access_map)
            self._initiation_interval = rec_init_int_search(
                self.loop_graph, res_mii, round_values=self.round_values)
        return self._initiation_interval

    def depth(self):
        """Pipeline depth: the latency of one kernel iteration."""
        return self.sequential_latency()

    def trip_count(self):
        """Total number of iterations of the loop (nest); infinity when the
        loop shape cannot be recognized."""
        if not self.is_pipelined:
            if self.is_for_loop:
                return iter_point_count(self.iter_slice)
            else:
                logger.warning(
                    'Failed to find trip count for loop',
                    self.fix_expr.format())
                return float('inf')
        trip_counts = [iter_point_count(s) for s in self.iter_slices]
        return functools.reduce(lambda x, y: x * y, trip_counts)

    def loop_latency(self):
        """Cached total latency: init latency + (trip_count - 1) * II + depth."""
        try:
            return self._loop_latency
        except AttributeError:
            pass
        init_graph = self.init_graph()
        if init_graph:
            init_latency = init_graph.sequential_latency()
        else:
            init_latency = 0
        trip_count = self.trip_count()
        depth = self.depth()
        ii = self.initiation_interval()
        loop_latency = (trip_count - 1) * ii + depth
        self._loop_latency = init_latency + loop_latency
        logger.debug(
            'Initiation interval: {}, trip_count: {}, depth: {}, latency: {}'
            .format(ii, trip_count, depth, loop_latency))
        return self._loop_latency

    # Alias used by callers expecting the generic name.
    latency = loop_latency

    def loop_resource(self):
        """Cached resource estimate ``(total_map, alloc_map)``: totals plus
        an allocation scaled by II when pipelined."""
        try:
            return self._loop_resource
        except AttributeError:
            pass
        if not self.is_pipelined:
            total_map, alloc_map = self.sequential_resource()
        else:
            total_map, _ = self.resource_counts()
            alloc_map = {}
            ii = self.initiation_interval()
            # A unit issued every `ii` cycles can be shared ii ways.
            alloc_map = {
                dtype_op: count / ii
                for dtype_op, count in total_map.items()}
            if self.round_values:
                resource_ceil(alloc_map)
        # add additional adders for incrementing loop nest iterators
        resource_map_add(total_map, {
            (int_type, operators.ADD_OP): len(self.iter_vars) - 1,
        })
        init_graph = self.init_graph()
        if init_graph:
            init_total_map, init_min_alloc_map = \
                init_graph.sequential_resource()
            resource_map_add(total_map, init_total_map)
            resource_map_min(alloc_map, init_min_alloc_map)
        self._loop_resource = (total_map, alloc_map)
        return self._loop_resource

    # Alias used by callers expecting the generic name.
    resource = loop_resource
| |
"""Implementation of DPLL algorithm
Further improvements: eliminate calls to pl_true, implement branching rules,
efficient unit propagation.
References:
- http://en.wikipedia.org/wiki/DPLL_algorithm
- http://bioinformatics.louisville.edu/ouyang/MingOuyangThesis.pdf
"""
from sympy.core import Symbol
from sympy import Predicate
from sympy.logic.boolalg import Or, Not, conjuncts, disjuncts, to_cnf, \
to_int_repr
from sympy.logic.inference import pl_true, literal_symbol
def dpll_satisfiable(expr):
    """
    Check satisfiability of a propositional sentence.
    It returns a model rather than True when it succeeds

    >>> from sympy import symbols
    >>> from sympy.abc import A, B
    >>> from sympy.logic.algorithms.dpll import dpll_satisfiable
    >>> dpll_satisfiable(A & ~B)
    {A: True, B: False}
    >>> dpll_satisfiable(A & ~A)
    False
    """
    # Translate the sentence into the integer clause representation:
    # proposition i (1-based) stands for props[i - 1].
    props = list(expr.atoms(Symbol, Predicate))
    props_int = set(range(1, len(props) + 1))
    int_clauses = to_int_repr(conjuncts(to_cnf(expr)), props)
    model = dpll_int_repr(int_clauses, props_int, {})
    if not model:
        # Unsatisfiable: propagate the falsy result unchanged.
        return model
    # Map the integer assignment back onto the original propositions.
    return {props[index - 1]: value for index, value in model.items()}
def dpll(clauses, symbols, model):
    """
    Compute satisfiability in a partial model.
    Clauses is an array of conjuncts.

    Mutates ``symbols`` and ``model`` in place; returns a satisfying model
    (a dict) or False.

    >>> from sympy.abc import A, B, D
    >>> from sympy.logic.algorithms.dpll import dpll
    >>> dpll([A, B, D], [A, B], {D: False})
    False
    """
    # compute DP kernel
    # Unit propagation: repeatedly assign the forced value of any clause
    # with a single unbound literal, then simplify.
    P, value = find_unit_clause(clauses, model)
    while P:
        model.update({P: value})
        symbols.remove(P)
        if not value: P = ~P
        clauses = unit_propagate(clauses, P)
        P, value = find_unit_clause(clauses, model)
    # Pure literal elimination: a symbol appearing with only one polarity
    # can be assigned that polarity without losing satisfiability.
    P, value = find_pure_symbol(symbols, clauses)
    while P:
        model.update({P: value})
        symbols.remove(P)
        if not value: P = ~P
        clauses = unit_propagate(clauses, P)
        P, value = find_pure_symbol(symbols, clauses)
    # end DP kernel
    unknown_clauses = []
    for c in clauses:
        val = pl_true(c, model)
        if val == False:
            # A clause is definitely false: this branch is unsatisfiable.
            return False
        if val != True:
            unknown_clauses.append(c)
    if not unknown_clauses:
        return model
    if not clauses: return model
    # Branch: try P = True in the current model, P = False in a copy.
    P = symbols.pop()
    model_copy = model.copy()
    model.update({P: True})
    model_copy.update({P: False})
    symbols_copy = symbols[:]
    return (dpll(unit_propagate(unknown_clauses, P), symbols, model) or
            dpll(unit_propagate(unknown_clauses, Not(P)), symbols_copy, model_copy))
def dpll_int_repr(clauses, symbols, model):
    """
    Compute satisfiability in a partial model.
    Arguments are expected to be in integer representation

    Mutates ``symbols`` and ``model`` in place; returns a satisfying model
    (dict of symbol index -> bool) or False.

    >>> from sympy.logic.algorithms.dpll import dpll_int_repr
    >>> dpll_int_repr([set([1]), set([2]), set([3])], set([1, 2]), {3: False})
    False
    """
    # compute DP kernel
    # Unit propagation on the integer representation (negation is -P).
    P, value = find_unit_clause_int_repr(clauses, model)
    while P:
        model.update({P: value})
        symbols.remove(P)
        if not value:
            P = -P
        clauses = unit_propagate_int_repr(clauses, P)
        P, value = find_unit_clause_int_repr(clauses, model)
    # Pure literal elimination.
    P, value = find_pure_symbol_int_repr(symbols, clauses)
    while P:
        model.update({P: value})
        symbols.remove(P)
        if not value:
            P = -P
        clauses = unit_propagate_int_repr(clauses, P)
        P, value = find_pure_symbol_int_repr(symbols, clauses)
    # end DP kernel
    unknown_clauses = []
    for c in clauses:
        val = pl_true_int_repr(c, model)
        if val is False:
            # Some clause is definitely false: branch is unsatisfiable.
            return False
        if val is not True:
            unknown_clauses.append(c)
    if not unknown_clauses:
        return model
    # Branch on an arbitrary remaining symbol: True here, False in a copy.
    P = symbols.pop()
    model_copy = model.copy()
    model.update({P: True})
    model_copy.update({P: False})
    symbols_copy = symbols.copy()
    return (dpll_int_repr(unit_propagate_int_repr(unknown_clauses, P), symbols, model) or
            dpll_int_repr(unit_propagate_int_repr(unknown_clauses, -P), symbols_copy, model_copy))
### helper methods for DPLL
def pl_true_int_repr(clause, model=None):
    """
    Lightweight version of pl_true.
    Argument clause represents the set of args of an Or clause. This is used
    inside dpll_int_repr, it is not meant to be used directly.

    Returns True if any literal is satisfied by ``model``, False if every
    literal is falsified, and None when the clause is still undetermined.

    >>> from sympy.logic.algorithms.dpll import pl_true_int_repr
    >>> pl_true_int_repr(set([1, 2]), {1: False})
    >>> pl_true_int_repr(set([1, 2]), {1: False, 2: False})
    False
    """
    # A None default avoids the shared-mutable-default pitfall; behavior is
    # identical since the model is never mutated here.
    if model is None:
        model = {}
    result = False
    for lit in clause:
        if lit < 0:
            # Negative literal: its value is the negation of the variable's.
            p = model.get(-lit)
            if p is not None:
                p = not p
        else:
            p = model.get(lit)
        if p is True:
            return True
        elif p is None:
            # An unbound literal keeps the clause undetermined unless some
            # other literal turns out True.
            result = None
    return result
def unit_propagate(clauses, symbol):
    """
    Returns an equivalent set of clauses
    If a set of clauses contains the unit clause l, the other clauses are
    simplified by the application of the two following rules:

      1. every clause containing l is removed
      2. in every clause that contains ~l this literal is deleted

    Arguments are expected to be in CNF.

    >>> from sympy import symbols
    >>> from sympy.abc import A, B, D
    >>> from sympy.logic.algorithms.dpll import unit_propagate
    >>> unit_propagate([A | B, D | ~B, B], B)
    [D, B]
    """
    output = []
    for c in clauses:
        if c.func != Or:
            # Non-disjunction (a lone literal): keep it untouched.
            output.append(c)
            continue
        for arg in c.args:
            if arg == ~symbol:
                # Rule 2: drop ~symbol from the clause.
                output.append(Or(*[x for x in c.args if x != ~symbol]))
                break
            if arg == symbol:
                # Rule 1: clause is satisfied; breaking without hitting the
                # ~symbol case above skips the for-else append below.
                break
        else:
            # Neither symbol nor ~symbol occurred: keep the clause as-is.
            output.append(c)
    return output
def unit_propagate_int_repr(clauses, s):
    """
    Same as unit_propagate, but arguments are expected to be in integer
    representation

    >>> from sympy.logic.algorithms.dpll import unit_propagate_int_repr
    >>> unit_propagate_int_repr([set([1, 2]), set([3, -2]), set([2])], 2)
    [set([3])]
    """
    negated_literal = {-s}
    simplified = []
    for clause in clauses:
        if s in clause:
            # Rule 1: the clause is satisfied by the unit literal; drop it.
            continue
        # Rule 2: remove the negated literal, if present.
        simplified.append(clause - negated_literal)
    return simplified
def find_pure_symbol(symbols, unknown_clauses):
    """
    Find a symbol and its value if it appears only as a positive literal
    (or only as a negative) in clauses.

    >>> from sympy import symbols
    >>> from sympy.abc import A, B, D
    >>> from sympy.logic.algorithms.dpll import find_pure_symbol
    >>> find_pure_symbol([A, B, D], [A|~B,~B|~D,D|A])
    (A, True)
    """
    for sym in symbols:
        seen_pos = seen_neg = False
        for clause in unknown_clauses:
            literals = disjuncts(clause)
            if not seen_pos and sym in literals:
                seen_pos = True
            if not seen_neg and Not(sym) in literals:
                seen_neg = True
        if seen_pos != seen_neg:
            # Exactly one polarity occurs: assign that polarity.
            return sym, seen_pos
    return None, None
def find_pure_symbol_int_repr(symbols, unknown_clauses):
    """
    Same as find_pure_symbol, but arguments are expected
    to be in integer representation

    >>> from sympy.logic.algorithms.dpll import find_pure_symbol_int_repr
    >>> find_pure_symbol_int_repr(set([1,2,3]), [set([1, -2]), set([-2, -3]), set([3, 1])])
    (1, True)
    """
    # Collect every literal occurring in the clauses. A plain union loop
    # replaces the original bare `reduce`, which is a NameError on
    # Python 3 (functools.reduce is no longer a builtin).
    all_symbols = set()
    for clause in unknown_clauses:
        all_symbols |= clause
    found_pos = all_symbols.intersection(symbols)
    found_neg = all_symbols.intersection([-s for s in symbols])
    for p in found_pos:
        if -p not in found_neg:
            # p occurs only positively: it is pure, assign True.
            return p, True
    for p in found_neg:
        if -p not in found_pos:
            # p occurs only negatively: the variable -p is pure, assign False.
            return -p, False
    return None, None
def find_unit_clause(clauses, model):
    """
    A unit clause has only 1 variable that is not bound in the model.

    Returns ``(symbol, value)`` forcing the clause true, or ``(None, None)``
    when no unit clause exists.

    >>> from sympy import symbols
    >>> from sympy.abc import A, B, D
    >>> from sympy.logic.algorithms.dpll import find_unit_clause
    >>> find_unit_clause([A | B | D, B | ~D, A | ~B], {A:True})
    (B, False)
    """
    for clause in clauses:
        num_not_in_model = 0
        for literal in disjuncts(clause):
            sym = literal_symbol(literal)
            if sym not in model:
                num_not_in_model += 1
                # P/value keep the last unbound literal seen; when the count
                # is exactly 1 below, they hold the unique unbound literal.
                P, value = sym, not (literal.func is Not)
        if num_not_in_model == 1:
            return P, value
    return None, None
def find_unit_clause_int_repr(clauses, model):
    """
    Same as find_unit_clause, but arguments are expected to be in
    integer representation.

    >>> from sympy.logic.algorithms.dpll import find_unit_clause_int_repr
    >>> find_unit_clause_int_repr([set([1, 2, 3]), set([2, -3]), set([1, -2])], {1: True})
    (2, False)
    """
    # Every literal (either polarity) whose variable is already assigned.
    assigned = set(model) | {-sym for sym in model}
    for clause in clauses:
        free = clause - assigned
        if len(free) != 1:
            continue
        literal = free.pop()
        # A negative unit literal forces the variable to False.
        return (-literal, False) if literal < 0 else (literal, True)
    return None, None
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ActionRulesOperations(object):
"""ActionRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.alertsmanagement.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to issue HTTP requests.
        self._client = client
        # msrest serializer/deserializer pair for request/response models.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service client configuration (holds e.g. subscription_id).
        self._config = config
    def list_by_subscription(
        self,
        target_resource_group=None,  # type: Optional[str]
        target_resource_type=None,  # type: Optional[str]
        target_resource=None,  # type: Optional[str]
        severity=None,  # type: Optional[Union[str, "_models.Severity"]]
        monitor_service=None,  # type: Optional[Union[str, "_models.MonitorService"]]
        impacted_scope=None,  # type: Optional[str]
        description=None,  # type: Optional[str]
        alert_rule_id=None,  # type: Optional[str]
        action_group=None,  # type: Optional[str]
        name=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.ActionRulesList"]
        """Get all action rule in a given subscription.

        List all action rules of the subscription and given input filters.

        :param target_resource_group: Filter by target resource group name. Default value is select
         all.
        :type target_resource_group: str
        :param target_resource_type: Filter by target resource type. Default value is select all.
        :type target_resource_type: str
        :param target_resource: Filter by target resource( which is full ARM ID) Default value is
         select all.
        :type target_resource: str
        :param severity: Filter by severity. Default value is select all.
        :type severity: str or ~azure.mgmt.alertsmanagement.models.Severity
        :param monitor_service: Filter by monitor service which generates the alert instance. Default
         value is select all.
        :type monitor_service: str or ~azure.mgmt.alertsmanagement.models.MonitorService
        :param impacted_scope: filter by impacted/target scope (provide comma separated list for
         multiple scopes). The value should be an well constructed ARM id of the scope.
        :type impacted_scope: str
        :param description: filter by alert rule description.
        :type description: str
        :param alert_rule_id: filter by alert rule id.
        :type alert_rule_id: str
        :param action_group: filter by action group configured as part of action rule.
        :type action_group: str
        :param name: filter by action rule name.
        :type name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ActionRulesList or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.alertsmanagement.models.ActionRulesList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ActionRulesList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-05-05-preview"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET request; filters are only sent on the first page —
            # continuation pages use the service-provided next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_subscription.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                if target_resource_group is not None:
                    query_parameters['targetResourceGroup'] = self._serialize.query("target_resource_group", target_resource_group, 'str')
                if target_resource_type is not None:
                    query_parameters['targetResourceType'] = self._serialize.query("target_resource_type", target_resource_type, 'str')
                if target_resource is not None:
                    query_parameters['targetResource'] = self._serialize.query("target_resource", target_resource, 'str')
                if severity is not None:
                    query_parameters['severity'] = self._serialize.query("severity", severity, 'str')
                if monitor_service is not None:
                    query_parameters['monitorService'] = self._serialize.query("monitor_service", monitor_service, 'str')
                if impacted_scope is not None:
                    query_parameters['impactedScope'] = self._serialize.query("impacted_scope", impacted_scope, 'str')
                if description is not None:
                    query_parameters['description'] = self._serialize.query("description", description, 'str')
                if alert_rule_id is not None:
                    query_parameters['alertRuleId'] = self._serialize.query("alert_rule_id", alert_rule_id, 'str')
                if action_group is not None:
                    query_parameters['actionGroup'] = self._serialize.query("action_group", action_group, 'str')
                if name is not None:
                    query_parameters['name'] = self._serialize.query("name", name, 'str')
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (next page link, page items).
            deserialized = self._deserialize('ActionRulesList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page; any non-200 is surfaced as an HttpResponseError
            # (or a mapped exception from error_map) with ARM formatting.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.AlertsManagement/actionRules'}  # type: ignore
def list_by_resource_group(
    self,
    resource_group_name,  # type: str
    target_resource_group=None,  # type: Optional[str]
    target_resource_type=None,  # type: Optional[str]
    target_resource=None,  # type: Optional[str]
    severity=None,  # type: Optional[Union[str, "_models.Severity"]]
    monitor_service=None,  # type: Optional[Union[str, "_models.MonitorService"]]
    impacted_scope=None,  # type: Optional[str]
    description=None,  # type: Optional[str]
    alert_rule_id=None,  # type: Optional[str]
    action_group=None,  # type: Optional[str]
    name=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.ActionRulesList"]
    """Get all action rules created in a resource group.

    List all action rules of the subscription, created in given resource group and given input
    filters.

    :param resource_group_name: Resource group name where the resource is created.
    :type resource_group_name: str
    :param target_resource_group: Filter by target resource group name. Default value is select
     all.
    :type target_resource_group: str
    :param target_resource_type: Filter by target resource type. Default value is select all.
    :type target_resource_type: str
    :param target_resource: Filter by target resource( which is full ARM ID) Default value is
     select all.
    :type target_resource: str
    :param severity: Filter by severity. Default value is select all.
    :type severity: str or ~azure.mgmt.alertsmanagement.models.Severity
    :param monitor_service: Filter by monitor service which generates the alert instance. Default
     value is select all.
    :type monitor_service: str or ~azure.mgmt.alertsmanagement.models.MonitorService
    :param impacted_scope: filter by impacted/target scope (provide comma separated list for
     multiple scopes). The value should be an well constructed ARM id of the scope.
    :type impacted_scope: str
    :param description: filter by alert rule description.
    :type description: str
    :param alert_rule_id: filter by alert rule id.
    :type alert_rule_id: str
    :param action_group: filter by action group configured as part of action rule.
    :type action_group: str
    :param name: filter by action rule name.
    :type name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ActionRulesList or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.alertsmanagement.models.ActionRulesList]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ActionRulesList"]
    # Default HTTP-status -> exception mapping; callers may extend/override
    # it with an 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-05-05-preview"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request for one page. The first page expands the URL
        # template and attaches all provided filters; continuation pages use
        # next_link verbatim (it already embeds the query string).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_by_resource_group.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters. Only filters the caller actually supplied
            # (non-None) are sent on the wire.
            query_parameters = {}  # type: Dict[str, Any]
            if target_resource_group is not None:
                query_parameters['targetResourceGroup'] = self._serialize.query("target_resource_group", target_resource_group, 'str')
            if target_resource_type is not None:
                query_parameters['targetResourceType'] = self._serialize.query("target_resource_type", target_resource_type, 'str')
            if target_resource is not None:
                query_parameters['targetResource'] = self._serialize.query("target_resource", target_resource, 'str')
            if severity is not None:
                query_parameters['severity'] = self._serialize.query("severity", severity, 'str')
            if monitor_service is not None:
                query_parameters['monitorService'] = self._serialize.query("monitor_service", monitor_service, 'str')
            if impacted_scope is not None:
                query_parameters['impactedScope'] = self._serialize.query("impacted_scope", impacted_scope, 'str')
            if description is not None:
                query_parameters['description'] = self._serialize.query("description", description, 'str')
            if alert_rule_id is not None:
                query_parameters['alertRuleId'] = self._serialize.query("alert_rule_id", alert_rule_id, 'str')
            if action_group is not None:
                query_parameters['actionGroup'] = self._serialize.query("action_group", action_group, 'str')
            if name is not None:
                query_parameters['name'] = self._serialize.query("name", name, 'str')
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and hand ItemPaged the continuation token plus
        # an iterator over the page's elements.
        deserialized = self._deserialize('ActionRulesList', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Run one page request through the pipeline, raising a typed error on
        # any non-200 status.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize(_models.ErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    # ItemPaged drives get_next/extract_data lazily as the caller iterates.
    return ItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AlertsManagement/actionRules'}  # type: ignore
def get_by_name(
    self,
    resource_group_name,  # type: str
    action_rule_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.ActionRule"
    """Get action rule by name.

    Fetch one action rule, identified by its resource group and name.

    :param resource_group_name: Resource group name where the resource is created.
    :type resource_group_name: str
    :param action_rule_name: The name of action rule that needs to be fetched.
    :type action_rule_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ActionRule, or the result of cls(response)
    :rtype: ~azure.mgmt.alertsmanagement.models.ActionRule
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType["_models.ActionRule"]
    err_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    err_map.update(kwargs.pop('error_map', {}))

    # Expand the URL template with the identifying path segments.
    path_args = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'actionRuleName': self._serialize.url("action_rule_name", action_rule_name, 'str'),
    }
    target_url = self._client.format_url(self.get_by_name.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", "2019-05-05-preview", 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", "application/json", 'str')}  # type: Dict[str, Any]

    request = self._client.get(target_url, query, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    if http_response.status_code != 200:
        map_error(status_code=http_response.status_code, response=http_response, error_map=err_map)
        error = self._deserialize(_models.ErrorResponse, http_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    # Surface the service request id alongside the deserialized model.
    response_headers = {'x-ms-request-id': self._deserialize('str', http_response.headers.get('x-ms-request-id'))}
    deserialized = self._deserialize('ActionRule', pipeline_response)
    if custom_cls:
        return custom_cls(pipeline_response, deserialized, response_headers)
    return deserialized
get_by_name.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AlertsManagement/actionRules/{actionRuleName}'}  # type: ignore
def create_update(
    self,
    resource_group_name,  # type: str
    action_rule_name,  # type: str
    action_rule,  # type: "_models.ActionRule"
    **kwargs  # type: Any
):
    # type: (...) -> "_models.ActionRule"
    """Create/update an action rule.

    Creates/Updates a specific action rule.

    :param resource_group_name: Resource group name where the resource is created.
    :type resource_group_name: str
    :param action_rule_name: The name of action rule that needs to be created/updated.
    :type action_rule_name: str
    :param action_rule: action rule to be created/updated.
    :type action_rule: ~azure.mgmt.alertsmanagement.models.ActionRule
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ActionRule, or the result of cls(response)
    :rtype: ~azure.mgmt.alertsmanagement.models.ActionRule
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ActionRule"]
    # HTTP-status -> exception mapping; extendable via 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-05-05-preview"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.create_update.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'actionRuleName': self._serialize.url("action_rule_name", action_rule_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the ActionRule model into the PUT body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(action_rule, 'ActionRule')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # Pass the service request id through to a custom 'cls' hook if present.
    response_headers = {}
    response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
    deserialized = self._deserialize('ActionRule', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, response_headers)
    return deserialized
create_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AlertsManagement/actionRules/{actionRuleName}'}  # type: ignore
def delete(
    self,
    resource_group_name,  # type: str
    action_rule_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> bool
    """Delete action rule.

    Deletes the action rule identified by its resource group and name.

    :param resource_group_name: Resource group name where the resource is created.
    :type resource_group_name: str
    :param action_rule_name: The name that needs to be deleted.
    :type action_rule_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: bool, or the result of cls(response)
    :rtype: bool
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType[bool]
    err_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    err_map.update(kwargs.pop('error_map', {}))

    # Expand the URL template with the identifying path segments.
    path_args = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'actionRuleName': self._serialize.url("action_rule_name", action_rule_name, 'str'),
    }
    target_url = self._client.format_url(self.delete.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", "2019-05-05-preview", 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", "application/json", 'str')}  # type: Dict[str, Any]

    request = self._client.delete(target_url, query, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    if http_response.status_code != 200:
        map_error(status_code=http_response.status_code, response=http_response, error_map=err_map)
        error = self._deserialize(_models.ErrorResponse, http_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    # The service replies with a JSON boolean indicating whether a rule was removed.
    response_headers = {'x-ms-request-id': self._deserialize('str', http_response.headers.get('x-ms-request-id'))}
    deserialized = self._deserialize('bool', pipeline_response)
    if custom_cls:
        return custom_cls(pipeline_response, deserialized, response_headers)
    return deserialized
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AlertsManagement/actionRules/{actionRuleName}'}  # type: ignore
def update(
    self,
    resource_group_name,  # type: str
    action_rule_name,  # type: str
    action_rule_patch,  # type: "_models.PatchObject"
    **kwargs  # type: Any
):
    # type: (...) -> "_models.ActionRule"
    """Patch action rule.

    Update enabled flag and/or tags for the given action rule.

    :param resource_group_name: Resource group name where the resource is created.
    :type resource_group_name: str
    :param action_rule_name: The name that needs to be updated.
    :type action_rule_name: str
    :param action_rule_patch: Parameters supplied to the operation.
    :type action_rule_patch: ~azure.mgmt.alertsmanagement.models.PatchObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ActionRule, or the result of cls(response)
    :rtype: ~azure.mgmt.alertsmanagement.models.ActionRule
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ActionRule"]
    # HTTP-status -> exception mapping; extendable via 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-05-05-preview"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.update.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'actionRuleName': self._serialize.url("action_rule_name", action_rule_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the PatchObject into the PATCH body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(action_rule_patch, 'PatchObject')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # Pass the service request id through to a custom 'cls' hook if present.
    response_headers = {}
    response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
    deserialized = self._deserialize('ActionRule', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, response_headers)
    return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AlertsManagement/actionRules/{actionRuleName}'}  # type: ignore
| |
#!/usr/bin/env python
#
# Copyright (C) 2014
# Brian Caswell <bmc@lungetech.com>
# Narf Industries <info@narfindustries.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from generator.actions import Actions, Variable
from random import choice, randint
import string
import struct
def random_string(a, b):
    """Return a random 'safe' string whose length lies in [a, b].

    'Safe' means the result avoids sequences that would confuse the CB:
    1) "\x06" - the STRING_TERMINATOR - is excluded from the alphabet.
    2) "$SRC$" / "$!SRC$" are rewritten so no accidental links appear.
    3) "()" pairs are broken up, since adjacent parens trigger the CB's
       buggy canonicalization; lone "(" and ")" remain allowed.
    All rewrites are length-preserving.
    """
    alphabet = string.printable.replace("\x06", "")
    raw = ''.join(choice(alphabet) for _ in range(randint(a, b)))
    raw = raw.replace("()", ")_")
    raw = raw.replace("$SRC$", "$CRS$")
    return raw.replace("$!SRC$", "$!CRS$")
def canonicalize_path(path):
    """Mirror the CB's path canonicalization for verification.

    Rule: every non-overlapping "()" occurrence is stripped (single pass,
    left to right - nested "(())" therefore collapses to "()").
    """
    return path.replace("()", "")
class GreyMatter(Actions):
    """Poller state machine for the GreyMatter CB: serves randomly generated
    documents containing $SRC$...$!SRC$ links and mirrors the CB's
    depth-first link traversal. (Python 2 - uses print statements.)"""
    DEBUG = 0  # non-zero enables verbose tracing prints
    SZ_MAX_DOC = 0x2000  # maximum generated document size, in bytes
    SZ_MAX_PATH = 0x100  # maximum path length accepted by the CB
    STRING_TERMINATOR = "\x06"  # on-the-wire terminator for path strings
    def _respond(self, doc_path):
        """
        Provided a path, produce a file that possibly contains links to other
        files and send that file to the CB. Keep track of the outstanding
        referenced files.
        The CB will recurse depth-first through links. We need to mirror this
        behavior here if we want an opportunity to verify canonicalization.
        As such, it is easiest if this function recurses as well, following
        sequential, depth-first order.
        """
        doc = ""
        sz_doc = 0
        parens_path = False
        if self.DEBUG:
            # The index path is a runtime Variable whose value is unknown at
            # poll-generation time, so print a placeholder for it.
            if type(doc_path) == Variable:
                str_doc_path = "<INDEX VARIABLE>"
            else:
                str_doc_path = doc_path
            print "Currently serving '%s'" % str_doc_path
        # If we're not in the index document special case, we need to do a
        # read() and verify the requested document against the correctly
        # canonicalized path.
        if type(doc_path) != Variable:
            self.read(
                delim=self.STRING_TERMINATOR,
                expect=doc_path,
            )
        # We choose a random number of paths to include in our document.
        if self.chance(0.25): # Keep path explosion (relatively) under control.
            num_links = randint(1, 4)
            if self.chance(0.25):
                # In some small percentage of paths, we generate a path that
                # contains "()", triggering faulty canonicalization.
                parens_path = True
        else:
            num_links = 0
        if self.DEBUG:
            print "\t%d links in this doc" % num_links
        # We generate our paths, keeping track of the amount of space consumed.
        # Do this before the fluff loop below because we need to guarantee that
        # the links make it into the document.
        paths = [] # These are links without the bookends.
        links = []
        for _ in range(num_links):
            # Why do we sometimes go slightly over on patch length?
            # Subtracting 10 is empirically sufficient.
            new_path = random_string(1, self.SZ_MAX_PATH-10)
            sz_new_path = len(new_path)
            # If we've decided that we're including "()"s ...
            if parens_path:
                num_parens = randint(1, 8)
                for _ in range(num_parens):
                    idx_bisect = randint(0, sz_new_path)
                    new_path = new_path[:idx_bisect] + "()" + new_path[idx_bisect:]
                # We truncate the path so that we don't accidentally overflow
                # the vulnerable buffer.
                # While we might assume that we could code this to allow paths
                # longer than the vulnerable buffer length (because parens
                # shouldn't count against the total), this is actually
                # substantially more work because the CB will immediately
                # reject any path that's overly long (its first length check
                # is correct), resulting in desynchronization between CB and
                # poller.
                new_path = new_path[:self.SZ_MAX_PATH-10]
            paths.append(new_path)
            link = "$SRC$" + new_path + "$!SRC$"
            if self.DEBUG:
                print "Responding to '%s'; generated a link: '%s'" % (str_doc_path, link)
            links.append(link)
            sz_doc += len(link)
        # We populate the document with a random amount of random characters
        # separating our paths. We make sure to not exceed MAX_USHORT.
        for i in range(num_links):
            fluff = random_string(0, self.SZ_MAX_DOC-sz_doc)
            doc += fluff
            sz_doc += len(fluff)
            doc += links[i] # We're already counting the length of links.
        fluff = random_string(0, self.SZ_MAX_DOC-sz_doc)
        doc += fluff
        sz_doc += len(fluff)
        # This will happen if the document has no links.
        if 0 == sz_doc:
            fluff = random_string(1, self.SZ_MAX_DOC-sz_doc)
            doc += fluff
            sz_doc += len(fluff)
        # 2B: size of document (sz_doc)
        # XB: path + STRING_TERMINATOR
        # (sz_doc)B: the document
        self.write(
            struct.pack("<H", sz_doc),
            doc_path,
            self.STRING_TERMINATOR,
            doc
        )
        # OK, we've sent a document to the CB that contains anywhere between 0
        # and 4 links to further documents. We must recurse and handle each
        # link in order.
        # We need this to be in sequential order, so we can't iterate over
        # paths directly (as this is backed by a hash table).
        for i in range(len(paths)):
            path = paths[i]
            cp = canonicalize_path(path)
            if self.DEBUG:
                print "Recursing to serve '%s', canon'ed to '%s'" % (path, cp)
            self._respond(cp)
    def start(self):
        """
        Initialize state.
        """
        # The CB will first request a randomly-named index file. This is a
        # unique case because this filename will be a Variable and its value,
        # therefore, is inaccessible during poll generation.
        idx_path = Variable("idxpath")
        idx_path.set_slice(begin=0, end=-1)
        self.read(
            delim=self.STRING_TERMINATOR,
            assign=idx_path)
        self.state['idx_path'] = idx_path
    def respond(self):
        """
        If there are any outstanding requests, we randomly choose one and
        respond to it.
        If there are no remaining requests, we must have responded to all the
        requests and we're done.
        """
        idx_path = self.state['idx_path']
        self._respond(idx_path)
| |
"""
path.py - An object representing a path to a file or directory.
Current Author : Kyle Rockman
Special thanks to:
John Crocker
Jason Orendorff
Mikhail Gusarov <dottedmag@dottedmag.net>
Marc Abramowitz <marc@marc-abramowitz.com>
Jason R. Coombs <jaraco@jaraco.com>
Jason Chu <jchu@xentac.net>
Vojislav Stojkovic <vstojkovic@syntertainment.com>
"""
import os
import imp
import sys
import stat
import shutil
import hashlib
import ctypes
import logging
import re
import fnmatch
import glob
import errno
from DTL.conf import settings
__all__ = ['Path']
#------------------------------------------------------------
# Cross-platform os.symlink shim.
# On platforms where os.symlink exists it is wrapped so both code paths
# share a (source, link_name, flags) signature; `flags` is ignored there.
# Where it is missing (Windows on Python 2) we fall back to the kernel32
# CreateSymbolicLinkW API, where `flags` selects file (0) vs directory (1).
__CSL = None
try:
    __CSL = os.symlink
    def symlink(source, link_name, flags=0):
        '''symlink(source, link_name)
        Creates a symbolic link pointing to source named link_name'''
        __CSL(source, link_name)
# BUGFIX: was a bare `except:`, which would also swallow KeyboardInterrupt /
# SystemExit and mask unrelated failures; only the missing-attribute case is
# what this fallback is for.
except AttributeError:
    __CSL = None
    def symlink(source, link_name, flags=0):
        '''symlink(source, link_name)
        Creates a symbolic link pointing to source named link_name'''
        global __CSL
        if __CSL is None:
            # Resolve and cache the Win32 entry point on first use (deferred
            # so that merely importing this module works on any OS).
            csl = ctypes.windll.kernel32.CreateSymbolicLinkW
            csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
            csl.restype = ctypes.c_ubyte
            __CSL = csl
        if __CSL(source, link_name, flags) == 0:
            raise ctypes.WinError()
os.symlink = symlink
#------------------------------------------------------------
#------------------------------------------------------------
class ClassProperty(property):
    """A property accessed on the class itself rather than on instances.

    Stack it over ``@classmethod``::

        @ClassProperty
        @classmethod
        def thing(cls):
            ...
    """
    def __get__(self, cls, owner):
        # Bind the wrapped classmethod to the owner class, then invoke it.
        bound = self.fget.__get__(None, owner)
        return bound()
#------------------------------------------------------------
def handleRemoveReadOnly(func, path, exc):
    """``shutil.rmtree`` onerror callback that retries read-only removals.

    *func*, *path*, *exc* follow the rmtree onerror contract (*exc* is a
    ``sys.exc_info()`` triple). When the failed operation is rmdir/remove
    and was denied (EACCES), grant rwx to everyone (0777) and retry once;
    any other failure is re-raised unchanged.
    """
    error = exc[1]
    retryable = func in (os.rmdir, os.remove) and error.errno == errno.EACCES
    if not retryable:
        raise
    os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # 0777
    func(path)
#------------------------------------------------------------
#------------------------------------------------------------
class Path(unicode):
    """ Represents a filesystem path.
    For documentation on individual methods, consult their
    counterparts in os.path.
    """
    # NOTE(review): Python 2 only - the class subclasses `unicode` and its
    # methods rely on `basestring`; a Python 3 port would subclass `str`.
    module = os.path #The module to use for path operations.
    # _branch: initialized to None here; its use is not visible in this
    # chunk - TODO confirm where it is set/read.
    _branch = None
    def __new__(cls, value):
        # Paths are immutable (a unicode subclass), so normalization happens
        # in __new__ rather than __init__.
        if not isinstance(value, basestring):
            raise TypeError("path must be a string")
        # Accept both separator styles and fold them to the native os.sep so
        # mixed Windows/POSIX input joins and compares consistently.
        value = value.replace('\\',os.sep)
        value = value.replace('/',os.sep)
        self = super(Path, cls).__new__(cls, value)
        return self
#------------------------------------------------------------
# Path object class methods
#------------------------------------------------------------
@ClassProperty
@classmethod
def _next_class(cls):
    """
    What class should be used to construct new instances from this class
    """
    return cls
@classmethod
def getcwd(cls):
    """ Return the current working directory as a path object. """
    # os.getcwdu() returns unicode (Python 2 API).
    return cls(os.getcwdu())
@classmethod
def getHomeDir(cls):
    """ This will get us the user's home directory"""
    # '~' is resolved by expand() -> expanduser().
    return cls('~').expand()
@classmethod
def getTempPath(cls):
    """ This will get us a temp directory location"""
    # Lives under the package data dir rather than the system temp dir.
    return cls(settings.PKG_DATA_DIR).join('tmp')
#------------------------------------------------------------
# Path object dunder methods
#------------------------------------------------------------
def __repr__(self):
    return '{0}({1})'.format(type(self).__name__, super(Path, self).__repr__())
def __eq__(self, other):
    """Case-insensitive path equality.

    BUGFIX: the previous implementation called ``other.lower()`` directly,
    so comparing against a non-string (e.g. ``some_path == None``) raised
    AttributeError instead of simply comparing unequal. Returning
    NotImplemented lets Python fall back to its default comparison.
    """
    try:
        return self.lower() == other.lower()
    except AttributeError:
        return NotImplemented
def __ne__(self, other):
    # Propagate NotImplemented so reflected comparison is attempted,
    # instead of negating it (which would coerce it to a bool).
    result = self.__eq__(other)
    if result is NotImplemented:
        return result
    return not result
def __add__(self, more):
    """Adding a path and a string yields a path."""
    try:
        return self._next_class(super(Path, self).__add__(more))
    except TypeError: # Python bug
        return NotImplemented
def __radd__(self, other):
    # Only strings may be prepended to a path.
    if not isinstance(other, basestring):
        return NotImplemented
    return self._next_class(other.__add__(self))
def __div__(self, rel):
    """ fp.__div__(rel) == fp / rel == fp.joinpath(rel)
    Join two path components, adding a separator character if
    needed.
    """
    return self._next_class(self.module.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def __enter__(self):
    # Context-manager support: `with path:` chdirs into the path and
    # __exit__ restores the previous working directory.
    self._old_dir = self.getcwd()
    os.chdir(self)
    return self
def __exit__(self, *_):
    os.chdir(self._old_dir)
#------------------------------------------------------------
# os module wrappers
#------------------------------------------------------------
def stat(self):
    """Perform a stat() system call on this path."""
    return os.stat(self)
def lstat(self):
    """Like stat(), but do not follow a symbolic link."""
    return os.lstat(self)
def chmod(self, mode):
    """Change this path's permissions to the numeric *mode*."""
    os.chmod(self, mode)
def rename(self, new):
    """Rename this path to *new*; return *new* as a path object."""
    os.rename(self, new)
    return self._next_class(new)
def symlink(self, link):
    # NOTE(review): guarded on `not self.exists()` - i.e. the link is only
    # created while the *source* does not exist; confirm this is intended
    # rather than a check on the link path.
    if not self.exists():
        os.symlink(self, link, 0)  # flags=0 -> file symlink (Windows shim)
def symlinkdir(self, link):
    # Same guard as symlink(); flags=1 -> directory symlink (Windows shim).
    if not self.exists():
        os.symlink(self, link, 1)
def unlink(self):
    """Remove this file.

    If removal is denied with EACCES, chmod the file to 0777 and retry
    once; other OSErrors propagate. (BUGFIX: dropped the trailing bare
    ``except: raise`` clause, which only re-raised and therefore had no
    effect.)
    """
    try:
        os.unlink(self)
    except OSError as exception:
        if exception.errno == errno.EACCES:
            os.chmod(self, stat.S_IRWXU| stat.S_IRWXG| stat.S_IRWXO) #0777
            os.unlink(self)
        else:
            raise
#------------------------------------------------------------
# os.path module wrappers
#------------------------------------------------------------
def isabs(self): return self.module.isabs(self)
def exists(self): return self.module.exists(self)
def isdir(self): return self.module.isdir(self)
def isfile(self): return self.module.isfile(self)
def islink(self): return self.module.islink(self)
def ismount(self): return self.module.ismount(self)
def samefile(self, other):
    """Return True if this path and *other* refer to the same file.

    BUGFIX: os.path.samefile() takes two paths; the previous signature
    ``samefile(self)`` forwarded only one, so every call raised TypeError.
    """
    return self.module.samefile(self, other)
def atime(self): return self.module.getatime(self)
def mtime(self): return self.module.getmtime(self)
def ctime(self): return self.module.getctime(self)
def size(self): return self.module.getsize(self)
#------------------------------------------------------------
def isdirEmpty(self): return len(self.listdir()) == 0
#------------------------------------------------------------
# os.path module wrappers that returns path objects
#------------------------------------------------------------
def abspath(self):
    """Absolute version of this path, as a path object."""
    return self._next_class(self.module.abspath(self))
def normcase(self):
    """Case-normalized form of this path, as a path object."""
    return self._next_class(self.module.normcase(self))
def normpath(self):
    """Collapse redundant separators and up-level references."""
    return self._next_class(self.module.normpath(self))
def realpath(self):
    """Canonical form of this path with symbolic links resolved."""
    return self._next_class(self.module.realpath(self))
def expanduser(self):
    """Expand a leading '~' or '~user' component."""
    return self._next_class(self.module.expanduser(self))
def expandvars(self):
    """Expand $VAR-style environment variables."""
    return self._next_class(self.module.expandvars(self))
def dirname(self):
    """Directory component of this path, as a path object."""
    return self._next_class(self.module.dirname(self))
def basename(self):
    """Final component of this path, as a path object."""
    return self._next_class(self.module.basename(self))
#------------------------------------------------------------
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = self.module.split(self)
return self._next_class(parent), child
#------------------------------------------------------------
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = self.module.splitdrive(self)
return self._next_class(drive), rel
#------------------------------------------------------------
def splitext(self):
    """ p.splitext() -> Return (p.stripext(), p.ext).
    Split off the extension of the last path segment; either half may
    be empty.  The extension runs from the final '.' to the end, so
    for (a, b) == p.splitext() it always holds that a + b == p.
    """
    stem, extension = self.module.splitext(self)
    return self._next_class(stem), extension
#------------------------------------------------------------
def stripext(self):
    """ p.stripext() -> Remove one file extension from the path.
    E.g. path('/home/guido/python.tar.gz').stripext() yields
    path('/home/guido/python.tar').
    """
    stem, _ = self.splitext()
    return stem
#------------------------------------------------------------
def expand(self):
    """ Fully clean up a filename: expand $VARs, expand ~, then
    normalize.  Typically everything needed for a path read from a
    configuration file.
    """
    cleaned = self.expandvars()
    cleaned = cleaned.expanduser()
    return cleaned.normpath()
#------------------------------------------------------------
def join(self, *args):
    """ Join one or more extra components onto this path, inserting a
    separator (os.sep) where needed.  Returns a new path object.
    """
    combined = self.module.join(self, *args)
    return self._next_class(combined)
#------------------------------------------------------------
def listdir(self, pattern=None):
    """ D.listdir() -> List of this directory's entries as path objects.
    Use D.files() or D.dirs() for a listing restricted to plain files
    or subdirectories.  When 'pattern' is given, only names matching
    that wildcard pattern are listed.
    """
    entries = os.listdir(self)
    if pattern is not None:
        entries = fnmatch.filter(entries, pattern)
    return [self / entry for entry in entries]
#------------------------------------------------------------
def dirs(self, pattern=None):
    """ D.dirs() -> This directory's immediate subdirectories as path
    objects.  Non-recursive (see path.walkdirs for recursion).  With
    'pattern', only names matching the wildcard are returned, for
    example ``d.dirs('build-*')``.
    """
    return [entry for entry in self.listdir(pattern) if entry.isdir()]
#------------------------------------------------------------
def files(self, pattern=None):
    """ D.files() -> The plain files directly inside this directory,
    as path objects.  Non-recursive (see path.walk).  With 'pattern',
    only names matching the wildcard are returned, for example
    ``d.files('*.pyc')``.
    """
    return [entry for entry in self.listdir(pattern) if entry.isfile()]
#------------------------------------------------------------
def walk(self, pattern=None, topdown=False, return_files=True, return_dirs=True):
    """Recursively yield this tree's files and/or directories as path objects.

    For every visited root, matching files are yielded before matching
    directories; 'pattern' is an optional fnmatch-style filter.
    """
    for root, dirnames, filenames in os.walk(self, topdown=topdown):
        selected = list(filenames) if return_files else []
        if return_dirs:
            selected += dirnames
        for entry in selected:
            candidate = self._next_class(os.path.join(root, entry))
            if pattern is None or candidate.fnmatch(pattern):
                yield candidate
#------------------------------------------------------------
def regex(self, pattern, inclusive=False, topdown=False):
    """ Walk this tree and return (files, dirs) whose full paths match
    the given regular expression.

    pattern   - regular expression source string, tested with re.search
                against each candidate's full path
    inclusive - when False (default) paths that MATCH are collected;
                when True the selection is inverted and only paths that
                do NOT match are collected
    topdown   - passed through to os.walk
    """
    output_files = []
    output_dirs = []
    regexObj = re.compile(pattern)
    for root, dirs, files in os.walk(self, topdown=topdown):
        for name in files:
            next_path = self._next_class(os.path.join(root, name))
            if bool(regexObj.search(next_path)) == bool(not inclusive):
                output_files.append(next_path)
        for name in dirs:
            next_path = self._next_class(os.path.join(root, name))
            # BUG FIX: this branch previously tested
            # next_path.fnmatch(pattern) -- i.e. it applied fnmatch
            # wildcards to a *regex* string (copy/paste from walk()),
            # so directories ignored both the regex and `inclusive`.
            # Apply the same regex test used for files.
            if bool(regexObj.search(next_path)) == bool(not inclusive):
                output_dirs.append(next_path)
    return output_files, output_dirs
#------------------------------------------------------------
def fnmatch(self, pattern):
    """ Return True if self.name matches the given pattern.
    pattern - A filename wildcard pattern, for example ``'*.py'``.
    Only the final path component (self.name) is tested, not the
    full path.
    """
    return fnmatch.fnmatch(self.name, pattern)
#------------------------------------------------------------
def glob(self, pattern):
    """ Return a list of path objects matching `pattern`, which is a
    path relative to this directory and may contain wildcards.  E.g.
    path('/users').glob('*/bin/*') lists every file users have in
    their bin directories.
    """
    make = self._next_class
    return [make(match) for match in glob.glob(self / pattern)]
#------------------------------------------------------------
def open(self, mode='r'):
    """ Open this file with the given mode and return the file object. """
    return open(self, mode)
def chunks(self, size, *args, **kwargs):
    """ Yield successive pieces of at most `size` units of this file,
    so it can be consumed with a plain for loop.  Any argument after
    `size` is forwarded to `open()`.
    :example:
    >>> for chunk in path("file.txt").chunks(8192):
    ...     print(chunk)
    Reads the file 8192 bytes at a time.
    """
    with open(self, *args, **kwargs) as handle:
        while True:
            piece = handle.read(size)
            if not piece:
                break
            yield piece
def _hash(self, hash_name):
    """ Return a hashlib object named `hash_name` (e.g. 'md5', 'sha1',
    anything hashlib knows) that has been fed this file's entire
    contents.
    """
    digest = hashlib.new(hash_name)
    for block in self.chunks(8192):
        digest.update(block)
    return digest
def read_hash(self, hash_name):
    """ Return this file's raw digest under the given hash algorithm.
    Supported algorithm names come from the hashlib package; the
    entire file is read.
    """
    return self._hash(hash_name).digest()
def read_hexhash(self, hash_name):
    """ Return this file's digest under the given hash algorithm, as a
    hex string.  Supported algorithm names come from the hashlib
    package; the entire file is read.
    """
    return self._hash(hash_name).hexdigest()
def read_md5(self):
    """ Return this file's raw md5 digest (reads the whole file). """
    return self.read_hash('md5')
#------------------------------------------------------------
# Modifying operations on files and directories
#------------------------------------------------------------
def mkdir(self, mode=511):
    # Create the directory (default mode 511 == 0o777).  An
    # already-existing directory (EEXIST) is silently accepted; any
    # other OSError propagates.
    # NOTE(review): this creates self.dir() -- the best-guess
    # directory for this path -- rather than self itself; confirm
    # that is the intended target.
    try:
        os.mkdir(self.dir(), mode)
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise
#------------------------------------------------------------
def makedirs(self, mode=511):
    # Recursively create the directory and any missing parents
    # (default mode 511 == 0o777).  EEXIST is silently accepted; any
    # other OSError propagates.
    # NOTE(review): like mkdir(), this targets self.dir(), not
    # necessarily self itself.
    try:
        os.makedirs(self.dir(), mode)
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise
#------------------------------------------------------------
def rmdir(self):
    """Remove this directory, tolerating benign failures.

    ENOTEMPTY and EEXIST (some platforms report a non-empty directory
    as EEXIST) are swallowed; any other OSError propagates.
    """
    try:
        os.rmdir(self.dir())
    # FIX: 'except OSError, e' is Python-2-only syntax (SyntaxError on
    # Python 3); 'as e' works on both.
    except OSError as e:
        if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST:
            raise
#------------------------------------------------------------
def removedirs(self):
    """Remove this directory and any empty parents (os.removedirs).

    ENOTEMPTY and EEXIST are swallowed, matching rmdir(); any other
    OSError propagates.
    """
    try:
        os.removedirs(self.dir())
    # FIX: 'except OSError, e' is Python-2-only syntax (SyntaxError on
    # Python 3); 'as e' works on both.
    except OSError as e:
        if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST:
            raise
# shutil operations exposed as methods.  Because these are plain
# functions assigned in the class body, the instance (a string
# subclass) is passed as the first (source) argument when called as a
# method, e.g. p.copy(dst) behaves like shutil.copy(p, dst).
copyfile = shutil.copyfile
copymode = shutil.copymode
copystat = shutil.copystat
copy = shutil.copy
copy2 = shutil.copy2
copytree = shutil.copytree
move = shutil.move
#------------------------------------------------------------
def rmtree(self):
    """Recursively delete this directory tree.

    Read-only entries are retried via the handleRemoveReadOnly
    onerror handler; an already-missing tree (ENOENT) is ignored,
    anything else raises.
    """
    try:
        shutil.rmtree(self, ignore_errors=False, onerror=handleRemoveReadOnly)
    # FIX: 'except OSError, e' is Python-2-only syntax (SyntaxError on
    # Python 3); 'as e' works on both.
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
#------------------------------------------------------------
def touch(self):
    """ Set this file's access/modified times to the current time,
    creating the file first if it does not exist.
    """
    descriptor = os.open(self, os.O_WRONLY | os.O_CREAT, 438)  # 438 == 0o666
    os.close(descriptor)
    os.utime(self, None)
#------------------------------------------------------------
def remove(self):
    """Delete this file.

    EACCES (e.g. a read-only file on Windows) is retried once after
    chmod'ing the file writable; a missing file (ENOENT) is ignored;
    any other OSError propagates.
    """
    try:
        os.remove(self)
    # FIX: 'except OSError, e' is Python-2-only syntax.
    except OSError as e:
        if e.errno == errno.EACCES:
            # Make writable and retry once.
            self.chmod(0o777)
            os.remove(self)
        elif e.errno != errno.ENOENT:
            # BUG FIX: previously this check also ran after a
            # successful EACCES retry, re-raising the original error
            # even though the file had been removed.
            raise
def compare(self, target_filepath, size_only=False, time_only=False, time_window=2):
    # Compare this file against target_filepath by size and mtime.
    #   size_only -> True when both files have the same size
    #   time_only -> True when source mtime (minus time_window seconds
    #                of slack) is strictly newer than the target's
    #   default   -> bool(size_compare == time_compare)
    # NOTE(review): the default '==' combination looks suspicious --
    # "different size AND source newer" yields False (rsync() would
    # then NOT update), while "different size AND not newer" yields
    # True.  An 'or'/'and' was probably intended; confirm before
    # changing, since rsync() depends on this return value.
    target_filepath = Path(target_filepath)
    source_stats = self.stat()
    target_stats = target_filepath.stat()
    size_compare = bool(source_stats.st_size == target_stats.st_size)
    time_compare = bool((source_stats.st_mtime - time_window) > target_stats.st_mtime)
    if size_only :
        return size_compare
    if time_only:
        return time_compare
    return bool(size_compare == time_compare)
#------------------------------------------------------------
def rsync(self, other,
          dry_run=False,
          recursive=True, flatten=False,
          delete=True,
          update=True, existing=False, size_only=False, time_only=True, time_window=2):
    # One-way sync of this directory into `other`.
    #   dry_run   - log actions without touching the filesystem
    #   recursive - walk subdirectories (otherwise only direct files)
    #   flatten   - copy everything into the target root, dropping the
    #               relative directory structure
    #   delete    - remove target files with no matching source
    #   update    - only overwrite existing targets when compare() says so
    #   existing  - when True, never create new target files
    #   size_only/time_only/time_window - forwarded to compare()
    target_root = Path(other)
    if not self.isdir() or not target_root.isdir():
        raise Exception('Source and Target paths must be directories!\nSOURCE:{0}\nTARGET:{1}'.format(self, target_root))
    #This incurs no speed cost because generators are returned
    if recursive :
        source_files = self.walk(return_dirs=False)
        target_files = target_root.walk(return_dirs=False)
    else:
        source_files = self.files()
        target_files = target_root.files()
    #Handle the source to target sync first
    for source in source_files :
        if flatten :
            target = target_root.join(source.name)
        else:
            # Map the source path into the target tree by string
            # replacement of the root prefix.
            target = Path(source.replace(self, target_root))
        if update:
            #If we don't care about existing only and target doesn't exist copy it over
            if not existing and not target.exists():
                logging.info('Copying {0} at {1}'.format(source, target))
                if not dry_run :
                    target.makedirs()
                    source.copy2(target)
            #If the target does exist compare based on size and time to see if we need to update it
            if target.exists() :
                if source.compare(target, size_only, time_only, time_window):
                    logging.info('Updating {0} at {1}'.format(source, target))
                    if not dry_run :
                        source.copy2(target)
        else :
            # update=False: unconditionally copy every source file.
            logging.info('Copying {0} at {1}'.format(source, target))
            if not dry_run :
                target.makedirs()
                source.copy2(target)
    #If the files on the target_root doesn't exist in the current path, then delete
    if delete :
        for target in target_files :
            if flatten :
                matched_source = self.join(target.name)
            else:
                matched_source = Path(target.replace(target_root, self))
            if not matched_source.exists():
                logging.info('Removeing {0}'.format(target))
                if not dry_run :
                    target.remove()
#------------------------------------------------------------
# Path Object Properties
#------------------------------------------------------------
@property
def ext(self):
    # The file extension including the leading dot, e.g. '.py'
    # ('' when the last segment has no extension).
    return self.splitext()[-1]
#------------------------------------------------------------
@property
def parent(self):
    """ The parent directory, as a new path object.
    Example: path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
    """
    return self.dirname()
#------------------------------------------------------------
@property
def name(self):
    """ The name of this file or directory without the full path.
    Example: path('/usr/local/lib/libpython.so').name == 'libpython.so'
    """
    return self.basename()
#------------------------------------------------------------
@property
def drive(self):
    """ The drive specifier, for example 'C:'.
    This is always empty on systems that don't use drive specifiers.
    """
    return self.splitdrive()[0]
#------------------------------------------------------------
# Misc utility methods
#------------------------------------------------------------
def dir(self):
    """ Return this path when it is (or looks like) a directory,
    otherwise its parent.  For paths that do not exist yet, a final
    segment with no extension is guessed to be a directory.
    """
    if not self.exists():
        # Best guess for not-yet-created paths.
        head, tail = self.splitpath()
        return self if os.path.splitext(tail)[1] == '' else head
    return self if self.isdir() else self.parent
#------------------------------------------------------------
def caseSensative(self):
    """Return this path with the on-disk character casing restored.

    Path objects are stored all-lowercase with escaped backslashes;
    use this when the original casing matters.  (The 'Sensative'
    spelling is part of the public API.)
    """
    return self.__class__.getCaseSensativePath(self)
#------------------------------------------------------------
def branchRelative(self):
    """Return this path relative to the branch root.

    Path objects are stored as absolute paths with the branch root
    cached on the class; use this when a branch-relative path is
    needed.
    """
    return self.__class__.getBranchRelativePath(self)
#------------------------------------------------------------
def mayaPath(self):
    """Return this path in a Maya-friendly form: on-disk casing with
    forward slashes."""
    case_correct = self.caseSensative()
    return case_correct.replace('\\', '/')
#------------------------------------------------------------
# Utilties used for paths but not limited to path objects
#------------------------------------------------------------
@staticmethod
def branch():
    '''Locate the project root directory used for branch-relative paths.

    Walks upward from the running script (sys.argv[0]) until a
    directory containing a "ProjectRoot" marker file is found; the
    result is cached on the class for later calls.  When no marker is
    found, the cached value is "".'''
    if Path._branch is None:
        current = os.path.abspath(sys.argv[0])
        while True:
            current, tail = os.path.split(current)
            if os.path.isfile(os.path.join(current, "ProjectRoot")):
                break
            if len(tail) == 0:
                current = ""
                break
        Path._branch = current
    return Path._branch
#------------------------------------------------------------
@staticmethod
def getCaseSensativePath(path):
    '''Return `path` with each component's on-disk casing restored.

    Recursively resolves every component against the directory
    listing of its parent; components that do not exist on disk are
    kept as given.  Works on any platform (drive letters are
    upper-cased when present).
    '''
    path = os.path.abspath(path)
    rest, last = os.path.split(path)
    if rest == path:
        # Recursion base case: reached the filesystem root / drive.
        drive, path = os.path.splitdrive(path)
        drive = drive.upper()
        return os.path.normpath(os.path.join(drive, path))
    if not last:
        # Path ended in a separator; recurse on the head only.
        return Path.getCaseSensativePath(rest) + os.sep
    if os.path.exists(rest):
        # Look up the real spelling of the final component.
        options = [x for x in os.listdir(rest) if x.lower() == last.lower()]
        if len(options) == 0:
            options = [last]
    else:
        options = [last]
    path = os.path.join(Path.getCaseSensativePath(rest), options[0])
    return path
#------------------------------------------------------------
@staticmethod
def getBranchRelativePath(path):
    '''Strip the current branch root from `path` and normalize the result.'''
    root = Path.branch()
    if root:
        path = path.replace(root, '')
    return os.path.normpath(path)
# ---------------------------------------------------------------------
import time
from django.conf import settings
from django.template import Context
from sekizai.context import SekizaiContext
from cms.api import add_plugin, create_page, create_title
from cms.cache import _get_cache_version, invalidate_cms_page_cache
from cms.cache.placeholder import (
_get_placeholder_cache_version_key,
_get_placeholder_cache_version,
_set_placeholder_cache_version,
_get_placeholder_cache_key,
set_placeholder_cache,
get_placeholder_cache,
clear_placeholder_cache,
)
from cms.exceptions import PluginAlreadyRegistered
from cms.models import Page
from cms.plugin_pool import plugin_pool
from cms.test_utils.project.placeholderapp.models import Example1
from cms.test_utils.project.pluginapp.plugins.caching.cms_plugins import (
DateTimeCacheExpirationPlugin,
LegacyCachePlugin,
NoCachePlugin,
SekizaiPlugin,
TimeDeltaCacheExpirationPlugin,
TTLCacheExpirationPlugin,
VaryCacheOnPlugin,
)
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.toolbar.toolbar import CMSToolbar
from cms.utils.conf import get_cms_setting
from cms.utils.helpers import get_timezone_name
class CacheTestCase(CMSTestCase):
def tearDown(self):
    # Clear the cache after each test so cached pages/placeholders
    # never leak into the next test.
    from django.core.cache import cache
    super().tearDown()
    cache.clear()
def setUp(self):
    # Start every test from an empty cache.
    from django.core.cache import cache
    super().setUp()
    cache.clear()
def test_cache_placeholder(self):
    """Placeholder rendering should be cached: a warm render needs far
    fewer queries, edit mode bypasses part of the cache, and disabling
    CMS_PAGE_CACHE / CMS_PLACEHOLDER_CACHE changes the query counts."""
    template = "{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}"
    page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True)
    page1_url = page1.get_absolute_url()
    placeholder = page1.placeholders.filter(slot="body")[0]
    add_plugin(placeholder, "TextPlugin", 'en', body="English")
    add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
    request = self.get_request(page1_url)
    request.current_page = Page.objects.get(pk=page1.pk)
    request.toolbar = CMSToolbar(request)
    # Cold render: populates the placeholder cache.
    with self.assertNumQueries(FuzzyInt(5, 9)):
        self.render_template_obj(template, {}, request)
    request = self.get_request(page1_url)
    request.session['cms_edit'] = True
    request.current_page = Page.objects.get(pk=page1.pk)
    request.toolbar = CMSToolbar(request)
    template = "{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}"
    # Warm render: the cache cuts the query count down to 2.
    with self.assertNumQueries(2):
        self.render_template_obj(template, {}, request)
    # toolbar
    with self.login_user_context(self.get_superuser()):
        request = self.get_request(page1_url)
        request.session['cms_edit'] = True
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        request.toolbar.show_toolbar = True
        template = "{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}"
        with self.assertNumQueries(4):
            self.render_template_obj(template, {}, request)
    page1.publish('en')
    # Exclude Django's middleware cache so only CMS caching is measured.
    exclude = [
        'django.middleware.cache.UpdateCacheMiddleware',
        'django.middleware.cache.FetchFromCacheMiddleware'
    ]
    overrides = dict(
        CMS_PAGE_CACHE=False,
        MIDDLEWARE=[mw for mw in settings.MIDDLEWARE if mw not in exclude],
    )
    with self.settings(**overrides):
        with self.assertNumQueries(FuzzyInt(13, 25)):
            self.client.get(page1_url)
        with self.assertNumQueries(FuzzyInt(5, 14)):
            self.client.get(page1_url)
    overrides['CMS_PLACEHOLDER_CACHE'] = False
    with self.settings(**overrides):
        with self.assertNumQueries(FuzzyInt(7, 18)):
            self.client.get(page1_url)
def test_no_cache_plugin(self):
    """A plugin whose cache=False must disable page caching: responses
    gain Cache-Control: no-cache and repeated requests hit the DB."""
    page1 = create_page('test page 1', 'nav_playground.html', 'en',
                        published=True)
    page1_url = page1.get_absolute_url()
    placeholder1 = page1.placeholders.filter(slot='body')[0]
    placeholder2 = page1.placeholders.filter(slot='right-column')[0]
    try:
        plugin_pool.register_plugin(NoCachePlugin)
    except PluginAlreadyRegistered:
        pass
    add_plugin(placeholder1, 'TextPlugin', 'en', body="English")
    add_plugin(placeholder2, 'TextPlugin', 'en', body="Deutsch")
    template = "{% load cms_tags %}{% placeholder 'body' %}{% placeholder 'right-column' %}"
    # Ensure that we're testing in an environment WITHOUT the MW cache, as
    # we are testing the internal page cache, not the MW cache.
    exclude = [
        'django.middleware.cache.UpdateCacheMiddleware',
        'django.middleware.cache.CacheMiddleware',
        'django.middleware.cache.FetchFromCacheMiddleware'
    ]
    overrides = {
        'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
    }
    with self.settings(**overrides):
        # Request the page without the 'no-cache' plugin
        request = self.get_request(page1_url)
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        with self.assertNumQueries(FuzzyInt(18, 25)):
            response1 = self.client.get(page1_url)
            content1 = response1.content
        # Fetch it again, it is cached.
        request = self.get_request(page1_url)
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        with self.assertNumQueries(0):
            response2 = self.client.get(page1_url)
            content2 = response2.content
        self.assertEqual(content1, content2)
        # Once again with PAGE_CACHE=False, to prove the cache can
        # be disabled
        request = self.get_request(page1_url)
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        with self.settings(CMS_PAGE_CACHE=False):
            with self.assertNumQueries(FuzzyInt(5, 24)):
                response3 = self.client.get(page1_url)
                content3 = response3.content
        self.assertEqual(content1, content3)
        # Add the 'no-cache' plugin
        add_plugin(placeholder1, "NoCachePlugin", 'en')
        page1.publish('en')
        request = self.get_request(page1_url)
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        with self.assertNumQueries(FuzzyInt(4, 6)):
            output = self.render_template_obj(template, {}, request)
        with self.assertNumQueries(FuzzyInt(14, 24)):
            response = self.client.get(page1_url)
            self.assertTrue("no-cache" in response['Cache-Control'])
            resp1 = response.content.decode('utf8').split("$$$")[1]
        request = self.get_request(page1_url)
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        with self.assertNumQueries(5):
            output2 = self.render_template_obj(template, {}, request)
        with self.settings(CMS_PAGE_CACHE=False):
            with self.assertNumQueries(FuzzyInt(8, 17)):
                response = self.client.get(page1_url)
                resp2 = response.content.decode('utf8').split("$$$")[1]
        # The timestamps rendered by the plugin differ between requests,
        # proving nothing was served from the cache.
        self.assertNotEqual(output, output2)
        self.assertNotEqual(resp1, resp2)
    plugin_pool.unregister_plugin(NoCachePlugin)
def test_timedelta_cache_plugin(self):
    """A plugin whose expiration is a timedelta (45s) must cap the
    page's Cache-Control max-age at 45."""
    page1 = create_page('test page 1', 'nav_playground.html', 'en',
                        published=True)
    placeholder1 = page1.placeholders.filter(slot="body")[0]
    placeholder2 = page1.placeholders.filter(slot="right-column")[0]
    plugin_pool.register_plugin(TimeDeltaCacheExpirationPlugin)
    add_plugin(placeholder1, "TextPlugin", 'en', body="English")
    add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
    # Add *TimeDeltaCacheExpirationPlugin, expires in 45s.
    add_plugin(placeholder1, "TimeDeltaCacheExpirationPlugin", 'en')
    # Ensure that we're testing in an environment WITHOUT the MW cache, as
    # we are testing the internal page cache, not the MW cache.
    exclude = [
        'django.middleware.cache.UpdateCacheMiddleware',
        'django.middleware.cache.CacheMiddleware',
        'django.middleware.cache.FetchFromCacheMiddleware',
    ]
    overrides = {
        'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
    }
    with self.settings(**overrides):
        page1.publish('en')
        request = self.get_request(page1.get_absolute_url())
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        with self.assertNumQueries(FuzzyInt(14, 25)):  # was 14, 24
            response = self.client.get(page1.get_absolute_url())
        self.assertTrue('max-age=45' in response['Cache-Control'], response['Cache-Control'])
    plugin_pool.unregister_plugin(TimeDeltaCacheExpirationPlugin)
def test_datetime_cache_plugin(self):
    """A plugin whose expiration is an absolute datetime (40s out)
    must cap the page's Cache-Control max-age at 40."""
    page1 = create_page('test page 1', 'nav_playground.html', 'en',
                        published=True)
    page1_url = page1.get_absolute_url()
    placeholder1 = page1.placeholders.filter(slot="body")[0]
    placeholder2 = page1.placeholders.filter(slot="right-column")[0]
    try:
        plugin_pool.register_plugin(DateTimeCacheExpirationPlugin)
    except PluginAlreadyRegistered:
        pass
    add_plugin(placeholder1, "TextPlugin", 'en', body="English")
    add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
    # Add *CacheExpirationPlugins, one expires in 50s, the other in 40s.
    # The page should expire in the least of these, or 40s.
    add_plugin(placeholder1, "DateTimeCacheExpirationPlugin", 'en')
    # Ensure that we're testing in an environment WITHOUT the MW cache, as
    # we are testing the internal page cache, not the MW cache.
    exclude = [
        'django.middleware.cache.UpdateCacheMiddleware',
        'django.middleware.cache.CacheMiddleware',
        'django.middleware.cache.FetchFromCacheMiddleware',
    ]
    overrides = {
        'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
    }
    with self.settings(**overrides):
        page1.publish('en')
        request = self.get_request(page1_url)
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        with self.assertNumQueries(FuzzyInt(14, 25)):  # was 14, 24
            response = self.client.get(page1_url)
        self.assertTrue('max-age=40' in response['Cache-Control'], response['Cache-Control'])
    plugin_pool.unregister_plugin(DateTimeCacheExpirationPlugin)
def TTLCacheExpirationPlugin(self):
    """Exercise TTLCacheExpirationPlugin: the page should be served
    with max-age=50 taken from the plugin's TTL.

    NOTE(review): this method name lacks the ``test_`` prefix, so the
    test runner never collects or runs it -- it appears to be an
    accidentally disabled test.  Confirm and rename to e.g.
    ``test_ttl_cache_expiration_plugin`` (rename only; the body looks
    runnable as-is).
    """
    page1 = create_page('test page 1', 'nav_playground.html', 'en',
                        published=True)
    placeholder1 = page1.placeholders.filter(slot="body")[0]
    placeholder2 = page1.placeholders.filter(slot="right-column")[0]
    plugin_pool.register_plugin(TTLCacheExpirationPlugin)
    add_plugin(placeholder1, "TextPlugin", 'en', body="English")
    add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
    # Add *CacheExpirationPlugins, one expires in 50s, the other in 40s.
    # The page should expire in the least of these, or 40s.
    add_plugin(placeholder1, "TTLCacheExpirationPlugin", 'en')
    # Ensure that we're testing in an environment WITHOUT the MW cache, as
    # we are testing the internal page cache, not the MW cache.
    exclude = [
        'django.middleware.cache.UpdateCacheMiddleware',
        'django.middleware.cache.CacheMiddleware',
        'django.middleware.cache.FetchFromCacheMiddleware',
    ]
    overrides = {
        'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
    }
    with self.settings(**overrides):
        page1.publish('en')
        request = self.get_request('/en/')
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        with self.assertNumQueries(FuzzyInt(14, 25)):  # was 14, 24
            response = self.client.get('/en/')
        self.assertTrue('max-age=50' in response['Cache-Control'], response['Cache-Control'])
    plugin_pool.unregister_plugin(TTLCacheExpirationPlugin)
def test_expiration_cache_plugins(self):
    """
    Tests that when used in combination, the page is cached to the
    shortest TTL.
    """
    page1 = create_page('test page 1', 'nav_playground.html', 'en',
                        published=True)
    page1_url = page1.get_absolute_url()
    placeholder1 = page1.placeholders.filter(slot="body")[0]
    placeholder2 = page1.placeholders.filter(slot="right-column")[0]
    plugin_pool.register_plugin(TTLCacheExpirationPlugin)
    try:
        plugin_pool.register_plugin(DateTimeCacheExpirationPlugin)
    except PluginAlreadyRegistered:
        pass
    try:
        plugin_pool.register_plugin(NoCachePlugin)
    except PluginAlreadyRegistered:
        pass
    add_plugin(placeholder1, "TextPlugin", 'en', body="English")
    add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
    # Add *CacheExpirationPlugins, one expires in 50s, the other in 40s.
    # The page should expire in the least of these, or 40s.
    add_plugin(placeholder1, "TTLCacheExpirationPlugin", 'en')
    add_plugin(placeholder2, "DateTimeCacheExpirationPlugin", 'en')
    # Ensure that we're testing in an environment WITHOUT the MW cache, as
    # we are testing the internal page cache, not the MW cache.
    exclude = [
        'django.middleware.cache.UpdateCacheMiddleware',
        'django.middleware.cache.CacheMiddleware',
        'django.middleware.cache.FetchFromCacheMiddleware',
    ]
    overrides = {
        'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
    }
    with self.settings(**overrides):
        page1.publish('en')
        request = self.get_request(page1_url)
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        with self.assertNumQueries(FuzzyInt(14, 26)):
            response = self.client.get(page1_url)
            resp1 = response.content.decode('utf8').split("$$$")[1]
        self.assertTrue('max-age=40' in response['Cache-Control'], response['Cache-Control'])  # noqa
        cache_control1 = response['Cache-Control']
        expires1 = response['Expires']
        time.sleep(1)  # This ensures that the cache has aged measurably
        # Request it again, this time, it comes from the cache
        request = self.get_request(page1_url)
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        with self.assertNumQueries(0):
            response = self.client.get(page1_url)
            resp2 = response.content.decode('utf8').split("$$$")[1]
        # Content will be the same
        self.assertEqual(resp2, resp1)
        # Cache-Control will be different because the cache has aged
        self.assertNotEqual(response['Cache-Control'], cache_control1)
        # However, the Expires timestamp will be the same
        self.assertEqual(response['Expires'], expires1)
    plugin_pool.unregister_plugin(TTLCacheExpirationPlugin)
    plugin_pool.unregister_plugin(DateTimeCacheExpirationPlugin)
    plugin_pool.unregister_plugin(NoCachePlugin)
def test_dual_legacy_cache_plugins(self):
    """A plugin that defines get_cache_expiration() must override its
    own legacy cache=False attribute, so the page stays cacheable."""
    page1 = create_page('test page 1', 'nav_playground.html', 'en',
                        published=True)
    page1_url = page1.get_absolute_url()
    placeholder1 = page1.placeholders.filter(slot="body")[0]
    placeholder2 = page1.placeholders.filter(slot="right-column")[0]
    plugin_pool.register_plugin(LegacyCachePlugin)
    add_plugin(placeholder1, "TextPlugin", 'en', body="English")
    add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
    # Adds a no-cache plugin. In older versions of the CMS, this would
    # prevent the page from caching in, but since this plugin also defines
    # get_cache_expiration() it is ignored.
    add_plugin(placeholder1, "LegacyCachePlugin", 'en')
    # Ensure that we're testing in an environment WITHOUT the MW cache, as
    # we are testing the internal page cache, not the MW cache.
    exclude = [
        'django.middleware.cache.UpdateCacheMiddleware',
        'django.middleware.cache.CacheMiddleware',
        'django.middleware.cache.FetchFromCacheMiddleware',
    ]
    overrides = {
        'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
    }
    with self.settings(**overrides):
        page1.publish('en')
        request = self.get_request(page1_url)
        request.current_page = Page.objects.get(pk=page1.pk)
        request.toolbar = CMSToolbar(request)
        with self.assertNumQueries(FuzzyInt(14, 25)):
            response = self.client.get(page1_url)
        self.assertTrue('no-cache' not in response['Cache-Control'])
    plugin_pool.unregister_plugin(LegacyCachePlugin)
def test_cache_page(self):
    """The internal page cache: a second request costs 0 queries,
    unpublishing bumps the cache version (invalidation), and setting
    CMS_PAGE_CACHE=False disables the whole mechanism."""
    # Ensure that we're testing in an environment WITHOUT the MW cache...
    exclude = [
        'django.middleware.cache.UpdateCacheMiddleware',
        'django.middleware.cache.FetchFromCacheMiddleware'
    ]
    overrides = {
        'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
    }
    with self.settings(**overrides):
        # Silly to do these tests if this setting isn't True
        page_cache_setting = get_cms_setting('PAGE_CACHE')
        self.assertTrue(page_cache_setting)
        # Create a test page
        page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True)
        page1_url = page1.get_absolute_url()
        # Add some content
        placeholder = page1.placeholders.filter(slot="body")[0]
        add_plugin(placeholder, "TextPlugin", 'en', body="English")
        add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
        # Create a request object
        request = self.get_request(page1_url, 'en')
        # Ensure that user is NOT authenticated
        self.assertFalse(request.user.is_authenticated)
        # Test that the page is initially uncached
        with self.assertNumQueries(FuzzyInt(1, 24)):
            response = self.client.get(page1_url)
        self.assertEqual(response.status_code, 200)
        #
        # Test that subsequent requests of the same page are cached by
        # asserting that they require fewer queries.
        #
        with self.assertNumQueries(0):
            response = self.client.get(page1_url)
        self.assertEqual(response.status_code, 200)
        #
        # Test that the cache is invalidated on unpublishing the page
        #
        old_version = _get_cache_version()
        page1.unpublish('en')
        self.assertGreater(_get_cache_version(), old_version)
        #
        # Test that this means the page is actually not cached.
        #
        page1.publish('en')
        with self.assertNumQueries(FuzzyInt(1, 24)):
            response = self.client.get(page1_url)
        self.assertEqual(response.status_code, 200)
        #
        # Test that the above behavior is different when CMS_PAGE_CACHE is
        # set to False (disabled)
        #
        with self.settings(CMS_PAGE_CACHE=False):
            # Test that the page is initially un-cached
            with self.assertNumQueries(FuzzyInt(1, 20)):
                response = self.client.get(page1_url)
            self.assertEqual(response.status_code, 200)
            #
            # Test that subsequent requests of the same page are still requires DB
            # access.
            #
            with self.assertNumQueries(FuzzyInt(1, 20)):
                response = self.client.get(page1_url)
            self.assertEqual(response.status_code, 200)
def test_no_page_cache_on_toolbar_edit(self):
    """Toolbar edit mode must bypass the page cache even when a cached
    copy of the page exists."""
    with self.settings(CMS_PAGE_CACHE=True):
        # Create a test page
        page1 = create_page('test page 1', 'nav_playground.html', 'en')
        page1_url = page1.get_absolute_url()
        # Add some content
        placeholder = page1.placeholders.filter(slot="body")[0]
        add_plugin(placeholder, "TextPlugin", 'en', body="English")
        add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
        # Publish
        page1.publish('en')
        # Set edit mode
        session = self.client.session
        session['cms_edit'] = True
        session.save()
        # Make an initial ?edit request
        with self.assertNumQueries(FuzzyInt(1, 24)):
            response = self.client.get(page1_url)
        self.assertEqual(response.status_code, 200)
        # Disable edit mode
        session = self.client.session
        session['cms_edit'] = False
        session.save()
        # Set the cache
        with self.assertNumQueries(FuzzyInt(1, 24)):
            response = self.client.get(page1_url)
        self.assertEqual(response.status_code, 200)
        # Assert cached content was used
        with self.assertNumQueries(0):
            response = self.client.get(page1_url)
        self.assertEqual(response.status_code, 200)
        # Set edit mode once more
        session = self.client.session
        session['cms_edit'] = True
        session.save()
        # Assert no cached content was used
        with self.assertNumQueries(FuzzyInt(1, 24)):
            response = self.client.get('{}?edit'.format(page1_url))
        self.assertEqual(response.status_code, 200)
    def test_invalidate_restart(self):
        """Cached pages must survive a plugin-pool rebuild (simulated restart).

        The Django middleware cache is excluded so only the CMS page cache is
        exercised.
        """
        # Ensure that we're testing in an environment WITHOUT the MW cache...
        exclude = [
            'django.middleware.cache.UpdateCacheMiddleware',
            'django.middleware.cache.FetchFromCacheMiddleware'
        ]
        overrides = {
            'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
        }
        with self.settings(**overrides):
            # Silly to do these tests if this setting isn't True
            page_cache_setting = get_cms_setting('PAGE_CACHE')
            self.assertTrue(page_cache_setting)
            # Create a test page
            page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True)
            page1_url = page1.get_absolute_url()
            # Add some content
            placeholder = page1.placeholders.filter(slot="body")[0]
            add_plugin(placeholder, "TextPlugin", 'en', body="English")
            add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
            # Create a request object
            request = self.get_request(page1.get_path(), 'en')
            # Ensure that user is NOT authenticated
            self.assertFalse(request.user.is_authenticated)
            # Test that the page is initially uncached
            with self.assertNumQueries(FuzzyInt(1, 24)):
                response = self.client.get(page1_url)
                self.assertEqual(response.status_code, 200)
            #
            # Test that subsequent requests of the same page are cached by
            # asserting that they require fewer queries.
            #
            with self.assertNumQueries(0):
                response = self.client.get(page1_url)
                self.assertEqual(response.status_code, 200)
            # Simulate an application restart by rebuilding the plugin pool,
            # then restoring the original plugin registry.
            old_plugins = plugin_pool.plugins
            plugin_pool.clear()
            plugin_pool.discover_plugins()
            plugin_pool.plugins = old_plugins
            with self.assertNumQueries(FuzzyInt(1, 20)):
                response = self.client.get(page1_url)
                self.assertEqual(response.status_code, 200)
def test_sekizai_plugin(self):
page1 = create_page('test page 1', 'nav_playground.html', 'en',
published=True)
placeholder1 = page1.placeholders.filter(slot="body")[0]
placeholder2 = page1.placeholders.filter(slot="right-column")[0]
plugin_pool.register_plugin(SekizaiPlugin)
add_plugin(placeholder1, "SekizaiPlugin", 'en')
add_plugin(placeholder2, "TextPlugin", 'en', body="Deutsch")
page1.publish('en')
response = self.client.get(page1.get_absolute_url())
self.assertContains(response, 'alert(')
response = self.client.get(page1.get_absolute_url())
self.assertContains(response, 'alert(')
    def test_cache_invalidation(self):
        """Publishing new content must evict the cached copy of the page."""
        # Ensure that we're testing in an environment WITHOUT the MW cache...
        exclude = [
            'django.middleware.cache.UpdateCacheMiddleware',
            'django.middleware.cache.FetchFromCacheMiddleware'
        ]
        overrides = {
            'MIDDLEWARE': [mw for mw in settings.MIDDLEWARE if mw not in exclude]
        }
        with self.settings(**overrides):
            # Silly to do these tests if this setting isn't True
            page_cache_setting = get_cms_setting('PAGE_CACHE')
            self.assertTrue(page_cache_setting)
            page1 = create_page('test page 1', 'nav_playground.html', 'en',
                                published=True)
            page1_url = page1.get_absolute_url()
            placeholder = page1.placeholders.get(slot="body")
            add_plugin(placeholder, "TextPlugin", 'en', body="First content")
            page1.publish('en')
            # First request primes the cache; the second is served from it.
            response = self.client.get(page1_url)
            self.assertContains(response, 'First content')
            response = self.client.get(page1_url)
            self.assertContains(response, 'First content')
            add_plugin(placeholder, "TextPlugin", 'en', body="Second content")
            page1.publish('en')
            # Publishing must have invalidated the cached page.
            response = self.client.get(page1_url)
            self.assertContains(response, 'Second content')
    def test_render_placeholder_cache(self):
        """
        Regression test for #4223

        Assert that placeholder cache is cleared correctly when a plugin is
        saved.
        """
        invalidate_cms_page_cache()
        ex = Example1(
            char_1='one',
            char_2='two',
            char_3='tree',
            char_4='four'
        )
        ex.save()
        ph1 = ex.placeholder
        ###
        # add the test plugin
        ##
        test_plugin = add_plugin(ph1, "TextPlugin", "en", body="Some text")
        test_plugin.save()
        request = self.get_request()
        content_renderer = self.get_content_renderer(request)
        # asserting initial text
        context = SekizaiContext()
        context['request'] = self.get_request()
        text = content_renderer.render_placeholder(ph1, context)
        self.assertEqual(text, "Some text")
        # deleting local plugin cache
        del ph1._plugins_cache
        test_plugin.body = 'Other text'
        test_plugin.save()
        # plugin text has changed, so the placeholder rendering must reflect
        # the new text (i.e. the placeholder cache was cleared on save)
        text = content_renderer.render_placeholder(ph1, context)
        self.assertEqual(text, "Other text")
    def test_render_placeholderfield_cache_in_custom_model(self):
        """
        Regression test for #6912

        Assert that the placeholder of a PlaceholderField in a custom model
        has its cache cleared correctly when mark_as_dirty is called in the
        admin.
        """
        invalidate_cms_page_cache()
        # Create an instance of a custom model containing a placeholderfield
        ex = Example1(char_1="one", char_2="two", char_3="tree", char_4="four")
        ex.save()
        ph1 = ex.placeholder
        # Add a first plugin
        test_plugin = add_plugin(ph1, "TextPlugin", "en", body="Some text")
        test_plugin.save()
        # Create a first request using render_placeholder to ensure that the content is equal to the first plugin content
        request = self.get_request()
        content_renderer = self.get_content_renderer(request)
        context = SekizaiContext()
        context["request"] = self.get_request()
        text = content_renderer.render_placeholder(ph1, context, use_cache=True)
        self.assertEqual(text, "Some text")
        # Add a second plugin in the placeholder
        test_plugin = add_plugin(ph1, "TextPlugin", "en", body="Some other text")
        test_plugin.save()
        # Clear plugins cache to ensure that cms.utils.plugins.get_plugins() will refetch the plugins
        del ph1._plugins_cache
        # Create a second request using render_placeholder to ensure that the content is still equal to the first plugin content as cache was not cleared yet
        request = self.get_request()
        content_renderer = self.get_content_renderer(request)
        context = SekizaiContext()
        context["request"] = self.get_request()
        text = content_renderer.render_placeholder(ph1, context, use_cache=True)
        self.assertEqual(text, "Some text")
        # Mark placeholder as dirty as it is done in cms.admin.placeholderadmin file
        ph1.mark_as_dirty("en", clear_cache=False)
        # Create a last request to ensure that rendered content contains the two plugins content
        request = self.get_request()
        content_renderer = self.get_content_renderer(request)
        context = SekizaiContext()
        context["request"] = self.get_request()
        text = content_renderer.render_placeholder(ph1, context, use_cache=True)
        self.assertEqual(text, "Some textSome other text")
class PlaceholderCacheTestCase(CMSTestCase):
    """Tests for the low-level placeholder cache key/version helpers.

    A bilingual (en/de) page is built with a VaryCacheOnPlugin so that the
    cache key varies on the HTTP_COUNTRY_CODE header in addition to language
    and site.
    """
    def setUp(self):
        from django.core.cache import cache
        super().setUp()
        cache.clear()
        self.page = create_page(
            'en test page', 'nav_playground.html', 'en', published=True)
        # Now create and publish as 'de' title
        create_title('de', "de test page", self.page)
        self.page.publish('de')
        self.placeholder = self.page.placeholders.filter(slot="body")[0]
        plugin_pool.register_plugin(VaryCacheOnPlugin)
        add_plugin(self.placeholder, 'TextPlugin', 'en', body='English')
        add_plugin(self.placeholder, 'TextPlugin', 'de', body='Deutsch')
        add_plugin(self.placeholder, 'VaryCacheOnPlugin', 'en')
        add_plugin(self.placeholder, 'VaryCacheOnPlugin', 'de')
        # Requests differing in language and in country-code header, used to
        # prove that the cache key varies on both.
        self.en_request = self.get_request('/en/')
        self.en_request.current_page = Page.objects.get(pk=self.page.pk)
        self.en_us_request = self.get_request('/en/')
        self.en_us_request.META['HTTP_COUNTRY_CODE'] = 'US'
        self.en_uk_request = self.get_request('/en/')
        self.en_uk_request.META['HTTP_COUNTRY_CODE'] = 'UK'
        self.de_request = self.get_request('/de/')
        self.de_request.current_page = Page.objects.get(pk=self.page.pk)
    def tearDown(self):
        from django.core.cache import cache
        super().tearDown()
        plugin_pool.unregister_plugin(VaryCacheOnPlugin)
        cache.clear()
    def test_get_placeholder_cache_version_key(self):
        """The version key must follow the documented prefix|id|lang|site layout."""
        cache_version_key = '{prefix}|placeholder_cache_version|id:{id}|lang:{lang}|site:{site}'.format(
            prefix=get_cms_setting('CACHE_PREFIX'),
            id=self.placeholder.pk,
            lang='en',
            site=1,
        )
        self.assertEqual(
            _get_placeholder_cache_version_key(self.placeholder, 'en', 1),
            cache_version_key
        )
    def test_set_clear_get_placeholder_cache_version(self):
        """Clearing the placeholder cache must bump its version."""
        initial, _ = _get_placeholder_cache_version(self.placeholder, 'en', 1)
        clear_placeholder_cache(self.placeholder, 'en', 1)
        version, _ = _get_placeholder_cache_version(self.placeholder, 'en', 1)
        self.assertGreater(version, initial)
    def test_get_placeholder_cache_key(self):
        """The content key must vary on language and on the vary-on header."""
        version, vary_on_list = _get_placeholder_cache_version(self.placeholder, 'en', 1)
        desired_key = '{prefix}|render_placeholder|id:{id}|lang:{lang}|site:{site}|tz:{tz}|v:{version}|country-code:{cc}'.format(  # noqa
            prefix=get_cms_setting('CACHE_PREFIX'),
            id=self.placeholder.pk,
            lang='en',
            site=1,
            tz=get_timezone_name(),
            version=version,
            cc='_',
        )
        _set_placeholder_cache_version(self.placeholder, 'en', 1, version, vary_on_list=vary_on_list, duration=1)
        actual_key = _get_placeholder_cache_key(self.placeholder, 'en', 1, self.en_request)
        self.assertEqual(actual_key, desired_key)
        en_key = _get_placeholder_cache_key(self.placeholder, 'en', 1, self.en_request)
        de_key = _get_placeholder_cache_key(self.placeholder, 'de', 1, self.de_request)
        self.assertNotEqual(en_key, de_key)
        en_us_key = _get_placeholder_cache_key(self.placeholder, 'en', 1, self.en_us_request)
        self.assertNotEqual(en_key, en_us_key)
        desired_key = '{prefix}|render_placeholder|id:{id}|lang:{lang}|site:{site}|tz:{tz}|v:{version}|country-code:{cc}'.format(  # noqa
            prefix=get_cms_setting('CACHE_PREFIX'),
            id=self.placeholder.pk,
            lang='en',
            site=1,
            tz=get_timezone_name(),
            version=version,
            cc='US',
        )
        self.assertEqual(en_us_key, desired_key)
    def test_set_get_placeholder_cache(self):
        """Round-trip content through the cache for each language/header variant."""
        en_renderer = self.get_content_renderer(self.en_request)
        en_context = Context({
            'request': self.en_request,
        })
        en_us_renderer = self.get_content_renderer(self.en_us_request)
        en_us_context = Context({
            'request': self.en_us_request,
        })
        en_uk_renderer = self.get_content_renderer(self.en_uk_request)
        en_uk_context = Context({
            'request': self.en_uk_request,
        })
        en_content = en_renderer.render_placeholder(self.placeholder, en_context, 'en', width=350)
        en_us_content = en_us_renderer.render_placeholder(self.placeholder, en_us_context, 'en', width=350)
        en_uk_content = en_uk_renderer.render_placeholder(self.placeholder, en_uk_context, 'en', width=350)
        # Drop the local plugin cache so the 'de' render refetches plugins.
        del self.placeholder._plugins_cache
        de_renderer = self.get_content_renderer(self.de_request)
        de_context = Context({
            'request': self.de_request,
        })
        de_content = de_renderer.render_placeholder(self.placeholder, de_context, 'de', width=350)
        self.assertNotEqual(en_content, de_content)
        set_placeholder_cache(self.placeholder, 'en', 1, en_content, self.en_request)
        cached_en_content = get_placeholder_cache(self.placeholder, 'en', 1, self.en_request)
        self.assertEqual(cached_en_content, en_content)
        set_placeholder_cache(self.placeholder, 'de', 1, de_content, self.de_request)
        cached_de_content = get_placeholder_cache(self.placeholder, 'de', 1, self.de_request)
        self.assertNotEqual(cached_en_content, cached_de_content)
        set_placeholder_cache(self.placeholder, 'en', 1, en_us_content, self.en_us_request)
        cached_en_us_content = get_placeholder_cache(self.placeholder, 'en', 1, self.en_us_request)
        self.assertNotEqual(cached_en_content, cached_en_us_content)
        set_placeholder_cache(self.placeholder, 'en', 1, en_uk_content, self.en_uk_request)
        cached_en_uk_content = get_placeholder_cache(self.placeholder, 'en', 1, self.en_uk_request)
        self.assertNotEqual(cached_en_us_content, cached_en_uk_content)
    def test_set_get_placeholder_cache_with_long_prefix(self):
        """
        This is for testing that everything continues to work even when the
        cache-keys are hashed.
        """
        # Use an absurdly long cache prefix to get us in the right neighborhood...
        with self.settings(CMS_CACHE_PREFIX="super_lengthy_prefix" * 9):  # 180 chars
            en_crazy_request = self.get_request('/en/')
            # NOTE(review): the renderer is built from self.de_request here,
            # not en_crazy_request — looks intentional for rendering only, but
            # confirm.
            en_crazy_renderer = self.get_content_renderer(self.de_request)
            # Use a ridiculously long "country code" (80 chars), already we're at 260 chars.
            en_crazy_request.META['HTTP_COUNTRY_CODE'] = 'US' * 40  # 80 chars
            en_crazy_context = Context({'request': en_crazy_request})
            en_crazy_content = en_crazy_renderer.render_placeholder(
                self.placeholder,
                en_crazy_context,
                language='en',
                width=350,
            )
            set_placeholder_cache(self.placeholder, 'en', 1, en_crazy_content, en_crazy_request)
            # Prove that it is hashed...
            crazy_cache_key = _get_placeholder_cache_key(self.placeholder, 'en', 1, en_crazy_request)
            key_length = len(crazy_cache_key)
            # 221 = 180 (prefix length) + 1 (separator) + 40 (sha1 hash)
            self.assertTrue('render_placeholder' not in crazy_cache_key and key_length == 221)
            # Prove it still works as expected
            cached_en_crazy_content = get_placeholder_cache(self.placeholder, 'en', 1, en_crazy_request)
            self.assertEqual(en_crazy_content, cached_en_crazy_content)
| |
from ctypes import c_char_p
import logging
import traceback
from multiprocessing import Semaphore, Condition, Lock, Value, Pipe, Process
class Task:
    """A named unit of work in the build graph.

    A task tracks its direct dependencies (other Task objects) and a state
    that moves from NEW through RUNNING to DONE or FAILED. Worker processes
    report their final state back through ``result_proxy`` (a shared
    ``multiprocessing.Value`` holding the state name), which ``update()``
    folds into ``self.state``.
    """
    class State:
        """Namespace for the valid task states (plain strings)."""
        NEW = 'NEW'
        RUNNING = 'RUNNING'
        DONE = 'DONE'
        FAILED = 'FAILED'
    def parse_state(self, string):
        """Return the canonical State constant for ``string``.

        Raises AttributeError if ``string`` is not a valid state name.
        """
        if string == Task.State.NEW:
            return Task.State.NEW
        elif string == Task.State.RUNNING:
            return Task.State.RUNNING
        elif string == Task.State.DONE:
            return Task.State.DONE
        elif string == Task.State.FAILED:
            return Task.State.FAILED
        else:
            # Bug fix: message and value used to be passed as two separate
            # arguments, so the '%s' placeholder was never interpolated.
            raise AttributeError('Invalid state: %s' % string)
    def __init__(self, name):
        self.name = name
        # Direct dependencies, as a set of Task objects.
        self.dependencies = set()
        self.state = Task.State.NEW
        # Shared Value written by the worker process; consumed by update().
        self.result_proxy = None
    def __str__(self):
        return self.name
    def update(self):
        """Fold a worker-reported result (if any) into ``self.state``."""
        # Bug fix: this used "is not ''" — an identity comparison that only
        # worked through CPython string interning; compare by value instead.
        if self.result_proxy is not None and self.result_proxy.value != '':
            logging.debug("Updating task %s to status %s", self.name, self.result_proxy.value)
            self.state = self.parse_state(self.result_proxy.value)
            # Reset to None so the synchronized object is never sent to a
            # child process again ('Synchronized objects should only be
            # shared between processes through inheritance').
            self.result_proxy = None
    def has_resolved_dependencies(self):
        """Return True if all dependencies are in State.DONE"""
        for dependency in self.dependencies:
            if dependency.state != Task.State.DONE:
                return False
        return True
    def is_new(self):
        """Return True while the task has not been started yet."""
        return self.state == Task.State.NEW
    def dependencies_as_list(self):
        """Returns a list of dependency names."""
        return [dependency.name for dependency in self.dependencies]
    def dependencies_as_string(self):
        """Returns a comma separated list of dependency names."""
        return ",".join(self.dependencies_as_list())
    def ordered_dependencies(self):
        """Return every transitive dependency, dependencies-first."""
        return self._all_dependencies()
    def _all_dependencies(self):
        """Depth-first walk collecting transitive dependencies so that each
        task appears after all tasks it depends on; self is excluded."""
        deps = []
        unprocessed_deps = [self]
        processed_deps = []
        while unprocessed_deps:
            dep = unprocessed_deps.pop()
            if dep.dependencies and dep not in processed_deps and \
                    not set(dep.dependencies).issubset(set(processed_deps)):
                # Revisit dep after its own dependencies have been handled.
                unprocessed_deps += [dep] + list(dep.dependencies)
                processed_deps.append(dep)
            elif dep not in deps and dep is not self:
                deps.append(dep)
        return deps
    def has_dependencies(self):
        return len(self.dependencies) > 0
class Tasks:
    """Registry of Task objects keyed by name, with dependency resolution
    and topological ordering of the build graph."""
    def __init__(self):
        self.tasks = {}
        # Set whenever the graph changes so tsort() re-validates lazily.
        self.dirty = True
    def get_task(self, name):
        """Get task by name or create it if it does not exist."""
        if name in self.tasks:
            task = self.tasks[name]
        else:
            task = Task(name)
            self.tasks[name] = task
        return task
    def add(self, task_name, dependency_names=()):
        """Register a task and its dependencies, creating missing tasks.

        Bug fix: the default for dependency_names was a shared mutable
        set(); an immutable empty tuple is equivalent and safe.
        """
        task = self.get_task(task_name)
        for dependency_name in dependency_names:
            dependency = self.get_task(dependency_name)
            task.dependencies.add(dependency)
        self.dirty = True
    def get_next(self):
        """Return the next NEW task from the stack with all dependencies resolved.

        Return None if there are no tasks with resolved dependencies or if
        there are no more tasks on the stack.
        Use count() to check if there are still some tasks left on the stack.
        Raise ValueError if total ordering is not possible."""
        self.update_tasks_status()
        if self.dirty:
            self.tsort()
            self.dirty = False
        # iteritems() replaced with items() so the code runs on both
        # Python 2 and Python 3 (same behaviour on both).
        for key, task in self.tasks.items():
            if task.is_new() and task.has_resolved_dependencies():
                return task
        return None
    def count(self, state):
        """Return the number of tasks currently in the given state."""
        self.update_tasks_status()
        count = 0
        for key, task in self.tasks.items():
            if task.state == state:
                count += 1
        return count
    def print_name(self, state):
        """Return a space separated string of names of tasks in the given state."""
        # Renamed the accumulator: the original shadowed the builtin 'list'.
        names = [task.name for key, task in self.tasks.items() if task.state == state]
        return " ".join(names)
    def update_tasks_status(self):
        """Fold worker-reported results into every task's state."""
        for key, task in self.tasks.items():
            task.update()
    def are_dependencies_buildable(self, task):
        """Return False if any transitive dependency of task has FAILED."""
        for dependency in task.dependencies:
            # Bug fix: compare states by equality — they are plain strings,
            # and 'is' only worked through CPython string interning.
            if dependency.state == Task.State.FAILED:
                return False
            if not self.are_dependencies_buildable(dependency):
                return False
        return True
    def count_buildable_tasks(self):
        """Count tasks that are new and have dependencies in non FAILED state."""
        self.update_tasks_status()
        buildable_tasks_count = 0
        for key, task in self.tasks.items():
            if task.state == Task.State.NEW:
                if self.are_dependencies_buildable(task):
                    buildable_tasks_count += 1
                    logging.debug("Buildable task: %s", task.name)
                else:
                    logging.debug("Task %s has broken dependencies.", task.name)
        return buildable_tasks_count
    def filter_tasks(self, task_names, keep_dependencies=False):
        """Keep only tasks with the given names (plus their dependencies if
        keep_dependencies=True) in the list of tasks."""
        new_tasks = {}
        for task_name in task_names:
            task = self.get_task(task_name)
            # Bug fix: new_tasks is keyed by *name*, so membership must be
            # tested with task.name — the original tested the Task object
            # itself, which could never match a string key.
            if task.name not in new_tasks:
                new_tasks[task.name] = task
                if keep_dependencies:
                    for dependency in task.ordered_dependencies():
                        if dependency.name not in new_tasks:
                            new_tasks[dependency.name] = dependency
                else:
                    # strip dependencies
                    task.dependencies = set()
        self.tasks = new_tasks
    def tsort(self):
        """Given a partial ordering, return a totally ordered list.

        Each task maps to the set of tasks it depends on. The return value
        is a list of sets, each of which has only dependencies on items in
        previous entries in the list.
        Raise ValueError if ordering is not possible (check for circular or
        missing dependencies)."""
        # Work on copied sets so task.dependencies are never mutated.
        parts = {}
        for key, task in self.tasks.items():
            parts[task] = set(task.dependencies)
        result = []
        while True:
            level = set([name for name, deps in parts.items() if not deps])
            if not level:
                break
            result.append(level)
            parts = dict([(name, deps - level)
                          for name, deps in parts.items() if name not in level])
        if parts:
            # "raise ValueError, msg" replaced with the call form, which is
            # valid on both Python 2 and Python 3.
            raise ValueError('total ordering not possible (check for circular or missing dependencies)')
        return result
    def get_all(self):
        """Return the underlying name -> Task mapping."""
        return self.tasks
class TaskRunner:
    """TaskRunner is used for parallel execution of tasks (replacement for make)"""
    def __init__(self, run_build):
        # run_build(task, build_config) -> exit status; executed in a child
        # process for every task.
        self.run_build = run_build
    def wait_tasks_to_complete(self, parallel_threads, process_finished_notify, semaphore):
        """Block until at least one running worker signals completion."""
        logging.debug("Checking if there are running tasks.")
        # Free semaphore slots below parallel_threads means a worker holds
        # one, i.e. a task is still in flight.
        if semaphore.get_value() < parallel_threads: #is any task running
            process_finished_notify.acquire()
            logging.debug("Waiting for tasks to complete.")
            process_finished_notify.wait()
            logging.debug("Finished waiting tasks to complete.")
            process_finished_notify.release()
    def run(self, tasks, build_config, parallel_threads):
        """Execute all tasks, at most parallel_threads at a time.

        Returns 0 on success, 1 if any task failed or tasks were left in
        RUNNING/NEW state when no more work was buildable.
        """
        semaphore = Semaphore(parallel_threads)
        process_finished_notify = Condition(Lock())
        while tasks.count_buildable_tasks() > 0:
            task = tasks.get_next()
            if task is None:
                # Nothing runnable right now: wait for a worker to finish,
                # which may resolve dependencies of pending tasks.
                self.wait_tasks_to_complete(parallel_threads, process_finished_notify, semaphore)
                continue
            semaphore.acquire()
            task.state = Task.State.RUNNING
            logging.debug("Starting task %s", task.name)
            self.start_new_process(process_finished_notify, semaphore, self.process_job, task, build_config)
        self.wait_tasks_to_complete(parallel_threads, process_finished_notify, semaphore)
        if tasks.count(Task.State.FAILED) > 0:
            logging.error('Some packages failed to build.')
            logging.error(" %s", tasks.print_name(Task.State.FAILED))
            return 1
        if tasks.count(Task.State.RUNNING) > 0:
            logging.error('Something went wrong, there are still some running tasks.')
            return 1
        if tasks.count(Task.State.NEW) > 0:
            logging.error('Something went wrong, there are still unprocessed tasks.')
            return 1
        logging.info("Build completed successfully.")
        return 0
    def start_new_process(self, process_finished_notify, semaphore, target_method, task, build_config):
        """Spawn a daemon worker process executing target_method for task."""
        result_val = Value(c_char_p, '')
        task_conn, task_conn_remote = Pipe()
        config_conn, config_conn_remote = Pipe()
        p = Process(target=target_method, args=[semaphore, process_finished_notify, task_conn_remote, config_conn_remote, result_val])
        p.daemon = True
        logging.debug("Sending task: %s", task.name)
        task_conn.send(task)
        config_conn.send(build_config)
        # The worker reports its final state through this shared value;
        # Task.update() folds it back into task.state in the parent.
        task.result_proxy = result_val
        p.start()
    def process_job(self, semaphore, process_finished_notify, task_conn, config_conn, result_proxy):
        """Worker-process entry point: run the build and report the result."""
        task = task_conn.recv()
        build_config = config_conn.recv()
        try:
            exit_status = self.run_build(task, build_config)
        except Exception:
            print traceback.format_exc()
            exit_status = 1
        if exit_status != 0:
            result_proxy.value = Task.State.FAILED
        else:
            result_proxy.value = Task.State.DONE
        # Release the worker slot and wake the parent before exiting.
        process_finished_notify.acquire()
        semaphore.release()
        process_finished_notify.notify()
        process_finished_notify.release()
        logging.debug("Task %s finished.", task.name)
| |
'''
Offline tests
'''
from builtins import object
from unittest import TestCase
from mock import MagicMock
from kafka_monitor import KafkaMonitor
from plugins.base_handler import BaseHandler
from kafka.common import OffsetOutOfRangeError
from jsonschema import Draft4Validator
class ExampleHandler(BaseHandler):
    """Test fixture plugin whose schema file intentionally does not exist,
    used to assert that plugin loading raises IOError on a missing schema."""
    schema = "crazy_schema.json"
    def setup(self, settings):
        # Nothing to initialize for this fixture.
        pass
class TestKafkaMonitor(TestCase):
    """Offline unit tests for KafkaMonitor: plugin loading, stats wiring and
    message processing. Kafka, Redis and the logger are mocked throughout."""
    def setUp(self):
        self.kafka_monitor = KafkaMonitor("settings.py", True)
        self.kafka_monitor.settings = self.kafka_monitor.wrapper.load("settings.py")
        self.kafka_monitor.logger = MagicMock()
    def test_load_plugins(self):
        """_load_plugins honours the PLUGINS setting and surfaces errors."""
        # test loading default plugins
        assert_keys = [100, 200, 300, 400]
        self.kafka_monitor._load_plugins()
        self.assertEqual(list(self.kafka_monitor.plugins_dict.keys()), assert_keys)
        # test removing a plugin from settings
        assert_keys = [200, 300, 400]
        self.kafka_monitor.settings['PLUGINS'] \
            ['plugins.scraper_handler.ScraperHandler'] = None
        self.kafka_monitor._load_plugins()
        self.assertEqual(list(self.kafka_monitor.plugins_dict.keys()), assert_keys)
        self.kafka_monitor.settings['PLUGINS'] \
            ['plugins.scraper_handler.ScraperHandler'] = 100
        # fail if the class is not found
        self.kafka_monitor.settings['PLUGINS'] \
            ['plugins.crazy_class.CrazyHandler'] = 300
        self.assertRaises(ImportError, self.kafka_monitor._load_plugins)
        del self.kafka_monitor.settings['PLUGINS'] \
            ['plugins.crazy_class.CrazyHandler']
        # Throw error if schema could not be found
        # NOTE(review): the trailing comma makes this value the tuple (300,);
        # the test still passes, so presumably only truthiness matters here —
        # confirm against _load_plugins.
        self.kafka_monitor.settings['PLUGINS'] \
            ['tests.test_kafka_monitor.ExampleHandler'] = 300,
        self.assertRaises(IOError, self.kafka_monitor._load_plugins)
        del self.kafka_monitor.settings['PLUGINS'] \
            ['tests.test_kafka_monitor.ExampleHandler']
    def test_load_stats_total(self):
        """_setup_stats_total creates lifetime plus only valid rolling windows."""
        # test no rolling stats, only total
        self.kafka_monitor.stats_dict = {}
        self.kafka_monitor.settings['STATS_TIMES'] = []
        self.kafka_monitor._setup_stats_total(MagicMock())
        self.assertEquals(list(self.kafka_monitor.stats_dict['total'].keys()), ['lifetime'])
        self.assertEquals(list(self.kafka_monitor.stats_dict['fail'].keys()), ['lifetime'])
        # test good/bad rolling stats
        self.kafka_monitor.stats_dict = {}
        self.kafka_monitor.settings['STATS_TIMES'] = [
            'SECONDS_15_MINUTE',
            'SECONDS_1_HOUR',
            'SECONDS_DUMB',
        ]
        good = [
            'lifetime', # for totals, not DUMB
            '900',
            '3600',
        ]
        self.kafka_monitor._setup_stats_total(MagicMock())
        self.assertEquals(
            sorted([str(x) for x in self.kafka_monitor.stats_dict['total'].keys()]),
            sorted(good))
        self.assertEquals(
            sorted([str(x) for x in self.kafka_monitor.stats_dict['fail'].keys()]),
            sorted(good))
        # Redis key layout: lifetime entries live under key 0.
        k1 = 'stats:kafka-monitor:total'
        k2 = 'stats:kafka-monitor:fail'
        for time_key in self.kafka_monitor.stats_dict['total']:
            if time_key == 0:
                self.assertEquals(
                    self.kafka_monitor.stats_dict['total'][0].key,
                    '{k}:lifetime'.format(k=k1)
                )
            else:
                self.assertEquals(
                    self.kafka_monitor.stats_dict['total'][time_key].key,
                    '{k}:{t}'.format(k=k1, t=time_key)
                )
        for time_key in self.kafka_monitor.stats_dict['fail']:
            if time_key == 0:
                self.assertEquals(
                    self.kafka_monitor.stats_dict['fail'][0].key,
                    '{k}:lifetime'.format(k=k2)
                )
            else:
                self.assertEquals(
                    self.kafka_monitor.stats_dict['fail'][time_key].key,
                    '{k}:{t}'.format(k=k2, t=time_key)
                )
    def test_load_stats_plugins(self):
        """_setup_stats_plugins builds per-plugin stats keyed by class name."""
        # lets assume we are loading the default plugins
        self.kafka_monitor._load_plugins()
        # test no rolling stats
        self.kafka_monitor.stats_dict = {}
        self.kafka_monitor.settings['STATS_TIMES'] = []
        self.kafka_monitor._setup_stats_plugins(MagicMock())
        defaults = [
            'ScraperHandler',
            'ActionHandler',
            'StatsHandler',
            'ZookeeperHandler'
        ]
        self.assertEquals(
            sorted(list(self.kafka_monitor.stats_dict['plugins'].keys())),
            sorted(defaults))
        for key in self.kafka_monitor.plugins_dict:
            plugin_name = self.kafka_monitor.plugins_dict[key]['instance'].__class__.__name__
            self.assertEquals(
                list(self.kafka_monitor.stats_dict['plugins'][plugin_name].keys()),
                ['lifetime'])
        # test good/bad rolling stats
        self.kafka_monitor.stats_dict = {}
        self.kafka_monitor.settings['STATS_TIMES'] = [
            'SECONDS_15_MINUTE',
            'SECONDS_1_HOUR',
            'SECONDS_DUMB',
        ]
        good = [
            'lifetime', # for totals, not DUMB
            '900',
            '3600',
        ]
        self.kafka_monitor._setup_stats_plugins(MagicMock())
        self.assertEquals(
            sorted(self.kafka_monitor.stats_dict['plugins'].keys()),
            sorted(defaults))
        for key in self.kafka_monitor.plugins_dict:
            plugin_name = self.kafka_monitor.plugins_dict[key]['instance'].__class__.__name__
            self.assertEquals(
                sorted([str(x) for x in self.kafka_monitor.stats_dict['plugins'][plugin_name].keys()]),
                sorted(good))
        for plugin_key in self.kafka_monitor.stats_dict['plugins']:
            k1 = 'stats:kafka-monitor:{p}'.format(p=plugin_key)
            for time_key in self.kafka_monitor.stats_dict['plugins'][plugin_key]:
                if time_key == 0:
                    self.assertEquals(
                        self.kafka_monitor.stats_dict['plugins'][plugin_key][0].key,
                        '{k}:lifetime'.format(k=k1)
                    )
                else:
                    self.assertEquals(
                        self.kafka_monitor.stats_dict['plugins'][plugin_key][time_key].key,
                        '{k}:{t}'.format(k=k1, t=time_key)
                    )
    def test_process_messages(self):
        """_process_messages must survive Kafka offset errors and bad JSON,
        and must dispatch valid messages to the matching plugin handler."""
        self.kafka_monitor.consumer = MagicMock()
        self.kafka_monitor.stats_dict = {}
        # handle kafka offset errors
        self.kafka_monitor.consumer = MagicMock(
            side_effect=OffsetOutOfRangeError("1"))
        try:
            self.kafka_monitor._process_messages()
        except OffsetOutOfRangeError:
            self.fail("_process_messages did not handle Kafka Offset Error")
        # handle bad json errors
        message_string = "{\"sdasdf   sd}"
        # fake class so we can use dot notation
        class a(object):
            pass
        m = a()
        m.value = message_string
        messages = [m]
        self.kafka_monitor.consumer = MagicMock()
        self.kafka_monitor.consumer.__iter__.return_value = messages
        try:
            self.kafka_monitor._process_messages()
        except OffsetOutOfRangeError:
            self.fail("_process_messages did not handle bad json")
        # set up to process messages
        self.kafka_monitor._load_plugins()
        self.kafka_monitor.validator = self.kafka_monitor.extend_with_default(Draft4Validator)
        # Replace the handlers with mocks that raise, proving dispatch reached
        # the intended plugin.
        list(self.kafka_monitor.plugins_dict.items())[0][1]['instance'].handle = MagicMock(side_effect=AssertionError("scrape"))
        list(self.kafka_monitor.plugins_dict.items())[1][1]['instance'].handle = MagicMock(side_effect=AssertionError("action"))
        # test that handler function is called for the scraper
        message_string = "{\"url\":\"www.stuff.com\",\"crawlid\":\"1234\"," \
                         "\"appid\":\"testapp\"}"
        m.value = message_string
        messages = [m]
        self.kafka_monitor.consumer.__iter__.return_value = messages
        try:
            self.kafka_monitor._process_messages()
            self.fail("Scrape not called")
        except AssertionError as e:
            self.assertEquals("scrape", str(e))
        # test that handler function is called for the actions
        message_string = "{\"uuid\":\"blah\",\"crawlid\":\"1234\"," \
                         "\"appid\":\"testapp\",\"action\":\"info\",\"spiderid\":\"link\"}"
        m.value = message_string
        messages = [m]
        self.kafka_monitor.consumer.__iter__.return_value = messages
        try:
            self.kafka_monitor._process_messages()
            self.fail("Action not called")
        except AssertionError as e:
            self.assertEquals("action", str(e))
| |
# flake8: noqa: E501
#
# Author: Joris Vankerschaver 2013
#
import numpy as np
import scipy.linalg
from scipy._lib import doccer
from scipy.special import gammaln
from scipy._lib._util import check_random_state
from scipy.stats import mvn
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
_doc_random_state = """\
random_state : {None, int, np.random.RandomState, np.random.Generator}, optional
Used for drawing random variates.
If `seed` is `None` the `~np.random.RandomState` singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used, seeded
with seed.
If `seed` is already a ``RandomState`` or ``Generator`` instance,
then that object is used.
Default is None.
"""
def _squeeze_output(out):
"""
Remove single-dimensional entries from array and convert to scalar,
if necessary.
"""
out = out.squeeze()
if out.ndim == 0:
out = out[()]
return out
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
"""
Determine which eigenvalues are "small" given the spectrum.
This is for compatibility across various linear algebra functions
that should agree about whether or not a Hermitian matrix is numerically
singular and what is its numerical matrix rank.
This is designed to be compatible with scipy.linalg.pinvh.
Parameters
----------
spectrum : 1d ndarray
Array of eigenvalues of a Hermitian matrix.
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
eps : float
Magnitude cutoff for numerical negligibility.
"""
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = spectrum.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
eps = cond * np.max(abs(spectrum))
return eps
def _pinv_1d(v, eps=1e-5):
"""
A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers.
"""
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
class _PSD(object):
    """
    Coordinated quantities derived from a symmetric positive semidefinite
    matrix.

    A single symmetric eigendecomposition yields the whitening factor
    ``U`` (so ``pinv = U @ U.T``), the log of the pseudo-determinant and
    the numerical rank, all using one shared notion of which eigenvalues
    are negligibly small. The cutoff convention matches
    scipy.linalg.pinvh(), but not necessarily np.linalg.det() or
    np.linalg.matrix_rank().

    Parameters
    ----------
    M : array_like
        Symmetric positive semidefinite matrix (2-D).
    cond, rcond : float, optional
        Relative cutoff for small eigenvalues; eigenvalues smaller than
        ``rcond * largest_eigenvalue`` are treated as zero. None or -1
        selects a machine-precision default. ``rcond`` wins over ``cond``.
    lower : bool, optional
        Use the lower (default) or upper triangle of M.
    check_finite : bool, optional
        Verify the input contains only finite numbers; disabling is
        faster but unsafe for inputs with infs/NaNs.
    allow_singular : bool, optional
        Whether a singular matrix is acceptable. (Default: True)
    """

    def __init__(self, M, cond=None, rcond=None, lower=True,
                 check_finite=True, allow_singular=True):
        # eigh takes care of array conversion, the finiteness check and
        # asserting that the matrix is square.
        eigvals, eigvecs = scipy.linalg.eigh(M, lower=lower,
                                             check_finite=check_finite)
        eps = _eigvalsh_to_eps(eigvals, cond, rcond)
        if np.min(eigvals) < -eps:
            raise ValueError('the input matrix must be positive semidefinite')
        above_cutoff = eigvals[eigvals > eps]
        if len(above_cutoff) < len(eigvals) and not allow_singular:
            raise np.linalg.LinAlgError('singular matrix')
        # Whitening factor: eigvecs scaled by 1/sqrt(eigenvalue) on the
        # non-negligible part of the spectrum.
        self.U = np.multiply(eigvecs, np.sqrt(_pinv_1d(eigvals, eps)))
        self.rank = len(above_cutoff)
        self.log_pdet = np.sum(np.log(above_cutoff))
        # The pseudo-inverse is computed lazily on first access.
        self._pinv = None

    @property
    def pinv(self):
        """Moore–Penrose pseudo-inverse of M, cached after first access."""
        if self._pinv is None:
            self._pinv = np.dot(self.U, self.U.T)
        return self._pinv
class multi_rv_generic(object):
    """
    Base class providing the functionality shared by all multivariate
    distribution generators.
    """

    def __init__(self, seed=None):
        super(multi_rv_generic, self).__init__()
        self._random_state = check_random_state(seed)

    @property
    def random_state(self):
        """ Get or set the RandomState object for generating random variates.

        Accepted values are None, an int, a RandomState instance, or a
        np.random.Generator instance.  None (or np.random) selects the
        RandomState singleton used by np.random; an int seeds a new
        RandomState instance; an existing RandomState or Generator
        instance is used as-is.
        """
        return self._random_state

    @random_state.setter
    def random_state(self, seed):
        self._random_state = check_random_state(seed)

    def _get_random_state(self, random_state):
        # An explicitly supplied state takes precedence over the stored one.
        if random_state is None:
            return self._random_state
        return check_random_state(random_state)
class multi_rv_frozen(object):
    """
    Base class providing the functionality shared by all frozen
    multivariate distributions.
    """

    @property
    def random_state(self):
        # Frozen objects delegate random-state handling to the generator
        # instance stored in self._dist.
        return self._dist._random_state

    @random_state.setter
    def random_state(self, seed):
        self._dist._random_state = check_random_state(seed)
_mvn_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
Whether to allow a singular covariance matrix. (Default: False)
"""
_mvn_doc_callparams_note = \
"""Setting the parameter `mean` to `None` is equivalent to having `mean`
be the zero-vector. The parameter `cov` can be a scalar, in which case
the covariance matrix is the identity times that value, a vector of
diagonal entries for the covariance matrix, or a two-dimensional
array_like.
"""
_mvn_doc_frozen_callparams = ""
_mvn_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mvn_docdict_params = {
'_mvn_doc_default_callparams': _mvn_doc_default_callparams,
'_mvn_doc_callparams_note': _mvn_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mvn_docdict_noparams = {
'_mvn_doc_default_callparams': _mvn_doc_frozen_callparams,
'_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_normal_gen(multi_rv_generic):
    r"""
    A multivariate normal random variable.
    The `mean` keyword specifies the mean. The `cov` keyword specifies the
    covariance matrix.
    Methods
    -------
    ``pdf(x, mean=None, cov=1, allow_singular=False)``
        Probability density function.
    ``logpdf(x, mean=None, cov=1, allow_singular=False)``
        Log of the probability density function.
    ``cdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5)``
        Cumulative distribution function.
    ``logcdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5)``
        Log of the cumulative distribution function.
    ``rvs(mean=None, cov=1, size=1, random_state=None)``
        Draw random samples from a multivariate normal distribution.
    ``entropy()``
        Compute the differential entropy of the multivariate normal.
    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
    %(_mvn_doc_default_callparams)s
    %(_doc_random_state)s
    Alternatively, the object may be called (as a function) to fix the mean
    and covariance parameters, returning a "frozen" multivariate normal
    random variable:
    rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
        - Frozen object with the same methods but holding the given
          mean and covariance fixed.
    Notes
    -----
    %(_mvn_doc_callparams_note)s
    The covariance matrix `cov` must be a (symmetric) positive
    semi-definite matrix. The determinant and inverse of `cov` are computed
    as the pseudo-determinant and pseudo-inverse, respectively, so
    that `cov` does not need to have full rank.
    The probability density function for `multivariate_normal` is
    .. math::
        f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
               \exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),
    where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
    and :math:`k` is the dimension of the space where :math:`x` takes values.
    .. versionadded:: 0.14.0
    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy.stats import multivariate_normal
    >>> x = np.linspace(0, 5, 10, endpoint=False)
    >>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
    array([ 0.00108914,  0.01033349,  0.05946514,  0.20755375,  0.43939129,
            0.56418958,  0.43939129,  0.20755375,  0.05946514,  0.01033349])
    >>> fig1 = plt.figure()
    >>> ax = fig1.add_subplot(111)
    >>> ax.plot(x, y)
    The input quantiles can be any shape of array, as long as the last
    axis labels the components. This allows us for instance to
    display the frozen pdf for a non-isotropic random variable in 2D as
    follows:
    >>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
    >>> pos = np.dstack((x, y))
    >>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
    >>> fig2 = plt.figure()
    >>> ax2 = fig2.add_subplot(111)
    >>> ax2.contourf(x, y, rv.pdf(pos))
    """
    def __init__(self, seed=None):
        super(multivariate_normal_gen, self).__init__(seed)
        # Fill in the %(...)s docstring templates at instantiation time.
        self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params)
    def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
        """
        Create a frozen multivariate normal distribution.
        See `multivariate_normal_frozen` for more information.
        """
        return multivariate_normal_frozen(mean, cov,
                                          allow_singular=allow_singular,
                                          seed=seed)
    def _process_parameters(self, dim, mean, cov):
        """
        Infer dimensionality from mean or covariance matrix, ensure that
        mean and covariance are full vector resp. matrix.
        """
        # Try to infer dimensionality
        # Precedence: explicit `dim` argument, then mean's size, then the
        # covariance's leading dimension; default to 1.
        if dim is None:
            if mean is None:
                if cov is None:
                    dim = 1
                else:
                    cov = np.asarray(cov, dtype=float)
                    if cov.ndim < 2:
                        dim = 1
                    else:
                        dim = cov.shape[0]
            else:
                mean = np.asarray(mean, dtype=float)
                dim = mean.size
        else:
            if not np.isscalar(dim):
                raise ValueError("Dimension of random variable must be "
                                 "a scalar.")
        # Check input sizes and return full arrays for mean and cov if
        # necessary
        if mean is None:
            mean = np.zeros(dim)
        mean = np.asarray(mean, dtype=float)
        if cov is None:
            cov = 1.0
        cov = np.asarray(cov, dtype=float)
        if dim == 1:
            mean.shape = (1,)
            cov.shape = (1, 1)
        if mean.ndim != 1 or mean.shape[0] != dim:
            raise ValueError("Array 'mean' must be a vector of length %d." %
                             dim)
        if cov.ndim == 0:
            # Scalar covariance: isotropic, identity scaled by the value.
            cov = cov * np.eye(dim)
        elif cov.ndim == 1:
            # 1-D covariance: interpreted as the diagonal entries.
            cov = np.diag(cov)
        elif cov.ndim == 2 and cov.shape != (dim, dim):
            rows, cols = cov.shape
            if rows != cols:
                msg = ("Array 'cov' must be square if it is two dimensional,"
                       " but cov.shape = %s." % str(cov.shape))
            else:
                msg = ("Dimension mismatch: array 'cov' is of shape %s,"
                       " but 'mean' is a vector of length %d.")
                msg = msg % (str(cov.shape), len(mean))
            raise ValueError(msg)
        elif cov.ndim > 2:
            raise ValueError("Array 'cov' must be at most two-dimensional,"
                             " but cov.ndim = %d" % cov.ndim)
        return dim, mean, cov
    def _process_quantiles(self, x, dim):
        """
        Adjust quantiles array so that last axis labels the components of
        each data point.
        """
        x = np.asarray(x, dtype=float)
        if x.ndim == 0:
            x = x[np.newaxis]
        elif x.ndim == 1:
            if dim == 1:
                # 1-D distribution: each entry of x is a separate point.
                x = x[:, np.newaxis]
            else:
                # A single point in dim-dimensional space.
                x = x[np.newaxis, :]
        return x
    def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
        """
        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the log of the probability
            density function
        mean : ndarray
            Mean of the distribution
        prec_U : ndarray
            A decomposition such that np.dot(prec_U, prec_U.T)
            is the precision matrix, i.e. inverse of the covariance matrix.
        log_det_cov : float
            Logarithm of the determinant of the covariance matrix
        rank : int
            Rank of the covariance matrix.
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.
        """
        dev = x - mean
        # Mahalanobis term: (x-mu)^T Sigma^+ (x-mu) = ||(x-mu) @ prec_U||^2.
        maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
        return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
    def logpdf(self, x, mean=None, cov=1, allow_singular=False):
        """
        Log of the multivariate normal probability density function.
        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_mvn_doc_default_callparams)s
        Returns
        -------
        pdf : ndarray or scalar
            Log of the probability density function evaluated at `x`
        Notes
        -----
        %(_mvn_doc_callparams_note)s
        """
        dim, mean, cov = self._process_parameters(None, mean, cov)
        x = self._process_quantiles(x, dim)
        psd = _PSD(cov, allow_singular=allow_singular)
        out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
        return _squeeze_output(out)
    def pdf(self, x, mean=None, cov=1, allow_singular=False):
        """
        Multivariate normal probability density function.
        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_mvn_doc_default_callparams)s
        Returns
        -------
        pdf : ndarray or scalar
            Probability density function evaluated at `x`
        Notes
        -----
        %(_mvn_doc_callparams_note)s
        """
        dim, mean, cov = self._process_parameters(None, mean, cov)
        x = self._process_quantiles(x, dim)
        psd = _PSD(cov, allow_singular=allow_singular)
        # pdf = exp(logpdf); computed in log space for numerical stability.
        out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
        return _squeeze_output(out)
    def _cdf(self, x, mean, cov, maxpts, abseps, releps):
        """
        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the cumulative distribution function.
        mean : ndarray
            Mean of the distribution
        cov : array_like
            Covariance matrix of the distribution
        maxpts: integer
            The maximum number of points to use for integration
        abseps: float
            Absolute error tolerance
        releps: float
            Relative error tolerance
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'cdf' instead.
        .. versionadded:: 1.0.0
        """
        # CDF is the integral over the orthant (-inf, x].
        lower = np.full(mean.shape, -np.inf)
        # mvnun expects 1-d arguments, so process points sequentially
        func1d = lambda x_slice: mvn.mvnun(lower, x_slice, mean, cov,
                                           maxpts, abseps, releps)[0]
        out = np.apply_along_axis(func1d, -1, x)
        return _squeeze_output(out)
    def logcdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None,
               abseps=1e-5, releps=1e-5):
        """
        Log of the multivariate normal cumulative distribution function.
        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_mvn_doc_default_callparams)s
        maxpts: integer, optional
            The maximum number of points to use for integration
            (default `1000000*dim`)
        abseps: float, optional
            Absolute error tolerance (default 1e-5)
        releps: float, optional
            Relative error tolerance (default 1e-5)
        Returns
        -------
        cdf : ndarray or scalar
            Log of the cumulative distribution function evaluated at `x`
        Notes
        -----
        %(_mvn_doc_callparams_note)s
        .. versionadded:: 1.0.0
        """
        dim, mean, cov = self._process_parameters(None, mean, cov)
        x = self._process_quantiles(x, dim)
        # Use _PSD to check covariance matrix
        _PSD(cov, allow_singular=allow_singular)
        if not maxpts:
            maxpts = 1000000 * dim
        # NOTE(review): computed as log of the plain CDF, so a CDF of 0
        # yields -inf (with a runtime warning) rather than a log-space
        # integration -- confirm acceptable for extreme tails.
        out = np.log(self._cdf(x, mean, cov, maxpts, abseps, releps))
        return out
    def cdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None,
            abseps=1e-5, releps=1e-5):
        """
        Multivariate normal cumulative distribution function.
        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_mvn_doc_default_callparams)s
        maxpts: integer, optional
            The maximum number of points to use for integration
            (default `1000000*dim`)
        abseps: float, optional
            Absolute error tolerance (default 1e-5)
        releps: float, optional
            Relative error tolerance (default 1e-5)
        Returns
        -------
        cdf : ndarray or scalar
            Cumulative distribution function evaluated at `x`
        Notes
        -----
        %(_mvn_doc_callparams_note)s
        .. versionadded:: 1.0.0
        """
        dim, mean, cov = self._process_parameters(None, mean, cov)
        x = self._process_quantiles(x, dim)
        # Use _PSD to check covariance matrix
        _PSD(cov, allow_singular=allow_singular)
        if not maxpts:
            maxpts = 1000000 * dim
        out = self._cdf(x, mean, cov, maxpts, abseps, releps)
        return out
    def rvs(self, mean=None, cov=1, size=1, random_state=None):
        """
        Draw random samples from a multivariate normal distribution.
        Parameters
        ----------
        %(_mvn_doc_default_callparams)s
        size : integer, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s
        Returns
        -------
        rvs : ndarray or scalar
            Random variates of size (`size`, `N`), where `N` is the
            dimension of the random variable.
        Notes
        -----
        %(_mvn_doc_callparams_note)s
        """
        dim, mean, cov = self._process_parameters(None, mean, cov)
        # Delegate sampling to the (possibly caller-supplied) random state.
        random_state = self._get_random_state(random_state)
        out = random_state.multivariate_normal(mean, cov, size)
        return _squeeze_output(out)
    def entropy(self, mean=None, cov=1):
        """
        Compute the differential entropy of the multivariate normal.
        Parameters
        ----------
        %(_mvn_doc_default_callparams)s
        Returns
        -------
        h : scalar
            Entropy of the multivariate normal distribution
        Notes
        -----
        %(_mvn_doc_callparams_note)s
        """
        dim, mean, cov = self._process_parameters(None, mean, cov)
        # slogdet avoids overflow/underflow of the determinant for
        # high-dimensional covariance matrices.
        _, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
        return 0.5 * logdet
# Module-level singleton: its methods evaluate with ad-hoc parameters, and
# calling it returns a frozen multivariate normal distribution.
multivariate_normal = multivariate_normal_gen()
class multivariate_normal_frozen(multi_rv_frozen):
    def __init__(self, mean=None, cov=1, allow_singular=False, seed=None,
                 maxpts=None, abseps=1e-5, releps=1e-5):
        """
        Create a frozen multivariate normal distribution.

        Parameters
        ----------
        mean : array_like, optional
            Mean of the distribution (default zero)
        cov : array_like, optional
            Covariance matrix of the distribution (default one)
        allow_singular : bool, optional
            If this flag is True then tolerate a singular
            covariance matrix (default False).
        seed : None, int, RandomState or Generator instance, optional
            Object used for drawing random variates.  None selects the
            RandomState singleton used by np.random; an int seeds a new
            RandomState instance; an existing RandomState or Generator
            instance is used directly.  Default is None.
        maxpts : integer, optional
            The maximum number of points to use for integration of the
            cumulative distribution function (default is 1000000 times
            the dimension)
        abseps : float, optional
            Absolute error tolerance for the cumulative distribution
            function (default 1e-5)
        releps : float, optional
            Relative error tolerance for the cumulative distribution
            function (default 1e-5)

        Examples
        --------
        When called with the default parameters, this will create a 1D random
        variable with mean 0 and covariance 1:

        >>> from scipy.stats import multivariate_normal
        >>> r = multivariate_normal()
        >>> r.mean
        array([ 0.])
        >>> r.cov
        array([[1.]])
        """
        self._dist = multivariate_normal_gen(seed)
        # Validate the parameters once up front; every method reuses
        # these cached, fully-expanded arrays.
        self.dim, self.mean, self.cov = self._dist._process_parameters(
            None, mean, cov)
        self.cov_info = _PSD(self.cov, allow_singular=allow_singular)
        # The CDF integration budget defaults to 1e6 points per dimension.
        self.maxpts = maxpts if maxpts else 1000000 * self.dim
        self.abseps = abseps
        self.releps = releps

    def logpdf(self, x):
        # Evaluate the log-density from the cached pseudo-inverse factor,
        # pseudo-log-determinant and rank of the covariance.
        quantiles = self._dist._process_quantiles(x, self.dim)
        info = self.cov_info
        result = self._dist._logpdf(quantiles, self.mean, info.U,
                                    info.log_pdet, info.rank)
        return _squeeze_output(result)

    def pdf(self, x):
        return np.exp(self.logpdf(x))

    def logcdf(self, x):
        return np.log(self.cdf(x))

    def cdf(self, x):
        quantiles = self._dist._process_quantiles(x, self.dim)
        result = self._dist._cdf(quantiles, self.mean, self.cov, self.maxpts,
                                 self.abseps, self.releps)
        return _squeeze_output(result)

    def rvs(self, size=1, random_state=None):
        return self._dist.rvs(self.mean, self.cov, size, random_state)

    def entropy(self):
        """
        Computes the differential entropy of the multivariate normal.

        Returns
        -------
        h : scalar
            Entropy of the multivariate normal distribution
        """
        info = self.cov_info
        # 0.5 * (k * (log(2*pi) + 1) + log|Sigma|), with the rank k and the
        # pseudo-log-determinant standing in for a possibly singular cov.
        return 0.5 * (info.rank * (_LOG_2PI + 1) + info.log_pdet)
_mvt_doc_default_callparams = \
"""
loc : array_like, optional
Location of the distribution. (default ``0``)
shape : array_like, optional
Positive semidefinite matrix of the distribution. (default ``1``)
df : float, optional
Degrees of freedom of the distribution; must be greater than zero.
If ``np.inf`` then results are multivariate normal. The default is ``1``.
allow_singular : bool, optional
Whether to allow a singular matrix. (default ``False``)
"""
_mvt_doc_callparams_note = \
"""Setting the parameter `loc` to ``None`` is equivalent to having `loc`
be the zero-vector. The parameter `shape` can be a scalar, in which case
the shape matrix is the identity times that value, a vector of
diagonal entries for the shape matrix, or a two-dimensional array_like.
"""
_mvt_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mvt_docdict_params = {
'_mvt_doc_default_callparams': _mvt_doc_default_callparams,
'_mvt_doc_callparams_note': _mvt_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mvt_docdict_noparams = {
'_mvt_doc_default_callparams': "",
'_mvt_doc_callparams_note': _mvt_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_t_gen(multi_rv_generic):
    r"""
    A multivariate t-distributed random variable.
    The `loc` parameter specifies the location. The `shape` parameter specifies
    the positive semidefinite shape matrix. The `df` parameter specifies the
    degrees of freedom.
    In addition to calling the methods below, the object itself may be called
    as a function to fix the location, shape matrix, and degrees of freedom
    parameters, returning a "frozen" multivariate t-distribution random.
    Methods
    -------
    ``pdf(x, loc=None, shape=1, df=1, allow_singular=False)``
        Probability density function.
    ``logpdf(x, loc=None, shape=1, df=1, allow_singular=False)``
        Log of the probability density function.
    ``rvs(loc=None, shape=1, df=1, size=1, random_state=None)``
        Draw random samples from a multivariate t-distribution.
    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
    %(_mvt_doc_default_callparams)s
    %(_doc_random_state)s
    Notes
    -----
    %(_mvt_doc_callparams_note)s
    The matrix `shape` must be a (symmetric) positive semidefinite matrix. The
    determinant and inverse of `shape` are computed as the pseudo-determinant
    and pseudo-inverse, respectively, so that `shape` does not need to have
    full rank.
    The probability density function for `multivariate_t` is
    .. math::
        f(x) = \frac{\Gamma(\nu + p)/2}{\Gamma(\nu/2)\nu^{p/2}\pi^{p/2}|\Sigma|^{1/2}}
               \exp\left[1 + \frac{1}{\nu} (\mathbf{x} - \boldsymbol{\mu})^{\top}
               \boldsymbol{\Sigma}^{-1}
               (\mathbf{x} - \boldsymbol{\mu}) \right]^{-(\nu + p)/2},
    where :math:`p` is the dimension of :math:`\mathbf{x}`,
    :math:`\boldsymbol{\mu}` is the :math:`p`-dimensional location,
    :math:`\boldsymbol{\Sigma}` the :math:`p \times p`-dimensional shape
    matrix, and :math:`\nu` is the degrees of freedom.
    .. versionadded:: 1.6.0
    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy.stats import multivariate_t
    >>> x, y = np.mgrid[-1:3:.01, -2:1.5:.01]
    >>> pos = np.dstack((x, y))
    >>> rv = multivariate_t([1.0, -0.5], [[2.1, 0.3], [0.3, 1.5]], df=2)
    >>> fig, ax = plt.subplots(1, 1)
    >>> ax.set_aspect('equal')
    >>> plt.contourf(x, y, rv.pdf(pos))
    """
    def __init__(self, seed=None):
        """
        Initialize a multivariate t-distributed random variable.
        Parameters
        ----------
        seed : Random state.
        """
        super(multivariate_t_gen, self).__init__(seed)
        # Fill in the %(...)s docstring templates at instantiation time.
        self.__doc__ = doccer.docformat(self.__doc__, mvt_docdict_params)
        self._random_state = check_random_state(seed)
    def __call__(self, loc=None, shape=1, df=1, allow_singular=False,
                 seed=None):
        """
        Create a frozen multivariate t-distribution. See
        `multivariate_t_frozen` for parameters.
        """
        # Infinite degrees of freedom reduce to the multivariate normal.
        if df == np.inf:
            return multivariate_normal_frozen(mean=loc, cov=shape,
                                              allow_singular=allow_singular,
                                              seed=seed)
        return multivariate_t_frozen(loc=loc, shape=shape, df=df,
                                     allow_singular=allow_singular, seed=seed)
    def pdf(self, x, loc=None, shape=1, df=1, allow_singular=False):
        """
        Multivariate t-distribution probability density function.
        Parameters
        ----------
        x : array_like
            Points at which to evaluate the probability density function.
        %(_mvt_doc_default_callparams)s
        Returns
        -------
        pdf : Probability density function evaluated at `x`.
        Examples
        --------
        >>> from scipy.stats import multivariate_t
        >>> x = [0.4, 5]
        >>> loc = [0, 1]
        >>> shape = [[1, 0.1], [0.1, 1]]
        >>> df = 7
        >>> multivariate_t.pdf(x, loc, shape, df)
        array([0.00075713])
        """
        dim, loc, shape, df = self._process_parameters(loc, shape, df)
        x = self._process_quantiles(x, dim)
        shape_info = _PSD(shape, allow_singular=allow_singular)
        # Computed in log space for numerical stability, then exponentiated.
        logpdf = self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df,
                              dim, shape_info.rank)
        return np.exp(logpdf)
    def logpdf(self, x, loc=None, shape=1, df=1):
        """
        Log of the multivariate t-distribution probability density function.
        Parameters
        ----------
        x : array_like
            Points at which to evaluate the log of the probability density
            function.
        %(_mvt_doc_default_callparams)s
        Returns
        -------
        logpdf : Log of the probability density function evaluated at `x`.
        Examples
        --------
        >>> from scipy.stats import multivariate_t
        >>> x = [0.4, 5]
        >>> loc = [0, 1]
        >>> shape = [[1, 0.1], [0.1, 1]]
        >>> df = 7
        >>> multivariate_t.logpdf(x, loc, shape, df)
        array([-7.1859802])
        See Also
        --------
        pdf : Probability density function.
        """
        dim, loc, shape, df = self._process_parameters(loc, shape, df)
        x = self._process_quantiles(x, dim)
        # NOTE(review): unlike `pdf`, this method has no `allow_singular`
        # parameter, so _PSD uses its default (allow_singular=True) --
        # confirm whether the asymmetry with `pdf` is intentional.
        shape_info = _PSD(shape)
        return self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df, dim,
                            shape_info.rank)
    def _logpdf(self, x, loc, prec_U, log_pdet, df, dim, rank):
        """Utility method `pdf`, `logpdf` for parameters.
        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the log of the probability density
            function.
        loc : ndarray
            Location of the distribution.
        prec_U : ndarray
            A decomposition such that `np.dot(prec_U, prec_U.T)` is the inverse
            of the shape matrix.
        log_pdet : float
            Logarithm of the determinant of the shape matrix.
        df : float
            Degrees of freedom of the distribution.
        dim : int
            Dimension of the quantiles x.
        rank : int
            Rank of the shape matrix.
        Notes
        -----
        As this function does no argument checking, it should not be called
        directly; use 'logpdf' instead.
        """
        # Infinite df: the t density coincides with the normal density.
        if df == np.inf:
            return multivariate_normal._logpdf(x, loc, prec_U, log_pdet, rank)
        dev = x - loc
        # Mahalanobis term: (x-loc)^T Sigma^+ (x-loc) = ||(x-loc) @ prec_U||^2.
        maha = np.square(np.dot(dev, prec_U)).sum(axis=-1)
        t = 0.5 * (df + dim)
        # Terms of the log-density: A = log Gamma((df+dim)/2),
        # B = log Gamma(df/2), C = (dim/2) log(df*pi),
        # D = 0.5 log|Sigma|, E = -((df+dim)/2) log(1 + maha/df).
        A = gammaln(t)
        B = gammaln(0.5 * df)
        C = dim/2. * np.log(df * np.pi)
        D = 0.5 * log_pdet
        E = -t * np.log(1 + (1./df) * maha)
        return _squeeze_output(A - B - C - D + E)
    def rvs(self, loc=None, shape=1, df=1, size=1, random_state=None):
        """
        Draw random samples from a multivariate t-distribution.
        Parameters
        ----------
        %(_mvt_doc_default_callparams)s
        size : integer, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s
        Returns
        -------
        rvs : ndarray or scalar
            Random variates of size (`size`, `P`), where `P` is the
            dimension of the random variable.
        Examples
        --------
        >>> from scipy.stats import multivariate_t
        >>> x = [0.4, 5]
        >>> loc = [0, 1]
        >>> shape = [[1, 0.1], [0.1, 1]]
        >>> df = 7
        >>> multivariate_t.rvs(loc, shape, df)
        array([[0.93477495, 3.00408716]])
        """
        # For implementation details, see equation (3):
        #
        #     Hofert, "On Sampling from the Multivariatet Distribution", 2013
        #     http://rjournal.github.io/archive/2013-2/hofert.pdf
        #
        dim, loc, shape, df = self._process_parameters(loc, shape, df)
        if random_state is not None:
            rng = check_random_state(random_state)
        else:
            rng = self._random_state
        # t variate = loc + normal / sqrt(chi2/df); infinite df degenerates
        # to the plain multivariate normal (divisor is identically 1).
        if np.isinf(df):
            x = np.ones(size)
        else:
            x = rng.chisquare(df, size=size) / df
        z = rng.multivariate_normal(np.zeros(dim), shape, size=size)
        samples = loc + z / np.sqrt(x)[:, None]
        return _squeeze_output(samples)
    def _process_quantiles(self, x, dim):
        """
        Adjust quantiles array so that last axis labels the components of
        each data point.
        """
        x = np.asarray(x, dtype=float)
        if x.ndim == 0:
            x = x[np.newaxis]
        elif x.ndim == 1:
            if dim == 1:
                # 1-D distribution: each entry of x is a separate point.
                x = x[:, np.newaxis]
            else:
                # A single point in dim-dimensional space.
                x = x[np.newaxis, :]
        return x
    def _process_parameters(self, loc, shape, df):
        """
        Infer dimensionality from location array and shape matrix, handle
        defaults, and ensure compatible dimensions.
        """
        if loc is None and shape is None:
            loc = np.asarray(0, dtype=float)
            shape = np.asarray(1, dtype=float)
            dim = 1
        elif loc is None:
            shape = np.asarray(shape, dtype=float)
            if shape.ndim < 2:
                dim = 1
            else:
                dim = shape.shape[0]
            loc = np.zeros(dim)
        elif shape is None:
            loc = np.asarray(loc, dtype=float)
            dim = loc.size
            shape = np.eye(dim)
        else:
            shape = np.asarray(shape, dtype=float)
            loc = np.asarray(loc, dtype=float)
            dim = loc.size
        if dim == 1:
            loc.shape = (1,)
            shape.shape = (1, 1)
        if loc.ndim != 1 or loc.shape[0] != dim:
            raise ValueError("Array 'loc' must be a vector of length %d." %
                             dim)
        if shape.ndim == 0:
            # Scalar shape: isotropic, identity scaled by the value.
            shape = shape * np.eye(dim)
        elif shape.ndim == 1:
            # 1-D shape: interpreted as the diagonal entries.
            shape = np.diag(shape)
        elif shape.ndim == 2 and shape.shape != (dim, dim):
            rows, cols = shape.shape
            # NOTE(review): these messages say 'cov' although the parameter
            # here is named `shape` (text inherited from the normal case) --
            # confirm before changing the user-visible wording.
            if rows != cols:
                msg = ("Array 'cov' must be square if it is two dimensional,"
                       " but cov.shape = %s." % str(shape.shape))
            else:
                msg = ("Dimension mismatch: array 'cov' is of shape %s,"
                       " but 'loc' is a vector of length %d.")
                msg = msg % (str(shape.shape), len(loc))
            raise ValueError(msg)
        elif shape.ndim > 2:
            raise ValueError("Array 'cov' must be at most two-dimensional,"
                             " but cov.ndim = %d" % shape.ndim)
        # Process degrees of freedom.
        if df is None:
            df = 1
        elif df <= 0:
            raise ValueError("'df' must be greater than zero.")
        elif np.isnan(df):
            raise ValueError("'df' is 'nan' but must be greater than zero or 'np.inf'.")
        return dim, loc, shape, df
class multivariate_t_frozen(multi_rv_frozen):
    def __init__(self, loc=None, shape=1, df=1, allow_singular=False,
                 seed=None):
        """
        Create a frozen multivariate t distribution.

        Parameters
        ----------
        loc : array_like, optional
            Location of the distribution. (default 0)
        shape : array_like, optional
            Positive semidefinite matrix of the distribution. (default 1)
        df : float, optional
            Degrees of freedom; must be greater than zero. (default 1)
        allow_singular : bool, optional
            Whether to allow a singular shape matrix. (default False)
        seed : None, int, RandomState or Generator instance, optional
            Object used for drawing random variates.

        Examples
        --------
        >>> loc = np.zeros(3)
        >>> shape = np.eye(3)
        >>> df = 10
        >>> dist = multivariate_t(loc, shape, df)
        >>> dist.rvs()
        array([[ 0.81412036, -1.53612361, 0.42199647]])
        >>> dist.pdf([1, 1, 1])
        array([0.01237803])
        """
        self._dist = multivariate_t_gen(seed)
        # Validate parameters once up front; every method reuses the
        # cached, fully-expanded values.
        dim, loc, shape, df = self._dist._process_parameters(loc, shape, df)
        self.dim, self.loc, self.shape, self.df = dim, loc, shape, df
        self.shape_info = _PSD(shape, allow_singular=allow_singular)

    def logpdf(self, x):
        quantiles = self._dist._process_quantiles(x, self.dim)
        info = self.shape_info
        return self._dist._logpdf(quantiles, self.loc, info.U, info.log_pdet,
                                  self.df, self.dim, info.rank)

    def pdf(self, x):
        return np.exp(self.logpdf(x))

    def rvs(self, size=1, random_state=None):
        return self._dist.rvs(loc=self.loc, shape=self.shape, df=self.df,
                              size=size, random_state=random_state)
# Module-level singleton; calling it returns a frozen multivariate t.
multivariate_t = multivariate_t_gen()
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_t_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
    method = multivariate_t_gen.__dict__[name]
    method_frozen = multivariate_t_frozen.__dict__[name]
    # Frozen methods get the "noparams" fragments (parameters are documented
    # on the class); generator methods get the full parameter fragments.
    method_frozen.__doc__ = doccer.docformat(method.__doc__,
                                             mvt_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, mvt_docdict_params)
| |
# -*- coding: utf-8 -*-
#
# Makahiki documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 6 10:22:39 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import types
# NOTE(review): `types` appears unused in this portion of the file; confirm
# it is needed further down before removing.
sys.path.append(os.path.abspath('../makahiki'))
#sys.path.append(os.path.abspath('../makahiki/apps'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinx.ext.todo']
# Toggle inclusion of todos
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Makahiki'
copyright = u'2015, Yongwen Xu, George Lee, Carleton Moore, Philip Johnson, Robert Brewer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Makahikidoc'
# -- Options for LaTeX output --------------------------------------------------
# All keys below are commented out, so this evaluates to an empty dict and
# the LaTeX builder falls back to its defaults.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Makahiki.tex', u'Makahiki Documentation',
u'Yongwen Xu, George Lee, Carleton Moore, Philip Johnson, Robert Brewer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'makahiki', u'Makahiki Documentation',
[u'Yongwen Xu, George Lee, Carleton Moore, Philip Johnson, Robert Brewer'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Makahiki', u'Makahiki Documentation',
u'Yongwen Xu, George Lee, Carleton Moore, Philip Johnson, Robert Brewer', 'Makahiki', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
#=================================================================================
# Auto-documenting Django Models with Sphinx
# Credit: http://djangosnippets.org/snippets/2533/
#=================================================================================
#THIS_DIR = os.path.dirname(__file__)
#PROJECT_DIR = os.path.join(THIS_DIR, 'relative/path/to/your/project/')
#sys.path.append(PROJECT_DIR)
import inspect
#import settings
#from django.core.management import setup_environ
from django.utils.html import strip_tags
from django.utils.encoding import force_unicode
#setup_environ(settings)
def process_docstring(app, what, name, obj, options, lines):
    """Append Django model fields to the autodoc docstring as :param:/:type:.

    For each field of a Django model, the (html-stripped) help text — or,
    failing that, the capitalized verbose name — becomes the parameter
    description, followed by the field class name as its type.
    """
    # This causes import errors if left outside the function
    from django.db import models
    # Anything that is not a Django model class passes through untouched.
    if not (inspect.isclass(obj) and issubclass(obj, models.Model)):
        return lines
    for field in obj._meta.fields:
        # Prefer the field's help text, stripped of any html markup;
        # fall back to the capitalized verbose name when it is empty.
        description = strip_tags(force_unicode(field.help_text))
        if not description:
            description = force_unicode(field.verbose_name).capitalize()
        lines.append(u':param %s: %s' % (field.attname, description))
        # Document the field's type as the Django field class name.
        lines.append(u':type %s: %s' % (field.attname, type(field).__name__))
    # Return the extended docstring
    return lines
def skip_challenge_def_in_settings(app, what, name, obj, skip, options):
    """Hide the "Challenge" member so settings autodoc shows only env vars.

    Returning True tells autodoc to skip the member; returning None defers
    to the default skipping behavior for everything else.
    """
    return True if name == "Challenge" else None
def setup(app):
    """Register the custom autodoc hooks with Sphinx."""
    hooks = (
        ('autodoc-process-docstring', process_docstring),
        ('autodoc-skip-member', skip_challenge_def_in_settings),
    )
    for event, handler in hooks:
        app.connect(event, handler)
#=================================================================================
# Mocking out modules that readthedocs can't import.
# Credit: http://read-the-docs.readthedocs.org/en/latest/faq.html
# Unfortunately, this doesn't work
#=================================================================================
class Mock(object):
    """Stand-in for modules that readthedocs cannot import.

    Attribute access fabricates plausible objects: dunder module
    attributes become a harmless path, capitalized names become empty
    classes, and anything else becomes another Mock.
    """

    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        # Calling a mock yields another mock so chained calls keep working.
        return Mock()

    @classmethod
    def __getattr__(self, name):
        if name in ('__file__', '__path__'):
            return '/dev/null'
        # Capitalized names look like classes: fabricate an empty type.
        if name[0] == name[0].upper():
            return type(name, (), {})
        return Mock()

    @staticmethod
    def parse(url):
        return {}
# Build-environment detection: Read the Docs sets READTHEDOCS=True.
# NOTE(review): relies on os/sys/types being imported earlier in the
# file — confirm those imports exist above this fragment.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    # Modules that only need to exist as importable packages: register
    # empty placeholder modules for them.
    TYPE_MODULES = ['south', 'staticfiles', 'django_nose']
    for mod_name in TYPE_MODULES:
        sys.modules[mod_name] = types.ModuleType(mod_name)
        sys.modules[mod_name].__path__ = '/dev/null'
    # Modules whose attributes actually get used: replace with the Mock
    # class defined above.
    MOCK_MODULES = ['markdown', 'dj_database_url']
    for mod_name in MOCK_MODULES:
        sys.modules[mod_name] = Mock()
#setup django
#import settings
#from django.core.management import setup_environ
#setup_environ(settings)
# Point Django at the project settings module so autodoc can import models.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
| |
import fcntl
import os
import os.path
import sys
import shutil
import platform
import argparse
import flare
import flare_submit
import astropy.units as units
from astropy.cosmology import Planck15 as cosmo
import math
import numpy
import subprocess
import time
import string
# This script controls runs for a multidimensional parameter space survey.
# The survey compares Fisher v Bayes, mergers v not and higher-modes v 22 only.
# Progress is recorded and new runs are issued based on a status list file.
# First step is setting up the parameter cases:
Ms = [ "1e2", "1e4", "1e6" ]    # total-mass cases (kept as strings for tags)
qs = [ "2", "8" ]               # mass-ratio cases
incs = [ "12", "03", "02" ]     # inclination divisors: inc = pi / <value>
zs = [ "1", "4", "20" ]         # redshift cases
# Corresponding distances from astropy.cosmology.Planck15, in Mpc
lum_dist = {"1": 6791.811049825756, "4": 36697.04066923789, "20": 230421.93422332808}
modes = [ "lm2", "all" ]        # mode content: l=m=2 only, or all modes
wfs = [ "full", "insp" ]        # waveform option ("full" or "insp" — presumably inspiral-only)
# orientation: ref (lambda=3pi/4,phi0=pi/3,pol=pi/3,beta=pi/3)
codes = [ "b", "p" ]            # code option ('p' adds --mcmc; see get_code_flag)
print "run_control invoked with args: ", sys.argv
# parse args
parser = argparse.ArgumentParser(description="Control runs for parameter space survey");
parser.add_argument('name', help="The basename for the status list")
parser.add_argument('-g', help="Generate a new status list (must not exist).", action="store_true")
parser.add_argument('-m', help="select modes option "+str(modes), default=modes[0])
parser.add_argument('-z', help="select redshift option "+str(zs), default=zs[0])
parser.add_argument('-c', help="select code option "+str(codes), default=codes[0])
parser.add_argument('-s', help="status to seek and operate on.", default="new")
parser.add_argument('-f', help="comma-separated extra flags for flare_submit.", default="")
parser.add_argument('--set', help="set status of tag to stat.", nargs=2, metavar=("tag", "stat"))
parser.add_argument('-x', help="expect or require this status for set.", default="")
args = parser.parse_args()
# NOTE(review): string.replace() exists only in Python 2 — consistent with
# the print statements, this script targets Python 2.
extra_flags = string.replace(args.f, ",", " ")
print "pass-through flags: '", extra_flags, "'"
# A fresh list of runs is generated by: generate_status_list
# The list will contain run_tag - status pairs
# These are processed by get_next_tag, write_status
# Status list access is controlled so that only one process can access the file at a time.
#tags = ["new","processing","submitted","running","need_restart","need_analysis","done"]
def read_file_data(fd):
    """Read an open status file into a list of whitespace-split rows.

    The trailing character (newline) of each line is stripped before
    splitting; blank lines are dropped.
    """
    rows = (line[:-1].split() for line in fd)
    return [fields for fields in rows if fields]
def write_file_data(fd, data):
    """Write rows to an open status file, one row per line.

    Each field is followed by a single space (the format that
    read_file_data parses back).
    """
    for fields in data:
        #print "row=",row
        fd.write("".join(field + " " for field in fields) + "\n")
def generate_tag(m, q, z, inc, mode, wf, code):
    """Compose the canonical underscore-delimited run tag for one case."""
    return "_".join((m, q, z, inc, mode, wf, code, "p"))
def read_tag(tag):
    """Split a run tag back into its named parameter fields."""
    field_names = ("M", "q", "z", "inc", "modes", "wf", "code", "pars")
    return dict(zip(field_names, tag.split('_')))
def generate_status_list(status_file, z, mode, code):
    """Create a fresh status file with one "new" entry per parameter case.

    The cases span the module-level Ms/qs/incs/wfs grids at the fixed
    redshift, mode, and code choices given.  Raises OSError if the
    status file already exists (O_EXCL).
    """
    data = [[generate_tag(M, q, z, inc, mode, wf, code), "new"]
            for M in Ms
            for q in qs
            for inc in incs
            for wf in wfs]
    # open the file but not if it exists
    osfd = os.open(status_file, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
    with os.fdopen(osfd, 'w') as fd:
        write_file_data(fd, data)
def get_next_tag(status_file, seek_status):
    """Atomically claim the next run whose status starts with seek_status.

    Takes an exclusive lock on the status file, finds an entry whose
    status matches, marks it "processing", and rewrites the file.
    Returns (tag, matched_status), or (None, "") when nothing matches.
    """
    import errno  # bug fix: errno was referenced below but never imported
    with open(status_file, "r+") as fd:
        # Spin until we hold an exclusive lock on the status file.
        while True:
            try:
                fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                break
            except IOError as e:
                if e.errno != errno.EAGAIN:
                    raise
                time.sleep(0.1)
        data = read_file_data(fd)
        print(data)
        vals = numpy.array(data)[:, 1].tolist()
        # Keep the LAST matching status string, then claim the FIRST row
        # carrying that exact status value (preserved original behavior).
        found_status = ""
        for val in vals:
            if val.startswith(seek_status):
                found_status = val
        if len(found_status) > 0:
            i = vals.index(found_status)
            tag = data[i][0]
            data[i][1] = "processing"
            # Rewrite the whole file with the claimed entry updated.
            fd.seek(0)
            fd.truncate()
            write_file_data(fd, data)
        else:
            tag = None
        fcntl.flock(fd, fcntl.LOCK_UN)
    return tag, found_status
def write_status(status_file, tag, new_status, expect=""):
    """Set tag's status to new_status under an exclusive file lock.

    If expect is non-empty, the update only happens when the current
    status starts with expect.  An unknown tag is reported, not raised.
    """
    import errno  # bug fix: errno was referenced below but never imported
    with open(status_file, "r+") as fd:
        # Spin until we hold an exclusive lock on the status file.
        while True:
            try:
                fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                break
            except IOError as e:
                if e.errno != errno.EAGAIN:
                    raise
                time.sleep(0.1)
        data = read_file_data(fd)
        tags = numpy.array(data)[:, 0].tolist()
        #print "tags=",tags
        if tag in tags:
            i = tags.index(tag)
            # Only apply the update when the current status matches the
            # expected prefix (or no expectation was given).
            if expect == "" or expect == data[i][1][:len(expect)]:
                data[i][1] = new_status
                fd.seek(0)
                fd.truncate()
                write_file_data(fd, data)
        else:
            print("tag '" + tag + "' not found")
        fcntl.flock(fd, fcntl.LOCK_UN)
def get_snr(tag):
    """Return the target SNR encoded in the tag's z field, or -1 if absent.

    An SNR-style z field looks like "snr<value>"; the numeric part after
    the three-character prefix is returned as a float.
    """
    zfield = read_tag(tag)["z"]
    return float(zfield[3:]) if "snr" in zfield else -1
def get_params_string(tag):
    """Build the space-separated physical-parameter string for flare_submit.

    Decodes the tag into component masses, luminosity distance, and the
    fixed reference orientation angles, then orders the values according
    to flare_submit.parnames.
    """
    p = read_tag(tag)
    mtot = float(p["M"])
    q = float(p["q"])
    snr = 0  # NOTE(review): assigned but never used in this function
    if "snr" in p["z"]:
        # SNR-specified runs carry no distance here (set elsewhere).
        d = 0;
    else:
        # Luminosity distance in Mpc from the Planck15 cosmology.
        d = float(cosmo.luminosity_distance(float(p["z"])) / units.Mpc)
    # Inclination encoded as a divisor of pi (e.g. "12" -> pi/12).
    inc = math.pi / float(p["inc"])
    # Component masses from total mass and mass ratio.
    m1 = mtot / (1.0 + 1.0 / q)
    m2 = mtot / (1.0 + q)
    t0 = 0
    # Fixed "ref" orientation: lambda=3pi/4, phi0=pi/3, pol=pi/3, beta=pi/3.
    phi0 = math.pi / 3.0
    beta = math.pi / 3.0
    lamb = 3.0 * math.pi / 4.0
    pol = math.pi / 3.0
    val = {"m1": m1, "m2": m2, "tRef": t0, "phiRef": phi0, "distance": d,
           "lambda": lamb, "beta": beta, "inclination": inc,
           "polarization": pol}
    # Emit the values in the order flare_submit expects, space-separated.
    s = ""
    for par in flare_submit.parnames:
        s = s + str(val[par]) + " "
    return s
def get_code_flag(tag):
    """Return the extra command flag for the tag's code choice.

    Code 'p' selects the MCMC path via "--mcmc "; any other code adds
    nothing.
    """
    return "--mcmc " if read_tag(tag)["code"] == 'p' else ""
def find_and_process(status_file, stat):
    """Claim and process every run whose status matches stat.

    Supported actions: "new" (generate and submit a job script),
    "restart_at<label>" (resubmit from a checkpoint label), "unpack"
    (import Sylvain's externally-produced runs), and "needFisher" (run
    the Fisher-matrix step).  Uses the module-level system / submit /
    no_wait_submit / extra_flags settings.

    NOTE(review): indentation was reconstructed for this block — in
    particular, verify which env-setup lines belong inside the
    no_wait_submit branch of the needFisher action.
    """
    #actions=["new","restart_at","needs_analysis"]
    actions = ["new", "restart_at", "unpack", "needFisher"]
    if(not stat in actions):
        print "Nothing defined for action: ", stat
        sys.exit("Quitting.")
    # Keep claiming matching entries until the status file has none left.
    tag, found = get_next_tag(status_file, stat)
    while(tag != None):
        print "processing tag=", tag
        if(stat == actions[0]):  #new
            #first make the submission script file
            argv = tag+" "+str(get_snr(tag))+" "+get_code_flag(tag)+" -p "+get_params_string(tag)+" "+extra_flags
            argv = argv.split()
            subfile = flare_submit.generate(system, argv, status_file)
            print "******* cwd=", os.getcwd();
            cmd = submit+" "+subfile
            i = subprocess.call(cmd, shell=True)
            print "******* -->", i
            # Batch submission returns before the job runs; record it now.
            if(no_wait_submit): write_status(status_file, tag, 'submitted')
        elif(stat == actions[1]):  # restart_at<label>
            # The checkpoint label follows "restart_at" in the status string.
            restart_label = found[11:]
            argv = tag+" "+str(get_snr(tag))+" "+get_code_flag(tag)+" -p "+get_params_string(tag)+" "+extra_flags
            argv = argv.split()
            argv.append("-r="+restart_label)
            subfile = flare_submit.generate(system, argv, status_file)
            print "R******* cwd=", os.getcwd();
            cmd = submit+" "+subfile
            print cmd
            subprocess.call(cmd, shell=True)
            print "R*******"
            if(no_wait_submit): write_status(status_file, tag, 'restart_submitted')
        elif(stat == actions[2]):  #unpack sylvains runs
            sourcedir = "SylvainsRuns/"
            #mkdir
            dir = tag+"-sylvains_run"
            if(not os.path.exists(dir)):
                os.mkdir(dir)
            # Copy the posterior samples and parameter file into the run dir.
            shutil.copyfile(sourcedir+tag+"_post_equal_weights.dat", dir+"/"+tag+"post_equal_weights.dat")
            shutil.copy(sourcedir+tag+"_params.txt", dir)
            # Unpacked runs proceed straight to the Fisher step.
            write_status(status_file, tag, actions[3])
        elif(stat == actions[3]):  #needs Fisher
            #set up params
            dir = tag+"-sylvains_run"
            parstring = get_params_string(tag)
            params = [float(p) for p in parstring.split()]
            print "params=", params
            snr = get_snr(tag)
            flags = flare.set_flare_flags(snr, params)
            if(read_tag(tag)["modes"] == "lm2"):
                flags += " --nbmodeinj 1 --nbmodetemp 1"  #for no higher modes in injection and template
            fcmd = flare.getFisherCommand(tag+"-Fisher")+" "+flags
            fcmd = fcmd+" 1>"+tag+"-Fisher.out 2>&1 "
            if(no_wait_submit):
                fcmd = ". ${MODULESHOME}/init/sh;module load other/SSSO_Ana-PyD/SApd_2.4.0_py2.7;module purge;module load comp/intel-15.0.3.187 lib/mkl-15.0.3.187 mpi/impi-5.0.3.048;"+fcmd  #specific to discover environment, but expedient...
                fcmd = "export ROM_DATA_PATH="+flare.flare_dir+"/"+flare.ROM_DATA_PATH+";"+fcmd
            fcmd = "cd "+dir+";"+fcmd
            print fcmd
            subprocess.call(fcmd, shell=True)
            write_status(status_file, tag, 'done')
        else:
            print "No action to take for stat='", str(stat), "'"
        tag, found = get_next_tag(status_file, stat)
# Detect the batch system: default to the "discover" cluster (SLURM
# sbatch); on macOS run the generated submission script directly via tcsh.
system = "discover"
submit = "sbatch "
no_wait_submit = True
if(platform.system() == "Darwin"):
    system = "macos"
    submit = "tcsh "
    no_wait_submit = False
# Resolve the status-file path: use args.name verbatim when it already
# names a status file, otherwise derive it from the basename in the cwd.
if("status_file.txt" in args.name):
    statusfile = args.name
else:
    statusfile = os.getcwd()+"/"+args.name+"_status_file.txt"
if(args.g):
    generate_status_list(statusfile, args.z, args.m, args.c)
# Either set one tag's status explicitly (--set, optionally guarded by
# -x), or claim-and-process every entry matching the requested status.
if(args.set is not None):
    write_status(statusfile, args.set[0], args.set[1], args.x)
elif(len(args.s) > 0):
    find_and_process(statusfile, args.s)
| |
"""Test zha light."""
from datetime import timedelta
import pytest
import zigpy.profiles.zha as zha
import zigpy.types
import zigpy.zcl.clusters.general as general
import zigpy.zcl.clusters.lighting as lighting
import zigpy.zcl.foundation as zcl_f
from homeassistant.components.light import DOMAIN, FLASH_LONG, FLASH_SHORT
from homeassistant.components.zha.core.group import GroupMember
from homeassistant.components.zha.light import FLASH_EFFECTS
from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE
import homeassistant.util.dt as dt_util
from .common import (
async_enable_traffic,
async_find_group_entity_id,
async_test_rejoin,
find_entity_id,
get_zha_gateway,
send_attributes_report,
)
from tests.async_mock import AsyncMock, MagicMock, call, patch, sentinel
from tests.common import async_fire_time_changed
# ZCL On/Off command ids asserted against OnOff.request calls.
ON = 1
OFF = 0
# Distinct IEEE addresses for the three group-capable test lights.
IEEE_GROUPABLE_DEVICE = "01:2d:6f:00:0a:90:69:e8"
IEEE_GROUPABLE_DEVICE2 = "02:2d:6f:00:0a:90:69:e9"
IEEE_GROUPABLE_DEVICE3 = "03:2d:6f:00:0a:90:69:e7"
# Endpoint signatures for the three simulated light flavors:
# bare on/off, dimmable (adds LevelControl), and color-dimmable
# (adds lighting.Color).
LIGHT_ON_OFF = {
    1: {
        "device_type": zigpy.profiles.zha.DeviceType.ON_OFF_LIGHT,
        "in_clusters": [
            general.Basic.cluster_id,
            general.Identify.cluster_id,
            general.OnOff.cluster_id,
        ],
        "out_clusters": [general.Ota.cluster_id],
    }
}
LIGHT_LEVEL = {
    1: {
        "device_type": zigpy.profiles.zha.DeviceType.DIMMABLE_LIGHT,
        "in_clusters": [
            general.Basic.cluster_id,
            general.LevelControl.cluster_id,
            general.OnOff.cluster_id,
        ],
        "out_clusters": [general.Ota.cluster_id],
    }
}
LIGHT_COLOR = {
    1: {
        "device_type": zigpy.profiles.zha.DeviceType.COLOR_DIMMABLE_LIGHT,
        "in_clusters": [
            general.Basic.cluster_id,
            general.Identify.cluster_id,
            general.LevelControl.cluster_id,
            general.OnOff.cluster_id,
            lighting.Color.cluster_id,
        ],
        "out_clusters": [general.Ota.cluster_id],
    }
}
@pytest.fixture
async def coordinator(hass, zigpy_device_mock, zha_device_joined):
    """Simulated coordinator device (Groups server cluster, nwk 0x0000)."""
    zigpy_device = zigpy_device_mock(
        {
            1: {
                "in_clusters": [general.Groups.cluster_id],
                "out_clusters": [],
                "device_type": zha.DeviceType.COLOR_DIMMABLE_LIGHT,
            }
        },
        ieee="00:15:8d:00:02:32:4f:32",
        nwk=0x0000,
        node_descriptor=b"\xf8\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff",
    )
    zha_device = await zha_device_joined(zigpy_device)
    # Mark available so the entity is usable immediately in tests.
    zha_device.available = True
    return zha_device


@pytest.fixture
async def device_light_1(hass, zigpy_device_mock, zha_device_joined):
    """Groupable color-dimmable light #1 (IEEE_GROUPABLE_DEVICE)."""
    zigpy_device = zigpy_device_mock(
        {
            1: {
                "in_clusters": [
                    general.OnOff.cluster_id,
                    general.LevelControl.cluster_id,
                    lighting.Color.cluster_id,
                    general.Groups.cluster_id,
                    general.Identify.cluster_id,
                ],
                "out_clusters": [],
                "device_type": zha.DeviceType.COLOR_DIMMABLE_LIGHT,
            }
        },
        ieee=IEEE_GROUPABLE_DEVICE,
        nwk=0xB79D,
    )
    zha_device = await zha_device_joined(zigpy_device)
    zha_device.available = True
    return zha_device


@pytest.fixture
async def device_light_2(hass, zigpy_device_mock, zha_device_joined):
    """Groupable color-dimmable light #2 (IEEE_GROUPABLE_DEVICE2)."""
    zigpy_device = zigpy_device_mock(
        {
            1: {
                "in_clusters": [
                    general.OnOff.cluster_id,
                    general.LevelControl.cluster_id,
                    lighting.Color.cluster_id,
                    general.Groups.cluster_id,
                    general.Identify.cluster_id,
                ],
                "out_clusters": [],
                "device_type": zha.DeviceType.COLOR_DIMMABLE_LIGHT,
            }
        },
        ieee=IEEE_GROUPABLE_DEVICE2,
        nwk=0xC79E,
    )
    zha_device = await zha_device_joined(zigpy_device)
    zha_device.available = True
    return zha_device


@pytest.fixture
async def device_light_3(hass, zigpy_device_mock, zha_device_joined):
    """Groupable color-dimmable light #3 (IEEE_GROUPABLE_DEVICE3)."""
    zigpy_device = zigpy_device_mock(
        {
            1: {
                "in_clusters": [
                    general.OnOff.cluster_id,
                    general.LevelControl.cluster_id,
                    lighting.Color.cluster_id,
                    general.Groups.cluster_id,
                    general.Identify.cluster_id,
                ],
                "out_clusters": [],
                "device_type": zha.DeviceType.COLOR_DIMMABLE_LIGHT,
            }
        },
        ieee=IEEE_GROUPABLE_DEVICE3,
        nwk=0xB89F,
    )
    zha_device = await zha_device_joined(zigpy_device)
    zha_device.available = True
    return zha_device
@patch("zigpy.zcl.clusters.general.OnOff.read_attributes", new=MagicMock())
async def test_light_refresh(hass, zigpy_device_mock, zha_device_joined_restored):
    """Verify periodic attribute polling of an on/off light.

    Advancing 20 minutes must trigger no read; each subsequent 80-minute
    advance must trigger exactly one on_off read and update the state
    accordingly (the polling interval evidently lies between the two).
    """
    # create zigpy devices
    zigpy_device = zigpy_device_mock(LIGHT_ON_OFF)
    zha_device = await zha_device_joined_restored(zigpy_device)
    on_off_cluster = zigpy_device.endpoints[1].on_off
    entity_id = await find_entity_id(DOMAIN, zha_device, hass)
    # allow traffic to flow through the gateway and device
    await async_enable_traffic(hass, [zha_device])
    # Discard any reads performed during setup.
    on_off_cluster.read_attributes.reset_mock()
    # not enough time passed
    async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=20))
    await hass.async_block_till_done()
    assert on_off_cluster.read_attributes.call_count == 0
    assert on_off_cluster.read_attributes.await_count == 0
    assert hass.states.get(entity_id).state == STATE_OFF
    # 1 interval - 1 call
    on_off_cluster.read_attributes.return_value = [{"on_off": 1}, {}]
    async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=80))
    await hass.async_block_till_done()
    assert on_off_cluster.read_attributes.call_count == 1
    assert on_off_cluster.read_attributes.await_count == 1
    assert hass.states.get(entity_id).state == STATE_ON
    # 2 intervals - 2 calls
    on_off_cluster.read_attributes.return_value = [{"on_off": 0}, {}]
    async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=80))
    await hass.async_block_till_done()
    assert on_off_cluster.read_attributes.call_count == 2
    assert on_off_cluster.read_attributes.await_count == 2
    assert hass.states.get(entity_id).state == STATE_OFF
# Patch every cluster request to succeed so service calls complete
# without a real radio.
@patch(
    "zigpy.zcl.clusters.lighting.Color.request",
    new=AsyncMock(return_value=[sentinel.data, zcl_f.Status.SUCCESS]),
)
@patch(
    "zigpy.zcl.clusters.general.Identify.request",
    new=AsyncMock(return_value=[sentinel.data, zcl_f.Status.SUCCESS]),
)
@patch(
    "zigpy.zcl.clusters.general.LevelControl.request",
    new=AsyncMock(return_value=[sentinel.data, zcl_f.Status.SUCCESS]),
)
@patch(
    "zigpy.zcl.clusters.general.OnOff.request",
    new=AsyncMock(return_value=[sentinel.data, zcl_f.Status.SUCCESS]),
)
# Each light flavor pairs with the expected per-cluster reporting
# configuration counts checked on rejoin.
@pytest.mark.parametrize(
    "device, reporting",
    [(LIGHT_ON_OFF, (1, 0, 0)), (LIGHT_LEVEL, (1, 1, 0)), (LIGHT_COLOR, (1, 1, 3))],
)
async def test_light(
    hass, zigpy_device_mock, zha_device_joined_restored, device, reporting
):
    """End-to-end checks for each light flavor.

    Covers availability, on/off from both directions, flashing,
    brightness control, and reporting reconfiguration after a rejoin.
    """
    # create zigpy devices
    zigpy_device = zigpy_device_mock(device)
    zha_device = await zha_device_joined_restored(zigpy_device)
    entity_id = await find_entity_id(DOMAIN, zha_device, hass)
    assert entity_id is not None
    # Optional clusters are absent on the simpler flavors.
    cluster_on_off = zigpy_device.endpoints[1].on_off
    cluster_level = getattr(zigpy_device.endpoints[1], "level", None)
    cluster_color = getattr(zigpy_device.endpoints[1], "light_color", None)
    cluster_identify = getattr(zigpy_device.endpoints[1], "identify", None)
    assert hass.states.get(entity_id).state == STATE_OFF
    await async_enable_traffic(hass, [zha_device], enabled=False)
    # test that the lights were created and that they are unavailable
    assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
    # allow traffic to flow through the gateway and device
    await async_enable_traffic(hass, [zha_device])
    # test that the lights were created and are off
    assert hass.states.get(entity_id).state == STATE_OFF
    # test turning the lights on and off from the light
    await async_test_on_off_from_light(hass, cluster_on_off, entity_id)
    # test turning the lights on and off from the HA
    await async_test_on_off_from_hass(hass, cluster_on_off, entity_id)
    # test short flashing the lights from the HA
    if cluster_identify:
        await async_test_flash_from_hass(hass, cluster_identify, entity_id, FLASH_SHORT)
    # test turning the lights on and off from the HA
    if cluster_level:
        await async_test_level_on_off_from_hass(
            hass, cluster_on_off, cluster_level, entity_id
        )
        # test getting a brightness change from the network
        await async_test_on_from_light(hass, cluster_on_off, entity_id)
        await async_test_dimmer_from_light(
            hass, cluster_level, entity_id, 150, STATE_ON
        )
    # test rejoin
    await async_test_off_from_hass(hass, cluster_on_off, entity_id)
    clusters = [cluster_on_off]
    if cluster_level:
        clusters.append(cluster_level)
    if cluster_color:
        clusters.append(cluster_color)
    await async_test_rejoin(hass, zigpy_device, clusters, reporting)
    # test long flashing the lights from the HA
    if cluster_identify:
        await async_test_flash_from_hass(hass, cluster_identify, entity_id, FLASH_LONG)
async def async_test_on_off_from_light(hass, cluster, entity_id):
    """Drive on then off via attribute reports from the device side."""
    # turn on at light: attribute 0 (on_off) = 1; other ids are noise
    await send_attributes_report(hass, cluster, {1: 0, 0: 1, 2: 3})
    await hass.async_block_till_done()
    assert hass.states.get(entity_id).state == STATE_ON
    # turn off at light: attribute 0 (on_off) = 0
    await send_attributes_report(hass, cluster, {1: 1, 0: 0, 2: 3})
    await hass.async_block_till_done()
    assert hass.states.get(entity_id).state == STATE_OFF


async def async_test_on_from_light(hass, cluster, entity_id):
    """Report on_off = 1 from the device and expect the entity on."""
    # turn on at light
    await send_attributes_report(hass, cluster, {1: -1, 0: 1, 2: 2})
    await hass.async_block_till_done()
    assert hass.states.get(entity_id).state == STATE_ON


async def async_test_on_off_from_hass(hass, cluster, entity_id):
    """Call light.turn_on from HA, verify the ZCL ON command, then off."""
    # turn on via UI
    cluster.request.reset_mock()
    await hass.services.async_call(
        DOMAIN, "turn_on", {"entity_id": entity_id}, blocking=True
    )
    assert cluster.request.call_count == 1
    assert cluster.request.await_count == 1
    # Exactly one ON command (id 1) with no payload.
    assert cluster.request.call_args == call(
        False, ON, (), expect_reply=True, manufacturer=None, tsn=None
    )
    await async_test_off_from_hass(hass, cluster, entity_id)


async def async_test_off_from_hass(hass, cluster, entity_id):
    """Call light.turn_off from HA and verify the ZCL OFF command."""
    # turn off via UI
    cluster.request.reset_mock()
    await hass.services.async_call(
        DOMAIN, "turn_off", {"entity_id": entity_id}, blocking=True
    )
    assert cluster.request.call_count == 1
    assert cluster.request.await_count == 1
    # Exactly one OFF command (id 0) with no payload.
    assert cluster.request.call_args == call(
        False, OFF, (), expect_reply=True, manufacturer=None, tsn=None
    )
async def async_test_level_on_off_from_hass(
    hass, on_off_cluster, level_cluster, entity_id
):
    """Exercise turn_on with no args, a transition, and a brightness.

    A bare turn_on must touch only the on/off cluster; transition and
    brightness must additionally issue a LevelControl command (id 4,
    move-to-level-with-on-off).
    """
    on_off_cluster.request.reset_mock()
    level_cluster.request.reset_mock()
    # turn on via UI: no level command expected without transition/brightness
    await hass.services.async_call(
        DOMAIN, "turn_on", {"entity_id": entity_id}, blocking=True
    )
    assert on_off_cluster.request.call_count == 1
    assert on_off_cluster.request.await_count == 1
    assert level_cluster.request.call_count == 0
    assert level_cluster.request.await_count == 0
    assert on_off_cluster.request.call_args == call(
        False, ON, (), expect_reply=True, manufacturer=None, tsn=None
    )
    on_off_cluster.request.reset_mock()
    level_cluster.request.reset_mock()
    # turn on with a 10 s transition: level 254 with a time argument of
    # 100 (presumably the ZCL 1/10-second transition units)
    await hass.services.async_call(
        DOMAIN, "turn_on", {"entity_id": entity_id, "transition": 10}, blocking=True
    )
    assert on_off_cluster.request.call_count == 1
    assert on_off_cluster.request.await_count == 1
    assert level_cluster.request.call_count == 1
    assert level_cluster.request.await_count == 1
    assert on_off_cluster.request.call_args == call(
        False, ON, (), expect_reply=True, manufacturer=None, tsn=None
    )
    assert level_cluster.request.call_args == call(
        False,
        4,
        (zigpy.types.uint8_t, zigpy.types.uint16_t),
        254,
        100.0,
        expect_reply=True,
        manufacturer=None,
        tsn=None,
    )
    on_off_cluster.request.reset_mock()
    level_cluster.request.reset_mock()
    # turn on with explicit brightness 10 and an instant (1) transition
    await hass.services.async_call(
        DOMAIN, "turn_on", {"entity_id": entity_id, "brightness": 10}, blocking=True
    )
    assert on_off_cluster.request.call_count == 1
    assert on_off_cluster.request.await_count == 1
    assert level_cluster.request.call_count == 1
    assert level_cluster.request.await_count == 1
    assert on_off_cluster.request.call_args == call(
        False, ON, (), expect_reply=True, manufacturer=None, tsn=None
    )
    assert level_cluster.request.call_args == call(
        False,
        4,
        (zigpy.types.uint8_t, zigpy.types.uint16_t),
        10,
        1,
        expect_reply=True,
        manufacturer=None,
        tsn=None,
    )
    on_off_cluster.request.reset_mock()
    level_cluster.request.reset_mock()
    await async_test_off_from_hass(hass, on_off_cluster, entity_id)


async def async_test_dimmer_from_light(hass, cluster, entity_id, level, expected_state):
    """Report a level change from the device and check state/brightness."""
    # attribute 0 carries the current level; other ids are noise
    await send_attributes_report(
        hass, cluster, {1: level + 10, 0: level, 2: level - 10 or 22}
    )
    await hass.async_block_till_done()
    assert hass.states.get(entity_id).state == expected_state
    # hass uses None for brightness of 0 in state attributes
    if level == 0:
        level = None
    assert hass.states.get(entity_id).attributes.get("brightness") == level
async def async_test_flash_from_hass(hass, cluster, entity_id, flash):
    """Call turn_on with a flash option and verify the Identify effect.

    The flash maps through FLASH_EFFECTS to an effect id sent via the
    Identify cluster's command 64 (0x40 — presumably trigger_effect),
    with effect variant 0.
    """
    # turn on via UI
    cluster.request.reset_mock()
    await hass.services.async_call(
        DOMAIN, "turn_on", {"entity_id": entity_id, "flash": flash}, blocking=True
    )
    assert cluster.request.call_count == 1
    assert cluster.request.await_count == 1
    assert cluster.request.call_args == call(
        False,
        64,
        (zigpy.types.uint8_t, zigpy.types.uint8_t),
        FLASH_EFFECTS[flash],
        0,
        expect_reply=True,
        manufacturer=None,
        tsn=None,
    )
@patch(
    "zigpy.zcl.clusters.lighting.Color.request",
    new=AsyncMock(return_value=[sentinel.data, zcl_f.Status.SUCCESS]),
)
@patch(
    "zigpy.zcl.clusters.general.Identify.request",
    new=AsyncMock(return_value=[sentinel.data, zcl_f.Status.SUCCESS]),
)
@patch(
    "zigpy.zcl.clusters.general.LevelControl.request",
    new=AsyncMock(return_value=[sentinel.data, zcl_f.Status.SUCCESS]),
)
@patch(
    "zigpy.zcl.clusters.general.OnOff.request",
    new=AsyncMock(return_value=[sentinel.data, zcl_f.Status.SUCCESS]),
)
async def test_zha_group_light_entity(
    hass, device_light_1, device_light_2, device_light_3, coordinator
):
    """Test the light entity for a ZHA group.

    Exercises group creation, on/off and level control from both HA and
    the Zigbee side, member add/remove bookkeeping, and cleanup of the
    group entity plus its entity-registry entry when the group goes away.
    """
    zha_gateway = get_zha_gateway(hass)
    assert zha_gateway is not None
    # Attach the gateway to the coordinator and member devices so group
    # operations can resolve their owning gateway.
    zha_gateway.coordinator_zha_device = coordinator
    coordinator._zha_gateway = zha_gateway
    device_light_1._zha_gateway = zha_gateway
    device_light_2._zha_gateway = zha_gateway
    member_ieee_addresses = [device_light_1.ieee, device_light_2.ieee]
    members = [GroupMember(device_light_1.ieee, 1), GroupMember(device_light_2.ieee, 1)]
    assert coordinator.is_coordinator
    # test creating a group with 2 members
    zha_group = await zha_gateway.async_create_zigpy_group("Test Group", members)
    await hass.async_block_till_done()
    assert zha_group is not None
    assert len(zha_group.members) == 2
    for member in zha_group.members:
        assert member.device.ieee in member_ieee_addresses
        assert member.group == zha_group
        assert member.endpoint is not None
    device_1_entity_id = await find_entity_id(DOMAIN, device_light_1, hass)
    device_2_entity_id = await find_entity_id(DOMAIN, device_light_2, hass)
    device_3_entity_id = await find_entity_id(DOMAIN, device_light_3, hass)
    # all three devices must expose distinct light entities
    assert (
        device_1_entity_id != device_2_entity_id
        and device_1_entity_id != device_3_entity_id
    )
    assert device_2_entity_id != device_3_entity_id
    group_entity_id = async_find_group_entity_id(hass, DOMAIN, zha_group)
    assert hass.states.get(group_entity_id) is not None
    # only the two group members should be tracked by the group entity
    assert device_1_entity_id in zha_group.member_entity_ids
    assert device_2_entity_id in zha_group.member_entity_ids
    assert device_3_entity_id not in zha_group.member_entity_ids
    # clusters used to drive the group as a whole vs individual members
    group_cluster_on_off = zha_group.endpoint[general.OnOff.cluster_id]
    group_cluster_level = zha_group.endpoint[general.LevelControl.cluster_id]
    group_cluster_identify = zha_group.endpoint[general.Identify.cluster_id]
    dev1_cluster_on_off = device_light_1.device.endpoints[1].on_off
    dev2_cluster_on_off = device_light_2.device.endpoints[1].on_off
    dev3_cluster_on_off = device_light_3.device.endpoints[1].on_off
    dev1_cluster_level = device_light_1.device.endpoints[1].level
    await async_enable_traffic(
        hass, [device_light_1, device_light_2, device_light_3], enabled=False
    )
    await hass.async_block_till_done()
    # test that the lights were created and that they are unavailable
    assert hass.states.get(group_entity_id).state == STATE_UNAVAILABLE
    # allow traffic to flow through the gateway and device
    await async_enable_traffic(hass, [device_light_1, device_light_2, device_light_3])
    await hass.async_block_till_done()
    # test that the lights were created and are off
    assert hass.states.get(group_entity_id).state == STATE_OFF
    # test turning the lights on and off from the HA
    await async_test_on_off_from_hass(hass, group_cluster_on_off, group_entity_id)
    # test short flashing the lights from the HA
    await async_test_flash_from_hass(
        hass, group_cluster_identify, group_entity_id, FLASH_SHORT
    )
    # test turning the lights on and off from the light
    await async_test_on_off_from_light(hass, dev1_cluster_on_off, group_entity_id)
    # test turning the lights on and off from the HA
    await async_test_level_on_off_from_hass(
        hass, group_cluster_on_off, group_cluster_level, group_entity_id
    )
    # test getting a brightness change from the network
    await async_test_on_from_light(hass, dev1_cluster_on_off, group_entity_id)
    await async_test_dimmer_from_light(
        hass, dev1_cluster_level, group_entity_id, 150, STATE_ON
    )
    # test long flashing the lights from the HA
    await async_test_flash_from_hass(
        hass, group_cluster_identify, group_entity_id, FLASH_LONG
    )
    assert len(zha_group.members) == 2
    # test some of the group logic to make sure we key off states correctly
    await send_attributes_report(hass, dev1_cluster_on_off, {0: 1})
    await send_attributes_report(hass, dev2_cluster_on_off, {0: 1})
    await hass.async_block_till_done()
    # test that group light is on
    assert hass.states.get(device_1_entity_id).state == STATE_ON
    assert hass.states.get(device_2_entity_id).state == STATE_ON
    assert hass.states.get(group_entity_id).state == STATE_ON
    await send_attributes_report(hass, dev1_cluster_on_off, {0: 0})
    await hass.async_block_till_done()
    # test that group light is still on (group is ON while any member is ON)
    assert hass.states.get(device_1_entity_id).state == STATE_OFF
    assert hass.states.get(device_2_entity_id).state == STATE_ON
    assert hass.states.get(group_entity_id).state == STATE_ON
    await send_attributes_report(hass, dev2_cluster_on_off, {0: 0})
    await hass.async_block_till_done()
    # test that group light is now off
    assert hass.states.get(device_1_entity_id).state == STATE_OFF
    assert hass.states.get(device_2_entity_id).state == STATE_OFF
    assert hass.states.get(group_entity_id).state == STATE_OFF
    await send_attributes_report(hass, dev1_cluster_on_off, {0: 1})
    await hass.async_block_till_done()
    # test that group light is now back on
    assert hass.states.get(device_1_entity_id).state == STATE_ON
    assert hass.states.get(device_2_entity_id).state == STATE_OFF
    assert hass.states.get(group_entity_id).state == STATE_ON
    # turn it off to test a new member add being tracked
    await send_attributes_report(hass, dev1_cluster_on_off, {0: 0})
    await hass.async_block_till_done()
    assert hass.states.get(device_1_entity_id).state == STATE_OFF
    assert hass.states.get(device_2_entity_id).state == STATE_OFF
    assert hass.states.get(group_entity_id).state == STATE_OFF
    # add a new member and test that his state is also tracked
    await zha_group.async_add_members([GroupMember(device_light_3.ieee, 1)])
    await send_attributes_report(hass, dev3_cluster_on_off, {0: 1})
    await hass.async_block_till_done()
    assert device_3_entity_id in zha_group.member_entity_ids
    assert len(zha_group.members) == 3
    assert hass.states.get(device_1_entity_id).state == STATE_OFF
    assert hass.states.get(device_2_entity_id).state == STATE_OFF
    assert hass.states.get(device_3_entity_id).state == STATE_ON
    assert hass.states.get(group_entity_id).state == STATE_ON
    # make the group have only 1 member and now there should be no entity
    await zha_group.async_remove_members(
        [GroupMember(device_light_2.ieee, 1), GroupMember(device_light_3.ieee, 1)]
    )
    assert len(zha_group.members) == 1
    assert hass.states.get(group_entity_id) is None
    assert device_2_entity_id not in zha_group.member_entity_ids
    assert device_3_entity_id not in zha_group.member_entity_ids
    # make sure the entity registry entry is still there
    assert zha_gateway.ha_entity_registry.async_get(group_entity_id) is not None
    # add a member back and ensure that the group entity was created again
    await zha_group.async_add_members([GroupMember(device_light_3.ieee, 1)])
    await send_attributes_report(hass, dev3_cluster_on_off, {0: 1})
    await hass.async_block_till_done()
    assert len(zha_group.members) == 2
    assert hass.states.get(group_entity_id).state == STATE_ON
    # add a 3rd member and ensure we still have an entity and we track the new one
    await send_attributes_report(hass, dev1_cluster_on_off, {0: 0})
    await send_attributes_report(hass, dev3_cluster_on_off, {0: 0})
    await hass.async_block_till_done()
    assert hass.states.get(group_entity_id).state == STATE_OFF
    # this will test that _reprobe_group is used correctly
    await zha_group.async_add_members(
        [GroupMember(device_light_2.ieee, 1), GroupMember(coordinator.ieee, 1)]
    )
    await send_attributes_report(hass, dev2_cluster_on_off, {0: 1})
    await hass.async_block_till_done()
    assert len(zha_group.members) == 4
    assert hass.states.get(group_entity_id).state == STATE_ON
    await zha_group.async_remove_members([GroupMember(coordinator.ieee, 1)])
    await hass.async_block_till_done()
    assert hass.states.get(group_entity_id).state == STATE_ON
    assert len(zha_group.members) == 3
    # remove the group and ensure that there is no entity and that the entity registry is cleaned up
    assert zha_gateway.ha_entity_registry.async_get(group_entity_id) is not None
    await zha_gateway.async_remove_zigpy_group(zha_group.group_id)
    assert hass.states.get(group_entity_id) is None
    assert zha_gateway.ha_entity_registry.async_get(group_entity_id) is None
| |
# Copyright 2016 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import fixtures
import mock
import oslo_messaging as messaging
from oslo_serialization import jsonutils
import testtools
from nova import context
from nova import rpc
from nova import test
# Make a class that resets all of the global variables in nova.rpc
class RPCResetFixture(fixtures.Fixture):
    """Snapshot the nova.rpc module globals and restore them on cleanup."""

    # Names of the module-level globals that the tests mutate.
    _GLOBALS = ('TRANSPORT', 'NOTIFICATION_TRANSPORT', 'NOTIFIER',
                'ALLOWED_EXMODS', 'EXTRA_EXMODS')

    def _setUp(self):
        # Shallow-copy each global exactly as the original fixture did.
        self._saved = {name: copy.copy(getattr(rpc, name))
                       for name in self._GLOBALS}
        self.addCleanup(self._reset_everything)

    def _reset_everything(self):
        for name, value in self._saved.items():
            setattr(rpc, name, value)
# We can't import nova.test.TestCase because that sets up an RPCFixture
# that pretty much nullifies all of this testing
class TestRPC(testtools.TestCase):
    """Tests for nova.rpc module-level helpers and global state handling."""

    def setUp(self):
        super(TestRPC, self).setUp()
        # Restore the rpc module globals after each test.
        self.useFixture(RPCResetFixture())

    @mock.patch('oslo_messaging.RPCClient')
    def test_cell_client(self, mock_rpcclient):
        """A context carrying an mq_connection gets a dedicated client."""
        default_client = mock.Mock()
        dynamic_client = mock.Mock()
        mock_rpcclient.return_value = dynamic_client
        ctxt = mock.Mock()
        ctxt.mq_connection = 'fake://'

        class FakeAPI(object):
            def __init__(self):
                self.client = default_client

            def rpc_api_function(self, context):
                rpc.get_cell_client(context, self.client).do_rpc()

        rpcapi = FakeAPI()
        rpcapi.rpc_api_function(ctxt)
        # verify a dynamic client was created
        mock_rpcclient.assert_called_once_with(
            ctxt.mq_connection, default_client.target,
            version_cap=default_client.version_cap,
            serializer=default_client.serializer)
        # verify dynamic client handled the rpc
        dynamic_client.do_rpc.assert_called_once_with()

    @mock.patch('oslo_messaging.RPCClient')
    def test_cell_client_no_switch(self, mock_rpcclient):
        """Without an mq_connection, the default client is used as-is."""
        default_client = mock.Mock()
        dynamic_client = mock.Mock()
        mock_rpcclient.return_value = dynamic_client
        ctxt = mock.Mock()
        ctxt.mq_connection = None

        class FakeAPI(object):
            def __init__(self):
                self.client = default_client

            def rpc_api_function(self, context):
                rpc.get_cell_client(context, self.client).do_rpc()

        rpcapi = FakeAPI()
        rpcapi.rpc_api_function(ctxt)
        # verify a dynamic client was not created
        self.assertFalse(mock_rpcclient.called)
        # verify default client handled the rpc
        default_client.do_rpc.assert_called_once_with()

    @mock.patch.object(rpc, 'get_allowed_exmods')
    @mock.patch.object(rpc, 'RequestContextSerializer')
    @mock.patch.object(messaging, 'get_transport')
    @mock.patch.object(messaging, 'get_notification_transport')
    @mock.patch.object(messaging, 'Notifier')
    def test_init_unversioned(self, mock_notif, mock_noti_trans, mock_trans,
                              mock_ser, mock_exmods):
        """'unversioned' format: real legacy notifier, noop versioned one."""
        # The expected call to get the legacy notifier will require no new
        # kwargs, and we expect the new notifier will need the noop driver
        expected = [{}, {'driver': 'noop'}]
        self._test_init(mock_notif, mock_noti_trans, mock_trans, mock_ser,
                        mock_exmods, 'unversioned', expected)

    @mock.patch.object(rpc, 'get_allowed_exmods')
    @mock.patch.object(rpc, 'RequestContextSerializer')
    @mock.patch.object(messaging, 'get_transport')
    @mock.patch.object(messaging, 'get_notification_transport')
    @mock.patch.object(messaging, 'Notifier')
    def test_init_both(self, mock_notif, mock_noti_trans, mock_trans,
                       mock_ser, mock_exmods):
        """'both' format: legacy notifier plus a real versioned notifier."""
        expected = [{}, {'topics': ['versioned_notifications']}]
        self._test_init(mock_notif, mock_noti_trans, mock_trans, mock_ser,
                        mock_exmods, 'both', expected)

    @mock.patch.object(rpc, 'get_allowed_exmods')
    @mock.patch.object(rpc, 'RequestContextSerializer')
    @mock.patch.object(messaging, 'get_transport')
    @mock.patch.object(messaging, 'get_notification_transport')
    @mock.patch.object(messaging, 'Notifier')
    def test_init_versioned(self, mock_notif, mock_noti_trans, mock_trans,
                            mock_ser, mock_exmods):
        """'versioned' format: noop legacy notifier, real versioned one."""
        expected = [{'driver': 'noop'},
                    {'topics': ['versioned_notifications']}]
        self._test_init(mock_notif, mock_noti_trans, mock_trans, mock_ser,
                        mock_exmods, 'versioned', expected)

    def test_cleanup_transport_null(self):
        """cleanup() asserts when TRANSPORT was never initialized."""
        rpc.NOTIFICATION_TRANSPORT = mock.Mock()
        rpc.LEGACY_NOTIFIER = mock.Mock()
        rpc.NOTIFIER = mock.Mock()
        self.assertRaises(AssertionError, rpc.cleanup)

    def test_cleanup_notification_transport_null(self):
        """cleanup() asserts when NOTIFICATION_TRANSPORT is missing."""
        rpc.TRANSPORT = mock.Mock()
        rpc.NOTIFIER = mock.Mock()
        self.assertRaises(AssertionError, rpc.cleanup)

    def test_cleanup_legacy_notifier_null(self):
        """cleanup() asserts when LEGACY_NOTIFIER is missing."""
        rpc.TRANSPORT = mock.Mock()
        rpc.NOTIFICATION_TRANSPORT = mock.Mock()
        rpc.NOTIFIER = mock.Mock()
        # BUG FIX: the original test set up the globals but never invoked
        # rpc.cleanup(), so it asserted nothing. Mirror the sibling tests.
        self.assertRaises(AssertionError, rpc.cleanup)

    def test_cleanup_notifier_null(self):
        """cleanup() asserts when NOTIFIER is missing."""
        rpc.TRANSPORT = mock.Mock()
        rpc.LEGACY_NOTIFIER = mock.Mock()
        rpc.NOTIFICATION_TRANSPORT = mock.Mock()
        self.assertRaises(AssertionError, rpc.cleanup)

    def test_cleanup(self):
        """cleanup() tears down both transports and clears all globals."""
        rpc.LEGACY_NOTIFIER = mock.Mock()
        rpc.NOTIFIER = mock.Mock()
        rpc.NOTIFICATION_TRANSPORT = mock.Mock()
        rpc.TRANSPORT = mock.Mock()
        trans_cleanup = mock.Mock()
        not_trans_cleanup = mock.Mock()
        rpc.TRANSPORT.cleanup = trans_cleanup
        rpc.NOTIFICATION_TRANSPORT.cleanup = not_trans_cleanup
        rpc.cleanup()
        trans_cleanup.assert_called_once_with()
        not_trans_cleanup.assert_called_once_with()
        self.assertIsNone(rpc.TRANSPORT)
        self.assertIsNone(rpc.NOTIFICATION_TRANSPORT)
        self.assertIsNone(rpc.LEGACY_NOTIFIER)
        self.assertIsNone(rpc.NOTIFIER)

    @mock.patch.object(messaging, 'set_transport_defaults')
    def test_set_defaults(self, mock_set):
        """set_defaults() forwards the control exchange to oslo.messaging."""
        control_exchange = mock.Mock()
        rpc.set_defaults(control_exchange)
        mock_set.assert_called_once_with(control_exchange)

    def test_add_extra_exmods(self):
        rpc.EXTRA_EXMODS = []
        rpc.add_extra_exmods('foo', 'bar')
        self.assertEqual(['foo', 'bar'], rpc.EXTRA_EXMODS)

    def test_clear_extra_exmods(self):
        rpc.EXTRA_EXMODS = ['foo', 'bar']
        rpc.clear_extra_exmods()
        self.assertEqual(0, len(rpc.EXTRA_EXMODS))

    def test_get_allowed_exmods(self):
        """Allowed exmods are the static list plus the extra list."""
        rpc.ALLOWED_EXMODS = ['foo']
        rpc.EXTRA_EXMODS = ['bar']
        exmods = rpc.get_allowed_exmods()
        self.assertEqual(['foo', 'bar'], exmods)

    @mock.patch.object(messaging, 'TransportURL')
    def test_get_transport_url(self, mock_url):
        conf = mock.Mock()
        rpc.CONF = conf
        mock_url.parse.return_value = 'foo'
        url = rpc.get_transport_url(url_str='bar')
        self.assertEqual('foo', url)
        mock_url.parse.assert_called_once_with(conf, 'bar',
                                               rpc.TRANSPORT_ALIASES)

    @mock.patch.object(messaging, 'TransportURL')
    def test_get_transport_url_null(self, mock_url):
        """With no url string, parse is called with None."""
        conf = mock.Mock()
        rpc.CONF = conf
        mock_url.parse.return_value = 'foo'
        url = rpc.get_transport_url()
        self.assertEqual('foo', url)
        mock_url.parse.assert_called_once_with(conf, None,
                                               rpc.TRANSPORT_ALIASES)

    @mock.patch.object(rpc, 'RequestContextSerializer')
    @mock.patch.object(messaging, 'RPCClient')
    def test_get_client(self, mock_client, mock_ser):
        """get_client wraps the serializer and builds an RPCClient."""
        rpc.TRANSPORT = mock.Mock()
        tgt = mock.Mock()
        ser = mock.Mock()
        mock_client.return_value = 'client'
        mock_ser.return_value = ser
        client = rpc.get_client(tgt, version_cap='1.0', serializer='foo')
        mock_ser.assert_called_once_with('foo')
        mock_client.assert_called_once_with(rpc.TRANSPORT,
                                            tgt, version_cap='1.0',
                                            serializer=ser)
        self.assertEqual('client', client)

    @mock.patch.object(rpc, 'RequestContextSerializer')
    @mock.patch.object(messaging, 'get_rpc_server')
    def test_get_server(self, mock_get, mock_ser):
        """get_server wraps the serializer and builds an eventlet server."""
        rpc.TRANSPORT = mock.Mock()
        ser = mock.Mock()
        tgt = mock.Mock()
        ends = mock.Mock()
        mock_ser.return_value = ser
        mock_get.return_value = 'server'
        server = rpc.get_server(tgt, ends, serializer='foo')
        mock_ser.assert_called_once_with('foo')
        mock_get.assert_called_once_with(rpc.TRANSPORT, tgt, ends,
                                         executor='eventlet', serializer=ser)
        self.assertEqual('server', server)

    def test_get_notifier(self):
        """get_notifier prepares the legacy notifier with the publisher id."""
        rpc.LEGACY_NOTIFIER = mock.Mock()
        mock_prep = mock.Mock()
        mock_prep.return_value = 'notifier'
        rpc.LEGACY_NOTIFIER.prepare = mock_prep
        notifier = rpc.get_notifier('service', publisher_id='foo')
        mock_prep.assert_called_once_with(publisher_id='foo')
        self.assertIsInstance(notifier, rpc.LegacyValidatingNotifier)
        self.assertEqual('notifier', notifier.notifier)

    def test_get_notifier_null_publisher(self):
        """Without a publisher id, 'service.host' is derived automatically."""
        rpc.LEGACY_NOTIFIER = mock.Mock()
        mock_prep = mock.Mock()
        mock_prep.return_value = 'notifier'
        rpc.LEGACY_NOTIFIER.prepare = mock_prep
        notifier = rpc.get_notifier('service', host='bar')
        mock_prep.assert_called_once_with(publisher_id='service.bar')
        self.assertIsInstance(notifier, rpc.LegacyValidatingNotifier)
        self.assertEqual('notifier', notifier.notifier)

    def test_get_versioned_notifier(self):
        rpc.NOTIFIER = mock.Mock()
        mock_prep = mock.Mock()
        mock_prep.return_value = 'notifier'
        rpc.NOTIFIER.prepare = mock_prep
        notifier = rpc.get_versioned_notifier('service.foo')
        mock_prep.assert_called_once_with(publisher_id='service.foo')
        self.assertEqual('notifier', notifier)

    @mock.patch.object(rpc, 'get_allowed_exmods')
    @mock.patch.object(messaging, 'get_transport')
    def test_create_transport(self, mock_transport, mock_exmods):
        """create_transport passes the url, exmods and aliases through."""
        exmods = mock_exmods.return_value
        transport = rpc.create_transport(mock.sentinel.url)
        self.assertEqual(mock_transport.return_value, transport)
        mock_exmods.assert_called_once_with()
        mock_transport.assert_called_once_with(rpc.CONF,
                                               url=mock.sentinel.url,
                                               allowed_remote_exmods=exmods,
                                               aliases=rpc.TRANSPORT_ALIASES)

    def _test_init(self, mock_notif, mock_noti_trans, mock_trans, mock_ser,
                   mock_exmods, notif_format, expected_driver_topic_kwargs):
        """Drive rpc.init() for a notification_format and verify the
        legacy/versioned Notifier construction kwargs."""
        legacy_notifier = mock.Mock()
        notifier = mock.Mock()
        notif_transport = mock.Mock()
        transport = mock.Mock()
        serializer = mock.Mock()
        conf = mock.Mock()
        conf.notification_format = notif_format
        mock_exmods.return_value = ['foo']
        mock_trans.return_value = transport
        mock_noti_trans.return_value = notif_transport
        mock_ser.return_value = serializer
        # First Notifier() call builds the legacy notifier, second the
        # versioned one.
        mock_notif.side_effect = [legacy_notifier, notifier]
        rpc.init(conf)
        mock_exmods.assert_called_once_with()
        mock_trans.assert_called_once_with(conf,
                                           allowed_remote_exmods=['foo'],
                                           aliases=rpc.TRANSPORT_ALIASES)
        self.assertIsNotNone(rpc.TRANSPORT)
        self.assertIsNotNone(rpc.LEGACY_NOTIFIER)
        self.assertIsNotNone(rpc.NOTIFIER)
        self.assertEqual(legacy_notifier, rpc.LEGACY_NOTIFIER)
        self.assertEqual(notifier, rpc.NOTIFIER)
        expected_calls = []
        for kwargs in expected_driver_topic_kwargs:
            expected_kwargs = {'serializer': serializer}
            expected_kwargs.update(kwargs)
            expected_calls.append(((notif_transport,), expected_kwargs))
        self.assertEqual(expected_calls, mock_notif.call_args_list,
                         "The calls to messaging.Notifier() did not create "
                         "the legacy and versioned notifiers properly.")
class TestJsonPayloadSerializer(test.NoDBTestCase):
    """Tests for rpc.JsonPayloadSerializer."""

    def test_serialize_entity(self):
        # serialize_entity must delegate to jsonutils.to_primitive with
        # convert_instances enabled.
        with mock.patch.object(jsonutils, 'to_primitive') as to_prim_mock:
            rpc.JsonPayloadSerializer.serialize_entity('context', 'entity')
        to_prim_mock.assert_called_once_with('entity', convert_instances=True)
class TestRequestContextSerializer(test.NoDBTestCase):
    """Tests for rpc.RequestContextSerializer, with and without a base."""

    def setUp(self):
        super(TestRequestContextSerializer, self).setUp()
        self.base = mock.Mock()
        self.serializer = rpc.RequestContextSerializer(self.base)
        self.null_serializer = rpc.RequestContextSerializer(None)

    def test_serialize_entity(self):
        self.base.serialize_entity.return_value = 'foo'
        result = self.serializer.serialize_entity('context', 'entity')
        self.base.serialize_entity.assert_called_once_with('context',
                                                           'entity')
        self.assertEqual('foo', result)

    def test_serialize_entity_null_base(self):
        # Without a base serializer, entities pass through unchanged.
        result = self.null_serializer.serialize_entity('context', 'entity')
        self.assertEqual('entity', result)

    def test_deserialize_entity(self):
        self.base.deserialize_entity.return_value = 'foo'
        result = self.serializer.deserialize_entity('context', 'entity')
        self.base.deserialize_entity.assert_called_once_with('context',
                                                             'entity')
        self.assertEqual('foo', result)

    def test_deserialize_entity_null_base(self):
        result = self.null_serializer.deserialize_entity('context', 'entity')
        self.assertEqual('entity', result)

    def test_serialize_context(self):
        # Contexts are serialized via their to_dict() method.
        ctxt = mock.Mock()
        self.serializer.serialize_context(ctxt)
        ctxt.to_dict.assert_called_once_with()

    @mock.patch.object(context, 'RequestContext')
    def test_deserialize_context(self, mock_req):
        # Contexts are rebuilt through RequestContext.from_dict().
        self.serializer.deserialize_context('context')
        mock_req.from_dict.assert_called_once_with('context')
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__all__ = ['MPIPool']
# If mpi4py is installed, import it.
try:
from mpi4py import MPI
except ImportError:
MPI = None
class _close_pool_message(object):
def __repr__(self):
return '<Close pool message>'
class _function_wrapper(object):
def __init__(self, function):
self.function = function
def _error_function(task):
raise RuntimeError('Pool was sent tasks before being told what '
'function to apply.')
class MPIPool(object):
    '''
    A pool that distributes tasks over a set of MPI processes. MPI is an
    API for distributed memory parallelism. This pool will let you run
    emcee without shared memory, letting you use much larger machines
    with emcee.
    The pool only support the :func:`map` method at the moment because
    this is the only functionality that emcee needs. That being said,
    this pool is fairly general and it could be used for other purposes.
    Contributed by `Joe Zuntz <https://github.com/joezuntz>`_.
    :param comm: (optional)
        The ``mpi4py`` communicator.
    :param debug: (optional)
        If ``True``, print out a lot of status updates at each step.
    :param loadbalance: (optional)
        if ``True`` and ntask > Ncpus, tries to loadbalance by sending
        out one task to each cpu first and then sending out the rest
        as the cpus get done.
    '''
    def __init__(self, comm=None, debug=False, loadbalance=False):
        if MPI is None:
            raise ImportError('Please install mpi4py')
        self.comm = MPI.COMM_WORLD if comm is None else comm
        self.rank = self.comm.Get_rank()
        # Rank 0 is reserved for the master; only size - 1 ranks are workers.
        self.size = self.comm.Get_size() - 1
        self.debug = debug
        # Placeholder that raises until a real function is broadcast via map().
        self.function = _error_function
        self.loadbalance = loadbalance
        if self.size == 0:
            raise ValueError('Tried to create an MPI pool, but there '
                             'was only one MPI process available. '
                             'Need at least two.')
    def is_master(self):
        '''
        Is the current process the master?
        '''
        return self.rank == 0
    def wait(self):
        '''
        If this isn't the master process, wait for instructions.
        '''
        if self.is_master():
            raise RuntimeError('Master node told to await jobs.')
        status = MPI.Status()
        while True:
            # Event loop.
            # Sit here and await instructions.
            if self.debug:
                print('Worker {0} waiting for task.'.format(self.rank))
            # Blocking receive to wait for instructions.
            task = self.comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
            if self.debug:
                print('Worker {0} got task {1} with tag {2}.'
                      .format(self.rank, task, status.tag))
            # Check if message is special sentinel signaling end.
            # If so, stop.
            if isinstance(task, _close_pool_message):
                if self.debug:
                    print('Worker {0} told to quit.'.format(self.rank))
                break
            # Check if message is special type containing new function
            # to be applied
            if isinstance(task, _function_wrapper):
                self.function = task.function
                if self.debug:
                    print('Worker {0} replaced its task function: {1}.'
                          .format(self.rank, self.function))
                continue
            # If not a special message, just run the known function on
            # the input and return it asynchronously.
            result = self.function(task)
            if self.debug:
                print('Worker {0} sending answer {1} with tag {2}.'
                      .format(self.rank, result, status.tag))
            # Echo the incoming tag so the master can match the answer
            # to the task it sent.
            self.comm.isend(result, dest=0, tag=status.tag)
    def map(self, function, tasks):
        '''
        Like the built-in :func:`map` function, apply a function to all
        of the values in a list and return the list of results.
        :param function:
            The function to apply to the list.
        :param tasks:
            The list of elements.
        On the master this returns the list of results; on worker
        processes it blocks in :func:`wait` and returns ``None``.
        '''
        ntask = len(tasks)
        # If not the master just wait for instructions.
        if not self.is_master():
            self.wait()
            return
        if function is not self.function:
            if self.debug:
                print('Master replacing pool function with {0}.'
                      .format(function))
            self.function = function
            F = _function_wrapper(function)
            # Tell all the workers what function to use.
            requests = []
            for i in range(self.size):
                r = self.comm.isend(F, dest=i + 1)
                requests.append(r)
            # Wait until all of the workers have responded. See:
            # https://gist.github.com/4176241
            MPI.Request.waitall(requests)
        if (not self.loadbalance) or (ntask <= self.size):
            # Do not perform load-balancing - the default load-balancing
            # scheme emcee uses.
            # Send all the tasks off and wait for them to be received.
            # Again, see the bug in the above gist.
            requests = []
            for i, task in enumerate(tasks):
                # Tasks are dealt round-robin; the tag encodes the task index.
                worker = i % self.size + 1
                if self.debug:
                    print('Sent task {0} to worker {1} with tag {2}.'
                          .format(task, worker, i))
                r = self.comm.isend(task, dest=worker, tag=i)
                requests.append(r)
            MPI.Request.waitall(requests)
            # Now wait for the answers.
            results = []
            for i in range(ntask):
                worker = i % self.size + 1
                if self.debug:
                    print('Master waiting for worker {0} with tag {1}'
                          .format(worker, i))
                result = self.comm.recv(source=worker, tag=i)
                results.append(result)
            return results
        else:
            # Perform load-balancing. The order of the results are likely to
            # be different from the previous case.
            for i, task in enumerate(tasks[0:self.size]):
                worker = i+1
                if self.debug:
                    print('Sent task {0} to worker {1} with tag {2}.'
                          .format(task, worker, i))
                # Send out the tasks asynchronously.
                self.comm.isend(task, dest=worker, tag=i)
            ntasks_dispatched = self.size
            results = [None]*ntask
            for itask in range(ntask):
                status = MPI.Status()
                # Receive input from workers.
                result = self.comm.recv(source=MPI.ANY_SOURCE,
                                        tag=MPI.ANY_TAG, status=status)
                worker = status.source
                # The tag identifies which task this result belongs to.
                i = status.tag
                results[i] = result
                if self.debug:
                    print('Master received from worker {0} with tag {1}'
                          .format(worker, i))
                # Now send the next task to this idle worker (if there are any
                # left).
                if ntasks_dispatched < ntask:
                    task = tasks[ntasks_dispatched]
                    i = ntasks_dispatched
                    if self.debug:
                        print('Sent task {0} to worker {1} with tag {2}.'
                              .format(task, worker, i))
                    # Send out the tasks asynchronously.
                    self.comm.isend(task, dest=worker, tag=i)
                    ntasks_dispatched += 1
            return results
    def bcast(self, *args, **kwargs):
        '''
        Equivalent to mpi4py :func:`bcast` collective operation.
        '''
        return self.comm.bcast(*args, **kwargs)
    def close(self):
        '''
        Just send a message off to all the pool members which contains
        the special :class:`_close_pool_message` sentinel.
        '''
        # NOTE(review): these isend requests are not waited on; shutdown
        # relies on MPI delivering them before the program exits.
        if self.is_master():
            for i in range(self.size):
                self.comm.isend(_close_pool_message(), dest=i + 1)
    def __enter__(self):
        return self
    def __exit__(self, *args):
        # Context-manager exit always closes the pool.
        self.close()
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.platform import test
linalg = linalg_lib
rng = np.random.RandomState(123)
class LinearOperatorShape(linalg.LinearOperator):
  """LinearOperator that implements the methods ._shape and _shape_tensor."""
  def __init__(self,
               shape,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None):
    # Keep the requested shape verbatim; both _shape and _shape_tensor
    # derive their answers from this stored value.
    self._stored_shape = shape
    super(LinearOperatorShape, self).__init__(
        dtype=dtypes.float32,
        graph_parents=None,
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square)
  def _shape(self):
    # Static shape, as a TensorShape built from the stored value.
    return tensor_shape.TensorShape(self._stored_shape)
  def _shape_tensor(self):
    # Dynamic shape: the same value as an int32 constant tensor.
    return constant_op.constant(self._stored_shape, dtype=dtypes.int32)
  def _matmul(self):
    # Only shape plumbing is under test, so matmul is deliberately absent.
    raise NotImplementedError("Not needed for this test.")
class LinearOperatorMatmulSolve(linalg.LinearOperator):
  """LinearOperator that wraps a [batch] matrix and implements matmul/solve."""
  def __init__(self,
               matrix,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None):
    # The wrapped matrix defines both the operator's dtype and its shape.
    self._matrix = ops.convert_to_tensor(matrix, name="matrix")
    super(LinearOperatorMatmulSolve, self).__init__(
        dtype=self._matrix.dtype,
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square)
  def _shape(self):
    # Static shape comes straight from the wrapped matrix.
    return self._matrix.get_shape()
  def _shape_tensor(self):
    return array_ops.shape(self._matrix)
  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    # adjoint/adjoint_arg map directly onto matmul's adjoint_a/adjoint_b.
    x = ops.convert_to_tensor(x, name="x")
    return math_ops.matmul(
        self._matrix, x, adjoint_a=adjoint, adjoint_b=adjoint_arg)
  def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    rhs = ops.convert_to_tensor(rhs, name="rhs")
    # adjoint_arg is intentionally unsupported in this test helper.
    assert not adjoint_arg, "Not implemented for this test class."
    return linalg_ops.matrix_solve(self._matrix, rhs, adjoint=adjoint)
class LinearOperatorTest(test.TestCase):
def test_all_shape_properties_defined_by_the_one_property_shape(self):
shape = (1, 2, 3, 4)
operator = LinearOperatorShape(shape)
self.assertAllEqual(shape, operator.shape)
self.assertAllEqual(4, operator.tensor_rank)
self.assertAllEqual((1, 2), operator.batch_shape)
self.assertAllEqual(4, operator.domain_dimension)
self.assertAllEqual(3, operator.range_dimension)
def test_all_shape_methods_defined_by_the_one_method_shape(self):
with self.test_session():
shape = (1, 2, 3, 4)
operator = LinearOperatorShape(shape)
self.assertAllEqual(shape, operator.shape_tensor().eval())
self.assertAllEqual(4, operator.tensor_rank_tensor().eval())
self.assertAllEqual((1, 2), operator.batch_shape_tensor().eval())
self.assertAllEqual(4, operator.domain_dimension_tensor().eval())
self.assertAllEqual(3, operator.range_dimension_tensor().eval())
def test_is_x_properties(self):
operator = LinearOperatorShape(
shape=(2, 2),
is_non_singular=False,
is_self_adjoint=True,
is_positive_definite=False)
self.assertFalse(operator.is_non_singular)
self.assertTrue(operator.is_self_adjoint)
self.assertFalse(operator.is_positive_definite)
def test_generic_to_dense_method_non_square_matrix_static(self):
matrix = rng.randn(2, 3, 4)
operator = LinearOperatorMatmulSolve(matrix)
with self.test_session():
operator_dense = operator.to_dense()
self.assertAllEqual((2, 3, 4), operator_dense.get_shape())
self.assertAllClose(matrix, operator_dense.eval())
def test_generic_to_dense_method_non_square_matrix_tensor(self):
matrix = rng.randn(2, 3, 4)
matrix_ph = array_ops.placeholder(dtypes.float64)
operator = LinearOperatorMatmulSolve(matrix_ph)
with self.test_session():
operator_dense = operator.to_dense()
self.assertAllClose(
matrix, operator_dense.eval(feed_dict={matrix_ph: matrix}))
def test_matvec(self):
matrix = [[1., 0], [0., 2.]]
operator = LinearOperatorMatmulSolve(matrix)
x = [1., 1.]
with self.test_session():
y = operator.matvec(x)
self.assertAllEqual((2,), y.get_shape())
self.assertAllClose([1., 2.], y.eval())
def test_solvevec(self):
matrix = [[1., 0], [0., 2.]]
operator = LinearOperatorMatmulSolve(matrix)
y = [1., 1.]
with self.test_session():
x = operator.solvevec(y)
self.assertAllEqual((2,), x.get_shape())
self.assertAllClose([1., 1 / 2.], x.eval())
  def test_is_square_set_to_true_for_square_static_shapes(self):
    """is_square is inferred True when the static shape has equal trailing dims."""
    operator = LinearOperatorShape(shape=(2, 4, 4))
    self.assertTrue(operator.is_square)
  def test_is_square_set_to_false_for_square_static_shapes(self):
    """is_square is inferred False for a non-square static shape.

    NOTE(review): the name says "for_square" but (2, 3, 4) is non-square;
    the method name is misleading and should read "for_nonsquare".
    """
    operator = LinearOperatorShape(shape=(2, 3, 4))
    self.assertFalse(operator.is_square)
  def test_is_square_set_incorrectly_to_false_raises(self):
    """Passing is_square=False for a provably square shape must raise ValueError."""
    with self.assertRaisesRegexp(ValueError, "but.*was square"):
      _ = LinearOperatorShape(shape=(2, 4, 4), is_square=False).is_square
  def test_is_square_set_inconsistent_with_other_hints_raises(self):
    """Hints that imply squareness (non-singular, positive-definite) conflict with is_square=False."""
    with self.assertRaisesRegexp(ValueError, "is always square"):
      matrix = array_ops.placeholder(dtypes.float32)
      LinearOperatorMatmulSolve(matrix, is_non_singular=True, is_square=False)
    with self.assertRaisesRegexp(ValueError, "is always square"):
      matrix = array_ops.placeholder(dtypes.float32)
      LinearOperatorMatmulSolve(
          matrix, is_positive_definite=True, is_square=False)
def test_non_square_operators_raise_on_determinant_and_solve(self):
operator = LinearOperatorShape((2, 3))
with self.assertRaisesRegexp(NotImplementedError, "not be square"):
operator.determinant()
with self.assertRaisesRegexp(NotImplementedError, "not be square"):
operator.log_abs_determinant()
with self.assertRaisesRegexp(NotImplementedError, "not be square"):
operator.solve(rng.rand(2, 2))
with self.assertRaisesRegexp(ValueError, "is always square"):
matrix = array_ops.placeholder(dtypes.float32)
LinearOperatorMatmulSolve(
matrix, is_positive_definite=True, is_square=False)
def test_is_square_manual_set_works(self):
matrix = array_ops.placeholder(dtypes.float32)
# Default is None.
operator = LinearOperatorMatmulSolve(matrix)
self.assertEqual(None, operator.is_square)
# Set to True
operator = LinearOperatorMatmulSolve(matrix, is_square=True)
self.assertTrue(operator.is_square)
# Script entry point: run the test suite when executed directly.
if __name__ == "__main__":
  test.main()
| |
from __future__ import print_function
from flask import Flask, render_template, flash, redirect, url_for, session, request, jsonify
from passlib.hash import sha256_crypt
from functools import wraps
import string
import chartkick
from forms import *
from job_choosing import *
import os
from apiclient import discovery
from oauth2client import client
import time
import httplib2
# Flask application object and template configuration.
# NOTE(review): the secret key is hard-coded in source; it should be loaded
# from the environment or a config file before deployment.
app = Flask(__name__)
app.secret_key = "SUPERSECRETKEY"
app.jinja_env.add_extension("chartkick.ext.charts")
def isSuperAdmin():
    """Return the super_admin flag of the currently logged-in user."""
    conn, cur = connection()
    cur.execute('SELECT * FROM users WHERE username=%s', [session['username']])
    rows = cur.fetchall()
    flag = rows[0]['super_admin']
    conn.close()
    return flag
def is_admin():
    """Return the admin flag of the currently logged-in user."""
    conn, cur = connection()
    cur.execute('SELECT * FROM users WHERE username=%s', [session['username']])
    rows = cur.fetchall()
    flag = rows[0]['admin']
    conn.close()
    return flag
def is_logged_in(f):
    """View decorator: require an active session, else flash and redirect to login."""
    @wraps(f)
    def wrap(*args, **kwargs):
        if 'logged_in' not in session:
            flash('Access Denied', 'danger')
            return redirect(url_for('login'))
        return f(*args, **kwargs)
    return wrap
def is_logged_in_admin(f):
    """View decorator: require an active session AND the admin flag."""
    @wraps(f)
    def wrap(*args, **kwargs):
        # De Morgan of: 'logged_in' in session and is_admin() == 1
        if 'logged_in' not in session or is_admin() != 1:
            flash('Access Denied', 'danger')
            return redirect(url_for('login'))
        return f(*args, **kwargs)
    return wrap
def is_logged_in_super_admin(f):
    """View decorator: require an active session AND the super_admin flag."""
    @wraps(f)
    def wrap(*args, **kwargs):
        # De Morgan of: 'logged_in' in session and isSuperAdmin() == 1
        if 'logged_in' not in session or isSuperAdmin() != 1:
            flash('Access Denied', 'danger')
            return redirect(url_for('login'))
        return f(*args, **kwargs)
    return wrap
@is_logged_in
def get_session_id():
    """Return the users.id of the currently logged-in user.

    NOTE(review): this is a plain helper, not a route, yet it is wrapped in
    @is_logged_in; when the session is missing the helper returns a redirect
    response instead of an id — confirm callers expect that.
    """
    session_username = session['username']
    conn, cur = connection()
    cur.execute('SELECT id FROM users WHERE username=%s', [session_username])
    name_user = cur.fetchall()
    idnum = name_user[0]['id']
    conn.close()
    return idnum
@app.route('/')
def index():
    """Home page; shows the quote of the day when one exists.

    Fix: the DB connection was never closed, leaking one connection per
    request to the home page.
    """
    if 'logged_in' in session:
        admin_check = isSuperAdmin()
    else:
        admin_check = 0
    conn, cur = connection()
    result = cur.execute('SELECT * FROM quote')
    if result > 0:
        quote = cur.fetchall()
        body = quote[0]['quote_text']
        author = quote[0]['author']
        conn.close()
        return render_template('home.html', admin_check=admin_check, body=body, author=author)
    else:
        conn.close()
        return render_template('home.html', admin_check=admin_check)
def get_group_list():
    """Return the list of training-group names from set_attendance.

    Fix: the connection was never closed (leak); the manual index loop is
    replaced by a comprehension with identical output.
    """
    conn, cur = connection()
    cur.execute('SELECT training_group FROM set_attendance')
    groups = cur.fetchall()
    conn.close()
    return [row['training_group'] for row in groups]
@app.route('/swimmers')
@is_logged_in
def swimmers():
    """List all swimmers alphabetically, with the group navigation list."""
    group_list = get_group_list()
    conn, cur = connection()
    result = cur.execute("SELECT * FROM swimmers ORDER BY name ASC")
    swimmers = cur.fetchall()
    if result > 0:
        conn.close()
        return render_template('swimmers.html', swimmers=swimmers, group_list=group_list)
    else:
        msg = 'No swimmers found'
        conn.close()
        return render_template('swimmers.html', msg=msg, group_list=group_list)
@app.route('/swimmers/<string:id>')
@is_logged_in
def swimmer(id):
    """Swimmer detail page: profile, attendance, job history, and percent chart.

    Bug fix: the chart-building loop reused the name `percent`, so the
    attendance percentage computed above was overwritten by the last row of
    weekly_attendance_history before being passed to the template.
    """
    conn, cur = connection()
    form = EditAttendanceForm(request.form)
    result = cur.execute("SELECT * FROM swimmers WHERE id = %s", [id])
    swimmer = cur.fetchone()
    result2 = cur.execute("SELECT * FROM attendance WHERE id = %s", [id])
    attendance_d = cur.fetchall()
    result3 = cur.execute('SELECT * FROM jobs_done_history WHERE id = %s', [id])
    jobs = cur.fetchall()
    swimmerGroup = swimmer['training_group']
    swimmerTotal = swimmer['total']
    result4 = cur.execute('SELECT * FROM set_attendance WHERE training_group=%s', [swimmerGroup])
    if result4 == 0:
        percent = 'NaN'
    else:
        attendance = cur.fetchall()
        attendanceTotal = attendance[0]['total']
        if attendanceTotal == 0:
            # avoid divide-by-zero when no attendance has been recorded yet
            percent = 'NaN'
        else:
            percent = swimmerTotal / attendanceTotal * 100
    cur.execute('SELECT percent, date FROM weekly_attendance_history WHERE id = %s ', [id])
    data = cur.fetchall()
    # distinct names so the computed `percent` above is NOT clobbered
    datalst = [[row['date'].isoformat(), row['percent']] for row in data]
    conn.close()
    return render_template('swimmer.html', swimmer=swimmer, attendance_d=attendance_d, jobs=jobs, percent=percent,
                           data=datalst, form=form)
@app.route('/swimmers/training_group/<string:group>')
@is_logged_in
def training_group(group):
    """List the swimmers belonging to one training group.

    Fix: conn.close() sat after the return statements and was unreachable,
    leaking a connection per request; it now runs before rendering.
    """
    group_list = get_group_list()
    conn, cur = connection()
    result = cur.execute("SELECT * FROM swimmers WHERE training_group = %s", [group])
    swimmers = cur.fetchall()
    conn.close()
    if result > 0:
        return render_template('swimmers_group.html', swimmers=swimmers, group_list=group_list)
    else:
        msg = 'No swimmers found'
        return render_template('swimmers_group.html', msg=msg, group_list=group_list)
@app.route('/dashboard/training_group/<string:group>', methods=['GET', 'POST'])
@is_logged_in_admin
def group_dashboard(group):
    """Admin dashboard restricted to one training group.

    Fix: conn.close() sat after the return statements and was unreachable,
    leaking a connection per request; it now runs before rendering.
    """
    group_list = get_group_list()
    form = RemoveAttendanceForm(request.form)
    conn, cur = connection()
    result = cur.execute("SELECT * FROM swimmers WHERE training_group = %s", [group])
    swimmers = cur.fetchall()
    cur.execute("SELECT * FROM attendance_amounts ORDER BY amount ASC")
    amounts = cur.fetchall()
    cur.execute('SELECT * FROM default_values')
    default_values = cur.fetchall()
    conn.close()
    if result > 0:
        return render_template('group_dashboard.html', swimmers=swimmers, group=group, form=form, group_list=group_list,
                               amounts=amounts, default_values=default_values)
    else:
        msg = 'No swimmers found'
        return render_template('group_dashboard.html', msg=msg, group=group, form=form, group_list=group_list)
def username_already_registered(name):
    """Return True when a users row already has this username."""
    conn, cur = connection()
    count = cur.execute('SELECT * FROM users WHERE username = %s', [name])
    conn.close()
    return count > 0
def access_code_used(code):
    """Classify a swimmer access code.

    Returns:
        0 -- code is valid and not yet linked to any user account.
        1 -- code is valid but already linked to a user.
        2 -- code matches no swimmer.

    Fix: the connection was leaked on the invalid-code path (early return
    without conn.close()).
    """
    conn, cur = connection()
    result = cur.execute('SELECT id FROM swimmers WHERE code = %s', [code])
    if result > 0:
        ids = cur.fetchall()
        swimmer_id = ids[0]['id']
        result2 = cur.execute('SELECT * FROM users WHERE linked_swimmer = %s', [swimmer_id])
        conn.close()
        if result2 > 0:
            return 1
        else:
            return 0
    else:
        conn.close()
        return 2
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Register a new account: admin, super-admin, or swimmer-linked user.

    The admin table holds two codes: a submitted admin_code matching the
    first grants admin, the second grants super-admin; otherwise a valid,
    unused swimmer access code links the new account to that swimmer.

    NOTE(review): the locals isAdmin/isSuperAdmin shadow the module-level
    helpers of the same names; harmless here but confusing.
    NOTE(review): sha256_crypt.encrypt is the legacy passlib spelling of
    .hash — confirm the installed passlib version still provides it.
    """
    isAdmin = False
    isSuperAdmin = False
    conn, cur = connection()
    result = cur.execute("SELECT * FROM admin")
    data = cur.fetchall()
    # row 0 holds the admin code, row 1 the super-admin code
    code1 = data[0]['password']
    code2 = data[1]['password']
    # code = data['password']
    form = RegisterForm(request.form)
    if form.admin_code.data == code1:
        isAdmin = True
    elif form.admin_code.data == code2:
        isSuperAdmin = True
    conn.close()
    if request.method == 'POST' and form.validate() and isAdmin:
        name = form.name.data
        email = form.email.data
        username = form.username.data
        if username_already_registered(username):
            error = 'username is already taken'
            return render_template('register.html', form=form, error=error)
        password = sha256_crypt.encrypt(str(form.password.data))
        conn, cur = connection()
        cur.execute("INSERT INTO users(name, email, username, password, admin) VALUES(%s, %s, %s, %s, 1)",
                    (name, email, username, password))
        conn.commit()
        conn.close()
        flash('You are now registered', 'success')
        return redirect(url_for('login'))
    elif request.method == 'POST' and form.validate() and isSuperAdmin:
        name = form.name.data
        email = form.email.data
        username = form.username.data
        if username_already_registered(username):
            error = 'username is already taken'
            return render_template('register.html', form=form, error=error)
        password = sha256_crypt.encrypt(str(form.password.data))
        conn, cur = connection()
        cur.execute(
            "INSERT INTO users(name, email, username, password, admin, super_admin) VALUES(%s, %s, %s, %s, 1, 1)",
            (name, email, username, password))
        conn.commit()
        conn.close()
        flash('You are now registered', 'success')
        return redirect(url_for('login'))
    elif request.method == 'POST' and form.validate():
        # plain user: must supply a valid, unused swimmer access code
        name = form.name.data
        email = form.email.data
        username = form.username.data
        access_code = form.access_code.data
        if username_already_registered(username):
            error = 'username is already taken'
            return render_template('register.html', form=form, error=error)
        elif access_code_used(access_code) == 1:
            error = 'access code already used'
            return render_template('register.html', form=form, error=error)
        elif access_code_used(access_code) == 2:
            error = 'access code not valid'
            return render_template('register.html', form=form, error=error)
        password = sha256_crypt.encrypt(str(form.password.data))
        conn, cur = connection()
        cur.execute('SELECT id FROM swimmers WHERE code = %s', [access_code])
        ids = cur.fetchall()
        swimmer_id = ids[0]['id']
        cur.execute(
            "INSERT INTO users(name, email, username, password, linked_swimmer, admin) VALUES(%s, %s, %s, %s, %s, 0)",
            (name, email, username, password, swimmer_id))
        conn.commit()
        conn.close()
        flash('You are now registered', 'success')
        return redirect(url_for('login'))
    return render_template('register.html', form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in after verifying the stored password hash.

    Fix: the DB connection was never closed on any path, leaking one
    connection per login attempt.
    """
    if request.method == 'POST':
        username = request.form['username']
        password_candidate = request.form['password']
        conn, cur = connection()
        result = cur.execute("SELECT * FROM users WHERE username = %s", [username])
        if result > 0:
            data = cur.fetchone()
            password = data['password']
            conn.close()
            if sha256_crypt.verify(password_candidate, password):
                session['logged_in'] = True
                session['username'] = username
                flash('You are now logged in', 'success')
                return redirect(url_for('index'))
            else:
                error = 'Password does not match'
                return render_template('login.html', error=error)
        else:
            conn.close()
            error = 'Username not found'
            return render_template('login.html', error=error)
    return render_template('login.html')
@app.route('/dashboard')
@is_logged_in_admin
def dashboard():
    """Main admin dashboard: all swimmers, custom amounts, and default values.

    Fix: conn.close() sat after the return statements and was unreachable,
    leaking a connection per request; it now runs before rendering.
    """
    group_list = get_group_list()
    form = RemoveAttendanceForm(request.form)
    conn, cur = connection()
    result = cur.execute("SELECT * FROM swimmers")
    swimmers = cur.fetchall()
    cur.execute("SELECT * FROM attendance_amounts ORDER BY amount ASC")
    amounts = cur.fetchall()
    cur.execute('SELECT * FROM default_values')
    default_values = cur.fetchall()
    conn.close()
    if result > 0:
        return render_template('dashboard.html', swimmers=swimmers, form=form, group_list=group_list, amounts=amounts,
                               default_values=default_values)
    else:
        msg = 'No swimmers found'
        return render_template('dashboard.html', msg=msg, form=form, group_list=group_list)
def id_generator(size=9, chars=string.ascii_uppercase + string.digits):
    """Return a cryptographically random access code of `size` characters.

    Bug fix: the `chars` parameter was ignored — the body hard-coded
    string.ascii_uppercase + string.digits, so callers passing a custom
    alphabet silently got the default one.
    """
    return ''.join(random.SystemRandom().choice(chars) for _ in range(size))
@app.route('/add_swimmer', methods=['GET', 'POST'])
@is_logged_in_admin
def add_swimmer():
    """Create a swimmer with zero attendance and a fresh random access code."""
    form = SwimmerForm(request.form)
    if request.method == 'POST' and form.validate():
        name = form.name.data
        group = form.group.data
        code_string = id_generator()
        conn, cur = connection()
        cur.execute("INSERT INTO swimmers(name, total, training_group, code) VALUES(%s, 0, %s, %s)",
                    (name, group, code_string))
        conn.commit()
        conn.close()
        flash('Swimmer created', 'success')
        return redirect(url_for('dashboard'))
    return render_template('add_swimmer.html', form=form)
@app.route('/delete_swimmer/<string:id>', methods=['POST'])
@is_logged_in_admin
def delete_swimmer(id):
    """Delete a swimmer, keeping a copy in swimmer_limbo so the delete can be undone.

    swimmer_limbo acts as a one-slot undo buffer: any previous occupant is
    discarded before the newly deleted swimmer is parked there.
    """
    group_list = get_group_list()
    form = RemoveAttendanceForm(request.form)
    conn, cur = connection()
    cur.execute("SELECT * FROM attendance_amounts")
    amounts = cur.fetchall()
    count = cur.execute('SELECT * FROM swimmer_limbo')
    cur.execute('SELECT * FROM default_values')
    default_values = cur.fetchall()
    if count > 0:
        # evict the previous undo candidate
        cur.execute('DELETE FROM swimmer_limbo')
        conn.commit()
    cur.execute("INSERT INTO swimmer_limbo SELECT * FROM swimmers WHERE id=%s", [id])
    conn.commit()
    cur.execute("DELETE FROM swimmers WHERE id=%s", [id])
    conn.commit()
    cur.execute("DELETE FROM attendance WHERE id=%s", [id])
    conn.commit()
    cur.execute("SELECT * FROM swimmers")
    swimmers = cur.fetchall()
    conn.close()
    flash('Swimmer deleted', 'success')
    undo = 'Undo delete?'
    return render_template('dashboard.html', undo=undo, swimmers=swimmers, form=form, group_list=group_list,
                           amounts=amounts, default_values=default_values)
@app.route('/attending', methods=['POST'])
@is_logged_in_admin
def attending():
    """Mark a swimmer attending (AJAX): add `amount` to their total and to the here list.

    Returns JSON {id, amount} with the swimmer's new running total so the
    dashboard can update in place.
    """
    if request.method == 'POST':
        conn, cur = connection()
        amount = request.form['amount']
        group_id = request.form['group']
        id_value = request.form['id']
        cur.execute('SELECT total FROM swimmers WHERE id=%s', [id_value])
        current_amount = cur.fetchall()[0]['total']
        cur.execute("UPDATE swimmers SET attending = 1, total = total + %s WHERE id=%s", (float(amount), id_value))
        conn.commit()
        cur.execute("INSERT INTO attendance(id, amount) VALUES(%s, %s)", (id_value, float(amount)))
        conn.commit()
        # IGNORE keeps a swimmer already in `here` from being duplicated
        cur.execute(
            'INSERT IGNORE INTO here(id, name, job_total, training_group) SELECT id, name, job_total, training_group FROM swimmers WHERE attending = 1')
        conn.commit()
        conn.close()
        new_amount = current_amount + float(amount)
        return jsonify({'id': id_value, 'amount': new_amount})
@app.route('/logout')
def logout():
    """Clear the session and return to the home page."""
    session.clear()
    flash('You are now logged out', 'success')
    return redirect(url_for('index'))
@app.route('/reset_all_attending', methods=['POST'])
@is_logged_in_admin
def reset_all_attending():
    """Clear the attending flag for every swimmer and empty the here list."""
    conn, cur = connection()
    cur.execute('UPDATE swimmers SET attending = 0')
    conn.commit()
    cur.execute('DELETE FROM here')
    conn.commit()
    conn.close()
    return redirect(url_for('dashboard'))
@app.route('/reset_attending/<string:training_g>', methods=['POST'])
@is_logged_in_admin
def reset_attending(training_g):
    """Clear attending flags and the here list for a single training group."""
    conn, cur = connection()
    cur.execute('UPDATE swimmers SET attending = 0 WHERE training_group=%s', [training_g])
    conn.commit()
    cur.execute('DELETE FROM here WHERE training_group=%s', [training_g])
    conn.commit()
    conn.close()
    return redirect(url_for('group_dashboard', group=training_g))
@app.route('/here')
@is_logged_in_admin
def here():
    """Show the swimmers currently marked as attending.

    Fix: conn.close() sat after the return statements and was unreachable,
    leaking a connection per request; it now runs before rendering.
    """
    conn, cur = connection()
    result = cur.execute('SELECT * FROM here')
    swimmers = cur.fetchall()
    conn.close()
    if result > 0:
        return render_template('here.html', swimmers=swimmers, count=result)
    else:
        msg = 'No swimmers attending found'
        return render_template('here.html', msg=msg, count=result)
@app.route('/remove_here/<string:id>', methods=['POST'])
@is_logged_in_admin
def remove_here(id):
    """Remove a swimmer from the here list.

    The +5 job_total bump rewards a swimmer sent home early so they are less
    likely to be picked for jobs next time — presumably intentional; confirm.
    """
    conn, cur = connection()
    cur.execute('DELETE FROM here WHERE id=%s', [id])
    conn.commit()
    cur.execute('UPDATE swimmers SET attending = 0 WHERE id=%s', [id])
    conn.commit()
    cur.execute('UPDATE swimmers SET job_total = job_total + 5 WHERE id=%s', [id])
    conn.commit()
    conn.close()
    flash('Swimmer removed from here list', 'success')
    return redirect(url_for('here'))
@app.route('/add_job', methods=['GET', 'POST'])
@is_logged_in_admin
def add_job():
    """Create a job; at most one job may carry the dump flag at a time."""
    form = JobForm(request.form)
    if request.method == 'POST' and form.validate():
        name = form.name.data
        minimum = form.minimum.data
        difficulty = form.difficulty.data
        dump = form.dump.data
        conn, cur = connection()
        if dump:
            # demote any existing dump job before flagging the new one
            result = cur.execute('SELECT * FROM jobs WHERE dump=1')
            if result > 0:
                cur.execute('UPDATE jobs SET dump = 0 WHERE dump = 1')
                conn.commit()
            cur.execute("INSERT INTO jobs(name, minimum, difficulty, dump) VALUES(%s, %s, %s, %s)",
                        (name, int(minimum), int(difficulty), int(1)))
        else:
            cur.execute("INSERT INTO jobs(name, minimum, difficulty) VALUES(%s, %s, %s)",
                        (name, int(minimum), int(difficulty)))
        conn.commit()
        conn.close()
        flash('Job created', 'success')
        return redirect(url_for('jobs'))
    return render_template('add_job.html', form=form)
@app.route('/jobs')
@is_logged_in_admin
def jobs():
    """List all jobs.

    Fix: the connection was never closed — one leaked connection per view.
    """
    conn, cur = connection()
    cur.execute('SELECT * FROM jobs')
    jobs = cur.fetchall()
    conn.close()
    return render_template('jobs.html', jobs=jobs)
@app.route('/remove_job/<string:id>', methods=['POST'])
@is_logged_in_admin
def remove_job(id):
    """Delete one job row by id."""
    conn, cur = connection()
    cur.execute('DELETE FROM jobs WHERE id=%s', [id])
    conn.commit()
    conn.close()
    return redirect(url_for('jobs'))
@app.route('/choose_jobs')
@is_logged_in_admin
def choose_jobs():
    """Assign jobs to attending swimmers, easiest jobs first.

    Relies on get_min/get_available/choose_people from job_choosing:
    choose_people() is assumed to return an ordered list of [swimmer_id, ...]
    rows — TODO confirm against job_choosing.
    """
    if get_min() > get_available():
        return render_template('here.html', msg='not enough people')
    idList = choose_people()
    conn, cur = connection()
    cur.execute('SELECT * FROM jobs')
    jobs = cur.fetchall()
    jobList = []
    for i in range(len(jobs)):
        jobList.append([jobs[i]['id'], jobs[i]['name'], jobs[i]['minimum'], jobs[i]['difficulty']])
    # sort by minimum headcount so small jobs are filled first
    jobList.sort(key=itemgetter(2))
    start = 0
    for k in range(len(jobList)):
        jobId = jobList[k][0]
        jobName = jobList[k][1]
        jobAmount = jobList[k][3]
        jobMinimum = jobList[k][2] + start
        # take the next slice of chosen swimmers for this job
        for j in range(start, jobMinimum):
            selected = idList[j][0]
            cur.execute('INSERT IGNORE INTO jobs_done(id, job_name, job_id, amount) VALUES(%s, %s, %s, %s)',
                        (selected, jobName, jobId, jobAmount))
            conn.commit()
            cur.execute('INSERT INTO jobs_done_history(id, job_name, job_id, amount) VALUES(%s, %s, %s, %s)',
                        (selected, jobName, jobId, jobAmount))
            conn.commit()
            cur.execute('UPDATE swimmers SET job_total = job_total + %s WHERE id=%s', (int(jobAmount), selected))
            conn.commit()
        start = jobMinimum
    conn.close()
    return redirect(url_for('chosen_jobs'))
@app.route('/chosen_jobs')
@is_logged_in_admin
def chosen_jobs():
    """Show the current job assignments alongside the job and swimmer lists."""
    conn, cur = connection()
    cur.execute('SELECT * FROM jobs_done')
    chosenSwimmers = cur.fetchall()
    cur.execute('SELECT * FROM jobs')
    jobs = cur.fetchall()
    cur.execute('SELECT * FROM swimmers')
    swimmers = cur.fetchall()
    conn.close()
    return render_template('chosen_jobs.html', jobs=jobs, chosenSwimmers=chosenSwimmers, swimmers=swimmers)
@app.route('/delete_chosen')
@is_logged_in_admin
def delete_chosen():
    """Clear the current job assignments.

    Fix: the connection was never closed — one leaked connection per call.
    """
    conn, cur = connection()
    cur.execute('DELETE FROM jobs_done')
    conn.commit()
    conn.close()
    return redirect(url_for('dashboard'))
@app.route('/take_credit/<string:id>', methods=['POST'])
@is_logged_in_admin
def take_credit(id):
    """Subtract job credit from a swimmer, flooring job_total at 1."""
    if request.method == 'POST':
        conn, cur = connection()
        amount = request.form['amount']
        amount = int(amount)
        id_value = request.form['id']
        cur.execute('SELECT job_total FROM swimmers WHERE id = %s', [id_value])
        total = cur.fetchall()
        total = total[0]['job_total']
        if total - amount < 1:
            # never let job_total drop below 1
            cur.execute('UPDATE swimmers SET job_total = 1 WHERE id = %s', [id_value])
            conn.commit()
        else:
            cur.execute("UPDATE swimmers SET job_total = job_total - %s WHERE id=%s", (float(amount), id_value))
            conn.commit()
        conn.close()
        return redirect(url_for('dashboard'))
def update_percent():
    """Recompute the cached attendance percentage for every swimmer.

    Bug fix: when a group's attendance total was 0 the old code set
    percent = None and then crashed on float(None); None is now passed
    straight through so the column is set to SQL NULL instead.
    """
    conn, cur = connection()
    cur.execute('SELECT name, training_group, total, id FROM swimmers')
    swimmers = cur.fetchall()
    for row in swimmers:
        swimmerID = row['id']
        swimmerTotal = row['total']
        swimmerGroup = row['training_group']
        cur.execute('SELECT total FROM set_attendance WHERE training_group = %s', [swimmerGroup])
        attendance = cur.fetchall()
        attendanceTotal = attendance[0]['total']
        if attendanceTotal == 0:
            percent = None  # stored as NULL; avoids divide-by-zero
        else:
            percent = float(swimmerTotal / attendanceTotal * 100)
        cur.execute('UPDATE swimmers SET percent = %s WHERE id=%s', (percent, swimmerID))
        conn.commit()
    conn.close()
@app.route('/attendance')
@is_logged_in_admin
def attendance():
    """Show per-group attendance totals and their history.

    Fix: the connection was never closed — one leaked connection per view.
    """
    conn, cur = connection()
    cur.execute('SELECT * FROM set_attendance')
    attendance_totals = cur.fetchall()
    cur.execute('SELECT * FROM set_attendance_history')
    attendance_history = cur.fetchall()
    conn.close()
    return render_template('attendance.html', attendance_totals=attendance_totals,
                           attendance_history=attendance_history)
@app.route('/add_group', methods=['GET', 'POST'])
@is_logged_in_super_admin
def add_group():
    """Create a training group with a zero attendance total.

    Bug fix: `form.validate` was referenced without calling it; a bound
    method is always truthy, so validation was silently bypassed.
    """
    form = GroupForm(request.form)
    if request.method == 'POST' and form.validate():
        conn, cur = connection()
        name = form.name.data
        cur.execute('INSERT INTO set_attendance(training_group, total) VALUES(%s, 0)', [name])
        conn.commit()
        conn.close()
        return redirect(url_for('attendance'))
    return render_template('add_group.html', form=form)
@app.route('/add_attendance', methods=['GET', 'POST'])
@is_logged_in_admin
def add_attendance():
    """Record an attendance amount for a group and refresh cached percentages.

    Bug fix: `form.validate` was never called (a bound method is always
    truthy), so invalid submissions were accepted. The duplicated
    history-insert / update_percent / close tail of the two branches is
    also merged; the resulting DB state is identical.
    """
    form = AttendanceForm(request.form)
    if request.method == 'POST' and form.validate():
        conn, cur = connection()
        amount = form.amount.data
        group = form.group.data
        result = cur.execute('SELECT * FROM set_attendance WHERE training_group=%s', [group])
        if result == 0:
            # first attendance recorded for this group
            cur.execute('INSERT INTO set_attendance(training_group, total) VALUES(%s, %s)', (group, float(amount)))
        else:
            cur.execute('UPDATE set_attendance SET total = total + %s WHERE training_group = %s',
                        (float(amount), group))
        conn.commit()
        cur.execute('INSERT INTO set_attendance_history(training_group, amount) VALUES(%s, %s)',
                    (group, float(amount)))
        conn.commit()
        update_percent()
        conn.close()
        return redirect(url_for('attendance'))
    return render_template('add_attendance.html', form=form)
@app.route('/undo_delete')
@is_logged_in_admin
def undo_delete():
    """Restore the most recently deleted swimmer from the swimmer_limbo buffer."""
    conn, cur = connection()
    cur.execute('INSERT INTO swimmers SELECT * FROM swimmer_limbo')
    conn.commit()
    cur.execute('DELETE FROM swimmer_limbo')
    conn.commit()
    conn.close()
    return redirect(url_for('dashboard'))
@app.route('/archive')
@is_logged_in_super_admin
def archive_page():
    """Render the archive/reset landing page (super-admin only)."""
    return render_template('archive.html')
@app.route('/quote_of_the_day', methods=['GET', 'POST'])
@is_logged_in_super_admin
def quote_of_the_day():
    """Set the quote shown on the home page; only one quote is kept at a time."""
    form = QuoteForm(request.form)
    if request.method == 'POST' and form.validate():
        quote = form.body.data
        author = form.author.data
        conn, cur = connection()
        result = cur.execute('SELECT * FROM quote')
        if result > 0:
            # replace the previous quote
            cur.execute('DELETE FROM quote')
            conn.commit()
        cur.execute('INSERT INTO quote(quote_text, author) VALUES(%s, %s)', (quote, author))
        conn.commit()
        conn.close()
        return redirect(url_for('index'))
    return render_template('add_quote.html', form=form)
@app.route('/remove_attendance/<string:id>', methods=['GET', 'POST'])
@is_logged_in_admin
def remove_attendance(id):
    """Subtract attendance from a swimmer, logging a negative attendance row.

    Amounts above 9.99 are rejected outright; the total is floored at 0.
    """
    form = RemoveAttendanceForm(request.form)
    if request.method == 'POST' and form.validate():
        conn, cur = connection()
        amount = float(form.amount.data)
        if amount > 9.99:
            # sanity cap on a single removal
            return redirect(url_for('dashboard'))
        cur.execute('SELECT total FROM swimmers WHERE id = %s', [id])
        total = cur.fetchall()
        total = total[0]['total']
        if total - amount < 1:
            cur.execute('UPDATE swimmers SET total = 0, attending = 0 WHERE id = %s', [id])
            conn.commit()
            cur.execute('INSERT INTO attendance(id, amount) VALUES(%s, %s)', (id, amount * (-1)))
            conn.commit()
        else:
            cur.execute("UPDATE swimmers SET total = total - %s, attending = 0 WHERE id=%s", (amount, id))
            conn.commit()
            cur.execute('INSERT INTO attendance(id, amount) VALUES(%s, %s)', (id, amount * (-1)))
            conn.commit()
        conn.close()
        return redirect(url_for('dashboard'))
    return redirect(url_for('dashboard'))
@app.route('/custom_amounts')
@is_logged_in_admin
def custom_amounts():
    """List the configured custom attendance amounts.

    Fix: the connection was never closed — one leaked connection per view.
    """
    conn, cur = connection()
    result = cur.execute('SELECT * FROM attendance_amounts')
    amounts = cur.fetchall()
    conn.close()
    return render_template('custom_amounts.html', amounts=amounts)
@app.route('/add_custom_amounts', methods=['GET', 'POST'])
@is_logged_in_admin
def add_custom_amounts():
    """Add a custom attendance amount, rejecting duplicates.

    Fix: the connection was leaked when a duplicate amount caused an early
    return before conn.close().
    """
    form = CustomAmount(request.form)
    if request.method == 'POST' and form.validate():
        amount = float(form.amount.data)
        conn, cur = connection()
        result = cur.execute('SELECT * FROM attendance_amounts')
        if result > 0:
            amounts = cur.fetchall()
            for row in amounts:
                if row['amount'] == amount:
                    conn.close()
                    error = 'amount already added'
                    return render_template('add_custom_amount.html', error=error)
        cur.execute('INSERT INTO attendance_amounts(amount) VALUES(%s)', [amount])
        conn.commit()
        conn.close()
        return redirect(url_for('custom_amounts'))
    return render_template('add_custom_amount.html', form=form)
@app.route('/remove_amount/<string:id>', methods=['POST'])
@is_logged_in_admin
def remove_amount(id):
    """Delete one custom attendance amount by id."""
    conn, cur = connection()
    cur.execute('DELETE FROM attendance_amounts WHERE id=%s', [id])
    conn.commit()
    conn.close()
    return redirect(url_for('custom_amounts'))
@app.route('/add_default_values', methods=['GET', 'POST'])
@is_logged_in_admin
def add_default_values():
    """Set (or replace) the default attendance value for a training group.

    Fix: conn.close() sat after the return statements and was never reached,
    leaking a connection per POST; the two branches shared their INSERT and
    are merged — the resulting DB state is identical.
    """
    form = DefaultValue(request.form)
    if request.method == 'POST' and form.validate():
        conn, cur = connection()
        amount = float(form.amount.data)
        group = form.group.data
        result = cur.execute('SELECT * FROM default_values WHERE training_group=%s', [group])
        if result > 0:
            # replace the existing default for this group
            cur.execute('DELETE FROM default_values WHERE training_group=%s', [group])
            conn.commit()
        cur.execute('INSERT INTO default_values(training_group, value) VALUES(%s, %s)', (group, amount))
        conn.commit()
        conn.close()
        return redirect(url_for('default_values'))
    return render_template('add_default_values.html', form=form)
@app.route('/default_values')
@is_logged_in_admin
def default_values():
    """List the per-group default attendance values."""
    conn, cur = connection()
    cur.execute('SELECT * FROM default_values')
    values = cur.fetchall()
    conn.close()
    return render_template('default_values.html', values=values)
@app.route('/remove_default_value/<string:id>', methods=['POST'])
@is_logged_in_admin
def remove_default_value(id):
    """Delete one per-group default value by id."""
    conn, cur = connection()
    cur.execute('DELETE FROM default_values WHERE id=%s', [id])
    conn.commit()
    conn.close()
    return redirect(url_for('default_values'))
@app.route('/configure')
@is_logged_in_admin
def configure():
    """Show the current user's default dashboard group.

    Fix: the connection was never closed — one leaked connection per view.
    """
    conn, cur = connection()
    idnum = get_session_id()
    cur.execute('SELECT default_group FROM users WHERE id=%s', [idnum])
    group = cur.fetchall()[0]['default_group']
    conn.close()
    return render_template('configure.html', group=group)
@app.route('/set_default_group', methods=['GET', 'POST'])
@is_logged_in_admin
def set_default_group():
    """Persist the user's preferred default dashboard group."""
    form = DefaultGroup(request.form)
    if request.method == 'POST' and form.validate():
        conn, cur = connection()
        idnum = get_session_id()
        group = form.group.data
        cur.execute('UPDATE users SET default_group=%s WHERE id=%s', (group, idnum))
        conn.commit()
        conn.close()
        return redirect(url_for('configure'))
    return render_template('set_default_group.html', form=form)
@app.context_processor
def change_dashboard():
    """Inject the user's default group into every template ('none' when logged out).

    Fix: the connection was never closed — this runs on every template
    render, so the leak was one connection per page view.
    """
    if 'logged_in' in session:
        conn, cur = connection()
        cur.execute('SELECT default_group FROM users WHERE id=%s', [get_session_id()])
        navbar_group = cur.fetchall()[0]['default_group']
        conn.close()
        return dict(navbar_group=navbar_group)
    else:
        return dict(navbar_group='none')
@app.route('/edit/<string:idn>', methods=['GET', 'POST'])
@is_logged_in_admin
def edit(idn):
    """Change a swimmer's training group, then return to their old group's dashboard.

    Security fix: the decorators were stacked as @is_logged_in_admin above
    @app.route. Decorators apply bottom-up, so app.route registered the
    *unwrapped* view with Flask and the admin check never ran on requests.
    @app.route must be outermost.
    """
    form = EditSwimmer(request.form)
    if request.method == 'POST':
        conn, cur = connection()
        group = form.group.data
        cur.execute('SELECT * FROM swimmers WHERE id=%s', [idn])
        redirect_group = cur.fetchall()[0]['training_group']
        cur.execute('UPDATE swimmers SET training_group=%s WHERE id=%s', (group, idn))
        conn.commit()
        conn.close()
        return redirect(url_for('group_dashboard', group=redirect_group))
    return render_template('edit_swimmer.html', form=form)
@app.route('/tutorial')
@is_logged_in
def tutorial():
    """Show the tutorial GIFs page.

    Security fix: @is_logged_in was stacked above @app.route, so Flask
    registered the unwrapped view and the login check never ran.
    """
    return render_template('gifs.html')
@app.route('/change_attendance/<string:idn>/<string:datestr>', methods=['GET', 'POST'])
@is_logged_in_admin
def change_attendance(idn, datestr):
    """Edit one attendance entry (by swimmer id and date).

    Security fix: @is_logged_in_admin was stacked above @app.route, so
    Flask registered the unwrapped view and the admin check never ran.
    """
    form = EditAttendanceForm(request.form)
    if request.method == 'POST':
        conn, cur = connection()
        amount = form.amount.data
        cur.execute('UPDATE attendance SET amount=%s WHERE id=%s AND date=%s', (amount, idn, datestr))
        conn.commit()
        conn.close()
        return redirect(url_for('swimmer', id=idn))
    return redirect(url_for('swimmer', id=idn))
@app.route('/oauth2callback')
def oauth2callback():
    """Google OAuth2 dance: redirect to consent, then exchange the code.

    On the first hit (no ?code=) the user is sent to Google's consent
    screen; Google redirects back here with ?code=, which is exchanged for
    credentials stored (as JSON) in the session.
    """
    flow = client.flow_from_clientsecrets(
        'client_secret_local.json',
        scope='https://www.googleapis.com/auth/spreadsheets https://www.googleapis.com/auth/drive',
        redirect_uri=url_for('oauth2callback', _external=True)
    )
    flow.params['include_granted_scopes'] = 'true'
    # flow.params['access_type'] = 'offline'
    if 'code' not in request.args:
        auth_uri = flow.step1_get_authorize_url()
        return redirect(auth_uri)
    else:
        auth_code = request.args.get('code')
        credentials = flow.step2_exchange(auth_code)
        session['credentials'] = credentials.to_json()
        return redirect(url_for('archive_page'))
@app.route('/login_to_google')
@is_logged_in_super_admin
def login_to_google():
    """Ensure valid Google credentials exist in the session, then go to the archive page."""
    if 'credentials' not in session:
        return redirect(url_for('oauth2callback'))
    credentials = client.OAuth2Credentials.from_json(session['credentials'])
    if credentials.access_token_expired:
        return redirect(url_for('oauth2callback'))
    return redirect(url_for('archive_page'))
@app.route('/archive/<string:id>')
@is_logged_in_super_admin
def archive(id):
    """Reset one category of season data, selected by the `id` path segment.

    Supported ids: job_total, attendance_total, group_attendance,
    weekly_attendance_history; anything else falls through to the dashboard.
    """
    conn, cur = connection()
    if id == 'job_total':
        # job_total floors at 1 elsewhere, so the reset value is 1, not 0
        cur.execute('UPDATE swimmers SET job_total = 1, attending = 0')
        conn.commit()
        conn.close()
        return redirect(url_for('archive_page'))
    elif id == 'attendance_total':
        cur.execute('UPDATE swimmers SET total = 0, attending = 0')
        conn.commit()
        conn.close()
        return redirect(url_for('archive_page'))
    elif id == 'group_attendance':
        cur.execute('UPDATE set_attendance SET total = 0')
        conn.commit()
        cur.execute('DELETE FROM set_attendance_history')
        conn.commit()
        conn.close()
        return redirect(url_for('archive_page'))
    elif id == 'weekly_attendance_history':
        cur.execute('DELETE FROM weekly_attendance_history')
        conn.commit()
        conn.close()
        return redirect(url_for('archive_page'))
    else:
        # NOTE(review): the connection opened above is leaked on this path
        return redirect(url_for('dashboard'))
def export_to_sheets(name, data):
    """Create a new Google Sheet titled `name` and write `data` into it.

    `data` must be a Sheets values-update request body (a dict with a
    "values" key) — TODO confirm against the callers' payloads. Redirects
    to the OAuth flow when credentials are missing or expired.
    """
    if 'credentials' not in session:
        return redirect(url_for('oauth2callback'))
    credentials = client.OAuth2Credentials.from_json(session['credentials'])
    if credentials.access_token_expired:
        return redirect(url_for('oauth2callback'))
    http_auth = credentials.authorize(httplib2.Http())
    SHEETS = discovery.build('sheets', 'v4', http_auth)
    head = {'properties': {'title': str(name)}}
    res = SHEETS.spreadsheets().create(body=head).execute()
    SHEET_ID = res['spreadsheetId']
    file_id = str(SHEET_ID)
    print(SHEET_ID)
    print('Created "%s"' % res['properties']['title'])
    SHEETS.spreadsheets().values().update(spreadsheetId=SHEET_ID,
                                          range='A1', body=data, valueInputOption='RAW').execute()
    # print('Wrote data to Sheet:')
    #rows = SHEETS.spreadsheets().values().get(spreadsheetId=SHEET_ID,
    #                                          range='Sheet1').execute().get('values', [])
@app.route('/weekly_attendance/<string:training_group>')
@is_logged_in_super_admin
def weekly_attendance(training_group):
    """Export a group's weekly attendance to Google Sheets, then clear the staging table."""
    if 'credentials' not in session:
        return redirect(url_for('oauth2callback'))
    credentials = client.OAuth2Credentials.from_json(session['credentials'])
    if credentials.access_token_expired:
        return redirect(url_for('oauth2callback'))
    conn, cur = connection()
    export_to_sheets('Attendance [%s] [%s]' % (training_group, time.ctime()), get_weekly_attendance(training_group))
    cur.execute('DELETE FROM weekly_attendance')
    conn.commit()
    conn.close()
    return redirect(url_for('archive_page'))
def get_weekly_attendance(training_group):
    """Build the weekly attendance payload for *training_group* in the
    Google Sheets 'values' format.

    Each swimmer's attended total is compared against the group's session
    total from set_attendance; a percent of None (rendered as 'None%')
    marks groups with no recorded sessions.  Delegates to
    get_end_attendance() when training_group == 'all'.

    Side effects: fills weekly_attendance, archives it into
    weekly_attendance_history, then clears the working table.
    """
    conn, cur = connection()
    # make selection between all the training groups or just one
    if training_group == 'all':
        conn.close()  # fix: this connection previously leaked on the 'all' path
        return get_end_attendance()
    cur.execute('SELECT * FROM swimmers WHERE training_group = %s', [training_group])
    swimmers = cur.fetchall()
    for swimmer in swimmers:
        swimmer_group = swimmer['training_group']
        swimmer_total = swimmer['total']
        # execute() returns the row count; 0 means the group has no
        # attendance record at all
        found = cur.execute('SELECT * FROM set_attendance WHERE training_group=%s', [swimmer_group])
        if found == 0:
            attendance_total = 0
        else:
            attendance_total = cur.fetchall()[0]['total']
        if attendance_total == 0:
            # no sessions recorded -> no percentage (avoids divide by zero)
            percent = None
        else:
            percent = swimmer_total / attendance_total * 100
        # single INSERT replaces three duplicated branches
        cur.execute(
            'INSERT INTO weekly_attendance(id, name, percent, total, attendance_total) VALUES(%s, %s, %s, %s, %s)',
            (int(swimmer['id']), swimmer['name'],
             None if percent is None else float(percent),
             float(swimmer_total), float(attendance_total)))
        conn.commit()
    # headers at top of spreadsheet
    FIELDS = ('Name', 'Percent', 'Total', 'A_Total')
    cur.execute('SELECT name, percent, total, attendance_total FROM weekly_attendance')
    newRows = [FIELDS]
    for row in cur.fetchall():
        # build each row explicitly in FIELDS order instead of relying on
        # (unordered) dict iteration plus reversal; percent becomes text
        newRows.append([row['name'], str(row['percent']) + '%',
                        row['total'], row['attendance_total']])
    data = {'values': newRows}
    # archive this week's numbers before wiping the working table
    cur.execute(
        'INSERT INTO weekly_attendance_history(id, name, percent, total, attendance_total) SELECT id, name, percent, total, attendance_total FROM weekly_attendance')
    conn.commit()
    cur.execute('DELETE FROM weekly_attendance')
    conn.commit()
    conn.close()
    return data
@app.route('/end_season_attendance')
@is_logged_in_super_admin
def end_season_attendance():
    """Export the end-of-season attendance for all groups to a new
    Google Sheet, clear season_attendance and return to the archive page.
    """
    # connects to google if not connected already
    if 'credentials' not in session:
        return redirect(url_for('oauth2callback'))
    credentials = client.OAuth2Credentials.from_json(session['credentials'])
    if credentials.access_token_expired:
        return redirect(url_for('oauth2callback'))
    conn, cur = connection()
    export_to_sheets('End of season attendance [%s]' % time.ctime(), get_end_attendance())
    # NOTE(review): get_end_attendance already clears season_attendance;
    # this DELETE looks redundant -- confirm before removing either one.
    cur.execute('DELETE FROM season_attendance')
    conn.commit()
    conn.close()
    return redirect(url_for('archive_page'))
def get_end_attendance():
    """Build the end-of-season attendance payload for every training
    group as one Google Sheets 'values' dict.

    The sheet contains one titled section per training group (header row,
    field names, swimmer rows, four blank spacer rows).  A percent of
    None (rendered 'None%') marks groups with no recorded sessions.

    Side effects: fills and then clears the season_attendance table.
    """
    conn, cur = connection()
    cur.execute('SELECT * FROM swimmers')
    swimmers = cur.fetchall()
    cur.execute('SELECT training_group FROM set_attendance')
    groups = cur.fetchall()
    for swimmer in swimmers:
        swimmer_group = swimmer['training_group']
        swimmer_total = swimmer['total']
        # execute() returns the row count; 0 -> group has no attendance row
        found = cur.execute('SELECT * FROM set_attendance WHERE training_group=%s', [swimmer_group])
        if found == 0:
            attendance_total = 0
        else:
            attendance_total = cur.fetchall()[0]['total']
        if attendance_total == 0:
            # no sessions recorded -> no percentage (avoids divide by zero)
            percent = None
        else:
            percent = swimmer_total / attendance_total * 100
        # single INSERT replaces three duplicated branches
        cur.execute(
            'INSERT INTO season_attendance(id, name, training_group, percent, total, attendance_total) VALUES(%s, %s, %s, %s, %s, %s)',
            (int(swimmer['id']), swimmer['name'], swimmer_group,
             None if percent is None else float(percent),
             float(swimmer_total), float(attendance_total)))
        conn.commit()
    # de-duplicate group names while preserving their original order
    # (fix: duplicate set_attendance rows used to produce duplicate sections)
    groupList = []
    for entry in groups:
        if entry['training_group'] not in groupList:
            groupList.append(entry['training_group'])
    # top of each per-group section in the spreadsheet
    FIELDS = ('Name', 'Percent', 'Total', 'A_Total')
    spacer = ('',) * len(FIELDS)
    finalRows = []
    for group in groupList:
        cur.execute('SELECT name, percent, total, attendance_total FROM season_attendance WHERE training_group = %s',
                    [group])
        # section title row: group name padded with empty cells
        finalRows.append((group,) + ('',) * (len(FIELDS) - 1))
        finalRows.append(FIELDS)
        for row in cur.fetchall():
            # build the row explicitly in FIELDS order instead of relying
            # on (unordered) dict iteration plus reversal
            finalRows.append([row['name'], str(row['percent']) + '%',
                              row['total'], row['attendance_total']])
        # four blank rows separate group sections
        for _ in range(4):
            finalRows.append(spacer)
    data = {'values': [row for row in finalRows]}
    cur.execute('DELETE FROM season_attendance')
    conn.commit()
    conn.close()
    return data
def get_access_codes():
    """Return every swimmer's name and access code, sorted by name, as a
    Google Sheets 'values' payload with a header row."""
    conn, cur = connection()
    cur.execute('SELECT name, code FROM swimmers ORDER BY name ASC')
    swimmer_codes = cur.fetchall()
    conn.close()  # fix: the connection was previously never closed
    FIELDS = ('Name', 'Access Code')
    rows = [[entry['name'], entry['code']] for entry in swimmer_codes]
    rows.insert(0, FIELDS)
    return {'values': rows}
@app.route('/print_access_codes')
@is_logged_in_super_admin
def print_access_codes():
    """Export every swimmer's access code to a Google Sheet and return
    to the archive page."""
    # connects to google if not connected already
    if 'credentials' not in session:
        return redirect(url_for('oauth2callback'))
    credentials = client.OAuth2Credentials.from_json(session['credentials'])
    if credentials.access_token_expired:
        # token present but stale -- re-run the OAuth flow
        return redirect(url_for('oauth2callback'))
    export_to_sheets('Swimmer Access Codes', get_access_codes())
    return redirect(url_for('archive_page'))
# comment out this when on local machine
if __name__ == '__main__':
    # development entry point; debug=True enables the reloader/debugger
    # and must not be used in production
    app.run(debug=True)
# comment in this when pushing to webserver
# app.secret_key = str(os.urandom(24))
| |
# -*- coding: utf-8 -*-
#
from . import color as mycol
from . import path as mypath
def draw_line2d(data, obj):
    '''Returns the PGFPlots code for a Line2D environment.

    Translates the line's width, color, opacity, line style and marker
    settings into \\addplot options and emits the x/y samples as an
    inline PGFPlots table.  Returns the (possibly updated) ``data`` dict
    and a list of generated code fragments.
    '''
    content = []
    addplot_options = []
    # If line is of length 0, do nothing. Otherwise, an empty \addplot table
    # will be created, which will be interpreted as an external data source
    # in either the file '' or '.tex'. Instead, render nothing.
    if len(obj.get_xdata()) == 0:
        return data, []
    # get the linewidth (in pt)
    line_width = _mpl_linewidth2pgfp_linewidth(data, obj.get_linewidth())
    if line_width:
        addplot_options.append(line_width)
    # get line color
    color = obj.get_color()
    data, line_xcolor, _ = mycol.mpl_color2xcolor(data, color)
    addplot_options.append(line_xcolor)
    alpha = obj.get_alpha()
    if alpha is not None:
        # emit transparency as an explicit opacity option
        addplot_options.append('opacity=%r' % alpha)
    show_line, linestyle = _mpl_linestyle2pgfp_linestyle(obj.get_linestyle())
    if show_line and linestyle:
        addplot_options.append(linestyle)
    marker_face_color = obj.get_markerfacecolor()
    marker_edge_color = obj.get_markeredgecolor()
    data, marker, extra_mark_options = \
        _mpl_marker2pgfp_marker(data, obj.get_marker(), marker_face_color)
    if marker:
        addplot_options.append('mark=' + marker)
        mark_size = obj.get_markersize()
        if mark_size:
            # setting half size because pgfplots counts the radius/half-width
            pgf_size = int(0.5 * mark_size)
            # make sure we didn't round off to zero by accident
            if pgf_size == 0 and mark_size != 0:
                pgf_size = 1
            addplot_options.append('mark size=%d' % pgf_size)
        mark_every = obj.get_markevery()
        if mark_every:
            # NOTE(review): assumes markevery is an integer stride; mpl
            # also allows lists/slices/floats, which '%d' would reject --
            # confirm against upstream callers.
            addplot_options.append('mark repeat=%d' % mark_every)
        mark_options = ['solid']
        if extra_mark_options:
            mark_options.append(extra_mark_options)
        if marker_face_color in [None, 'none']:
            # unfilled marker
            mark_options.append('fill opacity=0')
        else:
            data, face_xcolor, _ = mycol.mpl_color2xcolor(
                data,
                marker_face_color
            )
            # only emit fill when it differs from the line color
            if face_xcolor != line_xcolor:
                mark_options.append('fill=' + face_xcolor)
        face_and_edge_have_equal_color = \
            marker_edge_color == marker_face_color
        # Sometimes, the colors are given as arrays. Collapse them into a
        # single boolean.
        try:
            face_and_edge_have_equal_color = \
                all(face_and_edge_have_equal_color)
        except TypeError:
            pass
        if not face_and_edge_have_equal_color:
            data, draw_xcolor, _ = mycol.mpl_color2xcolor(
                data,
                marker_edge_color
            )
            if draw_xcolor != line_xcolor:
                mark_options.append('draw=' + draw_xcolor)
        addplot_options.append('mark options={%s}' % ','.join(mark_options))
    if marker and not show_line:
        addplot_options.append('only marks')
    # process options
    content.append('\\addplot ')
    if addplot_options:
        options = ', '.join(addplot_options)
        content.append('[' + options + ']\n')
    content.append('table {%\n')
    # nschloe, Oct 2, 2015:
    # The transform call yields warnings and it is unclear why. Perhaps
    # the input data is not suitable? Anyhow, this should not happen.
    # Comment out for now.
    # xdata, ydata = _transform_to_data_coordinates(obj, *obj.get_data())
    xdata, ydata = obj.get_data()
    try:
        has_mask = ydata.mask.any()
    except AttributeError:
        # plain ndarray/list: no mask attribute present
        has_mask = 0
    if has_mask:
        # matplotlib jumps at masked images, while PGFPlots by default
        # interpolates. Hence, if we have a masked plot, make sure that
        # PGFPlots jumps as well.
        data['extra axis options'].add('unbounded coords=jump')
        for (x, y, is_masked) in zip(xdata, ydata, ydata.mask):
            if is_masked:
                # 'nan' makes PGFPlots break the line here
                content.append('%.15g nan\n' % x)
            else:
                content.append('%.15g %.15g\n' % (x, y))
    else:
        for (x, y) in zip(xdata, ydata):
            content.append('%.15g %.15g\n' % (x, y))
    content.append('};\n')
    return data, content
def draw_linecollection(data, obj):
    '''Returns Pgfplots code for a number of patch objects.

    Each path of the collection is rendered with its own edge color,
    dash style and width; collections carrying fewer attribute entries
    than paths fall back to the first entry for the remainder.
    '''
    content = []

    edgecolors = obj.get_edgecolors()
    linestyles = obj.get_linestyles()
    linewidths = obj.get_linewidths()

    def per_path(values, index):
        # per-path attribute when available, shared first entry otherwise
        return values[index] if index < len(values) else values[0]

    for index, path in enumerate(obj.get_paths()):
        color = per_path(edgecolors, index)
        style = per_path(linestyles, index)
        width = per_path(linewidths, index)

        data, options = mypath.get_draw_options(data, color, None)

        width_option = _mpl_linewidth2pgfp_linewidth(data, width)
        if width_option:
            options.append(width_option)

        # linestyle is a string or dash tuple. Legal string values are
        # solid|dashed|dashdot|dotted. The dash tuple is (offset, onoffseq)
        # where onoffseq is an even length tuple of on and off ink in points.
        #
        # solid: [(None, None), (None, None), ..., (None, None)]
        # dashed: (0, (6.0, 6.0))
        # dotted: (0, (1.0, 3.0))
        # dashdot: (0, (3.0, 5.0, 1.0, 5.0))
        if style[0] is not None:
            assert isinstance(style, tuple)
            onoff = tuple(int(value) for value in style[1])
            if len(onoff) == 2:
                options.append('dash pattern=on %dpt off %dpt' % onoff)
            else:
                assert len(onoff) == 4
                options.append(
                    'dash pattern=on %dpt off %dpt on %dpt off %dpt' % onoff)

        # TODO what about masks?
        data, cont = mypath.draw_path(
            obj, data, path,
            draw_options=options,
            simplify=False
        )
        content.append(cont)

    return data, content
def _mpl_linewidth2pgfp_linewidth(data, line_width):
if data['strict']:
# Takes the matplotlib linewidths, and just translate them
# into PGFPlots.
try:
return TIKZ_LINEWIDTHS[line_width]
except KeyError:
# explicit line width
return 'line width=%spt' % line_width
else:
# The following is an alternative approach to line widths.
# The default line width in matplotlib is 1.0pt, in PGFPlots 0.4pt
# ('thin').
# Match the two defaults, and scale for the rest.
scaled_line_width = line_width / 1.0 # scale by default line width
if scaled_line_width == 0.25:
return 'ultra thin'
elif scaled_line_width == 0.5:
return 'very thin'
elif scaled_line_width == 1.0:
pass # PGFPlots default line width, 'thin'
elif scaled_line_width == 1.5:
return 'semithick'
elif scaled_line_width == 2:
return 'thick'
elif scaled_line_width == 3:
return 'very thick'
elif scaled_line_width == 4:
return 'ultra thick'
else:
# explicit line width
return 'line width=%rpt' % (0.4 * line_width)
# for matplotlib markers, see: http://matplotlib.org/api/markers_api.html
_MP_MARKER2PGF_MARKER = {
'.': '*', # point
'o': 'o', # circle
'+': '+', # plus
'x': 'x', # x
'None': None,
' ': None,
'': None
}
# the following markers are only available with PGF's plotmarks library
_MP_MARKER2PLOTMARKS = {
'v': ('triangle', 'rotate=180'), # triangle down
'1': ('triangle', 'rotate=180'),
'^': ('triangle', None), # triangle up
'2': ('triangle', None),
'<': ('triangle', 'rotate=270'), # triangle left
'3': ('triangle', 'rotate=270'),
'>': ('triangle', 'rotate=90'), # triangle right
'4': ('triangle', 'rotate=90'),
's': ('square', None),
'p': ('pentagon', None),
'*': ('asterisk', None),
'h': ('star', None), # hexagon 1
'H': ('star', None), # hexagon 2
'd': ('diamond', None), # diamond
'D': ('diamond', None), # thin diamond
'|': ('|', None), # vertical line
'_': ('-', None) # horizontal line
}
def _mpl_marker2pgfp_marker(data, mpl_marker, marker_face_color):
'''Translates a marker style of matplotlib to the corresponding style
in PGFPlots.
'''
# try default list
try:
pgfplots_marker = _MP_MARKER2PGF_MARKER[mpl_marker]
if (marker_face_color is not None) and pgfplots_marker == 'o':
pgfplots_marker = '*'
data['pgfplots libs'].add('plotmarks')
marker_options = None
return (data, pgfplots_marker, marker_options)
except KeyError:
pass
# try plotmarks list
try:
data['pgfplots libs'].add('plotmarks')
pgfplots_marker, marker_options = _MP_MARKER2PLOTMARKS[mpl_marker]
if marker_face_color is not None and \
marker_face_color.lower() != 'none' and \
pgfplots_marker not in ['|', '-']:
pgfplots_marker += '*'
return (data, pgfplots_marker, marker_options)
except KeyError:
pass
# There's no equivalent for the pixel marker in Pgfplots.
if mpl_marker == ',':
print('Unsupported marker '','' (pixel).')
return (data, None, None)
_MPLLINESTYLE_2_PGFPLOTSLINESTYLE = {
'': None,
'None': None,
'none': None, # happens when using plt.boxplot()
'-': None,
':': 'dotted',
'--': 'dashed',
'-.': 'dash pattern=on 1pt off 3pt on 3pt off 3pt'
}
def _mpl_linestyle2pgfp_linestyle(line_style):
'''Translates a line style of matplotlib to the corresponding style
in PGFPlots.
'''
show_line = (line_style != 'None')
style = _MPLLINESTYLE_2_PGFPLOTSLINESTYLE[line_style]
return show_line, style
# def _transform_to_data_coordinates(obj, xdata, ydata):
# '''The coordinates might not be in data coordinates, but could be partly
# in axes coordinates. For example, the matplotlib command
# axes.axvline(2)
# will have the y coordinates set to 0 and 1, not to the limits. Therefore,
# a two-stage transform has to be applied:
# 1. first transforming to display coordinates, then
# 2. from display to data.
# In case of problems (non-invertible, or whatever), print a warning and
# continue anyways.
# '''
# try:
# import matplotlib.transforms
# points = numpy.array(zip(xdata, ydata))
# transform = matplotlib.transforms.composite_transform_factory(
# obj.get_transform(),
# obj.axes.transData.inverted()
# )
# points_data = transform.transform(points)
# xdata, ydata = zip(*points_data)
# except Exception as e:
# print(xdata, ydata)
# print(('Problem during transformation:\n' +
# ' %s\n' +
# 'Continuing with original data.')
# % e
# )
# return (xdata, ydata)
# Exact matplotlib pt widths mapped to named PGF/TikZ line widths;
# consulted by _mpl_linewidth2pgfp_linewidth when data['strict'] is set.
TIKZ_LINEWIDTHS = {0.1: 'ultra thin',
                   0.2: 'very thin',
                   0.4: 'thin',
                   0.6: 'semithick',
                   0.8: 'thick',
                   1.2: 'very thick',
                   1.6: 'ultra thick'
                   }
| |
import requests
import demjson
from demjson import JSONDecodeError
import datetime
import time
import pytz
import pymongo
import threading
from pymongo import MongoClient
import logging
import ne_testprep
import bd_testprep
import sf_testprep
import ne_scikit
import bd_scikit
import sf_scikit
# Log file for this background worker; DEBUG and above, timestamped.
LOG_FILENAME = 'TTOBackgroundLogs.log'
logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG,format='%(asctime)s, %(levelname)s, %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
# SECURITY: database credentials are hard-coded in the connection string;
# move them into an environment variable or a config file kept out of
# version control.
uri ='mongodb://rajeevtto:radiostud@ds035315-a0.mongolab.com:35315,ds035315-a1.mongolab.com:35315/newttobackground?replicaSet=rs-ds035315'
client = MongoClient(uri)
newttobackground = client.newttobackground  # database handle
ttobgcoll = newttobackground.ttobgcoll  # raw per-route API snapshots
# Maximum documents kept per route before the oldest are rolled over.
count = 4320
# How many oldest documents to delete per rollover pass.
Limit = 1
# Index position doubles as the numeric day code (1=Sunday ... 7=Saturday).
Day_List = ['','Sunday','Monday','Tuesday','Wednesday','Thursday','Friday','Saturday']
mq_directions_key = "" # Your mapquest given api key
mq_traffic_key = "" # Your mapquest given api key
'''****************************************************************************************
Function Name : function_routenewarkedison (Algorithm operation)
Description : Function used get the data from api calls and push the data to mongodb
and intiatiates the testdata prep and scikit algorithm
****************************************************************************************'''
def function_routenewarkedison():
'''
ROUTE --> GardenStatePkwy,Newark,NJ,USA to Edison,NJ,USA <--
'''
global count,Limit
while True:
newarkedison_time = datetime.datetime.now(pytz.timezone('US/Eastern'))
newarkedison_dayname = newarkedison_time.strftime("%A")
newarkedison_hour = int(newarkedison_time.strftime("%H"))
newarkedison_minute = int(newarkedison_time.strftime("%M"))
newarkedison_second = int(newarkedison_time.strftime("%S"))
newarkedison_year = int(newarkedison_time.strftime("%Y"))
newarkedison_month = int(newarkedison_time.strftime("%m"))
newarkedison_day = int(newarkedison_time.strftime("%d"))
ne_calc_minute = newarkedison_minute%10
#ne_calc_second = 60-newarkedison_second
newark_edison_directions = "NONE"
newark_weather = "NONE"
edison_weather = "NONE"
newarkedison_incidents = "NONE"
if (ne_calc_minute == 0):
newarkedison_loctime = datetime.datetime(newarkedison_year,newarkedison_month,newarkedison_day,newarkedison_hour,newarkedison_minute,newarkedison_second)
newarkedison_delayFromFreeFlow = []
newarkedison_delayFromTypical = []
nelist_id = []
netime = []
neroute = "NEWARK-EDISON"
necity1 = "NEWARK"
necity2 = "EDISON"
neretry1 = 0
neretry2 = 0
neretry3 = 0
neretry4 = 0
try:
# DIRECTIONS API
while(neretry1 <=2):
try:
newark_edison_api = requests.get('http://www.mapquestapi.com/directions/v2/route?key='+mq_directions_key+'&from=40.731507,-74.174388&to=40.525525,-74.388231&doReverseGeocode=false')
logging.info( "ne directions api status %s and reason %s "%(newark_edison_api.status_code,newark_edison_api.reason))
neretry1 = 3
newark_edison_data = newark_edison_api.text
newark_edison_directions = demjson.decode(newark_edison_data)
except requests.exceptions.ConnectionError as cne_directions:
neretry1 = neretry1+1
logging.error( "%s,%s is the conn-exception occured at %s for the route %s"%(cne_directions,type(cne_directions),newarkedison_time,neroute))
except requests.exceptions.HTTPError as hne_directions:
logging.error( "%s,%s is the http-exception occured at %s for the route %s"%(hne_directions,type(hne_directions),newarkedison_time,neroute))
except requests.exceptions.ReadTimeout as rne_directions:
logging.error( "%s,%s is the readtimeout-exception occured at %s for the route %s"%(rne_directions,type(rne_directions),newarkedison_time,neroute))
except requests.exceptions.Timeout as tne_directions:
logging.error( "%s,%s is the timeout-exception occured at %s for the route %s"%(tne_directions,type(tne_directions),newarkedison_time,neroute))
except Exception as ene_directions:
logging.error( "%s,%s is the exception occured at %s for the route %s"%(ene_directions,type(ene_directions),newarkedison_time,neroute))
except demjson.JSONDecodeError as nejsonerror_directions:
logging.error( "the directions json error exception %s %s for the route %s at %s " %(nejsonerror_directions,type(nejsonerror_directions),neroute,newarkedison_time))
if (newark_edison_api.status_code==200):
if 'info' in newark_edison_directions:
del newark_edison_directions['info']
else:
pass
# WEATHER API
while(neretry2 <=2):
try:
newarkweather_api = requests.get("https://query.yahooapis.com/v1/public/yql?q=select item.condition from weather.forecast where woeid= 2459299&format=json")
logging.info( "newark weather api status %s and reason %s"%(newarkweather_api.status_code,newarkweather_api.reason))
neretry2 = 3
newarkweather_data = newarkweather_api.text
newark_weather = demjson.decode(newarkweather_data)
except requests.exceptions.ConnectionError as cn_weather:
neretry2 = neretry2+1
logging.error( "%s,%s is the conn-exception occured at %s for the city %s"%(cn_weather,type(cn_weather),newarkedison_time,necity1))
except requests.exceptions.HTTPError as hn_weather:
logging.error( "%s,%s is the conn-exception occured at %s for the city %s"%(hn_weather,type(hn_weather),newarkedison_time,necity1))
except requests.exceptions.ReadTimeout as rn_weather:
logging.error( "%s,%s is the readtimeout-exception occured at %s for the city %s"%(rn_weather,type(rn_weather),newarkedison_time,necity1))
except requests.exceptions.Timeout as tn_weather:
logging.error( "%s,%s is the timeout-exception occured at %s for the city %s"%(tn_weather,type(tn_weather),newarkedison_time,necity1))
except Exception as en_weather:
logging.error( "%s,%s is the exception occured at %s for the city %s"%(en_weather,type(en_weather),newarkedison_time,necity1))
except demjson.JSONDecodeError as njsonerror_weather:
logging.error( "the weather json error exception %s %s for city %s at %s " %(njsonerror_weather,type(njsonerror_weather),necity1,newarkedison_time))
while (neretry3 <=2):
try:
edisonweather_api = requests.get("https://query.yahooapis.com/v1/public/yql?q=select item.condition from weather.forecast where woeid=56250394&format=json")
logging.info( "edison weather api status %s and reason %s"%(edisonweather_api.status_code,edisonweather_api.reason))
neretry3 = 3
edisonweather_data = edisonweather_api.text
edison_weather = demjson.decode(edisonweather_data)
except requests.exceptions.ConnectionError as ce_weather:
neretry3 = neretry3+1
logging.error( "%s,%s is the conn-exception occured at %s for the city %s"%(ce_weather,type(ce_weather),newarkedison_time,necity2))
except requests.exceptions.HTTPError as he_weather:
logging.error( "%s,%s is the http-exception occured at %s for the city %s"%(he_weather,type(he_weather),newarkedison_time,necity2))
except requests.exceptions.ReadTimeout as re_weather:
logging.error( "%s,%s is the readtimeout-exception occured at %s for the city %s"%(re_weather,type(re_weather),newarkedison_time,necity2))
except requests.exceptions.Timeout as te_weather:
logging.error( "%s,%s is the exception occured at %s for the city %s"%(te_weather,type(te_weather),newarkedison_time,necity2))
except Exception as ee_weather:
logging.error( "%s,%s is the exception occured at %s for the city %s"%(ee_weather,type(ee_weather),newarkedison_time,necity2))
except demjson.JSONDecodeError as ejsonerror_weather:
logging.error( "the weather json error exception %s %s for city %s at %s " %(ejsonerror_weather,type(ejsonerror_weather),necity2,newarkedison_time))
# TRAFFIC API
while (neretry4<=2):
try:
newarkedison_incidents_api = requests.get('http://www.mapquestapi.com/traffic/v2/incidents?key='+mq_traffic_key+'&boundingBox=40.7467527,-74.2154055,40.5382588,-74.4480655&filters=construction,incidents,congestion,events&inFormat=kvp&outFormat=json')
logging.info( "ne incidents api status %s and reason %s"%(newarkedison_incidents_api.status_code,newarkedison_incidents_api.reason))
neretry4=3
newarkedison_data = newarkedison_incidents_api.text
newarkedison_incidents = demjson.decode(newarkedison_data)
newarkedison_length = len(newarkedison_incidents['incidents'])
except requests.exceptions.ConnectionError as cne_incidents:
neretry4 = neretry4+1
logging.error( "%s,%s is the conn-exception occured at %s for the route %s"%(cne_incidents,type(cne_incidents),newarkedison_time,neroute))
except requests.exceptions.HTTPError as hne_incidents:
logging.error( "%s,%s is the http-exception occured at %s for the route %s"%(hne_incidents,type(hne_incidents),newarkedison_time,neroute))
except requests.exceptions.ReadTimeout as rne_incidents:
logging.error( "%s,%s is the readtimeout-exception occured at %s for the route %s"%(rne_incidents,type(rne_incidents),newarkedison_time,neroute,newarkedison_time,neroute))
except requests.exceptions.Timeout as tne_incidents:
logging.error( "%s,%s is the timeout-exception occured at %s for the route %s"%(tne_incidents,type(tne_incidents),newarkedison_time,neroute))
except Exception as ene_incidents:
logging.error( "%s,%s is the exception occured at %s for the route %s"%(ene_incidents,type(ene_incidents),newarkedison_time,neroute))
except demjson.JSONDecodeError as nejsonerror_incidents:
logging.error( "the incidents json error exception %s %s for the route %s at %s " % (nejsonerror_incidents,type(nejsonerror_incidents),neroute,newarkedison_time))
if (newarkedison_incidents_api.status_code==200):
if 'info' in newarkedison_incidents:
del newarkedison_incidents['info']
for i in range(newarkedison_length):
newarkedison_delayFromFreeFlow.append(float(newarkedison_incidents["incidents"][i]["delayFromFreeFlow"]))
newarkedison_delayFromTypical.append(float(newarkedison_incidents["incidents"][i]["delayFromTypical"]))
for i in range(newarkedison_length):
newarkedison_incidents["incidents"][i]["delayFromTypical"] = newarkedison_delayFromFreeFlow[i]
newarkedison_incidents["incidents"][i]["delayFromFreeFlow"] = newarkedison_delayFromTypical[i]
else:
pass
except Exception as nemainException:
newark_edison_directions = "NONE"
newark_weather = "NONE"
edison_weather = "NONE"
newarkedison_incidents = "NONE"
logging.error( "%s,%s is the exception occured for nemainException"%(nemainException,type(nemainException)))
ne_flag = False
try:
if (newark_edison_directions == 'NONE' or newark_edison_directions['route']['realTime'] >10000000 or newark_edison_directions['route']['realTime'] == 'None'or newark_edison_directions['route']['distance'] == 'None' or newark_edison_directions['route']['time'] == 'None' or newark_weather == 'NONE' or newark_weather.has_key('error') or newark_weather['query']['results'] == None or newark_weather['query'] == None or newark_weather['query']['results']['channel']['item']['condition']['code'] == 3200 or edison_weather == 'NONE' or edison_weather.has_key('error') or edison_weather['query']['results'] == None or edison_weather['query'] == None or edison_weather['query']['results']['channel']['item']['condition']['code'] == 3200):
ne_flag = False
else:
ne_flag = True
except Exception as flagcheckexception:
logging.error( "%s,%s is the exception occured for flagcheckexception"%(flagcheckexception,type(flagcheckexception)))
newarkedison_doc = {
"route":"NEWARK-EDISON",
"recorddate":newarkedison_loctime,
"recorddayname":newarkedison_dayname,
"directions":newark_edison_directions,
"weather":[newark_weather,edison_weather],
"traffic":newarkedison_incidents,
"Flag":ne_flag
}
newarkedison_docid = ttobgcoll.insert_one(newarkedison_doc)
# printing the time after completion of the tasks
logging.info( "Received newarkedison data by %s"%(str(datetime.datetime.now(pytz.timezone('US/Eastern')))))
# getting the number of documents in the db
try:
necnt = newttobackground.ttobgcoll.find({"route":"NEWARK-EDISON"}).count()
if (necnt>count):
for newarkedison_doc in ttobgcoll.find({"route":"NEWARK-EDISON"}).sort('recorddate',pymongo.ASCENDING).limit(Limit):
nelist_id.append(newarkedison_doc['_id'])
netime.append(newarkedison_doc['recorddate'])
logging.info( "NEWARK-EDISON")
logging.info( nelist_id)
logging.info( netime)
for neid in nelist_id:
newttobackground.ttobgcoll.remove({"_id":neid})
del nelist_id
del netime
except Exception as nerollover_error:
logging.error( "%s,%s is the rollover exception occured at %s for the route %s "%(nerollover_error,type(nerollover_error),newarkedison_time,neroute))
del newarkedison_delayFromFreeFlow
del newarkedison_delayFromTypical
ne_cursor = newttobackground.ttobgcoll.find({"route":"NEWARK-EDISON","recorddate":newarkedison_loctime})
Limit = 1
try:
for neval in ne_cursor:
if (neval['Flag'] == True):
n_t = float(neval['weather'][0]['query']['results']['channel']['item']['condition']['temp'])
n_w = int(neval['weather'][0]['query']['results']['channel']['item']['condition']['code'])
e_t = float(neval['weather'][1]['query']['results']['channel']['item']['condition']['temp'])
e_w = int(neval['weather'][1]['query']['results']['channel']['item']['condition']['code'])
v1 = int(neval['recorddate'].strftime("%H"))
v2 = int(neval['recorddate'].strftime("%M"))
v1 = (v1)*6
v2 = (v2+10)/10
zone = v1+v2
for day in Day_List:
if neval['recorddayname'] == day:
codedday = Day_List.index(day)
newarkedison_doc = {
"route":"NEWARK-EDISON",
"Date":neval['recorddate'],
"Day":neval['recorddayname'],
"Temparature": [n_t,e_t],
"CodedWeather" : [n_w,e_w],
"CodedDay":codedday,
"Zone":zone,
"realTime":neval['directions']['route']['realTime']
}
newarkedison_docid = newttobackground.ttoopvalcoll.insert_one(newarkedison_doc)
else:
pass
except Exception as e:
logging.error("ttoopvalcoll upload error in newarkedison %s,%s"(e,type(e)))
'''INDUCE TIME PROGRAM'''
induceTime = []
induceWeather = []
induceTemparature = []
try:
cnt = newttobackground.ttoinducecoll.find({"route":"NEWARK-EDISON"}).count()
if cnt >0:
cursor = newttobackground.ttoinducecoll.find({"route":"NEWARK-EDISON"})
for doc in cursor:
induceTime.append(doc['induceTime'])
induceWeather.append([doc['induceWeather'][0],doc['induceWeather'][1]])
induceTemparature.append([doc['induceTemparature'][0],doc['induceTemparature'][1]])
else:
pass
except Exception as e:
print e
ne_df,netestprep_return = ne_testprep.nealgo(newarkedison_loctime,induceTime,induceWeather,induceTemparature)
if (netestprep_return == True):
nescikit_return = ne_scikit.ne_scikitalgo(ne_df)
else:
pass
idlist = []
if (newttobackground.ttoinducecoll.find({"route":"NEWARK-EDISON"}).count() > 0):
cursor = newttobackground.ttoinducecoll.find({"route":"NEWARK-EDISON"})
for doc in cursor:
idlist.append(doc['_id'])
newttobackground.ttoinducecoll.remove({'_id':{"$in":idlist}}) # Dangerous line
else:
pass
time.sleep(580-newarkedison_second)
'''****************************************************************************************
Function Name : function_routebrooklyndenville (Algorithm operation)
Description : Function used get the data from api calls and push the data to mongodb
and intiatiates the testdata prep and scikit algorithm
****************************************************************************************'''
def function_routebrooklyndenville():
'''
ROUTE -->AT-Avenue,Brooklyn,NY,USA to Denville,NJ,USA<--
'''
#client = MongoClient(uri)
global count,Limit
while True:
brooklyndenville_time = datetime.datetime.now(pytz.timezone('US/Eastern'))
brooklyndenville_dayname = brooklyndenville_time.strftime("%A")
brooklyndenville_hour = int(brooklyndenville_time.strftime("%H"))
brooklyndenville_minute = int(brooklyndenville_time.strftime("%M"))
brooklyndenville_second = int(brooklyndenville_time.strftime("%S"))
brooklyndenville_year = int(brooklyndenville_time.strftime("%Y"))
brooklyndenville_month = int(brooklyndenville_time.strftime("%m"))
brooklyndenville_day = int(brooklyndenville_time.strftime("%d"))
bd_calc_minute = brooklyndenville_minute%10
#bd_calc_second = 60-brooklyndenville_second
brooklyn_denville_directions = "NONE"
brooklyn_weather = "NONE"
denville_weather = "NONE"
brooklyndenville_incidents = "NONE"
if (bd_calc_minute == 0):
brooklyndenville_loctime = datetime.datetime(brooklyndenville_year,brooklyndenville_month,brooklyndenville_day,brooklyndenville_hour,brooklyndenville_minute,brooklyndenville_second)
brooklyndenville_delayFromFreeFlow = []
brooklyndenville_delayFromTypical = []
bdlist_id = []
bdtime = []
bdroute = "BROOKLYN-DENVILLE"
bdcity1 = "BROOKLYN"
bdcity2 = "DENVILLE"
bdretry1= 0
bdretry2 =0
bdretry3 =0
bdretry4 =0
#logging.info( brooklyndenville_time
try:
#DIRECTIONS API
while (bdretry1<=2):
try:
brooklyn_denville_api = requests.get('http://www.mapquestapi.com/directions/v2/route?key='+mq_directions_key+'&from=40.692529,-73.990996&to=40.889066,-74.4786&doReverseGeocode=false')
logging.info( "bd directions api status %s and reason %s"%(brooklyn_denville_api.status_code,brooklyn_denville_api.reason))
bdretry1 = 3
brooklyn_denville_data = brooklyn_denville_api.text
brooklyn_denville_directions = demjson.decode(brooklyn_denville_data)
except requests.exceptions.ConnectionError as cbd_directions:
bdretry1 = bdretry1+1
logging.error( "%s,%s is the conn-exception occured at %s for the route %s"%(cbd_directions,type(cbd_directions),brooklyndenville_time,bdroute))
except requests.exceptions.HTTPError as hbd_directions:
logging.error( "%s,%s is the http-exception occured at %s for the route %s"%(hbd_directions,type(hbd_directions),brooklyndenville_time,bdroute))
except requests.exceptions.ReadTimeout as rbd_directions:
logging.error( "%s,%s is the readtimeout-exception occured at %s for the route %s"%(rbd_directions,type(rbd_directions),brooklyndenville_time,bdroute))
except requests.exceptions.Timeout as tbd_directions:
logging.error( "%s,%s is the timeout-exception occured at %s for the route %s"%(tbd_directions,type(tbd_directions),brooklyndenville_time,bdroute))
except Exception as ebd_directions:
logging.error( ",%s,%s is the exception occured at %s for the route %s"%(ebd_directions,type(ebd_directions),brooklyndenville_time,bdroute))
except demjson.JSONDecodeError as bdjsonerror_directions:
#logging.error( "the error %s %s"%(bdjsonerror_directions,type(bdjsonerror_directions))
logging.error( "the directions json error %s %s for the route %s at %s"%(bdjsonerror_directions,type(bdjsonerror_directions),bdroute,brooklyndenville_time))
if (brooklyn_denville_api.status_code == 200):
if 'info' in brooklyn_denville_directions:
del brooklyn_denville_directions['info']
else:
pass
#WEATHER API
while (bdretry2<=2):
try:
brooklynweather_api = requests.get("https://query.yahooapis.com/v1/public/yql?q=select item.condition from weather.forecast where woeid=2459115&format=json")
logging.info( "brooklyn weather api status %s and reason %s "%(brooklynweather_api.status_code,brooklynweather_api.reason))
bdretry2 = 3
brooklynweather_data = brooklynweather_api.text
brooklyn_weather = demjson.decode(brooklynweather_data)
except requests.exceptions.ConnectionError as cb_weather:
bdretry2 = bdretry2+1
logging.error( "%s,%s is the conn-exception occured at %s for the city %s"%(cb_weather,type(cb_weather),brooklyndenville_time,bdcity1))
except requests.exceptions.HTTPError as hb_weather:
logging.error( "%s,%s is the http-exception occured at %s for the city %s"%(hb_weather,type(hb_weather),brooklyndenville_time,bdcity1))
except requests.exceptions.ReadTimeout as rb_weather:
logging.error( "%s,%s is the readtimeout-exception occured at %s for the city %s"%(rb_weather,type(rb_weather),brooklyndenville_time,bdcity1))
except requests.exceptions.Timeout as tb_weather:
logging.error( "%s,%s is the timeout-exception occured at %s for the city %s"%(tb_weather,type(tb_weather),brooklyndenville_time,bdcity1))
except Exception as eb_weather:
logging.error( "%s,%s is the exception occured at %s for the city %s"%(eb_weather,type(eb_weather),brooklyndenville_time,bdcity1))
except demjson.JSONDecodeError as bjsonerror_weather:
logging.error( "the weather json error %s %s for city %s at %s"%(bjsonerror_weather,type(bjsonerror_weather),bdcity1,brooklyndenville_time))
while(bdretry3<=2):
try:
denvilleweather_api= requests.get("https://query.yahooapis.com/v1/public/yql?q=select item.condition from weather.forecast where woeid=2391338&format=json")
logging.info( "denville weather api status %s and reason %s"%(denvilleweather_api.status_code,denvilleweather_api.reason))
bdretry3 = 3
denvilleweather_data = denvilleweather_api.text
denville_weather = demjson.decode(denvilleweather_data)
except requests.exceptions.ConnectionError as cd_weather:
bdretry3= bdretry3+1
logging.error( "%s,%s is the conn-exception occured at %s for the city %s"%(cd_weather,type(cd_weather),brooklyndenville_time,bdcity2))
except requests.exceptions.HTTPError as hd_weather:
logging.error( "%s,%s is the http-exception occured at %s for the city %s"%(hd_weather,type(hd_weather),brooklyndenville_time,bdcity2))
except requests.exceptions.ReadTimeout as rd_weather:
logging.error( "%s,%s is the readtimeout-exception occured at %s for the city %s"%(rd_weather,type(rd_weather),brooklyndenville_time,bdcity2))
except requests.exceptions.Timeout as td_weather:
logging.error( "%s,%s is the timeout-exception occured at %s for the city %s"%(td_weather,type(td_weather),brooklyndenville_time,bdcity2))
except Exception as ed_weather:
logging.error( "%s,%s is the exception occured at %s for the city %s"%(ed_weather,type(ed_weather),brooklyndenville_time,bdcity2))
except demjson.JSONDecodeError as ejsonerror_weather:
logging.error( "the weather json error %s %s for city %s at %s"%(ejsonerror_weather,type(ejsonerror_weather),bdcity2,brooklyndenville_time))
#TRAFFIC API
while(bdretry4<=2):
try:
brooklyndenville_incidents_api=requests.get('http://www.mapquestapi.com/traffic/v2/incidents?key='+mq_traffic_key+'&boundingBox=40.6529731,-73.9461878,40.8842586,-74.5561109&filters=construction,incidents,congestion,events&inFormat=kvp&outFormat=json')
logging.info( "bd incidents api status %s and reason %s"%(brooklyndenville_incidents_api.status_code,brooklyndenville_incidents_api.reason))
bdretry4=3
brooklyndenville_data = brooklyndenville_incidents_api.text
brooklyndenville_incidents = demjson.decode(brooklyndenville_data)
brooklyndenville_length = len(brooklyndenville_incidents['incidents'])
except requests.exceptions.ConnectionError as cbd_incidents:
bdretry4=bdretry4+1
logging.error( "%s,%s is the conn-exception occured at %s for the route %s"%(cbd_incidents,type(cbd_incidents),brooklyndenville_time,bdroute))
except requests.exceptions.HTTPError as hbd_incidents:
logging.error( "%s,%s is the http-exception occured at %s for the route %s"%(hbd_incidents,type(hbd_incidents),brooklyndenville_time,bdroute))
except requests.exceptions.ReadTimeout as rbd_incidents:
logging.error( "%s,%s is the readtimeout-exception occured at %s for the route %s"%(rbd_incidents,type(rbd_incidents),brooklyndenville_time,bdroute))
except requests.exceptions.Timeout as tbd_incidents:
logging.error( "%s,%s is the timeout-exception occured at %s for the route %s"%(tbd_incidents,type(tbd_incidents),brooklyndenville_time,bdroute))
except Exception as ebd_incidents:
logging.error( "%s,%s is the exception occured at %s for the route %s"%(ebd_incidents,type(ebd_incidents),brooklyndenville_time,bdroute))
except demjson.JSONDecodeError as bdjsonerror_incidents:
logging.error( "the incidents json error %s %s for the route %s at %s"%(bdjsonerror_incidents,type(bdjsonerror_incidents),bdroute,brooklyndenville_time))
if (brooklyndenville_incidents_api.status_code==200):
if 'info' in brooklyndenville_incidents:
del brooklyndenville_incidents['info']
for i in range(0,brooklyndenville_length):
brooklyndenville_delayFromFreeFlow.append(float(brooklyndenville_incidents["incidents"][i]["delayFromFreeFlow"]))
brooklyndenville_delayFromTypical.append(float(brooklyndenville_incidents["incidents"][i]["delayFromTypical"]))
for i in range(0,brooklyndenville_length):
brooklyndenville_incidents["incidents"][i]["delayFromTypical"] = brooklyndenville_delayFromFreeFlow[i]
brooklyndenville_incidents["incidents"][i]["delayFromFreeFlow"] = brooklyndenville_delayFromTypical[i]
else:
pass
except Exception as bdmainException:
logging.error( "%s,%s is the exception occured for bdmainException"%(bdmainException,type(bdmainException)))
brooklyn_denville_directions = "NONE"
brooklyn_weather = "NONE"
denville_weather = "NONE"
brooklyndenville_incidents = "NONE"
bd_flag = False
try:
if (brooklyn_denville_directions == 'NONE' or brooklyn_denville_directions['route']['realTime'] > 10000000 or brooklyn_denville_directions['route']['realTime'] == 'None'or brooklyn_denville_directions['route']['distance'] == 'None' or brooklyn_denville_directions['route']['time'] == 'None' or brooklyn_weather == 'NONE' or brooklyn_weather.has_key('error') or brooklyn_weather['query']['results'] == None or brooklyn_weather['query'] == None or brooklyn_weather['query']['results']['channel']['item']['condition']['code'] == 3200 or denville_weather == 'NONE' or denville_weather.has_key('error') or denville_weather['query']['results'] == None or denville_weather['query'] == None or denville_weather['query']['results']['channel']['item']['condition']['code'] == 3200):
bd_flag = False
else:
bd_flag = True
except Exception as flagcheckexception:
logging.error( "%s,%s is the exception occured for flagcheckexception"%(flagcheckexception,type(flagcheckexception)))
brooklyndenville_doc = {
"route":"BROOKLYN-DENVILLE",
"recorddate":brooklyndenville_loctime,
"recorddayname" :brooklyndenville_dayname,
"directions":brooklyn_denville_directions,
"weather":[brooklyn_weather,denville_weather],
"traffic":brooklyndenville_incidents,
"Flag":bd_flag
}
brooklyndenville_docid = ttobgcoll.insert_one(brooklyndenville_doc)
logging.info( "Received brooklyndenville data by%s"%(str(datetime.datetime.now(pytz.timezone('US/Eastern')))))
try:
bdcnt = newttobackground.ttobgcoll.find({"route":"BROOKLYN-DENVILLE"}).count()
if (bdcnt > count):
for brooklyndenville_doc in newttobackground.ttobgcoll.find({"route":"BROOKLYN-DENVILLE"}).sort('recorddate',pymongo.ASCENDING).limit(Limit):
bdlist_id.append(brooklyndenville_doc['_id'])
bdtime.append(brooklyndenville_doc['recorddate'])
logging.info("BROOKLYN-DENVILLE")
logging.info(bdlist_id)
logging.info(bdtime)
for bdid in bdlist_id:
# logging.info(bdid)
newttobackground.ttobgcoll.remove({"_id":bdid})
del bdlist_id
del bdtime
except Exception as bdrollover_error:
logging.error( "%s,%s is the rollover exception occured at %s for the route %s"%(bdrollover_error,type(bdrollover_error),brooklyndenville_time,bdroute))
del brooklyndenville_delayFromFreeFlow
del brooklyndenville_delayFromTypical
Limit = 1
bd_cursor = newttobackground.ttobgcoll.find({"route":"BROOKLYN-DENVILLE","recorddate":brooklyndenville_loctime})
try:
for bdval in bd_cursor:
if (bdval['Flag'] == True):
b_t = float(bdval['weather'][0]['query']['results']['channel']['item']['condition']['temp'])
b_w = int(bdval['weather'][0]['query']['results']['channel']['item']['condition']['code'])
d_t = float(bdval['weather'][1]['query']['results']['channel']['item']['condition']['temp'])
d_w = int(bdval['weather'][1]['query']['results']['channel']['item']['condition']['code'])
v1 = int(bdval['recorddate'].strftime("%H"))
v2 = int(bdval['recorddate'].strftime("%M"))
v1 = (v1)*6
v2 = (v2+10)/10
zone = v1+v2
Day_List = ['','Sunday','Monday','Tuesday','Wednesday','Thursday','Friday','Saturday']
for day in Day_List:
if bdval['recorddayname'] == day:
codedday = Day_List.index(day)
brooklyndenville_doc = {
"route":"BROOKLYN-DENVILLE",
"Date":bdval['recorddate'],
"Day":bdval['recorddayname'],
"Temparature": [b_t,d_t],
"CodedWeather" : [b_w,d_w],
"CodedDay":codedday,
"Zone":zone,
"realTime":bdval['directions']['route']['realTime']
}
brooklyndenville_docid = newttobackground.ttoopvalcoll.insert_one(brooklyndenville_doc)
else:
pass
except Exception as e:
logging.error("ttoopvalcoll upload error in brooklyndenville %s,%s"%(e,type(e)))
'''INDUCE TIME PROGRAM'''
induceTime = []
induceWeather = []
induceTemparature = []
try:
cnt = newttobackground.ttoinducecoll.find({"route":"BROOKLYN-DENVILLE"}).count()
if cnt >0:
cursor = newttobackground.ttoinducecoll.find({"route":"BROOKLYN-DENVILLE"})
for doc in cursor:
induceTime.append(doc['induceTime'])
induceWeather.append([doc['induceWeather'][0],doc['induceWeather'][1]])
induceTemparature.append([doc['induceTemparature'][0],doc['induceTemparature'][1]])
else:
pass
except Exception as e:
print e
bd_df,bdtestprep_return = bd_testprep.bdalgo(brooklyndenville_loctime,induceTime,induceWeather,induceTemparature)
if (bdtestprep_return == True):
bdscikit_return = bd_scikit.bd_scikitalgo(bd_df)
else:
pass
idlist = []
if (newttobackground.ttoinducecoll.find({"route":"BROOKLYN-DENVILLE"}).count() > 0):
cursor = newttobackground.ttoinducecoll.find({"route":"BROOKLYN-DENVILLE"})
for doc in cursor:
idlist.append(doc['_id'])
newttobackground.ttoinducecoll.remove({'_id':{"$in":idlist}}) # Dangerous line
else:
pass
time.sleep(580-brooklyndenville_second)
'''****************************************************************************************
Function Name : function_routesanfrancisco (Algorithm operation)
Description : Function used to get the data from api calls and push the data to mongodb,
and initiates the test-data prep and scikit algorithm
****************************************************************************************'''
def function_routesanfrancisco():
'''
ROUTE --> Mount zion Radiology,1600 Divisadero St,SF,USA to
SF General Hospital and Trauma Center,1001 Potrero Ave,San Francisco,CA 94110,USA<--
'''
global count,Limit
while True:
sanfrancisco_time = datetime.datetime.now(pytz.timezone('US/Pacific'))
sanfrancisco_dayname = sanfrancisco_time.strftime("%A")
sanfrancisco_hour = int(sanfrancisco_time.strftime("%H"))
sanfrancisco_minute = int(sanfrancisco_time.strftime("%M"))
sanfrancisco_second = int(sanfrancisco_time.strftime("%S"))
sanfrancisco_year = int(sanfrancisco_time.strftime("%Y"))
sanfrancisco_month =int(sanfrancisco_time.strftime("%m"))
sanfrancisco_day = int(sanfrancisco_time.strftime("%d"))
sf_calc_minute = sanfrancisco_minute%10
#sf_calc_second = 60-sanfrancisco_second
sanfrancisco_directions = "NONE"
sanfrancisco_weather = "NONE"
sanfrancisco_incidents = "NONE"
if (sf_calc_minute == 0):
sanfrancisco_loctime = datetime.datetime(sanfrancisco_year,sanfrancisco_month,sanfrancisco_day,sanfrancisco_hour,sanfrancisco_minute,sanfrancisco_second)
sfroute = "MOUNTZION RADIOLOGY CENTER-SF GENERAL HOSPITAL"
sfcity = "SANFRANCISCO"
sanfrancisco_delayFromFreeFlow = []
sanfrancisco_delayFromTypical = []
sflist_id = []
sftime = []
sfretry1 = 0
sfretry2 = 0
sfretry3 = 0
try:
while (sfretry1 <=2):
try:
sanfrancisco_api = requests.get('http://www.mapquestapi.com/directions/v2/route?key='+mq_directions_key+'&from=37.786452,-122.440168&to=37.749202,-122.41575&doReverseGeocode=false')
logging.info( "sf directions api status %s and reason %s "%(sanfrancisco_api.status_code,sanfrancisco_api.reason))
sfretry1 = 3
sanfrancisco_data = sanfrancisco_api.text
sanfrancisco_directions = demjson.decode(sanfrancisco_data)
except requests.exceptions.ConnectionError as csf_directions:
sfretry1 = sfretry1+1
logging.error( "%s,%s is the conn-exception occured at %s for the route %s"%(csf_directions,type(csf_directions),sanfrancisco_time,sfroute))
except requests.exceptions.HTTPError as hsf_directions:
logging.error( "%s,%s is the http-exception occured at %s for the route %s"%(hsf_directions,type(hsf_directions),sanfrancisco_time,sfroute))
except requests.exceptions.ReadTimeout as rsf_directions:
logging.error( "%s,%s is the readtimeout-exception occured at %s for the route %s"%(rsf_directions,type(rsf_directions),sanfrancisco_time,sfroute))
except requests.exceptions.Timeout as tsf_directions:
logging.error( "%s,%s is the timeout-exception occured at %s for the route %s"%(tsf_directions,type(tsf_directions),sanfrancisco_time,sfroute))
except Exception as esf_directions:
logging.error( "%s,%s is the exception occured at %s for the route %s"%(esf_directions,type(esf_directions),sanfrancisco_time,sfroute))
except demjson.JSONDecodeError as sfjsonerror_directions:
logging.error( "the directions json error %s %s for the route %s at %s"%(sfjsonerror_directions,type(sfjsonerror_directions),sfroute,sanfrancisco_time))
if (sanfrancisco_api.status_code ==200):
if 'info' in sanfrancisco_directions:
del sanfrancisco_directions['info']
else:
pass
while (sfretry2<=2):
try:
sanfranciscoweather_api = requests.get("https://query.yahooapis.com/v1/public/yql?q=select item.condition from weather.forecast where woeid=2487956&format=json")
logging.info( "sf weather api status %s and reason %s"%(sanfranciscoweather_api.status_code,sanfranciscoweather_api.reason))
sfretry2 = 3
sanfranciscoweather_data = sanfranciscoweather_api.text
sanfrancisco_weather = demjson.decode(sanfranciscoweather_data)
except requests.exceptions.ConnectionError as csf_weather:
sfretry2 = sfretry2+1
logging.error( "%s,%s is the conn-exception occured at %s for the route %s"%(csf_weather,type(csf_weather),sanfrancisco_time,sfcity))
except requests.exceptions.HTTPError as hsf_weather:
logging.error( "%s,%s is the http-exception occured at %s for the route %s"%(hsf_weather,type(hsf_weather),sanfrancisco_time,sfcity))
except requests.exceptions.ReadTimeout as rsf_weather:
logging.error( "%s,%s is the readtimeout-exception occured at %s for the route %s"%(rsf_weather,type(rsf_weather),sanfrancisco_time,sfcity))
except requests.exceptions.Timeout as tsf_weather:
logging.error( "%s,%s is the timeout-exception occured at %s for the route %s"%(tsf_weather,type(tsf_weather),sanfrancisco_time,sfcity))
except Exception as esf_weather:
logging.error( "%s,%s is the exception occured at %s for the route %s"%(esf_weather,type(esf_weather),sanfrancisco_time,sfcity))
except demjson.JSONDecodeError as sfjsonerror_weather:
logging.error( "the weather json error %s %s for the city at %s"(sfjsonerror_weather,type(sfjsonerror_weather),sfcity,sanfrancisco_time))
while (sfretry3<=2):
try:
sanfrancisco_incidents_api=requests.get('http://www.mapquestapi.com/traffic/v2/incidents?key='+mq_traffic_key+'&boundingBox=37.78461,-122.4415687,37.7563513,-122.4069454&filters=construction,incidents,congestion,events&inFormat=kvp&outFormat=json')
logging.info( "sf incidents api status %s and reason %s"%(sanfrancisco_incidents_api.status_code,sanfrancisco_incidents_api.reason))
sfretry3=3
sanfrancisco_data = sanfrancisco_incidents_api.text
sanfrancisco_incidents = demjson.decode(sanfrancisco_data)
sanfrancisco_length = len(sanfrancisco_incidents['incidents'])
except requests.exceptions.ConnectionError as csf_incidents:
sfretry3 = sfretry3+1
logging.error( "%s,%s is the conn-exception occured at %s for the route %s"%(csf_incidents,type(csf_incidents),sanfrancisco_time,sfroute))
except requests.exceptions.HTTPError as hsf_incidents:
logging.error( "%s,%s is the http-exception occured at %s for the route %s"%(hsf_incidents,type(hsf_incidents),sanfrancisco_time,sfroute))
except requests.exceptions.ReadTimeout as rsf_incidents:
logging.error( "%s,%s is the readtimeout-exception occured at %s for the route %s"%(rsf_incidents,type(rsf_incidents),sanfrancisco_time,sfroute))
except requests.exceptions.Timeout as tsf_incidents:
logging.error( "%s,%s is the timeout-exception occured at %s for the route %s"%(tsf_incidents,type(tsf_incidents),sanfrancisco_time,sfroute))
except Exception as esf_incidents:
logging.error( "%s,%s is the exception occured at %s for the route %s"%(esf_incidents,type(esf_incidents),sanfrancisco_time,sfroute))
except demjson.JSONDecodeError as sfjsonerror_incidents:
logging.error( "the incidents json error %s %s for the route %s at %s"(sfjsonerror_incidents,type(sfjsonerror_incidents),sfroute,sanfrancisco_time))
if (sanfrancisco_incidents_api.status_code == 200):
if 'info' in sanfrancisco_incidents:
del sanfrancisco_incidents['info']
for i in range(0,sanfrancisco_length):
sanfrancisco_delayFromFreeFlow.append(float(sanfrancisco_incidents["incidents"][i]["delayFromFreeFlow"]))
sanfrancisco_delayFromTypical.append(float(sanfrancisco_incidents["incidents"][i]["delayFromTypical"]))
for i in range(0,sanfrancisco_length):
sanfrancisco_incidents["incidents"][i]["delayFromTypical"] = sanfrancisco_delayFromFreeFlow[i]
sanfrancisco_incidents["incidents"][i]["delayFromFreeFlow"] =sanfrancisco_delayFromTypical[i]
else:
pass
except Exception as sfmainException:
logging.error( "%s,%s is the exception occured for sfmainException"%(sfmainException,type(sfmainException)))
sanfrancisco_directions = "NONE"
sanfrancisco_weather = "NONE"
sanfrancisco_incidents = "NONE"
sf_flag = False
try:
if (sanfrancisco_directions == 'NONE' or sanfrancisco_directions['route']['realTime'] > 10000000 or sanfrancisco_directions['route']['realTime'] == 'None'or sanfrancisco_directions['route']['distance'] == 'None' or sanfrancisco_directions['route']['time'] == 'None' or sanfrancisco_weather == 'NONE' or sanfrancisco_weather.has_key('error') or sanfrancisco_weather['query']['results'] == None or sanfrancisco_weather['query'] == None or sanfrancisco_weather['query']['results']['channel']['item']['condition']['code'] == 3200):
sf_flag = False
else:
sf_flag = True
except Exception as flagcheckexception:
logging.error( "%s,%s is the exception occured for flagcheckexception"%(flagcheckexception,type(flagcheckexception)))
sanfrancisco_doc = {
"route":"MOUNTZION RADIOLOGY CENTER-SF GENERAL HOSPITAL",
"recorddate": sanfrancisco_loctime,
"recorddayname" : sanfrancisco_dayname,
"directions":sanfrancisco_directions,
"weather":[sanfrancisco_weather],
"traffic":sanfrancisco_incidents,
"Flag":sf_flag
}
sanfrancisco_docid = ttobgcoll.insert_one(sanfrancisco_doc)
logging.info( "Received the sanfrancisco data by %s"%(str(datetime.datetime.now(pytz.timezone('US/Pacific')))))
try:
sfcnt = newttobackground.ttobgcoll.find({"route":"MOUNTZION RADIOLOGY CENTER-SF GENERAL HOSPITAL"}).count()
if (sfcnt > count):
for sanfrancisco_doc in ttobgcoll.find({"route":"MOUNTZION RADIOLOGY CENTER-SF GENERAL HOSPITAL"}).sort('recorddate', pymongo.ASCENDING).limit(Limit):
sflist_id.append(sanfrancisco_doc['_id'])
sftime.append(sanfrancisco_doc['recorddate'])
logging.info("MOUNTZION RADIOLOGY CENTER-SF GENERAL HOSPITAL")
logging.info(sflist_id)
logging.info(sftime)
for sfid in sflist_id:
newttobackground.ttobgcoll.remove({"_id":sfid})
del sflist_id
del sftime
except Exception as sfrollover_error:
logging.error( "%s,%s is the rollover exception occured at %s for the route %s"%(sfrollover_error,type(sfrollover_error),sanfrancisco_time,sfroute))
del sanfrancisco_delayFromFreeFlow
del sanfrancisco_delayFromTypical
Limit = 1
sf_cursor = newttobackground.ttobgcoll.find({"route":"MOUNTZION RADIOLOGY CENTER-SF GENERAL HOSPITAL","recorddate":sanfrancisco_loctime})
try:
for sfval in sf_cursor:
if (sfval['Flag'] == True):
sf_t = float(sfval['weather'][0]['query']['results']['channel']['item']['condition']['temp'])
sf_w = int(sfval['weather'][0]['query']['results']['channel']['item']['condition']['code'])
v1 = int(sfval['recorddate'].strftime("%H"))
v2 = int(sfval['recorddate'].strftime("%M"))
v1 = (v1)*6
v2 = (v2+10)/10
zone = v1+v2
Day_List = ['','Sunday','Monday','Tuesday','Wednesday','Thursday','Friday','Saturday']
for day in Day_List:
if sfval['recorddayname'] == day:
codedday = Day_List.index(day)
sanfrancisco_doc = {
"route":"MOUNTZION RADIOLOGY CENTER-SF GENERAL HOSPITAL",
"Date":sfval['recorddate'],
"Day":sfval['recorddayname'],
"Temparature": [sf_t],
"CodedWeather" : [sf_w],
"CodedDay":codedday,
"Zone":zone,
"realTime":sfval['directions']['route']['realTime']
}
sanfrancisco_docid = newttobackground.ttoopvalcoll.insert_one(sanfrancisco_doc)
else:
pass
except Exception as e:
logging.error("ttoopvalcoll upload error in sanfrancisco %s,%s"%(e,type(e)))
'''INDUCE TIME PROGRAM'''
induceTime = []
induceWeather = []
induceTemparature = []
try:
cnt = newttobackground.ttoinducecoll.find({"route":"MOUNTZION RADIOLOGY CENTER-SF GENERAL HOSPITAL"}).count()
if cnt >0:
cursor = newttobackground.ttoinducecoll.find({"route":"MOUNTZION RADIOLOGY CENTER-SF GENERAL HOSPITAL"})
for doc in cursor:
induceTime.append(doc['induceTime'])
induceWeather.append([doc['induceWeather'][0],doc['induceWeather'][1]])
induceTemparature.append([doc['induceTemparature'][0],doc['induceTemparature'][1]])
else:
pass
except Exception as e:
print e
sf_df,sftestprep_return = sf_testprep.sfalgo(sanfrancisco_loctime,induceTime,induceWeather,induceTemparature)
if (sftestprep_return == True):
sfscikit_return= sf_scikit.sf_scikitalgo(sf_df)
else:
pass
idlist = []
if (newttobackground.ttoinducecoll.find({"route":"MOUNTZION RADIOLOGY CENTER-SF GENERAL HOSPITAL"}).count() > 0):
cursor = newttobackground.ttoinducecoll.find({"route":"MOUNTZION RADIOLOGY CENTER-SF GENERAL HOSPITAL"})
for doc in cursor:
idlist.append(doc['_id'])
newttobackground.ttoinducecoll.remove({'_id':{"$in":idlist}}) # Dangerous line
else:
pass
time.sleep(580-sanfrancisco_second)
if __name__ == '__main__':
    # Spawn one worker thread per monitored route; each loops forever.
    try:
        t1 = threading.Thread(target = function_routenewarkedison)
        t2 = threading.Thread(target = function_routebrooklyndenville)
        t3 = threading.Thread(target = function_routesanfrancisco)
        t1.start()
        t2.start()
        t3.start()
    except Exception as thread_error:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrow to Exception and include the cause.
        logging.error( "ERROR : Unable to create thread %s,%s"%(thread_error,type(thread_error)))
| |
import re
from sqlalchemy import sql, util
from sqlalchemy import types as sqltypes
from sqlalchemy import pool, exc
from sqlalchemy.engine import default, reflection
from sqlalchemy_monetdb.base import MonetExecutionContext, \
MonetIdentifierPreparer
from sqlalchemy_monetdb.compiler import MonetDDLCompiler, MonetTypeCompiler, \
MonetCompiler
from sqlalchemy_monetdb.monetdb_types import MONETDB_TYPE_MAP
# Optional Alembic integration: register a migration "impl" for this dialect
# so Alembic can target MonetDB.  Skipped silently when Alembic is absent.
# NOTE(review): relies on `alembic.ddl.impl` being reachable from the
# top-level `import alembic` — confirm against the Alembic version in use.
try:
    import alembic
    class MonetImpl(alembic.ddl.impl.DefaultImpl):
        # Dialect name Alembic uses to select this impl.
        __dialect__ = 'monetdb'
except ImportError:
    pass
class MonetDialect(default.DefaultDialect):
    """SQLAlchemy dialect for MonetDB using the pymonetdb DBAPI driver."""
    # SQLAlchemy 1.4+ compiled-statement cache: explicitly disabled here.
    supports_statement_cache = False
    name = "monetdb"
    driver = "pymonetdb"
    # Sequence / primary-key behaviour.
    preexecute_pk_sequences = True
    supports_pk_autoincrement = True
    supports_sequences = True
    sequences_optional = True
    # Native type support advertised to the SQLAlchemy core.
    supports_native_decimal = True
    supports_default_values = True
    supports_native_boolean = True
    poolclass = pool.SingletonThreadPool
    supports_unicode_statements = True
    # Dialect-specific compiler / execution plumbing.
    statement_compiler = MonetCompiler
    ddl_compiler = MonetDDLCompiler
    execution_ctx_cls = MonetExecutionContext
    preparer = MonetIdentifierPreparer
    type_compiler = MonetTypeCompiler
    default_paramstyle = 'pyformat'
def __init__(self, **kwargs):
default.DefaultDialect.__init__(self, **kwargs)
@classmethod
def dbapi(cls):
return __import__("pymonetdb", fromlist="sql")
def create_connect_args(self, url):
opts = url.translate_connect_args()
return [], opts
    def create_execution_context(self, *args, **kwargs):
        """Build the MonetDB-specific execution context for a statement."""
        return MonetExecutionContext(self, *args, **kwargs)
def get_table_names(self, connection, schema=None, **kw):
"""Return a list of table names for `schema`."""
q = """
SELECT name
FROM sys.tables
WHERE system = false
AND type = 0
AND schema_id = %(schema_id)s
"""
args = {"schema_id": self._schema_id(connection, schema)}
return [row[0] for row in connection.execute(q, args)]
@reflection.cache
def get_temp_table_names(self, connection, **kw):
# 2097 is tmp schema, 30 is table type LOCAL TEMPORARY
s = "SELECT tables.name FROM sys.tables WHERE schema_id = 2097 AND type = 30"
rs = connection.execute(s)
return [row[0] for row in rs]
def has_table(self, connection, table_name, schema=None):
# seems like case gets folded in pg_class...
if schema is None:
cursor = connection.execute(
sql.text(
"select name "
"from sys.tables "
"where system = false "
"and type = 0 "
"and name=:name",
).bindparams(
sql.bindparam('name', util.text_type(table_name),
type_=sqltypes.Unicode)
)
)
else:
cursor = connection.execute(
sql.text(
"SELECT tables.name "
"FROM sys.tables, sys.schemas "
"WHERE tables.system = FALSE "
"AND tables.schema_id = schemas.id "
"AND type = 0 "
"AND tables.name = :name "
"AND schemas.name = :schema",
bindparams=[
sql.bindparam('name',
util.text_type(table_name),
type_=sqltypes.Unicode),
sql.bindparam('schema',
util.text_type(schema),
type_=sqltypes.Unicode)]
)
)
return bool(cursor.first())
def has_sequence(self, connection, sequence_name, schema=None):
q = """
SELECT id
FROM sys.sequences
WHERE name = %(name)s
AND schema_id = %(schema_id)s
"""
args = {
"name": sequence_name,
"schema_id": self._schema_id(connection, schema)
}
cursor = connection.execute(q, args)
return bool(cursor.first())
@reflection.cache
def _schema_id(self, connection, schema_name):
"""Fetch the id for schema"""
if schema_name is None:
schema_name = connection.execute("SELECT current_schema").scalar()
query = """
SELECT id
FROM sys.schemas
WHERE name = %(schema_name)s
"""
args = {"schema_name": schema_name}
cursor = connection.execute(query, args)
schema_id = cursor.scalar()
if schema_id is None:
raise exc.InvalidRequestError(schema_name)
return schema_id
@reflection.cache
def _table_id(self, connection, table_name, schema_name=None):
"""Fetch the id for schema.table_name, defaulting to current schema if
schema is None
"""
q = """
SELECT id
FROM sys.tables
WHERE name = %(name)s
AND schema_id = %(schema_id)s
"""
args = {
"name": table_name,
"schema_id": self._schema_id(connection, schema_name)
}
c = connection.execute(q, args)
table_id = c.scalar()
if table_id is None:
raise exc.NoSuchTableError(table_name)
return table_id
def get_columns(self, connection, table_name, schema=None, **kw):
q = """
SELECT id, name, type, "default", "null", type_digits, type_scale
FROM sys.columns
WHERE columns.table_id = %(table_id)s
"""
args = {"table_id": self._table_id(connection, table_name, schema)}
c = connection.execute(q, args)
result = []
for row in c:
args = ()
kwargs = {}
name = row.name
if row.type in ("char", "varchar"):
args = (row.type_digits,)
elif row.type == "decimal":
args = (row.type_digits, row.type_scale)
elif row.type == 'timestamptz':
kwargs = {'timezone': True}
col_type = MONETDB_TYPE_MAP.get(row.type, None)
if col_type is None:
raise TypeError("Can't resolve type {0} (column '{1}')".format(col_type, name))
col_type = col_type(*args, **kwargs)
# monetdb translates an AUTO INCREMENT into a sequence
autoincrement = False
if row.default is not None:
r = r"""next value for \"(\w*)\"\.\"(\w*)"$"""
match = re.search(r, row.default)
if match is not None:
seq_schema = match.group(1)
seq = match.group(2)
autoincrement = True
column = {
"name": name,
"type": col_type,
"default": row.default,
"autoincrement": autoincrement,
"nullable": row.null,
}
result.append(column)
return result
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        """Return information about foreign_keys in `table_name`.
        Given a string `table_name`, and an optional string `schema`, return
        foreign key information as a list of dicts with these keys:
        constrained_columns
        a list of column names that make up the foreign key
        referred_schema
        the name of the referred schema
        referred_table
        the name of the referred table
        referred_columns
        a list of column names in the referred table that correspond to
        constrained_columns
        name
        optional name of the foreign key constraint.
        **kw
        other options passed to the dialect's get_foreign_keys() method.
        """
        # One row per (constraint, column pair), ordered so that all columns of
        # a constraint arrive consecutively; rkey > -1 selects foreign keys.
        q = """
        SELECT
            fkkey.name AS name,
            fkschema.name AS fktable_schema,
            fktable.name AS fktable_name,
            fkkeycol.name AS fkcolumn_name,
            fktable.id AS fktable_id,
            pkschema.name AS pktable_schema,
            pktable.name AS pktable_name,
            pkkeycol.name AS pkcolumn_name,
            pktable.id AS pktable_id,
            pkkeycol.nr AS key_seq
        FROM sys.keys AS fkkey
        JOIN sys.tables AS fktable ON (fktable.id = fkkey.table_id)
        JOIN sys.objects AS fkkeycol ON (fkkey.id = fkkeycol.id)
        JOIN sys.keys AS pkkey ON (fkkey.rkey = pkkey.id)
        JOIN sys.objects AS pkkeycol ON (pkkey.id = pkkeycol.id)
        JOIN sys.tables AS pktable ON (pktable.id = pkkey.table_id)
        JOIN sys.schemas AS fkschema ON (fkschema.id = fktable.schema_id)
        JOIN sys.schemas AS pkschema ON (pkschema.id = pktable.schema_id)
        WHERE fkkey.rkey > -1
        AND fkkeycol.nr = pkkeycol.nr
        AND fktable.id = %(table_id)s
        ORDER BY name, key_seq
        """
        args = {"table_id": self._table_id(connection, table_name, schema)}
        c = connection.execute(q, args)
        results = []
        key_data = {}
        constrained_columns = []
        referred_columns = []
        last_name = None
        # Group consecutive rows that share a constraint name into one dict.
        for row in c:
            # A new constraint name: flush the previous constraint's columns.
            if last_name is not None and last_name != row.name:
                key_data["constrained_columns"] = constrained_columns
                key_data["referred_columns"] = referred_columns
                results.append(key_data)
                constrained_columns = []
                referred_columns = []
            # First row overall, or first row of a new constraint.
            if last_name is None or last_name != row.name:
                key_data = {
                    "name": row.name,
                    "referred_schema": row.pktable_schema,
                    "referred_table": row.pktable_name,
                }
                last_name = row.name
            constrained_columns.append(row.fkcolumn_name)
            referred_columns.append(row.pkcolumn_name)
        # Flush the final constraint (key_data is falsy when no rows matched).
        if key_data:
            key_data["constrained_columns"] = constrained_columns
            key_data["referred_columns"] = referred_columns
            results.append(key_data)
        return results
def get_indexes(self, connection, table_name, schema=None, **kw):
    """Return index information for ``table_name``.

    Rows arrive ordered by index name, so consecutive rows that belong
    to the same index are folded into a single dict with keys ``name``,
    ``unique`` and ``column_names``.
    """
    q = """
        SELECT idxs.name, objects.name AS "column_name"
        FROM sys.idxs
        JOIN sys.objects USING (id)
        WHERE table_id = %(table_id)s
        ORDER BY idxs.name, objects.nr
    """
    rows = connection.execute(q, {
        "table_id": self._table_id(connection, table_name, schema)})
    results = []
    current = None
    columns = []
    for row in rows:
        if current is not None and current["name"] != row.name:
            # Index name changed: flush the finished entry.
            current["column_names"] = columns
            results.append(current)
            columns = []
            current = None
        if current is None:
            # Uniqueness is not derived from the catalog here; every
            # index is reported as non-unique by this implementation.
            current = {"name": row.name, "unique": False}
        columns.append(row.column_name)
    if current:
        current["column_names"] = columns
        results.append(current)
    return results
def do_commit(self, connection):
    # Issue a COMMIT on the raw DB-API connection.
    connection.commit()
def do_rollback(self, connection):
    # Issue a ROLLBACK on the raw DB-API connection.
    connection.rollback()
@reflection.cache
def get_schema_names(self, connection, **kw):
    """Return the names of all schemas in the database, sorted by name."""
    query = """
        SELECT name FROM sys.schemas ORDER BY name
    """
    return [row[0] for row in connection.execute(query)]
def get_view_definition(self, connection, view_name, schema=None, **kw):
    """Return the view definition.

    Given a :class:`.Connection`, a string
    `view_name`, and an optional string `schema`, return the view
    definition as a string (``None`` when the view does not exist).
    """
    # Views are stored in sys.tables with type = 1 (see get_view_names);
    # their defining SQL lives in the "query" column.
    q = """
        SELECT query FROM sys.tables
        WHERE type = 1
        AND name = %(name)s
        AND schema_id = %(schema_id)s
    """
    args = {
        "name": view_name,
        "schema_id": self._schema_id(connection, schema),
    }
    # BUG FIX: previously this returned the raw result proxy; callers of
    # Inspector.get_view_definition() expect the definition string itself.
    return connection.execute(q, args).scalar()
def get_view_names(self, connection, schema=None, **kw):
    """Return a list of all view names available in the database.

    schema:
        Optional, retrieve names from a non-default schema.
    """
    q = """
        SELECT name
        FROM sys.tables
        WHERE type = 1
        AND schema_id = %(schema_id)s
    """
    params = {"schema_id": self._schema_id(connection, schema)}
    names = []
    for row in connection.execute(q, params):
        names.append(row[0])
    return names
def _get_default_schema_name(self, connection):
"""Return the string name of the currently selected schema from
the given connection.
This is used by the default implementation to populate the
"default_schema_name" attribute and is called exactly
once upon first connect.
"""
return connection.execute("SELECT CURRENT_SCHEMA").scalar()
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
    """Return information about the primary key constraint on `table_name`.

    The result is a dictionary with ``constrained_columns`` (list of
    column names that make up the primary key) and ``name`` (the
    constraint name), or an empty dict when the table has no primary
    key.
    """
    # keys.type = 0 selects primary-key entries in MonetDB's catalog
    # (type = 1 is used for unique constraints, see
    # get_unique_constraints).
    q = """
        SELECT "objects"."name" AS col, keys.name AS name
        FROM "sys"."keys" AS "keys",
        "sys"."objects" AS "objects",
        "sys"."tables" AS "tables",
        "sys"."schemas" AS "schemas"
        WHERE "keys"."id" = "objects"."id"
        AND "keys"."table_id" = "tables"."id"
        AND "tables"."schema_id" = "schemas"."id"
        AND "keys"."type" = 0
        AND "tables"."id" = %(table_id)s
    """
    args = {"table_id": self._table_id(connection, table_name, schema)}
    rows = connection.execute(q, args).fetchall()
    if not rows:
        return {}
    return {
        "constrained_columns": [row[0] for row in rows],
        "name": rows[0][1],
    }
def get_unique_constraints(self, connection, table_name, schema=None, **kw):
    """Return information about unique constraints in `table_name`.

    Each constraint is reported as a dict with keys ``name`` (the
    unique constraint's name) and ``column_names`` (list of column
    names in order).

    .. versionadded:: 0.9.0
    """
    from collections import defaultdict

    # keys.type = 1 selects unique-constraint entries in MonetDB's
    # catalog (type = 0 is the primary key, see get_pk_constraint).
    q = """
        SELECT "objects"."name" AS col, keys.name AS name
        FROM "sys"."keys" AS "keys",
        "sys"."objects" AS "objects",
        "sys"."tables" AS "tables",
        "sys"."schemas" AS "schemas"
        WHERE "keys"."id" = "objects"."id"
        AND "keys"."table_id" = "tables"."id"
        AND "tables"."schema_id" = "schemas"."id"
        AND "keys"."type" = 1
        AND "tables"."id" = %(table_id)s
    """
    args = {"table_id": self._table_id(connection, table_name, schema)}
    rows = connection.execute(q, args).fetchall()
    # Group the (column, constraint-name) pairs by constraint name,
    # keeping column order as returned by the query.
    grouped = defaultdict(list)
    for col, name in rows:
        grouped[name].append(col)
    return [{'name': name, 'column_names': cols}
            for name, cols in grouped.items()]
| |
"""Test how the DmsDeviceSource handles available and unavailable devices."""
from __future__ import annotations
import asyncio
from collections.abc import AsyncIterable
import logging
from unittest.mock import ANY, DEFAULT, Mock, patch
from async_upnp_client.exceptions import UpnpConnectionError, UpnpError
from didl_lite import didl_lite
import pytest
from homeassistant.components import ssdp
from homeassistant.components.dlna_dms.const import DOMAIN
from homeassistant.components.dlna_dms.dms import DmsDeviceSource, get_domain_data
from homeassistant.components.media_player.errors import BrowseError
from homeassistant.components.media_source.error import Unresolvable
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from .conftest import (
MOCK_DEVICE_LOCATION,
MOCK_DEVICE_NAME,
MOCK_DEVICE_TYPE,
MOCK_DEVICE_UDN,
MOCK_DEVICE_USN,
NEW_DEVICE_LOCATION,
)
from tests.common import MockConfigEntry
# Auto-use the domain_data_mock fixture for every test in this module.
pytestmark = [
    pytest.mark.usefixtures("domain_data_mock"),
]
async def setup_mock_component(
    hass: HomeAssistant, mock_entry: MockConfigEntry
) -> DmsDeviceSource:
    """Set up a mock DlnaDmrEntity with the given configuration."""
    mock_entry.add_to_hass(hass)
    assert await async_setup_component(hass, DOMAIN, {}) is True
    await hass.async_block_till_done()
    # Only one device is registered, so return it.
    devices = get_domain_data(hass).devices
    return next(iter(devices.values()))
@pytest.fixture
async def connected_source_mock(
    hass: HomeAssistant,
    config_entry_mock: MockConfigEntry,
    ssdp_scanner_mock: Mock,
    dms_device_mock: Mock,
) -> AsyncIterable[DmsDeviceSource]:
    """Fixture to set up a mock DmsDeviceSource in a connected state.

    Yields the entity. Cleans up the entity after the test is complete.
    """
    entity = await setup_mock_component(hass, config_entry_mock)
    # Check the entity has registered all needed listeners
    assert len(config_entry_mock.update_listeners) == 1
    # Two SSDP callbacks are registered: one matching the device USN, one
    # matching byebye notifications (see test_unavailable_device).
    assert ssdp_scanner_mock.async_register_callback.await_count == 2
    assert ssdp_scanner_mock.async_register_callback.return_value.call_count == 0
    # Run the test
    yield entity
    # Unload config entry to clean up
    assert await hass.config_entries.async_remove(config_entry_mock.entry_id) == {
        "require_restart": False
    }
    # Check entity has cleaned up its resources: every SSDP callback
    # registration must have had its unregister function called.
    assert not config_entry_mock.update_listeners
    assert (
        ssdp_scanner_mock.async_register_callback.await_count
        == ssdp_scanner_mock.async_register_callback.return_value.call_count
    )
@pytest.fixture
async def disconnected_source_mock(
    hass: HomeAssistant,
    upnp_factory_mock: Mock,
    config_entry_mock: MockConfigEntry,
    ssdp_scanner_mock: Mock,
    dms_device_mock: Mock,
) -> AsyncIterable[DmsDeviceSource]:
    """Fixture to set up a mock DmsDeviceSource in a disconnected state.

    Yields the entity. Cleans up the entity after the test is complete.
    """
    # Cause the connection attempt to fail
    upnp_factory_mock.async_create_device.side_effect = UpnpConnectionError
    entity = await setup_mock_component(hass, config_entry_mock)
    # Check the entity has registered all needed listeners
    assert len(config_entry_mock.update_listeners) == 1
    assert ssdp_scanner_mock.async_register_callback.await_count == 2
    assert ssdp_scanner_mock.async_register_callback.return_value.call_count == 0
    # Run the test
    yield entity
    # Unload config entry to clean up
    assert await hass.config_entries.async_remove(config_entry_mock.entry_id) == {
        "require_restart": False
    }
    # Check entity has cleaned up its resources: every SSDP callback
    # registration must have had its unregister function called.
    assert not config_entry_mock.update_listeners
    assert (
        ssdp_scanner_mock.async_register_callback.await_count
        == ssdp_scanner_mock.async_register_callback.return_value.call_count
    )
async def test_unavailable_device(
    hass: HomeAssistant,
    upnp_factory_mock: Mock,
    ssdp_scanner_mock: Mock,
    config_entry_mock: MockConfigEntry,
) -> None:
    """Test a DlnaDmsEntity without a connected DmsDevice."""
    # Cause connection attempts to fail
    upnp_factory_mock.async_create_device.side_effect = UpnpConnectionError
    with patch(
        "homeassistant.components.dlna_dms.dms.DmsDevice", autospec=True
    ) as dms_device_constructor_mock:
        connected_source_mock = await setup_mock_component(hass, config_entry_mock)
        # Check device is not created
        dms_device_constructor_mock.assert_not_called()
    # Check attempt was made to create a device from the supplied URL
    upnp_factory_mock.async_create_device.assert_awaited_once_with(MOCK_DEVICE_LOCATION)
    # Check SSDP notifications are registered: one callback keyed on the
    # device USN, one keyed on the UDN for byebye messages.
    ssdp_scanner_mock.async_register_callback.assert_any_call(
        ANY, {"USN": MOCK_DEVICE_USN}
    )
    ssdp_scanner_mock.async_register_callback.assert_any_call(
        ANY, {"_udn": MOCK_DEVICE_UDN, "NTS": "ssdp:byebye"}
    )
    # Quick check of the state to verify the entity has no connected DmsDevice
    assert not connected_source_mock.available
    # Check the name matches that supplied
    assert connected_source_mock.name == MOCK_DEVICE_NAME
    # Check attempts to browse and resolve media give errors, for each
    # media identifier form: path (/), object id (:), and search query (?).
    with pytest.raises(BrowseError):
        await connected_source_mock.async_browse_media("/browse_path")
    with pytest.raises(BrowseError):
        await connected_source_mock.async_browse_media(":browse_object")
    with pytest.raises(BrowseError):
        await connected_source_mock.async_browse_media("?browse_search")
    with pytest.raises(Unresolvable):
        await connected_source_mock.async_resolve_media("/resolve_path")
    with pytest.raises(Unresolvable):
        await connected_source_mock.async_resolve_media(":resolve_object")
    with pytest.raises(Unresolvable):
        await connected_source_mock.async_resolve_media("?resolve_search")
    # Unload config entry to clean up
    assert await hass.config_entries.async_remove(config_entry_mock.entry_id) == {
        "require_restart": False
    }
    # Confirm SSDP notifications unregistered
    assert ssdp_scanner_mock.async_register_callback.return_value.call_count == 2
async def test_become_available(
    hass: HomeAssistant,
    upnp_factory_mock: Mock,
    ssdp_scanner_mock: Mock,
    config_entry_mock: MockConfigEntry,
    dms_device_mock: Mock,
) -> None:
    """Test a device becoming available after the entity is constructed."""
    # Cause connection attempts to fail before adding the entity
    upnp_factory_mock.async_create_device.side_effect = UpnpConnectionError
    connected_source_mock = await setup_mock_component(hass, config_entry_mock)
    assert not connected_source_mock.available
    # Mock device is now available.
    upnp_factory_mock.async_create_device.side_effect = None
    upnp_factory_mock.async_create_device.reset_mock()
    # Send an SSDP notification from the now alive device.
    # call_args picks the most recently registered SSDP callback.
    ssdp_callback = ssdp_scanner_mock.async_register_callback.call_args.args[0]
    await ssdp_callback(
        ssdp.SsdpServiceInfo(
            ssdp_usn=MOCK_DEVICE_USN,
            ssdp_location=NEW_DEVICE_LOCATION,
            ssdp_st=MOCK_DEVICE_TYPE,
            upnp={},
        ),
        ssdp.SsdpChange.ALIVE,
    )
    await hass.async_block_till_done()
    # Check device was created from the supplied URL
    upnp_factory_mock.async_create_device.assert_awaited_once_with(NEW_DEVICE_LOCATION)
    # Quick check of the state to verify the entity has a connected DmsDevice
    assert connected_source_mock.available
    # Unload config entry to clean up
    assert await hass.config_entries.async_remove(config_entry_mock.entry_id) == {
        "require_restart": False
    }
    # Confirm SSDP notifications unregistered
    assert ssdp_scanner_mock.async_register_callback.return_value.call_count == 2
async def test_alive_but_gone(
    hass: HomeAssistant,
    upnp_factory_mock: Mock,
    ssdp_scanner_mock: Mock,
    disconnected_source_mock: DmsDeviceSource,
) -> None:
    """Test a device sending an SSDP alive announcement, but not being connectable."""
    # All connection attempts in this test fail with a generic UpnpError.
    upnp_factory_mock.async_create_device.side_effect = UpnpError
    # Send an SSDP notification from the still missing device
    ssdp_callback = ssdp_scanner_mock.async_register_callback.call_args.args[0]
    await ssdp_callback(
        ssdp.SsdpServiceInfo(
            ssdp_usn=MOCK_DEVICE_USN,
            ssdp_location=NEW_DEVICE_LOCATION,
            ssdp_st=MOCK_DEVICE_TYPE,
            ssdp_headers={ssdp.ATTR_SSDP_BOOTID: "1"},
            upnp={},
        ),
        ssdp.SsdpChange.ALIVE,
    )
    await hass.async_block_till_done()
    # There should be a connection attempt to the device
    upnp_factory_mock.async_create_device.assert_awaited()
    # Device should still be unavailable
    assert not disconnected_source_mock.available
    # Send the same SSDP notification, expecting no extra connection attempts
    upnp_factory_mock.async_create_device.reset_mock()
    await ssdp_callback(
        ssdp.SsdpServiceInfo(
            ssdp_usn=MOCK_DEVICE_USN,
            ssdp_location=NEW_DEVICE_LOCATION,
            ssdp_st=MOCK_DEVICE_TYPE,
            ssdp_headers={ssdp.ATTR_SSDP_BOOTID: "1"},
            upnp={},
        ),
        ssdp.SsdpChange.ALIVE,
    )
    await hass.async_block_till_done()
    upnp_factory_mock.async_create_device.assert_not_called()
    upnp_factory_mock.async_create_device.assert_not_awaited()
    assert not disconnected_source_mock.available
    # Send an SSDP notification with a new BOOTID, indicating the device has rebooted
    upnp_factory_mock.async_create_device.reset_mock()
    await ssdp_callback(
        ssdp.SsdpServiceInfo(
            ssdp_usn=MOCK_DEVICE_USN,
            ssdp_location=NEW_DEVICE_LOCATION,
            ssdp_st=MOCK_DEVICE_TYPE,
            ssdp_headers={ssdp.ATTR_SSDP_BOOTID: "2"},
            upnp={},
        ),
        ssdp.SsdpChange.ALIVE,
    )
    await hass.async_block_till_done()
    # Rebooted device (seen via BOOTID) should mean a new connection attempt
    upnp_factory_mock.async_create_device.assert_awaited()
    assert not disconnected_source_mock.available
    # Send byebye message to indicate device is going away. Next alive message
    # should result in a reconnect attempt even with same BOOTID.
    upnp_factory_mock.async_create_device.reset_mock()
    await ssdp_callback(
        ssdp.SsdpServiceInfo(
            ssdp_usn=MOCK_DEVICE_USN,
            ssdp_st=MOCK_DEVICE_TYPE,
            upnp={},
        ),
        ssdp.SsdpChange.BYEBYE,
    )
    await ssdp_callback(
        ssdp.SsdpServiceInfo(
            ssdp_usn=MOCK_DEVICE_USN,
            ssdp_location=NEW_DEVICE_LOCATION,
            ssdp_st=MOCK_DEVICE_TYPE,
            ssdp_headers={ssdp.ATTR_SSDP_BOOTID: "2"},
            upnp={},
        ),
        ssdp.SsdpChange.ALIVE,
    )
    await hass.async_block_till_done()
    # Rebooted device (seen via byebye/alive) should mean a new connection attempt
    upnp_factory_mock.async_create_device.assert_awaited()
    assert not disconnected_source_mock.available
async def test_multiple_ssdp_alive(
    hass: HomeAssistant,
    upnp_factory_mock: Mock,
    ssdp_scanner_mock: Mock,
    disconnected_source_mock: DmsDeviceSource,
) -> None:
    """Test multiple SSDP alive notifications is ok, only connects to device once."""
    upnp_factory_mock.async_create_device.reset_mock()

    # Contacting the device takes long enough that 2 simultaneous attempts could be made
    async def create_device_delayed(_location):
        """Delay before continuing with async_create_device.

        This gives a chance for parallel calls to `device_connect` to occur.
        """
        await asyncio.sleep(0.1)
        return DEFAULT

    upnp_factory_mock.async_create_device.side_effect = create_device_delayed
    # Send two SSDP notifications with the new device URL
    ssdp_callback = ssdp_scanner_mock.async_register_callback.call_args.args[0]
    await ssdp_callback(
        ssdp.SsdpServiceInfo(
            ssdp_usn=MOCK_DEVICE_USN,
            ssdp_location=NEW_DEVICE_LOCATION,
            ssdp_st=MOCK_DEVICE_TYPE,
            upnp={},
        ),
        ssdp.SsdpChange.ALIVE,
    )
    await ssdp_callback(
        ssdp.SsdpServiceInfo(
            ssdp_usn=MOCK_DEVICE_USN,
            ssdp_location=NEW_DEVICE_LOCATION,
            ssdp_st=MOCK_DEVICE_TYPE,
            upnp={},
        ),
        ssdp.SsdpChange.ALIVE,
    )
    await hass.async_block_till_done()
    # Check device is contacted exactly once
    upnp_factory_mock.async_create_device.assert_awaited_once_with(NEW_DEVICE_LOCATION)
    # Device should be available
    assert disconnected_source_mock.available
async def test_ssdp_byebye(
    hass: HomeAssistant,
    ssdp_scanner_mock: Mock,
    connected_source_mock: DmsDeviceSource,
) -> None:
    """Test device is disconnected when byebye is received."""
    # First byebye will cause a disconnect
    ssdp_callback = ssdp_scanner_mock.async_register_callback.call_args.args[0]
    await ssdp_callback(
        ssdp.SsdpServiceInfo(
            ssdp_usn=MOCK_DEVICE_USN,
            ssdp_udn=MOCK_DEVICE_UDN,
            ssdp_headers={"NTS": "ssdp:byebye"},
            ssdp_st=MOCK_DEVICE_TYPE,
            upnp={},
        ),
        ssdp.SsdpChange.BYEBYE,
    )
    # Device should be gone
    assert not connected_source_mock.available
    # Second byebye will do nothing; repeated byebye must be safe
    # (no exception, no state change).
    await ssdp_callback(
        ssdp.SsdpServiceInfo(
            ssdp_usn=MOCK_DEVICE_USN,
            ssdp_udn=MOCK_DEVICE_UDN,
            ssdp_headers={"NTS": "ssdp:byebye"},
            ssdp_st=MOCK_DEVICE_TYPE,
            upnp={},
        ),
        ssdp.SsdpChange.BYEBYE,
    )
async def test_ssdp_update_seen_bootid(
    hass: HomeAssistant,
    ssdp_scanner_mock: Mock,
    upnp_factory_mock: Mock,
    disconnected_source_mock: DmsDeviceSource,
) -> None:
    """Test device does not reconnect when it gets ssdp:update with next bootid."""
    # Start with a disconnected device
    entity = disconnected_source_mock
    assert not entity.available
    # "Reconnect" the device by clearing the connection-failure side effect
    upnp_factory_mock.async_create_device.reset_mock()
    upnp_factory_mock.async_create_device.side_effect = None
    # Send SSDP alive with boot ID
    ssdp_callback = ssdp_scanner_mock.async_register_callback.call_args.args[0]
    await ssdp_callback(
        ssdp.SsdpServiceInfo(
            ssdp_usn=MOCK_DEVICE_USN,
            ssdp_location=MOCK_DEVICE_LOCATION,
            ssdp_headers={ssdp.ATTR_SSDP_BOOTID: "1"},
            ssdp_st=MOCK_DEVICE_TYPE,
            upnp={},
        ),
        ssdp.SsdpChange.ALIVE,
    )
    await hass.async_block_till_done()
    # Device should be connected
    assert entity.available
    assert upnp_factory_mock.async_create_device.await_count == 1
    # Send SSDP update with next boot ID
    await ssdp_callback(
        ssdp.SsdpServiceInfo(
            ssdp_usn=MOCK_DEVICE_USN,
            ssdp_udn=MOCK_DEVICE_UDN,
            ssdp_headers={
                "NTS": "ssdp:update",
                ssdp.ATTR_SSDP_BOOTID: "1",
                ssdp.ATTR_SSDP_NEXTBOOTID: "2",
            },
            ssdp_st=MOCK_DEVICE_TYPE,
            upnp={},
        ),
        ssdp.SsdpChange.UPDATE,
    )
    await hass.async_block_till_done()
    # Device was not reconnected, even with a new boot ID
    assert entity.available
    assert upnp_factory_mock.async_create_device.await_count == 1
    # Send SSDP update with same next boot ID, again
    await ssdp_callback(
        ssdp.SsdpServiceInfo(
            ssdp_usn=MOCK_DEVICE_USN,
            ssdp_udn=MOCK_DEVICE_UDN,
            ssdp_headers={
                "NTS": "ssdp:update",
                ssdp.ATTR_SSDP_BOOTID: "1",
                ssdp.ATTR_SSDP_NEXTBOOTID: "2",
            },
            ssdp_st=MOCK_DEVICE_TYPE,
            upnp={},
        ),
        ssdp.SsdpChange.UPDATE,
    )
    await hass.async_block_till_done()
    # Nothing should change
    assert entity.available
    assert upnp_factory_mock.async_create_device.await_count == 1
    # Send SSDP update with bad (non-numeric) next boot ID — it must be
    # ignored rather than treated as a reboot or raising an error.
    await ssdp_callback(
        ssdp.SsdpServiceInfo(
            ssdp_usn=MOCK_DEVICE_USN,
            ssdp_udn=MOCK_DEVICE_UDN,
            ssdp_headers={
                "NTS": "ssdp:update",
                ssdp.ATTR_SSDP_BOOTID: "2",
                ssdp.ATTR_SSDP_NEXTBOOTID: "7c848375-a106-4bd1-ac3c-8e50427c8e4f",
            },
            ssdp_st=MOCK_DEVICE_TYPE,
            upnp={},
        ),
        ssdp.SsdpChange.UPDATE,
    )
    await hass.async_block_till_done()
    # Nothing should change
    assert entity.available
    assert upnp_factory_mock.async_create_device.await_count == 1
    # Send a new SSDP alive with the new boot ID, device should not reconnect
    await ssdp_callback(
        ssdp.SsdpServiceInfo(
            ssdp_usn=MOCK_DEVICE_USN,
            ssdp_location=MOCK_DEVICE_LOCATION,
            ssdp_headers={ssdp.ATTR_SSDP_BOOTID: "2"},
            ssdp_st=MOCK_DEVICE_TYPE,
            upnp={},
        ),
        ssdp.SsdpChange.ALIVE,
    )
    await hass.async_block_till_done()
    assert entity.available
    assert upnp_factory_mock.async_create_device.await_count == 1
async def test_ssdp_update_missed_bootid(
    hass: HomeAssistant,
    ssdp_scanner_mock: Mock,
    upnp_factory_mock: Mock,
    disconnected_source_mock: DmsDeviceSource,
) -> None:
    """Test device disconnects when it gets ssdp:update bootid it wasn't expecting."""
    # Start with a disconnected device
    entity = disconnected_source_mock
    assert not entity.available
    # "Reconnect" the device by clearing the connection-failure side effect
    upnp_factory_mock.async_create_device.reset_mock()
    upnp_factory_mock.async_create_device.side_effect = None
    # Send SSDP alive with boot ID
    ssdp_callback = ssdp_scanner_mock.async_register_callback.call_args.args[0]
    await ssdp_callback(
        ssdp.SsdpServiceInfo(
            ssdp_usn=MOCK_DEVICE_USN,
            ssdp_location=MOCK_DEVICE_LOCATION,
            ssdp_headers={ssdp.ATTR_SSDP_BOOTID: "1"},
            ssdp_st=MOCK_DEVICE_TYPE,
            upnp={},
        ),
        ssdp.SsdpChange.ALIVE,
    )
    await hass.async_block_till_done()
    # Device should be connected
    assert entity.available
    assert upnp_factory_mock.async_create_device.await_count == 1
    # Send SSDP update with skipped boot ID (2 was not previously seen;
    # the last known boot ID is 1)
    await ssdp_callback(
        ssdp.SsdpServiceInfo(
            ssdp_usn=MOCK_DEVICE_USN,
            ssdp_udn=MOCK_DEVICE_UDN,
            ssdp_headers={
                "NTS": "ssdp:update",
                ssdp.ATTR_SSDP_BOOTID: "2",
                ssdp.ATTR_SSDP_NEXTBOOTID: "3",
            },
            ssdp_st=MOCK_DEVICE_TYPE,
            upnp={},
        ),
        ssdp.SsdpChange.UPDATE,
    )
    await hass.async_block_till_done()
    # Device should not *re*-connect yet
    assert entity.available
    assert upnp_factory_mock.async_create_device.await_count == 1
    # Send a new SSDP alive with the new boot ID, device should reconnect
    await ssdp_callback(
        ssdp.SsdpServiceInfo(
            ssdp_usn=MOCK_DEVICE_USN,
            ssdp_location=MOCK_DEVICE_LOCATION,
            ssdp_headers={ssdp.ATTR_SSDP_BOOTID: "3"},
            ssdp_st=MOCK_DEVICE_TYPE,
            upnp={},
        ),
        ssdp.SsdpChange.ALIVE,
    )
    await hass.async_block_till_done()
    assert entity.available
    assert upnp_factory_mock.async_create_device.await_count == 2
async def test_ssdp_bootid(
    hass: HomeAssistant,
    upnp_factory_mock: Mock,
    ssdp_scanner_mock: Mock,
    disconnected_source_mock: DmsDeviceSource,
) -> None:
    """Test an alive with a new BOOTID.UPNP.ORG header causes a reconnect."""
    # Start with a disconnected device
    entity = disconnected_source_mock
    assert not entity.available
    # "Reconnect" the device by clearing the connection-failure side effect
    upnp_factory_mock.async_create_device.side_effect = None
    upnp_factory_mock.async_create_device.reset_mock()
    # Send SSDP alive with boot ID
    ssdp_callback = ssdp_scanner_mock.async_register_callback.call_args.args[0]
    await ssdp_callback(
        ssdp.SsdpServiceInfo(
            ssdp_usn=MOCK_DEVICE_USN,
            ssdp_location=MOCK_DEVICE_LOCATION,
            ssdp_headers={ssdp.ATTR_SSDP_BOOTID: "1"},
            ssdp_st=MOCK_DEVICE_TYPE,
            upnp={},
        ),
        ssdp.SsdpChange.ALIVE,
    )
    await hass.async_block_till_done()
    assert entity.available
    assert upnp_factory_mock.async_create_device.await_count == 1
    # Send SSDP alive with same boot ID, nothing should happen
    await ssdp_callback(
        ssdp.SsdpServiceInfo(
            ssdp_usn=MOCK_DEVICE_USN,
            ssdp_location=MOCK_DEVICE_LOCATION,
            ssdp_headers={ssdp.ATTR_SSDP_BOOTID: "1"},
            ssdp_st=MOCK_DEVICE_TYPE,
            upnp={},
        ),
        ssdp.SsdpChange.ALIVE,
    )
    await hass.async_block_till_done()
    assert entity.available
    assert upnp_factory_mock.async_create_device.await_count == 1
    # Send a new SSDP alive with an incremented boot ID, device should be dis/reconnected
    await ssdp_callback(
        ssdp.SsdpServiceInfo(
            ssdp_usn=MOCK_DEVICE_USN,
            ssdp_location=MOCK_DEVICE_LOCATION,
            ssdp_headers={ssdp.ATTR_SSDP_BOOTID: "2"},
            ssdp_st=MOCK_DEVICE_TYPE,
            upnp={},
        ),
        ssdp.SsdpChange.ALIVE,
    )
    await hass.async_block_till_done()
    assert entity.available
    assert upnp_factory_mock.async_create_device.await_count == 2
async def test_repeated_connect(
    caplog: pytest.LogCaptureFixture,
    connected_source_mock: DmsDeviceSource,
    upnp_factory_mock: Mock,
) -> None:
    """Test trying to connect an already connected device is safely ignored."""
    upnp_factory_mock.async_create_device.reset_mock()
    # Call the internal connect routine directly rather than trying to
    # time two SSDP messages to race each other.
    with caplog.at_level(logging.DEBUG):
        await connected_source_mock.device_connect()
    last_message = caplog.records[-1].message
    assert last_message == "Trying to connect when device already connected"
    # No new UPnP device was created for the redundant connect.
    assert upnp_factory_mock.async_create_device.await_count == 0
async def test_connect_no_location(
    caplog: pytest.LogCaptureFixture,
    disconnected_source_mock: DmsDeviceSource,
    upnp_factory_mock: Mock,
) -> None:
    """Test trying to connect without a location is safely ignored."""
    disconnected_source_mock.location = ""
    upnp_factory_mock.async_create_device.reset_mock()
    # Call the internal connect routine directly rather than trying to
    # time two SSDP messages to race each other.
    with caplog.at_level(logging.DEBUG):
        await disconnected_source_mock.device_connect()
    last_message = caplog.records[-1].message
    assert last_message == "Not connecting because location is not known"
    # No connection attempt should have been made at all.
    assert upnp_factory_mock.async_create_device.await_count == 0
async def test_become_unavailable(
    hass: HomeAssistant,
    connected_source_mock: DmsDeviceSource,
    dms_device_mock: Mock,
) -> None:
    """Test a device becoming unavailable."""
    # Mock a good resolve result
    dms_device_mock.async_browse_metadata.return_value = didl_lite.Item(
        id="object_id",
        restricted=False,
        title="Object",
        res=[didl_lite.Resource(uri="foo", protocol_info="http-get:*:audio/mpeg:")],
    )
    # Check async_resolve_object currently works
    await connected_source_mock.async_resolve_media(":object_id")
    # Now break the network connection
    dms_device_mock.async_browse_metadata.side_effect = UpnpConnectionError
    # The device should be considered available until next contacted:
    # availability is only re-evaluated when a request actually fails.
    assert connected_source_mock.available
    # async_resolve_object should fail
    with pytest.raises(Unresolvable):
        await connected_source_mock.async_resolve_media(":object_id")
    # The device should now be unavailable
    assert not connected_source_mock.available
| |
"""This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: Simple BSD.
from bz2 import BZ2File
from contextlib import closing
import gzip
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..utils import atleast2d_or_csr
def load_svmlight_file(f, n_features=None, dtype=np.float64,
                       multilabel=False, zero_based="auto", query_id=False):
    """Load a dataset in svmlight / libsvm format into a sparse CSR matrix.

    This is a text-based format with one sample per line; zero-valued
    features are not stored, making it suitable for sparse datasets.
    The first element of each line can be used to store a target
    variable to predict. It is the default format of the svmlight and
    libsvm command line programs.

    Parsing a text based source can be expensive; when working
    repeatedly on the same dataset, consider wrapping this loader with
    joblib.Memory.cache to benefit from near instantaneous loading of
    memmapped structures on subsequent calls. For large datasets an
    optimized loader such as https://github.com/mblondel/svmlight-loader
    is recommended.

    Pairwise preference constraints ("qid" in the svmlight format) are
    ignored unless query_id is set to True; they constrain which sample
    pairs are combined with pairwise loss functions (as in some
    learning-to-rank problems).

    Parameters
    ----------
    f: {str, file-like, int}
        (Path to) a file to load. Paths ending in ".gz" or ".bz2" are
        uncompressed on the fly. An integer is treated as a file
        descriptor. File-likes / descriptors are not closed by this
        function; file-like objects must be opened in binary mode.

    n_features: int or None
        The number of features to use. If None, it is inferred. Useful
        to load several files that are subsets of a bigger sliced
        dataset, where the inferred shape might vary between slices.

    multilabel: boolean, optional
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)

    zero_based: boolean or "auto", optional
        Whether column indices in f are zero-based (True) or one-based
        (False); one-based indices are shifted to zero-based. With
        "auto", a heuristic check is applied; using "auto" or True
        should always be safe.

    query_id: boolean, defaults to False
        If True, will return the query_id array for each file.

    Returns
    -------
    X: scipy.sparse matrix of shape (n_samples, n_features)

    y: ndarray of shape (n_samples,), or, in the multilabel case, a
        list of tuples of length n_samples.

    query_id: array of shape (n_samples,)
        query_id for each sample; only returned when query_id is True.

    See also
    --------
    load_svmlight_files: similar function for loading multiple files in
    this format, enforcing the same number of features/columns on all
    of them.
    """
    # Delegate to the multi-file loader with a single-element list.
    result = load_svmlight_files([f], n_features, dtype, multilabel,
                                 zero_based, query_id)
    return tuple(result)
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, basestring):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
return gzip.open(f, "rb")
elif ext == ".bz2":
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
    # Already file-like: parse it directly, leaving it open for the caller.
    if hasattr(f, "read"):
        return _load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
    # Otherwise open the path / descriptor ourselves and close it afterwards.
    # XXX remove closing when Python 2.7+/3.1+ required
    with closing(_gen_open(f)) as fh:
        return _load_svmlight_file(fh, dtype, multilabel, zero_based, query_id)
def load_svmlight_files(files, n_features=None, dtype=np.float64,
                        multilabel=False, zero_based="auto", query_id=False):
    """Load a dataset from multiple files in SVMlight format.

    Equivalent to mapping load_svmlight_file over a list of files,
    except that the results are concatenated into a single, flat list
    and the sample vectors are constrained to all have the same number
    of features.

    Pairwise preference constraints ("qid" in the svmlight format) are
    ignored unless query_id is set to True; they constrain which sample
    pairs are combined with pairwise loss functions (as in some
    learning-to-rank problems).

    Parameters
    ----------
    files : iterable over {str, file-like, int}
        (Paths of) files to load. Paths ending in ".gz" or ".bz2" are
        uncompressed on the fly. Integers are treated as file
        descriptors. File-likes / descriptors are not closed by this
        function; file-like objects must be opened in binary mode.

    n_features: int or None
        The number of features to use. If None, it is inferred from the
        maximum column index occurring in any of the files.

    multilabel: boolean, optional
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)

    zero_based: boolean or "auto", optional
        Whether column indices in f are zero-based (True) or one-based
        (False); one-based indices are shifted to zero-based. With
        "auto", a heuristic check is applied; using "auto" or True
        should always be safe.

    query_id: boolean, defaults to False
        If True, will return the query_id array for each file.

    Returns
    -------
    [X1, y1, ..., Xn, yn]
    where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
    If query_id is True, [X1, y1, q1, ..., Xn, yn, qn] is returned
    instead, with (Xi, yi, qi) from load_svmlight_file(files[i]).

    Rationale
    ---------
    When fitting a model to a matrix X_train and evaluating it against a
    matrix X_test, it is essential that X_train and X_test have the same
    number of features (X_train.shape[1] == X_test.shape[1]). This may
    not be the case if you load the files individually with
    load_svmlight_file.

    See also
    --------
    load_svmlight_file
    """
    parsed = [_open_and_load(f, dtype, multilabel, bool(zero_based),
                             bool(query_id))
              for f in files]
    # Shift one-based column indices to zero-based.  With "auto", the
    # heuristic assumes one-based when no file ever uses column 0.
    if (zero_based is False
            or zero_based == "auto" and all(np.min(p[1]) > 0 for p in parsed)):
        for p in parsed:
            indices = p[1]
            indices -= 1  # in-place shift of the parsed index array
    if n_features is None:
        # Infer the width from the largest column index seen in any file.
        n_features = max(p[1].max() for p in parsed) + 1
    result = []
    for data, indices, indptr, y, query_values in parsed:
        shape = (indptr.shape[0] - 1, n_features)
        X = sp.csr_matrix((data, indices, indptr), shape)
        X.sort_indices()
        result += X, y
        if query_id:
            result.append(query_values)
    return result
def _dump_svmlight(X, y, f, one_based, comment, query_id):
    """Write X, y (and optional query ids) to the binary file object f in
    svmlight/libsvm format.

    `one_based` shifts feature indices by one; `comment` (falsy to skip) is
    emitted as a header; each data line is encoded as ASCII bytes.
    """
    # Sparse rows expose their column indices as nonzero()[1]; dense 1-d
    # rows as nonzero()[0].
    is_sp = int(hasattr(X, "tocsr"))
    # %0.16e round-trips float64 values; %f is enough for other dtypes.
    value_pattern = u"%d:%0.16e" if X.dtype == np.float64 else u"%d:%f"
    line_pattern = u"%d" if y.dtype.kind == 'i' else u"%f"
    if query_id is not None:
        line_pattern += u" qid:%d"
    line_pattern += u" %s\n"
    if comment:
        f.write("# Generated by dump_svmlight_file from scikit-learn %s\n"
                % __version__)
        f.write("# Column indices are %s-based\n" % ["zero", "one"][one_based])
        f.write("#\n")
        f.writelines("# %s\n" % line for line in comment.splitlines())
    for i in xrange(X.shape[0]):
        nz_indices = X[i].nonzero()[is_sp]
        row = u" ".join([value_pattern % (j + one_based, X[i, j])
                         for j in nz_indices])
        if query_id is not None:
            fields = (y[i], query_id[i], row)
        else:
            fields = (y[i], row)
        f.write((line_pattern % fields).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None):
    """Dump the dataset in svmlight / libsvm file format.
    This format is a text-based format, with one sample per line. It does
    not store zero valued features hence is suitable for sparse dataset.
    The first element of each line can be used to store a target variable
    to predict.
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training vectors, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape = [n_samples]
        Target values.
    f : string or file-like in binary mode
        If string, specifies the path that will contain the data.
        If file-like, data will be written to f. f should be opened in binary
        mode.
    zero_based : boolean, optional
        Whether column indices should be written zero-based (True) or one-based
        (False).
    comment : string, optional
        Comment to insert at the top of the file. This should be either a
        Unicode string, which will be encoded as UTF-8, or an ASCII byte
        string.
        If a comment is given, then it will be preceded by one that identifies
        the file as having been dumped by scikit-learn. Note that not all
        tools grok comments in SVMlight files.
    query_id : array-like, shape = [n_samples]
        Array containing pairwise preference constraints (qid in svmlight
        format).

    Raises
    ------
    ValueError
        If y or query_id is not 1-d with the same length as X's first
        dimension, or if the comment contains a NUL byte.
    """
    if comment is not None:
        # Convert comment string to list of lines in UTF-8.
        # If a byte string is passed, then check whether it's ASCII;
        # if a user wants to get fancy, they'll have to decode themselves.
        # Avoid mention of str and unicode types for Python 3.x compat.
        if isinstance(comment, bytes):
            comment.decode("ascii") # just for the exception
        else:
            comment = comment.encode("utf-8")
        # NOTE(review): `comment` is a byte string here; the `"\0" in comment`
        # membership test relies on Python 2 str semantics — confirm if this
        # module is ever run under Python 3.
        if "\0" in comment:
            raise ValueError("comment string contains NUL byte")
    y = np.asarray(y)
    if y.ndim != 1:
        raise ValueError("expected y of shape (n_samples,), got %r"
                         % (y.shape,))
    Xval = atleast2d_or_csr(X)
    if Xval.shape[0] != y.shape[0]:
        raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
                         " %r and %r instead." % (Xval.shape[0], y.shape[0]))
    # We had some issues with CSR matrices with unsorted indices (e.g. #1501),
    # so sort them here, but first make sure we don't modify the user's X.
    # TODO We can do this cheaper; sorted_indices copies the whole matrix.
    if Xval is X and hasattr(Xval, "sorted_indices"):
        # atleast2d_or_csr returned the caller's own matrix: take a sorted
        # copy instead of sorting in place.
        X = Xval.sorted_indices()
    else:
        # Xval is already a fresh conversion, safe to sort in place.
        X = Xval
        if hasattr(X, "sort_indices"):
            X.sort_indices()
    if query_id is not None:
        query_id = np.asarray(query_id)
        if query_id.shape[0] != y.shape[0]:
            raise ValueError("expected query_id of shape (n_samples,), got %r"
                             % (query_id.shape,))
    one_based = not zero_based
    if hasattr(f, "write"):
        _dump_svmlight(X, y, f, one_based, comment, query_id)
    else:
        with open(f, "wb") as f:
            _dump_svmlight(X, y, f, one_based, comment, query_id)
| |
# Authors: Nils Wagner, Ed Schofield, Pauli Virtanen, John Travers
"""
Tests for numerical integration.
"""
from __future__ import division, print_function, absolute_import
import numpy
from numpy import arange, zeros, array, dot, sqrt, cos, sin, eye, pi, exp, \
allclose
from scipy.lib.six import xrange
from numpy.testing import assert_, TestCase, run_module_suite, \
assert_array_almost_equal, assert_raises, assert_allclose, \
assert_array_equal, assert_equal
from scipy.integrate import odeint, ode, complex_ode
#------------------------------------------------------------------------------
# Test ODE integrators
#------------------------------------------------------------------------------
class TestOdeint(TestCase):
    """Exercise integrate.odeint on every real-valued test problem."""
    def _do_problem(self, problem):
        ts = arange(0.0, problem.stop_t, 0.05)
        sol, _info = odeint(problem.f, problem.z0, ts, full_output=True)
        assert_(problem.verify(sol, ts))
    def test_odeint(self):
        # odeint handles real systems only; skip the complex problems.
        for problem_cls in PROBLEMS:
            prob = problem_cls()
            if not prob.cmplx:
                self._do_problem(prob)
class TestOde(TestCase):
    # Check integrate.ode
    def _do_problem(self, problem, integrator, method='adams'):
        """Integrate `problem` with the given integrator and verify it."""
        # ode has callback arguments in different order than odeint
        f = lambda t, z: problem.f(z, t)
        jac = None
        if hasattr(problem, 'jac'):
            jac = lambda t, z: problem.jac(z, t)
        ig = ode(f, jac)
        # Tolerances are tightened relative to the problem's own, so that
        # integration error cannot mask a verification failure.
        ig.set_integrator(integrator,
                          atol=problem.atol/10,
                          rtol=problem.rtol/10,
                          method=method)
        ig.set_initial_value(problem.z0, t=0.0)
        z = ig.integrate(problem.stop_t)
        assert_(ig.successful(), (problem, method))
        assert_(problem.verify(array([z]), problem.stop_t), (problem, method))
    def test_vode(self):
        # Check the vode solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.cmplx:
                continue
            # 'adams' only for non-stiff problems; 'bdf' handles both.
            if not problem.stiff:
                self._do_problem(problem, 'vode', 'adams')
            self._do_problem(problem, 'vode', 'bdf')
    def test_zvode(self):
        # Check the zvode solver (complex-capable, so no cmplx skip)
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if not problem.stiff:
                self._do_problem(problem, 'zvode', 'adams')
            self._do_problem(problem, 'zvode', 'bdf')
    def test_lsoda(self):
        # Check the lsoda solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.cmplx:
                continue
            self._do_problem(problem, 'lsoda')
    def test_dopri5(self):
        # Check the dopri5 solver (explicit: no stiff problems, no Jacobian)
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.cmplx:
                continue
            if problem.stiff:
                continue
            if hasattr(problem, 'jac'):
                continue
            self._do_problem(problem, 'dopri5')
    def test_dop853(self):
        # Check the dop853 solver (explicit: no stiff problems, no Jacobian)
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.cmplx:
                continue
            if problem.stiff:
                continue
            if hasattr(problem, 'jac'):
                continue
            self._do_problem(problem, 'dop853')
    def test_concurrent_fail(self):
        # Two simultaneously active instances of the same vode/zvode/lsoda
        # solver must be detected and raise RuntimeError.
        for sol in ('vode', 'zvode', 'lsoda'):
            f = lambda t, y: 1.0
            r = ode(f).set_integrator(sol)
            r.set_initial_value(0, 0)
            r2 = ode(f).set_integrator(sol)
            r2.set_initial_value(0, 0)
            r.integrate(r.t + 0.1)
            r2.integrate(r2.t + 0.1)
            assert_raises(RuntimeError, r.integrate, r.t + 0.1)
    def test_concurrent_ok(self):
        # Interleaved use of two instances must keep their states separate:
        # r accumulates dy=1.0 over the total time it was stepped, r2 likewise.
        f = lambda t, y: 1.0
        for k in xrange(3):
            for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'):
                r = ode(f).set_integrator(sol)
                r.set_initial_value(0, 0)
                r2 = ode(f).set_integrator(sol)
                r2.set_initial_value(0, 0)
                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)
                r2.integrate(r2.t + 0.1)
                assert_allclose(r.y, 0.1)
                assert_allclose(r2.y, 0.2)
            for sol in ('dopri5', 'dop853'):
                r = ode(f).set_integrator(sol)
                r.set_initial_value(0, 0)
                r2 = ode(f).set_integrator(sol)
                r2.set_initial_value(0, 0)
                r.integrate(r.t + 0.1)
                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)
                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)
                assert_allclose(r.y, 0.3)
                assert_allclose(r2.y, 0.2)
class TestComplexOde(TestCase):
    """Exercise integrate.complex_ode on the test problems."""
    def _do_problem(self, problem, integrator, method='adams'):
        # complex_ode takes callbacks as f(t, z); the problems use f(z, t).
        def rhs(t, z):
            return problem.f(z, t)
        jac = None
        if hasattr(problem, 'jac'):
            def jac(t, z):
                return problem.jac(z, t)
        solver = complex_ode(rhs, jac)
        # Tighter tolerances than the problem's own verify() tolerances.
        solver.set_integrator(integrator,
                              atol=problem.atol / 10,
                              rtol=problem.rtol / 10,
                              method=method)
        solver.set_initial_value(problem.z0, t=0.0)
        z = solver.integrate(problem.stop_t)
        # integrate() must return the same state that .y reports.
        assert_array_equal(z, solver.y)
        assert_(solver.successful(), (problem, method))
        assert_(problem.verify(array([z]), problem.stop_t), (problem, method))
    def test_vode(self):
        # vode: 'adams' for non-stiff problems, 'bdf' for stiff ones.
        for problem_cls in PROBLEMS:
            prob = problem_cls()
            step_method = 'adams' if not prob.stiff else 'bdf'
            self._do_problem(prob, 'vode', step_method)
    def test_lsoda(self):
        # lsoda switches methods automatically; run every problem.
        for problem_cls in PROBLEMS:
            self._do_problem(problem_cls(), 'lsoda')
    def test_dopri5(self):
        # Explicit solver: skip stiff problems and those supplying a Jacobian.
        for problem_cls in PROBLEMS:
            prob = problem_cls()
            if prob.stiff or hasattr(prob, 'jac'):
                continue
            self._do_problem(prob, 'dopri5')
    def test_dop853(self):
        # Explicit solver: skip stiff problems and those supplying a Jacobian.
        for problem_cls in PROBLEMS:
            prob = problem_cls()
            if prob.stiff or hasattr(prob, 'jac'):
                continue
            self._do_problem(prob, 'dop853')
class TestSolout(TestCase):
    """Verify that integrate.ode honors solout callbacks (dopri5, dop853)."""
    def _run_solout_test(self, integrator):
        # solout must be invoked at every accepted step, from t0 to tend.
        times, states = [], []
        t0, tend = 0.0, 10.0
        y0 = [1.0, 2.0]
        def rhs(t, y):
            return [y[0] + y[1], -y[1]**2]
        def record(t, y):
            times.append(t)
            states.append(y.copy())
        solver = ode(rhs).set_integrator(integrator)
        solver.set_solout(record)
        solver.set_initial_value(y0, t0)
        final = solver.integrate(tend)
        assert_array_equal(states[0], y0)
        assert_array_equal(states[-1], final)
        assert_equal(times[0], t0)
        assert_equal(times[-1], tend)
    def test_solout(self):
        for integrator in ('dopri5', 'dop853'):
            self._run_solout_test(integrator)
    def _run_solout_break_test(self, integrator):
        # Returning -1 from solout must stop the integration early.
        times, states = [], []
        t0, tend = 0.0, 10.0
        y0 = [1.0, 2.0]
        def rhs(t, y):
            return [y[0] + y[1], -y[1]**2]
        def record(t, y):
            times.append(t)
            states.append(y.copy())
            if t > tend / 2.0:
                return -1
        solver = ode(rhs).set_integrator(integrator)
        solver.set_solout(record)
        solver.set_initial_value(y0, t0)
        final = solver.integrate(tend)
        assert_array_equal(states[0], y0)
        assert_array_equal(states[-1], final)
        assert_equal(times[0], t0)
        assert_(times[-1] > tend / 2.0)
        assert_(times[-1] < tend)
    def test_solout_break(self):
        for integrator in ('dopri5', 'dop853'):
            self._run_solout_break_test(integrator)
class TestComplexSolout(TestCase):
    """Verify complex_ode honors solout callbacks (dopri5, dop853)."""
    def _run_solout_test(self, integrator):
        # solout must be invoked at every accepted step, from t0 to tend.
        times, states = [], []
        t0, tend = 0.0, 20.0
        y0 = [0.0]
        def rhs(t, y):
            return [1.0/(t - 10.0 - 1j)]
        def record(t, y):
            times.append(t)
            states.append(y.copy())
        solver = complex_ode(rhs).set_integrator(integrator)
        solver.set_solout(record)
        solver.set_initial_value(y0, t0)
        final = solver.integrate(tend)
        assert_array_equal(states[0], y0)
        assert_array_equal(states[-1], final)
        assert_equal(times[0], t0)
        assert_equal(times[-1], tend)
    def test_solout(self):
        for integrator in ('dopri5', 'dop853'):
            self._run_solout_test(integrator)
    def _run_solout_break_test(self, integrator):
        # Returning -1 from solout must stop the integration early.
        times, states = [], []
        t0, tend = 0.0, 20.0
        y0 = [0.0]
        def rhs(t, y):
            return [1.0/(t - 10.0 - 1j)]
        def record(t, y):
            times.append(t)
            states.append(y.copy())
            if t > tend / 2.0:
                return -1
        solver = complex_ode(rhs).set_integrator(integrator)
        solver.set_solout(record)
        solver.set_initial_value(y0, t0)
        final = solver.integrate(tend)
        assert_array_equal(states[0], y0)
        assert_array_equal(states[-1], final)
        assert_equal(times[0], t0)
        assert_(times[-1] > tend / 2.0)
        assert_(times[-1] < tend)
    def test_solout_break(self):
        for integrator in ('dopri5', 'dop853'):
            self._run_solout_break_test(integrator)
#------------------------------------------------------------------------------
# Test problems
#------------------------------------------------------------------------------
class ODE:
    """
    ODE problem

    Base class for the test problems below; subclasses override the
    attributes and provide f() (and optionally jac() and verify()).
    """
    stiff = False   # whether the problem needs an implicit (bdf) method
    cmplx = False   # whether the state / rhs are complex-valued
    stop_t = 1      # integration end time (start is t=0)
    z0 = []         # initial condition
    atol = 1e-6     # absolute tolerance used by solvers and verify()
    rtol = 1e-5     # relative tolerance
class SimpleOscillator(ODE):
    r"""
    Free vibration of a simple oscillator::
        m \ddot{u} + k u = 0, u(0) = u_0 \dot{u}(0) \dot{u}_0
    Solution::
        u(t) = u_0*cos(sqrt(k/m)*t)+\dot{u}_0*sin(sqrt(k/m)*t)/sqrt(k/m)
    """
    stop_t = 1 + 0.09
    z0 = array([1.0, 0.1], float)
    k = 4.0
    m = 1.0
    def f(self, z, t):
        # First-order form: z' = A z with A = [[0, 1], [-k/m, 0]].
        A = zeros((2, 2), float)
        A[0, 1] = 1.0
        A[1, 0] = -self.k / self.m
        return dot(A, z)
    def verify(self, zs, t):
        # Compare position component against the closed-form solution.
        omega = sqrt(self.k / self.m)
        expected = self.z0[0] * cos(omega * t) + self.z0[1] * sin(omega * t) / omega
        return allclose(expected, zs[:, 0], atol=self.atol, rtol=self.rtol)
class ComplexExp(ODE):
    r"""The equation :lm:`\dot u = i u` (pure rotation in the complex plane)."""
    stop_t = 1.23*pi
    z0 = exp([1j,2j,3j,4j,5j])
    cmplx = True
    def f(self, z, t):
        # RHS: \dot z = i z
        return 1j*z
    def jac(self, z, t):
        # Constant diagonal Jacobian i*I.
        return 1j*eye(5)
    def verify(self, zs, t):
        # Exact solution: z(t) = z0 * e^{i t}
        u = self.z0 * exp(1j*t)
        return allclose(u, zs, atol=self.atol, rtol=self.rtol)
class Pi(ODE):
    r"""Integrate 1/(t + 1j) from t=-10 to t=10"""
    # The solver runs over s in [0, 20]; f() shifts by -10 to cover [-10, 10].
    stop_t = 20
    z0 = [0]
    cmplx = True
    def f(self, z, t):
        return array([1./(t - 10 + 1j)])
    def verify(self, zs, t):
        # Closed form of the integral: -2j * arctan(10).
        u = -2j*numpy.arctan(10)
        return allclose(u, zs[-1,:], atol=self.atol, rtol=self.rtol)
# All test problems exercised by the solver test classes above.
PROBLEMS = [SimpleOscillator, ComplexExp, Pi]
#------------------------------------------------------------------------------
def f(t, x):
    """Harmonic-oscillator RHS with no extra parameters: x' = (x1, -x0)."""
    return [x[1], -x[0]]
def jac(t, x):
    """Constant Jacobian of f."""
    return array([[0.0, 1.0],
                  [-1.0, 0.0]])
def f1(t, x, omega):
    """Oscillator RHS taking one scalar parameter."""
    return [omega * x[1], -omega * x[0]]
def jac1(t, x, omega):
    """Jacobian of f1 (one scalar parameter)."""
    return array([[0.0, omega],
                  [-omega, 0.0]])
def f2(t, x, omega1, omega2):
    """Oscillator RHS taking two scalar parameters."""
    return [omega1 * x[1], -omega2 * x[0]]
def jac2(t, x, omega1, omega2):
    """Jacobian of f2 (two scalar parameters)."""
    return array([[0.0, omega1],
                  [-omega2, 0.0]])
def fv(t, x, omega):
    """Oscillator RHS taking one vector (sequence) parameter."""
    return [omega[0] * x[1], -omega[1] * x[0]]
def jacv(t, x, omega):
    """Jacobian of fv (one vector parameter)."""
    return array([[0.0, omega[0]],
                  [-omega[1], 0.0]])
class ODECheckParameterUse(object):
    """Call an ode-class solver with several cases of parameter use."""
    # This class is intentionally not a TestCase subclass.
    # solver_name must be set before tests can be run with this class.
    # Set these in subclasses.
    solver_name = ''
    solver_uses_jac = False
    def _get_solver(self, f, jac):
        solver = ode(f, jac)
        kwargs = dict(atol=1e-9, rtol=1e-7)
        if self.solver_uses_jac:
            kwargs['with_jacobian'] = self.solver_uses_jac
        # XXX Shouldn't set_integrator *always* accept the keyword arg
        # 'with_jacobian', and perhaps raise an exception if it is set
        # to True if the solver can't actually use it?
        solver.set_integrator(self.solver_name, **kwargs)
        return solver
    def _check_solver(self, solver):
        # Integrate the unit oscillator over [0, pi]; the state must come
        # back as (-1, 0).
        solver.set_initial_value([1.0, 0.0], 0.0)
        solver.integrate(pi)
        assert_array_almost_equal(solver.y, [-1.0, 0.0])
    def test_no_params(self):
        self._check_solver(self._get_solver(f, jac))
    def test_one_scalar_param(self):
        solver = self._get_solver(f1, jac1)
        solver.set_f_params(1.0)
        if self.solver_uses_jac:
            solver.set_jac_params(1.0)
        self._check_solver(solver)
    def test_two_scalar_params(self):
        solver = self._get_solver(f2, jac2)
        solver.set_f_params(1.0, 1.0)
        if self.solver_uses_jac:
            solver.set_jac_params(1.0, 1.0)
        self._check_solver(solver)
    def test_vector_param(self):
        solver = self._get_solver(fv, jacv)
        solver.set_f_params([1.0, 1.0])
        if self.solver_uses_jac:
            solver.set_jac_params([1.0, 1.0])
        self._check_solver(solver)
class DOPRI5CheckParameterUse(ODECheckParameterUse, TestCase):
    # Explicit Runge-Kutta solver; configured not to use a Jacobian.
    solver_name = 'dopri5'
    solver_uses_jac = False
class DOP853CheckParameterUse(ODECheckParameterUse, TestCase):
    # Explicit Runge-Kutta solver; configured not to use a Jacobian.
    solver_name = 'dop853'
    solver_uses_jac = False
class VODECheckParameterUse(ODECheckParameterUse, TestCase):
    # Implicit-capable solver; exercises the Jacobian parameter paths too.
    solver_name = 'vode'
    solver_uses_jac = True
class ZVODECheckParameterUse(ODECheckParameterUse, TestCase):
    # Complex-valued variant of vode; exercises Jacobian parameter paths.
    solver_name = 'zvode'
    solver_uses_jac = True
class LSODACheckParameterUse(ODECheckParameterUse, TestCase):
    # Automatic stiff/non-stiff switching solver; uses the Jacobian paths.
    solver_name = 'lsoda'
    solver_uses_jac = True
if __name__ == "__main__":
    # Allow running this test module standalone via numpy's test runner.
    run_module_suite()
| |
"""Orchestrate the execution of all experiments.
The orchestrator is responsible for scheduling experiments specified in the
user-provided settings.
"""
from __future__ import division
import time
import collections
import multiprocessing as mp
import logging
import copy
import sys
import signal
import traceback
from icarus.execution import exec_experiment
from icarus.registry import TOPOLOGY_FACTORY, CACHE_PLACEMENT, CONTENT_PLACEMENT, \
CACHE_POLICY, WORKLOAD, DATA_COLLECTOR, STRATEGY
from icarus.results import ResultSet
from icarus.util import SequenceNumber, timestr
__all__ = ['Orchestrator', 'run_scenario']
logger = logging.getLogger('orchestration')
class Orchestrator(object):
    """Orchestrator.
    It is responsible for orchestrating the execution of all experiments and
    aggregate results.
    """
    def __init__(self, settings, summary_freq=4):
        """Constructor
        Parameters
        ----------
        settings : Settings
            The settings of the simulator
        summary_freq : int
            Frequency (in number of experiment) at which summary messages
            are displayed
        """
        self.settings = settings
        self.results = ResultSet()
        self.seq = SequenceNumber()
        # Sliding window of the most recent experiment durations, used for
        # the ETA estimate printed in the periodic summary.
        self.exp_durations = collections.deque(maxlen=30)
        self.n_success = 0
        self.n_fail = 0
        self.summary_freq = summary_freq
        self._stop = False
        if self.settings.PARALLEL_EXECUTION:
            self.pool = mp.Pool(settings.N_PROCESSES)
    def stop(self):
        """Stop the execution of the orchestrator

        Sets the stop flag and, in parallel mode, kills the worker pool.
        """
        logger.info('Orchestrator is stopping')
        self._stop = True
        if self.settings.PARALLEL_EXECUTION:
            self.pool.terminate()
            self.pool.join()
    def run(self):
        """Run the orchestrator.
        This call is blocking, whether multiple processes are used or not. This
        methods returns only after all experiments are executed.
        """
        # Create queue of experiment configurations
        queue = collections.deque(self.settings.EXPERIMENT_QUEUE)
        # Calculate number of experiments and number of processes
        self.n_exp = len(queue) * self.settings.N_REPLICATIONS
        self.n_proc = self.settings.N_PROCESSES \
                      if self.settings.PARALLEL_EXECUTION \
                      else 1
        logger.info('Starting simulations: %d experiments, %d process(es)'
                    % (self.n_exp, self.n_proc))
        if self.settings.PARALLEL_EXECUTION:
            # This job queue is used only to keep track of which jobs have
            # finished and which are still running. Currently this information
            # is used only to handle keyboard interrupts correctly
            job_queue = collections.deque()
            # Schedule experiments from the queue
            while queue:
                experiment = queue.popleft()
                for _ in range(self.settings.N_REPLICATIONS):
                    job_queue.append(self.pool.apply_async(run_scenario,
                            args=(self.settings, experiment,
                                  self.seq.assign(), self.n_exp),
                            callback=self.experiment_callback))
            self.pool.close()
            # This solution is probably not optimal, but at least makes
            # KeyboardInterrupt work fine, which is crucial if launching the
            # simulation remotely via screen.
            # What happens here is that we keep waiting for possible
            # KeyboardInterrupts till the last process terminates successfully.
            # We may have to wait up to 5 seconds after the last process
            # terminates before exiting, which is really negligible
            try:
                while job_queue:
                    job = job_queue.popleft()
                    while not job.ready():
                        time.sleep(5)
            except KeyboardInterrupt:
                self.pool.terminate()
                self.pool.join()
        else: # Single-process execution
            while queue:
                experiment = queue.popleft()
                for _ in range(self.settings.N_REPLICATIONS):
                    self.experiment_callback(run_scenario(self.settings,
                                    experiment, self.seq.assign(),
                                    self.n_exp))
                    if self._stop:
                        self.stop()
        logger.info('END | Planned: %d, Completed: %d, Succeeded: %d, Failed: %d',
                    self.n_exp, self.n_fail + self.n_success, self.n_success, self.n_fail)
    def experiment_callback(self, args):
        """Callback method called by run_scenario
        Parameters
        ----------
        args : tuple
            The (params, results, duration) 3-tuple returned by run_scenario,
            or None if the experiment failed.
        """
        # If args is None, that means that an exception was raised during the
        # execution of the experiment. In such case, ignore it
        if not args:
            self.n_fail += 1
            return
        # Extract parameters
        params, results, duration = args
        self.n_success += 1
        # Store results
        self.results.add(params, results)
        self.exp_durations.append(duration)
        if self.n_success % self.summary_freq == 0:
            # Number of experiments scheduled to be executed
            n_scheduled = self.n_exp - (self.n_fail + self.n_success)
            # Compute ETA from the mean duration of the recent experiments,
            # scaled by the number of cores actually usable.
            n_cores = min(mp.cpu_count(), self.n_proc)
            mean_duration = sum(self.exp_durations)/len(self.exp_durations)
            eta = timestr(n_scheduled*mean_duration/n_cores, False)
            # Print summary
            logger.info('SUMMARY | Completed: %d, Failed: %d, Scheduled: %d, ETA: %s',
                        self.n_success, self.n_fail, n_scheduled, eta)
def run_scenario(settings, params, curr_exp, n_exp):
    """Run a single scenario experiment
    Parameters
    ----------
    settings : Settings
        The simulator settings
    params : Tree
        experiment parameters tree
    curr_exp : int
        sequence number of the experiment
    n_exp : int
        Number of scheduled experiments
    Returns
    -------
    results : 3-tuple or None
        A (params, results, duration) 3-tuple. The first element is a dictionary
        which stores all the attributes of the experiment. The second element
        is a dictionary which stores the results. The third element is an
        integer expressing the wall-clock duration of the experiment (in
        seconds). None is returned if any component of the scenario cannot be
        resolved or the experiment raises, so the caller counts it as failed.
    """
    try:
        start_time = time.time()
        proc_name = mp.current_process().name
        logger = logging.getLogger('runner-%s' % proc_name)
        # Get list of metrics required
        metrics = settings.DATA_COLLECTORS
        # Copy parameters so that they can be manipulated
        tree = copy.deepcopy(params)
        # Set topology
        topology_spec = tree['topology']
        topology_name = topology_spec.pop('name')
        if topology_name not in TOPOLOGY_FACTORY:
            logger.error('No topology factory implementation for %s was found.'
                         % topology_name)
            return None
        topology = TOPOLOGY_FACTORY[topology_name](**topology_spec)
        # Set workload
        workload_spec = tree['workload']
        workload_name = workload_spec.pop('name')
        if workload_name not in WORKLOAD:
            logger.error('No workload implementation named %s was found.'
                         % workload_name)
            return None
        workload = WORKLOAD[workload_name](topology, **workload_spec)
        # Assign caches to nodes
        if 'cache_placement' in tree:
            cachepl_spec = tree['cache_placement']
            cachepl_name = cachepl_spec.pop('name')
            if cachepl_name not in CACHE_PLACEMENT:
                logger.error('No cache placement named %s was found.'
                             % cachepl_name)
                return None
            network_cache = cachepl_spec.pop('network_cache')
            # Cache budget is the cumulative number of cache entries across
            # the whole network
            cachepl_spec['cache_budget'] = workload.n_contents * network_cache
            CACHE_PLACEMENT[cachepl_name](topology, **cachepl_spec)
        # Assign contents to sources
        # If there are many contents, after doing this, performing operations
        # requiring a topology deep copy, i.e. to_directed/undirected, will
        # take long.
        contpl_spec = tree['content_placement']
        contpl_name = contpl_spec.pop('name')
        if contpl_name not in CONTENT_PLACEMENT:
            logger.error('No content placement implementation named %s was found.'
                         % contpl_name)
            return None
        CONTENT_PLACEMENT[contpl_name](topology, workload.contents, **contpl_spec)
        # caching and routing strategy definition
        strategy = tree['strategy']
        if strategy['name'] not in STRATEGY:
            logger.error('No implementation of strategy %s was found.' % strategy['name'])
            return None
        # cache eviction policy definition
        cache_policy = tree['cache_policy']
        if cache_policy['name'] not in CACHE_POLICY:
            logger.error('No implementation of cache policy %s was found.' % cache_policy['name'])
            return None
        # Configuration parameters of network model
        netconf = tree['netconf']
        # Text description of the scenario run to print on screen
        scenario = tree['desc'] if 'desc' in tree else "Description N/A"
        logger.info('Experiment %d/%d | Preparing scenario: %s', curr_exp, n_exp, scenario)
        if any(m not in DATA_COLLECTOR for m in metrics):
            logger.error('There are no implementations for at least one data collector specified')
            return None
        collectors = {m: {} for m in metrics}
        logger.info('Experiment %d/%d | Start simulation', curr_exp, n_exp)
        results = exec_experiment(topology, workload, netconf, strategy, cache_policy, collectors)
        duration = time.time() - start_time
        logger.info('Experiment %d/%d | End simulation | Duration %s.',
                    curr_exp, n_exp, timestr(duration, True))
        return (params, results, duration)
    except KeyboardInterrupt:
        logger.error('Received keyboard interrupt. Terminating')
        sys.exit(-signal.SIGINT)
    except Exception as e:
        # type(e).__name__ is reliable for any exception class; the previous
        # str(type(e)).split(...) parsing broke on nested module paths, and
        # e.message does not exist on Python 3 (deprecated since 2.6).
        err_type = type(e).__name__
        err_message = str(e)
        logger.error('Experiment %d/%d | Failed | %s: %s\n%s',
                     curr_exp, n_exp, err_type, err_message,
                     traceback.format_exc())
| |
## @file
# This file is used to be the main entrance of ECC tool
#
# Copyright (c) 2009 - 2014, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import os, time, glob, sys
import Common.EdkLogger as EdkLogger
import Database
import EccGlobalData
from MetaDataParser import *
from optparse import OptionParser
from Configuration import Configuration
from Check import Check
import Common.GlobalData as GlobalData
from Common.String import NormPath
from Common.BuildVersion import gBUILD_VERSION
from Common import BuildToolError
from Common.Misc import PathClass
from Common.Misc import DirCache
from MetaFileWorkspace.MetaFileParser import DscParser
from MetaFileWorkspace.MetaFileParser import DecParser
from MetaFileWorkspace.MetaFileParser import InfParser
from MetaFileWorkspace.MetaFileParser import Fdf
from MetaFileWorkspace.MetaFileTable import MetaFileStorage
import c
import re, string
from Exception import *
## Ecc
#
# This class is used to define Ecc main entrance
#
# @param object: Inherited from object class
#
class Ecc(object):
    def __init__(self):
        """Run the whole ECC pass: parse options, normalize workspace
        environment variables, load configuration/exception lists, build
        the database, check, report and close.  NOTE: this constructor is
        side-effect heavy -- constructing an Ecc object performs the scan.
        """
        # Version and Copyright
        self.VersionNumber = ("0.01" + " " + gBUILD_VERSION)
        self.Version = "%prog Version " + self.VersionNumber
        self.Copyright = "Copyright (c) 2009 - 2010, Intel Corporation  All rights reserved."
        self.InitDefaultConfigIni()
        self.OutputFile = 'output.txt'
        self.ReportFile = 'Report.csv'
        self.ExceptionFile = 'exception.xml'
        self.IsInit = True
        self.ScanSourceCode = True
        self.ScanMetaData = True
        self.MetaFile = ''
        self.OnlyScan = None
        # Parse the options and args
        self.ParseOption()
        #
        # Check EFI_SOURCE (Edk build convention). EDK_SOURCE will always point to ECP
        #
        WorkspaceDir = os.path.normcase(os.path.normpath(os.environ["WORKSPACE"]))
        os.environ["WORKSPACE"] = WorkspaceDir
        # Default ECP_SOURCE/EFI_SOURCE/EDK_SOURCE from the workspace when
        # the caller did not set them explicitly.
        if "ECP_SOURCE" not in os.environ:
            os.environ["ECP_SOURCE"] = os.path.join(WorkspaceDir, GlobalData.gEdkCompatibilityPkg)
        if "EFI_SOURCE" not in os.environ:
            os.environ["EFI_SOURCE"] = os.environ["ECP_SOURCE"]
        if "EDK_SOURCE" not in os.environ:
            os.environ["EDK_SOURCE"] = os.environ["ECP_SOURCE"]
        #
        # Unify case of characters on case-insensitive systems
        #
        EfiSourceDir = os.path.normcase(os.path.normpath(os.environ["EFI_SOURCE"]))
        EdkSourceDir = os.path.normcase(os.path.normpath(os.environ["EDK_SOURCE"]))
        EcpSourceDir = os.path.normcase(os.path.normpath(os.environ["ECP_SOURCE"]))
        os.environ["EFI_SOURCE"] = EfiSourceDir
        os.environ["EDK_SOURCE"] = EdkSourceDir
        os.environ["ECP_SOURCE"] = EcpSourceDir
        # Mirror the normalized paths into the shared GlobalData module so
        # the parsers see the same values.
        GlobalData.gWorkspace = WorkspaceDir
        GlobalData.gEfiSource = EfiSourceDir
        GlobalData.gEdkSource = EdkSourceDir
        GlobalData.gEcpSource = EcpSourceDir
        GlobalData.gGlobalDefines["WORKSPACE"] = WorkspaceDir
        GlobalData.gGlobalDefines["EFI_SOURCE"] = EfiSourceDir
        GlobalData.gGlobalDefines["EDK_SOURCE"] = EdkSourceDir
        GlobalData.gGlobalDefines["ECP_SOURCE"] = EcpSourceDir
        # Generate checkpoints list
        EccGlobalData.gConfig = Configuration(self.ConfigFile)
        # Generate exception list
        EccGlobalData.gException = ExceptionCheck(self.ExceptionFile)
        # Init Ecc database
        EccGlobalData.gDb = Database.Database(Database.DATABASE_PATH)
        EccGlobalData.gDb.InitDatabase(self.IsInit)
        #
        # Get files real name in workspace dir
        #
        GlobalData.gAllFiles = DirCache(GlobalData.gWorkspace)
        # Build ECC database
#        self.BuildDatabase()
        self.DetectOnlyScanDirs()
        # Start to check
        self.Check()
        # Show report
        self.GenReport()
        # Close Database
        EccGlobalData.gDb.Close()
def InitDefaultConfigIni(self):
paths = map(lambda p: os.path.join(p, 'Ecc', 'config.ini'), sys.path)
paths = (os.path.realpath('config.ini'),) + tuple(paths)
for path in paths:
if os.path.exists(path):
self.ConfigFile = path
return
self.ConfigFile = 'config.ini'
## DetectOnlyScan
#
# Detect whether only scanned folders have been enabled
#
def DetectOnlyScanDirs(self):
if self.OnlyScan == True:
OnlyScanDirs = []
# Use regex here if multiple spaces or TAB exists in ScanOnlyDirList in config.ini file
for folder in re.finditer(r'\S+', EccGlobalData.gConfig.ScanOnlyDirList):
OnlyScanDirs.append(folder.group())
if len(OnlyScanDirs) != 0:
self.BuildDatabase(OnlyScanDirs)
else:
EdkLogger.error("ECC", BuildToolError.OPTION_VALUE_INVALID, ExtraData="Use -f option need to fill specific folders in config.ini file")
else:
self.BuildDatabase()
## BuildDatabase
#
# Build the database for target
#
def BuildDatabase(self, SpeciDirs = None):
# Clean report table
EccGlobalData.gDb.TblReport.Drop()
EccGlobalData.gDb.TblReport.Create()
# Build database
if self.IsInit:
if self.ScanMetaData:
EdkLogger.quiet("Building database for Meta Data File ...")
self.BuildMetaDataFileDatabase(SpeciDirs)
if self.ScanSourceCode:
EdkLogger.quiet("Building database for Meta Data File Done!")
if SpeciDirs == None:
c.CollectSourceCodeDataIntoDB(EccGlobalData.gTarget)
else:
for specificDir in SpeciDirs:
c.CollectSourceCodeDataIntoDB(os.path.join(EccGlobalData.gTarget, specificDir))
EccGlobalData.gIdentifierTableList = GetTableList((MODEL_FILE_C, MODEL_FILE_H), 'Identifier', EccGlobalData.gDb)
EccGlobalData.gCFileList = GetFileList(MODEL_FILE_C, EccGlobalData.gDb)
EccGlobalData.gHFileList = GetFileList(MODEL_FILE_H, EccGlobalData.gDb)
## BuildMetaDataFileDatabase
#
# Build the database for meta data files
#
    def BuildMetaDataFileDatabase(self, SpecificDirs = None):
        """Parse every DEC/DSC/INF/FDF file under the scan folders into the
        ECC database, honoring SkipDirList from the configuration."""
        ScanFolders = []
        if SpecificDirs == None:
            ScanFolders.append(EccGlobalData.gTarget)
        else:
            for specificDir in SpecificDirs:
                ScanFolders.append(os.path.join(EccGlobalData.gTarget, specificDir))
        EdkLogger.quiet("Building database for meta data files ...")
        Op = open(EccGlobalData.gConfig.MetaDataFileCheckPathOfGenerateFileList, 'w+')
        #SkipDirs = Read from config file
        SkipDirs = EccGlobalData.gConfig.SkipDirList
        SkipDirString = string.join(SkipDirs, '|')
#        p = re.compile(r'.*[\\/](?:%s)[\\/]?.*' % SkipDirString)
        # NOTE(review): the live pattern below embeds "^\S" inside the
        # alternation, unlike the commented-out original above; this changes
        # which skip-directory names actually match -- confirm intended.
        p = re.compile(r'.*[\\/](?:%s^\S)[\\/]?.*' % SkipDirString)
        for scanFolder in ScanFolders:
            for Root, Dirs, Files in os.walk(scanFolder):
                if p.match(Root.upper()):
                    continue
                for Dir in Dirs:
                    Dirname = os.path.join(Root, Dir)
                    if os.path.islink(Dirname):
                        Dirname = os.path.realpath(Dirname)
                        if os.path.isdir(Dirname):
                            # symlinks to directories are treated as directories
                            Dirs.remove(Dir)
                            Dirs.append(Dirname)
                for File in Files:
                    if len(File) > 4 and File[-4:].upper() == ".DEC":
                        Filename = os.path.normpath(os.path.join(Root, File))
                        EdkLogger.quiet("Parsing %s" % Filename)
                        Op.write("%s\r" % Filename)
                        #Dec(Filename, True, True, EccGlobalData.gWorkspace, EccGlobalData.gDb)
                        self.MetaFile = DecParser(Filename, MODEL_FILE_DEC, EccGlobalData.gDb.TblDec)
                        self.MetaFile.Start()
                        continue
                    if len(File) > 4 and File[-4:].upper() == ".DSC":
                        Filename = os.path.normpath(os.path.join(Root, File))
                        EdkLogger.quiet("Parsing %s" % Filename)
                        Op.write("%s\r" % Filename)
                        #Dsc(Filename, True, True, EccGlobalData.gWorkspace, EccGlobalData.gDb)
                        self.MetaFile = DscParser(PathClass(Filename, Root), MODEL_FILE_DSC, MetaFileStorage(EccGlobalData.gDb.TblDsc.Cur, Filename, MODEL_FILE_DSC, True))
                        # always do post-process, in case of macros change
                        self.MetaFile.DoPostProcess()
                        self.MetaFile.Start()
                        self.MetaFile._PostProcess()
                        continue
                    if len(File) > 4 and File[-4:].upper() == ".INF":
                        Filename = os.path.normpath(os.path.join(Root, File))
                        EdkLogger.quiet("Parsing %s" % Filename)
                        Op.write("%s\r" % Filename)
                        #Inf(Filename, True, True, EccGlobalData.gWorkspace, EccGlobalData.gDb)
                        self.MetaFile = InfParser(Filename, MODEL_FILE_INF, EccGlobalData.gDb.TblInf)
                        self.MetaFile.Start()
                        continue
                    if len(File) > 4 and File[-4:].upper() == ".FDF":
                        Filename = os.path.normpath(os.path.join(Root, File))
                        EdkLogger.quiet("Parsing %s" % Filename)
                        Op.write("%s\r" % Filename)
                        Fdf(Filename, True, EccGlobalData.gWorkspace, EccGlobalData.gDb)
                        continue
        Op.close()
        # Commit to database
        EccGlobalData.gDb.Conn.commit()
        EdkLogger.quiet("Building database for meta data files done!")
##
#
# Check each checkpoint
#
def Check(self):
    """Run the full set of ECC checkpoints over the populated database."""
    EdkLogger.quiet("Checking ...")
    # 'Check' here presumably resolves to the checker class imported at
    # module level, not this method (Python name lookup skips class scope).
    Checker = Check()
    Checker.Check()
    EdkLogger.quiet("Checking done!")
##
#
# Generate the scan report
#
def GenReport(self):
    """Export the report table of the ECC database to the CSV report file."""
    EdkLogger.quiet("Generating report ...")
    # Dump the accumulated report rows into the configured CSV file.
    EccGlobalData.gDb.TblReport.ToCSV(self.ReportFile)
    EdkLogger.quiet("Generating report done!")
def GetRealPathCase(self, path):
    """Return *path* with each component's case corrected to the entry
    actually present on disk (one case-insensitive lookup per level).

    A bare drive specification (e.g. 'c:') is returned upper-cased.
    Components that cannot be matched on disk (or whose parent cannot be
    listed) are kept exactly as the caller supplied them, instead of the
    original behavior of dropping the component and picking up whatever
    name the directory listing ended on.

    @param path: the path whose on-disk case should be recovered.
    @return: the case-corrected path string.
    """
    TmpPath = path.rstrip(os.sep)
    PathParts = TmpPath.split(os.sep)
    if len(PathParts) == 0:
        return path
    if len(PathParts) == 1:
        if PathParts[0].strip().endswith(':'):
            return PathParts[0].upper()
        # Relative single name: resolve against the current directory.
        for Dir in os.listdir('.'):
            if Dir.upper() == PathParts[0].upper():
                return Dir
        # No match: hand the name back unchanged.
        return PathParts[0]
    if PathParts[0].strip().endswith(':'):
        PathParts[0] = PathParts[0].upper()
    ParentDir = PathParts[0]
    RealPath = ParentDir
    if PathParts[0] == '':
        # Absolute POSIX path: the leading empty part stands for the root.
        RealPath = os.sep
        ParentDir = os.sep
    PathParts.remove(PathParts[0])  # the parent has been consumed above
    for Part in PathParts:
        # Default to the caller's spelling; replaced below if a
        # case-insensitive match is found in the parent's listing.
        Match = Part
        try:
            for Dir in os.listdir(ParentDir + os.sep):
                if Dir.upper() == Part.upper():
                    Match = Dir
                    break
        except OSError:
            # Parent missing or unreadable: keep the given spelling.
            pass
        RealPath += os.sep
        RealPath += Match
        ParentDir += os.sep
        ParentDir += Match
    return RealPath
## ParseOption
#
# Parse options
#
def ParseOption(self):
    """Parse the command line, validate the workspace environment and
    transfer the recognized options into tool and global state.

    Raises a fatal EdkLogger error when WORKSPACE is unset or missing,
    when the target directory does not exist, or when -m and -s are
    both specified.
    """
    EdkLogger.quiet("Loading ECC configuration ... done")
    (Options, Target) = self.EccOptionParser()
    if Options.Workspace:
        os.environ["WORKSPACE"] = Options.Workspace
    # Check workspace environment
    if "WORKSPACE" not in os.environ:
        EdkLogger.error("ECC", BuildToolError.ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
                        ExtraData="WORKSPACE")
    else:
        EccGlobalData.gWorkspace = os.path.normpath(os.getenv("WORKSPACE"))
        if not os.path.exists(EccGlobalData.gWorkspace):
            EdkLogger.error("ECC", BuildToolError.FILE_NOT_FOUND, ExtraData="WORKSPACE = %s" % EccGlobalData.gWorkspace)
        # Store the normalized form back so later consumers see it too.
        os.environ["WORKSPACE"] = EccGlobalData.gWorkspace
    # Set log level
    self.SetLogLevel(Options)
    # Set other options
    if Options.ConfigFile is not None:
        self.ConfigFile = Options.ConfigFile
    if Options.OutputFile is not None:
        self.OutputFile = Options.OutputFile
    if Options.ReportFile is not None:
        self.ReportFile = Options.ReportFile
    if Options.ExceptionFile is not None:
        self.ExceptionFile = Options.ExceptionFile
    if Options.Target is not None:
        if not os.path.isdir(Options.Target):
            EdkLogger.error("ECC", BuildToolError.OPTION_VALUE_INVALID, ExtraData="Target [%s] does NOT exist" % Options.Target)
        else:
            # Recover the on-disk case so later path comparisons work.
            EccGlobalData.gTarget = self.GetRealPathCase(os.path.normpath(Options.Target))
    else:
        EdkLogger.warn("Ecc", EdkLogger.ECC_ERROR, "The target source tree was not specified, using current WORKSPACE instead!")
        EccGlobalData.gTarget = os.path.normpath(os.getenv("WORKSPACE"))
    if Options.keepdatabase is not None:
        self.IsInit = False
    if Options.metadata is not None and Options.sourcecode is not None:
        EdkLogger.error("ECC", BuildToolError.OPTION_CONFLICT, ExtraData="-m and -s can't be specified at one time")
    if Options.metadata is not None:
        self.ScanSourceCode = False
    if Options.sourcecode is not None:
        self.ScanMetaData = False
    if Options.folders is not None:
        self.OnlyScan = True
## SetLogLevel
#
# Set current log level of the tool based on args
#
# @param Option: The option list including log level setting
#
def SetLogLevel(self, Option):
    """Set EdkLogger's level from the parsed options.

    Precedence: -v (verbose) beats -q (quiet) beats an explicit -d
    level; the default is INFO.
    """
    if Option.verbose is not None:
        EdkLogger.SetLevel(EdkLogger.VERBOSE)
    elif Option.quiet is not None:
        EdkLogger.SetLevel(EdkLogger.QUIET)
    elif Option.debug is not None:
        # The stored debug value is offset by one inside the logger.
        EdkLogger.SetLevel(Option.debug + 1)
    else:
        EdkLogger.SetLevel(EdkLogger.INFO)
## Parse command line options
#
# Using standard Python module optparse to parse command line option of this tool.
#
# @retval Opt A optparse.Values object containing the parsed options
# @retval Args Target of build command
#
def EccOptionParser(self):
    """Build the optparse parser for the ECC tool and parse sys.argv.

    @return: (Opt, Args) -- the optparse.Values with the parsed options
             and the list of leftover positional arguments.
    """
    Parser = OptionParser(description=self.Copyright, version=self.Version, prog="Ecc.exe", usage="%prog [options]")
    Parser.add_option("-t", "--target sourcepath", action="store", type="string", dest="Target",
                      help="Check all files under the target workspace.")
    Parser.add_option("-c", "--config filename", action="store", type="string", dest="ConfigFile",
                      help="Specify a configuration file. By default, use config.ini under the ECC tool directory.")
    Parser.add_option("-o", "--outfile filename", action="store", type="string", dest="OutputFile",
                      help="Specify the name of an output file, if and only if one filename was specified.")
    Parser.add_option("-r", "--reportfile filename", action="store", type="string", dest="ReportFile",
                      help="Specify the name of a report file, if and only if one filename was specified.")
    Parser.add_option("-e", "--exceptionfile filename", action="store", type="string", dest="ExceptionFile",
                      help="Specify the name of an exception file, if and only if one filename was specified.")
    Parser.add_option("-m", "--metadata", action="store_true",
                      help="Only scan meta-data files information if this option is specified.")
    Parser.add_option("-s", "--sourcecode", action="store_true",
                      help="Only scan source code files information if this option is specified.")
    Parser.add_option("-k", "--keepdatabase", action="store_true",
                      help="The existing Ecc database will not be cleaned except report information if this option is specified.")
    Parser.add_option("-l", "--log filename", action="store", dest="LogFile", help="""If specified, the tool should emit the changes that
                      were made by the tool after printing the result message.
                      If filename, the emit to the file, otherwise emit to
                      standard output. If no modifications were made, then do not
                      create a log file, or output a log message.""")
    Parser.add_option("-q", "--quiet", action="store_true",
                      help="Disable all messages except FATAL ERRORS.")
    Parser.add_option("-v", "--verbose", action="store_true",
                      help="Turn on verbose output with informational messages printed, "
                           "including library instances selected, final dependency expression, "
                           "and warning messages, etc.")
    Parser.add_option("-d", "--debug", action="store", type="int",
                      help="Enable debug messages at specified level.")
    Parser.add_option("-w", "--workspace", action="store", type="string", dest="Workspace",
                      help="Specify workspace.")
    Parser.add_option("-f", "--folders", action="store_true",
                      help="Only scanning specified folders which are recorded in config.ini file.")
    (Opt, Args) = Parser.parse_args()
    return (Opt, Args)
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
    # Initialize log system
    EdkLogger.Initialize()
    # Keep going after errors so as many issues as possible are reported
    # in a single run.
    EdkLogger.IsRaiseError = False
    # Print the start timestamp with a zeroed duration placeholder.
    EdkLogger.quiet(time.strftime("%H:%M:%S, %b.%d %Y ", time.localtime()) + "[00:00]" + "\n")
    # NOTE(review): time.clock() measures CPU time on POSIX and was removed
    # in Python 3.8; this file otherwise uses Python-2-only constructs
    # (string.join above), so it is presumably intended for Python 2.
    StartTime = time.clock()
    # NOTE(review): this rebinds the name 'Ecc' from the class to the new
    # instance; the entire scan presumably runs inside Ecc's constructor,
    # since no further methods are invoked here.
    Ecc = Ecc()
    FinishTime = time.clock()
    # Elapsed time rendered as MM:SS for the closing banner.
    BuildDuration = time.strftime("%M:%S", time.gmtime(int(round(FinishTime - StartTime))))
    EdkLogger.quiet("\n%s [%s]" % (time.strftime("%H:%M:%S, %b.%d %Y", time.localtime()), BuildDuration))
| |
#!/usr/bin/env python
# Copyright 2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
VM change listener (started as a part of vmdkops service).
It monitors VM poweroff events and detaches the DVS managed
volumes from the VM and updates the status in KV
'''
import logging
import os
import os.path
import atexit
import time
import sys
import threadutils
import log_config
import vmdk_utils
import vmdk_ops
from pyVmomi import VmomiSupport, vim, vmodl
# vim api version used - version11
# Capture hostd connection exception.
if sys.version_info.major < 3:
# python 2.x
import httplib
RemoteDisconnected = httplib.HTTPException
else:
# python 3.x
import http.client
RemoteDisconnected = http.client.RemoteDisconnected
VM_POWERSTATE = 'runtime.powerState'
POWERSTATE_POWEROFF = 'poweredOff'
HOSTD_RECONNECT_INTERVAL = 2 #approx time for hostd to comeup is 10-15 seconds
HOSTD_RECONNECT_ATTEMPT = 5
def get_propertycollector():
    """
    Connect to hostd, retrying with exponential backoff, and create a
    property collector filtered on VM power-state changes.

    Returns a (property collector, error message) tuple; exactly one of
    the two is None.
    """
    si = vmdk_ops.get_si()
    reconnect_interval = HOSTD_RECONNECT_INTERVAL
    for _ in range(HOSTD_RECONNECT_ATTEMPT):
        if si:
            break
        # If hostd is not up yet, sleep for a while and try again.
        # (logging.warning instead of the deprecated logging.warn alias.)
        logging.warning("VMChangeListener couldn't connect to hostd.")
        logging.warning("Retrying after %s seconds", reconnect_interval)
        time.sleep(reconnect_interval)
        si = vmdk_ops.get_si()
        # exponential backoff for next retry
        reconnect_interval += reconnect_interval
    # Proceed further only after you get si instance
    if not si:
        # could not connect to hostd even after retries
        # Something is seriously wrong
        return None, "Unable to connect to hostd. Verify that vmware-hostd is running."
    pc = si.content.propertyCollector
    err_msg = create_vm_powerstate_filter(pc, si.content.rootFolder)
    if err_msg:
        # Retrying connection to hostd won't make this error go away. Returning.
        return None, err_msg
    return pc, None
def start_vm_changelistener():
    """
    Listen to power state changes of VMs running on the current host.

    On a hostd connection drop a fresh service instance and property
    collector are created (the old ones carry stale authentication) and
    listening resumes; returns when the vmdkops process is exiting or
    the listener cannot be started.
    """
    threadutils.set_thread_name("VMChangeListener")
    # Loop instead of the previous self-recursion so repeated hostd
    # restarts cannot grow the call stack without bound.
    while True:
        pc, error_msg = get_propertycollector()
        if error_msg:
            logging.warning("Could not start VM Listener: %s", error_msg)
            return
        # Blocks listening for changes until an exception ends the wait.
        ex = listen_vm_propertychange(pc)
        # hostd is down: rebuild the connection and keep listening.
        if isinstance(ex, RemoteDisconnected):
            logging.error("VMChangeListener: Hostd connection error %s", str(ex))
            continue
        # vmdkops process is exiting. Return.
        if isinstance(ex, vmodl.fault.RequestCanceled):
            logging.info("VMChangeListener thread exiting")
        return
def create_vm_powerstate_filter(pc, from_node):
    """
    Register a property-collector filter that reports power-state
    changes for every VM reachable from *from_node*.

    Returns None on success, or an error-message string on failure.
    """
    spec = vmodl.query.PropertyCollector.FilterSpec()
    # Traverse the inventory starting at from_node via the vmFolder spec.
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(obj=from_node,
                                                        selectSet=vm_folder_traversal())
    spec.objectSet.append(obj_spec)
    # Watch only the power-state property of VirtualMachine objects.
    prop_spec = vmodl.query.PropertyCollector.PropertySpec(type=vim.VirtualMachine, all=False)
    prop_spec.pathSet.append(VM_POWERSTATE)
    spec.propSet.append(prop_spec)
    try:
        created = pc.CreateFilter(spec, True)
        # Tear the filter down again when the process exits.
        atexit.register(created.Destroy)
        return None
    except Exception as e:
        message = "Problem creating PropertyCollector filter: {}".format(str(e))
        logging.error(message)
        return message
def listen_vm_propertychange(pc):
    """
    Waits for updates on powerstate of VMs. If powerstate is poweroff,
    detach the dvs managed volumes attached to VM.

    Blocks indefinitely on the property collector *pc*; returns only
    when hostd disconnects or the wait is cancelled, handing the caller
    the exception instance so it can decide whether to reconnect.
    """
    logging.info("VMChangeListener thread started")
    # An empty version string asks WaitForUpdates for the initial state.
    version = ''
    while True:
        try:
            result = pc.WaitForUpdates(version)
            # process the updates result
            for filterSet in result.filterSet:
                for objectSet in filterSet.objectSet:
                    if objectSet.kind != 'modify':
                        continue
                    for change in objectSet.changeSet:
                        # if the event was powerOff for a VM, set the status of all
                        # docker volumes attached to the VM to be detached
                        if change.name != VM_POWERSTATE or change.val != POWERSTATE_POWEROFF:
                            continue
                        moref = getattr(objectSet, 'obj', None)
                        # Do we need to alert the admin? how?
                        if not moref:
                            logging.error("Could not retrieve the VM managed object.")
                            continue
                        logging.info("VM poweroff change found for %s", moref.config.name)
                        set_device_detached(moref)
            # Remember the version so the next wait reports only new changes.
            version = result.version
        # Capture hostd down exception; the caller reconnects with a fresh SI.
        except RemoteDisconnected as e:
            return e
        # main vmdkops process exits.
        except vmodl.fault.RequestCanceled as e:
            return e
        except Exception as e:
            # Any other failure is logged and the wait is retried.
            # Do we need to alert the admin? how?
            logging.error("VMChangeListener: error %s", str(e))
def vm_folder_traversal():
    """
    Build the traversal spec for the property collector to traverse vmFolder
    """
    TraversalSpec = vmodl.query.PropertyCollector.TraversalSpec
    SelectionSpec = vmodl.query.PropertyCollector.SelectionSpec
    # Step from a Datacenter into its vmFolder branch ...
    dc_to_vmf = TraversalSpec(name='dcToVmf', type=vim.Datacenter, path='vmFolder', skip=False)
    dc_to_vmf.selectSet.append(SelectionSpec(name='visitFolders'))
    # ... and recurse through nested folders, re-entering both rules.
    visit_folders = TraversalSpec(name='visitFolders', type=vim.Folder, path='childEntity', skip=False)
    visit_folders.selectSet.extend((SelectionSpec(name='visitFolders'),
                                    SelectionSpec(name='dcToVmf')))
    return SelectionSpec.Array((visit_folders, dc_to_vmf))
def set_device_detached(vm_moref):
    """
    For all devices in device_list, if it is a DVS volume, set its status to detached in KV
    """
    for device in vm_moref.config.hardware.device:
        vmdk_path = vmdk_utils.find_dvs_volume(device)
        if not vmdk_path:
            # Not a dvs managed volume; nothing to do for this device.
            continue
        logging.info("Setting detach status for %s", vmdk_path)
        # disk detach and update the status in KV
        error = vmdk_ops.disk_detach_int(vmdk_path, vm_moref, device)
        if error:
            logging.error("Could not detach %s for %s: %s", vmdk_path,
                          vm_moref.config.name, error)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.